From f00cc5da85405dbe6a9f26b03a93194eae590f30 Mon Sep 17 00:00:00 2001
From: syiming
Date: Sun, 14 Jun 2020 20:33:49 +0800
Subject: [PATCH 001/128] Initial commit for faster rcnn resnet v1 fpn feature
 extractor.

1. Setup code structure for fpn feature extractor

---
 ...n_resnet_v1_fpn_keras_feature_extractor.py | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py

diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
new file mode 100644
index 000000000..08c38e4b4
--- /dev/null
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
@@ -0,0 +1,39 @@
+"""Faster RCNN Keras-based Resnet v1 FPN Feature Extractor."""
+
+from object_detection.meta_architectures import faster_rcnn_meta_arch
+from object_detection.models import feature_map_generators
+from object_detection.models.keras_models import resnet_v1
+from object_detection.models.keras_models import model_utils
+from object_detection.utils import ops
+
+
+class FasterRCNNFPNKerasFeatureExtractor(
+    faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
+  """Faster RCNN Feature Extractor using Keras-based Resnet v1 FPN features."""
+
+  def __init__(self, ...):
+    # TODO: constructor
+    pass
+
+  def build(self, ...):
+    # TODO: Build the structure; should be very similar to ssd_*_fpn_keras_feature_extractor.py
+    # ResNet-101 (object_detection.models.keras_models)
+    # object_detection.models.feature_map_generators
+    pass
+
+  def preprocess(self, ...):
+    # TODO: should be the same as the other feature extractors
+    pass
+
+  def _extract_proposal_features(self, ...):
+    # TODO: Extracts first stage RPN features
+    # Fpn_feature_levels
+    pass
+
+  def _extract_box_classifier_features(self, ...):
+    # TODO: Extracts second stage box classifier features.
+    pass
+
+  def restore_from_classification_checkpoint_fn(self, ...):
+    # follow the non-FPN version
+    pass
--
GitLab

From 69ce1c457b90bc51fc46c7ef12812f2feb8c7c0d Mon Sep 17 00:00:00 2001
From: syiming
Date: Thu, 18 Jun 2020 00:51:26 +0800
Subject: [PATCH 002/128] draft for faster rcnn resnet v1 fpn feature
 extractor

---
 ...n_resnet_v1_fpn_keras_feature_extractor.py | 246 ++++++++++++++++--
 1 file changed, 218 insertions(+), 28 deletions(-)

diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
index 08c38e4b4..5efea8c87 100644
--- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
@@ -1,39 +1,229 @@
-"""Faster RCNN Keras-based Resnet v1 FPN Feature Extractor."""
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== + +"""Faster RCNN Keras-based Resnet V1 FPN Feature Extractor.""" + +import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 from object_detection.models.keras_models import model_utils from object_detection.utils import ops +from object_detection.utils import shape_utils +_RESNET_MODEL_OUTPUT_LAYERS = { + 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block6_out', 'conv5_block3_out'], + 'resnet_v1_101': ['conv2_block3_out', 'conv3_block4_out', + 'conv4_block23_out', 'conv5_block3_out'], + 'resnet_v1_152': ['conv2_block3_out', 'conv3_block8_out', + 'conv4_block36_out', 'conv5_block3_out'], +} -class FasterRCNNFPNKerasFeatureExtractor( - faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): - """Faster RCNN Feature Extractor using Keras-based Resnet v1 FPN features.""" - def __init__(self, ...): - # TODO: constructor - pass +class FasterRCNNResnetV1FPNKerasFeatureExtractor( + faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" - def build(self, ...): - # TODO: Build the structure, should be very similar as ssd_*_fpn_keras_feature_extractor.py - # ResNet-101 (object_detection.models.keras_models) - # object_detection.models.feature_map_generators - pass - - def preprocess(self, ...): - # TODO: should be the same as others - pass - - def _extract_proposal_features(self, ...): - # TODO: Extracts first stage RPN features - # Fpn_feature_levels - pass - - def _extract_box_classifier_features(self, ...): - # TODO: Extracts second stage box classifier features. - pass + def __init__(self, + is_training, + first_stage_features_stride, + conv_hyperparams, + min_depth, + depth_multiplier, + resnet_v1_base_model, + resnet_v1_base_model_name, + batch_norm_trainable=False, + weight_decay=0.0, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + # FIXME: fix doc string for fpn min level and fpn max level + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + + conv_hyperparameters: a `hyperparams_builder.KerasLayerHyperparams` object + containing convolution hyperparameters for the layers added on top of + the base feature extractor. + min_depth: Minimum number of filters in the convolutional layers. + depth_multiplier: The depth multiplier to modify the number of filters + in the convolutional layers. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. + + batch_norm_trainable: See base class. + weight_decay: See base class. + + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to MobileNet v1 layers + {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, + Conv2d_13_pointwise}, respectively. + fpn_max_level: the smallest resolution feature map to construct or use in + FPN. FPN constructions uses features maps starting from fpn_min_level + upto the fpn_max_level. 
In the case that there are not enough feature + maps in the backbone network, additional feature maps are created by + applying stride 2 convolutions until we get the desired number of fpn + levels. + additional_layer_depth: additional feature map layer channel depth. + override_base_feature_extractor_hyperparams: Whether to override + hyperparameters of the base feature extractor with the one from + `conv_hyperparams`. + + Raises: + + """ + super(FasterRCNNResnetV1FPNKerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay) + self._conv_hyperparams = conv_hyperparams + self._min_depth = min_depth + self._depth_multiplier = depth_multiplier + self._additional_layer_depth = additional_layer_depth + self._freeze_batchnorm = (not batch_norm_trainable) + self._override_base_feature_extractor_hyperparams = + override_base_feature_extractor_hyperparams + self._fpn_min_level = fpn_min_level + self._fpn_max_level = fpn_max_level + self._resnet_v1_base_model = resnet_v1_base_model + self._resnet_v1_base_model_name = resnet_v1_base_model_name + self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] + self.classification_backbone = None + self._fpn_features_generator = None + self._coarse_feature_layers = [] + + def build(self,): + # TODO: Refine doc string + """Build Resnet V1 FPN architecture.""" + full_resnet_v1_model = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + classes=None, + weights=None, + include_top=False) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] + outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + self.classification_backbone = tf.keras.Model( + inputs=full_resnet_v1_model.inputs, + outputs=outputs) + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + # Construct coarse feature layers + depth = self._depth_fn(self._additional_layer_depth) + for i in range(self._base_fpn_max_level, self._fpn_max_level): + layers = [] + layer_name = 'bottom_up_block{}'.format(i) + layers.append( + tf.keras.layers.Conv2D( + depth, + [3, 3], + padding='SAME', + strides=2, + name=layer_name + '_conv', + **self._conv_hyperparams.params())) + layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + self._conv_hyperparams.build_activation_layer( + name=layer_name)) + self._coarse_feature_layers.append(layers) + self.built = True + + def preprocess(self, resized_inputs): + """Faster R-CNN Resnet V1 preprocessing. 
+ + VGG style channel mean subtraction as described here: + https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md + Note that if the number of channels is not equal to 3, the mean subtraction + will be skipped and the original resized_inputs will be returned. + + Args: + resized_inputs: A [batch, height_in, width_in, channels] float32 tensor + representing a batch of images with values between 0 and 255.0. + + Returns: + preprocessed_inputs: A [batch, height_out, width_out, channels] float32 + tensor representing a batch of images. + + """ + if resized_inputs.shape.as_list()[3] == 3: + channel_means = [123.68, 116.779, 103.939] + return resized_inputs - [[channel_means]] + else: + return resized_inputs - def restore_from_classification_checkpoint_fn(self, ...): - # follow the none fpn version - pass + def _extract_proposal_features(self, preprocessed_inputs, scope=None): + # TODO: doc string + """""" + preprocessed_inputs = shape_utils.check_min_image_dim( + 129, preprocessed_inputs) + + with tf.name_scope(scope): + with tf.name_scope('ResnetV1FPN'): + image_features = self.classification_backbone(preprocessed_inputs) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, image_features))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self._fpn_features_generator(fpn_input_image_features) + + return fpn_features + + + def _extract_box_classifier_features(self, proposal_feature_maps, scope=None): + with tf.name_scope(scope): + with tf.name_scope('ResnetV1FPN'): + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(proposal_feature_maps['top_down_block{}'.format(level-1)]) + self.last_feature_map = proposal_feature_maps['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(self.last_feature_map) + + return feature_maps + -- GitLab From 33a4c064d432737116a3e101d2a15217d54971a6 Mon Sep 17 00:00:00 2001 From: syiming Date: Thu, 18 Jun 2020 07:12:34 +0800 Subject: [PATCH 003/128] draft for faster rcnn resnet v1 fpn feature extractor --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 166 ++++++++++++------ 1 file changed, 114 insertions(+), 52 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 5efea8c87..2cb0b3827 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -88,8 +88,10 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( `conv_hyperparams`. Raises: - + ValueError: If `first_stage_features_stride` is not 8 or 16. 
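+
+      Example (an illustrative sketch only; `hyperparams` stands in for a
+      `hyperparams_builder.KerasLayerHyperparams` instance built elsewhere):
+
+        extractor = FasterRCNNResnetV1FPNKerasFeatureExtractor(
+            is_training=False,
+            first_stage_features_stride=16,
+            conv_hyperparams=hyperparams,
+            min_depth=16,
+            depth_multiplier=1.0,
+            resnet_v1_base_model=resnet_v1.resnet_v1_50,
+            resnet_v1_base_model_name='resnet_v1_50')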
""" + if first_stage_features_stride != 8 and first_stage_features_stride != 16: + raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNResnetV1FPNKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, @@ -109,39 +111,38 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None - self._coarse_feature_layers = [] def build(self,): # TODO: Refine doc string """Build Resnet V1 FPN architecture.""" - full_resnet_v1_model = self._resnet_v1_base_model( - batchnorm_training=self._train_batch_norm, - conv_hyperparams=(self._conv_hyperparams - if self._override_base_feature_extractor_hyperparams - else None), - min_depth=self._min_depth, - depth_multiplier=self._depth_multiplier, - classes=None, - weights=None, - include_top=False) - output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] - outputs = [full_resnet_v1_model.get_layer(output_layer_name).output - for output_layer_name in output_layers] - self.classification_backbone = tf.keras.Model( - inputs=full_resnet_v1_model.inputs, - outputs=outputs) - self._depth_fn = lambda d: max( - int(d * self._depth_multiplier), self._min_depth) - self._base_fpn_max_level = min(self._fpn_max_level, 5) - self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level - self._fpn_features_generator = ( - feature_map_generators.KerasFpnTopDownFeatureMaps( - num_levels=self._num_levels, - depth=self._depth_fn(self._additional_layer_depth), - is_training=self._is_training, - conv_hyperparams=self._conv_hyperparams, - freeze_batchnorm=self._freeze_batchnorm, - name='FeatureMaps')) + # full_resnet_v1_model = self._resnet_v1_base_model( + # batchnorm_training=self._train_batch_norm, + # conv_hyperparams=(self._conv_hyperparams + # if self._override_base_feature_extractor_hyperparams + # else None), + # min_depth=self._min_depth, + # depth_multiplier=self._depth_multiplier, + # classes=None, + # weights=None, + # include_top=False) + # output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] + # outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + # for output_layer_name in output_layers] + # self.classification_backbone = tf.keras.Model( + # inputs=full_resnet_v1_model.inputs, + # outputs=outputs) + # self._depth_fn = lambda d: max( + # int(d * self._depth_multiplier), self._min_depth) + # self._base_fpn_max_level = min(self._fpn_max_level, 5) + # self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + # self._fpn_features_generator = ( + # feature_map_generators.KerasFpnTopDownFeatureMaps( + # num_levels=self._num_levels, + # depth=self._depth_fn(self._additional_layer_depth), + # is_training=self._is_training, + # conv_hyperparams=self._conv_hyperparams, + # freeze_batchnorm=self._freeze_batchnorm, + # name='FeatureMaps')) # Construct coarse feature layers depth = self._depth_fn(self._additional_layer_depth) for i in range(self._base_fpn_max_level, self._fpn_max_level): @@ -188,16 +189,75 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( else: return resized_inputs - def _extract_proposal_features(self, preprocessed_inputs, scope=None): - # TODO: doc string - """""" - preprocessed_inputs = shape_utils.check_min_image_dim( - 129, preprocessed_inputs) + # def _extract_proposal_features(self, preprocessed_inputs, scope=None): + # # TODO: doc string + # 
"""""" + # preprocessed_inputs = shape_utils.check_min_image_dim( + # 129, preprocessed_inputs) + + # with tf.name_scope(scope): + # with tf.name_scope('ResnetV1FPN'): + # image_features = self.classification_backbone(preprocessed_inputs) + + # feature_block_list = [] + # for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + # feature_block_list.append('block{}'.format(level - 1)) + # feature_block_map = dict( + # list(zip(self._resnet_block_names, image_features))) + # fpn_input_image_features = [ + # (feature_block, feature_block_map[feature_block]) + # for feature_block in feature_block_list] + # fpn_features = self._fpn_features_generator(fpn_input_image_features) + + # return fpn_features + + def get_proposal_feature_extractor_model(self, name=None): + """Returns a model that extracts first stage RPN features. + + Extracts features using the first half of the Resnet v1 network. + + Args: + name: A scope name to construct all variables within. - with tf.name_scope(scope): + Returns: + A Keras model that takes preprocessed_inputs: + A [batch, height, width, channels] float32 tensor + representing a batch of images. + """ + with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): - image_features = self.classification_backbone(preprocessed_inputs) + full_resnet_v1_model = self._resnet_v1_base_model( + batchnorm_training=self._train_batch_norm, + conv_hyperparams=(self._conv_hyperparams + if self._override_base_feature_extractor_hyperparams + else None), + min_depth=self._min_depth, + depth_multiplier=self._depth_multiplier, + classes=None, + weights=None, + include_top=False) + output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] + outputs = [full_resnet_v1_model.get_layer(output_layer_name).output + for output_layer_name in output_layers] + self.classification_backbone = tf.keras.Model( + inputs=full_resnet_v1_model.inputs, + outputs=outputs) + backbone_outputs = self.classification_backbone(full_resnet_v1_model.inputs) + # construct FPN feature generator + self._depth_fn = lambda d: max( + int(d * self._depth_multiplier), self._min_depth) + self._base_fpn_max_level = min(self._fpn_max_level, 5) + self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level + self._fpn_features_generator = ( + feature_map_generators.KerasFpnTopDownFeatureMaps( + num_levels=self._num_levels, + depth=self._depth_fn(self._additional_layer_depth), + is_training=self._is_training, + conv_hyperparams=self._conv_hyperparams, + freeze_batchnorm=self._freeze_batchnorm, + name='FeatureMaps')) + feature_block_list = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) @@ -208,22 +268,24 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) - return fpn_features - + feature_extractor_model = tf.keras.models.Model( + inputs=self.full_resnet_v1_model.inputs, outputs=fpn_features) + return feature_extractor_model - def _extract_box_classifier_features(self, proposal_feature_maps, scope=None): - with tf.name_scope(scope): - with tf.name_scope('ResnetV1FPN'): - feature_maps = [] - for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - feature_maps.append(proposal_feature_maps['top_down_block{}'.format(level-1)]) - self.last_feature_map = proposal_feature_maps['top_down_block{}'.format( - self._base_fpn_max_level - 1)] + # def _extract_box_classifier_features(self, 
proposal_feature_maps, scope=None):
+  #   with tf.name_scope(scope):
+  #     with tf.name_scope('ResnetV1FPN'):
+  #       feature_maps = []
+  #       for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
+  #         feature_maps.append(proposal_feature_maps['top_down_block{}'.format(level-1)])
+  #       self.last_feature_map = proposal_feature_maps['top_down_block{}'.format(
+  #           self._base_fpn_max_level - 1)]
+
+  #       for coarse_feature_layers in self._coarse_feature_layers:
+  #         for layer in coarse_feature_layers:
+  #           last_feature_map = layer(last_feature_map)
+  #       feature_maps.append(self.last_feature_map)
+
+  #       return feature_maps
+
+  def get_box_classifier_feature_extractor_model(self, name=None):
--
GitLab

From e649274ea724d476cdc76ed01624dd9378d27a08 Mon Sep 17 00:00:00 2001
From: syiming
Date: Fri, 19 Jun 2020 00:31:46 +0800
Subject: [PATCH 004/128] add separate classes for resnet 50 101 152

---
 ...n_resnet_v1_fpn_keras_feature_extractor.py | 144 +++++++++++++++++-
 1 file changed, 142 insertions(+), 2 deletions(-)

diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
index 2cb0b3827..658309013 100644
--- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
@@ -40,12 +40,12 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(

   def __init__(self,
                is_training,
+               resnet_v1_base_model,
+               resnet_v1_base_model_name,
                first_stage_features_stride,
                conv_hyperparams,
                min_depth,
                depth_multiplier,
-               resnet_v1_base_model,
-               resnet_v1_base_model_name,
                batch_norm_trainable=False,
                weight_decay=0.0,
                fpn_min_level=3,
@@ -289,3 +289,143 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
     # return feature_maps

   def get_box_classifier_feature_extractor_model(self, name=None):
+
+
+
+class FasterRCNNResnet50FPNKerasFeatureExtractor(
+    FasterRCNNResnetV1FPNKerasFeatureExtractor):
+  """Faster RCNN with Resnet50 FPN feature extractor implementation."""
+
+  def __init__(self,
+               is_training,
+               first_stage_features_stride=16,
+               conv_hyperparams=None,
+               min_depth=16,
+               depth_multiplier=1,
+               batch_norm_trainable=False,
+               weight_decay=0.0,
+               fpn_min_level=3,
+               fpn_max_level=7,
+               additional_layer_depth=256,
+               override_base_feature_extractor_hyperparams=False):
+    """Constructor.
+
+    Args:
+      is_training: See base class.
+      first_stage_features_stride: See base class.
+      conv_hyperparams: See base class.
+      min_depth: See base class.
+      depth_multiplier: See base class.
+      batch_norm_trainable: See base class.
+      weight_decay: See base class.
+      fpn_min_level: See base class.
+      fpn_max_level: See base class.
+      additional_layer_depth: See base class.
+      override_base_feature_extractor_hyperparams: See base class.
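+
+    Example (an illustrative sketch; assumes `hyperparams` is a valid
+    `hyperparams_builder.KerasLayerHyperparams` object):
+
+      extractor = FasterRCNNResnet50FPNKerasFeatureExtractor(
+          is_training=False,
+          conv_hyperparams=hyperparams)
+      rpn_model = extractor.get_proposal_feature_extractor_model(
+          name='FirstStageFeatureExtractor')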
+ """ + super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparameters, + min_depth=min_depth, + depth_multiplier=depth_multiplier, + resnet_v1_base_model=resnet_v1.resnet_v1_50, + resnet_v1_base_model_name='resnet_v1_50', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) + +class FasterRCNNResnet101FPNKerasFeatureExtractor( + FasterRCNNResnetV1FPNKerasFeatureExtractor): + """Faster RCNN with Resnet101 FPN feature extractor implementation.""" + def __init__(self, + is_training, + first_stage_features_stride=16, + conv_hyperparams=None, + min_depth=16, + depth_multiplier=1, + batch_norm_trainable=False, + weight_decay=0.0, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + conv_hyperparams: See base class. + min_depth: See base class. + depth_multiplier: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + fpn_min_level: See base class. + fpn_max_level: See base class. + additional_layer_depth: See base class. + override_base_feature_extractor_hyperparams: See base class. + """ + super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparameters, + min_depth=min_depth, + depth_multiplier=depth_multiplier, + resnet_v1_base_model=resnet_v1.resnet_v1_101, + resnet_v1_base_model_name='resnet_v1_101', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) + + +class FasterRCNNResnet152FPNKerasFeatureExtractor( + FasterRCNNResnetV1FPNKerasFeatureExtractor): + """Faster RCNN with Resnet152 FPN feature extractor implementation.""" + + def __init__(self, + is_training, + first_stage_features_stride=16, + conv_hyperparams=None, + min_depth=16, + depth_multiplier=1, + batch_norm_trainable=False, + weight_decay=0.0, + fpn_min_level=3, + fpn_max_level=7, + additional_layer_depth=256, + override_base_feature_extractor_hyperparams=False): + """Constructor. + + Args: + is_training: See base class. + first_stage_features_stride: See base class. + conv_hyperparams: See base class. + min_depth: See base class. + depth_multiplier: See base class. + batch_norm_trainable: See base class. + weight_decay: See base class. + fpn_min_level: See base class. + fpn_max_level: See base class. + additional_layer_depth: See base class. + override_base_feature_extractor_hyperparams: See base class. 
+ """ + super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + is_training=is_training, + first_stage_features_stride=first_stage_features_stride, + conv_hyperparams=conv_hyperparameters, + min_depth=min_depth, + depth_multiplier=depth_multiplier, + resnet_v1_base_model=resnet_v1.resnet_v1_152, + resnet_v1_base_model_name='resnet_v1_152', + batch_norm_trainable=batch_norm_trainable, + weight_decay=weight_decay, + fpn_min_level=fpn_min_level, + fpn_max_level=fpn_max_level, + additional_layer_depth=additional_layer_depth, + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) \ No newline at end of file -- GitLab From cc2642a9d6d321b2793e2b0a1216a01914495774 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 02:11:17 +0800 Subject: [PATCH 005/128] draft for get_box_classifier_feature_extractor_model --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 658309013..a36e61ec6 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -289,7 +289,30 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( # return feature_maps def get_box_classifier_feature_extractor_model(self, name=None): + """Returns a model that extracts second stage box classifier features. + TODO: doc + + Args: + name: A scope name to construct all variables within. + + Returns: + A Keras model that takes proposal_feature_maps: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, crop_height, crop_width, depth] + representing the feature map cropped to each proposal. + And returns proposal_classifier_features: + A 4-D float tensor with shape + [batch_size * self.max_num_proposals, height, width, depth] + representing box classifier features for each proposal. 
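+
+      Example (an illustrative sketch; `extractor` is an instance of this
+      class and `proposal_feature_maps` has the shape described above):
+
+        model = extractor.get_box_classifier_feature_extractor_model(
+            name='SecondStageFeatureExtractor')
+        proposal_classifier_features = model(proposal_feature_maps)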
+ """ + with tf.name_scope(name): + with tf.name_scope('ResnetV1FPN'): + feature_extractor_model = tf.keras.models.Sequential([ + Dense(unit=1024, activation='ReLU'), + Dense(unit=1024, activation='ReLU') + ]) + return feature_extractor_model class FasterRCNNResnet50FPNKerasFeatureExtractor( @@ -428,4 +451,4 @@ class FasterRCNNResnet152FPNKerasFeatureExtractor( fpn_min_level=fpn_min_level, fpn_max_level=fpn_max_level, additional_layer_depth=additional_layer_depth, - override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) \ No newline at end of file + override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) -- GitLab From 37b9a1362e348a7e1f9db01c25da02c92f5b7cfc Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 02:19:07 +0800 Subject: [PATCH 006/128] remove unused code --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 114 ++---------------- 1 file changed, 11 insertions(+), 103 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index a36e61ec6..03bfcdb15 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -102,7 +102,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( self._depth_multiplier = depth_multiplier self._additional_layer_depth = additional_layer_depth self._freeze_batchnorm = (not batch_norm_trainable) - self._override_base_feature_extractor_hyperparams = + self._override_base_feature_extractor_hyperparams = \ override_base_feature_extractor_hyperparams self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level @@ -112,60 +112,6 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( self.classification_backbone = None self._fpn_features_generator = None - def build(self,): - # TODO: Refine doc string - """Build Resnet V1 FPN architecture.""" - # full_resnet_v1_model = self._resnet_v1_base_model( - # batchnorm_training=self._train_batch_norm, - # conv_hyperparams=(self._conv_hyperparams - # if self._override_base_feature_extractor_hyperparams - # else None), - # min_depth=self._min_depth, - # depth_multiplier=self._depth_multiplier, - # classes=None, - # weights=None, - # include_top=False) - # output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name] - # outputs = [full_resnet_v1_model.get_layer(output_layer_name).output - # for output_layer_name in output_layers] - # self.classification_backbone = tf.keras.Model( - # inputs=full_resnet_v1_model.inputs, - # outputs=outputs) - # self._depth_fn = lambda d: max( - # int(d * self._depth_multiplier), self._min_depth) - # self._base_fpn_max_level = min(self._fpn_max_level, 5) - # self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level - # self._fpn_features_generator = ( - # feature_map_generators.KerasFpnTopDownFeatureMaps( - # num_levels=self._num_levels, - # depth=self._depth_fn(self._additional_layer_depth), - # is_training=self._is_training, - # conv_hyperparams=self._conv_hyperparams, - # freeze_batchnorm=self._freeze_batchnorm, - # name='FeatureMaps')) - # Construct coarse feature layers - depth = self._depth_fn(self._additional_layer_depth) - for i in range(self._base_fpn_max_level, self._fpn_max_level): - layers = [] - layer_name = 'bottom_up_block{}'.format(i) - layers.append( - tf.keras.layers.Conv2D( - depth, - [3, 
3], - padding='SAME', - strides=2, - name=layer_name + '_conv', - **self._conv_hyperparams.params())) - layers.append( - self._conv_hyperparams.build_batch_norm( - training=(self._is_training and not self._freeze_batchnorm), - name=layer_name + '_batchnorm')) - layers.append( - self._conv_hyperparams.build_activation_layer( - name=layer_name)) - self._coarse_feature_layers.append(layers) - self.built = True - def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. @@ -188,28 +134,6 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( return resized_inputs - [[channel_means]] else: return resized_inputs - - # def _extract_proposal_features(self, preprocessed_inputs, scope=None): - # # TODO: doc string - # """""" - # preprocessed_inputs = shape_utils.check_min_image_dim( - # 129, preprocessed_inputs) - - # with tf.name_scope(scope): - # with tf.name_scope('ResnetV1FPN'): - # image_features = self.classification_backbone(preprocessed_inputs) - - # feature_block_list = [] - # for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - # feature_block_list.append('block{}'.format(level - 1)) - # feature_block_map = dict( - # list(zip(self._resnet_block_names, image_features))) - # fpn_input_image_features = [ - # (feature_block, feature_block_map[feature_block]) - # for feature_block in feature_block_list] - # fpn_features = self._fpn_features_generator(fpn_input_image_features) - - # return fpn_features def get_proposal_feature_extractor_model(self, name=None): """Returns a model that extracts first stage RPN features. @@ -262,32 +186,16 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_block_list.append('block{}'.format(level - 1)) feature_block_map = dict( - list(zip(self._resnet_block_names, image_features))) + list(zip(self._resnet_block_names, backbone_outputs))) fpn_input_image_features = [ (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) feature_extractor_model = tf.keras.models.Model( - inputs=self.full_resnet_v1_model.inputs, outputs=fpn_features) + inputs=full_resnet_v1_model.inputs, outputs=fpn_features) return feature_extractor_model - # def _extract_box_classifier_features(self, proposal_feature_maps, scope=None): - # with tf.name_scope(scope): - # with tf.name_scope('ResnetV1FPN'): - # feature_maps = [] - # for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - # feature_maps.append(proposal_feature_maps['top_down_block{}'.format(level-1)]) - # self.last_feature_map = proposal_feature_maps['top_down_block{}'.format( - # self._base_fpn_max_level - 1)] - - # for coarse_feature_layers in self._coarse_feature_layers: - # for layer in coarse_feature_layers: - # last_feature_map = layer(last_feature_map) - # feature_maps.append(self.last_feature_map) - - # return feature_maps - def get_box_classifier_feature_extractor_model(self, name=None): """Returns a model that extracts second stage box classifier features. 
@@ -309,8 +217,8 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): feature_extractor_model = tf.keras.models.Sequential([ - Dense(unit=1024, activation='ReLU'), - Dense(unit=1024, activation='ReLU') + tf.keras.layers.Dense(unit=1024, activation='ReLU'), + tf.keras.layers.Dense(unit=1024, activation='ReLU') ]) return feature_extractor_model @@ -346,10 +254,10 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ - super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet50FPNKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, - conv_hyperparams=conv_hyperparameters, + conv_hyperparams=conv_hyperparams, min_depth=min_depth, depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_50, @@ -391,10 +299,10 @@ class FasterRCNNResnet101FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ - super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet101FPNKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, - conv_hyperparams=conv_hyperparameters, + conv_hyperparams=conv_hyperparams, min_depth=min_depth, depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_101, @@ -438,10 +346,10 @@ class FasterRCNNResnet152FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ - super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet152FPNKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, - conv_hyperparams=conv_hyperparameters, + conv_hyperparams=conv_hyperparams, min_depth=min_depth, depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_152, -- GitLab From e899a6fa13dbe093aedb5aa651bc321424bf710d Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 18:56:02 +0800 Subject: [PATCH 007/128] Init test file for faster_rcnn_fpn_keras_feature extractor --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py new file mode 100644 index 000000000..e69de29bb -- GitLab From d26ef0e6c0ca69cebc745283042e12a6e1613990 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 22:28:42 +0800 Subject: [PATCH 008/128] add unit test for get_proposal_feature_extractor_model --- ...v1_fpn_keras_feature_extractor_tf2_test.py | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index e69de29bb..f55fe6477 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ 
b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -0,0 +1,66 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Tests for models.faster_rcnn_resnet_v1_fpn_keras_feature_extractor.""" +import unittest +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format + +from object_detection.builders import hyperparams_builder +from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_res_fpn +from object_detection.utils import tf_version +from object_detection.protos import hyperparams_pb2 + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): + + def _build_conv_hyperparams(self): + conv_hyperparams = hyperparams_pb2.Hyperparams() + conv_hyperparams_text_proto = """ + regularizer { + l2_regularizer { + } + } + initializer { + truncated_normal_initializer { + } + } + """ + text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) + return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) + + def _build_feature_extractor(self, architecture='resnet_v1_50'): + return frcnn_res_fpn.FasterRCNNResnet50FPNKerasFeatureExtractor( + is_training=False + , + conv_hyperparams=self._build_conv_hyperparams(), + first_stage_features_stride=16, + batch_norm_trainable=False, + weight_decay=0.0) + + def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [2, 160, 160, 3], maxval=255, dtype=tf.float32) + rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shapes = [tf.shape(rpn_feature_map) + for name, rpn_feature_map in rpn_feature_maps.items()] + + self.assertAllEqual(features_shapes[0].numpy(), [2, 20, 20, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 10, 10, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 5, 5, 256]) -- GitLab From 6a2c9932a359c489eff4a8cb35a8372eced7340e Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 22:35:17 +0800 Subject: [PATCH 009/128] add unit test for get_proposal_feature_extractor_model smaller input size --- ...esnet_v1_fpn_keras_feature_extractor_tf2_test.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index f55fe6477..3c2f6055e 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -53,6 +53,19 @@ class 
FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [2, 320, 320, 3], maxval=255, dtype=tf.float32) + rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) + features_shapes = [tf.shape(rpn_feature_map) + for name, rpn_feature_map in rpn_feature_maps.items()] + + self.assertAllEqual(features_shapes[0].numpy(), [2, 40, 40, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 20, 20, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 10, 10, 256]) + + def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [2, 160, 160, 3], maxval=255, dtype=tf.float32) -- GitLab From cadd3badf789bfe8786513dcb7374e65c87a2912 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 19 Jun 2020 22:37:21 +0800 Subject: [PATCH 010/128] add unit test for get_proposal_feature_extractor_model incorrect dimension --- ...rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 3c2f6055e..54c63ef73 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -77,3 +77,11 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): self.assertAllEqual(features_shapes[0].numpy(), [2, 20, 20, 256]) self.assertAllEqual(features_shapes[1].numpy(), [2, 10, 10, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 5, 5, 256]) + + def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): + feature_extractor = self._build_feature_extractor() + preprocessed_inputs = tf.random_uniform( + [224, 224, 3], maxval=255, dtype=tf.float32) + with self.assertRaises(tf.errors.InvalidArgumentError): + feature_extractor.get_proposal_feature_extractor_model( + name='TestScope')(preprocessed_inputs) \ No newline at end of file -- GitLab From c4969f7ca4f6c46f7e88f9c264061b8f020c0715 Mon Sep 17 00:00:00 2001 From: syiming Date: Sat, 20 Jun 2020 00:29:18 +0800 Subject: [PATCH 011/128] change keras model output format --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 03bfcdb15..f548d51c6 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -191,9 +191,10 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) + features_maps = [fpn_feature for _, fpn_feature in fpn_features.items()] feature_extractor_model = tf.keras.models.Model( - inputs=full_resnet_v1_model.inputs, outputs=fpn_features) + 
inputs=full_resnet_v1_model.inputs, outputs=features_maps) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): -- GitLab From 51e82b3c80461d4f29708cabfdc70553a2d606b4 Mon Sep 17 00:00:00 2001 From: syiming Date: Sat, 20 Jun 2020 01:07:39 +0800 Subject: [PATCH 012/128] Update size on extract_proposal_features tests --- ...v1_fpn_keras_feature_extractor_tf2_test.py | 51 ++++++++++++++----- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 54c63ef73..9b7c688bd 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -45,8 +45,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, architecture='resnet_v1_50'): return frcnn_res_fpn.FasterRCNNResnet50FPNKerasFeatureExtractor( - is_training=False - , + is_training=False, conv_hyperparams=self._build_conv_hyperparams(), first_stage_features_stride=16, batch_norm_trainable=False, @@ -55,28 +54,30 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( - [2, 320, 320, 3], maxval=255, dtype=tf.float32) + [2, 448, 448, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shapes = [tf.shape(rpn_feature_map) - for name, rpn_feature_map in rpn_feature_maps.items()] - - self.assertAllEqual(features_shapes[0].numpy(), [2, 40, 40, 256]) - self.assertAllEqual(features_shapes[1].numpy(), [2, 20, 20, 256]) - self.assertAllEqual(features_shapes[2].numpy(), [2, 10, 10, 256]) + for rpn_feature_map in rpn_feature_maps] + + self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256]) + self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( - [2, 160, 160, 3], maxval=255, dtype=tf.float32) + [2, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) features_shapes = [tf.shape(rpn_feature_map) - for name, rpn_feature_map in rpn_feature_maps.items()] + for rpn_feature_map in rpn_feature_maps] - self.assertAllEqual(features_shapes[0].numpy(), [2, 20, 20, 256]) - self.assertAllEqual(features_shapes[1].numpy(), [2, 10, 10, 256]) - self.assertAllEqual(features_shapes[2].numpy(), [2, 5, 5, 256]) + self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256]) + self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256]) + self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) + self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256]) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor() @@ -84,4 +85,26 @@ class 
FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(tf.errors.InvalidArgumentError): feature_extractor.get_proposal_feature_extractor_model( - name='TestScope')(preprocessed_inputs) \ No newline at end of file + name='TestScope')(preprocessed_inputs) + + # def test_extract_box_classifier_features_returns_expected_size(self): + # feature_extractor = self._build_feature_extractor() + # proposal_feature_maps = tf.random_uniform( + # [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + # model = feature_extractor.get_box_classifier_feature_extractor_model( + # name='TestScope') + # proposal_classifier_features = ( + # model(proposal_feature_maps)) + # features_shape = tf.shape(proposal_classifier_features) + # # Note: due to a slight mismatch in slim and keras resnet definitions + # # the output shape of the box classifier is slightly different compared to + # # that of the slim implementation. The keras version is more `canonical` + # # in that it more accurately reflects the original authors' implementation. + # # TODO(jonathanhuang): make the output shape match that of the slim + # # implementation by using atrous convolutions. + # self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048]) + + +if __name__ == '__main__': + tf.enable_v2_behavior() + tf.test.main() \ No newline at end of file -- GitLab From 7a1475141a004f4f525fb5dafdd5ecaec4cc0388 Mon Sep 17 00:00:00 2001 From: syiming Date: Sat, 20 Jun 2020 01:19:41 +0800 Subject: [PATCH 013/128] change default fpn_min_level to 2 --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index f548d51c6..303855658 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -48,7 +48,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( depth_multiplier, batch_norm_trainable=False, weight_decay=0.0, - fpn_min_level=3, + fpn_min_level=2, fpn_max_level=7, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): @@ -236,7 +236,7 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor( depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, - fpn_min_level=3, + fpn_min_level=2, fpn_max_level=7, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): @@ -281,7 +281,7 @@ class FasterRCNNResnet101FPNKerasFeatureExtractor( depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, - fpn_min_level=3, + fpn_min_level=2, fpn_max_level=7, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): @@ -328,7 +328,7 @@ class FasterRCNNResnet152FPNKerasFeatureExtractor( depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, - fpn_min_level=3, + fpn_min_level=2, fpn_max_level=7, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): -- GitLab From ef68a82699c7b933bc0976a8e4fabc742451e23c Mon Sep 17 00:00:00 2001 From: syiming Date: Sat, 20 Jun 2020 02:03:06 +0800 Subject: [PATCH 014/128] fix error in get_box_classifier_feature_extractor_model --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 303855658..63905145e 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -218,8 +218,9 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): feature_extractor_model = tf.keras.models.Sequential([ - tf.keras.layers.Dense(unit=1024, activation='ReLU'), - tf.keras.layers.Dense(unit=1024, activation='ReLU') + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(units=1024, activation='relu'), + tf.keras.layers.Dense(units=1024, activation='relu') ]) return feature_extractor_model -- GitLab From 8db480c918ede02f1235f28252bdabcaee876c79 Mon Sep 17 00:00:00 2001 From: syiming Date: Sat, 20 Jun 2020 02:03:24 +0800 Subject: [PATCH 015/128] add shape test for get_box_classifier_feature_extractor_model --- ...v1_fpn_keras_feature_extractor_tf2_test.py | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 9b7c688bd..27e32fceb 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -87,22 +87,17 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) - # def test_extract_box_classifier_features_returns_expected_size(self): - # feature_extractor = self._build_feature_extractor() - # proposal_feature_maps = tf.random_uniform( - # [3, 7, 7, 1024], maxval=255, dtype=tf.float32) - # model = feature_extractor.get_box_classifier_feature_extractor_model( - # name='TestScope') - # proposal_classifier_features = ( - # model(proposal_feature_maps)) - # features_shape = tf.shape(proposal_classifier_features) - # # Note: due to a slight mismatch in slim and keras resnet definitions - # # the output shape of the box classifier is slightly different compared to - # # that of the slim implementation. The keras version is more `canonical` - # # in that it more accurately reflects the original authors' implementation. - # # TODO(jonathanhuang): make the output shape match that of the slim - # # implementation by using atrous convolutions. 
- # self.assertAllEqual(features_shape.numpy(), [3, 4, 4, 2048]) + def test_extract_box_classifier_features_returns_expected_size(self): + feature_extractor = self._build_feature_extractor() + proposal_feature_maps = tf.random_uniform( + [3, 7, 7, 1024], maxval=255, dtype=tf.float32) + model = feature_extractor.get_box_classifier_feature_extractor_model( + name='TestScope') + proposal_classifier_features = ( + model(proposal_feature_maps)) + features_shape = tf.shape(proposal_classifier_features) + + self.assertAllEqual(features_shape.numpy(), [3, 1024]) if __name__ == '__main__': -- GitLab From 7140eede200e06d9f55e0a788d16a7fbea11686b Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 22 Jun 2020 14:07:21 +0800 Subject: [PATCH 016/128] fix coding style --- ...et_v1_fpn_keras_feature_extractor_tf2_test.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 27e32fceb..b90d4260f 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -43,22 +43,22 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) - def _build_feature_extractor(self, architecture='resnet_v1_50'): + def _build_feature_extractor(self): return frcnn_res_fpn.FasterRCNNResnet50FPNKerasFeatureExtractor( is_training=False, conv_hyperparams=self._build_conv_hyperparams(), first_stage_features_stride=16, batch_norm_trainable=False, weight_decay=0.0) - + def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() preprocessed_inputs = tf.random_uniform( [2, 448, 448, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) - features_shapes = [tf.shape(rpn_feature_map) - for rpn_feature_map in rpn_feature_maps] + features_shapes = [tf.shape(rpn_feature_map) + for rpn_feature_map in rpn_feature_maps] self.assertAllEqual(features_shapes[0].numpy(), [2, 112, 112, 256]) self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256]) @@ -71,9 +71,9 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): [2, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_maps = feature_extractor.get_proposal_feature_extractor_model( name='TestScope')(preprocessed_inputs) - features_shapes = [tf.shape(rpn_feature_map) - for rpn_feature_map in rpn_feature_maps] - + features_shapes = [tf.shape(rpn_feature_map) + for rpn_feature_map in rpn_feature_maps] + self.assertAllEqual(features_shapes[0].numpy(), [2, 56, 56, 256]) self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) @@ -102,4 +102,4 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): if __name__ == '__main__': tf.enable_v2_behavior() - tf.test.main() \ No newline at end of file + tf.test.main() -- GitLab From 1cf70ed77c0cbd0b8f7114d86337474ed199f877 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 22 Jun 2020 14:28:47 +0800 Subject: [PATCH 017/128] Fix coding style for feature extractor. 
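Usage sketch (mirrors the expectations in
faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py; `hyperparams`
stands in for a `hyperparams_builder.KerasLayerHyperparams` instance):

    feature_extractor = FasterRCNNResnet50FPNKerasFeatureExtractor(
        is_training=False,
        conv_hyperparams=hyperparams,
        first_stage_features_stride=16)
    images = tf.random_uniform([2, 448, 448, 3], maxval=255, dtype=tf.float32)
    rpn_model = feature_extractor.get_proposal_feature_extractor_model(
        name='FirstStageFeatureExtractor')
    # With the default fpn_min_level=2, the tests expect shapes
    # [2, 112, 112, 256], [2, 56, 56, 256], [2, 28, 28, 256], [2, 14, 14, 256].
    rpn_feature_maps = rpn_model(feature_extractor.preprocess(images))
    box_model = feature_extractor.get_box_classifier_feature_extractor_model(
        name='SecondStageFeatureExtractor')
    crops = tf.random_uniform([3, 7, 7, 1024], maxval=255, dtype=tf.float32)
    # Flatten + two Dense(1024, relu) layers: output shape [3, 1024].
    proposal_classifier_features = box_model(crops)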
--- ...n_resnet_v1_fpn_keras_feature_extractor.py | 63 +++++++++---------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 63905145e..d900731b9 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -20,9 +20,7 @@ import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 -from object_detection.models.keras_models import model_utils -from object_detection.utils import ops -from object_detection.utils import shape_utils + _RESNET_MODEL_OUTPUT_LAYERS = { 'resnet_v1_50': ['conv2_block3_out', 'conv3_block4_out', @@ -35,7 +33,7 @@ _RESNET_MODEL_OUTPUT_LAYERS = { class FasterRCNNResnetV1FPNKerasFeatureExtractor( - faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): + faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" def __init__(self, @@ -52,30 +50,24 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( fpn_max_level=7, additional_layer_depth=256, override_base_feature_extractor_hyperparams=False): - # FIXME: fix doc string for fpn min level and fpn max level """Constructor. Args: is_training: See base class. + resnet_v1_base_model: base resnet v1 network to use. One of + the resnet_v1.resnet_v1_{50,101,152} models. + resnet_v1_base_model_name: model name under which to construct resnet v1. first_stage_features_stride: See base class. - conv_hyperparameters: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. min_depth: Minimum number of filters in the convolutional layers. depth_multiplier: The depth multiplier to modify the number of filters in the convolutional layers. - resnet_v1_base_model: base resnet v1 network to use. One of - the resnet_v1.resnet_v1_{50,101,152} models. - resnet_v1_base_model_name: model name under which to construct resnet v1. - batch_norm_trainable: See base class. weight_decay: See base class. - fpn_min_level: the highest resolution feature map to use in FPN. The valid - values are {2, 3, 4, 5} which map to MobileNet v1 layers - {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, - Conv2d_13_pointwise}, respectively. + values are {2, 3, 4, 5} which map to Resnet v1 layers. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. 
In the case that there are not enough feature
@@ -92,22 +84,24 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
     """
     if first_stage_features_stride != 8 and first_stage_features_stride != 16:
       raise ValueError('`first_stage_features_stride` must be 8 or 16.')
+
     super(FasterRCNNResnetV1FPNKerasFeatureExtractor, self).__init__(
-      is_training=is_training,
-      first_stage_features_stride=first_stage_features_stride,
-      batch_norm_trainable=batch_norm_trainable,
-      weight_decay=weight_decay)
+        is_training=is_training,
+        first_stage_features_stride=first_stage_features_stride,
+        batch_norm_trainable=batch_norm_trainable,
+        weight_decay=weight_decay)
+
+    self._resnet_v1_base_model = resnet_v1_base_model
+    self._resnet_v1_base_model_name = resnet_v1_base_model_name
     self._conv_hyperparams = conv_hyperparams
     self._min_depth = min_depth
     self._depth_multiplier = depth_multiplier
+    self._fpn_min_level = fpn_min_level
+    self._fpn_max_level = fpn_max_level
     self._additional_layer_depth = additional_layer_depth
     self._freeze_batchnorm = (not batch_norm_trainable)
     self._override_base_feature_extractor_hyperparams = \
       override_base_feature_extractor_hyperparams
-    self._fpn_min_level = fpn_min_level
-    self._fpn_max_level = fpn_max_level
-    self._resnet_v1_base_model = resnet_v1_base_model
-    self._resnet_v1_base_model_name = resnet_v1_base_model_name
     self._resnet_block_names = ['block1', 'block2', 'block3', 'block4']
     self.classification_backbone = None
     self._fpn_features_generator = None
@@ -134,11 +128,11 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
       return resized_inputs - [[channel_means]]
     else:
       return resized_inputs
-  
+
   def get_proposal_feature_extractor_model(self, name=None):
     """Returns a model that extracts first stage RPN features.
 
-    Extracts features using the first half of the Resnet v1 network.
+    Extracts features using the Resnet v1 FPN network.
 
     Args:
       name: A scope name to construct all variables within.
@@ -147,6 +141,9 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
       A Keras model that takes preprocessed_inputs:
         A [batch, height, width, channels] float32 tensor
        representing a batch of images.
+
+      And returns rpn_feature_map:
+        A list of tensors with shape [batch, height, width, depth].
     """
     with tf.name_scope(name):
       with tf.name_scope('ResnetV1FPN'):
@@ -162,7 +159,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
             include_top=False)
         output_layers = _RESNET_MODEL_OUTPUT_LAYERS[self._resnet_v1_base_model_name]
         outputs = [full_resnet_v1_model.get_layer(output_layer_name).output
-                  for output_layer_name in output_layers]
+                   for output_layer_name in output_layers]
         self.classification_backbone = tf.keras.Model(
             inputs=full_resnet_v1_model.inputs,
             outputs=outputs)
@@ -181,7 +178,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
               conv_hyperparams=self._conv_hyperparams,
               freeze_batchnorm=self._freeze_batchnorm,
               name='FeatureMaps'))
-      
+
       feature_block_list = []
       for level in range(self._fpn_min_level, self._base_fpn_max_level + 1):
         feature_block_list.append('block{}'.format(level - 1))
@@ -200,7 +197,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor(
   def get_box_classifier_feature_extractor_model(self, name=None):
     """Returns a model that extracts second stage box classifier features.
 
-    TODO: doc
+    Constructs two fully connected layers to extract the box classifier features.
 
     Args:
       name: A scope name to construct all variables within.
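
A usage sketch of the two sub-models this file exposes (not part of the patch; the Resnet50 subclass name matches this point in the series, and conv_hyperparams is assumed to be a hyperparams_builder.KerasLayerHyperparams built as in the unit tests; shapes follow those tests):

extractor = FasterRCNNResnet50FPNKerasFeatureExtractor(
    is_training=False,
    conv_hyperparams=conv_hyperparams,  # assumed, built as in the tests
    first_stage_features_stride=16)
images = tf.random_uniform([2, 448, 448, 3], maxval=255, dtype=tf.float32)
# First stage: a list of FPN feature maps, one per pyramid level.
rpn_feature_maps = extractor.get_proposal_feature_extractor_model(
    name='FirstStageFeatureExtractor')(extractor.preprocess(images))
# Second stage: flattens ROI crops and applies the two fully connected layers.
proposal_crops = tf.random_uniform([3, 7, 7, 1024], maxval=255, dtype=tf.float32)
box_features = extractor.get_box_classifier_feature_extractor_model(
    name='SecondStageFeatureExtractor')(proposal_crops)
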
@@ -210,17 +207,18 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. + And returns proposal_classifier_features: A 4-D float tensor with shape - [batch_size * self.max_num_proposals, height, width, depth] + [batch_size * self.max_num_proposals, 1024] representing box classifier features for each proposal. """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): feature_extractor_model = tf.keras.models.Sequential([ - tf.keras.layers.Flatten(), - tf.keras.layers.Dense(units=1024, activation='relu'), - tf.keras.layers.Dense(units=1024, activation='relu') + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(units=1024, activation='relu'), + tf.keras.layers.Dense(units=1024, activation='relu') ]) return feature_extractor_model @@ -228,7 +226,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( class FasterRCNNResnet50FPNKerasFeatureExtractor( FasterRCNNResnetV1FPNKerasFeatureExtractor): """Faster RCNN with Resnet50 FPN feature extractor implementation.""" - + def __init__(self, is_training, first_stage_features_stride=16, @@ -271,6 +269,7 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor( additional_layer_depth=additional_layer_depth, override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) + class FasterRCNNResnet101FPNKerasFeatureExtractor( FasterRCNNResnetV1FPNKerasFeatureExtractor): """Faster RCNN with Resnet101 FPN feature extractor implementation.""" -- GitLab From 76516b2e3cfc370373a43b4c744744afb6f71dca Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 22 Jun 2020 14:56:38 +0800 Subject: [PATCH 018/128] add faster rcnn resnet v1 fpn feature extractor to model builder --- research/object_detection/builders/model_builder.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/research/object_detection/builders/model_builder.py b/research/object_detection/builders/model_builder.py index cdb17e88b..6d95c7118 100644 --- a/research/object_detection/builders/model_builder.py +++ b/research/object_detection/builders/model_builder.py @@ -52,6 +52,7 @@ if tf_version.is_tf2(): from object_detection.models import faster_rcnn_inception_resnet_v2_keras_feature_extractor as frcnn_inc_res_keras from object_detection.models import faster_rcnn_resnet_keras_feature_extractor as frcnn_resnet_keras from object_detection.models import ssd_resnet_v1_fpn_keras_feature_extractor as ssd_resnet_v1_fpn_keras + from object_detection.models import faster_rcnn_resnet_v1_fpn_keras_feature_extractor as frcnn_resnet_fpn_keras from object_detection.models.ssd_mobilenet_v1_fpn_keras_feature_extractor import SSDMobileNetV1FpnKerasFeatureExtractor from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_keras_feature_extractor import SSDMobileNetV2FpnKerasFeatureExtractor @@ -109,6 +110,12 @@ if tf_version.is_tf2(): frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, 'faster_rcnn_inception_resnet_v2_keras': frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, + 'fasret_rcnn_resnet50_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet50FPNKerasFeatureExtractor, + 'fasret_rcnn_resnet101_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet101FPNKerasFeatureExtractor, + 'fasret_rcnn_resnet152_fpn_keras': + frcnn_resnet_fpn_keras.FasterRCNNResnet152FPNKerasFeatureExtractor, } 
CENTER_NET_EXTRACTOR_FUNCTION_MAP = { -- GitLab From 248dbe7cea53783bbb3c9a6de7c45ddce26d187d Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 15:34:04 +0800 Subject: [PATCH 019/128] rename fpn feature extractors --- .../builders/model_builder.py | 6 ++--- ...n_resnet_v1_fpn_keras_feature_extractor.py | 22 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/research/object_detection/builders/model_builder.py b/research/object_detection/builders/model_builder.py index 6d95c7118..f3c9ad53d 100644 --- a/research/object_detection/builders/model_builder.py +++ b/research/object_detection/builders/model_builder.py @@ -111,11 +111,11 @@ if tf_version.is_tf2(): 'faster_rcnn_inception_resnet_v2_keras': frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, 'fasret_rcnn_resnet50_fpn_keras': - frcnn_resnet_fpn_keras.FasterRCNNResnet50FPNKerasFeatureExtractor, + frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor, 'fasret_rcnn_resnet101_fpn_keras': - frcnn_resnet_fpn_keras.FasterRCNNResnet101FPNKerasFeatureExtractor, + frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor, 'fasret_rcnn_resnet152_fpn_keras': - frcnn_resnet_fpn_keras.FasterRCNNResnet152FPNKerasFeatureExtractor, + frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor, } CENTER_NET_EXTRACTOR_FUNCTION_MAP = { diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index d900731b9..d620d1a29 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -32,7 +32,7 @@ _RESNET_MODEL_OUTPUT_LAYERS = { } -class FasterRCNNResnetV1FPNKerasFeatureExtractor( +class FasterRCNNResnetV1FpnKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): """Faster RCNN Feature Extractor using Keras-based Resnet V1 FPN features.""" @@ -85,7 +85,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') - super(FasterRCNNResnetV1FPNKerasFeatureExtractor, self).__init__( + super(FasterRCNNResnetV1FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=batch_norm_trainable, @@ -223,8 +223,8 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractor( return feature_extractor_model -class FasterRCNNResnet50FPNKerasFeatureExtractor( - FasterRCNNResnetV1FPNKerasFeatureExtractor): +class FasterRCNNResnet50FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet50 FPN feature extractor implementation.""" def __init__(self, @@ -254,7 +254,7 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. 
""" - super(FasterRCNNResnet50FPNKerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet50FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, @@ -270,8 +270,8 @@ class FasterRCNNResnet50FPNKerasFeatureExtractor( override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) -class FasterRCNNResnet101FPNKerasFeatureExtractor( - FasterRCNNResnetV1FPNKerasFeatureExtractor): +class FasterRCNNResnet101FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet101 FPN feature extractor implementation.""" def __init__(self, is_training, @@ -300,7 +300,7 @@ class FasterRCNNResnet101FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ - super(FasterRCNNResnet101FPNKerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet101FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, @@ -316,8 +316,8 @@ class FasterRCNNResnet101FPNKerasFeatureExtractor( override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams) -class FasterRCNNResnet152FPNKerasFeatureExtractor( - FasterRCNNResnetV1FPNKerasFeatureExtractor): +class FasterRCNNResnet152FpnKerasFeatureExtractor( + FasterRCNNResnetV1FpnKerasFeatureExtractor): """Faster RCNN with Resnet152 FPN feature extractor implementation.""" def __init__(self, @@ -347,7 +347,7 @@ class FasterRCNNResnet152FPNKerasFeatureExtractor( additional_layer_depth: See base class. override_base_feature_extractor_hyperparams: See base class. """ - super(FasterRCNNResnet152FPNKerasFeatureExtractor, self).__init__( + super(FasterRCNNResnet152FpnKerasFeatureExtractor, self).__init__( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, -- GitLab From e4244a469ab0c9be3b0458483c2cee2da8c55e92 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 15:39:14 +0800 Subject: [PATCH 020/128] modify doc string for Feature extractor: drop word "implementation" --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index d620d1a29..f484cd88e 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -225,7 +225,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( class FasterRCNNResnet50FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): - """Faster RCNN with Resnet50 FPN feature extractor implementation.""" + """Faster RCNN with Resnet50 FPN feature extractor.""" def __init__(self, is_training, @@ -272,7 +272,7 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor( class FasterRCNNResnet101FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): - """Faster RCNN with Resnet101 FPN feature extractor implementation.""" + """Faster RCNN with Resnet101 FPN feature extractor.""" def __init__(self, is_training, first_stage_features_stride=16, @@ -318,7 +318,7 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor( class 
FasterRCNNResnet152FpnKerasFeatureExtractor( FasterRCNNResnetV1FpnKerasFeatureExtractor): - """Faster RCNN with Resnet152 FPN feature extractor implementation.""" + """Faster RCNN with Resnet152 FPN feature extractor.""" def __init__(self, is_training, -- GitLab From 2d19f98e75d58f25c311bb4b62e9956f5ac0d092 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 15:42:20 +0800 Subject: [PATCH 021/128] add todo in get_box_classifier_feature_extractor_model --- .../models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index f484cd88e..94927a016 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -215,6 +215,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( """ with tf.name_scope(name): with tf.name_scope('ResnetV1FPN'): + # TODO: Add a batchnorm layer between two fc layers. feature_extractor_model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=1024, activation='relu'), -- GitLab From 9dcd43dd244aa2e7c922b649e7daaf7298a2580c Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 15:44:41 +0800 Subject: [PATCH 022/128] remove enable_v2_behavior --- ...er_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index b90d4260f..2ccb963c8 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -98,8 +98,3 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): features_shape = tf.shape(proposal_classifier_features) self.assertAllEqual(features_shape.numpy(), [3, 1024]) - - -if __name__ == '__main__': - tf.enable_v2_behavior() - tf.test.main() -- GitLab From aef0470534d618114207d39686d8e88a980768b7 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 15:47:41 +0800 Subject: [PATCH 023/128] drop unittest test_extract_proposal_features_dies_with_incorrect_rank_inputs --- ...rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 2ccb963c8..9ad48757e 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -79,14 +79,6 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256]) - def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): - feature_extractor = self._build_feature_extractor() - preprocessed_inputs = tf.random_uniform( - [224, 224, 3], maxval=255, dtype=tf.float32) - with 
self.assertRaises(tf.errors.InvalidArgumentError): - feature_extractor.get_proposal_feature_extractor_model( - name='TestScope')(preprocessed_inputs) - def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() proposal_feature_maps = tf.random_uniform( -- GitLab From 1538da90525a1c194b973e6ca8a206a28b66f71e Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 16:22:32 +0800 Subject: [PATCH 024/128] modify feature extractor name --- ...ter_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index 9ad48757e..c0b40c176 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -26,7 +26,7 @@ from object_detection.protos import hyperparams_pb2 @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') -class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): +class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() @@ -44,7 +44,7 @@ class FasterRCNNResnetV1FPNKerasFeatureExtractorTest(tf.test.TestCase): return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def _build_feature_extractor(self): - return frcnn_res_fpn.FasterRCNNResnet50FPNKerasFeatureExtractor( + return frcnn_res_fpn.FasterRCNNResnet50FpnKerasFeatureExtractor( is_training=False, conv_hyperparams=self._build_conv_hyperparams(), first_stage_features_stride=16, -- GitLab From 7d0dc18319c972342e6dc710493cb37787fe4c46 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 16:43:26 +0800 Subject: [PATCH 025/128] remove min_depth and depth_multiplier feature --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 31 +------------------ 1 file changed, 1 insertion(+), 30 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 94927a016..c7b3fbab9 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -42,8 +42,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( resnet_v1_base_model_name, first_stage_features_stride, conv_hyperparams, - min_depth, - depth_multiplier, batch_norm_trainable=False, weight_decay=0.0, fpn_min_level=2, @@ -61,9 +59,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( conv_hyperparameters: a `hyperparams_builder.KerasLayerHyperparams` object containing convolution hyperparameters for the layers added on top of the base feature extractor. - min_depth: Minimum number of filters in the convolutional layers. - depth_multiplier: The depth multiplier to modify the number of filters - in the convolutional layers. batch_norm_trainable: See base class. weight_decay: See base class. fpn_min_level: the highest resolution feature map to use in FPN. 
The valid @@ -94,8 +89,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._resnet_v1_base_model = resnet_v1_base_model self._resnet_v1_base_model_name = resnet_v1_base_model_name self._conv_hyperparams = conv_hyperparams - self._min_depth = min_depth - self._depth_multiplier = depth_multiplier self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth @@ -152,8 +145,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( conv_hyperparams=(self._conv_hyperparams if self._override_base_feature_extractor_hyperparams else None), - min_depth=self._min_depth, - depth_multiplier=self._depth_multiplier, classes=None, weights=None, include_top=False) @@ -166,14 +157,12 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( backbone_outputs = self.classification_backbone(full_resnet_v1_model.inputs) # construct FPN feature generator - self._depth_fn = lambda d: max( - int(d * self._depth_multiplier), self._min_depth) self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( feature_map_generators.KerasFpnTopDownFeatureMaps( num_levels=self._num_levels, - depth=self._depth_fn(self._additional_layer_depth), + depth=self._additional_layer_depth, is_training=self._is_training, conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, @@ -232,8 +221,6 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor( is_training, first_stage_features_stride=16, conv_hyperparams=None, - min_depth=16, - depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, fpn_min_level=2, @@ -246,8 +233,6 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor( is_training: See base class. first_stage_features_stride: See base class. conv_hyperparams: See base class. - min_depth: See base class. - depth_multiplier: See base class. batch_norm_trainable: See base class. weight_decay: See base class. fpn_min_level: See base class. @@ -259,8 +244,6 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, - min_depth=min_depth, - depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_50, resnet_v1_base_model_name='resnet_v1_50', batch_norm_trainable=batch_norm_trainable, @@ -278,8 +261,6 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor( is_training, first_stage_features_stride=16, conv_hyperparams=None, - min_depth=16, - depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, fpn_min_level=2, @@ -292,8 +273,6 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor( is_training: See base class. first_stage_features_stride: See base class. conv_hyperparams: See base class. - min_depth: See base class. - depth_multiplier: See base class. batch_norm_trainable: See base class. weight_decay: See base class. fpn_min_level: See base class. 
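
A before/after sketch of what this removal changes in the FPN depth computation (not part of the patch; the values are the defaults used elsewhere in this file):

# Before this patch, the FPN generator depth was scaled and clamped:
depth_fn = lambda d: max(int(d * depth_multiplier), min_depth)
depth = depth_fn(additional_layer_depth)  # e.g. max(int(256 * 1.0), 16) -> 256
# After it, KerasFpnTopDownFeatureMaps simply receives
# depth=self._additional_layer_depth, i.e. 256 by default.
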
@@ -305,8 +284,6 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, - min_depth=min_depth, - depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_101, resnet_v1_base_model_name='resnet_v1_101', batch_norm_trainable=batch_norm_trainable, @@ -325,8 +302,6 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor( is_training, first_stage_features_stride=16, conv_hyperparams=None, - min_depth=16, - depth_multiplier=1, batch_norm_trainable=False, weight_decay=0.0, fpn_min_level=2, @@ -339,8 +314,6 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor( is_training: See base class. first_stage_features_stride: See base class. conv_hyperparams: See base class. - min_depth: See base class. - depth_multiplier: See base class. batch_norm_trainable: See base class. weight_decay: See base class. fpn_min_level: See base class. @@ -352,8 +325,6 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor( is_training=is_training, first_stage_features_stride=first_stage_features_stride, conv_hyperparams=conv_hyperparams, - min_depth=min_depth, - depth_multiplier=depth_multiplier, resnet_v1_base_model=resnet_v1.resnet_v1_152, resnet_v1_base_model_name='resnet_v1_152', batch_norm_trainable=batch_norm_trainable, -- GitLab From 70c974d36e63955b82f4df23989d5aa902e1d51b Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 29 Jun 2020 17:10:02 +0800 Subject: [PATCH 026/128] Add coarse layers for faster rcnn fpn keras model 1. Add coarse layers 2. Update corresponding unit test to check the size of the coarse layer is correct --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 36 +++++++++++++++++-- ...v1_fpn_keras_feature_extractor_tf2_test.py | 4 +++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index c7b3fbab9..a9f7aea1b 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -98,6 +98,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] self.classification_backbone = None self._fpn_features_generator = None + self._coarse_feature_layers = [] def preprocess(self, resized_inputs): """Faster R-CNN Resnet V1 preprocessing. 
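
The next hunk wires in the extra coarse ('bottom_up') blocks. Each block is a stride-2 convolution followed by batch norm and an activation, so it halves the spatial size of the last FPN map. A plain-Keras sketch of one such block (not part of the patch; hyperparameter plumbing omitted, shapes follow the unit tests):

last_map = tf.keras.Input(shape=(7, 7, 256))
x = tf.keras.layers.Conv2D(256, [3, 3], strides=2, padding='SAME')(last_map)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)  # -> (None, 4, 4, 256): one pyramid level coarser
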
@@ -177,10 +178,41 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( (feature_block, feature_block_map[feature_block]) for feature_block in feature_block_list] fpn_features = self._fpn_features_generator(fpn_input_image_features) - features_maps = [fpn_feature for _, fpn_feature in fpn_features.items()] + + # Construct coarse feature layers + for i in range(self._base_fpn_max_level, self._fpn_max_level): + layers = [] + layer_name = 'bottom_up_block{}'.format(i) + layers.append( + tf.keras.layers.Conv2D( + self._additional_layer_depth, + [3, 3], + padding='SAME', + strides=2, + name=layer_name + '_conv', + **self._conv_hyperparams.params())) + layers.append( + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm), + name=layer_name + '_batchnorm')) + layers.append( + self._conv_hyperparams.build_activation_layer( + name=layer_name)) + self._coarse_feature_layers.append(layers) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self._coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) feature_extractor_model = tf.keras.models.Model( - inputs=full_resnet_v1_model.inputs, outputs=features_maps) + inputs=full_resnet_v1_model.inputs, outputs=feature_maps) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index c0b40c176..4782b1be2 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -64,6 +64,8 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): self.assertAllEqual(features_shapes[1].numpy(), [2, 56, 56, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256]) self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256]) + self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256]) + self.assertAllEqual(features_shapes[5].numpy(), [2, 4, 4, 256]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor() @@ -78,6 +80,8 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): self.assertAllEqual(features_shapes[1].numpy(), [2, 28, 28, 256]) self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256]) self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256]) + self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256]) + self.assertAllEqual(features_shapes[5].numpy(), [2, 2, 2, 256]) def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor() -- GitLab From 2e13b9a71de6b1d5032c755a35cc15e4cf6f08b6 Mon Sep 17 00:00:00 2001 From: syiming Date: Tue, 30 Jun 2020 07:29:44 +0800 Subject: [PATCH 027/128] change the default max_fpn_level to 6 --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
index a9f7aea1b..d0532b784 100644
--- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
@@ -45,7 +45,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor(
                batch_norm_trainable=False,
                weight_decay=0.0,
                fpn_min_level=2,
-               fpn_max_level=7,
+               fpn_max_level=6,
                additional_layer_depth=256,
                override_base_feature_extractor_hyperparams=False):
     """Constructor.
@@ -256,7 +256,7 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor(
                batch_norm_trainable=False,
                weight_decay=0.0,
                fpn_min_level=2,
-               fpn_max_level=7,
+               fpn_max_level=6,
                additional_layer_depth=256,
                override_base_feature_extractor_hyperparams=False):
     """Constructor.
@@ -296,7 +296,7 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor(
                batch_norm_trainable=False,
                weight_decay=0.0,
                fpn_min_level=2,
-               fpn_max_level=7,
+               fpn_max_level=6,
                additional_layer_depth=256,
                override_base_feature_extractor_hyperparams=False):
     """Constructor.
@@ -337,7 +337,7 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor(
                batch_norm_trainable=False,
                weight_decay=0.0,
                fpn_min_level=2,
-               fpn_max_level=7,
+               fpn_max_level=6,
                additional_layer_depth=256,
                override_base_feature_extractor_hyperparams=False):
     """Constructor.
-- GitLab

From 0e59ecc71c88ee98a44b53eb13fb1909b14b6518 Mon Sep 17 00:00:00 2001
From: syiming
Date: Tue, 30 Jun 2020 07:31:30 +0800
Subject: [PATCH 028/128] change unittest since default max_fpn_level is changed to 6
---
 ...aster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
index 4782b1be2..a120cce92 100644
--- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
@@ -65,7 +65,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase):
     self.assertAllEqual(features_shapes[2].numpy(), [2, 28, 28, 256])
     self.assertAllEqual(features_shapes[3].numpy(), [2, 14, 14, 256])
     self.assertAllEqual(features_shapes[4].numpy(), [2, 7, 7, 256])
-    self.assertAllEqual(features_shapes[5].numpy(), [2, 4, 4, 256])
 
   def test_extract_proposal_features_half_size_input(self):
@@ -81,7 +80,6 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase):
     self.assertAllEqual(features_shapes[2].numpy(), [2, 14, 14, 256])
     self.assertAllEqual(features_shapes[3].numpy(), [2, 7, 7, 256])
     self.assertAllEqual(features_shapes[4].numpy(), [2, 4, 4, 256])
-    self.assertAllEqual(features_shapes[5].numpy(), [2, 2, 2, 256])
 
   def test_extract_box_classifier_features_returns_expected_size(self):
-- GitLab

From 4faea59a79bd6a038b964619560646d56ed66577 Mon Sep 17 00:00:00 2001
From: syiming
Date: Wed, 1 Jul 2020 14:27:55 +0800
Subject: [PATCH 029/128] remove extraneous spaces
---
 .../models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git
a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index d0532b784..d3a80dceb 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -199,7 +199,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) - + feature_maps = [] for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) -- GitLab From 705dbf33aaceb0ea3b9f6c0a135887583887a8d8 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 10 Jul 2020 03:31:39 +0800 Subject: [PATCH 030/128] merge --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 79 ++++++++++++------- 1 file changed, 52 insertions(+), 27 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index d3a80dceb..7b9f1d200 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -20,6 +20,7 @@ import tensorflow.compat.v1 as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.models import feature_map_generators from object_detection.models.keras_models import resnet_v1 +from object_detection.utils import ops _RESNET_MODEL_OUTPUT_LAYERS = { @@ -31,6 +32,49 @@ _RESNET_MODEL_OUTPUT_LAYERS = { 'conv4_block36_out', 'conv5_block3_out'], } +class ResnetFPN(tf.keras.layers.Layer): + def __init__(self, + backbone_classifier, + fpn_features_generator, + coarse_feature_layers, + fpn_min_level, + resnet_block_names, + base_fpn_max_level): + super(ResnetFPN, self).__init__() + self.classification_backbone = backbone_classifier + self.fpn_features_generator = fpn_features_generator + self.coarse_feature_layers = coarse_feature_layers + self._fpn_min_level = fpn_min_level + self._resnet_block_names = resnet_block_names + self._base_fpn_max_level = base_fpn_max_level + + def call(self, inputs): + inputs = ops.pad_to_multiple(inputs, 32) + backbone_outputs = self.classification_backbone(inputs) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, backbone_outputs))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self.fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self.coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + + return feature_maps + class FasterRCNNResnetV1FpnKerasFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor): @@ -155,9 +199,7 @@ class 
FasterRCNNResnetV1FpnKerasFeatureExtractor( self.classification_backbone = tf.keras.Model( inputs=full_resnet_v1_model.inputs, outputs=outputs) - backbone_outputs = self.classification_backbone(full_resnet_v1_model.inputs) - # construct FPN feature generator self._base_fpn_max_level = min(self._fpn_max_level, 5) self._num_levels = self._base_fpn_max_level + 1 - self._fpn_min_level self._fpn_features_generator = ( @@ -168,17 +210,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) - - feature_block_list = [] - for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - feature_block_list.append('block{}'.format(level - 1)) - feature_block_map = dict( - list(zip(self._resnet_block_names, backbone_outputs))) - fpn_input_image_features = [ - (feature_block, feature_block_map[feature_block]) - for feature_block in feature_block_list] - fpn_features = self._fpn_features_generator(fpn_input_image_features) - + # Construct coarse feature layers for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] @@ -199,20 +231,13 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) - - feature_maps = [] - for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) - last_feature_map = fpn_features['top_down_block{}'.format( - self._base_fpn_max_level - 1)] - - for coarse_feature_layers in self._coarse_feature_layers: - for layer in coarse_feature_layers: - last_feature_map = layer(last_feature_map) - feature_maps.append(last_feature_map) - - feature_extractor_model = tf.keras.models.Model( - inputs=full_resnet_v1_model.inputs, outputs=feature_maps) + + feature_extractor_model = ResnetFPN(self.classification_backbone, + self._fpn_features_generator, + self._coarse_feature_layers, + self._fpn_min_level, + self._resnet_block_names, + self._base_fpn_max_level) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): -- GitLab From 086d387398da8e09234c3d53933d25e86660cbf4 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 17 Jul 2020 12:40:29 +0800 Subject: [PATCH 031/128] fix typo --- research/object_detection/builders/model_builder.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/research/object_detection/builders/model_builder.py b/research/object_detection/builders/model_builder.py index f3c9ad53d..33dc56d32 100644 --- a/research/object_detection/builders/model_builder.py +++ b/research/object_detection/builders/model_builder.py @@ -110,11 +110,11 @@ if tf_version.is_tf2(): frcnn_resnet_keras.FasterRCNNResnet152KerasFeatureExtractor, 'faster_rcnn_inception_resnet_v2_keras': frcnn_inc_res_keras.FasterRCNNInceptionResnetV2KerasFeatureExtractor, - 'fasret_rcnn_resnet50_fpn_keras': + 'faster_rcnn_resnet50_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor, - 'fasret_rcnn_resnet101_fpn_keras': + 'faster_rcnn_resnet101_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet101FpnKerasFeatureExtractor, - 'fasret_rcnn_resnet152_fpn_keras': + 'faster_rcnn_resnet152_fpn_keras': frcnn_resnet_fpn_keras.FasterRCNNResnet152FpnKerasFeatureExtractor, } -- GitLab From 938f102d54bd74cadc2e17df630773b6795037be Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 13 Jul 2020 16:19:24 +0800 Subject: [PATCH 032/128] reshape 
output for second stage --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 3 ++- ...ster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 7b9f1d200..7bd2e3830 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -265,7 +265,8 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( feature_extractor_model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=1024, activation='relu'), - tf.keras.layers.Dense(units=1024, activation='relu') + tf.keras.layers.Dense(units=1024, activation='relu'), + tf.keras.layers.Reshape((1, 1, 1024)) ]) return feature_extractor_model diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py index a120cce92..193a27756 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py @@ -91,4 +91,4 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractorTest(tf.test.TestCase): model(proposal_feature_maps)) features_shape = tf.shape(proposal_classifier_features) - self.assertAllEqual(features_shape.numpy(), [3, 1024]) + self.assertAllEqual(features_shape.numpy(), [3, 1, 1, 1024]) -- GitLab From c41aedff097bf5d4c936e42bd5743a63e30c7426 Mon Sep 17 00:00:00 2001 From: syiming Date: Mon, 13 Jul 2020 16:14:58 +0800 Subject: [PATCH 033/128] moving fpn message to fpn.proto --- .../object_detection/protos/faster_rcnn.proto | 1 + research/object_detection/protos/fpn.proto | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 research/object_detection/protos/fpn.proto diff --git a/research/object_detection/protos/faster_rcnn.proto b/research/object_detection/protos/faster_rcnn.proto index 486cc77ea..7ce9ac044 100644 --- a/research/object_detection/protos/faster_rcnn.proto +++ b/research/object_detection/protos/faster_rcnn.proto @@ -8,6 +8,7 @@ import "object_detection/protos/hyperparams.proto"; import "object_detection/protos/image_resizer.proto"; import "object_detection/protos/losses.proto"; import "object_detection/protos/post_processing.proto"; +import "object_detection/protos/fpn.proto"; // Configuration for Faster R-CNN models. // See meta_architectures/faster_rcnn_meta_arch.py and models/model_builder.py diff --git a/research/object_detection/protos/fpn.proto b/research/object_detection/protos/fpn.proto new file mode 100644 index 000000000..626a7035c --- /dev/null +++ b/research/object_detection/protos/fpn.proto @@ -0,0 +1,29 @@ +syntax = "proto2"; + +package object_detection.protos; + +// Configuration for Feature Pyramid Networks. +message FeaturePyramidNetworks { + // We recommend to use multi_resolution_feature_map_generator with FPN, and + // the levels there must match the levels defined below for better + // performance. 
+ // Correspondence from FPN levels to Resnet/Mobilenet V1 feature maps: + // FPN Level Resnet Feature Map Mobilenet-V1 Feature Map + // 2 Block 1 Conv2d_3_pointwise + // 3 Block 2 Conv2d_5_pointwise + // 4 Block 3 Conv2d_11_pointwise + // 5 Block 4 Conv2d_13_pointwise + // 6 Bottomup_5 bottom_up_Conv2d_14 + // 7 Bottomup_6 bottom_up_Conv2d_15 + // 8 Bottomup_7 bottom_up_Conv2d_16 + // 9 Bottomup_8 bottom_up_Conv2d_17 + + // minimum level in feature pyramid + optional int32 min_level = 1 [default = 3]; + + // maximum level in feature pyramid + optional int32 max_level = 2 [default = 7]; + + // channel depth for additional coarse feature layers. + optional int32 additional_layer_depth = 3 [default = 256]; +} -- GitLab From 4267bf411a800177315e14c56c8c30f4ee6cb5e8 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 17 Jul 2020 13:01:14 +0800 Subject: [PATCH 034/128] remove fpn message from ssd.proto, use fpn.proto --- research/object_detection/protos/ssd.proto | 28 +--------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/research/object_detection/protos/ssd.proto b/research/object_detection/protos/ssd.proto index 3fdcd9937..e8ae96cb9 100644 --- a/research/object_detection/protos/ssd.proto +++ b/research/object_detection/protos/ssd.proto @@ -11,6 +11,7 @@ import "object_detection/protos/losses.proto"; import "object_detection/protos/matcher.proto"; import "object_detection/protos/post_processing.proto"; import "object_detection/protos/region_similarity_calculator.proto"; +import "object_detection/protos/fpn.proto"; // Configuration for Single Shot Detection (SSD) models. // Next id: 27 @@ -203,33 +204,6 @@ message SsdFeatureExtractor { } -// Configuration for Feature Pyramid Networks. -message FeaturePyramidNetworks { - // We recommend to use multi_resolution_feature_map_generator with FPN, and - // the levels there must match the levels defined below for better - // performance. - // Correspondence from FPN levels to Resnet/Mobilenet V1 feature maps: - // FPN Level Resnet Feature Map Mobilenet-V1 Feature Map - // 2 Block 1 Conv2d_3_pointwise - // 3 Block 2 Conv2d_5_pointwise - // 4 Block 3 Conv2d_11_pointwise - // 5 Block 4 Conv2d_13_pointwise - // 6 Bottomup_5 bottom_up_Conv2d_14 - // 7 Bottomup_6 bottom_up_Conv2d_15 - // 8 Bottomup_7 bottom_up_Conv2d_16 - // 9 Bottomup_8 bottom_up_Conv2d_17 - - // minimum level in feature pyramid - optional int32 min_level = 1 [default = 3]; - - // maximum level in feature pyramid - optional int32 max_level = 2 [default = 7]; - - // channel depth for additional coarse feature layers. - optional int32 additional_layer_depth = 3 [default = 256]; - -} - // Configuration for Bidirectional Feature Pyramid Networks. message BidirectionalFeaturePyramidNetworks { // minimum level in the feature pyramid. -- GitLab From ee4366480f7c888566d2ee1e0ac3836440496d40 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 8 Jul 2020 15:02:14 +0800 Subject: [PATCH 035/128] Merge --- research/object_detection/protos/faster_rcnn.proto | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/research/object_detection/protos/faster_rcnn.proto b/research/object_detection/protos/faster_rcnn.proto index 7ce9ac044..734f4b36d 100644 --- a/research/object_detection/protos/faster_rcnn.proto +++ b/research/object_detection/protos/faster_rcnn.proto @@ -213,4 +213,16 @@ message FasterRcnnFeatureExtractor { // When training with a relative large batch size (e.g. 8), it could be // desirable to enable batch norm update. 
  optional bool batch_norm_trainable = 3 [default = false];
+
+  // Hyperparameters that affect the layers of feature extractor added on top
+  // of the base feature extractor.
+  optional Hyperparams conv_hyperparams = 4;
+
+  // If the value is set to true, the base feature extractor's hyperparams will
+  // be overridden with the `conv_hyperparams`.
+  optional bool override_base_feature_extractor_hyperparams = 9
+      [default = false];
+
+  // Feature Pyramid Networks config.
+  optional FeaturePyramidNetworks fpn = 10;
 }
-- GitLab

From d1d33691cc67acb50d91535b7c499d0a340286e1 Mon Sep 17 00:00:00 2001
From: syiming
Date: Wed, 8 Jul 2020 15:06:25 +0800
Subject: [PATCH 036/128] changing argument sequence to make it consistent with model builder
---
 ...ter_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
index 7bd2e3830..2c05a3d6d 100644
--- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
+++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
@@ -278,8 +278,8 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor(
   def __init__(self,
                is_training,
                first_stage_features_stride=16,
-               conv_hyperparams=None,
                batch_norm_trainable=False,
+               conv_hyperparams=None,
                weight_decay=0.0,
                fpn_min_level=2,
                fpn_max_level=6,
@@ -290,8 +290,8 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor(
     Args:
       is_training: See base class.
       first_stage_features_stride: See base class.
-      conv_hyperparams: See base class.
       batch_norm_trainable: See base class.
+      conv_hyperparams: See base class.
       weight_decay: See base class.
       fpn_min_level: See base class.
@@ -318,8 +318,8 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor(
   def __init__(self,
                is_training,
                first_stage_features_stride=16,
-               conv_hyperparams=None,
                batch_norm_trainable=False,
+               conv_hyperparams=None,
                weight_decay=0.0,
                fpn_min_level=2,
                fpn_max_level=6,
@@ -330,8 +330,8 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor(
     Args:
       is_training: See base class.
      first_stage_features_stride: See base class.
-      conv_hyperparams: See base class.
       batch_norm_trainable: See base class.
+      conv_hyperparams: See base class.
       weight_decay: See base class.
       fpn_min_level: See base class.
@@ -359,8 +359,8 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor(
   def __init__(self,
                is_training,
                first_stage_features_stride=16,
-               conv_hyperparams=None,
                batch_norm_trainable=False,
+               conv_hyperparams=None,
                weight_decay=0.0,
                fpn_min_level=2,
                fpn_max_level=6,
@@ -371,8 +371,8 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor(
     Args:
       is_training: See base class.
       first_stage_features_stride: See base class.
-      conv_hyperparams: See base class.
       batch_norm_trainable: See base class.
+      conv_hyperparams: See base class.
       weight_decay: See base class.
       fpn_min_level: See base class.
       fpn_max_level: See base class.
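
Putting the proto additions and the new argument order together, the builder side is expected to construct the extractor roughly as follows (a sketch, not part of any patch; fpn_config stands for the FeaturePyramidNetworks message from fpn.proto, and conv_hyperparams for the hyperparams built from the config):

feature_extractor = (
    frcnn_resnet_fpn_keras.FasterRCNNResnet50FpnKerasFeatureExtractor(
        is_training=True,
        first_stage_features_stride=16,
        batch_norm_trainable=False,
        conv_hyperparams=conv_hyperparams,
        fpn_min_level=fpn_config.min_level,        # proto default: 3
        fpn_max_level=fpn_config.max_level,        # proto default: 7
        additional_layer_depth=fpn_config.additional_layer_depth))
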
-- GitLab From 6c5ddd545b874a9119257f28a3004e477417e2c7 Mon Sep 17 00:00:00 2001 From: syiming Date: Fri, 17 Jul 2020 23:56:03 +0800 Subject: [PATCH 037/128] add doc string for resnetfpn layers --- ..._rcnn_resnet_v1_fpn_keras_feature_extractor.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 2c05a3d6d..c1b75d1b8 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -33,6 +33,8 @@ _RESNET_MODEL_OUTPUT_LAYERS = { } class ResnetFPN(tf.keras.layers.Layer): + """Construct Resnet FPN layer.""" + def __init__(self, backbone_classifier, fpn_features_generator, @@ -40,6 +42,19 @@ class ResnetFPN(tf.keras.layers.Layer): fpn_min_level, resnet_block_names, base_fpn_max_level): + """Constructor. + + Args: + backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50', + 'resnet_v1_101', 'resnet_v1_152'. + fpn_features_generator: KerasFpnTopDownFeatureMaps that accepts a + dictionary of features and returns a ordered dictionary of fpn features. + coarse_feature_layers: Coarse feature layers for fpn. + fpn_min_level: the highest resolution feature map to use in FPN. The valid + values are {2, 3, 4, 5} which map to Resnet v1 layers. + resnet_block_names: a list of block names of resnet. + base_fpn_max_level: maximum level of fpn without coarse feature layers. + """ super(ResnetFPN, self).__init__() self.classification_backbone = backbone_classifier self.fpn_features_generator = fpn_features_generator -- GitLab From 8ed3a8fa2cc709a80ff4f44d16d36ce551b1dfaa Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 23 Jul 2020 00:20:49 +0000 Subject: [PATCH 038/128] hungarian --- .../matchers/hungarian_matcher.py | 63 ++++++++++++++ .../matchers/hungarian_matcher_tf2_test.py | 87 +++++++++++++++++++ 2 files changed, 150 insertions(+) create mode 100644 research/object_detection/matchers/hungarian_matcher.py create mode 100644 research/object_detection/matchers/hungarian_matcher_tf2_test.py diff --git a/research/object_detection/matchers/hungarian_matcher.py b/research/object_detection/matchers/hungarian_matcher.py new file mode 100644 index 000000000..bc31c7728 --- /dev/null +++ b/research/object_detection/matchers/hungarian_matcher.py @@ -0,0 +1,63 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+"""Hungarian bipartite matcher implementation."""
+
+import numpy as np
+import tensorflow.compat.v1 as tf
+
+from object_detection.core import matcher
+from scipy.optimize import linear_sum_assignment
+
+class HungarianBipartiteMatcher(matcher.Matcher):
+  """Bipartite matcher that uses the Hungarian algorithm."""
+
+  def __init__(self):
+    """Constructs a Matcher."""
+
+    super(HungarianBipartiteMatcher, self).__init__()
+
+  def _match(self, similarity_matrix, valid_rows):
+    """Optimally bipartite matches a collection of rows and columns.
+
+    Args:
+      similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
+        where higher values mean more similar.
+      valid_rows: A boolean tensor of shape [N] indicating the rows that are
+        valid.
+
+    Returns:
+      match_results: int32 tensor of shape [M] with match_results[i]=-1
+        meaning that column i is not matched and otherwise that it is matched to
+        row match_results[i].
+    """
+    valid_row_sim_matrix = tf.gather(similarity_matrix,
+                                     tf.squeeze(tf.where(valid_rows), axis=-1))
+    distance_matrix = -1 * valid_row_sim_matrix
+
+    def numpy_wrapper(inputs):
+      def numpy_matching(input_matrix):
+        row_indices, col_indices = linear_sum_assignment(input_matrix)
+        match_results = np.full(input_matrix.shape[1], -1)
+        for i in range(len(col_indices)):
+          match_results[col_indices[i]] = row_indices[i]
+        return match_results.astype(np.int32)
+
+      return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32])
+
+    matching_result = tf.autograph.experimental.do_not_convert(
+        numpy_wrapper)([distance_matrix])
+
+    return tf.reshape(matching_result, [-1])
diff --git a/research/object_detection/matchers/hungarian_matcher_tf2_test.py b/research/object_detection/matchers/hungarian_matcher_tf2_test.py
new file mode 100644
index 000000000..c2d13f082
--- /dev/null
+++ b/research/object_detection/matchers/hungarian_matcher_tf2_test.py
@@ -0,0 +1,87 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================== + +"""Tests for object_detection.core.bipartite_matcher.""" +import unittest +import numpy as np +import tensorflow.compat.v1 as tf + +from object_detection.utils import test_case +from object_detection.utils import tf_version + +if tf_version.is_tf2(): + from object_detection.matchers import hungarian_matcher # pylint: disable=g-import-not-at-top + + +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') +class GreedyBipartiteMatcherTest(test_case.TestCase): + + def test_get_expected_matches_when_all_rows_are_valid(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.ones([2], dtype=np.bool) + expected_match_results = [-1, 1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + + def test_get_expected_matches_with_all_rows_be_default(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + expected_match_results = [-1, 1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix) + + self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + def test_get_no_matches_with_zero_valid_rows(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.zeros([2], dtype=np.bool) + expected_match_results = [-1, -1, -1] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row(self): + similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], + dtype=np.float32) + valid_rows = np.array([True, False], dtype=np.bool) + expected_match_results = [-1, -1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + def test_get_expected_matches_with_only_one_valid_row_at_bottom(self): + similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]], + dtype=np.float32) + valid_rows = np.array([False, True], dtype=np.bool) + expected_match_results = [-1, -1, 0] + + matcher = hungarian_matcher.HungarianBipartiteMatcher() + match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) + + self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + +if __name__ == '__main__': + tf.test.main() -- GitLab From ca244433f76d9a059e1909ad977b346194ff7600 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 23 Jul 2020 00:21:38 +0000 Subject: [PATCH 039/128] remove unnecessary file --- research/object_detection/model_main_tf2.py | 107 -------------------- 1 file changed, 107 deletions(-) delete mode 100644 research/object_detection/model_main_tf2.py diff --git a/research/object_detection/model_main_tf2.py b/research/object_detection/model_main_tf2.py deleted file mode 100644 index c6c770278..000000000 --- a/research/object_detection/model_main_tf2.py +++ /dev/null @@ -1,107 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Creates and runs TF2 object detection models. - -For local training/evaluation run: -PIPELINE_CONFIG_PATH=path/to/pipeline.config -MODEL_DIR=/tmp/model_outputs -NUM_TRAIN_STEPS=10000 -SAMPLE_1_OF_N_EVAL_EXAMPLES=1 -python model_main_tf2.py -- \ - --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \ - --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ - --pipeline_config_path=$PIPELINE_CONFIG_PATH \ - --alsologtostderr -""" -from absl import flags -import tensorflow.compat.v2 as tf -from object_detection import model_lib_v2 - -flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' - 'file.') -flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') -flags.DEFINE_bool('use_tpu', False, 'Whether to use TPUs') -flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train ' - 'data (only supported in distributed training).') -flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of ' - 'every n eval input examples, where n is provided.') -flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' - 'one of every n train input examples for evaluation, ' - 'where n is provided. This is only used if ' - '`eval_training_data` is True.') -flags.DEFINE_string( - 'model_dir', None, 'Path to output model directory ' - 'where event and checkpoint files will be written.') -flags.DEFINE_string( - 'checkpoint_dir', None, 'Path to directory holding a checkpoint. If ' - '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' - 'writing resulting metrics to `model_dir`.') - -flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an' - 'evaluation checkpoint before exiting.') - -flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.') -flags.DEFINE_string( - 'tpu_name', - default=None, - help='Name of the Cloud TPU for Cluster Resolvers.') -flags.DEFINE_integer( - 'num_workers', 1, 'When num_workers > 1, training uses ' - 'MultiWorkerMirroredStrategy. When num_workers = 1 it uses ' - 'MirroredStrategy.') - -FLAGS = flags.FLAGS - - -def main(unused_argv): - flags.mark_flag_as_required('model_dir') - flags.mark_flag_as_required('pipeline_config_path') - tf.config.set_soft_device_placement(True) - - if FLAGS.checkpoint_dir: - model_lib_v2.eval_continuously( - pipeline_config_path=FLAGS.pipeline_config_path, - model_dir=FLAGS.model_dir, - train_steps=FLAGS.num_train_steps, - sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, - sample_1_of_n_eval_on_train_examples=( - FLAGS.sample_1_of_n_eval_on_train_examples), - checkpoint_dir=FLAGS.checkpoint_dir, - wait_interval=300, timeout=FLAGS.eval_timeout) - else: - if FLAGS.use_tpu: - # TPU is automatically inferred if tpu_name is None and - # we are running under cloud ai-platform. 
- resolver = tf.distribute.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name) - tf.config.experimental_connect_to_cluster(resolver) - tf.tpu.experimental.initialize_tpu_system(resolver) - strategy = tf.distribute.experimental.TPUStrategy(resolver) - elif FLAGS.num_workers > 1: - strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() - else: - strategy = tf.compat.v2.distribute.MirroredStrategy() - - with strategy.scope(): - model_lib_v2.train_loop( - pipeline_config_path=FLAGS.pipeline_config_path, - model_dir=FLAGS.model_dir, - train_steps=FLAGS.num_train_steps, - use_tpu=FLAGS.use_tpu) - -if __name__ == '__main__': - tf.compat.v1.app.run() -- GitLab From fbe9b495f1758957cf3425752bdd85c643569bfc Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 23 Jul 2020 00:23:31 +0000 Subject: [PATCH 040/128] model main tf2 --- research/object_detection/model_main_tf2.py | 106 ++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 research/object_detection/model_main_tf2.py diff --git a/research/object_detection/model_main_tf2.py b/research/object_detection/model_main_tf2.py new file mode 100644 index 000000000..82c7c7acf --- /dev/null +++ b/research/object_detection/model_main_tf2.py @@ -0,0 +1,106 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +r"""Creates and runs TF2 object detection models. + +For local training/evaluation run: +PIPELINE_CONFIG_PATH=path/to/pipeline.config +MODEL_DIR=/tmp/model_outputs +NUM_TRAIN_STEPS=10000 +SAMPLE_1_OF_N_EVAL_EXAMPLES=1 +python model_main_tf2.py -- \ + --model_dir=$MODEL_DIR --num_train_steps=$NUM_TRAIN_STEPS \ + --sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \ + --pipeline_config_path=$PIPELINE_CONFIG_PATH \ + --alsologtostderr +""" +from absl import flags +import tensorflow.compat.v2 as tf +from object_detection import model_lib_v2 + +flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' + 'file.') +flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') +flags.DEFINE_bool('eval_on_train_data', False, 'Enable evaluating on train ' + 'data (only supported in distributed training).') +flags.DEFINE_integer('sample_1_of_n_eval_examples', None, 'Will sample one of ' + 'every n eval input examples, where n is provided.') +flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' + 'one of every n train input examples for evaluation, ' + 'where n is provided. This is only used if ' + '`eval_training_data` is True.') +flags.DEFINE_string( + 'model_dir', None, 'Path to output model directory ' + 'where event and checkpoint files will be written.') +flags.DEFINE_string( + 'checkpoint_dir', None, 'Path to directory holding a checkpoint. 
If ' + '`checkpoint_dir` is provided, this binary operates in eval-only mode, ' + 'writing resulting metrics to `model_dir`.') + +flags.DEFINE_integer('eval_timeout', 3600, 'Number of seconds to wait for an' + 'evaluation checkpoint before exiting.') + +flags.DEFINE_bool('use_tpu', False, 'Whether the job is executing on a TPU.') +flags.DEFINE_string( + 'tpu_name', + default=None, + help='Name of the Cloud TPU for Cluster Resolvers.') +flags.DEFINE_integer( + 'num_workers', 1, 'When num_workers > 1, training uses ' + 'MultiWorkerMirroredStrategy. When num_workers = 1 it uses ' + 'MirroredStrategy.') + +FLAGS = flags.FLAGS + + +def main(unused_argv): + flags.mark_flag_as_required('model_dir') + flags.mark_flag_as_required('pipeline_config_path') + tf.config.set_soft_device_placement(True) + + if FLAGS.checkpoint_dir: + model_lib_v2.eval_continuously( + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, + sample_1_of_n_eval_on_train_examples=( + FLAGS.sample_1_of_n_eval_on_train_examples), + checkpoint_dir=FLAGS.checkpoint_dir, + wait_interval=300, timeout=FLAGS.eval_timeout) + else: + if FLAGS.use_tpu: + # TPU is automatically inferred if tpu_name is None and + # we are running under cloud ai-platform. + resolver = tf.distribute.cluster_resolver.TPUClusterResolver( + FLAGS.tpu_name) + tf.config.experimental_connect_to_cluster(resolver) + tf.tpu.experimental.initialize_tpu_system(resolver) + strategy = tf.distribute.experimental.TPUStrategy(resolver) + elif FLAGS.num_workers > 1: + strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() + else: + strategy = tf.compat.v2.distribute.MirroredStrategy() + + with strategy.scope(): + model_lib_v2.train_loop( + pipeline_config_path=FLAGS.pipeline_config_path, + model_dir=FLAGS.model_dir, + train_steps=FLAGS.num_train_steps, + use_tpu=FLAGS.use_tpu) + +if __name__ == '__main__': + tf.compat.v1.app.run() -- GitLab From d96d2e3e5d7ef2d690f85d665bad12bbdf94ec18 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 23 Jul 2020 00:30:57 +0000 Subject: [PATCH 041/128] finish pylint --- .../matchers/hungarian_matcher.py | 4 +-- .../matchers/hungarian_matcher_tf2_test.py | 32 +++++++++++-------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/research/object_detection/matchers/hungarian_matcher.py b/research/object_detection/matchers/hungarian_matcher.py index bc31c7728..101de395f 100644 --- a/research/object_detection/matchers/hungarian_matcher.py +++ b/research/object_detection/matchers/hungarian_matcher.py @@ -58,6 +58,6 @@ class HungarianBipartiteMatcher(matcher.Matcher): return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32]) matching_result = tf.autograph.experimental.do_not_convert( - numpy_wrapper)([distance_matrix]) - + numpy_wrapper)([distance_matrix]) + return tf.reshape(matching_result, [-1]) diff --git a/research/object_detection/matchers/hungarian_matcher_tf2_test.py b/research/object_detection/matchers/hungarian_matcher_tf2_test.py index c2d13f082..3a281e76b 100644 --- a/research/object_detection/matchers/hungarian_matcher_tf2_test.py +++ b/research/object_detection/matchers/hungarian_matcher_tf2_test.py @@ -26,7 +26,7 @@ if tf_version.is_tf2(): @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') -class GreedyBipartiteMatcherTest(test_case.TestCase): +class HungarianBipartiteMatcherTest(test_case.TestCase): def 
test_get_expected_matches_when_all_rows_are_valid(self): similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], @@ -37,9 +37,9 @@ class GreedyBipartiteMatcherTest(test_case.TestCase): matcher = hungarian_matcher.HungarianBipartiteMatcher() match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) - self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) - def test_get_expected_matches_with_all_rows_be_default(self): similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], dtype=np.float32) @@ -47,41 +47,45 @@ class GreedyBipartiteMatcherTest(test_case.TestCase): matcher = hungarian_matcher.HungarianBipartiteMatcher() match_results_out = matcher.match(similarity_matrix) - - self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) def test_get_no_matches_with_zero_valid_rows(self): similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], dtype=np.float32) valid_rows = np.zeros([2], dtype=np.bool) expected_match_results = [-1, -1, -1] - + matcher = hungarian_matcher.HungarianBipartiteMatcher() match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) - - self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) def test_get_expected_matches_with_only_one_valid_row(self): similarity_matrix = np.array([[0.50, 0.1, 0.8], [0.15, 0.2, 0.3]], dtype=np.float32) valid_rows = np.array([True, False], dtype=np.bool) expected_match_results = [-1, -1, 0] - + matcher = hungarian_matcher.HungarianBipartiteMatcher() match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) - - self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) def test_get_expected_matches_with_only_one_valid_row_at_bottom(self): similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8]], dtype=np.float32) valid_rows = np.array([False, True], dtype=np.bool) expected_match_results = [-1, -1, 0] - + matcher = hungarian_matcher.HungarianBipartiteMatcher() match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows) - - self.assertAllEqual(match_results_out._match_results.numpy(), expected_match_results) + + self.assertAllEqual(match_results_out._match_results.numpy(), + expected_match_results) if __name__ == '__main__': tf.test.main() -- GitLab From e6abe8214afc047dbec7fd9b6efb206b91a977d9 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Fri, 24 Jul 2020 08:17:07 +0000 Subject: [PATCH 042/128] fix pr --- .../object_detection/matchers/hungarian_matcher.py | 6 +++--- .../matchers/hungarian_matcher_tf2_test.py | 13 +++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/research/object_detection/matchers/hungarian_matcher.py b/research/object_detection/matchers/hungarian_matcher.py index 101de395f..b63db14c3 100644 --- a/research/object_detection/matchers/hungarian_matcher.py +++ b/research/object_detection/matchers/hungarian_matcher.py @@ -21,8 +21,9 @@ import numpy as np from object_detection.core import matcher from scipy.optimize import linear_sum_assignment + class HungarianBipartiteMatcher(matcher.Matcher): - """Wraps a 
Tensorflow greedy bipartite matcher."""
+  """Wraps a Hungarian bipartite matcher into TensorFlow."""
 
   def __init__(self):
     """Constructs a Matcher."""
@@ -51,8 +52,7 @@ class HungarianBipartiteMatcher(matcher.Matcher):
     def numpy_matching(input_matrix):
       row_indices, col_indices = linear_sum_assignment(input_matrix)
       match_results = np.full(input_matrix.shape[1], -1)
-      for i in range(len(col_indices)):
-        match_results[col_indices[i]] = row_indices[i]
+      match_results[col_indices] = row_indices
       return match_results.astype(np.int32)
 
     return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32])
diff --git a/research/object_detection/matchers/hungarian_matcher_tf2_test.py b/research/object_detection/matchers/hungarian_matcher_tf2_test.py
index 3a281e76b..9003afbca 100644
--- a/research/object_detection/matchers/hungarian_matcher_tf2_test.py
+++ b/research/object_detection/matchers/hungarian_matcher_tf2_test.py
@@ -87,5 +87,18 @@ class HungarianBipartiteMatcherTest(test_case.TestCase):
     self.assertAllEqual(match_results_out._match_results.numpy(),
                         expected_match_results)
 
+  def test_get_expected_matches_with_two_valid_rows(self):
+    similarity_matrix = np.array([[0.15, 0.2, 0.3], [0.50, 0.1, 0.8],
+                                  [0.84, 0.32, 0.2]],
+                                 dtype=np.float32)
+    valid_rows = np.array([True, False, True], dtype=np.bool)
+    expected_match_results = [1, -1, 0]
+
+    matcher = hungarian_matcher.HungarianBipartiteMatcher()
+    match_results_out = matcher.match(similarity_matrix, valid_rows=valid_rows)
+
+    self.assertAllEqual(match_results_out._match_results.numpy(),
+                        expected_match_results)
+
 if __name__ == '__main__':
   tf.test.main()
-- GitLab

From b3f63b000712dbfab7d3441be3e8326bbfcaf668 Mon Sep 17 00:00:00 2001
From: Srihari Humbarwadi
Date: Fri, 24 Jul 2020 19:36:06 +0530
Subject: [PATCH 043/128] Fixed typos

- Fixed `intermediate_scale` in `Anchor`
- "ratio" and "divisible" in doc string
---
 official/vision/detection/dataloader/anchor.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/official/vision/detection/dataloader/anchor.py b/official/vision/detection/dataloader/anchor.py
index f46f74800..b6ea226d8 100644
--- a/official/vision/detection/dataloader/anchor.py
+++ b/official/vision/detection/dataloader/anchor.py
@@ -46,14 +46,14 @@ class Anchor(object):
     num_scales: integer number representing intermediate scales added on each
       level. For instances, num_scales=2 adds one additional intermediate
       anchor scales [2^0, 2^0.5] on each level.
-    aspect_ratios: list of float numbers representing the aspect raito anchors
+    aspect_ratios: list of float numbers representing the aspect ratio anchors
       added on each level. The number indicates the ratio of width to height.
      For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors on each
      scale level.
     anchor_size: float number representing the scale of size of the base
      anchor to the feature stride 2^level.
     image_size: a list of integer numbers or Tensors representing
-      [height, width] of the input image size.The image_size should be divided
+      [height, width] of the input image size. The image_size should be divisible
       by the largest feature stride 2^max_level.
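
      For example (an illustrative computation from the generation rule in
      this file): with num_scales=2 and anchor_size=4.0, anchors at level 3
      have base sizes 4.0 * 2^3 * 2^(0/2) = 32.0 and
      4.0 * 2^3 * 2^(1/2) ~= 45.25 before aspect ratio scaling.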
""" self.min_level = min_level @@ -77,8 +77,8 @@ class Anchor(object): for scale in range(self.num_scales): for aspect_ratio in self.aspect_ratios: stride = 2 ** level - intermidate_scale = 2 ** (scale / float(self.num_scales)) - base_anchor_size = self.anchor_size * stride * intermidate_scale + intermediate_scale = 2 ** (scale / float(self.num_scales)) + base_anchor_size = self.anchor_size * stride * intermediate_scale aspect_x = aspect_ratio ** 0.5 aspect_y = aspect_ratio ** -0.5 half_anchor_size_x = base_anchor_size * aspect_x / 2.0 -- GitLab From 26f976c1910204d419625c96b0b9ae769b2d172c Mon Sep 17 00:00:00 2001 From: syiming Date: Sun, 19 Jul 2020 14:25:53 +0800 Subject: [PATCH 044/128] add pad_to_multiple --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index c1b75d1b8..438a16c83 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -39,6 +39,7 @@ class ResnetFPN(tf.keras.layers.Layer): backbone_classifier, fpn_features_generator, coarse_feature_layers, + pad_to_multiple, fpn_min_level, resnet_block_names, base_fpn_max_level): @@ -59,12 +60,13 @@ class ResnetFPN(tf.keras.layers.Layer): self.classification_backbone = backbone_classifier self.fpn_features_generator = fpn_features_generator self.coarse_feature_layers = coarse_feature_layers + self.pad_to_multiple = pad_to_multiple self._fpn_min_level = fpn_min_level self._resnet_block_names = resnet_block_names self._base_fpn_max_level = base_fpn_max_level def call(self, inputs): - inputs = ops.pad_to_multiple(inputs, 32) + inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) backbone_outputs = self.classification_backbone(inputs) feature_block_list = [] @@ -102,6 +104,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( first_stage_features_stride, conv_hyperparams, batch_norm_trainable=False, + pad_to_multiple=32, weight_decay=0.0, fpn_min_level=2, fpn_max_level=6, @@ -152,6 +155,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._freeze_batchnorm = (not batch_norm_trainable) + self._pad_to_multiple = pad_to_multiple self._override_base_feature_extractor_hyperparams = \ override_base_feature_extractor_hyperparams self._resnet_block_names = ['block1', 'block2', 'block3', 'block4'] @@ -250,6 +254,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( feature_extractor_model = ResnetFPN(self.classification_backbone, self._fpn_features_generator, self._coarse_feature_layers, + self._pad_to_multiple, self._fpn_min_level, self._resnet_block_names, self._base_fpn_max_level) -- GitLab From 0e0f739b965cd99b7cc8c02b810a5c8174294507 Mon Sep 17 00:00:00 2001 From: syiming Date: Sun, 19 Jul 2020 14:35:27 +0800 Subject: [PATCH 045/128] force pad to multiple pad tensors of zero with same dtype of input --- research/object_detection/utils/ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/research/object_detection/utils/ops.py b/research/object_detection/utils/ops.py index 0cd83d38d..9492b2137 100644 --- a/research/object_detection/utils/ops.py +++ b/research/object_detection/utils/ops.py @@ -216,13 +216,13 @@ def 
pad_to_multiple(tensor, multiple): height_pad = tf.zeros([ batch_size, padded_tensor_height - tensor_height, tensor_width, tensor_depth - ]) + ], dtype=tensor.dtype) tensor = tf.concat([tensor, height_pad], 1) if padded_tensor_width != tensor_width: width_pad = tf.zeros([ batch_size, padded_tensor_height, padded_tensor_width - tensor_width, tensor_depth - ]) + ], dtype=tensor.dtype) tensor = tf.concat([tensor, width_pad], 2) return tensor -- GitLab From 145ac875399f34e944ff3727742a8e02e41e6410 Mon Sep 17 00:00:00 2001 From: syiming Date: Sun, 26 Jul 2020 18:38:08 +0800 Subject: [PATCH 046/128] fix coding style --- ...n_resnet_v1_fpn_keras_feature_extractor.py | 114 ++++++++++-------- 1 file changed, 62 insertions(+), 52 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 438a16c83..06ee71cf6 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -35,15 +35,15 @@ _RESNET_MODEL_OUTPUT_LAYERS = { class ResnetFPN(tf.keras.layers.Layer): """Construct Resnet FPN layer.""" - def __init__(self, - backbone_classifier, - fpn_features_generator, - coarse_feature_layers, - pad_to_multiple, - fpn_min_level, - resnet_block_names, - base_fpn_max_level): - """Constructor. + def __init__(self, + backbone_classifier, + fpn_features_generator, + coarse_feature_layers, + pad_to_multiple, + fpn_min_level, + resnet_block_names, + base_fpn_max_level): + """Constructor. Args: backbone_classifier: Classifier backbone. Should be one of 'resnet_v1_50', @@ -56,41 +56,51 @@ class ResnetFPN(tf.keras.layers.Layer): resnet_block_names: a list of block names of resnet. base_fpn_max_level: maximum level of fpn without coarse feature layers. 
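
      For example, with fpn_min_level=2 and base_fpn_max_level=5, the layer
      reads resnet outputs 'block1' through 'block4', and any coarse feature
      layers are applied after the coarsest FPN map.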
""" - super(ResnetFPN, self).__init__() - self.classification_backbone = backbone_classifier - self.fpn_features_generator = fpn_features_generator - self.coarse_feature_layers = coarse_feature_layers - self.pad_to_multiple = pad_to_multiple - self._fpn_min_level = fpn_min_level - self._resnet_block_names = resnet_block_names - self._base_fpn_max_level = base_fpn_max_level - - def call(self, inputs): - inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) - backbone_outputs = self.classification_backbone(inputs) - - feature_block_list = [] - for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - feature_block_list.append('block{}'.format(level - 1)) - feature_block_map = dict( - list(zip(self._resnet_block_names, backbone_outputs))) - fpn_input_image_features = [ - (feature_block, feature_block_map[feature_block]) - for feature_block in feature_block_list] - fpn_features = self.fpn_features_generator(fpn_input_image_features) - - feature_maps = [] - for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): - feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) - last_feature_map = fpn_features['top_down_block{}'.format( - self._base_fpn_max_level - 1)] - - for coarse_feature_layers in self.coarse_feature_layers: - for layer in coarse_feature_layers: - last_feature_map = layer(last_feature_map) - feature_maps.append(last_feature_map) - - return feature_maps + super(ResnetFPN, self).__init__() + self.classification_backbone = backbone_classifier + self.fpn_features_generator = fpn_features_generator + self.coarse_feature_layers = coarse_feature_layers + self.pad_to_multiple = pad_to_multiple + self._fpn_min_level = fpn_min_level + self._resnet_block_names = resnet_block_names + self._base_fpn_max_level = base_fpn_max_level + + def call(self, inputs): + """Create ResnetFPN layer. + + Args: + inputs: A [batch, height_out, width_out, channels] float32 tensor + representing a batch of images. + + Return: + feature_maps: A list of tensors with shape [batch, height, width, depth] + represent extracted features. 
+ """ + inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) + backbone_outputs = self.classification_backbone(inputs) + + feature_block_list = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_block_list.append('block{}'.format(level - 1)) + feature_block_map = dict( + list(zip(self._resnet_block_names, backbone_outputs))) + fpn_input_image_features = [ + (feature_block, feature_block_map[feature_block]) + for feature_block in feature_block_list] + fpn_features = self.fpn_features_generator(fpn_input_image_features) + + feature_maps = [] + for level in range(self._fpn_min_level, self._base_fpn_max_level + 1): + feature_maps.append(fpn_features['top_down_block{}'.format(level-1)]) + last_feature_map = fpn_features['top_down_block{}'.format( + self._base_fpn_max_level - 1)] + + for coarse_feature_layers in self.coarse_feature_layers: + for layer in coarse_feature_layers: + last_feature_map = layer(last_feature_map) + feature_maps.append(last_feature_map) + + return feature_maps class FasterRCNNResnetV1FpnKerasFeatureExtractor( @@ -229,7 +239,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( conv_hyperparams=self._conv_hyperparams, freeze_batchnorm=self._freeze_batchnorm, name='FeatureMaps')) - + # Construct coarse feature layers for i in range(self._base_fpn_max_level, self._fpn_max_level): layers = [] @@ -250,14 +260,14 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( self._conv_hyperparams.build_activation_layer( name=layer_name)) self._coarse_feature_layers.append(layers) - + feature_extractor_model = ResnetFPN(self.classification_backbone, - self._fpn_features_generator, - self._coarse_feature_layers, - self._pad_to_multiple, - self._fpn_min_level, - self._resnet_block_names, - self._base_fpn_max_level) + self._fpn_features_generator, + self._coarse_feature_layers, + self._pad_to_multiple, + self._fpn_min_level, + self._resnet_block_names, + self._base_fpn_max_level) return feature_extractor_model def get_box_classifier_feature_extractor_model(self, name=None): -- GitLab From a85c5b9699f1c080dbe7b1a9bf4e721209d6cd1a Mon Sep 17 00:00:00 2001 From: syiming Date: Sun, 26 Jul 2020 19:13:31 +0800 Subject: [PATCH 047/128] add pad_to_multiple --- research/object_detection/protos/faster_rcnn.proto | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/research/object_detection/protos/faster_rcnn.proto b/research/object_detection/protos/faster_rcnn.proto index 734f4b36d..3a8fe4e51 100644 --- a/research/object_detection/protos/faster_rcnn.proto +++ b/research/object_detection/protos/faster_rcnn.proto @@ -220,9 +220,14 @@ message FasterRcnnFeatureExtractor { // if the value is set to true, the base feature extractor's hyperparams will // be overridden with the `conv_hyperparams`. - optional bool override_base_feature_extractor_hyperparams = 9 + optional bool override_base_feature_extractor_hyperparams = 5 [default = false]; + // The nearest multiple to zero-pad the input height and width dimensions to. + // For example, if pad_to_multiple = 2, input dimensions are zero-padded + // until the resulting dimensions are even. + optional int32 pad_to_multiple = 6 [default = 32]; + // Feature Pyramid Networks config. - optional FeaturePyramidNetworks fpn = 10; + optional FeaturePyramidNetworks fpn = 7; } -- GitLab From e9b70e67a57fe3ab8ad03b8f966069bd0845e64a Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Mon, 27 Jul 2020 13:37:37 -0700 Subject: [PATCH 048/128] Internal change PiperOrigin-RevId: 323430879 --- official/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/official/requirements.txt b/official/requirements.txt index a29867a17..c700d2911 100644 --- a/official/requirements.txt +++ b/official/requirements.txt @@ -21,7 +21,7 @@ pyyaml # CV related dependencies opencv-python-headless Pillow --e git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI +pycocotools # NLP related dependencies seqeval sentencepiece -- GitLab From b8014d558826fb65307ebbca8230f20a7c5326dc Mon Sep 17 00:00:00 2001 From: Simon Kornblith Date: Mon, 27 Jul 2020 19:43:53 -0700 Subject: [PATCH 049/128] Internal change PiperOrigin-RevId: 323493207 --- orbit/controller.py | 11 ++++++++--- orbit/controller_test.py | 3 ++- orbit/utils.py | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/orbit/controller.py b/orbit/controller.py index 54de7ca8d..8f4bc6247 100644 --- a/orbit/controller.py +++ b/orbit/controller.py @@ -16,8 +16,9 @@ """A light weight utilities to train TF2 models.""" import time -from typing import Callable, Optional, Text, Union +from typing import Callable, Dict, Optional, Text, Union from absl import logging +import numpy as np from orbit import runner from orbit import utils @@ -177,7 +178,7 @@ class Controller: if checkpoint_at_completion: self.save_checkpoint() - def evaluate(self, steps: int = None): + def evaluate(self, steps: int = None) -> Optional[Dict[Text, np.number]]: """Runs evaluation. This method calls the `evaluate` method on the Evaluator object for `steps` @@ -186,10 +187,12 @@ class Controller: Args: steps: The number of steps to evaluate for. + Returns: + The evaluation results as a dictionary of numpy values. + Raises: ValueError: If no checkpoint found in `self.checkpoint_manager.directory`. ValueError: If `evaluator` is not provided. - """ if self.evaluator is None: raise ValueError("`evaluator` must be provided to call `evaluate()` " @@ -217,6 +220,8 @@ class Controller: self.eval_summary_manager.write_summaries(eval_outputs) self.eval_summary_manager.flush() + return eval_outputs + def restore_checkpoint(self, checkpoint_path: Text = None): """Restore or initialize the model. diff --git a/orbit/controller_test.py b/orbit/controller_test.py index 9e24c1277..b28f5d3a9 100644 --- a/orbit/controller_test.py +++ b/orbit/controller_test.py @@ -329,7 +329,7 @@ class ControllerTest(tf.test.TestCase, parameterized.TestCase): checkpoint_manager=checkpoint_manager, summary_dir=os.path.join(self.model_dir, "summaries/train"), eval_summary_dir=os.path.join(self.model_dir, "summaries/eval")) - test_controller.evaluate(steps=2) + eval_results = test_controller.evaluate(steps=2) # Only eval summaries are written self.assertFalse( @@ -339,6 +339,7 @@ class ControllerTest(tf.test.TestCase, parameterized.TestCase): self.assertNotEmpty( summaries_with_matching_keyword( "eval_loss", os.path.join(self.model_dir, "summaries/eval"))) + self.assertIn("eval_loss", eval_results) # Tests continuous eval with timeout and timeout_fn. done_file = os.path.join(self.model_dir, "summaries/eval/Done") diff --git a/orbit/utils.py b/orbit/utils.py index c649ef8ce..177a002cf 100644 --- a/orbit/utils.py +++ b/orbit/utils.py @@ -378,7 +378,7 @@ def get_value(x) -> np.ndarray: x: input variable. Returns: - A Numpy array. + A Numpy array or number. 
""" if not tf.is_tensor(x): return x -- GitLab From f8c2a9178cce19c36899888bb0cbd30f6ba5aa07 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Mon, 27 Jul 2020 20:43:22 -0700 Subject: [PATCH 050/128] Migrate to tf gelu. Will remove activations/gelu.py after dependencies are cleaned up. PiperOrigin-RevId: 323499265 --- official/modeling/activations/gelu.py | 10 +--------- official/nlp/xlnet/xlnet_modeling.py | 22 ++-------------------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/official/modeling/activations/gelu.py b/official/modeling/activations/gelu.py index c045bffa9..dc4de8204 100644 --- a/official/modeling/activations/gelu.py +++ b/official/modeling/activations/gelu.py @@ -14,12 +14,6 @@ # ============================================================================== """Gaussian error linear unit.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - import tensorflow as tf @@ -35,6 +29,4 @@ def gelu(x): Returns: `x` with the GELU activation applied. """ - cdf = 0.5 * (1.0 + tf.tanh( - (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))) - return x * cdf + return tf.keras.activations.gelu(x, approximate=True) diff --git a/official/nlp/xlnet/xlnet_modeling.py b/official/nlp/xlnet/xlnet_modeling.py index f1f6d23ad..5eefaf909 100644 --- a/official/nlp/xlnet/xlnet_modeling.py +++ b/official/nlp/xlnet/xlnet_modeling.py @@ -14,32 +14,14 @@ # ============================================================================== """Keras layers of XLNet model in TF 2.0.""" -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - import copy -import numpy as np +import functools import tensorflow as tf from official.nlp.xlnet import data_utils -def gelu(x): - """Gaussian Error Linear Unit. - - This is a smoother version of the RELU. - Original paper: https://arxiv.org/abs/1606.08415 - Args: - x: float Tensor to perform activation. - - Returns: - `x` with the GELU activation applied. - """ - cdf = 0.5 * (1.0 + tf.tanh( - (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) - return x * cdf +gelu = functools.partial(tf.keras.activations.gelu, approximate=True) def rel_shift(x, klen=-1): -- GitLab From a540c3d78e4ae04cee0bbec2dfce12a740e95eb2 Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Mon, 27 Jul 2020 23:12:27 -0700 Subject: [PATCH 051/128] Internal change PiperOrigin-RevId: 323515364 --- official/modeling/training/distributed_executor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/official/modeling/training/distributed_executor.py b/official/modeling/training/distributed_executor.py index 11451260c..520c69d04 100644 --- a/official/modeling/training/distributed_executor.py +++ b/official/modeling/training/distributed_executor.py @@ -698,7 +698,8 @@ class DistributedExecutor(object): logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', checkpoint_path) - checkpoint.restore(checkpoint_path) + status = checkpoint.restore(checkpoint_path) + status.expect_partial().assert_existing_objects_matched() self.global_train_step = model.optimizer.iterations eval_iterator = self._get_input_iterator(eval_input_fn, strategy) -- GitLab From 9556ebcc076e696dfd698bda8e25698db0625036 Mon Sep 17 00:00:00 2001 From: Ruoxin Sang Date: Mon, 27 Jul 2020 23:56:43 -0700 Subject: [PATCH 052/128] Internal change PiperOrigin-RevId: 323518930 --- orbit/controller.py | 4 +- orbit/controller_test.py | 76 +++++++++++++++++++++++++++++++++++++ orbit/runner.py | 6 ++- orbit/standard_runner.py | 6 ++- orbit/utils.py | 81 +++++++++++++++++++++++++++++----------- 5 files changed, 145 insertions(+), 28 deletions(-) diff --git a/orbit/controller.py b/orbit/controller.py index 8f4bc6247..dac71d15f 100644 --- a/orbit/controller.py +++ b/orbit/controller.py @@ -207,7 +207,7 @@ class Controller: else: logging.info("Evaluating at train step: %s", current_step) - with self.eval_summary_manager.summary_writer.as_default(): + with self.eval_summary_manager.summary_writer().as_default(): eval_outputs = self.evaluator.evaluate(steps) if eval_outputs: @@ -339,7 +339,7 @@ class Controller: current_step += num_steps num_steps = tf.convert_to_tensor(num_steps, dtype=tf.int32) - with self.summary_manager.summary_writer.as_default(): + with self.summary_manager.summary_writer().as_default(): # Create a lambda that returns true when summaries should be written. should_record = False # Allows static optimization in no-summary cases. 
if self.summary_interval: diff --git a/orbit/controller_test.py b/orbit/controller_test.py index b28f5d3a9..2e4a815b0 100644 --- a/orbit/controller_test.py +++ b/orbit/controller_test.py @@ -158,6 +158,57 @@ class TestEvaluator(standard_runner.StandardEvaluator): } +class TestEvaluatorWithNestedSummary(standard_runner.StandardEvaluator): + """Implements the training and evaluation APIs for the test model.""" + + def __init__(self): + self.strategy = tf.distribute.get_strategy() + self.model = create_model() + dataset = self.strategy.experimental_distribute_datasets_from_function( + dataset_fn) + dataset2 = self.strategy.experimental_distribute_datasets_from_function( + dataset_fn) + self.loss = tf.keras.metrics.Mean("loss", dtype=tf.float32) + self.accuracy = tf.keras.metrics.CategoricalAccuracy( + "accuracy", dtype=tf.float32) + self.loss2 = tf.keras.metrics.Mean("loss", dtype=tf.float32) + self.accuracy2 = tf.keras.metrics.CategoricalAccuracy( + "accuracy", dtype=tf.float32) + standard_runner.StandardEvaluator.__init__( + self, eval_dataset={ + "dataset": dataset, + "dataset2": dataset2 + }) + + def eval_step(self, iterator): + + def _replicated_step(loss, accuracy, inputs): + """Replicated evaluation step.""" + inputs, targets = inputs + outputs = self.model(inputs) + loss.update_state(tf.keras.losses.MSE(targets, outputs)) + accuracy.update_state(targets, outputs) + + self.strategy.run( + lambda inputs: _replicated_step(self.loss, self.accuracy, inputs), + args=(next(iterator["dataset"]),)) + self.strategy.run( + lambda inputs: _replicated_step(self.loss2, self.accuracy2, inputs), + args=(next(iterator["dataset2"]),)) + + def eval_end(self): + return { + "dataset": { + "loss": self.loss.result(), + "accuracy": self.accuracy.result() + }, + "dataset2": { + "loss": self.loss2.result(), + "accuracy": self.accuracy2.result() + }, + } + + class TestTrainerWithSummaries(standard_runner.StandardTrainer): """A Trainer model with summaries for testing purposes.""" @@ -570,6 +621,31 @@ class ControllerTest(tf.test.TestCase, parameterized.TestCase): self.assertLen( summaries_with_matching_keyword("eval_loss", self.model_dir), 2) + def test_evaluate_with_nested_summaries(self): + test_evaluator = TestEvaluatorWithNestedSummary() + test_controller = controller.Controller( + evaluator=test_evaluator, + global_step=tf.Variable(0, dtype=tf.int64), + eval_summary_dir=self.model_dir) + test_controller.evaluate(steps=5) + + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset"))) + self.assertNotEmpty( + summaries_with_matching_keyword( + "loss", os.path.join(self.model_dir, "dataset"))) + self.assertNotEmpty( + summaries_with_matching_keyword( + "accuracy", os.path.join(self.model_dir, "dataset"))) + + self.assertNotEmpty( + tf.io.gfile.listdir(os.path.join(self.model_dir, "dataset2"))) + self.assertNotEmpty( + summaries_with_matching_keyword( + "loss", os.path.join(self.model_dir, "dataset2"))) + self.assertNotEmpty( + summaries_with_matching_keyword( + "accuracy", os.path.join(self.model_dir, "dataset2"))) if __name__ == "__main__": tf.test.main() diff --git a/orbit/runner.py b/orbit/runner.py index a7af0cde6..3ea3422c7 100644 --- a/orbit/runner.py +++ b/orbit/runner.py @@ -45,7 +45,8 @@ class AbstractTrainer(tf.Module, metaclass=abc.ABCMeta): Returns: The function may return a dictionary of `Tensors` or numpy arrays, which - will be written to logs and as TensorBoard summaries. + will be written to logs and as TensorBoard summaries. 
It can also be a + nested dictionary, yielding a hierarchy of summary directories. """ pass @@ -67,6 +68,7 @@ class AbstractEvaluator(tf.Module, metaclass=abc.ABCMeta): Returns: The function may return a dictionary of `Tensors` or numpy arrays, which - will be written to logs and as TensorBoard summaries. + will be written to logs and as TensorBoard summaries. It can also be a + nested dictionary, yielding a hierarchy of summary directories. """ pass diff --git a/orbit/standard_runner.py b/orbit/standard_runner.py index 543dbf5ff..2eb077141 100644 --- a/orbit/standard_runner.py +++ b/orbit/standard_runner.py @@ -144,7 +144,8 @@ class StandardTrainer(runner.AbstractTrainer, metaclass=abc.ABCMeta): Returns: The function may return a dictionary of `Tensors`, which will be - written to logs and as TensorBoard summaries. + written to logs and as TensorBoard summaries. It can also be a + nested dictionary, yielding a hierarchy of summary directories. """ pass @@ -261,7 +262,8 @@ class StandardEvaluator(runner.AbstractEvaluator, metaclass=abc.ABCMeta): Returns: The function may return a dictionary of `Tensors`, which will be - written to logs and as TensorBoard summaries. + written to logs and as TensorBoard summaries. It can also be a + nested dictionary, yielding a hierarchy of summary directories. """ pass diff --git a/orbit/utils.py b/orbit/utils.py index 177a002cf..3b392bc27 100644 --- a/orbit/utils.py +++ b/orbit/utils.py @@ -20,6 +20,7 @@ import contextlib import functools import inspect +import os import numpy as np import tensorflow as tf @@ -153,44 +154,80 @@ class SummaryManager: self._enabled = (summary_dir is not None) self._summary_dir = summary_dir self._summary_fn = summary_fn - self._summary_writer = None + self._summary_writers = {} if global_step is None: self._global_step = tf.summary.experimental.get_step() else: self._global_step = global_step - @property - def summary_writer(self): - """Returns the underlying summary writer.""" - if self._summary_writer is not None: - return self._summary_writer + def summary_writer(self, relative_path=""): + """Returns the underlying summary writer. + + Args: + relative_path: The current path in which to write summaries, relative to + the summary directory. By default it is empty, which specifies the root + directory. + """ + if self._summary_writers and relative_path in self._summary_writers: + return self._summary_writers[relative_path] if self._enabled: - self._summary_writer = tf.summary.create_file_writer(self._summary_dir) + self._summary_writers[relative_path] = tf.summary.create_file_writer( + os.path.join(self._summary_dir, relative_path)) else: - self._summary_writer = tf.summary.create_noop_writer() - return self._summary_writer + self._summary_writers[relative_path] = tf.summary.create_noop_writer() + return self._summary_writers[relative_path] def flush(self): - """Flush the underlying summary writer.""" + """Flush the underlying summary writers.""" if self._enabled: - tf.summary.flush(self.summary_writer) - - def write_summaries(self, items): - """Write a bulk of summaries. + tf.nest.map_structure(tf.summary.flush, self._summary_writers) + + def write_summaries(self, summary_dict): + """Write summaries for the given values. + + This recursively creates sub-directories for any nested dictionaries + provided in `summary_dict`, yielding a hierarchy of directories which will + then be reflected in the TensorBoard UI as different colored curves. + + E.g. 
users may evaluate on multiple datasets and return `summary_dict` as a
+    nested
+    dictionary.
+    ```
+    {
+        "dataset": {
+            "loss": loss,
+            "accuracy": accuracy
+        },
+        "dataset2": {
+            "loss": loss2,
+            "accuracy": accuracy2
+        },
+    }
+    ```
+    It will create two sub directories "dataset" and "dataset2" inside summary
+    root directory. And each directory write both "loss" and "accuracy"
+    summaries inside.
 
     Args:
-      items: a dictionary of `Tensors` for writing summaries.
+      summary_dict: A dictionary of values. If any value in `summary_dict` is
+        itself a dictionary, then the function will recursively create
+        subdirectories with names given by the keys in the dictionary. The
+        Tensor values are summarized using the summary writer instance specific
+        to the parent relative path.
     """
-    # TODO(rxsang): Support writing summaries with nested structure, so users
-    # can split the summaries into different directories for nicer visualization
-    # in Tensorboard, like train and eval metrics.
     if not self._enabled:
       return
-
-    with self.summary_writer.as_default():
-      for name, tensor in items.items():
-        self._summary_fn(name, tensor, step=self._global_step)
+    self._write_summaries(summary_dict)
+
+  def _write_summaries(self, summary_dict, relative_path=""):
+    for name, value in summary_dict.items():
+      if isinstance(value, dict):
+        self._write_summaries(
+            value, relative_path=os.path.join(relative_path, name))
+      else:
+        with self.summary_writer(relative_path).as_default():
+          self._summary_fn(name, value, step=self._global_step)
 

 class Trigger(metaclass=abc.ABCMeta):
-- GitLab

From df7e66d20e784d4e986f0c6daf122e56c6f5bc4c Mon Sep 17 00:00:00 2001
From: Srihari Humbarwadi
Date: Tue, 28 Jul 2020 19:17:42 +0530
Subject: [PATCH 053/128] fixed typos in doc strings and variable naming

---
 official/modeling/training/distributed_executor.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/official/modeling/training/distributed_executor.py b/official/modeling/training/distributed_executor.py
index 11451260c..73b0960f4 100644
--- a/official/modeling/training/distributed_executor.py
+++ b/official/modeling/training/distributed_executor.py
@@ -63,8 +63,8 @@ def metrics_as_dict(metric):
   """Puts input metric(s) into a list.
 
   Args:
-    metric: metric(s) to be put into the list. `metric` could be a object, a
-      list or a dict of tf.keras.metrics.Metric or has the `required_method`.
+    metric: metric(s) to be put into the list. `metric` could be an object, a
+      list, or a dict of tf.keras.metrics.Metric or has the `required_method`.
 
   Returns:
     A dictionary of valid metrics.
@@ -351,7 +351,8 @@ class DistributedExecutor(object):
       train_input_fn: (params: dict) -> tf.data.Dataset training data input
         function.
       eval_input_fn: (Optional) same type as train_input_fn. If not None, will
-        trigger evaluting metric on eval data. If None, will not run eval step.
+        trigger evaluating metric on eval data. If None, will not run the eval
+        step.
       model_dir: the folder path for model checkpoints.
       total_steps: total training steps.
      iterations_per_loop: train steps per loop.
After each loop, this job will @@ -672,7 +673,7 @@ class DistributedExecutor(object): raise ValueError('if `eval_metric_fn` is specified, ' 'eval_metric_fn must be a callable.') - old_phrase = tf.keras.backend.learning_phase() + old_phase = tf.keras.backend.learning_phase() tf.keras.backend.set_learning_phase(0) params = self._params strategy = self._strategy @@ -709,7 +710,7 @@ class DistributedExecutor(object): summary_writer(metrics=eval_metric_result, step=current_step) reset_states(eval_metric) - tf.keras.backend.set_learning_phase(old_phrase) + tf.keras.backend.set_learning_phase(old_phase) return eval_metric_result, current_step def predict(self): @@ -759,7 +760,7 @@ class ExecutorBuilder(object): Args: strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'. - If None. User is responsible to set the strategy before calling + If None, the user is responsible to set the strategy before calling build_executor(...). strategy_config: necessary config for constructing the proper Strategy. Check strategy_flags_dict() for examples of the structure. -- GitLab From 3e73c76c7a5373dafd71ef9231896dabcb696cc5 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 29 Jul 2020 00:30:33 +0800 Subject: [PATCH 054/128] change batchnorm trainable to true --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 06ee71cf6..1a14eb616 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -113,7 +113,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( resnet_v1_base_model_name, first_stage_features_stride, conv_hyperparams, - batch_norm_trainable=False, + batch_norm_trainable=True, pad_to_multiple=32, weight_decay=0.0, fpn_min_level=2, @@ -308,7 +308,7 @@ class FasterRCNNResnet50FpnKerasFeatureExtractor( def __init__(self, is_training, first_stage_features_stride=16, - batch_norm_trainable=False, + batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, @@ -348,7 +348,7 @@ class FasterRCNNResnet101FpnKerasFeatureExtractor( def __init__(self, is_training, first_stage_features_stride=16, - batch_norm_trainable=False, + batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, @@ -389,7 +389,7 @@ class FasterRCNNResnet152FpnKerasFeatureExtractor( def __init__(self, is_training, first_stage_features_stride=16, - batch_norm_trainable=False, + batch_norm_trainable=True, conv_hyperparams=None, weight_decay=0.0, fpn_min_level=2, -- GitLab From 95d8a879ea2ddc63c48deed3f8e28d55d1b3a2d5 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 29 Jul 2020 00:37:38 +0800 Subject: [PATCH 055/128] change resnetFPN as an internal class --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 1a14eb616..7ccf9db76 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -32,7 +32,7 
@@ _RESNET_MODEL_OUTPUT_LAYERS = { 'conv4_block36_out', 'conv5_block3_out'], } -class ResnetFPN(tf.keras.layers.Layer): +class _ResnetFPN(tf.keras.layers.Layer): """Construct Resnet FPN layer.""" def __init__(self, @@ -56,7 +56,7 @@ class ResnetFPN(tf.keras.layers.Layer): resnet_block_names: a list of block names of resnet. base_fpn_max_level: maximum level of fpn without coarse feature layers. """ - super(ResnetFPN, self).__init__() + super(_ResnetFPN, self).__init__() self.classification_backbone = backbone_classifier self.fpn_features_generator = fpn_features_generator self.coarse_feature_layers = coarse_feature_layers @@ -66,7 +66,7 @@ class ResnetFPN(tf.keras.layers.Layer): self._base_fpn_max_level = base_fpn_max_level def call(self, inputs): - """Create ResnetFPN layer. + """Create internal ResnetFPN layer. Args: inputs: A [batch, height_out, width_out, channels] float32 tensor @@ -261,7 +261,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( name=layer_name)) self._coarse_feature_layers.append(layers) - feature_extractor_model = ResnetFPN(self.classification_backbone, + feature_extractor_model = _ResnetFPN(self.classification_backbone, self._fpn_features_generator, self._coarse_feature_layers, self._pad_to_multiple, -- GitLab From bd27aa70ab64666ee7eb5901c81a08161e7135d7 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 29 Jul 2020 00:40:25 +0800 Subject: [PATCH 056/128] add batchnorm layer --- .../models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index 7ccf9db76..dc24fb9f6 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -295,6 +295,8 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( feature_extractor_model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(units=1024, activation='relu'), + self._conv_hyperparams.build_batch_norm( + training=(self._is_training and not self._freeze_batchnorm)), tf.keras.layers.Dense(units=1024, activation='relu'), tf.keras.layers.Reshape((1, 1, 1024)) ]) -- GitLab From 17821c0dcdc377bd5ba779b54b3ca49f90d5a6a3 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 29 Jul 2020 00:44:18 +0800 Subject: [PATCH 057/128] fix doc string for get_box_classifier_feature_extractor_model --- .../models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index dc24fb9f6..c6dd3b710 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -286,7 +286,7 @@ class FasterRCNNResnetV1FpnKerasFeatureExtractor( And returns proposal_classifier_features: A 4-D float tensor with shape - [batch_size * self.max_num_proposals, 1024] + [batch_size * self.max_num_proposals, 1, 1, 1024] representing box classifier features for each proposal. 
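          (The two unit spatial dimensions come from the trailing
          tf.keras.layers.Reshape((1, 1, 1024)) in the extractor model.)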
""" with tf.name_scope(name): -- GitLab From 4152a5c73a79624e84c0516e50b9dd1acd856365 Mon Sep 17 00:00:00 2001 From: syiming Date: Wed, 29 Jul 2020 00:54:44 +0800 Subject: [PATCH 058/128] move BidirectionalFeaturePyramidNetworks to fpn.proto --- research/object_detection/protos/fpn.proto | 20 ++++++++++++++++++++ research/object_detection/protos/ssd.proto | 21 --------------------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/research/object_detection/protos/fpn.proto b/research/object_detection/protos/fpn.proto index 626a7035c..cc7f6dcd3 100644 --- a/research/object_detection/protos/fpn.proto +++ b/research/object_detection/protos/fpn.proto @@ -27,3 +27,23 @@ message FeaturePyramidNetworks { // channel depth for additional coarse feature layers. optional int32 additional_layer_depth = 3 [default = 256]; } + +// Configuration for Bidirectional Feature Pyramid Networks. +message BidirectionalFeaturePyramidNetworks { + // minimum level in the feature pyramid. + optional int32 min_level = 1 [default = 3]; + + // maximum level in the feature pyramid. + optional int32 max_level = 2 [default = 7]; + + // The number of repeated top-down bottom-up iterations for BiFPN-based + // feature extractors (bidirectional feature pyramid networks). + optional int32 num_iterations = 3; + + // The number of filters (channels) to use in feature pyramid layers for + // BiFPN-based feature extractors (bidirectional feature pyramid networks). + optional int32 num_filters = 4; + + // Method used to combine inputs to BiFPN nodes. + optional string combine_method = 5 [default = 'fast_attention']; +} diff --git a/research/object_detection/protos/ssd.proto b/research/object_detection/protos/ssd.proto index e8ae96cb9..09eabac1e 100644 --- a/research/object_detection/protos/ssd.proto +++ b/research/object_detection/protos/ssd.proto @@ -203,24 +203,3 @@ message SsdFeatureExtractor { optional int32 num_layers = 12 [default = 6]; } - -// Configuration for Bidirectional Feature Pyramid Networks. -message BidirectionalFeaturePyramidNetworks { - // minimum level in the feature pyramid. - optional int32 min_level = 1 [default = 3]; - - // maximum level in the feature pyramid. - optional int32 max_level = 2 [default = 7]; - - // The number of repeated top-down bottom-up iterations for BiFPN-based - // feature extractors (bidirectional feature pyramid networks). - optional int32 num_iterations = 3; - - // The number of filters (channels) to use in feature pyramid layers for - // BiFPN-based feature extractors (bidirectional feature pyramid networks). - optional int32 num_filters = 4; - - // Method used to combine inputs to BiFPN nodes. - optional string combine_method = 5 [default = 'fast_attention']; -} - -- GitLab From 006df62528e4116b9956c0c5d47edfe87d24f6e9 Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Tue, 28 Jul 2020 10:59:22 -0700 Subject: [PATCH 059/128] Remove unused import. PiperOrigin-RevId: 323608772 --- research/object_detection/protos/input_reader.proto | 1 - 1 file changed, 1 deletion(-) diff --git a/research/object_detection/protos/input_reader.proto b/research/object_detection/protos/input_reader.proto index 91632429c..c6bacd289 100644 --- a/research/object_detection/protos/input_reader.proto +++ b/research/object_detection/protos/input_reader.proto @@ -2,7 +2,6 @@ syntax = "proto2"; package object_detection.protos; -import "object_detection/protos/image_resizer.proto"; // Configuration proto for defining input readers that generate Object Detection // Examples from input sources. 
Input readers are expected to generate a -- GitLab From 322d2eb7044291d7bb581f3a38e58c4ff54fa0e4 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 28 Jul 2020 12:21:08 -0700 Subject: [PATCH 060/128] Internal change PiperOrigin-RevId: 323627192 --- official/nlp/xlnet/xlnet_modeling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/official/nlp/xlnet/xlnet_modeling.py b/official/nlp/xlnet/xlnet_modeling.py index 5eefaf909..803ec4add 100644 --- a/official/nlp/xlnet/xlnet_modeling.py +++ b/official/nlp/xlnet/xlnet_modeling.py @@ -15,13 +15,13 @@ """Keras layers of XLNet model in TF 2.0.""" import copy -import functools import tensorflow as tf from official.nlp.xlnet import data_utils -gelu = functools.partial(tf.keras.activations.gelu, approximate=True) +def gelu(x): + return tf.keras.activations.gelu(x, approximate=True) def rel_shift(x, klen=-1): -- GitLab From 902e05aae717af1b71a631408343a29c61dfdc73 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Tue, 28 Jul 2020 13:43:05 -0700 Subject: [PATCH 061/128] Remove oauth2client version PiperOrigin-RevId: 323644473 --- official/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/official/requirements.txt b/official/requirements.txt index c700d2911..4559e176e 100644 --- a/official/requirements.txt +++ b/official/requirements.txt @@ -3,7 +3,7 @@ google-api-python-client>=1.6.7 google-cloud-bigquery>=0.31.0 kaggle>=1.3.9 numpy>=1.15.4 -oauth2client>=4.1.2 +oauth2client pandas>=0.22.0 psutil>=5.4.3 py-cpuinfo>=3.3.0 -- GitLab From ab2c8ba9496c29ab1e699de54a5770ce7ca13457 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Tue, 28 Jul 2020 14:26:58 -0700 Subject: [PATCH 062/128] Internal change PiperOrigin-RevId: 323653596 --- official/nlp/transformer/compute_bleu.py | 4 ++-- official/nlp/transformer/data_download.py | 2 +- orbit/standard_runner_test.py | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/official/nlp/transformer/compute_bleu.py b/official/nlp/transformer/compute_bleu.py index f7dfd542b..b0bed0ce7 100644 --- a/official/nlp/transformer/compute_bleu.py +++ b/official/nlp/transformer/compute_bleu.py @@ -26,7 +26,7 @@ import re import sys import unicodedata -from absl import app as absl_app +from absl import app from absl import flags import six from six.moves import range @@ -149,4 +149,4 @@ if __name__ == "__main__": tf.logging.set_verbosity(tf.logging.INFO) define_compute_bleu_flags() FLAGS = flags.FLAGS - absl_app.run(main) + app.run(main) diff --git a/official/nlp/transformer/data_download.py b/official/nlp/transformer/data_download.py index e5f666856..ee68d67f7 100644 --- a/official/nlp/transformer/data_download.py +++ b/official/nlp/transformer/data_download.py @@ -436,4 +436,4 @@ if __name__ == "__main__": logging.set_verbosity(logging.INFO) define_data_download_flags() FLAGS = flags.FLAGS - absl_app.run(main) + tf.app.run(main) diff --git a/orbit/standard_runner_test.py b/orbit/standard_runner_test.py index 77da0b8b2..2721b680e 100644 --- a/orbit/standard_runner_test.py +++ b/orbit/standard_runner_test.py @@ -14,7 +14,6 @@ # limitations under the License. # ============================================================================== """Tests for orbit.standard_runner.""" -# pylint: disable=g-bad-import-order from orbit import standard_runner -- GitLab From 555722af8f1f74046349f53287f924f14523edb6 Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Tue, 28 Jul 2020 15:39:17 -0700 Subject: [PATCH 063/128] Internal change PiperOrigin-RevId: 323668407 --- orbit/utils.py | 37 +++++++++++++++++++++++++++++++------ orbit/utils_test.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 6 deletions(-) create mode 100644 orbit/utils_test.py diff --git a/orbit/utils.py b/orbit/utils.py index 3b392bc27..c084ca247 100644 --- a/orbit/utils.py +++ b/orbit/utils.py @@ -96,6 +96,30 @@ def create_tf_while_loop_fn(step_fn): return loop_fn +def create_global_step() -> tf.Variable: + """Creates a `tf.Variable` suitable for use as a global step counter. + + Creating and managing a global step variable may be necessary for + `AbstractTrainer` subclasses that perform multiple parameter updates per + `Controller` "step", or use different optimizers on different steps. + + In these cases, an `optimizer.iterations` property generally can't be used + directly, since it would correspond to parameter updates instead of iterations + in the `Controller`'s training loop. Such use cases should simply call + `step.assign_add(1)` at the end of each step. + + Returns: + A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the + first replica's value retained when synchronizing across replicas in + a distributed setting. + """ + return tf.Variable( + 0, + dtype=tf.int64, + trainable=False, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) + + def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs): """A helper function to create distributed dataset. @@ -186,13 +210,13 @@ class SummaryManager: def write_summaries(self, summary_dict): """Write summaries for the given values. - This recursively creates sub-directories for any nested dictionaries + This recursively creates subdirectories for any nested dictionaries provided in `summary_dict`, yielding a hierarchy of directories which will then be reflected in the TensorBoard UI as different colored curves. E.g. users may evaluate on muliple datasets and return `summary_dict` as a - nested - dictionary. + nested dictionary. + ``` { "dataset": { @@ -205,9 +229,10 @@ class SummaryManager: }, } ``` - It will create two sub directories "dataset" and "dataset2" inside summary - root directory. And each directory write both "loss" and "accuracy" - summaries inside. + + This will create two subdirectories "dataset" and "dataset2" inside the + summary root directory. Each directory will contain event files including + both "loss" and "accuracy" summaries. Args: summary_dict: A dictionary of values. If any value in `summary_dict` is diff --git a/orbit/utils_test.py b/orbit/utils_test.py new file mode 100644 index 000000000..0fc33aecc --- /dev/null +++ b/orbit/utils_test.py @@ -0,0 +1,34 @@ +# Lint as: python3 +# Copyright 2020 The Orbit Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Tests for orbit.utils."""
+
+from orbit import utils
+
+import tensorflow as tf
+
+
+class UtilsTest(tf.test.TestCase):
+
+  def test_create_global_step(self):
+    step = utils.create_global_step()
+    self.assertEqual(step.dtype, tf.int64)
+    self.assertEqual(step, 0)
+    step.assign_add(1)
+    self.assertEqual(step, 1)
+
+
+if __name__ == '__main__':
+  tf.test.main()
-- 
GitLab


From 5a1b5af396c2ef257e6134da754ae59d351516ba Mon Sep 17 00:00:00 2001
From: Hongkun Yu
Date: Tue, 28 Jul 2020 17:14:21 -0700
Subject: [PATCH 064/128] Use absl app.

PiperOrigin-RevId: 323685656
---
 official/nlp/transformer/data_download.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/official/nlp/transformer/data_download.py b/official/nlp/transformer/data_download.py
index ee68d67f7..389486606 100644
--- a/official/nlp/transformer/data_download.py
+++ b/official/nlp/transformer/data_download.py
@@ -23,7 +23,7 @@ import random
 import tarfile
 
 # pylint: disable=g-bad-import-order
-from absl import app as absl_app
+from absl import app
 from absl import flags
 from absl import logging
 import six
@@ -436,4 +436,4 @@ if __name__ == "__main__":
   logging.set_verbosity(logging.INFO)
   define_data_download_flags()
   FLAGS = flags.FLAGS
-  tf.app.run(main)
+  app.run(main)
-- 
GitLab


From 250701c644424bde6e01c9c6f180ec3e813c86f9 Mon Sep 17 00:00:00 2001
From: Hongkun Yu
Date: Tue, 28 Jul 2020 20:42:29 -0700
Subject: [PATCH 065/128] Remove TokenClassification network which is a single
 dense layer.

PiperOrigin-RevId: 323711016
---
 .../nlp/nlp_modeling_library_intro.ipynb       |   2 +-
 official/nlp/modeling/models/README.md         |   4 +-
 .../nlp/modeling/models/bert_classifier.py     |   6 +-
 .../nlp/modeling/models/bert_pretrainer.py     |   6 +-
 .../nlp/modeling/models/bert_span_labeler.py   |   6 +-
 .../modeling/models/bert_token_classifier.py   |  33 +--
 official/nlp/modeling/networks/README.md       |   3 -
 official/nlp/modeling/networks/__init__.py     |   1 -
 .../modeling/networks/token_classification.py  |  85 --------
 .../networks/token_classification_test.py      | 192 ------------------
 10 files changed, 23 insertions(+), 315 deletions(-)
 delete mode 100644 official/nlp/modeling/networks/token_classification.py
 delete mode 100644 official/nlp/modeling/networks/token_classification_test.py

diff --git a/official/colab/nlp/nlp_modeling_library_intro.ipynb b/official/colab/nlp/nlp_modeling_library_intro.ipynb
index 722d115a3..f5ffcef96 100644
--- a/official/colab/nlp/nlp_modeling_library_intro.ipynb
+++ b/official/colab/nlp/nlp_modeling_library_intro.ipynb
@@ -478,7 +478,7 @@
    "source": [
     "### Build a BertClassifier model wrapping TransformerEncoder\n",
     "\n",
-    "[BertClassifier](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_classifier.py) implements a simple token classification model containing a single classification head using the `TokenClassification` network."
+    "[BertClassifier](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_classifier.py) implements a [CLS] token classification model containing a single classification head."
   ]
  },
 {
diff --git a/official/nlp/modeling/models/README.md b/official/nlp/modeling/models/README.md
index c2e572b6f..41f4e8004 100644
--- a/official/nlp/modeling/models/README.md
+++ b/official/nlp/modeling/models/README.md
@@ -10,8 +10,8 @@ model containing a single classification head using the Classification
 network. It can be used as a regression model as well.
* [`BertTokenClassifier`](bert_token_classifier.py) implements a simple token
-classification model containing a single classification head using the
-TokenClassification network.
+classification model containing a single classification head over the sequence
+output embeddings.
 
 * [`BertSpanLabeler`](bert_span_labeler.py) implements a simple single-span
 start-end predictor (that is, a model that predicts two values: a start token
diff --git a/official/nlp/modeling/models/bert_classifier.py b/official/nlp/modeling/models/bert_classifier.py
index 89d24f316..8584d8fb0 100644
--- a/official/nlp/modeling/models/bert_classifier.py
+++ b/official/nlp/modeling/models/bert_classifier.py
@@ -12,12 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Trainer network for BERT-style models."""
+"""BERT cls-token classifier."""
 # pylint: disable=g-classes-have-attributes
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
 
 import tensorflow as tf
 
diff --git a/official/nlp/modeling/models/bert_pretrainer.py b/official/nlp/modeling/models/bert_pretrainer.py
index 73933a158..b1ae8dc54 100644
--- a/official/nlp/modeling/models/bert_pretrainer.py
+++ b/official/nlp/modeling/models/bert_pretrainer.py
@@ -12,12 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Trainer network for BERT-style models."""
+"""BERT Pre-training model."""
 # pylint: disable=g-classes-have-attributes
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
 
 import copy
 from typing import List, Optional
diff --git a/official/nlp/modeling/models/bert_span_labeler.py b/official/nlp/modeling/models/bert_span_labeler.py
index 37cc58dd9..5985b3b86 100644
--- a/official/nlp/modeling/models/bert_span_labeler.py
+++ b/official/nlp/modeling/models/bert_span_labeler.py
@@ -12,12 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Trainer network for BERT-style models."""
+"""BERT Question Answering model."""
 # pylint: disable=g-classes-have-attributes
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
 
 import tensorflow as tf
 
diff --git a/official/nlp/modeling/models/bert_token_classifier.py b/official/nlp/modeling/models/bert_token_classifier.py
index 208f59bb9..04d0d394f 100644
--- a/official/nlp/modeling/models/bert_token_classifier.py
+++ b/official/nlp/modeling/models/bert_token_classifier.py
@@ -12,17 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
# ============================================================================== -"""Trainer network for BERT-style models.""" +"""BERT token classifier.""" # pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function import tensorflow as tf -from official.nlp.modeling import networks - @tf.keras.utils.register_keras_serializable(package='Text') class BertTokenClassifier(tf.keras.Model): @@ -77,16 +71,23 @@ class BertTokenClassifier(tf.keras.Model): sequence_output = tf.keras.layers.Dropout( rate=dropout_rate)(sequence_output) - self.classifier = networks.TokenClassification( - input_width=sequence_output.shape[-1], - num_classes=num_classes, - initializer=initializer, - output=output, - name='classification') - predictions = self.classifier(sequence_output) - + self.classifier = tf.keras.layers.Dense( + num_classes, + activation=None, + kernel_initializer=initializer, + name='predictions/transform/logits') + self.logits = self.classifier(sequence_output) + if output == 'logits': + output_tensors = self.logits + elif output == 'predictions': + output_tensors = tf.keras.layers.Activation(tf.nn.log_softmax)( + self.logits) + else: + raise ValueError( + ('Unknown `output` value "%s". `output` can be either "logits" or ' + '"predictions"') % output) super(BertTokenClassifier, self).__init__( - inputs=inputs, outputs=predictions, **kwargs) + inputs=inputs, outputs=output_tensors, **kwargs) @property def checkpoint_items(self): diff --git a/official/nlp/modeling/networks/README.md b/official/nlp/modeling/networks/README.md index 42347373e..24a890260 100644 --- a/official/nlp/modeling/networks/README.md +++ b/official/nlp/modeling/networks/README.md @@ -20,8 +20,5 @@ into two smaller matrices and shares parameters across layers. intended for use as a classification or regression (if number of classes is set to 1) head. -* [`TokenClassification`](token_classification.py) contains a single hidden -layer, and is intended for use as a token classification head. - * [`SpanLabeling`](span_labeling.py) implements a single-span labeler (that is, a prediction head that can predict one start and end index per batch item) based on a single dense hidden layer. It can be used in the SQuAD task. diff --git a/official/nlp/modeling/networks/__init__.py b/official/nlp/modeling/networks/__init__.py index b8443e9f9..24d46eda2 100644 --- a/official/nlp/modeling/networks/__init__.py +++ b/official/nlp/modeling/networks/__init__.py @@ -17,5 +17,4 @@ from official.nlp.modeling.networks.albert_transformer_encoder import AlbertTran from official.nlp.modeling.networks.classification import Classification from official.nlp.modeling.networks.encoder_scaffold import EncoderScaffold from official.nlp.modeling.networks.span_labeling import SpanLabeling -from official.nlp.modeling.networks.token_classification import TokenClassification from official.nlp.modeling.networks.transformer_encoder import TransformerEncoder diff --git a/official/nlp/modeling/networks/token_classification.py b/official/nlp/modeling/networks/token_classification.py deleted file mode 100644 index 8206c9688..000000000 --- a/official/nlp/modeling/networks/token_classification.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Classification network.""" -# pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import tensorflow as tf - - -@tf.keras.utils.register_keras_serializable(package='Text') -class TokenClassification(tf.keras.Model): - """TokenClassification network head for BERT modeling. - - This network implements a simple token classifier head based on a dense layer. - *Note* that the network is constructed by - [Keras Functional API](https://keras.io/guides/functional_api/). - - Arguments: - input_width: The innermost dimension of the input tensor to this network. - num_classes: The number of classes that this network should classify to. - activation: The activation, if any, for the dense layer in this network. - initializer: The initializer for the dense layer in this network. Defaults - to a Glorot uniform initializer. - output: The output style for this network. Can be either 'logits' or - 'predictions'. - """ - - def __init__(self, - input_width, - num_classes, - initializer='glorot_uniform', - output='logits', - **kwargs): - self._self_setattr_tracking = False - self._config_dict = { - 'input_width': input_width, - 'num_classes': num_classes, - 'initializer': initializer, - 'output': output, - } - - sequence_data = tf.keras.layers.Input( - shape=(None, input_width), name='sequence_data', dtype=tf.float32) - - self.logits = tf.keras.layers.Dense( - num_classes, - activation=None, - kernel_initializer=initializer, - name='predictions/transform/logits')( - sequence_data) - predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(self.logits) - - if output == 'logits': - output_tensors = self.logits - elif output == 'predictions': - output_tensors = predictions - else: - raise ValueError( - ('Unknown `output` value "%s". `output` can be either "logits" or ' - '"predictions"') % output) - - super(TokenClassification, self).__init__( - inputs=[sequence_data], outputs=output_tensors, **kwargs) - - def get_config(self): - return self._config_dict - - @classmethod - def from_config(cls, config, custom_objects=None): - return cls(**config) diff --git a/official/nlp/modeling/networks/token_classification_test.py b/official/nlp/modeling/networks/token_classification_test.py deleted file mode 100644 index eb695c784..000000000 --- a/official/nlp/modeling/networks/token_classification_test.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for token classification network.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import -from official.nlp.modeling.networks import token_classification - - -# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It -# guarantees forward compatibility of this code for the V2 switchover. -@keras_parameterized.run_all_keras_modes -class TokenClassificationTest(keras_parameterized.TestCase): - - def test_network_creation(self): - """Validate that the Keras object can be created.""" - sequence_length = 5 - input_width = 512 - num_classes = 10 - test_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes) - # Create a 3-dimensional input (the first dimension is implicit). - sequence_data = tf.keras.Input(shape=(sequence_length, input_width), - dtype=tf.float32) - output = test_object(sequence_data) - - # Validate that the outputs are of the expected shape. - expected_output_shape = [None, sequence_length, num_classes] - self.assertEqual(expected_output_shape, output.shape.as_list()) - - def test_network_invocation(self): - """Validate that the Keras object can be invoked.""" - sequence_length = 5 - input_width = 512 - num_classes = 10 - test_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes, output='predictions') - # Create a 3-dimensional input (the first dimension is implicit). - sequence_data = tf.keras.Input(shape=(sequence_length, input_width), - dtype=tf.float32) - output = test_object(sequence_data) - - # Invoke the network as part of a Model. - model = tf.keras.Model(sequence_data, output) - input_data = 10 * np.random.random_sample((3, sequence_length, input_width)) - _ = model.predict(input_data) - - def test_network_invocation_with_internal_logits(self): - """Validate that the logit outputs are correct.""" - sequence_length = 5 - input_width = 512 - num_classes = 10 - test_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes, output='predictions') - - # Create a 3-dimensional input (the first dimension is implicit). - sequence_data = tf.keras.Input(shape=(sequence_length, input_width), - dtype=tf.float32) - output = test_object(sequence_data) - model = tf.keras.Model(sequence_data, output) - logits_model = tf.keras.Model(test_object.inputs, test_object.logits) - - batch_size = 3 - input_data = 10 * np.random.random_sample( - (batch_size, sequence_length, input_width)) - outputs = model.predict(input_data) - logits = logits_model.predict(input_data) - - # Ensure that the tensor shapes are correct. - expected_output_shape = (batch_size, sequence_length, num_classes) - self.assertEqual(expected_output_shape, outputs.shape) - self.assertEqual(expected_output_shape, logits.shape) - - # Ensure that the logits, when softmaxed, create the outputs. 
- input_tensor = tf.keras.Input(expected_output_shape[1:]) - output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) - softmax_model = tf.keras.Model(input_tensor, output_tensor) - - calculated_softmax = softmax_model.predict(logits) - self.assertAllClose(outputs, calculated_softmax) - - def test_network_invocation_with_internal_and_external_logits(self): - """Validate that the logit outputs are correct.""" - sequence_length = 5 - input_width = 512 - num_classes = 10 - test_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes, output='logits') - - # Create a 3-dimensional input (the first dimension is implicit). - sequence_data = tf.keras.Input(shape=(sequence_length, input_width), - dtype=tf.float32) - output = test_object(sequence_data) - model = tf.keras.Model(sequence_data, output) - logits_model = tf.keras.Model(test_object.inputs, test_object.logits) - - batch_size = 3 - input_data = 10 * np.random.random_sample( - (batch_size, sequence_length, input_width)) - outputs = model.predict(input_data) - logits = logits_model.predict(input_data) - - # Ensure that the tensor shapes are correct. - expected_output_shape = (batch_size, sequence_length, num_classes) - self.assertEqual(expected_output_shape, outputs.shape) - self.assertEqual(expected_output_shape, logits.shape) - - self.assertAllClose(outputs, logits) - - def test_network_invocation_with_logit_output(self): - """Validate that the logit outputs are correct.""" - sequence_length = 5 - input_width = 512 - num_classes = 10 - test_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes, output='predictions') - logit_object = token_classification.TokenClassification( - input_width=input_width, num_classes=num_classes, output='logits') - logit_object.set_weights(test_object.get_weights()) - - # Create a 3-dimensional input (the first dimension is implicit). - sequence_data = tf.keras.Input(shape=(sequence_length, input_width), - dtype=tf.float32) - output = test_object(sequence_data) - logit_output = logit_object(sequence_data) - - model = tf.keras.Model(sequence_data, output) - logits_model = tf.keras.Model(sequence_data, logit_output) - - batch_size = 3 - input_data = 10 * np.random.random_sample( - (batch_size, sequence_length, input_width)) - outputs = model.predict(input_data) - logits = logits_model.predict(input_data) - - # Ensure that the tensor shapes are correct. - expected_output_shape = (batch_size, sequence_length, num_classes) - self.assertEqual(expected_output_shape, outputs.shape) - self.assertEqual(expected_output_shape, logits.shape) - - # Ensure that the logits, when softmaxed, create the outputs. - input_tensor = tf.keras.Input(expected_output_shape[1:]) - output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) - softmax_model = tf.keras.Model(input_tensor, output_tensor) - - calculated_softmax = softmax_model.predict(logits) - self.assertAllClose(outputs, calculated_softmax) - - def test_serialize_deserialize(self): - # Create a network object that sets all of its config options. - network = token_classification.TokenClassification( - input_width=128, - num_classes=10, - initializer='zeros', - output='predictions') - - # Create another network object from the first object's config. - new_network = token_classification.TokenClassification.from_config( - network.get_config()) - - # Validate that the config can be forced to JSON. 
-    _ = new_network.to_json()
-
-    # If the serialization was successful, the new config should match the old.
-    self.assertAllEqual(network.get_config(), new_network.get_config())
-
-  def test_unknown_output_type_fails(self):
-    with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'):
-      _ = token_classification.TokenClassification(
-          input_width=128, num_classes=10, output='bad')
-
-
-if __name__ == '__main__':
-  tf.test.main()
-- 
GitLab


From a565d720bcde6e5c77d0993a4efc30f3e7891350 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Wed, 29 Jul 2020 00:24:42 -0700
Subject: [PATCH 066/128] Internal change

PiperOrigin-RevId: 323732686
---
 official/core/base_task.py                     | 15 ++++++++++++++-
 .../modeling/hyperparams/config_definitions.py |  2 ++
 official/nlp/tasks/masked_lm.py                | 16 ----------------
 official/nlp/tasks/question_answering.py       | 14 --------------
 official/nlp/tasks/tagging.py                  | 15 ---------------
 5 files changed, 16 insertions(+), 46 deletions(-)

diff --git a/official/core/base_task.py b/official/core/base_task.py
index 4477b1ca4..dc2633f97 100644
--- a/official/core/base_task.py
+++ b/official/core/base_task.py
@@ -18,6 +18,7 @@ import abc
 import functools
 from typing import Any, Callable, Optional
 
+from absl import logging
 import six
 import tensorflow as tf
 
@@ -67,7 +68,19 @@ class Task(tf.Module):
     Args:
       model: The keras.Model built or used by this task.
     """
-    pass
+    ckpt_dir_or_file = self.task_config.init_checkpoint
+    logging.info("Trying to load pretrained checkpoint from %s",
+                 ckpt_dir_or_file)
+    if tf.io.gfile.isdir(ckpt_dir_or_file):
+      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
+    if not ckpt_dir_or_file:
+      return
+
+    ckpt = tf.train.Checkpoint(**model.checkpoint_items)
+    status = ckpt.restore(ckpt_dir_or_file)
+    status.expect_partial().assert_existing_objects_matched()
+    logging.info("Finished loading pretrained checkpoint from %s",
+                 ckpt_dir_or_file)
 
   @abc.abstractmethod
   def build_model(self) -> tf.keras.Model:
diff --git a/official/modeling/hyperparams/config_definitions.py b/official/modeling/hyperparams/config_definitions.py
index 5ba24f5b0..e9f7f60f4 100644
--- a/official/modeling/hyperparams/config_definitions.py
+++ b/official/modeling/hyperparams/config_definitions.py
@@ -179,6 +179,7 @@ class TrainerConfig(base_config.Config):
     max_to_keep: max checkpoints to keep.
     continuous_eval_timeout: maximum number of seconds to wait between
       checkpoints, if set to None, continuous eval will wait indefinitely.
+      This is only used in continuous_train_and_eval and continuous_eval modes.
     train_steps: number of train steps.
     validation_steps: number of eval steps. If `None`, the entire eval dataset
       is used.
@@ -205,6 +206,7 @@ class TrainerConfig(base_config.Config):
 
 @dataclasses.dataclass
 class TaskConfig(base_config.Config):
+  init_checkpoint: str = ""
   model: base_config.Config = None
   train_data: DataConfig = DataConfig()
   validation_data: DataConfig = DataConfig()
diff --git a/official/nlp/tasks/masked_lm.py b/official/nlp/tasks/masked_lm.py
index 98b5b9ba7..ebf01278f 100644
--- a/official/nlp/tasks/masked_lm.py
+++ b/official/nlp/tasks/masked_lm.py
@@ -14,7 +14,6 @@
 # limitations under the License.
# ============================================================================== """Masked language task.""" -from absl import logging import dataclasses import tensorflow as tf @@ -27,7 +26,6 @@ from official.nlp.data import data_loader_factory @dataclasses.dataclass class MaskedLMConfig(cfg.TaskConfig): """The model config.""" - init_checkpoint: str = '' model: bert.BertPretrainerConfig = bert.BertPretrainerConfig(cls_heads=[ bert.ClsHeadConfig( inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence') @@ -174,17 +172,3 @@ class MaskedLMTask(base_task.Task): aux_losses=model.losses) self.process_metrics(metrics, inputs, outputs) return {self.loss: loss} - - def initialize(self, model: tf.keras.Model): - ckpt_dir_or_file = self.task_config.init_checkpoint - if tf.io.gfile.isdir(ckpt_dir_or_file): - ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) - if not ckpt_dir_or_file: - return - # Restoring all modules defined by the model, e.g. encoder, masked_lm and - # cls pooler. The best initialization may vary case by case. - ckpt = tf.train.Checkpoint(**model.checkpoint_items) - status = ckpt.read(ckpt_dir_or_file) - status.expect_partial().assert_existing_objects_matched() - logging.info('Finished loading pretrained checkpoint from %s', - ckpt_dir_or_file) diff --git a/official/nlp/tasks/question_answering.py b/official/nlp/tasks/question_answering.py index 9f2a1007a..f862fb837 100644 --- a/official/nlp/tasks/question_answering.py +++ b/official/nlp/tasks/question_answering.py @@ -290,17 +290,3 @@ class QuestionAnsweringTask(base_task.Task): eval_metrics = {'exact_match': eval_metrics['exact_match'], 'final_f1': eval_metrics['final_f1']} return eval_metrics - - def initialize(self, model): - """Load a pretrained checkpoint (if exists) and then train from iter 0.""" - ckpt_dir_or_file = self.task_config.init_checkpoint - if tf.io.gfile.isdir(ckpt_dir_or_file): - ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) - if not ckpt_dir_or_file: - return - - ckpt = tf.train.Checkpoint(**model.checkpoint_items) - status = ckpt.read(ckpt_dir_or_file) - status.expect_partial().assert_existing_objects_matched() - logging.info('Finished loading pretrained checkpoint from %s', - ckpt_dir_or_file) diff --git a/official/nlp/tasks/tagging.py b/official/nlp/tasks/tagging.py index 4af0d8ee2..70a983b94 100644 --- a/official/nlp/tasks/tagging.py +++ b/official/nlp/tasks/tagging.py @@ -14,7 +14,6 @@ # limitations under the License. 
# ============================================================================== """Tagging (e.g., NER/POS) task.""" -import logging from typing import List, Optional, Tuple import dataclasses @@ -215,20 +214,6 @@ class TaggingTask(base_task.Task): seqeval_metrics.accuracy_score(label_class, predict_class), } - def initialize(self, model): - """Load a pretrained checkpoint (if exists) and then train from iter 0.""" - ckpt_dir_or_file = self.task_config.init_checkpoint - if tf.io.gfile.isdir(ckpt_dir_or_file): - ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file) - if not ckpt_dir_or_file: - return - - ckpt = tf.train.Checkpoint(**model.checkpoint_items) - status = ckpt.restore(ckpt_dir_or_file) - status.expect_partial().assert_existing_objects_matched() - logging.info('Finished loading pretrained checkpoint from %s', - ckpt_dir_or_file) - def predict(task: TaggingTask, params: cfg.DataConfig, model: tf.keras.Model) -> Tuple[List[List[int]], List[int]]: -- GitLab From 855d29dba61a53553e8fc89237773368fd7e9cdf Mon Sep 17 00:00:00 2001 From: Sara Beery Date: Wed, 29 Jul 2020 09:38:24 -0700 Subject: [PATCH 067/128] Converting context r-cnn dataset tools to TF2 PiperOrigin-RevId: 323805090 --- .../context_rcnn/add_context_to_examples.py | 2 +- ...py => add_context_to_examples_tf2_test.py} | 26 +++--- .../create_cococameratraps_tfexample_main.py | 16 +--- ...ate_cococameratraps_tfexample_tf2_test.py} | 14 ++- .../context_rcnn/generate_detection_data.py | 28 ++---- ...py => generate_detection_data_tf2_test.py} | 92 ++++++++----------- .../context_rcnn/generate_embedding_data.py | 37 ++------ ...py => generate_embedding_data_tf2_test.py} | 90 ++++++++---------- .../object_detection/packages/tf1/setup.py | 2 +- .../object_detection/packages/tf2/setup.py | 4 +- 10 files changed, 120 insertions(+), 191 deletions(-) rename research/object_detection/dataset_tools/context_rcnn/{add_context_to_examples_tf1_test.py => add_context_to_examples_tf2_test.py} (95%) rename research/object_detection/dataset_tools/context_rcnn/{create_cococameratraps_tfexample_tf1_test.py => create_cococameratraps_tfexample_tf2_test.py} (94%) rename research/object_detection/dataset_tools/context_rcnn/{generate_detection_data_tf1_test.py => generate_detection_data_tf2_test.py} (75%) rename research/object_detection/dataset_tools/context_rcnn/{generate_embedding_data_tf1_test.py => generate_embedding_data_tf2_test.py} (81%) diff --git a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py index 89f89467c..334feb765 100644 --- a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py +++ b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py @@ -53,7 +53,7 @@ import os import numpy as np import PIL.Image import six -import tensorflow.compat.v1 as tf +import tensorflow as tf try: import apache_beam as beam # pylint:disable=g-import-not-at-top diff --git a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py similarity index 95% rename from research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py rename to research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py index 42f970c22..ae4e02bdc 100644 --- 
a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py @@ -25,11 +25,12 @@ import unittest import numpy as np import six -import tensorflow.compat.v1 as tf +import tensorflow as tf -from object_detection.dataset_tools.context_rcnn import add_context_to_examples from object_detection.utils import tf_version +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top try: import apache_beam as beam # pylint:disable=g-import-not-at-top @@ -42,7 +43,7 @@ def InMemoryTFRecord(entries): temp = tempfile.NamedTemporaryFile(delete=False) filename = temp.name try: - with tf.python_io.TFRecordWriter(filename) as writer: + with tf.io.TFRecordWriter(filename) as writer: for value in entries: writer.write(value) yield filename @@ -70,13 +71,12 @@ def FloatListFeature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) -@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class GenerateContextDataTest(tf.test.TestCase): def _create_first_tf_example(self): - with self.test_session(): - encoded_image = tf.image.encode_jpeg( - tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': BytesFeature(encoded_image), @@ -105,9 +105,8 @@ class GenerateContextDataTest(tf.test.TestCase): return example.SerializeToString() def _create_second_tf_example(self): - with self.test_session(): - encoded_image = tf.image.encode_jpeg( - tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': BytesFeature(encoded_image), @@ -353,7 +352,8 @@ class GenerateContextDataTest(tf.test.TestCase): p.run() filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), 2) @@ -383,8 +383,8 @@ class GenerateContextDataTest(tf.test.TestCase): p.run() filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator( - path=filenames[0]) + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), 1) diff --git a/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py b/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py index bafc406be..dbf3cad0e 100644 --- a/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py +++ b/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py @@ -37,11 +37,10 @@ import argparse import hashlib import io import json -import logging import os import numpy as np import PIL.Image -import tensorflow.compat.v1 
as tf +import tensorflow as tf from object_detection.utils import dataset_util try: @@ -110,16 +109,9 @@ class ParseImage(beam.DoFn): encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) - # Ensure the image can be read by tf - with tf.Graph().as_default(): - image = tf.image.decode_jpeg(encoded_jpg, channels=3) - init_op = tf.initialize_all_tables() - with tf.Session() as sess: - sess.run(init_op) - sess.run(image) - except Exception as e: # pylint: disable=broad-except + image = tf.io.decode_jpeg(encoded_jpg, channels=3) + except Exception: # pylint: disable=broad-except # The image file is missing or corrupt - tf.logging.error(str(e)) return [] key = hashlib.sha256(encoded_jpg).hexdigest() @@ -257,8 +249,6 @@ def create_pipeline(pipeline, keep_bboxes: Whether to keep any bounding boxes that exist in the json file """ - logging.info('Reading data from COCO-CameraTraps Dataset.') - data = load_json_data(input_annotations_file) num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard)) diff --git a/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py b/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py similarity index 94% rename from research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py rename to research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py index 19018a3a1..0a1ac203f 100644 --- a/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py @@ -25,17 +25,19 @@ import unittest import numpy as np from PIL import Image -import tensorflow.compat.v1 as tf -from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main +import tensorflow as tf from object_detection.utils import tf_version +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top + try: import apache_beam as beam # pylint:disable=g-import-not-at-top except ModuleNotFoundError: pass -@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase): IMAGE_HEIGHT = 360 @@ -175,7 +177,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase): p.run() filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), num_frames) @@ -198,7 +201,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase): p.run() filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), num_frames) diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py 
b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py index aafac9edf..eb04cc8cd 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py @@ -48,7 +48,8 @@ from __future__ import print_function import argparse import os import threading -import tensorflow.compat.v1 as tf +import tensorflow as tf + try: import apache_beam as beam # pylint:disable=g-import-not-at-top except ModuleNotFoundError: @@ -85,22 +86,7 @@ class GenerateDetectionDataFn(beam.DoFn): # one instance across all threads in the worker. This is possible since # tf.Session.run() is thread safe. with self.session_lock: - if self._session is None: - graph = tf.Graph() - self._session = tf.Session(graph=graph) - with graph.as_default(): - meta_graph = tf.saved_model.loader.load( - self._session, [tf.saved_model.tag_constants.SERVING], - self._model_dir) - signature = meta_graph.signature_def['serving_default'] - input_tensor_name = signature.inputs['inputs'].name - self._input = graph.get_tensor_by_name(input_tensor_name) - self._boxes_node = graph.get_tensor_by_name( - signature.outputs['detection_boxes'].name) - self._scores_node = graph.get_tensor_by_name( - signature.outputs['detection_scores'].name) - self._num_detections_node = graph.get_tensor_by_name( - signature.outputs['num_detections'].name) + self._detect_fn = tf.saved_model.load(self._model_dir) def process(self, tfrecord_entry): return self._run_inference_and_generate_detections(tfrecord_entry) @@ -112,9 +98,11 @@ class GenerateDetectionDataFn(beam.DoFn): # There are already ground truth boxes for this image, just keep them. return [input_example] - detection_boxes, detection_scores, num_detections = self._session.run( - [self._boxes_node, self._scores_node, self._num_detections_node], - feed_dict={self._input: [tfrecord_entry]}) + detections = self._detect_fn.signatures['serving_default']( + (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0))) + detection_boxes = detections['detection_boxes'] + num_detections = detections['num_detections'] + detection_scores = detections['detection_scores'] example = tf.train.Example() diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py similarity index 75% rename from research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py rename to research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py index 545e83233..db5a716dd 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py @@ -24,15 +24,17 @@ import tempfile import unittest import numpy as np import six -import tensorflow.compat.v1 as tf +import tensorflow as tf -from object_detection import exporter +from object_detection import exporter_lib_v2 from object_detection.builders import model_builder from object_detection.core import model -from object_detection.dataset_tools.context_rcnn import generate_detection_data from object_detection.protos import pipeline_pb2 from object_detection.utils import tf_version +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top + if six.PY2: import mock # pylint: 
disable=g-import-not-at-top else: @@ -45,17 +47,23 @@ except ModuleNotFoundError: class FakeModel(model.DetectionModel): - """A Fake Detection model with expected output nodes from post-processing.""" + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=5) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. return tf.identity(inputs), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): - return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + return {'image': self._conv(preprocessed_inputs)} def postprocess(self, prediction_dict, true_image_shapes): - with tf.control_dependencies(prediction_dict.values()): + with tf.control_dependencies(list(prediction_dict.values())): postprocessed_tensors = { 'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6], [0.5, 0.5, 0.8, 0.8]]], tf.float32), @@ -89,7 +97,7 @@ def InMemoryTFRecord(entries): temp = tempfile.NamedTemporaryFile(delete=False) filename = temp.name try: - with tf.python_io.TFRecordWriter(filename) as writer: + with tf.io.TFRecordWriter(filename) as writer: for value in entries: writer.write(value) yield filename @@ -97,7 +105,7 @@ def InMemoryTFRecord(entries): os.unlink(filename) -@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class GenerateDetectionDataTest(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_path): @@ -106,64 +114,39 @@ class GenerateDetectionDataTest(tf.test.TestCase): Args: checkpoint_path: Path to save checkpoint from Fake model. 
""" - g = tf.Graph() - with g.as_default(): - mock_model = FakeModel(num_classes=5) - preprocessed_inputs, true_image_shapes = mock_model.preprocess( - tf.placeholder(tf.float32, shape=[None, None, None, 3])) - predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) - mock_model.postprocess(predictions, true_image_shapes) - tf.train.get_or_create_global_step() - saver = tf.train.Saver() - init = tf.global_variables_initializer() - with self.test_session(graph=g) as sess: - sess.run(init) - saver.save(sess, checkpoint_path) + mock_model = FakeModel() + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_path, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) def _export_saved_model(self): tmp_dir = self.get_temp_dir() - checkpoint_path = os.path.join(tmp_dir, 'model.ckpt') - self._save_checkpoint_from_mock_model(checkpoint_path) + self._save_checkpoint_from_mock_model(tmp_dir) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') tf.io.gfile.makedirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: - mock_builder.return_value = FakeModel(num_classes=5) + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() - pipeline_config.eval_config.use_moving_averages = False - detection_model = model_builder.build(pipeline_config.model, - is_training=False) - outputs, placeholder_tensor = exporter.build_detection_graph( + exporter_lib_v2.export_inference_graph( input_type='tf_example', - detection_model=detection_model, - input_shape=None, - output_collection_name='inference_op', - graph_hook_fn=None) - output_node_names = ','.join(outputs.keys()) - saver = tf.train.Saver() - input_saver_def = saver.as_saver_def() - frozen_graph_def = exporter.freeze_graph_with_def_protos( - input_graph_def=tf.get_default_graph().as_graph_def(), - input_saver_def=input_saver_def, - input_checkpoint=checkpoint_path, - output_node_names=output_node_names, - restore_op_name='save/restore_all', - filename_tensor_name='save/Const:0', - output_graph='', - clear_devices=True, - initializer_nodes='') - exporter.write_saved_model( - saved_model_path=saved_model_path, - frozen_graph_def=frozen_graph_def, - inputs=placeholder_tensor, - outputs=outputs) - return saved_model_path + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + saved_model_path = os.path.join(output_directory, 'saved_model') + return saved_model_path def _create_tf_example(self): with self.test_session(): - encoded_image = tf.image.encode_jpeg( - tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).eval() + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy() def BytesFeature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) @@ -264,7 +247,8 @@ class GenerateDetectionDataTest(tf.test.TestCase): p.run() filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + 
record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), 1) diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py index 74d15901d..30ea62835 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -55,7 +55,7 @@ import threading import numpy as np import six -import tensorflow.compat.v1 as tf +import tensorflow as tf try: import apache_beam as beam # pylint:disable=g-import-not-at-top @@ -95,27 +95,7 @@ class GenerateEmbeddingDataFn(beam.DoFn): # one instance across all threads in the worker. This is possible since # tf.Session.run() is thread safe. with self.session_lock: - if self._session is None: - graph = tf.Graph() - self._session = tf.Session(graph=graph) - with graph.as_default(): - meta_graph = tf.saved_model.loader.load( - self._session, [tf.saved_model.tag_constants.SERVING], - self._model_dir) - signature = meta_graph.signature_def['serving_default'] - input_tensor_name = signature.inputs['inputs'].name - detection_features_name = signature.outputs['detection_features'].name - detection_boxes_name = signature.outputs['detection_boxes'].name - num_detections_name = signature.outputs['num_detections'].name - self._input = graph.get_tensor_by_name(input_tensor_name) - self._embedding_node = graph.get_tensor_by_name(detection_features_name) - self._box_node = graph.get_tensor_by_name(detection_boxes_name) - self._scores_node = graph.get_tensor_by_name( - signature.outputs['detection_scores'].name) - self._num_detections = graph.get_tensor_by_name(num_detections_name) - tf.logging.info(signature.outputs['detection_features'].name) - tf.logging.info(signature.outputs['detection_boxes'].name) - tf.logging.info(signature.outputs['num_detections'].name) + self._detect_fn = tf.saved_model.load(self._model_dir) def process(self, tfrecord_entry): return self._run_inference_and_generate_embedding(tfrecord_entry) @@ -184,13 +164,12 @@ class GenerateEmbeddingDataFn(beam.DoFn): example.features.feature['image/unix_time'].float_list.value.extend( [unix_time]) - (detection_features, detection_boxes, num_detections, - detection_scores) = self._session.run( - [ - self._embedding_node, self._box_node, self._num_detections[0], - self._scores_node - ], - feed_dict={self._input: [tfrecord_entry]}) + detections = self._detect_fn.signatures['serving_default']( + (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0))) + detection_features = detections['detection_features'] + detection_boxes = detections['detection_boxes'] + num_detections = detections['num_detections'] + detection_scores = detections['detection_scores'] num_detections = int(num_detections) embed_all = [] diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py similarity index 81% rename from research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py rename to research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py index 71d1d600d..5c8503c98 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py +++ 
b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py @@ -23,14 +23,15 @@ import tempfile import unittest import numpy as np import six -import tensorflow.compat.v1 as tf -from object_detection import exporter +import tensorflow as tf +from object_detection import exporter_lib_v2 from object_detection.builders import model_builder from object_detection.core import model -from object_detection.dataset_tools.context_rcnn import generate_embedding_data from object_detection.protos import pipeline_pb2 from object_detection.utils import tf_version +if tf_version.is_tf2(): + from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top if six.PY2: import mock # pylint: disable=g-import-not-at-top @@ -44,14 +45,20 @@ except ModuleNotFoundError: class FakeModel(model.DetectionModel): - """A Fake Detection model with expected output nodes from post-processing.""" + + def __init__(self, conv_weight_scalar=1.0): + super(FakeModel, self).__init__(num_classes=5) + self._conv = tf.keras.layers.Conv2D( + filters=1, kernel_size=1, strides=(1, 1), padding='valid', + kernel_initializer=tf.keras.initializers.Constant( + value=conv_weight_scalar)) def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. return tf.identity(inputs), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): - return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} + return {'image': self._conv(preprocessed_inputs)} def postprocess(self, prediction_dict, true_image_shapes): with tf.control_dependencies(prediction_dict.values()): @@ -96,7 +103,7 @@ def InMemoryTFRecord(entries): temp = tempfile.NamedTemporaryFile(delete=False) filename = temp.name try: - with tf.python_io.TFRecordWriter(filename) as writer: + with tf.io.TFRecordWriter(filename) as writer: for value in entries: writer.write(value) yield filename @@ -104,7 +111,7 @@ def InMemoryTFRecord(entries): os.unlink(temp.name) -@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.') +@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class GenerateEmbeddingData(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_path): @@ -113,64 +120,38 @@ class GenerateEmbeddingData(tf.test.TestCase): Args: checkpoint_path: Path to save checkpoint from Fake model. 
""" - g = tf.Graph() - with g.as_default(): - mock_model = FakeModel(num_classes=5) - preprocessed_inputs, true_image_shapes = mock_model.preprocess( - tf.placeholder(tf.float32, shape=[None, None, None, 3])) - predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) - mock_model.postprocess(predictions, true_image_shapes) - tf.train.get_or_create_global_step() - saver = tf.train.Saver() - init = tf.global_variables_initializer() - with self.test_session(graph=g) as sess: - sess.run(init) - saver.save(sess, checkpoint_path) + mock_model = FakeModel() + fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32) + preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image) + predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) + mock_model.postprocess(predictions, true_image_shapes) + ckpt = tf.train.Checkpoint(model=mock_model) + exported_checkpoint_manager = tf.train.CheckpointManager( + ckpt, checkpoint_path, max_to_keep=1) + exported_checkpoint_manager.save(checkpoint_number=0) def _export_saved_model(self): tmp_dir = self.get_temp_dir() - checkpoint_path = os.path.join(tmp_dir, 'model.ckpt') - self._save_checkpoint_from_mock_model(checkpoint_path) + self._save_checkpoint_from_mock_model(tmp_dir) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') tf.io.gfile.makedirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: - mock_builder.return_value = FakeModel(num_classes=5) + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() - pipeline_config.eval_config.use_moving_averages = False - detection_model = model_builder.build(pipeline_config.model, - is_training=False) - outputs, placeholder_tensor = exporter.build_detection_graph( + exporter_lib_v2.export_inference_graph( input_type='tf_example', - detection_model=detection_model, - input_shape=None, - output_collection_name='inference_op', - graph_hook_fn=None) - output_node_names = ','.join(outputs.keys()) - saver = tf.train.Saver() - input_saver_def = saver.as_saver_def() - frozen_graph_def = exporter.freeze_graph_with_def_protos( - input_graph_def=tf.get_default_graph().as_graph_def(), - input_saver_def=input_saver_def, - input_checkpoint=checkpoint_path, - output_node_names=output_node_names, - restore_op_name='save/restore_all', - filename_tensor_name='save/Const:0', - output_graph='', - clear_devices=True, - initializer_nodes='') - exporter.write_saved_model( - saved_model_path=saved_model_path, - frozen_graph_def=frozen_graph_def, - inputs=placeholder_tensor, - outputs=outputs) - return saved_model_path + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory) + saved_model_path = os.path.join(output_directory, 'saved_model') + return saved_model_path def _create_tf_example(self): - with self.test_session(): - encoded_image = tf.image.encode_jpeg( - tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval() + encoded_image = tf.io.encode_jpeg( + tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy() def BytesFeature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) @@ -335,7 +316,8 @@ class GenerateEmbeddingData(tf.test.TestCase): filenames = tf.io.gfile.glob( output_tfrecord + '-?????-of-?????') actual_output = [] - record_iterator = tf.python_io.tf_record_iterator(path=filenames[0]) + 
record_iterator = tf.data.TFRecordDataset( + tf.convert_to_tensor(filenames)).as_numpy_iterator() for record in record_iterator: actual_output.append(record) self.assertEqual(len(actual_output), 1) diff --git a/research/object_detection/packages/tf1/setup.py b/research/object_detection/packages/tf1/setup.py index 1cd4923cb..dc3bfaca0 100644 --- a/research/object_detection/packages/tf1/setup.py +++ b/research/object_detection/packages/tf1/setup.py @@ -3,7 +3,7 @@ import os from setuptools import find_packages from setuptools import setup -REQUIRED_PACKAGES = ['apache-beam', 'pillow', 'lxml', 'matplotlib', 'Cython', +REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython', 'contextlib2', 'tf-slim', 'six', 'pycocotools', 'scipy', 'pandas'] diff --git a/research/object_detection/packages/tf2/setup.py b/research/object_detection/packages/tf2/setup.py index 09738ee07..1067b0ecf 100644 --- a/research/object_detection/packages/tf2/setup.py +++ b/research/object_detection/packages/tf2/setup.py @@ -6,7 +6,9 @@ from setuptools import setup # Note: adding apache-beam to required packages causes a conflict with # tf-models-official requirements. These packages request an incompatible # oauth2client package. -REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython', 'contextlib2', +REQUIRED_PACKAGES = ['avro-python3==1.8.1', 'apache-beam', + 'pillow', 'lxml', + 'matplotlib', 'Cython', 'contextlib2', 'tf-slim', 'six', 'pycocotools', 'scipy', 'pandas', 'tf-models-official'] -- GitLab From bda18166ec804e33e88fd0c4b33decc1308926a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Susano=20Pinto?= Date: Wed, 29 Jul 2020 11:18:07 -0700 Subject: [PATCH 068/128] Create output directories when converting bert checkpoint. PiperOrigin-RevId: 323827690 --- official/nlp/bert/tf2_encoder_checkpoint_converter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/official/nlp/bert/tf2_encoder_checkpoint_converter.py b/official/nlp/bert/tf2_encoder_checkpoint_converter.py index 2faf6ea2c..b9edf7c4e 100644 --- a/official/nlp/bert/tf2_encoder_checkpoint_converter.py +++ b/official/nlp/bert/tf2_encoder_checkpoint_converter.py @@ -73,6 +73,7 @@ def _create_bert_model(cfg): def convert_checkpoint(bert_config, output_path, v1_checkpoint): """Converts a V1 checkpoint into an OO V2 checkpoint.""" output_dir, _ = os.path.split(output_path) + tf.io.gfile.makedirs(output_dir) # Create a temporary V1 name-converted checkpoint in the output directory. temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1") -- GitLab From 23804bc55cd6d2596cec279c6f25ab5a9a6abd99 Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Wed, 29 Jul 2020 16:36:50 -0400 Subject: [PATCH 069/128] transformer, attention layers --- official/nlp/modeling/layers/attention.py | 3 +- official/nlp/modeling/layers/transformer.py | 76 +++++++++++++++++---- 2 files changed, 64 insertions(+), 15 deletions(-) diff --git a/official/nlp/modeling/layers/attention.py b/official/nlp/modeling/layers/attention.py index 83b565c4f..b44c4432f 100644 --- a/official/nlp/modeling/layers/attention.py +++ b/official/nlp/modeling/layers/attention.py @@ -523,9 +523,8 @@ class CachedAttention(MultiHeadAttention): # Take the dot product between "query" and "key" to get the raw # attention scores. + query = tf.multiply(query,1.0 / math.sqrt(float(self._key_size))) attention_scores = tf.einsum(self._dot_product_equation, key, query) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(self._key_size))) # Normalize the attention scores to probabilities. 
# `attention_scores` = [B, N, F, T] diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index 5f87ab9ff..caa53dc03 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -65,6 +65,9 @@ class Transformer(tf.keras.layers.Layer): activity_regularizer=None, kernel_constraint=None, bias_constraint=None, + use_bias=True, + norm_first=False, + norm_epsilon=1e-12, **kwargs): super(Transformer, self).__init__(**kwargs) @@ -81,6 +84,9 @@ class Transformer(tf.keras.layers.Layer): self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) + self._use_bias = use_bias + self._norm_first = norm_first + self._norm_epsilon = norm_epsilon def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape @@ -117,6 +123,7 @@ class Transformer(tf.keras.layers.Layer): num_heads=self._num_heads, key_size=self._attention_head_size, dropout=self._attention_dropout_rate, + use_bias=self._use_bias, name="self_attention", **common_kwargs) # pylint: disable=protected-access @@ -132,7 +139,7 @@ class Transformer(tf.keras.layers.Layer): tf.keras.layers.LayerNormalization( name="self_attention_layer_norm", axis=-1, - epsilon=1e-12, + epsilon=self._norm_epsilon, dtype=tf.float32)) self._intermediate_dense = tf.keras.layers.experimental.EinsumDense( "abc,cd->abd", @@ -157,7 +164,8 @@ class Transformer(tf.keras.layers.Layer): self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. self._output_layer_norm = tf.keras.layers.LayerNormalization( - name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32) + name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon, + dtype=tf.float32) super(Transformer, self).build(input_shape) @@ -203,13 +211,22 @@ class Transformer(tf.keras.layers.Layer): target_tensor = input_tensor[:, 0:self._output_range, :] attention_mask = attention_mask[:, 0:self._output_range, :] else: + if self._norm_first: + source_tensor = input_tensor + input_tensor = self._attention_layer_norm(input_tensor) target_tensor = input_tensor attention_output = self._attention_layer( query=target_tensor, value=input_tensor, attention_mask=attention_mask) attention_output = self._attention_dropout(attention_output) - attention_output = self._attention_layer_norm(target_tensor + - attention_output) + if self._norm_first: + attention_output = source_tensor + attention_output + else: + attention_output = self._attention_layer_norm(target_tensor + + attention_output) + if self._norm_first: + source_attention_output = attention_output + attention_output = self._output_layer_norm(attention_output) intermediate_output = self._intermediate_dense(attention_output) intermediate_output = self._intermediate_activation_layer( intermediate_output) @@ -219,7 +236,10 @@ class Transformer(tf.keras.layers.Layer): # is always fp32 for now. Cast layer_output to fp32 for the subsequent # add. 
layer_output = tf.cast(layer_output, tf.float32) - layer_output = self._output_layer_norm(layer_output + attention_output) + if self._norm_first: + layer_output = source_attention_output + layer_output + else: + layer_output = self._output_layer_norm(layer_output + attention_output) return layer_output @@ -273,6 +293,9 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): activity_regularizer=None, kernel_constraint=None, bias_constraint=None, + use_bias=True, + norm_first=False, + norm_epsilon=1e-12, **kwargs): super(TransformerDecoderLayer, self).__init__(**kwargs) self.num_attention_heads = num_attention_heads @@ -289,6 +312,9 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) + self._use_bias = use_bias + self._norm_first = norm_first + self._norm_epsilon = norm_epsilon if self.multi_channel_cross_attention: self._cross_attention_cls = multi_channel_attention.MultiChannelAttention else: @@ -318,6 +344,7 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): num_heads=self.num_attention_heads, key_size=self.attention_head_size, dropout=self.attention_dropout_rate, + use_bias=self._use_bias, name="self_attention", **common_kwargs) self.self_attention_output_dense = tf.keras.layers.experimental.EinsumDense( @@ -330,13 +357,15 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): rate=self.dropout_rate) self.self_attention_layer_norm = ( tf.keras.layers.LayerNormalization( - name="self_attention_layer_norm", axis=-1, epsilon=1e-12)) + name="self_attention_layer_norm", + axis=-1, epsilon=self._norm_epsilon)) # Encoder-decoder attention. self.encdec_attention = self._cross_attention_cls( num_heads=self.num_attention_heads, key_size=self.attention_head_size, dropout=self.attention_dropout_rate, output_shape=hidden_size, + use_bias=self._use_bias, name="attention/encdec", **common_kwargs) @@ -344,7 +373,8 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): rate=self.dropout_rate) self.encdec_attention_layer_norm = ( tf.keras.layers.LayerNormalization( - name="attention/encdec_output_layer_norm", axis=-1, epsilon=1e-12)) + name="attention/encdec_output_layer_norm", + axis=-1, epsilon=self._norm_epsilon)) # Feed-forward projection. 
self.intermediate_dense = tf.keras.layers.experimental.EinsumDense( @@ -363,7 +393,7 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): **common_kwargs) self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_rate) self.output_layer_norm = tf.keras.layers.LayerNormalization( - name="output_layer_norm", axis=-1, epsilon=1e-12) + name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon) super(TransformerDecoderLayer, self).build(input_shape) def common_layers_with_encoder(self): @@ -384,6 +414,9 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): "TransformerDecoderLayer must have 4 inputs, but it got: %d" % len(inputs)) input_tensor, memory, attention_mask, self_attention_mask = inputs[:4] + source_tensor = input_tensor + if self._norm_first: + input_tensor = self.self_attention_layer_norm(input_tensor) self_attention_output, cache = self.self_attention( query=input_tensor, value=input_tensor, @@ -391,8 +424,15 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): cache=cache, decode_loop_step=decode_loop_step) self_attention_output = self.self_attention_dropout(self_attention_output) - self_attention_output = self.self_attention_layer_norm( - input_tensor + self_attention_output) + if self._norm_first: + self_attention_output = source_tensor + self_attention_output + else: + self_attention_output = self.self_attention_layer_norm( + input_tensor + self_attention_output) + if self._norm_first: + source_self_attention_output = self_attention_output + self_attention_output = self.encdec_attention_layer_norm( + self_attention_output) cross_attn_inputs = dict( query=self_attention_output, value=memory, @@ -402,13 +442,23 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): cross_attn_inputs["context_attention_weights"] = inputs[-1] attention_output = self.encdec_attention(**cross_attn_inputs) attention_output = self.encdec_attention_dropout(attention_output) - attention_output = self.encdec_attention_layer_norm(self_attention_output + - attention_output) + if self._norm_first: + attention_output = source_self_attention_output + attention_output + else: + attention_output = self.encdec_attention_layer_norm( + self_attention_output + + attention_output) + if self._norm_first: + source_attention_output = attention_output + attention_output = self.output_layer_norm(attention_output) intermediate_output = self.intermediate_dense(attention_output) intermediate_output = self.intermediate_activation_layer( intermediate_output) layer_output = self.output_dense(intermediate_output) layer_output = self.output_dropout(layer_output) - layer_output = self.output_layer_norm(layer_output + attention_output) + if self._norm_first: + layer_output = source_attention_output + layer_output + else: + layer_output = self.output_layer_norm(layer_output + attention_output) return layer_output, cache -- GitLab From d39321b13b6da70b428901ded3341df338aff348 Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Wed, 29 Jul 2020 16:53:14 -0400 Subject: [PATCH 070/128] docstrings --- official/nlp/modeling/layers/attention.py | 3 ++- official/nlp/modeling/layers/transformer.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/official/nlp/modeling/layers/attention.py b/official/nlp/modeling/layers/attention.py index b44c4432f..092265f03 100644 --- a/official/nlp/modeling/layers/attention.py +++ b/official/nlp/modeling/layers/attention.py @@ -521,9 +521,10 @@ class CachedAttention(MultiHeadAttention): if cache: key, value = self._update_cache(key, value, cache, decode_loop_step) 
+ query = tf.multiply(query,1.0 / math.sqrt(float(self._key_size))) + # Take the dot product between "query" and "key" to get the raw # attention scores. - query = tf.multiply(query,1.0 / math.sqrt(float(self._key_size))) attention_scores = tf.einsum(self._dot_product_equation, key, query) # Normalize the attention scores to probabilities. diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index caa53dc03..c61d18216 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -49,6 +49,10 @@ class Transformer(tf.keras.layers.Layer): activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. + use_bias: Whether to enable use_bias in attention layer. + norm_first: Whether to normalize inputs to attention and intermediate dense + layers. + norm_epsilon: Epsilon value to initialize normalization layers. """ def __init__(self, @@ -277,6 +281,10 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. + use_bias: Whether to enable use_bias in attention layer. + norm_first: Whether to normalize inputs to attention and intermediate dense + layers. + norm_epsilon: Epsilon value to initialize normalization layers. """ def __init__(self, -- GitLab From aa773be8f5af83bf011a85bf54ef2ff0e7ca0c18 Mon Sep 17 00:00:00 2001 From: Mitchel Humpherys Date: Wed, 29 Jul 2020 14:47:53 -0700 Subject: [PATCH 071/128] Fix notebook URL There's an extraneous `colab_tutorials/` in there. --- .../colab_tutorials/object_detection_tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb b/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb index b4eac1ade..0dd125320 100644 --- a/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb +++ b/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb @@ -10,11 +10,11 @@ "# Object Detection API Demo\n", "\n", "\u003ctable align=\"left\"\u003e\u003ctd\u003e\n", - " \u003ca target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/colab_tutorials/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", " \u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\n", " \u003c/a\u003e\n", "\u003c/td\u003e\u003ctd\u003e\n", - " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/object_detection_tutorial.ipynb\"\u003e\n", " \u003cimg width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", "\u003c/td\u003e\u003c/table\u003e" ] -- GitLab From 3e0fa93255f207dac8bd661336a4973f922c8bbe Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Wed, 29 Jul 2020 
19:53:36 -0400 Subject: [PATCH 072/128] whitespace --- official/nlp/modeling/layers/attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/official/nlp/modeling/layers/attention.py b/official/nlp/modeling/layers/attention.py index 092265f03..6558bc0fc 100644 --- a/official/nlp/modeling/layers/attention.py +++ b/official/nlp/modeling/layers/attention.py @@ -521,7 +521,7 @@ class CachedAttention(MultiHeadAttention): if cache: key, value = self._update_cache(key, value, cache, decode_loop_step) - query = tf.multiply(query,1.0 / math.sqrt(float(self._key_size))) + query = tf.multiply(query, 1.0 / math.sqrt(float(self._key_size))) # Take the dot product between "query" and "key" to get the raw # attention scores. -- GitLab From b658b16dc76f34e7d8db91ad2f37aaff3ea8f5dc Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Wed, 29 Jul 2020 19:01:39 -0700 Subject: [PATCH 073/128] Use BackupHandler from tf_slim instead of OD API's clone. PiperOrigin-RevId: 323916181 --- .../data_decoders/tf_example_decoder.py | 38 +------------------ 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/research/object_detection/data_decoders/tf_example_decoder.py b/research/object_detection/data_decoders/tf_example_decoder.py index 04cc4db59..adf4bd689 100644 --- a/research/object_detection/data_decoders/tf_example_decoder.py +++ b/research/object_detection/data_decoders/tf_example_decoder.py @@ -124,40 +124,6 @@ class _ClassTensorHandler(slim_example_decoder.Tensor): self._display_name_to_id_table.lookup(unmapped_tensor)) -class _BackupHandler(slim_example_decoder.ItemHandler): - """An ItemHandler that tries two ItemHandlers in order.""" - - def __init__(self, handler, backup): - """Initializes the BackupHandler handler. - - If the first Handler's tensors_to_item returns a Tensor with no elements, - the second Handler is used. - - Args: - handler: The primary ItemHandler. - backup: The backup ItemHandler. - - Raises: - ValueError: if either is not an ItemHandler. - """ - if not isinstance(handler, slim_example_decoder.ItemHandler): - raise ValueError('Primary handler is of type %s instead of ItemHandler' % - type(handler)) - if not isinstance(backup, slim_example_decoder.ItemHandler): - raise ValueError( - 'Backup handler is of type %s instead of ItemHandler' % type(backup)) - self._handler = handler - self._backup = backup - super(_BackupHandler, self).__init__(handler.keys + backup.keys) - - def tensors_to_item(self, keys_to_tensors): - item = self._handler.tensors_to_item(keys_to_tensors) - return tf.cond( - pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0), - true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors), - false_fn=lambda: item) - - class TfExampleDecoder(data_decoder.DataDecoder): """Tensorflow Example proto decoder.""" @@ -405,12 +371,12 @@ class TfExampleDecoder(data_decoder.DataDecoder): if label_map_proto_file: # If the label_map_proto is provided, try to use it in conjunction with # the class text, and fall back to a materialized ID. 
- label_handler = _BackupHandler( + label_handler = slim_example_decoder.BackupHandler( _ClassTensorHandler( 'image/object/class/text', label_map_proto_file, default_value=''), slim_example_decoder.Tensor('image/object/class/label')) - image_label_handler = _BackupHandler( + image_label_handler = slim_example_decoder.BackupHandler( _ClassTensorHandler( fields.TfExampleFields.image_class_text, label_map_proto_file, -- GitLab From 5250478059a4297919b40e0d29bda9ceb013cf27 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Wed, 29 Jul 2020 20:50:35 -0700 Subject: [PATCH 074/128] Remove backward compatibility hack. PiperOrigin-RevId: 323927843 --- official/nlp/modeling/layers/transformer.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index 5f87ab9ff..fcbc29721 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -119,12 +119,6 @@ class Transformer(tf.keras.layers.Layer): dropout=self._attention_dropout_rate, name="self_attention", **common_kwargs) - # pylint: disable=protected-access - # Temporarily handling for checkpoint compatible changes. - self._attention_layer._build_from_signature( - query=input_tensor_shape, value=input_tensor_shape) - self._attention_output_dense = self._attention_layer._output_dense - # pylint: enable=protected-access self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) # Use float32 in layernorm for numeric stability. # It is probably safe in mixed_float16, but we haven't validated this yet. -- GitLab From ccc6076093696dc06925aaa61f8824208189479a Mon Sep 17 00:00:00 2001 From: Tomer Kaftan Date: Wed, 29 Jul 2020 22:17:12 -0700 Subject: [PATCH 075/128] Pre-emptively disable the KerasTensors refactoring for the detection models in tensorflow models/official/vision/detection, because they rely on several unsupported things that will stop working entirely when the refactoring goes live. Specifically: * The custom layers implement `__call__` instead of `call` and rely on manually entering the Keras backend graph * The vision models try to use `tf.while_loop` as Keras op layers during functional API construction, which is unsupported. Updating the models to avoid this would subtly change the variable names and break the pre-existing tf1-style name-based checkpoints, so for now we will just disable the KerasTensors refactoring for these models. PiperOrigin-RevId: 323937426 --- official/vision/detection/modeling/architecture/keras_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/official/vision/detection/modeling/architecture/keras_utils.py b/official/vision/detection/modeling/architecture/keras_utils.py index 0fd432e2c..530f8f1e2 100644 --- a/official/vision/detection/modeling/architecture/keras_utils.py +++ b/official/vision/detection/modeling/architecture/keras_utils.py @@ -23,6 +23,7 @@ from tensorflow.python.keras import backend try: from tensorflow.python.keras.engine import keras_tensor # pylint: disable=g-import-not-at-top,unused-import + keras_tensor.disable_keras_tensors() except ImportError: keras_tensor = None -- GitLab From 5366f605bf613725f02ebdfa2b011505c9a8705a Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Wed, 29 Jul 2020 23:55:33 -0700 Subject: [PATCH 076/128] Internal change PiperOrigin-RevId: 323948101 --- official/recommendation/ncf_input_pipeline.py | 19 ++++++++++++++++--- official/recommendation/ncf_keras_main.py | 4 +++- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/official/recommendation/ncf_input_pipeline.py b/official/recommendation/ncf_input_pipeline.py index 425f0f097..0a5b1c710 100644 --- a/official/recommendation/ncf_input_pipeline.py +++ b/official/recommendation/ncf_input_pipeline.py @@ -32,7 +32,8 @@ from official.recommendation import movielens def create_dataset_from_tf_record_files(input_file_pattern, pre_batch_size, batch_size, - is_training=True): + is_training=True, + rebatch=False): """Creates dataset from (tf)records files for training/evaluation.""" if pre_batch_size != batch_size: raise ValueError("Pre-batch ({}) size is not equal to batch " @@ -51,6 +52,12 @@ def create_dataset_from_tf_record_files(input_file_pattern, dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) + if rebatch: + # A workaround for TPU Pod evaluation dataset. + # TODO (b/162341937) remove once it's fixed. + dataset = dataset.unbatch() + dataset = dataset.batch(pre_batch_size) + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) return dataset @@ -151,12 +158,18 @@ def create_ncf_input_data(params, params["train_dataset_path"], input_meta_data["train_prebatch_size"], params["batch_size"], - is_training=True) + is_training=True, + rebatch=False) + + # Re-batch evaluation dataset for TPU Pods. + # TODO (b/162341937) remove once it's fixed. + eval_rebatch = (params["use_tpu"] and strategy.num_replicas_in_sync > 8) eval_dataset = create_dataset_from_tf_record_files( params["eval_dataset_path"], input_meta_data["eval_prebatch_size"], params["eval_batch_size"], - is_training=False) + is_training=False, + rebatch=eval_rebatch) num_train_steps = int(input_meta_data["num_train_steps"]) num_eval_steps = int(input_meta_data["num_eval_steps"]) diff --git a/official/recommendation/ncf_keras_main.py b/official/recommendation/ncf_keras_main.py index 50c5c15e7..c850539d4 100644 --- a/official/recommendation/ncf_keras_main.py +++ b/official/recommendation/ncf_keras_main.py @@ -235,6 +235,7 @@ def run_ncf(_): params = ncf_common.parse_flags(FLAGS) params["distribute_strategy"] = strategy + params["use_tpu"] = (FLAGS.distribution_strategy == "tpu") if params["use_tpu"] and not params["keras_use_ctl"]: logging.error("Custom training loop must be used when using TPUStrategy.") @@ -491,7 +492,8 @@ def run_ncf_custom_training(params, logging.info("Done training epoch %s, epoch loss=%.3f", epoch + 1, train_loss) - eval_input_iterator = iter(eval_input_dataset) + eval_input_iterator = iter( + strategy.experimental_distribute_dataset(eval_input_dataset)) hr_sum = 0.0 hr_count = 0.0 -- GitLab From 1c5dca7f47d5bea8dcdaf989ffb3a5984de4d27a Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Thu, 30 Jul 2020 10:11:13 -0400 Subject: [PATCH 077/128] get_config and doc --- official/nlp/modeling/layers/transformer.py | 61 +++++++++++++++++++-- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index c61d18216..5249cd5ee 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -49,9 +49,11 @@ class Transformer(tf.keras.layers.Layer): activity_regularizer: Regularizer for dense layer activity. 
kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. - use_bias: Whether to enable use_bias in attention layer. + use_bias: Whether to enable use_bias in attention layer. If set to False, + use_bias in the attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense - layers. + layers. If set to False, the output of attention and intermediate dense + layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. """ @@ -200,7 +202,13 @@ "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": - tf.keras.constraints.serialize(self._bias_constraint) + tf.keras.constraints.serialize(self._bias_constraint), + "use_bias": + self._use_bias, + "norm_first": + self._norm_first, + "norm_epsilon": + self._norm_epsilon } base_config = super(Transformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) @@ -281,9 +289,11 @@ activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. - use_bias: Whether to enable use_bias in attention layer. + use_bias: Whether to enable use_bias in attention layer. If set to False, + use_bias in the attention layer is disabled. norm_first: Whether to normalize inputs to attention and intermediate dense - layers. + layers. If set to False, the output of attention and intermediate dense + layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. """ @@ -404,6 +414,47 @@ name="output_layer_norm", axis=-1, epsilon=self._norm_epsilon) super(TransformerDecoderLayer, self).build(input_shape) + def get_config(self): + config = { + "num_attention_heads": + self.num_attention_heads, + "intermediate_size": + self.intermediate_size, + "intermediate_activation": + self.intermediate_activation, + "dropout_rate": + self.dropout_rate, + "attention_dropout_rate": + self.attention_dropout_rate, + "multi_channel_cross_attention": + self.multi_channel_cross_attention, + "kernel_initializer": + tf.keras.initializers.serialize(self._kernel_initializer), + "bias_initializer": + tf.keras.initializers.serialize(self._bias_initializer), + "kernel_regularizer": + tf.keras.regularizers.serialize(self._kernel_regularizer), + "bias_regularizer": + tf.keras.regularizers.serialize(self._bias_regularizer), + "activity_regularizer": + tf.keras.regularizers.serialize(self._activity_regularizer), + "kernel_constraint": + tf.keras.constraints.serialize(self._kernel_constraint), + "bias_constraint": + tf.keras.constraints.serialize(self._bias_constraint), + "use_bias": + self._use_bias, + "norm_first": + self._norm_first, + "norm_epsilon": + self._norm_epsilon, + "cross_attention_cls": + self._cross_attention_cls } base_config = super(TransformerDecoderLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) + + def common_layers_with_encoder(self): """Gets layer objects that can make a Transformer encoder block.""" return [ -- GitLab From 6733661288bbb48ec3fcdf58b069164b3fe7a7a5 Mon Sep 17 00:00:00 2001 From: Vighnesh Birodkar Date: Thu, 30 Jul 2020 12:04:14 -0700 Subject: [PATCH 078/128] Add point to convert_keras_models.py in ResNet error message. 
PiperOrigin-RevId: 324052434 --- .../models/center_net_resnet_feature_extractor.py | 6 ++++-- .../models/center_net_resnet_v1_fpn_feature_extractor.py | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/research/object_detection/models/center_net_resnet_feature_extractor.py b/research/object_detection/models/center_net_resnet_feature_extractor.py index 5b9ab5cd0..630c9cfdc 100644 --- a/research/object_detection/models/center_net_resnet_feature_extractor.py +++ b/research/object_detection/models/center_net_resnet_feature_extractor.py @@ -129,8 +129,10 @@ class CenterNetResnetFeatureExtractor(CenterNetFeatureExtractor): else: supported_types = ['classification'] raise ValueError( - ('Sub model {} is not defined for ResNet.'.format(sub_model_type) + - 'Supported types are {}.'.format(supported_types))) + ('Sub model {} is not defined for ResNet.'.format(sub_model_type) + + ' Supported types are {}.'.format(supported_types) + + ' Use the script convert_keras_models.py to create your own ' + + 'classification checkpoints.')) def resnet_v2_101(channel_means, channel_stds, bgr_ordering): diff --git a/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py b/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py index 264799782..7382f9219 100644 --- a/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py +++ b/research/object_detection/models/center_net_resnet_v1_fpn_feature_extractor.py @@ -166,7 +166,9 @@ class CenterNetResnetV1FpnFeatureExtractor(CenterNetFeatureExtractor): supported_types = ['classification'] raise ValueError( ('Sub model {} is not defined for ResNet FPN.'.format(sub_model_type) - + 'Supported types are {}.'.format(supported_types))) + + ' Supported types are {}.'.format(supported_types)) + + ' Use the script convert_keras_models.py to create your own ' + + 'classification checkpoints.') def resnet_v1_101_fpn(channel_means, channel_stds, bgr_ordering): -- GitLab From 72efa8548099a0aadf3e7355f063c0e678d8bb00 Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Thu, 30 Jul 2020 16:31:53 -0400 Subject: [PATCH 079/128] transformer test case --- official/nlp/modeling/layers/transformer.py | 4 +- .../nlp/modeling/layers/transformer_test.py | 72 +++++++++++++++++++ 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index 5249cd5ee..a014c118c 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -447,9 +447,7 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): "norm_first": self._norm_first, "norm_epsilon": - self._norm_epsilon, - "cross_attention_cls": - self._cross_attention_cls + self._norm_epsilon } base_config = super(TransformerDecoderLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) diff --git a/official/nlp/modeling/layers/transformer_test.py b/official/nlp/modeling/layers/transformer_test.py index 4c748a0e3..5fa05f015 100644 --- a/official/nlp/modeling/layers/transformer_test.py +++ b/official/nlp/modeling/layers/transformer_test.py @@ -218,6 +218,44 @@ class TransformerLayerTest(keras_parameterized.TestCase): self.assertAllEqual([1, input_length, width], output_data.shape) +@keras_parameterized.run_all_keras_modes +class TransformerArgumentTest(keras_parameterized.TestCase): + + def test_use_bias(self): + num_attention_heads = 2 + hidden_size = 16 + encoder_block = transformer.Transformer( 
num_attention_heads=num_attention_heads, + intermediate_size=32, + intermediate_activation='relu', + dropout_rate=0.1, + attention_dropout_rate=0.1, + use_bias=False, + norm_first=True, + norm_epsilon=1e-6) + # Forward path. + dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) + dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) + inputs = [dummy_tensor, dummy_mask] + output = encoder_block(inputs) + self.assertEqual(output.shape, (2, 4, hidden_size)) + + def test_get_config(self): + num_attention_heads = 2 + encoder_block = transformer.Transformer( + num_attention_heads=num_attention_heads, + intermediate_size=32, + intermediate_activation='relu', + dropout_rate=0.1, + attention_dropout_rate=0.1, + use_bias=False, + norm_first=True, + norm_epsilon=1e-6) + encoder_block_config = encoder_block.get_config() + new_encoder_block = transformer.Transformer.from_config( + encoder_block_config) + self.assertEqual(encoder_block_config, new_encoder_block.get_config()) + def _create_cache(batch_size, init_decode_length, num_heads, head_size): return { 'key': @@ -251,6 +289,40 @@ class TransformerDecoderLayerTest(keras_parameterized.TestCase): self.assertEqual(output.shape, (2, 4, hidden_size)) self.assertEqual(cache['value'].shape, (2, 4, 2, 8)) + def test_use_bias(self): + num_attention_heads = 2 + hidden_size = 16 + decoder_block = transformer.TransformerDecoderLayer( + num_attention_heads=num_attention_heads, + intermediate_size=32, + intermediate_activation='relu', + dropout_rate=0.1, + attention_dropout_rate=0.1, + use_bias=False, + norm_first=True, + norm_epsilon=1e-6) + # Forward path. + dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) + dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) + inputs = [dummy_tensor, dummy_tensor, dummy_mask, dummy_mask] + output, _ = decoder_block(inputs) + self.assertEqual(output.shape, (2, 4, hidden_size)) + + def test_get_config(self): + num_attention_heads = 2 + decoder_block = transformer.TransformerDecoderLayer( + num_attention_heads=num_attention_heads, + intermediate_size=32, + intermediate_activation='relu', + dropout_rate=0.1, + attention_dropout_rate=0.1, + use_bias=False, + norm_first=True, + norm_epsilon=1e-6) + decoder_block_config = decoder_block.get_config() + new_decoder_block = transformer.TransformerDecoderLayer.from_config( + decoder_block_config) + self.assertEqual(decoder_block_config, new_decoder_block.get_config()) if __name__ == '__main__': tf.test.main() -- GitLab From 673231ffc7c87f4b54d3ecef7ff80595e99647dd Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Thu, 30 Jul 2020 16:43:16 -0400 Subject: [PATCH 080/128] func name change --- official/nlp/modeling/layers/transformer_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/official/nlp/modeling/layers/transformer_test.py b/official/nlp/modeling/layers/transformer_test.py index 5fa05f015..cca87bc2f 100644 --- a/official/nlp/modeling/layers/transformer_test.py +++ b/official/nlp/modeling/layers/transformer_test.py @@ -221,7 +221,7 @@ class TransformerLayerTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes class TransformerArgumentTest(keras_parameterized.TestCase): - def test_use_bias(self): + def test_use_bias_norm_first(self): num_attention_heads = 2 hidden_size = 16 encoder_block = transformer.Transformer( @@ -289,7 +289,7 @@ class TransformerDecoderLayerTest(keras_parameterized.TestCase): self.assertEqual(output.shape, (2, 4, hidden_size)) self.assertEqual(cache['value'].shape, (2, 4, 2, 8)) - def test_use_bias(self): + def 
test_use_bias_norm_first(self): num_attention_heads = 2 hidden_size = 16 decoder_block = transformer.TransformerDecoderLayer( -- GitLab From 2e785497ed6dc4bc0e641f944618aabcc7e151f8 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Thu, 30 Jul 2020 14:32:49 -0700 Subject: [PATCH 081/128] Internal change PiperOrigin-RevId: 324084377 --- official/nlp/modeling/layers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/official/nlp/modeling/layers/__init__.py b/official/nlp/modeling/layers/__init__.py index 2cd8e7b9e..5720430ab 100644 --- a/official/nlp/modeling/layers/__init__.py +++ b/official/nlp/modeling/layers/__init__.py @@ -23,6 +23,7 @@ from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax from official.nlp.modeling.layers.multi_channel_attention import * from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding from official.nlp.modeling.layers.position_embedding import PositionEmbedding +from official.nlp.modeling.layers.position_embedding import RelativePositionEmbedding from official.nlp.modeling.layers.rezero_transformer import ReZeroTransformer from official.nlp.modeling.layers.self_attention_mask import SelfAttentionMask from official.nlp.modeling.layers.talking_heads_attention import TalkingHeadsAttention -- GitLab From a62c2bfcc94ad5b1ccb31ebf3a869c68e857b7d9 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Thu, 30 Jul 2020 20:14:13 -0700 Subject: [PATCH 082/128] Internal change PiperOrigin-RevId: 324137297 --- .../tf2_albert_encoder_checkpoint_converter.py | 2 +- official/nlp/bert/bert_models.py | 7 +++---- official/nlp/bert/bert_models_test.py | 14 ++++---------- .../bert/tf2_encoder_checkpoint_converter.py | 2 +- official/nlp/configs/encoders.py | 2 -- official/nlp/modeling/layers/masked_lm_test.py | 9 +-------- .../modeling/models/bert_classifier_test.py | 6 +++--- .../nlp/modeling/models/bert_pretrainer.py | 3 ++- .../modeling/models/bert_span_labeler_test.py | 9 ++++----- .../nlp/modeling/models/electra_pretrainer.py | 4 ---- .../modeling/models/electra_pretrainer_test.py | 9 ++++++--- .../networks/albert_transformer_encoder.py | 15 ++++----------- .../albert_transformer_encoder_test.py | 4 ---- .../nlp/modeling/networks/encoder_scaffold.py | 7 ++++--- .../modeling/networks/transformer_encoder.py | 18 +++++++----------- .../networks/transformer_encoder_test.py | 7 ------- official/nlp/nhnet/models.py | 1 - 17 files changed, 40 insertions(+), 79 deletions(-) diff --git a/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py b/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py index 402bc1445..afd2ab19d 100644 --- a/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py +++ b/official/nlp/albert/tf2_albert_encoder_checkpoint_converter.py @@ -86,7 +86,7 @@ def _create_albert_model(cfg): activation=activations.gelu, dropout_rate=cfg.hidden_dropout_prob, attention_dropout_rate=cfg.attention_probs_dropout_prob, - sequence_length=cfg.max_position_embeddings, + max_sequence_length=cfg.max_position_embeddings, type_vocab_size=cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range)) diff --git a/official/nlp/bert/bert_models.py b/official/nlp/bert/bert_models.py index 9d16150d0..807c96581 100644 --- a/official/nlp/bert/bert_models.py +++ b/official/nlp/bert/bert_models.py @@ -104,14 +104,14 @@ class BertPretrainLossAndMetricLayer(tf.keras.layers.Layer): @gin.configurable def get_transformer_encoder(bert_config, - sequence_length, + 
sequence_length=None, transformer_encoder_cls=None, output_range=None): """Gets a 'TransformerEncoder' object. Args: bert_config: A 'modeling.BertConfig' or 'modeling.AlbertConfig' object. - sequence_length: Maximum sequence length of the training data. + sequence_length: [Deprecated]. transformer_encoder_cls: A EncoderScaffold class. If it is None, uses the default BERT encoder implementation. output_range: the sequence output range, [0, output_range). Default setting @@ -120,13 +120,13 @@ def get_transformer_encoder(bert_config, Returns: A networks.TransformerEncoder object. """ + del sequence_length if transformer_encoder_cls is not None: # TODO(hongkuny): evaluate if it is better to put cfg definition in gin. embedding_cfg = dict( vocab_size=bert_config.vocab_size, type_vocab_size=bert_config.type_vocab_size, hidden_size=bert_config.hidden_size, - seq_length=sequence_length, max_seq_length=bert_config.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=bert_config.initializer_range), @@ -161,7 +161,6 @@ def get_transformer_encoder(bert_config, activation=tf_utils.get_activation(bert_config.hidden_act), dropout_rate=bert_config.hidden_dropout_prob, attention_dropout_rate=bert_config.attention_probs_dropout_prob, - sequence_length=sequence_length, max_sequence_length=bert_config.max_position_embeddings, type_vocab_size=bert_config.type_vocab_size, embedding_width=bert_config.embedding_size, diff --git a/official/nlp/bert/bert_models_test.py b/official/nlp/bert/bert_models_test.py index 93763b45b..0c6e3ec43 100644 --- a/official/nlp/bert/bert_models_test.py +++ b/official/nlp/bert/bert_models_test.py @@ -56,8 +56,6 @@ class BertModelsTest(tf.test.TestCase): # Expect two output from encoder: sequence and classification output. self.assertIsInstance(encoder.output, list) self.assertLen(encoder.output, 2) - # shape should be [batch size, seq_length, hidden_size] - self.assertEqual(encoder.output[0].shape.as_list(), [None, 5, 16]) # shape should be [batch size, hidden_size] self.assertEqual(encoder.output[1].shape.as_list(), [None, 16]) @@ -74,16 +72,12 @@ class BertModelsTest(tf.test.TestCase): # Expect two output from model: start positions and end positions self.assertIsInstance(model.output, list) self.assertLen(model.output, 2) - # shape should be [batch size, seq_length] - self.assertEqual(model.output[0].shape.as_list(), [None, 5]) - # shape should be [batch size, seq_length] - self.assertEqual(model.output[1].shape.as_list(), [None, 5]) # Expect two output from core_model: sequence and classification output. self.assertIsInstance(core_model.output, list) self.assertLen(core_model.output, 2) - # shape should be [batch size, seq_length, hidden_size] - self.assertEqual(core_model.output[0].shape.as_list(), [None, 5, 16]) + # shape should be [batch size, None, hidden_size] + self.assertEqual(core_model.output[0].shape.as_list(), [None, None, 16]) # shape should be [batch size, hidden_size] self.assertEqual(core_model.output[1].shape.as_list(), [None, 16]) @@ -104,8 +98,8 @@ class BertModelsTest(tf.test.TestCase): # Expect two output from core_model: sequence and classification output. 
self.assertIsInstance(core_model.output, list) self.assertLen(core_model.output, 2) - # shape should be [batch size, 1, hidden_size] - self.assertEqual(core_model.output[0].shape.as_list(), [None, 1, 16]) + # shape should be [batch size, None, hidden_size] + self.assertEqual(core_model.output[0].shape.as_list(), [None, None, 16]) # shape should be [batch size, hidden_size] self.assertEqual(core_model.output[1].shape.as_list(), [None, 16]) diff --git a/official/nlp/bert/tf2_encoder_checkpoint_converter.py b/official/nlp/bert/tf2_encoder_checkpoint_converter.py index b9edf7c4e..835a152f7 100644 --- a/official/nlp/bert/tf2_encoder_checkpoint_converter.py +++ b/official/nlp/bert/tf2_encoder_checkpoint_converter.py @@ -61,7 +61,7 @@ def _create_bert_model(cfg): activation=activations.gelu, dropout_rate=cfg.hidden_dropout_prob, attention_dropout_rate=cfg.attention_probs_dropout_prob, - sequence_length=cfg.max_position_embeddings, + max_sequence_length=cfg.max_position_embeddings, type_vocab_size=cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( stddev=cfg.initializer_range), diff --git a/official/nlp/configs/encoders.py b/official/nlp/configs/encoders.py index f2a4a10a4..b7467634a 100644 --- a/official/nlp/configs/encoders.py +++ b/official/nlp/configs/encoders.py @@ -54,7 +54,6 @@ def instantiate_encoder_from_cfg( vocab_size=config.vocab_size, type_vocab_size=config.type_vocab_size, hidden_size=config.hidden_size, - seq_length=None, max_seq_length=config.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( stddev=config.initializer_range), @@ -90,7 +89,6 @@ def instantiate_encoder_from_cfg( activation=tf_utils.get_activation(config.hidden_activation), dropout_rate=config.dropout_rate, attention_dropout_rate=config.attention_dropout_rate, - sequence_length=None, max_sequence_length=config.max_position_embeddings, type_vocab_size=config.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( diff --git a/official/nlp/modeling/layers/masked_lm_test.py b/official/nlp/modeling/layers/masked_lm_test.py index 12e28ec95..2297ce5fc 100644 --- a/official/nlp/modeling/layers/masked_lm_test.py +++ b/official/nlp/modeling/layers/masked_lm_test.py @@ -34,7 +34,6 @@ class MaskedLMTest(keras_parameterized.TestCase): def create_layer(self, vocab_size, - sequence_length, hidden_size, output='predictions', xformer_stack=None): @@ -44,7 +43,6 @@ class MaskedLMTest(keras_parameterized.TestCase): xformer_stack = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, num_layers=1, - sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) @@ -62,7 +60,6 @@ class MaskedLMTest(keras_parameterized.TestCase): num_predictions = 21 test_layer = self.create_layer( vocab_size=vocab_size, - sequence_length=sequence_length, hidden_size=hidden_size) # Make sure that the output tensor of the masked LM is the right shape. 
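[Note: the test deltas above all follow from one underlying change: the encoder's Keras Input layers are now built with shape=(None,), so the sequence dimension is resolved per call instead of being baked into the graph. A minimal standalone sketch of that behavior, assuming nothing beyond stock tf.keras; the toy model below is illustrative, not the BERT encoder itself:

import tensorflow as tf

# Toy functional model with a dynamic sequence dimension.
word_ids = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name='word_ids')
embeddings = tf.keras.layers.Embedding(100, 16)(word_ids)
toy_model = tf.keras.Model(word_ids, embeddings)

# One build handles any padded length, which is why the fixed seq_length
# assertions above became [None, None, 16].
print(toy_model(tf.zeros([2, 5], tf.int32)).shape)   # (2, 5, 16)
print(toy_model(tf.zeros([2, 12], tf.int32)).shape)  # (2, 12, 16)
]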
@@ -81,19 +78,16 @@ class MaskedLMTest(keras_parameterized.TestCase): xformer_stack = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, num_layers=1, - sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) test_layer = self.create_layer( vocab_size=vocab_size, - sequence_length=sequence_length, hidden_size=hidden_size, xformer_stack=xformer_stack, output='predictions') logit_layer = self.create_layer( vocab_size=vocab_size, - sequence_length=sequence_length, hidden_size=hidden_size, xformer_stack=xformer_stack, output='logits') @@ -134,7 +128,6 @@ class MaskedLMTest(keras_parameterized.TestCase): num_predictions = 21 test_layer = self.create_layer( vocab_size=vocab_size, - sequence_length=sequence_length, hidden_size=hidden_size) # Create a model from the masked LM layer. @@ -155,7 +148,7 @@ class MaskedLMTest(keras_parameterized.TestCase): def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = self.create_layer( - vocab_size=8, sequence_length=8, hidden_size=8, output='bad') + vocab_size=8, hidden_size=8, output='bad') if __name__ == '__main__': diff --git a/official/nlp/modeling/models/bert_classifier_test.py b/official/nlp/modeling/models/bert_classifier_test.py index 8e00c0313..b05ded47b 100644 --- a/official/nlp/modeling/models/bert_classifier_test.py +++ b/official/nlp/modeling/models/bert_classifier_test.py @@ -38,7 +38,7 @@ class BertClassifierTest(keras_parameterized.TestCase): vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( - vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + vocab_size=vocab_size, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_classifier.BertClassifier( @@ -62,7 +62,7 @@ class BertClassifierTest(keras_parameterized.TestCase): # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( - vocab_size=100, num_layers=2, sequence_length=2) + vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_classifier.BertClassifier( @@ -83,7 +83,7 @@ class BertClassifierTest(keras_parameterized.TestCase): # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( - vocab_size=100, num_layers=2, sequence_length=5) + vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) diff --git a/official/nlp/modeling/models/bert_pretrainer.py b/official/nlp/modeling/models/bert_pretrainer.py index b1ae8dc54..0dc67849f 100644 --- a/official/nlp/modeling/models/bert_pretrainer.py +++ b/official/nlp/modeling/models/bert_pretrainer.py @@ -94,7 +94,8 @@ class BertPretrainer(tf.keras.Model): if isinstance(cls_output, list): cls_output = cls_output[-1] sequence_output_length = sequence_output.shape.as_list()[1] - if sequence_output_length < num_token_predictions: + if sequence_output_length is not None and (sequence_output_length < + num_token_predictions): raise ValueError( "The passed network's output length is %s, which is less than the " 'requested num_token_predictions %s.' 
% diff --git a/official/nlp/modeling/models/bert_span_labeler_test.py b/official/nlp/modeling/models/bert_span_labeler_test.py index d05e91b52..6c6e143ff 100644 --- a/official/nlp/modeling/models/bert_span_labeler_test.py +++ b/official/nlp/modeling/models/bert_span_labeler_test.py @@ -36,7 +36,7 @@ class BertSpanLabelerTest(keras_parameterized.TestCase): vocab_size = 100 sequence_length = 512 test_network = networks.TransformerEncoder( - vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + vocab_size=vocab_size, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) @@ -59,9 +59,8 @@ class BertSpanLabelerTest(keras_parameterized.TestCase): """Validate compilation using explicit output names.""" # Build a transformer network to use within the BERT trainer. vocab_size = 100 - sequence_length = 512 test_network = networks.TransformerEncoder( - vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + vocab_size=vocab_size, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) @@ -81,7 +80,7 @@ class BertSpanLabelerTest(keras_parameterized.TestCase): # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( - vocab_size=100, num_layers=2, sequence_length=2) + vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. bert_trainer_model = bert_span_labeler.BertSpanLabeler(test_network) @@ -101,7 +100,7 @@ class BertSpanLabelerTest(keras_parameterized.TestCase): # Build a transformer network to use within the BERT trainer. (Here, we use # a short sequence_length for convenience.) test_network = networks.TransformerEncoder( - vocab_size=100, num_layers=2, sequence_length=5) + vocab_size=100, num_layers=2) # Create a BERT trainer with the created network. (Note that all the args # are different, so we can catch any serialization mismatches.) diff --git a/official/nlp/modeling/models/electra_pretrainer.py b/official/nlp/modeling/models/electra_pretrainer.py index f4ab8b901..9ee72365d 100644 --- a/official/nlp/modeling/models/electra_pretrainer.py +++ b/official/nlp/modeling/models/electra_pretrainer.py @@ -50,7 +50,6 @@ class ElectraPretrainer(tf.keras.Model): vocab_size: Size of generator output vocabulary num_classes: Number of classes to predict from the classification network for the generator network (not used now) - sequence_length: Input sequence length num_token_predictions: Number of tokens to predict from the masked LM. mlm_activation: The activation (if any) to use in the masked LM and classification networks. If None, no activation will be used. 
@@ -67,7 +66,6 @@ class ElectraPretrainer(tf.keras.Model): discriminator_network, vocab_size, num_classes, - sequence_length, num_token_predictions, mlm_activation=None, mlm_initializer='glorot_uniform', @@ -80,7 +78,6 @@ class ElectraPretrainer(tf.keras.Model): 'discriminator_network': discriminator_network, 'vocab_size': vocab_size, 'num_classes': num_classes, - 'sequence_length': sequence_length, 'num_token_predictions': num_token_predictions, 'mlm_activation': mlm_activation, 'mlm_initializer': mlm_initializer, @@ -94,7 +91,6 @@ class ElectraPretrainer(tf.keras.Model): self.discriminator_network = discriminator_network self.vocab_size = vocab_size self.num_classes = num_classes - self.sequence_length = sequence_length self.num_token_predictions = num_token_predictions self.mlm_activation = mlm_activation self.mlm_initializer = mlm_initializer diff --git a/official/nlp/modeling/models/electra_pretrainer_test.py b/official/nlp/modeling/models/electra_pretrainer_test.py index 408ff39d4..67fe37925 100644 --- a/official/nlp/modeling/models/electra_pretrainer_test.py +++ b/official/nlp/modeling/models/electra_pretrainer_test.py @@ -36,9 +36,13 @@ class ElectraPretrainerTest(keras_parameterized.TestCase): vocab_size = 100 sequence_length = 512 test_generator_network = networks.TransformerEncoder( - vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + vocab_size=vocab_size, + num_layers=2, + max_sequence_length=sequence_length) test_discriminator_network = networks.TransformerEncoder( - vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length) + vocab_size=vocab_size, + num_layers=2, + max_sequence_length=sequence_length) # Create a ELECTRA trainer with the created network. num_classes = 3 @@ -48,7 +52,6 @@ class ElectraPretrainerTest(keras_parameterized.TestCase): discriminator_network=test_discriminator_network, vocab_size=vocab_size, num_classes=num_classes, - sequence_length=sequence_length, num_token_predictions=num_token_predictions, disallow_correct=True) diff --git a/official/nlp/modeling/networks/albert_transformer_encoder.py b/official/nlp/modeling/networks/albert_transformer_encoder.py index 5787ac206..8bd12956e 100644 --- a/official/nlp/modeling/networks/albert_transformer_encoder.py +++ b/official/nlp/modeling/networks/albert_transformer_encoder.py @@ -53,9 +53,6 @@ class AlbertTransformerEncoder(tf.keras.Model): num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. - sequence_length: The sequence length that this encoder expects. If None, the - sequence length is dynamic; if an integer, the encoder will require - sequences padded to this length. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. 
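[Note: the docstring sentence above is the crux of this commit: only the positional-embedding table needs a static size, so max_sequence_length survives while sequence_length is dropped. A rough sketch of the pattern, assuming a lookup table sliced to the runtime length; the names here are hypothetical stand-ins, not the PositionEmbedding layer's actual API:

import tensorflow as tf

# Hypothetical position-embedding pattern: the variable is sized by the
# static max_sequence_length, then sliced to the length seen at call time.
max_sequence_length, width = 512, 16
position_table = tf.Variable(tf.zeros([max_sequence_length, width]))

def position_embeddings(inputs):
  # inputs: [batch, seq_length, width]; take the first seq_length rows.
  seq_length = tf.shape(inputs)[1]
  return position_table[:seq_length, :]
]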
@@ -74,8 +71,7 @@ class AlbertTransformerEncoder(tf.keras.Model): hidden_size=768, num_layers=12, num_attention_heads=12, - sequence_length=512, - max_sequence_length=None, + max_sequence_length=512, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, @@ -86,8 +82,6 @@ class AlbertTransformerEncoder(tf.keras.Model): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) - if not max_sequence_length: - max_sequence_length = sequence_length self._self_setattr_tracking = False self._config_dict = { 'vocab_size': vocab_size, @@ -95,7 +89,6 @@ class AlbertTransformerEncoder(tf.keras.Model): 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, - 'sequence_length': sequence_length, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, @@ -106,11 +99,11 @@ class AlbertTransformerEncoder(tf.keras.Model): } word_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') + shape=(None,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_mask') + shape=(None,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') + shape=(None,), dtype=tf.int32, name='input_type_ids') if embedding_width is None: embedding_width = hidden_size diff --git a/official/nlp/modeling/networks/albert_transformer_encoder_test.py b/official/nlp/modeling/networks/albert_transformer_encoder_test.py index 44368e494..48fcc3e2a 100644 --- a/official/nlp/modeling/networks/albert_transformer_encoder_test.py +++ b/official/nlp/modeling/networks/albert_transformer_encoder_test.py @@ -48,7 +48,6 @@ class AlbertTransformerEncoderTest(keras_parameterized.TestCase): kwargs = dict( vocab_size=100, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3) if expected_dtype == tf.float16: @@ -92,7 +91,6 @@ class AlbertTransformerEncoderTest(keras_parameterized.TestCase): vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types) @@ -123,7 +121,6 @@ class AlbertTransformerEncoderTest(keras_parameterized.TestCase): vocab_size=vocab_size, embedding_width=8, hidden_size=hidden_size, - sequence_length=sequence_length, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, @@ -141,7 +138,6 @@ class AlbertTransformerEncoderTest(keras_parameterized.TestCase): hidden_size=32, num_layers=3, num_attention_heads=2, - sequence_length=21, max_sequence_length=21, type_vocab_size=12, intermediate_size=1223, diff --git a/official/nlp/modeling/networks/encoder_scaffold.py b/official/nlp/modeling/networks/encoder_scaffold.py index 287f0d61f..3daa18b45 100644 --- a/official/nlp/modeling/networks/encoder_scaffold.py +++ b/official/nlp/modeling/networks/encoder_scaffold.py @@ -129,16 +129,17 @@ class EncoderScaffold(tf.keras.Model): embeddings, attention_mask = self._embedding_network(inputs) else: self._embedding_network = None + seq_length = embedding_cfg.get('seq_length', None) word_ids = tf.keras.layers.Input( - shape=(embedding_cfg['seq_length'],), + shape=(seq_length,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( - shape=(embedding_cfg['seq_length'],), + shape=(seq_length,), dtype=tf.int32, 
name='input_mask') type_ids = tf.keras.layers.Input( - shape=(embedding_cfg['seq_length'],), + shape=(seq_length,), dtype=tf.int32, name='input_type_ids') inputs = [word_ids, mask, type_ids] diff --git a/official/nlp/modeling/networks/transformer_encoder.py b/official/nlp/modeling/networks/transformer_encoder.py index 60605de6c..8c0026300 100644 --- a/official/nlp/modeling/networks/transformer_encoder.py +++ b/official/nlp/modeling/networks/transformer_encoder.py @@ -48,9 +48,8 @@ class TransformerEncoder(tf.keras.Model): num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. - sequence_length: The sequence length that this encoder expects. If None, the - sequence length is dynamic; if an integer, the encoder will require - sequences padded to this length. + sequence_length: [Deprecated]. TODO(hongkuny): remove this argument once no + user is using it. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. @@ -83,8 +82,8 @@ class TransformerEncoder(tf.keras.Model): hidden_size=768, num_layers=12, num_attention_heads=12, - sequence_length=512, - max_sequence_length=None, + sequence_length=None, + max_sequence_length=512, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, @@ -99,15 +98,12 @@ class TransformerEncoder(tf.keras.Model): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) - if not max_sequence_length: - max_sequence_length = sequence_length self._self_setattr_tracking = False self._config_dict = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, - 'sequence_length': sequence_length, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, @@ -121,11 +117,11 @@ class TransformerEncoder(tf.keras.Model): } word_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') + shape=(None,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_mask') + shape=(None,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( - shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') + shape=(None,), dtype=tf.int32, name='input_type_ids') if embedding_width is None: embedding_width = hidden_size diff --git a/official/nlp/modeling/networks/transformer_encoder_test.py b/official/nlp/modeling/networks/transformer_encoder_test.py index e9fbc3aaa..69a4e2b62 100644 --- a/official/nlp/modeling/networks/transformer_encoder_test.py +++ b/official/nlp/modeling/networks/transformer_encoder_test.py @@ -42,7 +42,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=100, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). 
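Because the Input layers are now declared with shape=(None,), a single encoder instance can consume batches of different padded lengths, which is what the tests below no longer pinning `sequence_length` rely on. A small sketch under the same assumptions as above (sizes are illustrative):

    import numpy as np
    from official.nlp.modeling import networks

    encoder = networks.TransformerEncoder(
        vocab_size=100, hidden_size=32, num_attention_heads=2, num_layers=3)

    # The same model instance accepts different padded lengths, as long as
    # they do not exceed max_sequence_length (512 by default).
    for seq_len in (16, 64):
      ids = np.random.randint(100, size=(2, seq_len))
      mask = np.ones((2, seq_len), dtype=np.int32)
      type_ids = np.zeros((2, seq_len), dtype=np.int32)
      sequence_output, cls_output = encoder([ids, mask, type_ids])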
@@ -71,7 +70,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=100, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3, return_all_encoder_outputs=True) @@ -100,7 +98,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=100, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3) # Create the inputs (note that the first dimension is implicit). @@ -132,7 +129,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, - sequence_length=sequence_length, num_attention_heads=2, num_layers=3, type_vocab_size=num_types, @@ -163,7 +159,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, - sequence_length=sequence_length, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, @@ -177,7 +172,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): test_network = transformer_encoder.TransformerEncoder( vocab_size=vocab_size, hidden_size=hidden_size, - sequence_length=sequence_length, max_sequence_length=max_sequence_length, num_attention_heads=2, num_layers=3, @@ -196,7 +190,6 @@ class TransformerEncoderTest(keras_parameterized.TestCase): hidden_size=32, num_layers=3, num_attention_heads=2, - sequence_length=21, max_sequence_length=21, type_vocab_size=12, intermediate_size=1223, diff --git a/official/nlp/nhnet/models.py b/official/nlp/nhnet/models.py index e091fe6be..c8190b5ba 100644 --- a/official/nlp/nhnet/models.py +++ b/official/nlp/nhnet/models.py @@ -413,7 +413,6 @@ def get_bert2bert_layers(params: configs.BERT2BERTConfig): activation=tf_utils.get_activation(bert_config.hidden_act), dropout_rate=bert_config.hidden_dropout_prob, attention_dropout_rate=bert_config.attention_probs_dropout_prob, - sequence_length=None, max_sequence_length=bert_config.max_position_embeddings, type_vocab_size=bert_config.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( -- GitLab From 5eb294f84bd3f415b548980e69fee63db1f6f1df Mon Sep 17 00:00:00 2001 From: Abdullah Rashwan Date: Thu, 30 Jul 2020 20:43:27 -0700 Subject: [PATCH 083/128] Internal change PiperOrigin-RevId: 324140487 --- official/core/base_task.py | 47 ------------- official/core/exp_factory.py | 37 ++++++++++ official/core/task_factory.py | 68 +++++++++++++++++++ .../hyperparams/config_definitions.py | 14 ---- official/nlp/tasks/electra_task.py | 3 +- official/nlp/tasks/masked_lm.py | 3 +- official/nlp/tasks/question_answering.py | 3 +- official/nlp/tasks/sentence_prediction.py | 3 +- official/nlp/tasks/tagging.py | 3 +- 9 files changed, 115 insertions(+), 66 deletions(-) create mode 100644 official/core/exp_factory.py create mode 100644 official/core/task_factory.py diff --git a/official/core/base_task.py b/official/core/base_task.py index dc2633f97..76ebd8e14 100644 --- a/official/core/base_task.py +++ b/official/core/base_task.py @@ -23,7 +23,6 @@ import six import tensorflow as tf from official.modeling.hyperparams import config_definitions as cfg -from official.utils import registry @six.add_metaclass(abc.ABCMeta) @@ -295,49 +294,3 @@ class Task(tf.Module): """Optional reduce of aggregated logs over validation steps.""" return {} - 
-_REGISTERED_TASK_CLS = {} - - -# TODO(b/158268740): Move these outside the base class file. -# TODO(b/158741360): Add type annotations once pytype checks across modules. -def register_task_cls(task_config_cls): - """Decorates a factory of Tasks for lookup by a subclass of TaskConfig. - - This decorator supports registration of tasks as follows: - - ``` - @dataclasses.dataclass - class MyTaskConfig(TaskConfig): - # Add fields here. - pass - - @register_task_cls(MyTaskConfig) - class MyTask(Task): - # Inherits def __init__(self, task_config). - pass - - my_task_config = MyTaskConfig() - my_task = get_task(my_task_config) # Returns MyTask(my_task_config). - ``` - - Besisdes a class itself, other callables that create a Task from a TaskConfig - can be decorated by the result of this function, as long as there is at most - one registration for each config class. - - Args: - task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig). - Each task_config_cls can only be used for a single registration. - - Returns: - A callable for use as class decorator that registers the decorated class - for creation from an instance of task_config_cls. - """ - return registry.register(_REGISTERED_TASK_CLS, task_config_cls) - - -# The user-visible get_task() is defined after classes have been registered. -# TODO(b/158741360): Add type annotations once pytype checks across modules. -def get_task_cls(task_config_cls): - task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls) - return task_cls diff --git a/official/core/exp_factory.py b/official/core/exp_factory.py new file mode 100644 index 000000000..8270565b7 --- /dev/null +++ b/official/core/exp_factory.py @@ -0,0 +1,37 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experiment factory methods.""" + +from official.modeling.hyperparams import config_definitions as cfg +from official.utils import registry + + +_REGISTERED_CONFIGS = {} + + +def register_config_factory(name): + """Register ExperimentConfig factory method.""" + return registry.register(_REGISTERED_CONFIGS, name) + + +def get_exp_config_creater(exp_name: str): + """Looks up ExperimentConfig factory methods.""" + exp_creater = registry.lookup(_REGISTERED_CONFIGS, exp_name) + return exp_creater + + +def get_exp_config(exp_name: str) -> cfg.ExperimentConfig: + return get_exp_config_creater(exp_name)() diff --git a/official/core/task_factory.py b/official/core/task_factory.py new file mode 100644 index 000000000..394031ae9 --- /dev/null +++ b/official/core/task_factory.py @@ -0,0 +1,68 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A global factory to register and access all registered tasks."""
+
+from official.utils import registry
+
+_REGISTERED_TASK_CLS = {}
+
+
+# TODO(b/158741360): Add type annotations once pytype checks across modules.
+def register_task_cls(task_config_cls):
+  """Decorates a factory of Tasks for lookup by a subclass of TaskConfig.
+
+  This decorator supports registration of tasks as follows:
+
+  ```
+  @dataclasses.dataclass
+  class MyTaskConfig(TaskConfig):
+    # Add fields here.
+    pass
+
+  @register_task_cls(MyTaskConfig)
+  class MyTask(Task):
+    # Inherits def __init__(self, task_config).
+    pass
+
+  my_task_config = MyTaskConfig()
+  my_task = get_task(my_task_config)  # Returns MyTask(my_task_config).
+  ```
+
+  Besides a class itself, other callables that create a Task from a TaskConfig
+  can be decorated by the result of this function, as long as there is at most
+  one registration for each config class.
+
+  Args:
+    task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig).
+      Each task_config_cls can only be used for a single registration.
+
+  Returns:
+    A callable for use as class decorator that registers the decorated class
+    for creation from an instance of task_config_cls.
+  """
+  return registry.register(_REGISTERED_TASK_CLS, task_config_cls)
+
+
+def get_task(task_config, **kwargs):
+  """Creates a Task (of suitable subclass type) from task_config."""
+  return get_task_cls(task_config.__class__)(task_config, **kwargs)
+
+
+# The user-visible get_task() is defined after classes have been registered.
+# TODO(b/158741360): Add type annotations once pytype checks across modules.
+def get_task_cls(task_config_cls): + task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls) + return task_cls diff --git a/official/modeling/hyperparams/config_definitions.py b/official/modeling/hyperparams/config_definitions.py index e9f7f60f4..c58b1de7f 100644 --- a/official/modeling/hyperparams/config_definitions.py +++ b/official/modeling/hyperparams/config_definitions.py @@ -21,7 +21,6 @@ import dataclasses from official.modeling.hyperparams import base_config from official.modeling.optimization.configs import optimization_config -from official.utils import registry OptimizationConfig = optimization_config.OptimizationConfig @@ -219,16 +218,3 @@ class ExperimentConfig(base_config.Config): trainer: TrainerConfig = TrainerConfig() runtime: RuntimeConfig = RuntimeConfig() - -_REGISTERED_CONFIGS = {} - - -def register_config_factory(name): - """Register ExperimentConfig factory method.""" - return registry.register(_REGISTERED_CONFIGS, name) - - -def get_exp_config_creater(exp_name: str): - """Looks up ExperimentConfig factory methods.""" - exp_creater = registry.lookup(_REGISTERED_CONFIGS, exp_name) - return exp_creater diff --git a/official/nlp/tasks/electra_task.py b/official/nlp/tasks/electra_task.py index 4eb16e4c3..a34cabf08 100644 --- a/official/nlp/tasks/electra_task.py +++ b/official/nlp/tasks/electra_task.py @@ -18,6 +18,7 @@ import dataclasses import tensorflow as tf from official.core import base_task +from official.core import task_factory from official.modeling.hyperparams import config_definitions as cfg from official.nlp.configs import bert from official.nlp.configs import electra @@ -39,7 +40,7 @@ class ELECTRAPretrainConfig(cfg.TaskConfig): validation_data: cfg.DataConfig = cfg.DataConfig() -@base_task.register_task_cls(ELECTRAPretrainConfig) +@task_factory.register_task_cls(ELECTRAPretrainConfig) class ELECTRAPretrainTask(base_task.Task): """ELECTRA Pretrain Task (Masked LM + Replaced Token Detection).""" diff --git a/official/nlp/tasks/masked_lm.py b/official/nlp/tasks/masked_lm.py index ebf01278f..b42c95192 100644 --- a/official/nlp/tasks/masked_lm.py +++ b/official/nlp/tasks/masked_lm.py @@ -18,6 +18,7 @@ import dataclasses import tensorflow as tf from official.core import base_task +from official.core import task_factory from official.modeling.hyperparams import config_definitions as cfg from official.nlp.configs import bert from official.nlp.data import data_loader_factory @@ -34,7 +35,7 @@ class MaskedLMConfig(cfg.TaskConfig): validation_data: cfg.DataConfig = cfg.DataConfig() -@base_task.register_task_cls(MaskedLMConfig) +@task_factory.register_task_cls(MaskedLMConfig) class MaskedLMTask(base_task.Task): """Mock task object for testing.""" diff --git a/official/nlp/tasks/question_answering.py b/official/nlp/tasks/question_answering.py index f862fb837..aaa20e854 100644 --- a/official/nlp/tasks/question_answering.py +++ b/official/nlp/tasks/question_answering.py @@ -23,6 +23,7 @@ import tensorflow as tf import tensorflow_hub as hub from official.core import base_task +from official.core import task_factory from official.modeling.hyperparams import base_config from official.modeling.hyperparams import config_definitions as cfg from official.nlp.bert import squad_evaluate_v1_1 @@ -57,7 +58,7 @@ class QuestionAnsweringConfig(cfg.TaskConfig): validation_data: cfg.DataConfig = cfg.DataConfig() -@base_task.register_task_cls(QuestionAnsweringConfig) +@task_factory.register_task_cls(QuestionAnsweringConfig) class QuestionAnsweringTask(base_task.Task): """Task 
object for question answering.""" diff --git a/official/nlp/tasks/sentence_prediction.py b/official/nlp/tasks/sentence_prediction.py index 907b83ed1..f8cfefdb1 100644 --- a/official/nlp/tasks/sentence_prediction.py +++ b/official/nlp/tasks/sentence_prediction.py @@ -26,6 +26,7 @@ import tensorflow as tf import tensorflow_hub as hub from official.core import base_task +from official.core import task_factory from official.modeling.hyperparams import base_config from official.modeling.hyperparams import config_definitions as cfg from official.nlp.configs import encoders @@ -62,7 +63,7 @@ class SentencePredictionConfig(cfg.TaskConfig): validation_data: cfg.DataConfig = cfg.DataConfig() -@base_task.register_task_cls(SentencePredictionConfig) +@task_factory.register_task_cls(SentencePredictionConfig) class SentencePredictionTask(base_task.Task): """Task object for sentence_prediction.""" diff --git a/official/nlp/tasks/tagging.py b/official/nlp/tasks/tagging.py index 70a983b94..d1a63a610 100644 --- a/official/nlp/tasks/tagging.py +++ b/official/nlp/tasks/tagging.py @@ -25,6 +25,7 @@ import tensorflow as tf import tensorflow_hub as hub from official.core import base_task +from official.core import task_factory from official.modeling.hyperparams import base_config from official.modeling.hyperparams import config_definitions as cfg from official.nlp.configs import encoders @@ -80,7 +81,7 @@ def _masked_labels_and_weights(y_true): return masked_y_true, tf.cast(mask, tf.float32) -@base_task.register_task_cls(TaggingConfig) +@task_factory.register_task_cls(TaggingConfig) class TaggingTask(base_task.Task): """Task object for tagging (e.g., NER or POS).""" -- GitLab From a8ba923c873f9848d0f6453f3e2e3fa2dd1187dc Mon Sep 17 00:00:00 2001 From: Jaeyoun Kim Date: Thu, 30 Jul 2020 21:14:21 -0700 Subject: [PATCH 084/128] Deprecate old models (#8934) Deprecate old models --- research/README.md | 73 +- research/adv_imagenet_models/README.md | 91 - .../eval_on_adversarial.py | 331 - research/adv_imagenet_models/imagenet.py | 118 - .../inception_resnet_v2.py | 358 - research/adversarial_crypto/README.md | 62 - research/adversarial_crypto/train_eval.py | 276 - research/adversarial_logit_pairing/README.md | 281 - .../adversarial_attack.py | 219 - .../datasets/__init__.py | 0 .../datasets/dataset_factory.py | 62 - .../datasets/imagenet_input.py | 255 - .../datasets/tiny_imagenet_input.py | 157 - research/adversarial_logit_pairing/eval.py | 181 - .../adversarial_logit_pairing/model_lib.py | 189 - .../tiny_imagenet_converter/converter.py | 241 - research/adversarial_logit_pairing/train.py | 288 - .../AdditiveGaussianNoiseAutoencoderRunner.py | 58 - research/autoencoder/AutoencoderRunner.py | 55 - .../MaskingNoiseAutoencoderRunner.py | 55 - research/autoencoder/README.md | 3 - .../VariationalAutoencoderRunner.py | 56 - research/autoencoder/__init__.py | 0 .../autoencoder_models/Autoencoder.py | 91 - .../DenoisingAutoencoder.py | 129 - .../VariationalAutoencoder.py | 70 - .../autoencoder_models/__init__.py | 0 research/brain_coder/README.md | 34 - research/brain_coder/WORKSPACE | 5 - research/brain_coder/common/BUILD | 106 - research/brain_coder/common/bf.py | 234 - research/brain_coder/common/bf_test.py | 137 - research/brain_coder/common/config_lib.py | 337 - .../brain_coder/common/config_lib_test.py | 425 - research/brain_coder/common/reward.py | 390 - research/brain_coder/common/reward_test.py | 311 - research/brain_coder/common/rollout.py | 306 - research/brain_coder/common/rollout_test.py | 129 - 
research/brain_coder/common/schedules.py | 301 - research/brain_coder/common/schedules_test.py | 139 - research/brain_coder/common/utils.py | 558 - research/brain_coder/common/utils_test.py | 382 - research/brain_coder/single_task/BUILD | 244 - research/brain_coder/single_task/README.md | 192 - .../aggregate_experiment_results.py | 380 - .../single_task/aggregate_tuning_results.py | 71 - .../brain_coder/single_task/code_tasks.py | 1381 - .../single_task/code_tasks_test.py | 108 - research/brain_coder/single_task/data.py | 89 - research/brain_coder/single_task/defaults.py | 82 - research/brain_coder/single_task/ga_lib.py | 472 - research/brain_coder/single_task/ga_train.py | 324 - .../brain_coder/single_task/ga_train_test.py | 51 - .../single_task/launch_training.sh | 72 - .../brain_coder/single_task/launch_tuning.sh | 87 - research/brain_coder/single_task/misc.py | 149 - research/brain_coder/single_task/pg_agent.py | 1297 - .../brain_coder/single_task/pg_agent_test.py | 395 - research/brain_coder/single_task/pg_train.py | 782 - .../brain_coder/single_task/pg_train_test.py | 87 - .../brain_coder/single_task/results_lib.py | 155 - .../single_task/results_lib_test.py | 84 - research/brain_coder/single_task/run.py | 142 - .../brain_coder/single_task/run_eval_tasks.py | 296 - .../brain_coder/single_task/test_tasks.py | 127 - .../single_task/test_tasks_test.py | 63 - research/brain_coder/single_task/tune.py | 262 - .../cognitive_mapping_and_planning/.gitignore | 4 - .../cognitive_mapping_and_planning/README.md | 127 - .../__init__.py | 0 .../cfgs/__init__.py | 0 .../cfgs/config_cmp.py | 283 - .../cfgs/config_common.py | 261 - .../cfgs/config_distill.py | 114 - .../cfgs/config_vision_baseline.py | 173 - .../data/.gitignore | 3 - .../data/README.md | 33 - .../datasets/__init__.py | 0 .../datasets/factory.py | 113 - .../datasets/nav_env.py | 1465 - .../datasets/nav_env_config.py | 127 - .../matplotlibrc | 1 - .../output/.gitignore | 1 - .../output/README.md | 16 - .../patches/GLES2_2_0.py.patch | 14 - .../patches/apply_patches.sh | 18 - .../patches/ctypesloader.py.patch | 15 - .../render/__init__.py | 0 .../render/depth_rgb_encoded.fp | 30 - .../render/depth_rgb_encoded.vp | 15 - .../render/rgb_flat_color.fp | 11 - .../render/rgb_flat_color.vp | 18 - .../render/swiftshader_renderer.py | 427 - .../requirements.txt | 9 - .../scripts/__init__.py | 0 .../scripts/script_distill.py | 177 - .../scripts/script_download_init_models.sh | 18 - .../scripts/script_env_vis.py | 186 - .../scripts/script_nav_agent_release.py | 253 - .../scripts/script_plot_trajectory.py | 339 - .../script_preprocess_annoations_S3DIS.py | 197 - .../script_preprocess_annoations_S3DIS.sh | 24 - .../scripts/script_preprocess_meshes_S3DIS.sh | 37 - .../scripts/script_test_pretrained_models.sh | 63 - .../src/__init__.py | 0 .../src/depth_utils.py | 96 - .../src/file_utils.py | 42 - .../src/graph_utils.py | 552 - .../src/map_utils.py | 245 - .../src/rotation_utils.py | 73 - .../src/utils.py | 168 - .../tfcode/__init__.py | 0 .../tfcode/cmp.py | 553 - .../tfcode/cmp_summary.py | 213 - .../tfcode/cmp_utils.py | 164 - .../tfcode/nav_utils.py | 435 - .../tfcode/tf_utils.py | 840 - .../tfcode/vision_baseline_lstm.py | 533 - research/compression/README.md | 19 - research/compression/entropy_coder/README.md | 109 - .../compression/entropy_coder/__init__.py | 0 .../entropy_coder/all_models/__init__.py | 0 .../entropy_coder/all_models/all_models.py | 19 - .../all_models/all_models_test.py | 68 - .../configs/gru_prime3/model_config.json | 4 - 
.../configs/synthetic/input_config.json | 4 - .../configs/synthetic/model_config.json | 4 - .../configs/synthetic/train_config.json | 6 - .../entropy_coder/core/code_loader.py | 73 - .../entropy_coder/core/config_helper.py | 52 - .../core/entropy_coder_single.py | 116 - .../entropy_coder/core/entropy_coder_train.py | 184 - .../dataset/gen_synthetic_dataset.py | 89 - .../dataset/gen_synthetic_single.py | 72 - .../entropy_coder/dataset/synthetic_model.py | 75 - .../compression/entropy_coder/lib/__init__.py | 0 .../entropy_coder/lib/block_base.py | 258 - .../entropy_coder/lib/block_util.py | 101 - .../compression/entropy_coder/lib/blocks.py | 24 - .../entropy_coder/lib/blocks_binarizer.py | 35 - .../lib/blocks_entropy_coding.py | 49 - .../lib/blocks_entropy_coding_test.py | 56 - .../entropy_coder/lib/blocks_lstm.py | 263 - .../entropy_coder/lib/blocks_lstm_test.py | 113 - .../entropy_coder/lib/blocks_masked_conv2d.py | 226 - .../lib/blocks_masked_conv2d_lstm.py | 79 - .../lib/blocks_masked_conv2d_test.py | 207 - .../entropy_coder/lib/blocks_operator.py | 87 - .../entropy_coder/lib/blocks_operator_test.py | 64 - .../entropy_coder/lib/blocks_std.py | 363 - .../entropy_coder/lib/blocks_std_test.py | 340 - .../entropy_coder/model/__init__.py | 0 .../model/entropy_coder_model.py | 55 - .../entropy_coder/model/model_factory.py | 53 - .../entropy_coder/progressive/__init__.py | 0 .../entropy_coder/progressive/progressive.py | 242 - research/compression/image_encoder/README.md | 105 - research/compression/image_encoder/decoder.py | 127 - research/compression/image_encoder/encoder.py | 105 - .../compression/image_encoder/example.png | Bin 3155141 -> 0 bytes research/compression/image_encoder/msssim.py | 217 - research/deep_contextual_bandits/README.md | 444 - .../algorithms/bb_alpha_divergence_model.py | 373 - .../bf_variational_neural_bandit_model.py | 352 - .../algorithms/bootstrapped_bnn_sampling.py | 98 - .../algorithms/fixed_policy_sampling.py | 51 - .../linear_full_posterior_sampling.py | 164 - .../bandits/algorithms/multitask_gp.py | 374 - .../bandits/algorithms/neural_bandit_model.py | 220 - .../algorithms/neural_linear_sampling.py | 180 - .../algorithms/parameter_noise_sampling.py | 187 - .../algorithms/posterior_bnn_sampling.py | 92 - .../bandits/algorithms/uniform_sampling.py | 43 - .../variational_neural_bandit_model.py | 346 - .../bandits/core/bandit_algorithm.py | 34 - .../bandits/core/bayesian_nn.py | 36 - .../bandits/core/contextual_bandit.py | 125 - .../bandits/core/contextual_dataset.py | 166 - .../bandits/data/data_sampler.py | 374 - .../bandits/data/synthetic_data_sampler.py | 179 - .../deep_contextual_bandits/example_main.py | 454 - research/domain_adaptation/README.md | 124 - research/domain_adaptation/WORKSPACE | 0 research/domain_adaptation/__init__.py | 0 research/domain_adaptation/datasets/BUILD | 45 - .../domain_adaptation/datasets/__init__.py | 0 .../datasets/dataset_factory.py | 107 - .../datasets/download_and_convert_mnist_m.py | 237 - .../domain_adaptation/datasets/mnist_m.py | 98 - .../domain_adaptation/domain_separation/BUILD | 157 - .../domain_separation/__init__.py | 0 .../domain_separation/_grl_ops.so | Bin 26002 -> 0 bytes .../domain_separation/dsn.py | 355 - .../domain_separation/dsn_eval.py | 161 - .../domain_separation/dsn_test.py | 157 - .../domain_separation/dsn_train.py | 278 - .../domain_separation/grl_op_grads.py | 34 - .../domain_separation/grl_op_kernels.cc | 47 - .../domain_separation/grl_op_shapes.py | 16 - .../domain_separation/grl_ops.cc | 36 - 
.../domain_separation/grl_ops.py | 28 - .../domain_separation/grl_ops_test.py | 73 - .../domain_separation/losses.py | 290 - .../domain_separation/losses_test.py | 110 - .../domain_separation/models.py | 443 - .../domain_separation/models_test.py | 167 - .../domain_separation/utils.py | 183 - .../pixel_domain_adaptation/BUILD | 90 - .../pixel_domain_adaptation/README.md | 0 .../pixel_domain_adaptation/baselines/BUILD | 23 - .../baselines/README.md | 60 - .../baselines/baseline_eval.py | 141 - .../baselines/baseline_train.py | 161 - .../pixel_domain_adaptation/hparams.py | 201 - .../pixel_domain_adaptation/pixelda_eval.py | 298 - .../pixel_domain_adaptation/pixelda_losses.py | 385 - .../pixel_domain_adaptation/pixelda_model.py | 713 - .../pixelda_preprocess.py | 129 - .../pixelda_preprocess_test.py | 69 - .../pixelda_task_towers.py | 317 - .../pixel_domain_adaptation/pixelda_train.py | 409 - .../pixel_domain_adaptation/pixelda_utils.py | 195 - research/feelvos/CONTRIBUTING.md | 28 - research/feelvos/LICENSE | 202 - research/feelvos/README.md | 102 - research/feelvos/__init__.py | 14 - research/feelvos/common.py | 163 - research/feelvos/correlation_cost/README.md | 36 - research/feelvos/correlation_cost/build.sh | 37 - .../correlation_cost/clone_dependencies.sh | 31 - research/feelvos/correlation_cost/compile.sh | 46 - research/feelvos/correlation_cost/fix_code.sh | 33 - research/feelvos/correlation_cost/get_code.sh | 32 - research/feelvos/datasets/__init__.py | 14 - .../feelvos/datasets/build_davis2017_data.py | 163 - .../datasets/download_and_convert_davis17.sh | 77 - .../datasets/tfsequence_example_decoder.py | 118 - research/feelvos/datasets/video_dataset.py | 196 - research/feelvos/eval.sh | 86 - research/feelvos/input_preprocess.py | 280 - research/feelvos/model.py | 480 - research/feelvos/train.py | 630 - research/feelvos/train.sh | 92 - research/feelvos/utils/__init__.py | 14 - research/feelvos/utils/embedding_utils.py | 1082 - .../feelvos/utils/embedding_utils_test.py | 213 - research/feelvos/utils/eval_utils.py | 153 - research/feelvos/utils/mask_damaging.py | 176 - research/feelvos/utils/train_utils.py | 269 - .../feelvos/utils/video_input_generator.py | 558 - research/feelvos/vis_video.py | 500 - research/fivo/.gitattributes | 2 - research/fivo/.gitignore | 104 - research/fivo/README.md | 215 - research/fivo/bin/download_pianorolls.sh | 30 - research/fivo/bin/run_eval.sh | 29 - research/fivo/bin/run_sample.sh | 33 - research/fivo/bin/run_tests.sh | 25 - research/fivo/bin/run_train.sh | 31 - research/fivo/experimental/README.md | 1 - research/fivo/experimental/bounds.py | 673 - research/fivo/experimental/data.py | 192 - research/fivo/experimental/models.py | 1227 - research/fivo/experimental/run.sh | 54 - research/fivo/experimental/summary_utils.py | 332 - research/fivo/experimental/train.py | 637 - research/fivo/fivo/__init__.py | 0 research/fivo/fivo/bounds.py | 317 - research/fivo/fivo/bounds_test.py | 183 - research/fivo/fivo/data/__init__.py | 0 .../fivo/data/calculate_pianoroll_mean.py | 65 - .../fivo/fivo/data/create_timit_dataset.py | 180 - research/fivo/fivo/data/datasets.py | 453 - research/fivo/fivo/data/datasets_test.py | 303 - research/fivo/fivo/ghmm_runners.py | 235 - research/fivo/fivo/ghmm_runners_test.py | 106 - research/fivo/fivo/models/__init__.py | 0 research/fivo/fivo/models/base.py | 342 - research/fivo/fivo/models/ghmm.py | 483 - research/fivo/fivo/models/ghmm_test.py | 313 - research/fivo/fivo/models/srnn.py | 587 - research/fivo/fivo/models/srnn_test.py | 
105 - research/fivo/fivo/models/vrnn.py | 572 - research/fivo/fivo/models/vrnn_test.py | 137 - research/fivo/fivo/nested_utils.py | 139 - research/fivo/fivo/nested_utils_test.py | 125 - research/fivo/fivo/runners.py | 489 - research/fivo/fivo/runners_test.py | 242 - research/fivo/fivo/smc.py | 338 - research/fivo/fivo/smc_test.py | 241 - .../fivo/fivo/test_data/tiny_pianoroll.pkl | 10979 ---- .../test_data/tiny_speech_dataset.tfrecord | Bin 144 -> 0 bytes research/fivo/fivo/test_utils.py | 144 - research/fivo/run_fivo.py | 142 - research/global_objectives/README.md | 152 - research/global_objectives/loss_layers.py | 930 - .../global_objectives/loss_layers_example.py | 211 - .../global_objectives/loss_layers_test.py | 1379 - research/global_objectives/test_all.py | 37 - research/global_objectives/util.py | 348 - research/global_objectives/util_test.py | 333 - research/im2txt/.gitignore | 7 - research/im2txt/README.md | 342 - research/im2txt/WORKSPACE | 1 - .../conda-env/ubuntu-18-04-environment.yaml | 142 - .../g3doc/COCO_val2014_000000224477.jpg | Bin 194898 -> 0 bytes research/im2txt/g3doc/example_captions.jpg | Bin 430666 -> 0 bytes .../g3doc/show_and_tell_architecture.png | Bin 900803 -> 0 bytes research/im2txt/im2txt/BUILD | 96 - research/im2txt/im2txt/configuration.py | 104 - .../im2txt/im2txt/data/build_mscoco_data.py | 483 - .../data/download_and_preprocess_mscoco.sh | 90 - research/im2txt/im2txt/evaluate.py | 198 - research/im2txt/im2txt/inference_utils/BUILD | 31 - .../inference_utils/caption_generator.py | 213 - .../inference_utils/caption_generator_test.py | 178 - .../inference_utils/inference_wrapper_base.py | 181 - .../im2txt/inference_utils/vocabulary.py | 78 - research/im2txt/im2txt/inference_wrapper.py | 51 - research/im2txt/im2txt/ops/BUILD | 32 - research/im2txt/im2txt/ops/image_embedding.py | 114 - .../im2txt/im2txt/ops/image_embedding_test.py | 136 - .../im2txt/im2txt/ops/image_processing.py | 133 - research/im2txt/im2txt/ops/inputs.py | 204 - research/im2txt/im2txt/run_inference.py | 85 - research/im2txt/im2txt/show_and_tell_model.py | 358 - .../im2txt/im2txt/show_and_tell_model_test.py | 200 - research/im2txt/im2txt/train.py | 114 - research/inception/.gitignore | 7 - research/inception/README.md | 858 - research/inception/WORKSPACE | 1 - .../g3doc/inception_v3_architecture.png | Bin 346842 -> 0 bytes research/inception/inception/BUILD | 198 - .../inception/data/build_image_data.py | 436 - .../inception/data/build_imagenet_data.py | 707 - .../data/download_and_preprocess_flowers.sh | 96 - .../download_and_preprocess_flowers_mac.sh | 96 - .../data/download_and_preprocess_imagenet.sh | 101 - .../inception/data/download_imagenet.sh | 104 - ...imagenet_2012_validation_synset_labels.txt | 50000 ---------------- .../data/imagenet_lsvrc_2015_synsets.txt | 1000 - .../inception/data/imagenet_metadata.txt | 21842 ------- .../preprocess_imagenet_validation_data.py | 89 - .../inception/data/process_bounding_boxes.py | 254 - research/inception/inception/dataset.py | 103 - research/inception/inception/flowers_data.py | 52 - research/inception/inception/flowers_eval.py | 40 - research/inception/inception/flowers_train.py | 41 - .../inception/inception/image_processing.py | 513 - research/inception/inception/imagenet_data.py | 59 - .../inception/imagenet_distributed_train.py | 66 - research/inception/inception/imagenet_eval.py | 46 - .../inception/inception/imagenet_train.py | 41 - .../inception/inception_distributed_train.py | 314 - .../inception/inception/inception_eval.py | 171 - 
.../inception/inception/inception_model.py | 157 - .../inception/inception/inception_train.py | 357 - research/inception/inception/slim/BUILD | 112 - research/inception/inception/slim/README.md | 621 - .../inception/slim/collections_test.py | 181 - .../inception/slim/inception_model.py | 356 - .../inception/slim/inception_test.py | 134 - research/inception/inception/slim/losses.py | 174 - .../inception/inception/slim/losses_test.py | 177 - research/inception/inception/slim/ops.py | 473 - research/inception/inception/slim/ops_test.py | 687 - research/inception/inception/slim/scopes.py | 170 - .../inception/inception/slim/scopes_test.py | 162 - research/inception/inception/slim/slim.py | 24 - .../inception/inception/slim/variables.py | 289 - .../inception/slim/variables_test.py | 392 - research/keypointnet/CONTRIBUTING.md | 28 - research/keypointnet/LICENSE | 202 - research/keypointnet/README.md | 46 - research/keypointnet/main.py | 697 - research/keypointnet/tools/gen_tfrecords.py | 99 - research/keypointnet/tools/render.py | 310 - research/keypointnet/utils.py | 307 - research/learned_optimizer/.gitignore | 0 research/learned_optimizer/BUILD | 33 - research/learned_optimizer/README.md | 47 - research/learned_optimizer/metaopt.py | 639 - research/learned_optimizer/metarun.py | 394 - research/learned_optimizer/optimizer/BUILD | 69 - .../optimizer/coordinatewise_rnn.py | 316 - .../optimizer/global_learning_rate.py | 40 - .../optimizer/hierarchical_rnn.py | 792 - .../optimizer/learning_rate_schedule.py | 60 - .../learned_optimizer/optimizer/rnn_cells.py | 68 - .../optimizer/trainable_adam.py | 210 - .../optimizer/trainable_optimizer.py | 574 - research/learned_optimizer/optimizer/utils.py | 278 - research/learned_optimizer/problems/BUILD | 43 - .../learned_optimizer/problems/datasets.py | 218 - .../problems/model_adapter.py | 190 - .../problems/problem_generator.py | 1016 - .../problems/problem_sets.py | 561 - .../problems/problem_spec.py | 33 - .../README.md | 61 - .../data_utils.py | 243 - .../memory.py | 392 - .../learning_to_remember_rare_events/model.py | 302 - .../learning_to_remember_rare_events/train.py | 242 - .../learning_unsupervised_learning/.gitignore | 1 - .../learning_unsupervised_learning/README.md | 40 - .../__init__.py | 0 .../architectures/__init__.py | 17 - .../architectures/common.py | 153 - .../architectures/more_local_weight_update.py | 861 - .../datasets/__init__.py | 16 - .../datasets/common.py | 29 - .../datasets/mnist.py | 74 - .../evaluation.py | 76 - .../meta_objective/__init__.py | 18 - .../meta_objective/linear_regression.py | 258 - .../meta_objective/sklearn.py | 167 - .../meta_objective/utils.py | 78 - .../optimizers.py | 133 - .../run_eval.py | 122 - .../summary_utils.py | 181 - .../learning_unsupervised_learning/utils.py | 287 - .../variable_replace.py | 112 - research/lexnet_nc/README.md | 215 - research/lexnet_nc/extract_paths.py | 119 - research/lexnet_nc/get_indicative_paths.py | 111 - research/lexnet_nc/learn_classifier.py | 223 - research/lexnet_nc/learn_path_embeddings.py | 186 - research/lexnet_nc/lexnet_common.py | 197 - research/lexnet_nc/lexnet_model.py | 438 - research/lexnet_nc/path_model.py | 547 - .../lexnet_nc/sorted_paths_to_examples.py | 202 - .../lexnet_nc/text_embeddings_to_binary.py | 48 - research/lm_1b/BUILD | 27 - research/lm_1b/README.md | 198 - research/lm_1b/data_utils.py | 279 - research/lm_1b/lm_1b_eval.py | 308 - research/lm_commonsense/README.md | 170 - research/lm_commonsense/eval.py | 190 - research/lm_commonsense/method.jpg | 
Bin 69059 -> 0 bytes research/lm_commonsense/utils.py | 368 - research/maskgan/README.md | 111 - research/maskgan/data/__init__.py | 0 research/maskgan/data/imdb_loader.py | 136 - research/maskgan/data/ptb_loader.py | 123 - research/maskgan/generate_samples.py | 281 - research/maskgan/losses/__init__.py | 0 research/maskgan/losses/losses.py | 186 - research/maskgan/model_utils/__init__.py | 0 research/maskgan/model_utils/helper.py | 158 - .../maskgan/model_utils/model_construction.py | 234 - research/maskgan/model_utils/model_losses.py | 327 - .../maskgan/model_utils/model_optimization.py | 194 - research/maskgan/model_utils/model_utils.py | 291 - research/maskgan/model_utils/n_gram.py | 66 - .../maskgan/model_utils/variable_mapping.py | 745 - research/maskgan/models/__init__.py | 0 research/maskgan/models/attention_utils.py | 477 - research/maskgan/models/bidirectional.py | 75 - research/maskgan/models/bidirectional_vd.py | 116 - .../maskgan/models/bidirectional_zaremba.py | 83 - research/maskgan/models/cnn.py | 93 - research/maskgan/models/critic_vd.py | 108 - research/maskgan/models/evaluation_utils.py | 280 - research/maskgan/models/feedforward.py | 98 - research/maskgan/models/rnn.py | 211 - research/maskgan/models/rnn_nas.py | 234 - research/maskgan/models/rnn_vd.py | 118 - research/maskgan/models/rnn_zaremba.py | 196 - research/maskgan/models/rollout.py | 384 - research/maskgan/models/seq2seq.py | 277 - research/maskgan/models/seq2seq_nas.py | 333 - research/maskgan/models/seq2seq_vd.py | 609 - research/maskgan/models/seq2seq_zaremba.py | 305 - research/maskgan/nas_utils/__init__.py | 0 research/maskgan/nas_utils/configs.py | 46 - research/maskgan/nas_utils/custom_cell.py | 166 - .../maskgan/nas_utils/variational_dropout.py | 61 - research/maskgan/pretrain_mask_gan.py | 231 - research/maskgan/regularization/__init__.py | 0 .../regularization/variational_dropout.py | 56 - research/maskgan/regularization/zoneout.py | 64 - research/maskgan/sample_shuffler.py | 95 - research/maskgan/train_mask_gan.py | 1167 - research/namignizer/.gitignore | 6 - research/namignizer/README.md | 86 - research/namignizer/data_utils.py | 119 - research/namignizer/model.py | 136 - research/namignizer/names.py | 259 - research/neural_gpu/README.md | 87 - research/neural_gpu/data_utils.py | 458 - research/neural_gpu/neural_gpu.py | 747 - research/neural_gpu/neural_gpu_trainer.py | 1027 - research/neural_gpu/program_utils.py | 444 - research/neural_gpu/wmt_utils.py | 437 - research/neural_programmer/README.md | 26 - research/neural_programmer/data_utils.py | 666 - research/neural_programmer/model.py | 679 - .../neural_programmer/neural_programmer.py | 239 - research/neural_programmer/nn_utils.py | 68 - research/neural_programmer/parameters.py | 89 - research/neural_programmer/wiki_data.py | 532 - research/next_frame_prediction/README.md | 89 - .../next_frame_prediction/cross_conv/BUILD | 48 - .../next_frame_prediction/cross_conv/eval.py | 119 - .../cross_conv/example_gen.py | 93 - .../next_frame_prediction/cross_conv/model.py | 233 - .../cross_conv/reader.py | 86 - .../cross_conv/sprites_gen.py | 98 - .../next_frame_prediction/cross_conv/train.py | 122 - .../g3doc/cross_conv.png | Bin 17636 -> 0 bytes .../g3doc/cross_conv2.png | Bin 16939 -> 0 bytes .../g3doc/cross_conv3.png | Bin 47713 -> 0 bytes research/ptn/.gitignore | 8 - research/ptn/BUILD | 94 - research/ptn/README.md | 75 - research/ptn/WORKSPACE | 0 research/ptn/eval_ptn.py | 132 - research/ptn/eval_rotator.py | 126 - research/ptn/input_generator.py | 
130 - research/ptn/losses.py | 178 - research/ptn/metrics.py | 111 - research/ptn/model_ptn.py | 232 - research/ptn/model_rotator.py | 266 - research/ptn/model_voxel_generation.py | 222 - research/ptn/nets/BUILD | 64 - research/ptn/nets/deeprotator_factory.py | 91 - research/ptn/nets/im2vox_factory.py | 92 - research/ptn/nets/perspective_projector.py | 53 - research/ptn/nets/perspective_transform.py | 278 - research/ptn/nets/ptn_encoder.py | 54 - research/ptn/nets/ptn_im_decoder.py | 81 - research/ptn/nets/ptn_rotator.py | 58 - research/ptn/nets/ptn_vox_decoder.py | 118 - research/ptn/pretrain_rotator.py | 236 - research/ptn/train_ptn.py | 230 - research/ptn/utils.py | 119 - research/qa_kg/README.md | 83 - research/qa_kg/exp_1_hop/config.py | 80 - research/qa_kg/exp_1_hop/test.py | 135 - research/qa_kg/exp_1_hop/train_gt_layout.py | 194 - research/qa_kg/model_n2nmn/__init__.py | 0 research/qa_kg/model_n2nmn/assembler.py | 145 - research/qa_kg/model_n2nmn/model.py | 119 - research/qa_kg/model_n2nmn/modules.py | 131 - research/qa_kg/model_n2nmn/netgen_att.py | 295 - research/qa_kg/util/__init__.py | 0 research/qa_kg/util/data_reader.py | 231 - research/qa_kg/util/misc.py | 77 - research/qa_kg/util/nn.py | 55 - research/real_nvp/README.md | 282 - research/real_nvp/__init__.py | 0 research/real_nvp/celeba_formatting.py | 96 - research/real_nvp/imnet_formatting.py | 105 - research/real_nvp/lsun_formatting.py | 105 - .../real_nvp/real_nvp_multiscale_dataset.py | 1639 - research/real_nvp/real_nvp_utils.py | 475 - research/sentiment_analysis/README.md | 26 - research/sentiment_analysis/__init__.py | 0 research/sentiment_analysis/data/__init__.py | 0 research/sentiment_analysis/data/dataset.py | 52 - research/sentiment_analysis/data/imdb.py | 54 - research/sentiment_analysis/data/util.py | 32 - research/sentiment_analysis/sentiment_main.py | 115 - .../sentiment_analysis/sentiment_model.py | 50 - research/seq2species/README.md | 187 - research/seq2species/build_model.py | 506 - research/seq2species/configuration.py | 77 - research/seq2species/input.py | 325 - research/seq2species/protos/BUILD | 16 - research/seq2species/protos/__init__.py | 0 research/seq2species/protos/seq2label.proto | 49 - research/seq2species/run_training.py | 293 - research/seq2species/run_training_test.py | 118 - research/seq2species/seq2label_utils.py | 95 - research/seq2species/test_utils.py | 106 - research/skip_thoughts/.gitignore | 8 - research/skip_thoughts/README.md | 479 - research/skip_thoughts/WORKSPACE | 0 research/skip_thoughts/skip_thoughts/BUILD | 87 - .../skip_thoughts/skip_thoughts/__init__.py | 0 .../skip_thoughts/configuration.py | 110 - .../skip_thoughts/skip_thoughts/data/BUILD | 23 - .../skip_thoughts/data/__init__.py | 0 .../skip_thoughts/data/preprocess_dataset.py | 301 - .../skip_thoughts/data/special_words.py | 27 - .../skip_thoughts/encoder_manager.py | 134 - .../skip_thoughts/skip_thoughts/evaluate.py | 117 - .../skip_thoughts/skip_thoughts/ops/BUILD | 17 - .../skip_thoughts/ops/__init__.py | 0 .../skip_thoughts/ops/gru_cell.py | 134 - .../skip_thoughts/ops/input_ops.py | 118 - .../skip_thoughts/skip_thoughts_encoder.py | 258 - .../skip_thoughts/skip_thoughts_model.py | 369 - .../skip_thoughts/skip_thoughts_model_test.py | 191 - .../skip_thoughts/track_perplexity.py | 201 - research/skip_thoughts/skip_thoughts/train.py | 99 - .../skip_thoughts/vocabulary_expansion.py | 203 - research/steve/README.md | 94 - research/steve/agent.py | 143 - research/steve/config.py | 38 - 
research/steve/config/algos/ddpg.json | 3 - research/steve/config/algos/mve_mean.json | 14 - research/steve/config/algos/mve_tdk.json | 14 - research/steve/config/algos/mve_tdlambda.json | 14 - research/steve/config/algos/steve.json | 15 - research/steve/config/algos/steve_cov.json | 16 - research/steve/config/core/basic.json | 32 - research/steve/config/core/bayesian.json | 26 - research/steve/config/core/model.json | 16 - research/steve/config/envs/flagrun.json | 12 - research/steve/config/envs/halfcheetah.json | 12 - research/steve/config/envs/hardcore.json | 12 - research/steve/config/envs/hopper.json | 12 - research/steve/config/envs/humanoid.json | 12 - research/steve/config/envs/rshum.json | 12 - research/steve/config/envs/swimmer.json | 12 - research/steve/config/envs/walker2d.json | 12 - .../config/experimental_setups/speedrun.json | 11 - .../baselines/ensemble_mve_tdk0.json | 1 - .../baselines/ensemble_mve_tdk1.json | 1 - .../baselines/ensemble_mve_tdk2.json | 1 - .../ablations/baselines/mve_25tdlambda0.json | 10 - .../ablations/baselines/mve_25tdlambda1.json | 10 - .../ablations/baselines/mve_25tdlambda2.json | 10 - .../ablations/baselines/mve_75tdlambda0.json | 10 - .../ablations/baselines/mve_75tdlambda1.json | 10 - .../ablations/baselines/mve_75tdlambda2.json | 10 - .../ablations/baselines/mve_meank0.json | 1 - .../ablations/baselines/mve_meank1.json | 1 - .../ablations/baselines/mve_meank2.json | 1 - .../ablations/baselines/steve_cov0.json | 1 - .../ablations/baselines/steve_cov1.json | 1 - .../ablations/baselines/steve_cov2.json | 1 - .../ablations/horizons/steve_1h0.json | 10 - .../ablations/horizons/steve_1h1.json | 10 - .../ablations/horizons/steve_1h2.json | 10 - .../ablations/horizons/steve_2h0.json | 10 - .../ablations/horizons/steve_2h1.json | 10 - .../ablations/horizons/steve_2h2.json | 10 - .../ablations/horizons/steve_5h0.json | 10 - .../ablations/horizons/steve_5h1.json | 10 - .../ablations/horizons/steve_5h2.json | 10 - .../experiments/goodruns/flagrun/ddpg0.json | 1 - .../experiments/goodruns/flagrun/ddpg1.json | 1 - .../experiments/goodruns/flagrun/ddpg2.json | 1 - .../experiments/goodruns/flagrun/ddpg3.json | 1 - .../goodruns/flagrun/mve_tdk0.json | 1 - .../goodruns/flagrun/mve_tdk1.json | 1 - .../goodruns/flagrun/mve_tdk2.json | 1 - .../goodruns/flagrun/mve_tdk3.json | 1 - .../experiments/goodruns/flagrun/steve0.json | 1 - .../experiments/goodruns/flagrun/steve1.json | 1 - .../experiments/goodruns/flagrun/steve2.json | 1 - .../experiments/goodruns/flagrun/steve3.json | 1 - .../goodruns/halfcheetah/ddpg0.json | 1 - .../goodruns/halfcheetah/ddpg1.json | 1 - .../goodruns/halfcheetah/ddpg2.json | 1 - .../goodruns/halfcheetah/ddpg3.json | 1 - .../goodruns/halfcheetah/mve_tdk0.json | 1 - .../goodruns/halfcheetah/mve_tdk1.json | 1 - .../goodruns/halfcheetah/mve_tdk2.json | 1 - .../goodruns/halfcheetah/mve_tdk3.json | 1 - .../goodruns/halfcheetah/steve0.json | 1 - .../goodruns/halfcheetah/steve1.json | 1 - .../goodruns/halfcheetah/steve2.json | 1 - .../goodruns/halfcheetah/steve3.json | 1 - .../experiments/goodruns/hardcore/ddpg0.json | 1 - .../experiments/goodruns/hardcore/ddpg1.json | 1 - .../experiments/goodruns/hardcore/ddpg2.json | 1 - .../experiments/goodruns/hardcore/ddpg3.json | 1 - .../goodruns/hardcore/mve_tdk0.json | 1 - .../goodruns/hardcore/mve_tdk1.json | 1 - .../goodruns/hardcore/mve_tdk2.json | 1 - .../goodruns/hardcore/mve_tdk3.json | 1 - .../experiments/goodruns/hardcore/steve0.json | 1 - .../experiments/goodruns/hardcore/steve1.json | 1 - 
 .../experiments/goodruns/hardcore/steve2.json | 1 -
 .../experiments/goodruns/hardcore/steve3.json | 1 -
 .../experiments/goodruns/hopper/ddpg0.json | 1 -
 [... diffstat entries for the remaining deleted files elided: one "| 1 -" line per deleted
 ddpg/mve_tdk/steve run config under research/steve/config/experiments/, followed by per-file
 entries for the deleted sources under research/steve/, research/street/, research/struct2depth/,
 research/swivel/, research/tcn/, research/textsum/, research/transformer/ and
 research/video_prediction/ ...]
 research/video_prediction/prediction_train.py | 255 -
 research/video_prediction/push_datafiles.txt | 274 -
 877 files changed, 14 insertions(+), 231878 deletions(-)
 delete mode 100644 research/adv_imagenet_models/README.md
 delete mode 100644 research/adv_imagenet_models/eval_on_adversarial.py
 [... "delete mode" entries elided: one line per removed file, covering every directory named
 in the diffstat above, from research/adv_imagenet_models through research/video_prediction;
 the list resumes below ...]
research/textsum/seq2seq_attention_model.py delete mode 100644 research/textsum/seq2seq_lib.py delete mode 100644 research/transformer/README.md delete mode 100644 research/transformer/cluttered_mnist.py delete mode 100644 research/transformer/data/README.md delete mode 100644 research/transformer/example.py delete mode 100644 research/transformer/spatial_transformer.py delete mode 100644 research/transformer/tf_utils.py delete mode 100644 research/video_prediction/README.md delete mode 100755 research/video_prediction/download_data.sh delete mode 100644 research/video_prediction/lstm_ops.py delete mode 100644 research/video_prediction/prediction_input.py delete mode 100644 research/video_prediction/prediction_model.py delete mode 100644 research/video_prediction/prediction_train.py delete mode 100644 research/video_prediction/push_datafiles.txt diff --git a/research/README.md b/research/README.md index f9e84fb86..204955b3e 100644 --- a/research/README.md +++ b/research/README.md @@ -7,14 +7,17 @@ This directory contains code implementations and pre-trained models of published The research models are maintained by their respective authors. ## Table of Contents -- [Modeling Libraries and Models](#modeling-libraries-and-models) -- [Models and Implementations](#models-and-implementations) - * [Computer Vision](#computer-vision) - * [Natural Language Processing](#natural-language-processing) - * [Audio and Speech](#audio-and-speech) - * [Reinforcement Learning](#reinforcement-learning) - * [Others](#others) -- [Archived Models and Implementations](#warning-archived-models-and-implementations) (:no_entry_sign: No longer maintained) +- [TensorFlow Research Models](#tensorflow-research-models) + - [Table of Contents](#table-of-contents) + - [Modeling Libraries and Models](#modeling-libraries-and-models) + - [Models and Implementations](#models-and-implementations) + - [Computer Vision](#computer-vision) + - [Natural Language Processing](#natural-language-processing) + - [Audio and Speech](#audio-and-speech) + - [Reinforcement Learning](#reinforcement-learning) + - [Others](#others) + - [Old Models and Implementations in TensorFlow 1](#old-models-and-implementations-in-tensorflow-1) + - [Contributions](#contributions) ## Modeling Libraries and Models @@ -49,6 +52,7 @@ The research models are maintained by their respective authors. | Directory | Paper(s) | Conference | Maintainer(s) | |-----------|----------|------------|---------------| | [audioset](audioset) | [1] [Audio Set: An ontology and human-labeled dataset for audio events](https://research.google/pubs/pub45857/)
[2] [CNN Architectures for Large-Scale Audio Classification](https://research.google/pubs/pub45611/) | ICASSP 2017 | plakal, dpwe | +| [deep_speech](deep_speech) | [Deep Speech 2](https://arxiv.org/abs/1512.02595) | ICLR 2016 | yhliang2018 | ### Reinforcement Learning @@ -64,58 +68,9 @@ The research models are maintained by their respective authors. | [lfads](lfads) | [LFADS - Latent Factor Analysis via Dynamical Systems](https://arxiv.org/abs/1608.06315) | | jazcollins, sussillo | | [rebar](rebar) | [REBAR: Low-variance, unbiased gradient estimates for discrete latent variable models](https://arxiv.org/abs/1703.07370) | NIPS 2017 | gjtucker | ---- - -## :warning: Archived Models and Implementations - -The following research models are no longer maintained. +### Old Models and Implementations in TensorFlow 1 -**Note**: We will remove archived models from the master branch in June, 2020. -After removal, you will still be able to access archived models in the archive branch. - -| Directory | Paper(s) | Conference | Maintainer(s) | -|-----------|----------|------------|---------------| -| [adv_imagenet_models](adv_imagenet_models) | [1] [Adversarial Machine Learning at Scale](https://arxiv.org/abs/1611.01236)
[2] [Ensemble Adversarial Training: Attacks and Defenses](https://arxiv.org/abs/1705.07204) | [1] ICLR 2017
[2] ICLR 2018 | alexeykurakin | -| [adversarial_crypto](adversarial_crypto) | [Learning to Protect Communications with Adversarial Neural Cryptography](https://arxiv.org/abs/1610.06918) | | dave-andersen | -| [adversarial_logit_pairing](adversarial_logit_pairing) | [Adversarial Logit Pairing](https://arxiv.org/abs/1803.06373) | | alexeykurakin | -| [autoencoder](autoencoder) | Various autoencoders | | snurkabill | -| [brain_coder](brain_coder) | [Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526) | | danabo, mnorouzi | -| [cognitive_mapping_and_planning](cognitive_mapping_and_planning) | [Cognitive Mapping and Planning for Visual Navigation](https://arxiv.org/abs/1702.03920) | CVPR 2017 | s-gupta | -| [compression](compression) | [Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148) | CVPR 2017 | nmjohn | -| [deep_contextual_bandits](deep_contextual_bandits) | [Deep Bayesian Bandits Showdown: An Empirical Comparison of Bayesian Deep Networks for Thompson Sampling](https://arxiv.org/abs/1802.09127) | ICLR 2018 | rikel | -| [deep_speech](deep_speech) | [Deep Speech 2](https://arxiv.org/abs/1512.02595) | ICLR 2016 | yhliang2018 | -| [domain_adaptation](domain_adaptation) | [1] [Domain Separation Networks](https://arxiv.org/abs/1608.06019)
[2] [Unsupervised Pixel-Level Domain Adaptation with Generative Adversarial Networks](https://arxiv.org/abs/1612.05424) | NIPS 2016 | bousmalis, dmrd | -| [feelvos](feelvos)| [FEELVOS](https://arxiv.org/abs/1902.09513) | CVPR 2019 | pvoigtlaender, yuningchai, aquariusjay | -| [fivo](fivo)| [Filtering variational objectives for training generative sequence models](https://arxiv.org/abs/1705.09279) | NIPS 2017 | dieterichlawson | -| [global_objectives](global_objectives) | [Scalable Learning of Non-Decomposable Objectives](https://arxiv.org/abs/1608.04802) | AISTATS 2017 | mackeya-google | -| [im2txt](im2txt) | [Show and Tell: Lessons learned from the 2015 MSCOCO Image Captioning Challenge](https://arxiv.org/abs/1609.06647) | TPAMI 2016 | cshallue | -| [inception](inception) | [Rethinking the Inception Architecture for Computer Vision](https://arxiv.org/abs/1512.00567) | CVPR 2016 | shlens, vincentvanhoucke | -| [keypointnet](keypointnet) | [KeypointNet](https://arxiv.org/abs/1807.03146) | | mnorouzi | -| [learned_optimizer](learned_optimizer) | [Learned Optimizers that Scale and Generalize](https://arxiv.org/abs/1703.04813) | ICML 2017 | olganw, nirum | -| [learning_to_remember_rare_events](learning_to_remember_rare_events) | [Learning to Remember Rare Events](https://arxiv.org/abs/1703.03129) | ICLR 2017| lukaszkaiser, ofirnachum | -| [learning_unsupervised_learning](learning_unsupervised_learning) | [Meta-Learning Update Rules for Unsupervised Representation Learning](https://arxiv.org/abs/1804.00222) | ICLR 2019 | lukemetz, nirum | -| [lexnet_nc](lexnet_nc) | [Olive Oil is Made of Olives, Baby Oil is Made for Babies: Interpreting Noun Compounds using Paraphrases in a Neural Model](https://arxiv.org/abs/1803.08073) | NAACL 2018 | vered1986, waterson | -| [lm_1b](lm_1b) | [Exploring the Limits of Language Modeling](https://arxiv.org/abs/1602.02410) | | oriolvinyals, panyx0718 | -| [lm_commonsense](lm_commonsense) | [A Simple Method for Commonsense Reasoning](https://arxiv.org/abs/1806.02847) | | thtrieu | -| [maskgan](maskgan)| [MaskGAN: Better Text Generation via Filling in the](https://arxiv.org/abs/1801.07736) | ICLR 2018 | liamb315, a-dai | -| [namignizer](namignizer)| Namignizer | | knathanieltucker | -| [neural_gpu](neural_gpu)| [Neural GPUs Learn Algorithms](https://arxiv.org/abs/1511.08228) | | lukaszkaiser | -| [neural_programmer](neural_programmer) | [Learning a Natural Language Interface with Neural Programmer](https://arxiv.org/abs/1611.08945) | ICLR 2017 | arvind2505 | -| [next_frame_prediction](next_frame_prediction) | [Visual Dynamics: Probabilistic Future Frame Synthesis via Cross Convolutional Networks](https://arxiv.org/abs/1607.02586) | NIPS 2016 | panyx0718 | -| [ptn](ptn) | [Perspective Transformer Nets: Learning Single-View 3D Object Reconstruction without 3D Supervision](https://arxiv.org/abs/1612.00814) | NIPS 2016 | xcyan, arkanath, hellojas, honglaklee | -| [qa_kg](qa_kg) | [Learning to Reason: End-to-End Module Networks for Visual Question Answering](https://arxiv.org/abs/1704.05526) | ICCV 2017 | yuyuz | -| [real_nvp](real_nvp) | [Density estimation using Real NVP](https://arxiv.org/abs/1605.08803) | ICLR 2017 | laurent-dinh | -| [sentiment_analysis](sentiment_analysis)| [Effective Use of Word Order for Text Categorization with Convolutional Neural Networks](https://arxiv.org/abs/1412.1058) | NAACL HLT 2015 | sculd | -| [seq2species](seq2species) | [Seq2Species: A deep learning approach to pattern recognition for short DNA 
sequences](https://doi.org/10.1101/353474) | | apbusia, depristo |
-| [skip_thoughts](skip_thoughts) | [Skip-Thought Vectors](https://arxiv.org/abs/1506.06726) | | cshallue |
-| [steve](steve) | [Sample-Efficient Reinforcement Learning with Stochastic Ensemble Value Expansion](https://arxiv.org/abs/1807.01675) | NeurIPS 2018 | buckman-google |
-| [street](street) | [End-to-End Interpretation of the French Street Name Signs Dataset](https://arxiv.org/abs/1702.03970) | ECCV 2016 | theraysmith |
-| [struct2depth](struct2depth) | [Depth Prediction Without the Sensors: Leveraging Structure for Unsupervised Learning from Monocular Videos](https://arxiv.org/abs/1811.06152) | AAAI 2019 | aneliaangelova |
-| [swivel](swivel) | [Swivel: Improving Embeddings by Noticing What's Missing](https://arxiv.org/abs/1602.02215) | | waterson |
-| [tcn](tcn) | [Time-Contrastive Networks: Self-Supervised Learning from Video](https://arxiv.org/abs/1704.06888) | ICRA 2018 | coreylynch, sermanet |
-| [textsum](textsum) | [A Neural Attention Model for Abstractive Sentence Summarization](https://arxiv.org/abs/1509.00685) | EMNLP 2015 | panyx0718, peterjliu |
-| [transformer](transformer) | [Spatial Transformer Network](https://arxiv.org/abs/1506.02025) | NIPS 2015 | daviddao |
-| [video_prediction](video_prediction) | [Unsupervised Learning for Physical Interaction through Video Prediction](https://arxiv.org/abs/1605.07157) | NIPS 2016 | cbfinn |
+:warning: If you are looking for old models, please visit the [Archive branch](https://github.com/tensorflow/models/tree/archive/research).

---

diff --git a/research/adv_imagenet_models/README.md b/research/adv_imagenet_models/README.md
deleted file mode 100644
index 6129f7347..000000000
--- a/research/adv_imagenet_models/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Adversarially trained ImageNet models
-
-Pre-trained ImageNet models from the following papers:
-
-* [Adversarial Machine Learning at Scale](https://arxiv.org/abs/1611.01236)
-* [Ensemble Adversarial Training: Attacks and Defenses](https://arxiv.org/abs/1705.07204)
-
-## Contact
-
-Author: Alexey Kurakin,
-github: [AlexeyKurakin](https://github.com/AlexeyKurakin)
-
-## Prerequisites and installation
-
-Ensure that you have installed TensorFlow 1.1 or greater
-([instructions](https://www.tensorflow.org/install/)).
-
-You also need a copy of the ImageNet dataset if you want to run the provided
-example. Follow the
-[Preparing the dataset](https://github.com/tensorflow/models/tree/master/research/slim#Data)
-instructions in the TF-Slim library to get and preprocess the ImageNet data.
-
-## Available models
-
-The following pre-trained models are available:
-
-Network Architecture | Adversarial training | Checkpoint
----------------------|----------------------|----------------
-Inception v3 | Step L.L. | [adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz)
-Inception v3 | Step L.L. on ensemble of 3 models | [ens3_adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens3_adv_inception_v3_2017_08_18.tar.gz)
-Inception v3 | Step L.L. on ensemble of 4 models | [ens4_adv_inception_v3_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens4_adv_inception_v3_2017_08_18.tar.gz)
-Inception ResNet v2 | Step L.L. | [adv_inception_resnet_v2_2017_12_18.tar.gz](http://download.tensorflow.org/models/adv_inception_resnet_v2_2017_12_18.tar.gz)
-Inception ResNet v2 | Step L.L. on ensemble of 3 models | [ens_adv_inception_resnet_v2_2017_08_18.tar.gz](http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz)
-
-All checkpoints are compatible with the
-[TF-Slim](https://github.com/tensorflow/models/tree/master/research/slim)
-implementation of Inception v3 and Inception ResNet v2.
-
-## How to evaluate models on ImageNet test data
-
-The script `eval_on_adversarial.py` allows you to evaluate the provided models
-on white-box adversarial examples generated from the ImageNet test set.
-
-Usage is as follows:
-
-```bash
-# ${MODEL_NAME} - type of network architecture,
-#     either "inception_v3" or "inception_resnet_v2"
-# ${CHECKPOINT_PATH} - path to model checkpoint
-# ${DATASET_DIR} - directory with ImageNet test set
-# ${ADV_METHOD} - which method to use to generate adversarial images,
-#     supported methods:
-#     "none" - use clean images from the dataset
-#     "stepll" - one step towards least likely class method (StepLL),
-#         see https://arxiv.org/abs/1611.01236 for details
-#     "stepllnoise" - RAND+StepLL method from https://arxiv.org/abs/1705.07204
-# ${ADV_EPS} - size of adversarial perturbation, ignored when method is "none"
-python eval_on_adversarial.py \
-  --model_name=${MODEL_NAME} \
-  --checkpoint_path=${CHECKPOINT_PATH} \
-  --dataset_dir=${DATASET_DIR} \
-  --batch_size=50 \
-  --adversarial_method=${ADV_METHOD} \
-  --adversarial_eps=${ADV_EPS}
-```
-
-Below is an example of how to evaluate one of the models on RAND+StepLL
-adversarial examples (the downloaded checkpoint is the ensemble-trained
-Inception ResNet v2, so `--model_name` is set to match):
-
-```bash
-# Download checkpoint
-CHECKPOINT_DIR=/tmp/checkpoints
-mkdir ${CHECKPOINT_DIR}
-wget http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz
-tar -xvf ens_adv_inception_resnet_v2_2017_08_18.tar.gz
-mv ens_adv_inception_resnet_v2.ckpt* ${CHECKPOINT_DIR}
-rm ens_adv_inception_resnet_v2_2017_08_18.tar.gz
-
-# Run evaluation
-python eval_on_adversarial.py \
-  --model_name=inception_resnet_v2 \
-  --checkpoint_path=${CHECKPOINT_DIR}/ens_adv_inception_resnet_v2.ckpt \
-  --dataset_dir=${DATASET_DIR} \
-  --batch_size=50 \
-  --adversarial_method=stepllnoise \
-  --adversarial_eps=16
-```
diff --git a/research/adv_imagenet_models/eval_on_adversarial.py b/research/adv_imagenet_models/eval_on_adversarial.py
deleted file mode 100644
index f9188845c..000000000
--- a/research/adv_imagenet_models/eval_on_adversarial.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Script which evaluates model on adversarial examples.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import imagenet -import inception_resnet_v2 - -import tensorflow as tf -from tensorflow.contrib.slim.nets import inception - - -slim = tf.contrib.slim - -tf.app.flags.DEFINE_integer( - 'batch_size', 50, 'The number of samples in each batch.') - -tf.app.flags.DEFINE_integer( - 'max_num_batches', None, - 'Max number of batches to evaluate by default use all.') - -tf.app.flags.DEFINE_string( - 'master', '', 'The address of the TensorFlow master to use.') - -tf.app.flags.DEFINE_string( - 'checkpoint_path', '/tmp/tfmodel/', - 'The directory where the model was written to or an absolute path to a ' - 'checkpoint file.') - -tf.app.flags.DEFINE_integer( - 'num_preprocessing_threads', 4, - 'The number of threads used to create the batches.') - -tf.app.flags.DEFINE_string( - 'split_name', 'validation', 'The name of the train/test split.') - -tf.app.flags.DEFINE_string( - 'dataset_dir', None, 'The directory where the dataset files are stored.') - -tf.app.flags.DEFINE_string( - 'model_name', 'inception_v3', - 'Name of the model to use, either "inception_v3" or "inception_resnet_v2"') - -tf.app.flags.DEFINE_float( - 'moving_average_decay', None, - 'The decay to use for the moving average.' - 'If left as None, then moving averages are not used.') - -tf.app.flags.DEFINE_string( - 'adversarial_method', 'none', - 'What kind of adversarial examples to use for evaluation. ' - 'Could be one of: "none", "stepll", "stepllnoise".') - -tf.app.flags.DEFINE_float( - 'adversarial_eps', 0.0, - 'Size of adversarial perturbation in range [0, 255].') - - -FLAGS = tf.app.flags.FLAGS - - -IMAGE_SIZE = 299 -NUM_CLASSES = 1001 - - -def preprocess_for_eval(image, height, width, - central_fraction=0.875, scope=None): - """Prepare one image for evaluation. - - If height and width are specified it would output an image with that size by - applying resize_bilinear. - If central_fraction is specified it would crop the central fraction of the - input image. - - Args: - image: 3-D Tensor of image. If dtype is tf.float32 then the range should be - [0, 1], otherwise it would converted to tf.float32 assuming that the range - is [0, MAX], where MAX is largest positive representable number for - int(8/16/32) data type (see `tf.image.convert_image_dtype` for details) - height: integer - width: integer - central_fraction: Optional Float, fraction of the image to crop. - scope: Optional scope for name_scope. - Returns: - 3-D float Tensor of prepared image. - """ - with tf.name_scope(scope, 'eval_image', [image, height, width]): - if image.dtype != tf.float32: - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - # Crop the central region of the image with an area containing 87.5% of - # the original image. - if central_fraction: - image = tf.image.central_crop(image, central_fraction=central_fraction) - - if height and width: - # Resize the image to the specified height and width. - image = tf.expand_dims(image, 0) - image = tf.image.resize_bilinear(image, [height, width], - align_corners=False) - image = tf.squeeze(image, [0]) - image = tf.subtract(image, 0.5) - image = tf.multiply(image, 2.0) - return image - - -def create_model(x, reuse=None): - """Create model graph. 
-
-  Args:
-    x: input images
-    reuse: reuse parameter which will be passed to underlying variable scopes.
-      Should be None on the first call and True on every subsequent call.
-
-  Returns:
-    (logits, end_points) - tuple of model logits and endpoints
-
-  Raises:
-    ValueError: if model type specified by --model_name flag is invalid.
-  """
-  if FLAGS.model_name == 'inception_v3':
-    with slim.arg_scope(inception.inception_v3_arg_scope()):
-      return inception.inception_v3(
-          x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse)
-  elif FLAGS.model_name == 'inception_resnet_v2':
-    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
-      return inception_resnet_v2.inception_resnet_v2(
-          x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse)
-  else:
-    raise ValueError('Invalid model name: %s' % (FLAGS.model_name))
-
-
-def step_target_class_adversarial_images(x, eps, one_hot_target_class):
-  """Base code for one step towards target class methods.
-
-  Args:
-    x: source images
-    eps: size of adversarial perturbation
-    one_hot_target_class: one hot encoded target classes for all images
-
-  Returns:
-    tensor with adversarial images
-  """
-  logits, end_points = create_model(x, reuse=True)
-  cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
-                                                  logits,
-                                                  label_smoothing=0.1,
-                                                  weights=1.0)
-  cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class,
-                                                   end_points['AuxLogits'],
-                                                   label_smoothing=0.1,
-                                                   weights=0.4)
-  x_adv = x - eps * tf.sign(tf.gradients(cross_entropy, x)[0])
-  x_adv = tf.clip_by_value(x_adv, -1.0, 1.0)
-  return tf.stop_gradient(x_adv)
-
-
-def stepll_adversarial_images(x, eps):
-  """One step towards least likely class (Step L.L.) adversarial examples.
-
-  This method is an alternative to FGSM which does not use true classes.
-  The method is described in the "Adversarial Machine Learning at Scale"
-  paper, https://arxiv.org/abs/1611.01236
-
-  Args:
-    x: source images
-    eps: size of adversarial perturbation
-
-  Returns:
-    adversarial images
-  """
-  logits, _ = create_model(x, reuse=True)
-  least_likely_class = tf.argmin(logits, 1)
-  one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES)
-  return step_target_class_adversarial_images(x, eps, one_hot_ll_class)
-
-
-def stepllnoise_adversarial_images(x, eps):
-  """Step L.L. with noise method.
-
-  This is an improvement of the Step L.L. method. It is more effective against
-  adversarially trained models, which learn to mask gradients.
-  The method is described in the section "New randomized one shot attack" of
-  the "Ensemble Adversarial Training: Attacks and Defenses" paper,
-  https://arxiv.org/abs/1705.07204
-
-  Args:
-    x: source images
-    eps: size of adversarial perturbation
-
-  Returns:
-    adversarial images
-  """
-  logits, _ = create_model(x, reuse=True)
-  least_likely_class = tf.argmin(logits, 1)
-  one_hot_ll_class = tf.one_hot(least_likely_class, NUM_CLASSES)
-  x_noise = x + eps / 2 * tf.sign(tf.random_normal(x.shape))
-  return step_target_class_adversarial_images(x_noise, eps / 2,
-                                              one_hot_ll_class)
-
-
-def get_input_images(dataset_images):
-  """Gets input images for the evaluation.
-
-  Args:
-    dataset_images: tensor with dataset images
-
-  Returns:
-    tensor with input images, which is either dataset images or adversarial
-    images.
-
-  Raises:
-    ValueError: if adversarial method specified by --adversarial_method flag
-      is invalid.
-  """
-  # adversarial_eps defines the max difference of pixel values when pixels
-  # are in the range [0, 255]. However, dataset pixel values are in the
-  # range [-1, 1], so convert epsilon accordingly.
-  eps = FLAGS.adversarial_eps / 255 * 2.0
-
-  if FLAGS.adversarial_method == 'stepll':
-    return stepll_adversarial_images(dataset_images, eps)
-  elif FLAGS.adversarial_method == 'stepllnoise':
-    return stepllnoise_adversarial_images(dataset_images, eps)
-  elif FLAGS.adversarial_method == 'none':
-    return dataset_images
-  else:
-    raise ValueError('Invalid adversarial method: %s'
-                     % (FLAGS.adversarial_method))
-
-
-def main(_):
-  if not FLAGS.dataset_dir:
-    raise ValueError('You must supply the dataset directory with --dataset_dir')
-
-  tf.logging.set_verbosity(tf.logging.INFO)
-  with tf.Graph().as_default():
-    tf_global_step = tf.train.get_or_create_global_step()
-
-    ###################
-    # Prepare dataset #
-    ###################
-    dataset = imagenet.get_split(FLAGS.split_name, FLAGS.dataset_dir)
-    provider = slim.dataset_data_provider.DatasetDataProvider(
-        dataset,
-        shuffle=False,
-        common_queue_capacity=2 * FLAGS.batch_size,
-        common_queue_min=FLAGS.batch_size)
-    [dataset_image, label] = provider.get(['image', 'label'])
-    dataset_image = preprocess_for_eval(dataset_image, IMAGE_SIZE, IMAGE_SIZE)
-    dataset_images, labels = tf.train.batch(
-        [dataset_image, label],
-        batch_size=FLAGS.batch_size,
-        num_threads=FLAGS.num_preprocessing_threads,
-        capacity=5 * FLAGS.batch_size)
-
-    #######################################
-    # Define the model and input examples #
-    #######################################
-    create_model(tf.placeholder(tf.float32, shape=dataset_images.shape))
-    input_images = get_input_images(dataset_images)
-    logits, _ = create_model(input_images, reuse=True)
-
-    if FLAGS.moving_average_decay > 0:
-      variable_averages = tf.train.ExponentialMovingAverage(
-          FLAGS.moving_average_decay, tf_global_step)
-      variables_to_restore = variable_averages.variables_to_restore(
-          slim.get_model_variables())
-      variables_to_restore[tf_global_step.op.name] = tf_global_step
-    else:
-      variables_to_restore = slim.get_variables_to_restore()
-
-    ######################
-    # Define the metrics #
-    ######################
-    predictions = tf.argmax(logits, 1)
-    labels = tf.squeeze(labels)
-    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
-        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
-        'Recall_5': slim.metrics.streaming_sparse_recall_at_k(
-            logits, tf.reshape(labels, [-1, 1]), 5),
-    })
-
-    ######################
-    # Run evaluation     #
-    ######################
-    if FLAGS.max_num_batches:
-      num_batches = FLAGS.max_num_batches
-    else:
-      # This ensures that we make a single pass over all of the data.
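-      # (For example, the 50,000-image validation split with the default
-      # batch_size of 50 yields 1,000 batches.)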
- num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) - - if tf.gfile.IsDirectory(FLAGS.checkpoint_path): - checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path) - else: - checkpoint_path = FLAGS.checkpoint_path - - tf.logging.info('Evaluating %s' % checkpoint_path) - - top1_accuracy, top5_accuracy = slim.evaluation.evaluate_once( - master=FLAGS.master, - checkpoint_path=checkpoint_path, - logdir=None, - summary_op=None, - num_evals=num_batches, - eval_op=list(names_to_updates.values()), - final_op=[names_to_values['Accuracy'], names_to_values['Recall_5']], - variables_to_restore=variables_to_restore) - - print('Top1 Accuracy: ', top1_accuracy) - print('Top5 Accuracy: ', top5_accuracy) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/adv_imagenet_models/imagenet.py b/research/adv_imagenet_models/imagenet.py deleted file mode 100644 index 26c4c7a38..000000000 --- a/research/adv_imagenet_models/imagenet.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes. - -Some images have one or more bounding boxes associated with the label of the -image. See details here: http://image-net.org/download-bboxes - -WARNING: Don't use for object detection, in this case all the bounding boxes -of the image belong to just one class. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import tensorflow as tf - -slim = tf.contrib.slim - -_FILE_PATTERN = '%s-*' - -_SPLITS_TO_SIZES = { - 'train': 1281167, - 'validation': 50000, -} - -_ITEMS_TO_DESCRIPTIONS = { - 'image': 'A color image of varying height and width.', - 'label': 'The label id of the image, integer between 0 and 999', - 'label_text': 'The text of the label.', - 'object/bbox': 'A list of bounding boxes.', - 'object/label': 'A list of labels, one per each object.', -} - -_NUM_CLASSES = 1001 - - -def get_split(split_name, dataset_dir, file_pattern=None, reader=None): - """Gets a dataset tuple with instructions for reading ImageNet. - - Args: - split_name: A train/test split name. - dataset_dir: The base directory of the dataset sources. - file_pattern: The file pattern to use when matching the dataset sources. - It is assumed that the pattern contains a '%s' string so that the split - name can be inserted. - reader: The TensorFlow reader type. - - Returns: - A `Dataset` namedtuple. - - Raises: - ValueError: if `split_name` is not a valid train/test split. - """ - if split_name not in _SPLITS_TO_SIZES: - raise ValueError('split name %s was not recognized.' % split_name) - - if not file_pattern: - file_pattern = _FILE_PATTERN - file_pattern = os.path.join(dataset_dir, file_pattern % split_name) - - # Allowing None in the signature so that dataset_factory can use the default. 
- if reader is None: - reader = tf.TFRecordReader - - keys_to_features = { - 'image/encoded': tf.FixedLenFeature( - (), tf.string, default_value=''), - 'image/format': tf.FixedLenFeature( - (), tf.string, default_value='jpeg'), - 'image/class/label': tf.FixedLenFeature( - [], dtype=tf.int64, default_value=-1), - 'image/class/text': tf.FixedLenFeature( - [], dtype=tf.string, default_value=''), - 'image/object/bbox/xmin': tf.VarLenFeature( - dtype=tf.float32), - 'image/object/bbox/ymin': tf.VarLenFeature( - dtype=tf.float32), - 'image/object/bbox/xmax': tf.VarLenFeature( - dtype=tf.float32), - 'image/object/bbox/ymax': tf.VarLenFeature( - dtype=tf.float32), - 'image/object/class/label': tf.VarLenFeature( - dtype=tf.int64), - } - - items_to_handlers = { - 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), - 'label': slim.tfexample_decoder.Tensor('image/class/label'), - 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), - 'object/bbox': slim.tfexample_decoder.BoundingBox( - ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), - 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'), - } - - decoder = slim.tfexample_decoder.TFExampleDecoder( - keys_to_features, items_to_handlers) - - return slim.dataset.Dataset( - data_sources=file_pattern, - reader=reader, - decoder=decoder, - num_samples=_SPLITS_TO_SIZES[split_name], - items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, - num_classes=_NUM_CLASSES) diff --git a/research/adv_imagenet_models/inception_resnet_v2.py b/research/adv_imagenet_models/inception_resnet_v2.py deleted file mode 100644 index 2f690e8d2..000000000 --- a/research/adv_imagenet_models/inception_resnet_v2.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contains the definition of the Inception Resnet V2 architecture. - -As described in http://arxiv.org/abs/1602.07261. 
- - Inception-v4, Inception-ResNet and the Impact of Residual Connections - on Learning - Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -slim = tf.contrib.slim - - -def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): - """Builds the 35x35 resnet block.""" - with tf.variable_scope(scope, 'Block35', [net], reuse=reuse): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1') - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3') - with tf.variable_scope('Branch_2'): - tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1') - tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3') - tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3') - mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2]) - up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, - activation_fn=None, scope='Conv2d_1x1') - net += scale * up - if activation_fn: - net = activation_fn(net) - return net - - -def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): - """Builds the 17x17 resnet block.""" - with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], - scope='Conv2d_0b_1x7') - tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], - scope='Conv2d_0c_7x1') - mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) - up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, - activation_fn=None, scope='Conv2d_1x1') - net += scale * up - if activation_fn: - net = activation_fn(net) - return net - - -def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): - """Builds the 8x8 resnet block.""" - with tf.variable_scope(scope, 'Block8', [net], reuse=reuse): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3], - scope='Conv2d_0b_1x3') - tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1], - scope='Conv2d_0c_3x1') - mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) - up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, - activation_fn=None, scope='Conv2d_1x1') - net += scale * up - if activation_fn: - net = activation_fn(net) - return net - - -def inception_resnet_v2_base(inputs, - final_endpoint='Conv2d_7b_1x1', - output_stride=16, - align_feature_maps=False, - scope=None): - """Inception model from http://arxiv.org/abs/1602.07261. - - Constructs an Inception Resnet v2 network from inputs to the given final - endpoint. This method can construct the network up to the final inception - block Conv2d_7b_1x1. - - Args: - inputs: a tensor of size [batch_size, height, width, channels]. - final_endpoint: specifies the endpoint to construct the network up to. 
It - can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', - 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', - 'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1'] - output_stride: A scalar that specifies the requested ratio of input to - output spatial resolution. Only supports 8 and 16. - align_feature_maps: When true, changes all the VALID paddings in the network - to SAME padding so that the feature maps are aligned. - scope: Optional variable_scope. - - Returns: - tensor_out: output tensor corresponding to the final_endpoint. - end_points: a set of activations for external use, for example summaries or - losses. - - Raises: - ValueError: if final_endpoint is not set to one of the predefined values, - or if the output_stride is not 8 or 16, or if the output_stride is 8 and - we request an end point after 'PreAuxLogits'. - """ - if output_stride != 8 and output_stride != 16: - raise ValueError('output_stride must be 8 or 16.') - - padding = 'SAME' if align_feature_maps else 'VALID' - - end_points = {} - - def add_and_check_final(name, net): - end_points[name] = net - return name == final_endpoint - - with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]): - with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], - stride=1, padding='SAME'): - # 149 x 149 x 32 - net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding, - scope='Conv2d_1a_3x3') - if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points - - # 147 x 147 x 32 - net = slim.conv2d(net, 32, 3, padding=padding, - scope='Conv2d_2a_3x3') - if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points - # 147 x 147 x 64 - net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3') - if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points - # 73 x 73 x 64 - net = slim.max_pool2d(net, 3, stride=2, padding=padding, - scope='MaxPool_3a_3x3') - if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points - # 73 x 73 x 80 - net = slim.conv2d(net, 80, 1, padding=padding, - scope='Conv2d_3b_1x1') - if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points - # 71 x 71 x 192 - net = slim.conv2d(net, 192, 3, padding=padding, - scope='Conv2d_4a_3x3') - if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points - # 35 x 35 x 192 - net = slim.max_pool2d(net, 3, stride=2, padding=padding, - scope='MaxPool_5a_3x3') - if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points - - # 35 x 35 x 320 - with tf.variable_scope('Mixed_5b'): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1') - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5, - scope='Conv2d_0b_5x5') - with tf.variable_scope('Branch_2'): - tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1') - tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3, - scope='Conv2d_0b_3x3') - tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3, - scope='Conv2d_0c_3x3') - with tf.variable_scope('Branch_3'): - tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME', - scope='AvgPool_0a_3x3') - tower_pool_1 = slim.conv2d(tower_pool, 64, 1, - scope='Conv2d_0b_1x1') - net = tf.concat( - [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3) - - if add_and_check_final('Mixed_5b', net): return net, end_points - # TODO(alemi): Register intermediate endpoints - net = slim.repeat(net, 10, block35, scale=0.17) - - # 17 x 17 x 
1088 if output_stride == 8, - # 33 x 33 x 1088 if output_stride == 16 - use_atrous = output_stride == 8 - - with tf.variable_scope('Mixed_6a'): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2, - padding=padding, - scope='Conv2d_1a_3x3') - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3, - scope='Conv2d_0b_3x3') - tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3, - stride=1 if use_atrous else 2, - padding=padding, - scope='Conv2d_1a_3x3') - with tf.variable_scope('Branch_2'): - tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2, - padding=padding, - scope='MaxPool_1a_3x3') - net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3) - - if add_and_check_final('Mixed_6a', net): return net, end_points - - # TODO(alemi): register intermediate endpoints - with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1): - net = slim.repeat(net, 20, block17, scale=0.10) - if add_and_check_final('PreAuxLogits', net): return net, end_points - - if output_stride == 8: - # TODO(gpapan): Properly support output_stride for the rest of the net. - raise ValueError('output_stride==8 is only supported up to the ' - 'PreAuxlogits end_point for now.') - - # 8 x 8 x 2080 - with tf.variable_scope('Mixed_7a'): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') - tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2, - padding=padding, - scope='Conv2d_1a_3x3') - with tf.variable_scope('Branch_1'): - tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') - tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2, - padding=padding, - scope='Conv2d_1a_3x3') - with tf.variable_scope('Branch_2'): - tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1') - tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, - scope='Conv2d_0b_3x3') - tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2, - padding=padding, - scope='Conv2d_1a_3x3') - with tf.variable_scope('Branch_3'): - tower_pool = slim.max_pool2d(net, 3, stride=2, - padding=padding, - scope='MaxPool_1a_3x3') - net = tf.concat( - [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) - - if add_and_check_final('Mixed_7a', net): return net, end_points - - # TODO(alemi): register intermediate endpoints - net = slim.repeat(net, 9, block8, scale=0.20) - net = block8(net, activation_fn=None) - - # 8 x 8 x 1536 - net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1') - if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points - - raise ValueError('final_endpoint (%s) not recognized', final_endpoint) - - -def inception_resnet_v2(inputs, num_classes=1001, is_training=True, - dropout_keep_prob=0.8, - reuse=None, - scope='InceptionResnetV2', - create_aux_logits=True): - """Creates the Inception Resnet V2 model. - - Args: - inputs: a 4-D tensor of size [batch_size, height, width, 3]. - num_classes: number of predicted classes. - is_training: whether is training or not. - dropout_keep_prob: float, the fraction to keep before final layer. - reuse: whether or not the network and its variables should be reused. To be - able to reuse 'scope' must be given. - scope: Optional variable_scope. - create_aux_logits: Whether to include the auxilliary logits. - - Returns: - logits: the logits outputs of the model. - end_points: the set of end_points from the inception model. 
- """ - end_points = {} - - with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes], - reuse=reuse) as scope: - with slim.arg_scope([slim.batch_norm, slim.dropout], - is_training=is_training): - - net, end_points = inception_resnet_v2_base(inputs, scope=scope) - - if create_aux_logits: - with tf.variable_scope('AuxLogits'): - aux = end_points['PreAuxLogits'] - aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID', - scope='Conv2d_1a_3x3') - aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1') - aux = slim.conv2d(aux, 768, aux.get_shape()[1:3], - padding='VALID', scope='Conv2d_2a_5x5') - aux = slim.flatten(aux) - aux = slim.fully_connected(aux, num_classes, activation_fn=None, - scope='Logits') - end_points['AuxLogits'] = aux - - with tf.variable_scope('Logits'): - net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', - scope='AvgPool_1a_8x8') - net = slim.flatten(net) - - net = slim.dropout(net, dropout_keep_prob, is_training=is_training, - scope='Dropout') - - end_points['PreLogitsFlatten'] = net - logits = slim.fully_connected(net, num_classes, activation_fn=None, - scope='Logits') - end_points['Logits'] = logits - end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions') - - return logits, end_points -inception_resnet_v2.default_image_size = 299 - - -def inception_resnet_v2_arg_scope(weight_decay=0.00004, - batch_norm_decay=0.9997, - batch_norm_epsilon=0.001): - """Returns the scope with the default parameters for inception_resnet_v2. - - Args: - weight_decay: the weight decay for weights variables. - batch_norm_decay: decay for the moving average of batch_norm momentums. - batch_norm_epsilon: small float added to variance to avoid dividing by zero. - - Returns: - a arg_scope with the parameters needed for inception_resnet_v2. - """ - # Set weight_decay for weights in conv2d and fully_connected layers. - with slim.arg_scope([slim.conv2d, slim.fully_connected], - weights_regularizer=slim.l2_regularizer(weight_decay), - biases_regularizer=slim.l2_regularizer(weight_decay)): - - batch_norm_params = { - 'decay': batch_norm_decay, - 'epsilon': batch_norm_epsilon, - } - # Set activation_fn and parameters for batch_norm. - with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_params) as scope: - return scope diff --git a/research/adversarial_crypto/README.md b/research/adversarial_crypto/README.md deleted file mode 100644 index 3822def13..000000000 --- a/research/adversarial_crypto/README.md +++ /dev/null @@ -1,62 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Learning to Protect Communications with Adversarial Neural Cryptography - -This is a slightly-updated model used for the paper -["Learning to Protect Communications with Adversarial Neural -Cryptography"](https://arxiv.org/abs/1610.06918). - -> We ask whether neural networks can learn to use secret keys to protect -> information from other neural networks. Specifically, we focus on ensuring -> confidentiality properties in a multiagent system, and we specify those -> properties in terms of an adversary. 
Thus, a system may consist of neural
-> networks named Alice and Bob, and we aim to limit what a third neural
-> network named Eve learns from eavesdropping on the communication between
-> Alice and Bob. We do not prescribe specific cryptographic algorithms to
-> these neural networks; instead, we train end-to-end, adversarially.
-> We demonstrate that the neural networks can learn how to perform forms of
-> encryption and decryption, and also how to apply these operations
-> selectively in order to meet confidentiality goals.
-
-This code allows you to train encoder/decoder/adversary network triplets
-and evaluate their effectiveness on randomly generated input and key
-pairs.
-
-## Prerequisites
-
-The only software requirement for running the encoder and decoder is having
-TensorFlow installed.
-
-Requires TensorFlow r0.12 or later.
-
-## Training and evaluating
-
-After installing TensorFlow and ensuring that your paths are configured
-appropriately:
-
-```
-python train_eval.py
-```
-
-This will begin training a fresh model. If and when the model becomes
-sufficiently well-trained, it will reset the Eve model multiple times
-and retrain it from scratch, outputting the accuracy thus obtained
-in each run.
-
-## Model differences from the paper
-
-The model has been simplified slightly from the one described in
-the paper: the convolutional layer width was reduced by a factor
-of two. In the version in the paper, there was a nonlinear unit
-after the fully-connected layer; that nonlinearity has been removed
-here. These changes improve the robustness of training. The
-initializer for the convolution layers has been switched to the
-`tf.contrib.layers` default of `xavier_initializer` instead of
-a simpler `truncated_normal`.
-
-## Contact information
-
-This model repository is maintained by David G. Andersen
-([dave-andersen](https://github.com/dave-andersen)).
diff --git a/research/adversarial_crypto/train_eval.py b/research/adversarial_crypto/train_eval.py
deleted file mode 100644
index df7a00ad5..000000000
--- a/research/adversarial_crypto/train_eval.py
+++ /dev/null
@@ -1,276 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Adversarial training to learn trivial encryption functions,
-from the paper "Learning to Protect Communications with
-Adversarial Neural Cryptography", Abadi & Andersen, 2016.
-
-https://arxiv.org/abs/1610.06918
-
-This program creates and trains three neural networks,
-termed Alice, Bob, and Eve. Alice takes inputs
-in_m (message), in_k (key) and outputs 'ciphertext'.
-
-Bob takes inputs in_k, ciphertext and tries to reconstruct
-the message.
-
-Eve is an adversarial network that takes input ciphertext
-and also tries to reconstruct the message.
-
-The main function attempts to train these networks and then
-evaluates them, all on random plaintext and key values.
- -""" - -# TensorFlow Python 3 compatibility -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import signal -import sys -from six.moves import xrange # pylint: disable=redefined-builtin -import tensorflow as tf - -flags = tf.app.flags - -flags.DEFINE_float('learning_rate', 0.0008, 'Constant learning rate') -flags.DEFINE_integer('batch_size', 4096, 'Batch size') - -FLAGS = flags.FLAGS - -# Input and output configuration. -TEXT_SIZE = 16 -KEY_SIZE = 16 - -# Training parameters. -ITERS_PER_ACTOR = 1 -EVE_MULTIPLIER = 2 # Train Eve 2x for every step of Alice/Bob -# Train until either max loops or Alice/Bob "good enough": -MAX_TRAINING_LOOPS = 850000 -BOB_LOSS_THRESH = 0.02 # Exit when Bob loss < 0.02 and Eve > 7.7 bits -EVE_LOSS_THRESH = 7.7 - -# Logging and evaluation. -PRINT_EVERY = 200 # In training, log every 200 steps. -EVE_EXTRA_ROUNDS = 2000 # At end, train eve a bit more. -RETRAIN_EVE_ITERS = 10000 # Retrain eve up to ITERS*LOOPS times. -RETRAIN_EVE_LOOPS = 25 # With an evaluation each loop -NUMBER_OF_EVE_RESETS = 5 # And do this up to 5 times with a fresh eve. -# Use EVAL_BATCHES samples each time we check accuracy. -EVAL_BATCHES = 1 - - -def batch_of_random_bools(batch_size, n): - """Return a batch of random "boolean" numbers. - - Args: - batch_size: Batch size dimension of returned tensor. - n: number of entries per batch. - - Returns: - A [batch_size, n] tensor of "boolean" numbers, where each number is - preresented as -1 or 1. - """ - - as_int = tf.random.uniform( - [batch_size, n], minval=0, maxval=2, dtype=tf.int32) - expanded_range = (as_int * 2) - 1 - return tf.cast(expanded_range, tf.float32) - - -class AdversarialCrypto(object): - """Primary model implementation class for Adversarial Neural Crypto. - - This class contains the code for the model itself, - and when created, plumbs the pathways from Alice to Bob and - Eve, creates the optimizers and loss functions, etc. - - Attributes: - eve_loss: Eve's loss function. - bob_loss: Bob's loss function. Different units from eve_loss. - eve_optimizer: A tf op that runs Eve's optimizer. - bob_optimizer: A tf op that runs Bob's optimizer. - bob_reconstruction_loss: Bob's message reconstruction loss, - which is comparable to eve_loss. - reset_eve_vars: Execute this op to completely reset Eve. - """ - - def get_message_and_key(self): - """Generate random pseudo-boolean key and message values.""" - - batch_size = tf.compat.v1.placeholder_with_default(FLAGS.batch_size, shape=[]) - - in_m = batch_of_random_bools(batch_size, TEXT_SIZE) - in_k = batch_of_random_bools(batch_size, KEY_SIZE) - return in_m, in_k - - def model(self, collection, message, key=None): - """The model for Alice, Bob, and Eve. If key=None, the first fully connected layer - takes only the message as inputs. Otherwise, it uses both the key - and the message. - - Args: - collection: The graph keys collection to add new vars to. - message: The input message to process. - key: The input key (if any) to use. - """ - - if key is not None: - combined_message = tf.concat(axis=1, values=[message, key]) - else: - combined_message = message - - # Ensure that all variables created are in the specified collection. 
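-    # (Alice, Bob, and Eve each pass a different collection name ('alice',
-    # 'bob', 'eve'), which is what lets reset_eve_vars reinitialize Eve alone.)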
- with tf.contrib.framework.arg_scope( - [tf.contrib.layers.fully_connected, tf.contrib.layers.conv2d], - variables_collections=[collection]): - - fc = tf.contrib.layers.fully_connected( - combined_message, - TEXT_SIZE + KEY_SIZE, - biases_initializer=tf.constant_initializer(0.0), - activation_fn=None) - - # Perform a sequence of 1D convolutions (by expanding the message out to 2D - # and then squeezing it back down). - fc = tf.expand_dims(fc, 2) # 2D - fc = tf.expand_dims(fc, 3) # 3D -- conv2d needs a depth - # 2,1 -> 1,2 - conv = tf.contrib.layers.conv2d( - fc, 2, 2, 2, 'SAME', activation_fn=tf.nn.sigmoid) - # 1,2 -> 1, 2 - conv = tf.contrib.layers.conv2d( - conv, 2, 1, 1, 'SAME', activation_fn=tf.nn.sigmoid) - # 1,2 -> 1, 1 - conv = tf.contrib.layers.conv2d( - conv, 1, 1, 1, 'SAME', activation_fn=tf.nn.tanh) - conv = tf.squeeze(conv, 3) - conv = tf.squeeze(conv, 2) - return conv - - def __init__(self): - in_m, in_k = self.get_message_and_key() - encrypted = self.model('alice', in_m, in_k) - decrypted = self.model('bob', encrypted, in_k) - eve_out = self.model('eve', encrypted, None) - - self.reset_eve_vars = tf.group( - *[w.initializer for w in tf.compat.v1.get_collection('eve')]) - - optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) - - # Eve's goal is to decrypt the entire message: - eve_bits_wrong = tf.reduce_sum( - tf.abs((eve_out + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1]) - self.eve_loss = tf.reduce_sum(eve_bits_wrong) - self.eve_optimizer = optimizer.minimize( - self.eve_loss, var_list=tf.compat.v1.get_collection('eve')) - - # Alice and Bob want to be accurate... - self.bob_bits_wrong = tf.reduce_sum( - tf.abs((decrypted + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1]) - # ... and to not let Eve do better than guessing. - self.bob_reconstruction_loss = tf.reduce_sum(self.bob_bits_wrong) - bob_eve_error_deviation = tf.abs(float(TEXT_SIZE) / 2.0 - eve_bits_wrong) - # 7-9 bits wrong is OK too, so we squish the error function a bit. - # Without doing this, we often tend to hang out at 0.25 / 7.5 error, - # and it seems bad to have continued, high communication error. - bob_eve_loss = tf.reduce_sum( - tf.square(bob_eve_error_deviation) / (TEXT_SIZE / 2)**2) - - # Rescale the losses to [0, 1] per example and combine. - self.bob_loss = (self.bob_reconstruction_loss / TEXT_SIZE + bob_eve_loss) - - self.bob_optimizer = optimizer.minimize( - self.bob_loss, - var_list=(tf.compat.v1.get_collection('alice') + tf.compat.v1.get_collection('bob'))) - - -def doeval(s, ac, n, itercount): - """Evaluate the current network on n batches of random examples. - - Args: - s: The current TensorFlow session - ac: an instance of the AdversarialCrypto class - n: The number of iterations to run. - itercount: Iteration count label for logging. - - Returns: - Bob and Eve's loss, as a percent of bits incorrect. 
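-    (For reference, a network guessing randomly would get about
-    TEXT_SIZE / 2 = 8 of the 16 message bits wrong.)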
-  """
-
-  bob_loss_accum = 0
-  eve_loss_accum = 0
-  for _ in xrange(n):
-    bl, el = s.run([ac.bob_reconstruction_loss, ac.eve_loss])
-    bob_loss_accum += bl
-    eve_loss_accum += el
-  bob_loss_percent = bob_loss_accum / (n * FLAGS.batch_size)
-  eve_loss_percent = eve_loss_accum / (n * FLAGS.batch_size)
-  print('%10d\t%20.2f\t%20.2f'%(itercount, bob_loss_percent, eve_loss_percent))
-  sys.stdout.flush()
-  return bob_loss_percent, eve_loss_percent
-
-
-def train_until_thresh(s, ac):
-  for j in xrange(MAX_TRAINING_LOOPS):
-    for _ in xrange(ITERS_PER_ACTOR):
-      s.run(ac.bob_optimizer)
-    for _ in xrange(ITERS_PER_ACTOR * EVE_MULTIPLIER):
-      s.run(ac.eve_optimizer)
-    if j % PRINT_EVERY == 0:
-      bob_avg_loss, eve_avg_loss = doeval(s, ac, EVAL_BATCHES, j)
-      if (bob_avg_loss < BOB_LOSS_THRESH and eve_avg_loss > EVE_LOSS_THRESH):
-        print('Target losses achieved.')
-        return True
-  return False
-
-
-def train_and_evaluate():
-  """Run the full training and evaluation loop."""
-
-  ac = AdversarialCrypto()
-  init = tf.compat.v1.global_variables_initializer()
-
-  with tf.compat.v1.Session() as s:
-    s.run(init)
-    print('# Batch size: ', FLAGS.batch_size)
-    print('# %10s\t%20s\t%20s'%("Iter","Bob_Recon_Error","Eve_Recon_Error"))
-
-    if train_until_thresh(s, ac):
-      for _ in xrange(EVE_EXTRA_ROUNDS):
-        s.run(ac.eve_optimizer)
-      print('Loss after eve extra training:')
-      doeval(s, ac, EVAL_BATCHES * 2, 0)
-      for _ in xrange(NUMBER_OF_EVE_RESETS):
-        print('Resetting Eve')
-        s.run(ac.reset_eve_vars)
-        eve_counter = 0
-        for _ in xrange(RETRAIN_EVE_LOOPS):
-          for _ in xrange(RETRAIN_EVE_ITERS):
-            eve_counter += 1
-            s.run(ac.eve_optimizer)
-          doeval(s, ac, EVAL_BATCHES, eve_counter)
-        doeval(s, ac, EVAL_BATCHES, eve_counter)
-
-
-def main(unused_argv):
-  # Exit more quietly with Ctrl-C.
-  signal.signal(signal.SIGINT, signal.SIG_DFL)
-  train_and_evaluate()
-
-
-if __name__ == '__main__':
-  tf.compat.v1.app.run()
diff --git a/research/adversarial_logit_pairing/README.md b/research/adversarial_logit_pairing/README.md
deleted file mode 100644
index d3f576836..000000000
--- a/research/adversarial_logit_pairing/README.md
+++ /dev/null
@@ -1,281 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Adversarial logit pairing
-
-This directory contains an implementation of the
-[Adversarial logit pairing](https://arxiv.org/abs/1803.06373) paper, as well
-as a few models pre-trained on ImageNet and Tiny ImageNet.
-
-Please contact [Alexey Kurakin](https://github.com/AlexeyKurakin) regarding
-this code.
-
-## Prerequisites
-
-Code dependencies:
-
-* TensorFlow 1.8 and Python 2.7 (other versions may work, but were not tested)
-* [Abseil Python](https://github.com/abseil/abseil-py).
-* The script that converts the Tiny ImageNet dataset into TFRecord format also
-  depends on [Pandas](https://pandas.pydata.org/).
-
-## Datasets
-
-To use this code you need to download the datasets; you only need the ones
-you're going to use. The following datasets are supported:
-
-* [ImageNet](http://www.image-net.org/). Follow the
-  [Preparing the datasets](https://github.com/tensorflow/models/tree/master/research/slim#Data)
-  instructions in the TF-Slim documentation to download and convert the
-  ImageNet dataset to TFRecord format.
-
-* [Tiny ImageNet](https://tiny-imagenet.herokuapp.com/).
-  To obtain the Tiny ImageNet dataset, do the following:
-
-  ```
-  # Download zip archive with Tiny ImageNet
-  curl -O http://cs231n.stanford.edu/tiny-imagenet-200.zip
-
-  # Extract archive
-  unzip tiny-imagenet-200.zip
-
-  # Convert dataset to TFRecord format
-  mkdir tiny-imagenet-tfrecord
-  python tiny_imagenet_converter/converter.py \
-    --input_dir=tiny-imagenet-200 \
-    --output_dir=tiny-imagenet-tfrecord
-  ```
-
-## Running the code
-
-NOTE: The provided code supports distributed training on multiple machines,
-and all provided checkpoints were trained in a distributed way. However, it is
-beyond the scope of this document to describe how to do distributed training.
-Readers should refer to
-[other material](https://www.tensorflow.org/deploy/distributed) to learn
-about it.
-
-### Training
-
-The following command runs training:
-
-```
-# The following arguments have to be specified for training:
-# - MAX_NUMBER_OF_TRAINING_STEPS - maximum number of training steps,
-#     omit this flag or set it to -1 to allow an unlimited number of steps.
-# - MODEL_NAME - name of the model; currently only "resnet_v2_50" is supported.
-# - MOVING_AVG_DECAY - decay rate for the exponential moving average of the
-#     trainable variables. Training with an exponential moving average usually
-#     leads to better accuracy. The default of 0.9999 works well, so typically
-#     you set this flag only to disable the feature by passing -1.
-# - HYPERPARAMETERS - string with hyperparameters,
-#     see model_lib.py for the full list of hyperparameters.
-# - DATASET - dataset, either "imagenet" or "tiny_imagenet".
-# - IMAGE_SIZE - size of the image (single number).
-# - OUTPUT_DIRECTORY - directory where to write results.
-# - IMAGENET_DIR - directory with the ImageNet dataset in TFRecord format.
-# - TINY_IMAGENET_DIR - directory with the Tiny ImageNet dataset in TFRecord format.
-#
-# Note that only one of IMAGENET_DIR or TINY_IMAGENET_DIR has to be provided,
-# depending on which dataset you use.
-#
-python train.py \
-  --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \
-  --model_name="${MODEL_NAME}" \
-  --moving_average_decay="${MOVING_AVG_DECAY}" \
-  --hparams="${HYPERPARAMETERS}" \
-  --dataset="${DATASET}" \
-  --dataset_image_size="${IMAGE_SIZE}" \
-  --output_dir="${OUTPUT_DIRECTORY}" \
-  --imagenet_data_dir="${IMAGENET_DIR}" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-The full list of training hyperparameters can be found in `model_lib.py`.
-These hyperparameters control the learning rate schedule, optimizer, weight
-decay, label smoothing and adversarial training.
-
-Adversarial training is controlled by the following hyperparameters:
-
-* `train_adv_method` - method which is used to craft adversarial examples
-  during training. It can be one of the following:
-
-  * `clean` - perform regular training with clean examples;
-  * `pgd_EPS_STEP_NITER` - use non-targeted PGD with maximum size of
-    perturbation equal to `EPS`, step size equal to `STEP`
-    and number of iterations equal to `NITER`. Size of perturbation and step
-    size are expected to be integers between 1 and 255.
-  * `pgdll_EPS_STEP_NITER` - use targeted PGD, where the target class is the
-    network's least likely prediction.
-  * `pgdrnd_EPS_STEP_NITER` - use targeted PGD, where the target class is
-    chosen randomly.
-
-* `train_lp_weight` - weight of the adversarial logit pairing loss. If zero or
-  negative, then no logit pairing is performed and training is done using
-  mixed minibatch PGD.
If positive, then the adversarial logit pairing term is added
-  to the loss.
-
-Below is an example of how to run training with adversarial logit pairing on
-ImageNet 64x64:
-
-```
-python train.py \
-  --model_name="resnet_v2_50" \
-  --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5" \
-  --dataset="imagenet" \
-  --dataset_image_size=64 \
-  --output_dir="/tmp/adv_train" \
-  --imagenet_data_dir="${IMAGENET_DIR}"
-```
-
-### Fine tuning
-
-The provided training script can be used to fine-tune a pre-trained
-checkpoint. The following command does this:
-
-```
-# Fine tuning adds the following additional arguments:
-# - SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT - comma-separated list of scopes of
-#     variables which should not be loaded from the checkpoint (default
-#     initialization is used for them instead).
-#     SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT should be either the same as or a
-#     subset of LIST_OF_SCOPES_OF_TRAINABLE_VARS.
-# - LIST_OF_SCOPES_OF_TRAINABLE_VARS - comma-separated list of scopes of
-#     trainable variables. Only variables which are prefixed with these scopes
-#     will be trained.
-# - PATH_TO_PRETRAINED_CHECKPOINT - directory with the pretrained checkpoint
-#     which is used as the initialization for fine tuning.
-#
-python train.py \
-  --max_steps="${MAX_NUMBER_OF_TRAINING_STEPS}" \
-  --model_name="${MODEL_NAME}" \
-  --moving_average_decay="${MOVING_AVG_DECAY}" \
-  --hparams="${HYPERPARAMETERS}" \
-  --dataset="${DATASET}" \
-  --dataset_image_size="${IMAGE_SIZE}" \
-  --output_dir="${OUTPUT_DIRECTORY}" \
-  --imagenet_data_dir="${IMAGENET_DIR}" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \
-  --finetune_exclude_pretrained_scopes="${SCOPES_DO_NOT_LOAD_FROM_CHECKPOINT}" \
-  --finetune_trainable_scopes="${LIST_OF_SCOPES_OF_TRAINABLE_VARS}" \
-  --finetune_checkpoint_path="${PATH_TO_PRETRAINED_CHECKPOINT}"
-```
-
-Below is an example of how to fine-tune the last few layers of the model on
-the Tiny ImageNet dataset:
-
-```
-python train.py \
-  --model_name="resnet_v2_50" \
-  --hparams="train_adv_method=pgdll_16_2_10,train_lp_weight=0.5,learning_rate=0.02" \
-  --dataset="tiny_imagenet" \
-  --dataset_image_size=64 \
-  --output_dir="/tmp/adv_finetune" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}" \
-  --finetune_exclude_pretrained_scopes="resnet_v2_50/logits" \
-  --finetune_trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \
-  --finetune_checkpoint_path="/tmp/adv_train"
-```
-
-### Evaluation
-
-The following command runs evaluation:
-
-```
-# The following arguments should be provided for eval:
-# - TRAINING_DIRECTORY - directory where training checkpoints are saved.
-# - TRAINABLE_SCOPES - when loading a checkpoint which was obtained by fine
-#     tuning, this argument should be the same as
-#     LIST_OF_SCOPES_OF_TRAINABLE_VARS during training; otherwise it should
-#     be empty. This is needed to properly load exponential moving average
-#     variables. If exponential moving averages are disabled then this flag
-#     can be omitted.
-# - EVAL_SUBDIR_NAME - name of the subdirectory inside TRAINING_DIRECTORY
-#     where evaluation code will be saving event files.
-# - DATASET - name of the dataset.
-# - IMAGE_SIZE - size of the image in the dataset.
-# - DATASET_SPLIT_NAME - name of the split in the dataset,
-#     either 'train' or 'validation'. Default is 'validation'.
-# - MODEL_NAME - name of the model.
-# - MOVING_AVG_DECAY - decay rate for the exponential moving average.
-# - ADV_METHOD_FOR_EVAL - should be "clean" to evaluate on clean examples, or
-#     a description of the adversarial method to evaluate on adversarial
-#     examples.
-# - HYPERPARAMETERS - hyperparameters; only "eval_batch_size" matters for eval.
-# - NUMBER_OF_EXAMPLES - how many examples from the dataset to use for
-#     evaluation; specify -1 to use all examples.
-# - EVAL_ONCE - if True then evaluate only once, otherwise keep evaluation
-#     running repeatedly on new checkpoints. Repeated evaluation might be
-#     useful when running concurrently with training.
-# - IMAGENET_DIR - directory with the ImageNet dataset in TFRecord format.
-# - TINY_IMAGENET_DIR - directory with the Tiny ImageNet dataset in TFRecord format.
-#
-python eval.py \
-  --train_dir="${TRAINING_DIRECTORY}" \
-  --trainable_scopes="${TRAINABLE_SCOPES}" \
-  --eval_name="${EVAL_SUBDIR_NAME}" \
-  --dataset="${DATASET}" \
-  --dataset_image_size="${IMAGE_SIZE}" \
-  --split_name="${DATASET_SPLIT_NAME}" \
-  --model_name="${MODEL_NAME}" \
-  --moving_average_decay="${MOVING_AVG_DECAY}" \
-  --adv_method="${ADV_METHOD_FOR_EVAL}" \
-  --hparams="${HYPERPARAMETERS}" \
-  --num_examples="${NUMBER_OF_EXAMPLES}" \
-  --eval_once="${EVAL_ONCE}" \
-  --imagenet_data_dir="${IMAGENET_DIR}" \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-Example of running evaluation on 10000 clean examples from the ImageNet
-training set:
-
-```
-python eval.py \
-  --train_dir=/tmp/adv_train \
-  --dataset=imagenet \
-  --dataset_image_size=64 \
-  --split_name=train \
-  --adv_method=clean \
-  --hparams="eval_batch_size=50" \
-  --num_examples=10000 \
-  --eval_once=True \
-  --imagenet_data_dir="${IMAGENET_DIR}"
-```
-
-Example of running evaluation on adversarial images generated from the Tiny
-ImageNet validation set using the fine-tuned checkpoint:
-
-```
-python eval.py \
-  --train_dir=/tmp/adv_finetune \
-  --trainable_scopes="resnet_v2_50/logits,resnet_v2_50/postnorm" \
-  --dataset=tiny_imagenet \
-  --dataset_image_size=64 \
-  --adv_method=pgdrnd_16_2_10 \
-  --hparams="eval_batch_size=50" \
-  --eval_once=True \
-  --tiny_imagenet_data_dir="${TINY_IMAGENET_DIR}"
-```
-
-### Pre-trained models
-
-The following set of pre-trained checkpoints is released with this code:
-
-| Model | Dataset | Accuracy on clean images | Accuracy on `pgdll_16_1_20` | Accuracy on `pgdll_16_2_10` |
-| ----------- | ------------ | --------------- | --------------------------- | -------------- |
-| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_base_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 60.5% | 1.8% | 3.5% |
-| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/imagenet64_alp025_2018_06_26.ckpt.tar.gz) | ImageNet 64x64 | 55.7% | 27.5% | 27.8% |
-| [Baseline ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_base_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 69.2% | 0.1% | 0.3% |
-| [ALP-trained ResNet-v2-50](http://download.tensorflow.org/models/adversarial_logit_pairing/tiny_imagenet_alp05_2018_06_26.ckpt.tar.gz) | Tiny ImageNet | 72.0% | 41.3% | 40.8% |
-
-* All provided checkpoints were initially trained with an exponential moving
-  average. However, for ease of use they were re-saved without it, so to load
-  and use the provided checkpoints you need to specify the
-  `--moving_average_decay=-1` flag.
-* All ALP models were trained with `pgdll_16_2_10` adversarial examples.
-* All Tiny ImageNet models were obtained by fine-tuning the corresponding
-  ImageNet 64x64 models. ALP-trained models were fine-tuned with ALP.
diff --git a/research/adversarial_logit_pairing/adversarial_attack.py b/research/adversarial_logit_pairing/adversarial_attack.py
deleted file mode 100644
index 804bd64bc..000000000
--- a/research/adversarial_logit_pairing/adversarial_attack.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright 2018 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Library with adversarial attacks.
-
-This library is designed to be self-contained and have no dependencies other
-than TensorFlow. It only contains PGD / Iterative FGSM attacks,
-see https://arxiv.org/abs/1706.06083 and https://arxiv.org/abs/1607.02533
-for details.
-
-For a wider set of adversarial attacks, refer to the Cleverhans library:
-https://github.com/tensorflow/cleverhans
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-def generate_pgd_common(x,
-                        bounds,
-                        model_fn,
-                        attack_params,
-                        one_hot_labels,
-                        perturbation_multiplier):
-  """Common code for generating PGD adversarial examples.
-
-  Args:
-    x: original examples.
-    bounds: tuple with bounds of image values, bounds[0] < bounds[1].
-    model_fn: model function with signature model_fn(images).
-    attack_params: parameters of the attack.
-    one_hot_labels: one hot label vector to use in the loss.
-    perturbation_multiplier: multiplier of adversarial perturbation,
-      either +1.0 or -1.0.
-
-  Returns:
-    Tensor with adversarial examples.
-
-  Raises:
-    ValueError: if attack parameters are invalid.
-  """
-  # parse attack_params
-  # Format of attack_params: 'EPS_STEP_NITER'
-  # where EPS - epsilon, STEP - step size, NITER - number of iterations
-  params_list = attack_params.split('_')
-  if len(params_list) != 3:
-    raise ValueError('Invalid parameters of PGD attack: %s' % attack_params)
-  epsilon = int(params_list[0])
-  step_size = int(params_list[1])
-  niter = int(params_list[2])
-
-  # rescale epsilon and step size to image bounds
-  epsilon = float(epsilon) / 255.0 * (bounds[1] - bounds[0])
-  step_size = float(step_size) / 255.0 * (bounds[1] - bounds[0])
-
-  # clipping boundaries
-  clip_min = tf.maximum(x - epsilon, bounds[0])
-  clip_max = tf.minimum(x + epsilon, bounds[1])
-
-  # compute starting point
-  start_x = x + tf.random_uniform(tf.shape(x), -epsilon, epsilon)
-  start_x = tf.clip_by_value(start_x, clip_min, clip_max)
-
-  # main iteration of PGD
-  loop_vars = [0, start_x]
-
-  def loop_cond(index, _):
-    return index < niter
-
-  def loop_body(index, adv_images):
-    logits = model_fn(adv_images)
-    loss = tf.reduce_sum(
-        tf.nn.softmax_cross_entropy_with_logits_v2(
-            labels=one_hot_labels,
-            logits=logits))
-    perturbation = step_size * tf.sign(tf.gradients(loss, adv_images)[0])
-    new_adv_images = adv_images + perturbation_multiplier * perturbation
-    new_adv_images = tf.clip_by_value(new_adv_images, clip_min, clip_max)
-    return index + 1, new_adv_images
-
-  with tf.control_dependencies([start_x]):
-    _, result = tf.while_loop(
-        loop_cond,
-        loop_body,
-        loop_vars,
-        back_prop=False,
-        parallel_iterations=1)
-  return result
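As a quick sanity check of the rescaling above, here is how an 'EPS_STEP_NITER' string such as '16_2_10' translates into the image domain, assuming the (-1, 1) bounds used by the datasets in this directory:

```
bounds = (-1.0, 1.0)                         # image value range
eps_pixels, step_pixels, niter = 16, 2, 10   # parsed from '16_2_10'

scale = (bounds[1] - bounds[0]) / 255.0
epsilon = eps_pixels * scale       # ~0.1255, max L-inf perturbation
step_size = step_pixels * scale    # ~0.0157, per-iteration step
print(round(epsilon, 4), round(step_size, 4), niter)  # 0.1255 0.0157 10
```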
- """ - # parse attack_params - # Format of attack_params: 'EPS_STEP_NITER' - # where EPS - epsilon, STEP - step size, NITER - number of iterations - params_list = attack_params.split('_') - if len(params_list) != 3: - raise ValueError('Invalid parameters of PGD attack: %s' % attack_params) - epsilon = int(params_list[0]) - step_size = int(params_list[1]) - niter = int(params_list[2]) - - # rescale epsilon and step size to image bounds - epsilon = float(epsilon) / 255.0 * (bounds[1] - bounds[0]) - step_size = float(step_size) / 255.0 * (bounds[1] - bounds[0]) - - # clipping boundaries - clip_min = tf.maximum(x - epsilon, bounds[0]) - clip_max = tf.minimum(x + epsilon, bounds[1]) - - # compute starting point - start_x = x + tf.random_uniform(tf.shape(x), -epsilon, epsilon) - start_x = tf.clip_by_value(start_x, clip_min, clip_max) - - # main iteration of PGD - loop_vars = [0, start_x] - - def loop_cond(index, _): - return index < niter - - def loop_body(index, adv_images): - logits = model_fn(adv_images) - loss = tf.reduce_sum( - tf.nn.softmax_cross_entropy_with_logits_v2( - labels=one_hot_labels, - logits=logits)) - perturbation = step_size * tf.sign(tf.gradients(loss, adv_images)[0]) - new_adv_images = adv_images + perturbation_multiplier * perturbation - new_adv_images = tf.clip_by_value(new_adv_images, clip_min, clip_max) - return index + 1, new_adv_images - - with tf.control_dependencies([start_x]): - _, result = tf.while_loop( - loop_cond, - loop_body, - loop_vars, - back_prop=False, - parallel_iterations=1) - return result - - -def generate_pgd_ll(x, bounds, model_fn, attack_params): - # pylint: disable=g-doc-args - """Generats targeted PGD adversarial examples with least likely target class. - - See generate_pgd_common for description of arguments. - - Returns: - Tensor with adversarial examples. - """ - # pylint: enable=g-doc-args - - # compute one hot least likely class - logits = model_fn(x) - num_classes = tf.shape(logits)[1] - one_hot_labels = tf.one_hot(tf.argmin(model_fn(x), axis=1), num_classes) - - return generate_pgd_common(x, bounds, model_fn, attack_params, - one_hot_labels=one_hot_labels, - perturbation_multiplier=-1.0) - - -def generate_pgd_rand(x, bounds, model_fn, attack_params): - # pylint: disable=g-doc-args - """Generats targeted PGD adversarial examples with random target class. - - See generate_pgd_common for description of arguments. - - Returns: - Tensor with adversarial examples. - """ - # pylint: enable=g-doc-args - - # compute one hot random class - logits = model_fn(x) - batch_size = tf.shape(logits)[0] - num_classes = tf.shape(logits)[1] - random_labels = tf.random_uniform(shape=[batch_size], - minval=0, - maxval=num_classes, - dtype=tf.int32) - one_hot_labels = tf.one_hot(random_labels, num_classes) - - return generate_pgd_common(x, bounds, model_fn, attack_params, - one_hot_labels=one_hot_labels, - perturbation_multiplier=-1.0) - - -def generate_pgd(x, bounds, model_fn, attack_params): - # pylint: disable=g-doc-args - """Generats non-targeted PGD adversarial examples. - - See generate_pgd_common for description of arguments. - - Returns: - tensor with adversarial examples. 
- """ - # pylint: enable=g-doc-args - - # compute one hot predicted class - logits = model_fn(x) - num_classes = tf.shape(logits)[1] - one_hot_labels = tf.one_hot(tf.argmax(model_fn(x), axis=1), num_classes) - - return generate_pgd_common(x, bounds, model_fn, attack_params, - one_hot_labels=one_hot_labels, - perturbation_multiplier=1.0) - - -def generate_adversarial_examples(x, bounds, model_fn, attack_description): - """Generates adversarial examples. - - Args: - x: original examples. - bounds: tuple with bounds of image values, bounds[0] < bounds[1] - model_fn: model function with signature model_fn(images). - attack_description: string which describes an attack, see notes below for - details. - - Returns: - Tensor with adversarial examples. - - Raises: - ValueError: if attack description is invalid. - - - Attack description could be one of the following strings: - - "clean" - no attack, return original images. - - "pgd_EPS_STEP_NITER" - non-targeted PGD attack. - - "pgdll_EPS_STEP_NITER" - tageted PGD attack with least likely target class. - - "pgdrnd_EPS_STEP_NITER" - targetd PGD attack with random target class. - - Meaning of attack parameters is following: - - EPS - maximum size of adversarial perturbation, between 0 and 255. - - STEP - step size of one iteration of PGD, between 0 and 255. - - NITER - number of iterations. - """ - if attack_description == 'clean': - return x - idx = attack_description.find('_') - if idx < 0: - raise ValueError('Invalid value of attack description %s' - % attack_description) - attack_name = attack_description[:idx] - attack_params = attack_description[idx+1:] - if attack_name == 'pgdll': - return generate_pgd_ll(x, bounds, model_fn, attack_params) - elif attack_name == 'pgdrnd': - return generate_pgd_rand(x, bounds, model_fn, attack_params) - elif attack_name == 'pgd': - return generate_pgd(x, bounds, model_fn, attack_params) - else: - raise ValueError('Invalid value of attack description %s' - % attack_description) - diff --git a/research/adversarial_logit_pairing/datasets/__init__.py b/research/adversarial_logit_pairing/datasets/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/adversarial_logit_pairing/datasets/dataset_factory.py b/research/adversarial_logit_pairing/datasets/dataset_factory.py deleted file mode 100644 index 01c36d4ff..000000000 --- a/research/adversarial_logit_pairing/datasets/dataset_factory.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Library which creates datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from datasets import imagenet_input -from datasets import tiny_imagenet_input - - -def get_dataset(dataset_name, split, batch_size, image_size, is_training): - """Returns dataset. 
- - Args: - dataset_name: name of the dataset, "imagenet" or "tiny_imagenet". - split: name of the split, "train" or "validation". - batch_size: size of the minibatch. - image_size: size of the one side of the image. Output images will be - resized to square shape image_size*image_size. - is_training: if True then training preprocessing is done, otherwise eval - preprocessing is done. - - Raises: - ValueError: if dataset_name is invalid. - - Returns: - dataset: instance of tf.data.Dataset with the dataset. - num_examples: number of examples in given split of the dataset. - num_classes: number of classes in the dataset. - bounds: tuple with bounds of image values. All returned image pixels - are between bounds[0] and bounds[1]. - """ - if dataset_name == 'tiny_imagenet': - dataset = tiny_imagenet_input.tiny_imagenet_input( - split, batch_size, image_size, is_training) - num_examples = tiny_imagenet_input.num_examples_per_epoch(split) - num_classes = 200 - bounds = (-1, 1) - elif dataset_name == 'imagenet': - dataset = imagenet_input.imagenet_input( - split, batch_size, image_size, is_training) - num_examples = imagenet_input.num_examples_per_epoch(split) - num_classes = 1001 - bounds = (-1, 1) - else: - raise ValueError('Invalid dataset %s' % dataset_name) - return dataset, num_examples, num_classes, bounds diff --git a/research/adversarial_logit_pairing/datasets/imagenet_input.py b/research/adversarial_logit_pairing/datasets/imagenet_input.py deleted file mode 100644 index 0b210b8ce..000000000 --- a/research/adversarial_logit_pairing/datasets/imagenet_input.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Imagenet input.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -from absl import flags -import tensorflow as tf - -FLAGS = flags.FLAGS - - -flags.DEFINE_string('imagenet_data_dir', None, - 'Directory with Imagenet dataset in TFRecord format.') - - -def _decode_and_random_crop(image_buffer, bbox, image_size): - """Randomly crops image and then scales to target size.""" - with tf.name_scope('distorted_bounding_box_crop', - values=[image_buffer, bbox]): - sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - tf.image.extract_jpeg_shape(image_buffer), - bounding_boxes=bbox, - min_object_covered=0.1, - aspect_ratio_range=[0.75, 1.33], - area_range=[0.08, 1.0], - max_attempts=10, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, _ = sample_distorted_bounding_box - - # Crop the image to the specified bounding box. 
- offset_y, offset_x, _ = tf.unstack(bbox_begin) - target_height, target_width, _ = tf.unstack(bbox_size) - crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) - image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3) - image = tf.image.convert_image_dtype( - image, dtype=tf.float32) - - image = tf.image.resize_bicubic([image], - [image_size, image_size])[0] - - return image - - -def _decode_and_center_crop(image_buffer, image_size): - """Crops to center of image with padding then scales to target size.""" - shape = tf.image.extract_jpeg_shape(image_buffer) - image_height = shape[0] - image_width = shape[1] - - padded_center_crop_size = tf.cast( - 0.875 * tf.cast(tf.minimum(image_height, image_width), tf.float32), - tf.int32) - - offset_height = ((image_height - padded_center_crop_size) + 1) // 2 - offset_width = ((image_width - padded_center_crop_size) + 1) // 2 - crop_window = tf.stack([offset_height, offset_width, - padded_center_crop_size, padded_center_crop_size]) - image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3) - image = tf.image.convert_image_dtype( - image, dtype=tf.float32) - - image = tf.image.resize_bicubic([image], - [image_size, image_size])[0] - - return image - - -def _normalize(image): - """Rescale image to [-1, 1] range.""" - return tf.multiply(tf.subtract(image, 0.5), 2.0) - - -def image_preprocessing(image_buffer, bbox, image_size, is_training): - """Does image decoding and preprocessing. - - Args: - image_buffer: string tensor with encoded image. - bbox: bounding box of the object at the image. - image_size: image size. - is_training: whether to do training or eval preprocessing. - - Returns: - Tensor with the image. - """ - if is_training: - image = _decode_and_random_crop(image_buffer, bbox, image_size) - image = _normalize(image) - image = tf.image.random_flip_left_right(image) - else: - image = _decode_and_center_crop(image_buffer, image_size) - image = _normalize(image) - image = tf.reshape(image, [image_size, image_size, 3]) - return image - - -def imagenet_parser(value, image_size, is_training): - """Parse an ImageNet record from a serialized string Tensor. - - Args: - value: encoded example. - image_size: size of the output image. - is_training: if True then do training preprocessing, - otherwise do eval preprocessing. - - Returns: - image: tensor with the image. - label: true label of the image. 
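A worked example of the 0.875 center-crop window computed above, for a hypothetical 640x480 JPEG:

```
# Hypothetical 640x480 input; the crop covers 87.5% of the shorter side.
image_height, image_width = 480, 640
padded_center_crop_size = int(0.875 * min(image_height, image_width))  # 420
offset_height = (image_height - padded_center_crop_size + 1) // 2      # 30
offset_width = (image_width - padded_center_crop_size + 1) // 2        # 110
# crop_window = [30, 110, 420, 420], then resized to image_size x image_size.
print(padded_center_crop_size, offset_height, offset_width)
```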
- """ - keys_to_features = { - 'image/encoded': - tf.FixedLenFeature((), tf.string, ''), - 'image/format': - tf.FixedLenFeature((), tf.string, 'jpeg'), - 'image/class/label': - tf.FixedLenFeature([], tf.int64, -1), - 'image/class/text': - tf.FixedLenFeature([], tf.string, ''), - 'image/object/bbox/xmin': - tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymin': - tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/xmax': - tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymax': - tf.VarLenFeature(dtype=tf.float32), - 'image/object/class/label': - tf.VarLenFeature(dtype=tf.int64), - } - - parsed = tf.parse_single_example(value, keys_to_features) - - image_buffer = tf.reshape(parsed['image/encoded'], shape=[]) - - xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0) - ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0) - xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0) - ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0) - # Note that ordering is (y, x) - bbox = tf.concat([ymin, xmin, ymax, xmax], 0) - # Force the variable number of bounding boxes into the shape - # [1, num_boxes, coords]. - bbox = tf.expand_dims(bbox, 0) - bbox = tf.transpose(bbox, [0, 2, 1]) - - image = image_preprocessing( - image_buffer=image_buffer, - bbox=bbox, - image_size=image_size, - is_training=is_training - ) - - # Labels are in [1, 1000] range - label = tf.cast( - tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - - return image, label - - -def imagenet_input(split, batch_size, image_size, is_training): - """Returns ImageNet dataset. - - Args: - split: name of the split, "train" or "validation". - batch_size: size of the minibatch. - image_size: size of the one side of the image. Output images will be - resized to square shape image_size*image_size. - is_training: if True then training preprocessing is done, otherwise eval - preprocessing is done. - - Raises: - ValueError: if name of the split is incorrect. - - Returns: - Instance of tf.data.Dataset with the dataset. 
- """ - if split.lower().startswith('train'): - file_pattern = os.path.join(FLAGS.imagenet_data_dir, 'train-*') - elif split.lower().startswith('validation'): - file_pattern = os.path.join(FLAGS.imagenet_data_dir, 'validation-*') - else: - raise ValueError('Invalid split: %s' % split) - - dataset = tf.data.Dataset.list_files(file_pattern, shuffle=is_training) - - if is_training: - dataset = dataset.repeat() - - def fetch_dataset(filename): - return tf.data.TFRecordDataset(filename, buffer_size=8*1024*1024) - - # Read the data from disk in parallel - dataset = dataset.apply( - tf.data.experimental.parallel_interleave( - fetch_dataset, cycle_length=4, sloppy=True)) - dataset = dataset.shuffle(1024) - - # Parse, preprocess, and batch the data in parallel - dataset = dataset.apply( - tf.data.experimental.map_and_batch( - lambda value: imagenet_parser(value, image_size, is_training), - batch_size=batch_size, - num_parallel_batches=4, - drop_remainder=True)) - - def set_shapes(images, labels): - """Statically set the batch_size dimension.""" - images.set_shape(images.get_shape().merge_with( - tf.TensorShape([batch_size, None, None, None]))) - labels.set_shape(labels.get_shape().merge_with( - tf.TensorShape([batch_size]))) - return images, labels - - # Assign static batch size dimension - dataset = dataset.map(set_shapes) - - # Prefetch overlaps in-feed with training - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - return dataset - - -def num_examples_per_epoch(split): - """Returns the number of examples in the data set. - - Args: - split: name of the split, "train" or "validation". - - Raises: - ValueError: if split name is incorrect. - - Returns: - Number of example in the split. - """ - if split.lower().startswith('train'): - return 1281167 - elif split.lower().startswith('validation'): - return 50000 - else: - raise ValueError('Invalid split: %s' % split) diff --git a/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py b/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py deleted file mode 100644 index 6d216d53e..000000000 --- a/research/adversarial_logit_pairing/datasets/tiny_imagenet_input.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tiny imagenet input.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -from absl import flags -import tensorflow as tf - -FLAGS = flags.FLAGS - - -flags.DEFINE_string('tiny_imagenet_data_dir', None, - 'Directory with Tiny Imagenet dataset in TFRecord format.') - - -def tiny_imagenet_parser(value, image_size, is_training): - """Parses tiny imagenet example. - - Args: - value: encoded example. - image_size: size of the image. - is_training: if True then do training preprocessing (which includes - random cropping), otherwise do eval preprocessing. 
- - Returns: - image: tensor with the image. - label: true label of the image. - """ - keys_to_features = { - 'image/encoded': tf.FixedLenFeature((), tf.string, ''), - 'label/tiny_imagenet': tf.FixedLenFeature([], tf.int64, -1), - } - - parsed = tf.parse_single_example(value, keys_to_features) - - image_buffer = tf.reshape(parsed['image/encoded'], shape=[]) - image = tf.image.decode_image(image_buffer, channels=3) - image = tf.image.convert_image_dtype( - image, dtype=tf.float32) - - # Crop image - if is_training: - bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box( - tf.shape(image), - bounding_boxes=tf.constant([0.0, 0.0, 1.0, 1.0], - dtype=tf.float32, - shape=[1, 1, 4]), - min_object_covered=0.5, - aspect_ratio_range=[0.75, 1.33], - area_range=[0.5, 1.0], - max_attempts=20, - use_image_if_no_bounding_boxes=True) - image = tf.slice(image, bbox_begin, bbox_size) - - # resize image - image = tf.image.resize_bicubic([image], [image_size, image_size])[0] - - # Rescale image to [-1, 1] range. - image = tf.multiply(tf.subtract(image, 0.5), 2.0) - - image = tf.reshape(image, [image_size, image_size, 3]) - - # Labels are in [0, 199] range - label = tf.cast( - tf.reshape(parsed['label/tiny_imagenet'], shape=[]), dtype=tf.int32) - - return image, label - - -def tiny_imagenet_input(split, batch_size, image_size, is_training): - """Returns Tiny Imagenet Dataset. - - Args: - split: name of the split, "train" or "validation". - batch_size: size of the minibatch. - image_size: size of the one side of the image. Output images will be - resized to square shape image_size*image_size. - is_training: if True then training preprocessing is done, otherwise eval - preprocessing is done.instance of tf.data.Dataset with the dataset. - - Raises: - ValueError: if name of the split is incorrect. - - Returns: - Instance of tf.data.Dataset with the dataset. - """ - if split.lower().startswith('train'): - filepath = os.path.join(FLAGS.tiny_imagenet_data_dir, 'train.tfrecord') - elif split.lower().startswith('validation'): - filepath = os.path.join(FLAGS.tiny_imagenet_data_dir, 'validation.tfrecord') - else: - raise ValueError('Invalid split: %s' % split) - - dataset = tf.data.TFRecordDataset(filepath, buffer_size=8*1024*1024) - - if is_training: - dataset = dataset.shuffle(10000) - dataset = dataset.repeat() - - dataset = dataset.apply( - tf.data.experimental.map_and_batch( - lambda value: tiny_imagenet_parser(value, image_size, is_training), - batch_size=batch_size, - num_parallel_batches=4, - drop_remainder=True)) - - def set_shapes(images, labels): - """Statically set the batch_size dimension.""" - images.set_shape(images.get_shape().merge_with( - tf.TensorShape([batch_size, None, None, None]))) - labels.set_shape(labels.get_shape().merge_with( - tf.TensorShape([batch_size]))) - return images, labels - - # Assign static batch size dimension - dataset = dataset.map(set_shapes) - - dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) - - return dataset - - -def num_examples_per_epoch(split): - """Returns the number of examples in the data set. - - Args: - split: name of the split, "train" or "validation". - - Raises: - ValueError: if split name is incorrect. - - Returns: - Number of example in the split. 
- """ - if split.lower().startswith('train'): - return 100000 - elif split.lower().startswith('validation'): - return 10000 - else: - raise ValueError('Invalid split: %s' % split) diff --git a/research/adversarial_logit_pairing/eval.py b/research/adversarial_logit_pairing/eval.py deleted file mode 100644 index 504cc0b0b..000000000 --- a/research/adversarial_logit_pairing/eval.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Program which runs evaluation of Imagenet 64x64 and TinyImagenet models.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from absl import app -from absl import flags - -import tensorflow as tf - -import adversarial_attack -import model_lib -from datasets import dataset_factory - -FLAGS = flags.FLAGS - - -flags.DEFINE_string('train_dir', None, - 'Training directory. If specified then this program ' - 'runs in continuous evaluation mode.') - -flags.DEFINE_string('checkpoint_path', None, - 'Path to the file with checkpoint. If specified then ' - 'this program evaluates only provided checkpoint one time.') - -flags.DEFINE_string('output_file', None, - 'Name of output file. 
Used only in single evaluation mode.') - -flags.DEFINE_string('eval_name', 'default', 'Name for eval subdirectory.') - -flags.DEFINE_string('master', '', 'Tensorflow master.') - -flags.DEFINE_string('model_name', 'resnet_v2_50', 'Name of the model.') - -flags.DEFINE_string('adv_method', 'clean', - 'Method which is used to generate adversarial examples.') - -flags.DEFINE_string('dataset', 'imagenet', - 'Dataset: "tiny_imagenet" or "imagenet".') - -flags.DEFINE_integer('dataset_image_size', 64, - 'Size of the images in the dataset.') - -flags.DEFINE_string('hparams', '', 'Hyper parameters.') - -flags.DEFINE_string('split_name', 'validation', 'Name of the split.') - -flags.DEFINE_float('moving_average_decay', 0.9999, - 'The decay to use for the moving average.') - -flags.DEFINE_integer('eval_interval_secs', 120, - 'The frequency, in seconds, with which evaluation is run.') - -flags.DEFINE_integer( - 'num_examples', -1, - 'If positive - maximum number of example to use for evaluation.') - -flags.DEFINE_bool('eval_once', False, - 'If true then evaluate model only once.') - -flags.DEFINE_string('trainable_scopes', None, - 'If set then it defines list of variable scopes for ' - 'trainable variables.') - - -def main(_): - if not FLAGS.train_dir and not FLAGS.checkpoint_path: - print('Either --train_dir or --checkpoint_path flags has to be provided.') - if FLAGS.train_dir and FLAGS.checkpoint_path: - print('Only one of --train_dir or --checkpoint_path should be provided.') - params = model_lib.default_hparams() - params.parse(FLAGS.hparams) - tf.logging.info('User provided hparams: %s', FLAGS.hparams) - tf.logging.info('All hyper parameters: %s', params) - batch_size = params.eval_batch_size - graph = tf.Graph() - with graph.as_default(): - # dataset - dataset, num_examples, num_classes, bounds = dataset_factory.get_dataset( - FLAGS.dataset, - FLAGS.split_name, - batch_size, - FLAGS.dataset_image_size, - is_training=False) - dataset_iterator = dataset.make_one_shot_iterator() - images, labels = dataset_iterator.get_next() - if FLAGS.num_examples > 0: - num_examples = min(num_examples, FLAGS.num_examples) - - # setup model - global_step = tf.train.get_or_create_global_step() - model_fn_two_args = model_lib.get_model(FLAGS.model_name, num_classes) - model_fn = lambda x: model_fn_two_args(x, is_training=False) - if not FLAGS.adv_method or FLAGS.adv_method == 'clean': - logits = model_fn(images) - else: - adv_examples = adversarial_attack.generate_adversarial_examples( - images, bounds, model_fn, FLAGS.adv_method) - logits = model_fn(adv_examples) - - # update trainable variables if fine tuning is used - model_lib.filter_trainable_variables(FLAGS.trainable_scopes) - - # Setup the moving averages - if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0): - variable_averages = tf.train.ExponentialMovingAverage( - FLAGS.moving_average_decay, global_step) - variables_to_restore = variable_averages.variables_to_restore( - tf.contrib.framework.get_model_variables()) - variables_to_restore[global_step.op.name] = global_step - else: - variables_to_restore = tf.contrib.framework.get_variables_to_restore() - - # Setup evaluation metric - with tf.name_scope('Eval'): - names_to_values, names_to_updates = ( - tf.contrib.metrics.aggregate_metric_map({ - 'Accuracy': tf.metrics.accuracy(labels, tf.argmax(logits, 1)), - 'Top5': tf.metrics.recall_at_k(tf.to_int64(labels), logits, 5) - })) - - for name, value in names_to_values.iteritems(): - tf.summary.scalar(name, value) - - # Run evaluation - num_batches = 
int(num_examples / batch_size) - if FLAGS.train_dir: - output_dir = os.path.join(FLAGS.train_dir, FLAGS.eval_name) - if not tf.gfile.Exists(output_dir): - tf.gfile.MakeDirs(output_dir) - tf.contrib.training.evaluate_repeatedly( - FLAGS.train_dir, - master=FLAGS.master, - scaffold=tf.train.Scaffold( - saver=tf.train.Saver(variables_to_restore)), - eval_ops=names_to_updates.values(), - eval_interval_secs=FLAGS.eval_interval_secs, - hooks=[ - tf.contrib.training.StopAfterNEvalsHook(num_batches), - tf.contrib.training.SummaryAtEndHook(output_dir), - tf.train.LoggingTensorHook(names_to_values, at_end=True), - ], - max_number_of_evaluations=1 if FLAGS.eval_once else None) - else: - result = tf.contrib.training.evaluate_once( - FLAGS.checkpoint_path, - master=FLAGS.master, - scaffold=tf.train.Scaffold( - saver=tf.train.Saver(variables_to_restore)), - eval_ops=names_to_updates.values(), - final_ops=names_to_values, - hooks=[ - tf.contrib.training.StopAfterNEvalsHook(num_batches), - tf.train.LoggingTensorHook(names_to_values, at_end=True), - ]) - if FLAGS.output_file: - with tf.gfile.Open(FLAGS.output_file, 'a') as f: - f.write('%s,%.3f,%.3f\n' - % (FLAGS.eval_name, result['Accuracy'], result['Top5'])) - - -if __name__ == '__main__': - app.run(main) diff --git a/research/adversarial_logit_pairing/model_lib.py b/research/adversarial_logit_pairing/model_lib.py deleted file mode 100644 index 1499a378e..000000000 --- a/research/adversarial_logit_pairing/model_lib.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Library with common functions for training and eval.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import six - -import tensorflow as tf - -from tensorflow.contrib.slim.nets import resnet_v2 - - -def default_hparams(): - """Returns default hyperparameters.""" - return tf.contrib.training.HParams( - # Batch size for training and evaluation. - batch_size=32, - eval_batch_size=50, - - # General training parameters. - weight_decay=0.0001, - label_smoothing=0.1, - - # Parameters of the adversarial training. - train_adv_method='clean', # adversarial training method - train_lp_weight=0.0, # Weight of adversarial logit pairing loss - - # Parameters of the optimizer. - optimizer='rms', # possible values are: 'rms', 'momentum', 'adam' - momentum=0.9, # momentum - rmsprop_decay=0.9, # Decay term for RMSProp - rmsprop_epsilon=1.0, # Epsilon term for RMSProp - - # Parameters of learning rate schedule. 
- lr_schedule='exp_decay', # Possible values: 'exp_decay', 'step', 'fixed' - learning_rate=0.045, - lr_decay_factor=0.94, # Learning exponential decay - lr_num_epochs_per_decay=2.0, # Number of epochs per lr decay - lr_list=[1.0 / 6, 2.0 / 6, 3.0 / 6, - 4.0 / 6, 5.0 / 6, 1.0, 0.1, 0.01, - 0.001, 0.0001], - lr_decay_epochs=[1, 2, 3, 4, 5, 30, 60, 80, - 90]) - - -def get_lr_schedule(hparams, examples_per_epoch, replicas_to_aggregate=1): - """Returns TensorFlow op which compute learning rate. - - Args: - hparams: hyper parameters. - examples_per_epoch: number of training examples per epoch. - replicas_to_aggregate: number of training replicas running in parallel. - - Raises: - ValueError: if learning rate schedule specified in hparams is incorrect. - - Returns: - learning_rate: tensor with learning rate. - steps_per_epoch: number of training steps per epoch. - """ - global_step = tf.train.get_or_create_global_step() - steps_per_epoch = float(examples_per_epoch) / float(hparams.batch_size) - if replicas_to_aggregate > 0: - steps_per_epoch /= replicas_to_aggregate - - if hparams.lr_schedule == 'exp_decay': - decay_steps = long(steps_per_epoch * hparams.lr_num_epochs_per_decay) - learning_rate = tf.train.exponential_decay( - hparams.learning_rate, - global_step, - decay_steps, - hparams.lr_decay_factor, - staircase=True) - elif hparams.lr_schedule == 'step': - lr_decay_steps = [long(epoch * steps_per_epoch) - for epoch in hparams.lr_decay_epochs] - learning_rate = tf.train.piecewise_constant( - global_step, lr_decay_steps, hparams.lr_list) - elif hparams.lr_schedule == 'fixed': - learning_rate = hparams.learning_rate - else: - raise ValueError('Invalid value of lr_schedule: %s' % hparams.lr_schedule) - - if replicas_to_aggregate > 0: - learning_rate *= replicas_to_aggregate - - return learning_rate, steps_per_epoch - - -def get_optimizer(hparams, learning_rate): - """Returns optimizer. - - Args: - hparams: hyper parameters. - learning_rate: learning rate tensor. - - Raises: - ValueError: if type of optimizer specified in hparams is incorrect. - - Returns: - Instance of optimizer class. - """ - if hparams.optimizer == 'rms': - optimizer = tf.train.RMSPropOptimizer(learning_rate, - hparams.rmsprop_decay, - hparams.momentum, - hparams.rmsprop_epsilon) - elif hparams.optimizer == 'momentum': - optimizer = tf.train.MomentumOptimizer(learning_rate, - hparams.momentum) - elif hparams.optimizer == 'adam': - optimizer = tf.train.AdamOptimizer(learning_rate) - else: - raise ValueError('Invalid value of optimizer: %s' % hparams.optimizer) - return optimizer - - -RESNET_MODELS = {'resnet_v2_50': resnet_v2.resnet_v2_50} - - -def get_model(model_name, num_classes): - """Returns function which creates model. - - Args: - model_name: Name of the model. - num_classes: Number of classes. - - Raises: - ValueError: If model_name is invalid. - - Returns: - Function, which creates model when called. - """ - if model_name.startswith('resnet'): - def resnet_model(images, is_training, reuse=tf.AUTO_REUSE): - with tf.contrib.framework.arg_scope(resnet_v2.resnet_arg_scope()): - resnet_fn = RESNET_MODELS[model_name] - logits, _ = resnet_fn(images, num_classes, is_training=is_training, - reuse=reuse) - logits = tf.reshape(logits, [-1, num_classes]) - return logits - return resnet_model - else: - raise ValueError('Invalid model: %s' % model_name) - - -def filter_trainable_variables(trainable_scopes): - """Keep only trainable variables which are prefixed with given scopes. 
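Plugging the default hyperparameters above into the 'exp_decay' branch of get_lr_schedule, with the ImageNet train-split size used elsewhere in this directory and a hypothetical global step:

```
# Worked example of the 'exp_decay' schedule with the default hparams above.
examples_per_epoch = 1281167   # ImageNet train split
batch_size = 32                # default batch_size hparam
steps_per_epoch = examples_per_epoch / batch_size  # ~40036 steps
decay_steps = int(steps_per_epoch * 2.0)           # lr_num_epochs_per_decay
step = 100000                                      # hypothetical global step
lr = 0.045 * 0.94 ** (step // decay_steps)         # staircase=True
print(decay_steps, round(lr, 5))  # 80072 0.0423
```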
- - Args: - trainable_scopes: either list of trainable scopes or string with comma - separated list of trainable scopes. - - This function removes all variables which are not prefixed with given - trainable_scopes from collection of trainable variables. - Useful during network fine tuning, when you only need to train subset of - variables. - """ - if not trainable_scopes: - return - if isinstance(trainable_scopes, six.string_types): - trainable_scopes = [scope.strip() for scope in trainable_scopes.split(',')] - trainable_scopes = {scope for scope in trainable_scopes if scope} - if not trainable_scopes: - return - trainable_collection = tf.get_collection_ref( - tf.GraphKeys.TRAINABLE_VARIABLES) - non_trainable_vars = [ - v for v in trainable_collection - if not any([v.op.name.startswith(s) for s in trainable_scopes]) - ] - for v in non_trainable_vars: - trainable_collection.remove(v) diff --git a/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py b/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py deleted file mode 100644 index 4fdccc320..000000000 --- a/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts Tiny Imagenet dataset into TFRecord format. - -As an output this program generates following files in TFRecord format: -- train.tfrecord -- validation.tfrecord -- test.tfrecord - -Generated train and validation files will contain tf.Example entries with -following features: -- image/encoded - encoded image -- image/format - image format -- label/wnid - label WordNet ID -- label/imagenet - imagenet label [1 ... 1000] -- label/tiny_imagenet - tiny imagenet label [0 ... 199] -- bbox/xmin -- bbox/ymin -- bbox/xmax -- bbox/ymax - -Test file will contain entries with 'image/encoded' and 'image/format' features. 
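The two label spaces above differ by a background-class offset; a minimal sketch of the WordNet-ID mapping the converter (below) builds, using sample wnids in place of the real wnids.txt contents:

```
# Sample Tiny ImageNet WordNet IDs; the real list comes from wnids.txt.
wnids = ['n01443537', 'n01629819', 'n01641577']

# background_class=False (tiny imagenet labels): ids start at 0.
tiny_map = {wnid: i for i, wnid in enumerate(wnids)}

# background_class=True (full imagenet labels): ids start at 1, reserving 0.
imagenet_map = {wnid: i + 1 for i, wnid in enumerate(wnids)}
print(tiny_map['n01629819'], imagenet_map['n01629819'])  # 1 2
```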
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple -import os -import random - -from absl import app -from absl import flags -from absl import logging - -import pandas as pd - -import tensorflow as tf - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('input_dir', '', 'Input directory') -flags.DEFINE_string('output_dir', '', 'Output directory') - -flags.DEFINE_string('imagenet_synsets_path', '', - 'Optional path to /imagenet_lsvrc_2015_synsets.txt') - - -ImageMetadata = namedtuple('ImageMetadata', ['label', 'x1', 'y1', 'x2', 'y2']) - - -class WnIdToNodeIdConverter(object): - """Converts WordNet IDs to numerical labels.""" - - def __init__(self, wnids_path, background_class): - self._wnid_to_node_id = {} - self._node_id_to_wnid = {} - with tf.gfile.Open(wnids_path) as f: - wnids_sequence = [wnid.strip() for wnid in f.readlines() if wnid.strip()] - node_id_offset = 1 if background_class else 0 - for i, label in enumerate(wnids_sequence): - self._wnid_to_node_id[label] = i + node_id_offset - self._node_id_to_wnid[i + node_id_offset] = label - - def to_node_id(self, wnid): - return self._wnid_to_node_id[wnid] - - def to_wnid(self, node_id): - return self._node_id_to_wnid[node_id] - - def all_wnids(self): - return self._wnid_to_node_id.keys() - - -def read_tiny_imagenet_annotations(annotations_filename, - images_dir, - one_label=None): - """Reads one file with Tiny Imagenet annotations.""" - result = [] - if one_label: - column_names = ['filename', 'x1', 'y1', 'x2', 'y2'] - else: - column_names = ['filename', 'label', 'x1', 'y1', 'x2', 'y2'] - with tf.gfile.Open(annotations_filename) as f: - data = pd.read_csv(f, sep='\t', names=column_names) - for row in data.itertuples(): - label = one_label if one_label else getattr(row, 'label') - full_filename = os.path.join(images_dir, getattr(row, 'filename')) - result.append((full_filename, - ImageMetadata(label=label, - x1=getattr(row, 'x1'), - y1=getattr(row, 'y1'), - x2=getattr(row, 'x2'), - y2=getattr(row, 'y2')))) - return result - - -def read_validation_annotations(validation_dir): - """Reads validation data annotations.""" - return read_tiny_imagenet_annotations( - os.path.join(validation_dir, 'val_annotations.txt'), - os.path.join(validation_dir, 'images')) - - -def read_training_annotations(training_dir): - """Reads training data annotations.""" - result = [] - sub_dirs = tf.gfile.ListDirectory(training_dir) - for sub_dir in sub_dirs: - if not sub_dir.startswith('n'): - logging.warning('Found non-class directory in training dir: %s', sub_dir) - continue - sub_dir_results = read_tiny_imagenet_annotations( - os.path.join(training_dir, sub_dir, sub_dir + '_boxes.txt'), - os.path.join(training_dir, sub_dir, 'images'), - one_label=sub_dir) - result.extend(sub_dir_results) - return result - - -def read_test_annotations(test_dir): - """Reads test data annotations.""" - files = tf.gfile.ListDirectory(os.path.join(test_dir, 'images')) - return [(os.path.join(test_dir, 'images', f), None) - for f in files if f.endswith('.JPEG')] - - -def get_image_format(filename): - """Returns image format from filename.""" - filename = filename.lower() - if filename.endswith('jpeg') or filename.endswith('jpg'): - return 'jpeg' - elif filename.endswith('png'): - return 'png' - else: - raise ValueError('Unrecognized file format: %s' % filename) - - -class TinyImagenetWriter(object): - """Helper class which writes Tiny Imagenet dataset into TFRecord file.""" - - def 
__init__(self, tiny_imagenet_wnid_conveter, imagenet_wnid_converter): - self.tiny_imagenet_wnid_conveter = tiny_imagenet_wnid_conveter - self.imagenet_wnid_converter = imagenet_wnid_converter - - def write_tf_record(self, - annotations, - output_file): - """Generates TFRecord file from given list of annotations.""" - with tf.python_io.TFRecordWriter(output_file) as writer: - for image_filename, image_metadata in annotations: - with tf.gfile.Open(image_filename) as f: - image_buffer = f.read() - image_format = get_image_format(image_filename) - features = { - 'image/encoded': tf.train.Feature( - bytes_list=tf.train.BytesList(value=[image_buffer])), - 'image/format': tf.train.Feature( - bytes_list=tf.train.BytesList(value=[image_format])) - } - if image_metadata: - # bounding box features - features['bbox/xmin'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.x1])) - features['bbox/ymin'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.y1])) - features['bbox/xmax'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.x2])) - features['bbox/ymax'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.y2])) - # tiny imagenet label, from [0, 200) iterval - tiny_imagenet_label = self.tiny_imagenet_wnid_conveter.to_node_id( - image_metadata.label) - features['label/wnid'] = tf.train.Feature( - bytes_list=tf.train.BytesList(value=image_metadata.label)) - features['label/tiny_imagenet'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[tiny_imagenet_label])) - # full imagenet label, from [1, 1001) interval - if self.imagenet_wnid_converter: - imagenet_label = self.imagenet_wnid_converter.to_node_id( - image_metadata.label) - features['label/imagenet'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[imagenet_label])) - example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(example.SerializeToString()) - - -def main(_): - assert FLAGS.input_dir, 'Input directory must be provided' - assert FLAGS.output_dir, 'Output directory must be provided' - - # Create WordNet ID conveters for tiny imagenet and possibly for imagenet - tiny_imagenet_wnid_conveter = WnIdToNodeIdConverter( - os.path.join(FLAGS.input_dir, 'wnids.txt'), - background_class=False) - if FLAGS.imagenet_synsets_path: - imagenet_wnid_converter = WnIdToNodeIdConverter(FLAGS.imagenet_synsets_path, - background_class=True) - else: - imagenet_wnid_converter = None - - # read tiny imagenet annotations - train_annotations = read_training_annotations( - os.path.join(FLAGS.input_dir, 'train')) - random.shuffle(train_annotations) - val_annotations = read_validation_annotations( - os.path.join(FLAGS.input_dir, 'val')) - test_filenames = read_test_annotations(os.path.join(FLAGS.input_dir, 'test')) - - # Generate TFRecord files - writer = TinyImagenetWriter(tiny_imagenet_wnid_conveter, - imagenet_wnid_converter) - tf.logging.info('Converting %d training images', len(train_annotations)) - writer.write_tf_record(train_annotations, - os.path.join(FLAGS.output_dir, 'train.tfrecord')) - tf.logging.info('Converting %d validation images ', len(val_annotations)) - writer.write_tf_record(val_annotations, - os.path.join(FLAGS.output_dir, 'validation.tfrecord')) - tf.logging.info('Converting %d test images', len(test_filenames)) - writer.write_tf_record(test_filenames, - os.path.join(FLAGS.output_dir, 'test.tfrecord')) - tf.logging.info('All files are converted') - - -if __name__ == '__main__': - app.run(main) diff --git 
a/research/adversarial_logit_pairing/train.py b/research/adversarial_logit_pairing/train.py deleted file mode 100644 index dd20969f8..000000000 --- a/research/adversarial_logit_pairing/train.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Program which train models.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import app -from absl import flags - -import tensorflow as tf - -import adversarial_attack -import model_lib -from datasets import dataset_factory - -FLAGS = flags.FLAGS - - -flags.DEFINE_integer('max_steps', -1, 'Number of steps to stop at.') - -flags.DEFINE_string('output_dir', None, - 'Training directory where checkpoints will be saved.') - -flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers.') - -flags.DEFINE_integer('task', 0, 'Task ID for running distributed training.') - -flags.DEFINE_string('master', '', 'Tensorflow master.') - -flags.DEFINE_string('model_name', 'resnet_v2_50', 'Name of the model.') - -flags.DEFINE_string('dataset', 'imagenet', - 'Dataset: "tiny_imagenet" or "imagenet".') - -flags.DEFINE_integer('dataset_image_size', 64, - 'Size of the images in the dataset.') - -flags.DEFINE_integer('num_summary_images', 3, - 'Number of images to display in Tensorboard.') - -flags.DEFINE_integer( - 'save_summaries_steps', 100, - 'The frequency with which summaries are saved, in steps.') - -flags.DEFINE_integer( - 'save_summaries_secs', None, - 'The frequency with which summaries are saved, in seconds.') - -flags.DEFINE_integer( - 'save_model_steps', 500, - 'The frequency with which the model is saved, in steps.') - -flags.DEFINE_string('hparams', '', 'Hyper parameters.') - -flags.DEFINE_integer('replicas_to_aggregate', 1, - 'Number of gradients to collect before param updates.') - -flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.') - -flags.DEFINE_float('moving_average_decay', 0.9999, - 'The decay to use for the moving average.') - -# Flags to control fine tuning - -flags.DEFINE_string('finetune_checkpoint_path', None, - 'Path to checkpoint for fine tuning. 
' - 'If None then no fine tuning is done.') - -flags.DEFINE_string('finetune_exclude_pretrained_scopes', '', - 'Variable scopes to exclude when loading checkpoint for ' - 'fine tuning.') - -flags.DEFINE_string('finetune_trainable_scopes', None, - 'If set then it defines a list of variable scopes for ' - 'trainable variables.') - - -def _get_finetuning_init_fn(variable_averages): - """Returns an init function, used for fine tuning.""" - if not FLAGS.finetune_checkpoint_path: - return None - - if tf.train.latest_checkpoint(FLAGS.output_dir): - return None - - if tf.gfile.IsDirectory(FLAGS.finetune_checkpoint_path): - checkpoint_path = tf.train.latest_checkpoint(FLAGS.finetune_checkpoint_path) - else: - checkpoint_path = FLAGS.finetune_checkpoint_path - - if not checkpoint_path: - tf.logging.warning('Not doing fine tuning, cannot find checkpoint in %s', - FLAGS.finetune_checkpoint_path) - return None - - tf.logging.info('Fine-tuning from %s', checkpoint_path) - - if FLAGS.finetune_exclude_pretrained_scopes: - exclusions = { - scope.strip() - for scope in FLAGS.finetune_exclude_pretrained_scopes.split(',') - } - else: - exclusions = set() - - filtered_model_variables = [ - v for v in tf.contrib.framework.get_model_variables() - if not any([v.op.name.startswith(e) for e in exclusions]) - ] - - if variable_averages: - variables_to_restore = {} - for v in filtered_model_variables: - # variables_to_restore[variable_averages.average_name(v)] = v - if v in tf.trainable_variables(): - variables_to_restore[variable_averages.average_name(v)] = v - else: - variables_to_restore[v.op.name] = v - else: - variables_to_restore = {v.op.name: v for v in filtered_model_variables} - - assign_fn = tf.contrib.framework.assign_from_checkpoint_fn( - checkpoint_path, - variables_to_restore) - if assign_fn: - return lambda _, sess: assign_fn(sess) - else: - return None - - -def main(_): - assert FLAGS.output_dir, '--output_dir has to be provided' - if not tf.gfile.Exists(FLAGS.output_dir): - tf.gfile.MakeDirs(FLAGS.output_dir) - params = model_lib.default_hparams() - params.parse(FLAGS.hparams) - tf.logging.info('User provided hparams: %s', FLAGS.hparams) - tf.logging.info('All hyperparameters: %s', params) - batch_size = params.batch_size - graph = tf.Graph() - with graph.as_default(): - with tf.device(tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks)): - # dataset - dataset, examples_per_epoch, num_classes, bounds = ( - dataset_factory.get_dataset( - FLAGS.dataset, - 'train', - batch_size, - FLAGS.dataset_image_size, - is_training=True)) - dataset_iterator = dataset.make_one_shot_iterator() - images, labels = dataset_iterator.get_next() - one_hot_labels = tf.one_hot(labels, num_classes) - - # set up model - global_step = tf.train.get_or_create_global_step() - model_fn = model_lib.get_model(FLAGS.model_name, num_classes) - if params.train_adv_method == 'clean': - logits = model_fn(images, is_training=True) - adv_examples = None - else: - model_fn_eval_mode = lambda x: model_fn(x, is_training=False) - adv_examples = adversarial_attack.generate_adversarial_examples( - images, bounds, model_fn_eval_mode, params.train_adv_method) - all_examples = tf.concat([images, adv_examples], axis=0) - logits = model_fn(all_examples, is_training=True) - one_hot_labels = tf.concat([one_hot_labels, one_hot_labels], axis=0) - - # update trainable variables if fine tuning is used - model_lib.filter_trainable_variables( - FLAGS.finetune_trainable_scopes) - - # set up losses - total_loss = tf.losses.softmax_cross_entropy( -
onehot_labels=one_hot_labels, - logits=logits, - label_smoothing=params.label_smoothing) - tf.summary.scalar('loss_xent', total_loss) - - if params.train_lp_weight > 0: - images1, images2 = tf.split(logits, 2) - loss_lp = tf.losses.mean_squared_error( - images1, images2, weights=params.train_lp_weight) - tf.summary.scalar('loss_lp', loss_lp) - total_loss += loss_lp - - if params.weight_decay > 0: - loss_wd = ( - params.weight_decay - * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) - ) - tf.summary.scalar('loss_wd', loss_wd) - total_loss += loss_wd - - # Setup the moving averages: - if FLAGS.moving_average_decay and (FLAGS.moving_average_decay > 0): - with tf.name_scope('moving_average'): - moving_average_variables = tf.contrib.framework.get_model_variables() - variable_averages = tf.train.ExponentialMovingAverage( - FLAGS.moving_average_decay, global_step) - else: - moving_average_variables = None - variable_averages = None - - # set up optimizer and training op - learning_rate, steps_per_epoch = model_lib.get_lr_schedule( - params, examples_per_epoch, FLAGS.replicas_to_aggregate) - - optimizer = model_lib.get_optimizer(params, learning_rate) - - optimizer = tf.train.SyncReplicasOptimizer( - opt=optimizer, - replicas_to_aggregate=FLAGS.replicas_to_aggregate, - total_num_replicas=FLAGS.worker_replicas, - variable_averages=variable_averages, - variables_to_average=moving_average_variables) - - train_op = tf.contrib.training.create_train_op( - total_loss, optimizer, - update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - - tf.summary.image('images', images[0:FLAGS.num_summary_images]) - if adv_examples is not None: - tf.summary.image('adv_images', adv_examples[0:FLAGS.num_summary_images]) - tf.summary.scalar('total_loss', total_loss) - tf.summary.scalar('learning_rate', learning_rate) - tf.summary.scalar('current_epoch', - tf.to_double(global_step) / steps_per_epoch) - - # Training - is_chief = FLAGS.task == 0 - - scaffold = tf.train.Scaffold( - init_fn=_get_finetuning_init_fn(variable_averages)) - hooks = [ - tf.train.LoggingTensorHook({'total_loss': total_loss, - 'global_step': global_step}, - every_n_iter=1), - tf.train.NanTensorHook(total_loss), - ] - chief_only_hooks = [ - tf.train.SummarySaverHook(save_steps=FLAGS.save_summaries_steps, - save_secs=FLAGS.save_summaries_secs, - output_dir=FLAGS.output_dir, - scaffold=scaffold), - tf.train.CheckpointSaverHook(FLAGS.output_dir, - save_steps=FLAGS.save_model_steps, - scaffold=scaffold), - ] - - if FLAGS.max_steps > 0: - hooks.append( - tf.train.StopAtStepHook(last_step=FLAGS.max_steps)) - - # hook for sync replica training - hooks.append(optimizer.make_session_run_hook(is_chief)) - - with tf.train.MonitoredTrainingSession( - master=FLAGS.master, - is_chief=is_chief, - checkpoint_dir=FLAGS.output_dir, - scaffold=scaffold, - hooks=hooks, - chief_only_hooks=chief_only_hooks, - save_checkpoint_secs=None, - save_summaries_steps=None, - save_summaries_secs=None) as session: - while not session.should_stop(): - session.run([train_op]) - - -if __name__ == '__main__': - app.run(main) diff --git a/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py b/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py deleted file mode 100644 index 8d8ee0865..000000000 --- a/research/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import 
sklearn.preprocessing as prep -import tensorflow as tf -from tensorflow.examples.tutorials.mnist import input_data - -from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder - -mnist = input_data.read_data_sets('MNIST_data', one_hot=True) - - -def standard_scale(X_train, X_test): - preprocessor = prep.StandardScaler().fit(X_train) - X_train = preprocessor.transform(X_train) - X_test = preprocessor.transform(X_test) - return X_train, X_test - - -def get_random_block_from_data(data, batch_size): - start_index = np.random.randint(0, len(data) - batch_size) - return data[start_index:(start_index + batch_size)] - - -X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) - -n_samples = int(mnist.train.num_examples) -training_epochs = 20 -batch_size = 128 -display_step = 1 - -autoencoder = AdditiveGaussianNoiseAutoencoder( - n_input=784, - n_hidden=200, - transfer_function=tf.nn.softplus, - optimizer=tf.train.AdamOptimizer(learning_rate = 0.001), - scale=0.01) - -for epoch in range(training_epochs): - avg_cost = 0. - total_batch = int(n_samples / batch_size) - # Loop over all batches - for i in range(total_batch): - batch_xs = get_random_block_from_data(X_train, batch_size) - - # Fit training using batch data - cost = autoencoder.partial_fit(batch_xs) - # Compute average loss - avg_cost += cost / n_samples * batch_size - - # Display logs per epoch step - if epoch % display_step == 0: - print("Epoch:", '%d,' % (epoch + 1), - "Cost:", "{:.9f}".format(avg_cost)) - -print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/research/autoencoder/AutoencoderRunner.py b/research/autoencoder/AutoencoderRunner.py deleted file mode 100644 index 7f1ab2ecd..000000000 --- a/research/autoencoder/AutoencoderRunner.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import sklearn.preprocessing as prep -import tensorflow as tf -from tensorflow.examples.tutorials.mnist import input_data - -from autoencoder_models.Autoencoder import Autoencoder - -mnist = input_data.read_data_sets('MNIST_data', one_hot=True) - - -def standard_scale(X_train, X_test): - preprocessor = prep.StandardScaler().fit(X_train) - X_train = preprocessor.transform(X_train) - X_test = preprocessor.transform(X_test) - return X_train, X_test - - -def get_random_block_from_data(data, batch_size): - start_index = np.random.randint(0, len(data) - batch_size) - return data[start_index:(start_index + batch_size)] - - -X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) - -n_samples = int(mnist.train.num_examples) -training_epochs = 20 -batch_size = 128 -display_step = 1 - -autoencoder = Autoencoder(n_layers=[784, 200], - transfer_function = tf.nn.softplus, - optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)) - -for epoch in range(training_epochs): - avg_cost = 0. 
- total_batch = int(n_samples / batch_size) - # Loop over all batches - for i in range(total_batch): - batch_xs = get_random_block_from_data(X_train, batch_size) - - # Fit training using batch data - cost = autoencoder.partial_fit(batch_xs) - # Compute average loss - avg_cost += cost / n_samples * batch_size - - # Display logs per epoch step - if epoch % display_step == 0: - print("Epoch:", '%d,' % (epoch + 1), - "Cost:", "{:.9f}".format(avg_cost)) - -print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/research/autoencoder/MaskingNoiseAutoencoderRunner.py b/research/autoencoder/MaskingNoiseAutoencoderRunner.py deleted file mode 100644 index b776302e2..000000000 --- a/research/autoencoder/MaskingNoiseAutoencoderRunner.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import sklearn.preprocessing as prep -import tensorflow as tf -from tensorflow.examples.tutorials.mnist import input_data - -from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder - -mnist = input_data.read_data_sets('MNIST_data', one_hot=True) - - -def standard_scale(X_train, X_test): - preprocessor = prep.StandardScaler().fit(X_train) - X_train = preprocessor.transform(X_train) - X_test = preprocessor.transform(X_test) - return X_train, X_test - - -def get_random_block_from_data(data, batch_size): - start_index = np.random.randint(0, len(data) - batch_size) - return data[start_index:(start_index + batch_size)] - - -X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) - -n_samples = int(mnist.train.num_examples) -training_epochs = 100 -batch_size = 128 -display_step = 1 - -autoencoder = MaskingNoiseAutoencoder( - n_input=784, - n_hidden=200, - transfer_function=tf.nn.softplus, - optimizer=tf.train.AdamOptimizer(learning_rate=0.001), - dropout_probability=0.95) - -for epoch in range(training_epochs): - avg_cost = 0. 
- total_batch = int(n_samples / batch_size) - for i in range(total_batch): - batch_xs = get_random_block_from_data(X_train, batch_size) - - cost = autoencoder.partial_fit(batch_xs) - - avg_cost += cost / n_samples * batch_size - - if epoch % display_step == 0: - print("Epoch:", '%d,' % (epoch + 1), - "Cost:", "{:.9f}".format(avg_cost)) - -print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/research/autoencoder/README.md b/research/autoencoder/README.md deleted file mode 100644 index cba7b3b66..000000000 --- a/research/autoencoder/README.md +++ /dev/null @@ -1,3 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) diff --git a/research/autoencoder/VariationalAutoencoderRunner.py b/research/autoencoder/VariationalAutoencoderRunner.py deleted file mode 100644 index f5ce0045f..000000000 --- a/research/autoencoder/VariationalAutoencoderRunner.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import sklearn.preprocessing as prep -import tensorflow as tf -from tensorflow.examples.tutorials.mnist import input_data - -from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder - -mnist = input_data.read_data_sets('MNIST_data', one_hot=True) - - -def min_max_scale(X_train, X_test): - preprocessor = prep.MinMaxScaler().fit(X_train) - X_train = preprocessor.transform(X_train) - X_test = preprocessor.transform(X_test) - return X_train, X_test - - -def get_random_block_from_data(data, batch_size): - start_index = np.random.randint(0, len(data) - batch_size) - return data[start_index:(start_index + batch_size)] - - -X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images) - -n_samples = int(mnist.train.num_examples) -training_epochs = 20 -batch_size = 128 -display_step = 1 - -autoencoder = VariationalAutoencoder( - n_input=784, - n_hidden=200, - optimizer=tf.train.AdamOptimizer(learning_rate = 0.001)) - -for epoch in range(training_epochs): - avg_cost = 0. 
- total_batch = int(n_samples / batch_size) - # Loop over all batches - for i in range(total_batch): - batch_xs = get_random_block_from_data(X_train, batch_size) - - # Fit training using batch data - cost = autoencoder.partial_fit(batch_xs) - # Compute average loss - avg_cost += cost / n_samples * batch_size - - # Display logs per epoch step - if epoch % display_step == 0: - print("Epoch:", '%d,' % (epoch + 1), - "Cost:", "{:.9f}".format(avg_cost)) - -print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/research/autoencoder/__init__.py b/research/autoencoder/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/autoencoder/autoencoder_models/Autoencoder.py b/research/autoencoder/autoencoder_models/Autoencoder.py deleted file mode 100644 index 788a14642..000000000 --- a/research/autoencoder/autoencoder_models/Autoencoder.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np -import tensorflow as tf - - -class Autoencoder(object): - - def __init__(self, n_layers, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()): - self.n_layers = n_layers - self.transfer = transfer_function - - network_weights = self._initialize_weights() - self.weights = network_weights - - # model - self.x = tf.placeholder(tf.float32, [None, self.n_layers[0]]) - self.hidden_encode = [] - h = self.x - for layer in range(len(self.n_layers)-1): - h = self.transfer( - tf.add(tf.matmul(h, self.weights['encode'][layer]['w']), - self.weights['encode'][layer]['b'])) - self.hidden_encode.append(h) - - self.hidden_recon = [] - for layer in range(len(self.n_layers)-1): - h = self.transfer( - tf.add(tf.matmul(h, self.weights['recon'][layer]['w']), - self.weights['recon'][layer]['b'])) - self.hidden_recon.append(h) - self.reconstruction = self.hidden_recon[-1] - - # cost - self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) - self.optimizer = optimizer.minimize(self.cost) - - init = tf.global_variables_initializer() - self.sess = tf.Session() - self.sess.run(init) - - - def _initialize_weights(self): - all_weights = dict() - initializer = tf.contrib.layers.xavier_initializer() - # Encoding network weights - encoder_weights = [] - for layer in range(len(self.n_layers)-1): - w = tf.Variable( - initializer((self.n_layers[layer], self.n_layers[layer + 1]), - dtype=tf.float32)) - b = tf.Variable( - tf.zeros([self.n_layers[layer + 1]], dtype=tf.float32)) - encoder_weights.append({'w': w, 'b': b}) - # Recon network weights - recon_weights = [] - for layer in range(len(self.n_layers)-1, 0, -1): - w = tf.Variable( - initializer((self.n_layers[layer], self.n_layers[layer - 1]), - dtype=tf.float32)) - b = tf.Variable( - tf.zeros([self.n_layers[layer - 1]], dtype=tf.float32)) - recon_weights.append({'w': w, 'b': b}) - all_weights['encode'] = encoder_weights - all_weights['recon'] = recon_weights - return all_weights - - def partial_fit(self, X): - cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X}) - return cost - - def calc_total_cost(self, X): - return self.sess.run(self.cost, feed_dict={self.x: X}) - - def transform(self, X): - return self.sess.run(self.hidden_encode[-1], feed_dict={self.x: X}) - - def generate(self, hidden=None): - if hidden is None: - hidden = np.random.normal(size=self.weights['encode'][-1]['b']) - return self.sess.run(self.reconstruction, feed_dict={self.hidden_encode[-1]: hidden}) - - def reconstruct(self, X): - return self.sess.run(self.reconstruction, feed_dict={self.x: X}) - - def 
getWeights(self): - raise NotImplementedError - return self.sess.run(self.weights) - - def getBiases(self): - raise NotImplementedError - return self.sess.run(self.weights) - diff --git a/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py b/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py deleted file mode 100644 index 22b5dcb44..000000000 --- a/research/autoencoder/autoencoder_models/DenoisingAutoencoder.py +++ /dev/null @@ -1,129 +0,0 @@ -import tensorflow as tf - -class AdditiveGaussianNoiseAutoencoder(object): - def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(), - scale = 0.1): - self.n_input = n_input - self.n_hidden = n_hidden - self.transfer = transfer_function - self.scale = tf.placeholder(tf.float32) - self.training_scale = scale - network_weights = self._initialize_weights() - self.weights = network_weights - - # model - self.x = tf.placeholder(tf.float32, [None, self.n_input]) - self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)), - self.weights['w1']), - self.weights['b1'])) - self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2']) - - # cost - self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) - self.optimizer = optimizer.minimize(self.cost) - - init = tf.global_variables_initializer() - self.sess = tf.Session() - self.sess.run(init) - - def _initialize_weights(self): - all_weights = dict() - all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden], - initializer=tf.contrib.layers.xavier_initializer()) - all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32)) - all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32)) - all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32)) - return all_weights - - def partial_fit(self, X): - cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x: X, - self.scale: self.training_scale - }) - return cost - - def calc_total_cost(self, X): - return self.sess.run(self.cost, feed_dict = {self.x: X, - self.scale: self.training_scale - }) - - def transform(self, X): - return self.sess.run(self.hidden, feed_dict = {self.x: X, - self.scale: self.training_scale - }) - - def generate(self, hidden=None): - if hidden is None: - hidden = self.sess.run(tf.random_normal([1, self.n_hidden])) - return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden}) - - def reconstruct(self, X): - return self.sess.run(self.reconstruction, feed_dict = {self.x: X, - self.scale: self.training_scale - }) - - def getWeights(self): - return self.sess.run(self.weights['w1']) - - def getBiases(self): - return self.sess.run(self.weights['b1']) - - -class MaskingNoiseAutoencoder(object): - def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(), - dropout_probability = 0.95): - self.n_input = n_input - self.n_hidden = n_hidden - self.transfer = transfer_function - self.dropout_probability = dropout_probability - self.keep_prob = tf.placeholder(tf.float32) - - network_weights = self._initialize_weights() - self.weights = network_weights - - # model - self.x = tf.placeholder(tf.float32, [None, self.n_input]) - self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']), - self.weights['b1'])) - self.reconstruction = tf.add(tf.matmul(self.hidden, 
self.weights['w2']), self.weights['b2']) - - # cost - self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0)) - self.optimizer = optimizer.minimize(self.cost) - - init = tf.global_variables_initializer() - self.sess = tf.Session() - self.sess.run(init) - - def _initialize_weights(self): - all_weights = dict() - all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden], - initializer=tf.contrib.layers.xavier_initializer()) - all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32)) - all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32)) - all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32)) - return all_weights - - def partial_fit(self, X): - cost, opt = self.sess.run((self.cost, self.optimizer), - feed_dict = {self.x: X, self.keep_prob: self.dropout_probability}) - return cost - - def calc_total_cost(self, X): - return self.sess.run(self.cost, feed_dict = {self.x: X, self.keep_prob: 1.0}) - - def transform(self, X): - return self.sess.run(self.hidden, feed_dict = {self.x: X, self.keep_prob: 1.0}) - - def generate(self, hidden=None): - if hidden is None: - hidden = self.sess.run(tf.random_normal([1, self.n_hidden])) - return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden}) - - def reconstruct(self, X): - return self.sess.run(self.reconstruction, feed_dict = {self.x: X, self.keep_prob: 1.0}) - - def getWeights(self): - return self.sess.run(self.weights['w1']) - - def getBiases(self): - return self.sess.run(self.weights['b1']) diff --git a/research/autoencoder/autoencoder_models/VariationalAutoencoder.py b/research/autoencoder/autoencoder_models/VariationalAutoencoder.py deleted file mode 100644 index 3c2556ab8..000000000 --- a/research/autoencoder/autoencoder_models/VariationalAutoencoder.py +++ /dev/null @@ -1,70 +0,0 @@ -import tensorflow as tf - -class VariationalAutoencoder(object): - - def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()): - self.n_input = n_input - self.n_hidden = n_hidden - - network_weights = self._initialize_weights() - self.weights = network_weights - - # model - self.x = tf.placeholder(tf.float32, [None, self.n_input]) - self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']) - self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1']) - - # sample from gaussian distribution - eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32) - self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps)) - - self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2']) - - # cost - reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0), 1) - latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq - - tf.square(self.z_mean) - - tf.exp(self.z_log_sigma_sq), 1) - self.cost = tf.reduce_mean(reconstr_loss + latent_loss) - self.optimizer = optimizer.minimize(self.cost) - - init = tf.global_variables_initializer() - self.sess = tf.Session() - self.sess.run(init) - - def _initialize_weights(self): - all_weights = dict() - all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden], - initializer=tf.contrib.layers.xavier_initializer()) - all_weights['log_sigma_w1'] = tf.get_variable("log_sigma_w1", shape=[self.n_input, self.n_hidden], - initializer=tf.contrib.layers.xavier_initializer()) - 
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32)) - all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32)) - all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32)) - all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32)) - return all_weights - - def partial_fit(self, X): - cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X}) - return cost - - def calc_total_cost(self, X): - return self.sess.run(self.cost, feed_dict = {self.x: X}) - - def transform(self, X): - return self.sess.run(self.z_mean, feed_dict={self.x: X}) - - def generate(self, hidden = None): - if hidden is None: - hidden = self.sess.run(tf.random_normal([1, self.n_hidden])) - return self.sess.run(self.reconstruction, feed_dict={self.z: hidden}) - - def reconstruct(self, X): - return self.sess.run(self.reconstruction, feed_dict={self.x: X}) - - def getWeights(self): - return self.sess.run(self.weights['w1']) - - def getBiases(self): - return self.sess.run(self.weights['b1']) - diff --git a/research/autoencoder/autoencoder_models/__init__.py b/research/autoencoder/autoencoder_models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/brain_coder/README.md b/research/brain_coder/README.md deleted file mode 100644 index 3e2a1656d..000000000 --- a/research/brain_coder/README.md +++ /dev/null @@ -1,34 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Brain Coder - -*Authors: Daniel Abolafia, Mohammad Norouzi, Quoc Le* - -Brain Coder is an experimental environment for code synthesis. We provide code that reproduces the results from our recent paper [Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526). See single_task/README.md for details on how to build and reproduce those experiments. - -## Installation - -First install dependencies separately: - -* [bazel](https://docs.bazel.build/versions/master/install.html) -* [TensorFlow](https://www.tensorflow.org/install/) -* [scipy](https://www.scipy.org/install.html) -* [absl-py](https://github.com/abseil/abseil-py) - -Note: even if you already have these dependencies installed, make sure they are -up-to-date to avoid unnecessary debugging. - - -## Building - -Use bazel from the top-level repo directory. - -For example: - -```bash -bazel build single_task:run -``` - -View README.md files in subdirectories for more details.
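Among the files removed below is `common/bf.py`, a BrainF**k interpreter whose single entry point, `evaluate`, returns an `EvalResult` tuple. As a quick orientation before the full listing, here is a usage sketch; the expected values follow directly from the op semantics and the tests in the deleted files (e.g. `testBasicOps` and `testBase` in `common/bf_test.py`):

```python
from common import bf  # brain coder

# '+' increments the current cell and '.' appends it to the output,
# so this program emits 3, then 1, then 2.
result = bf.evaluate('+++.--.+.')
print(result.output, result.success)    # [3, 1, 2] True
print(result.failure_reason)            # 'success'

# With base=5, decrementing 1 twice underflows to base - 1 = 4.
print(bf.evaluate('+.--.', base=5).output)  # [1, 4]

# Unmatched braces are rejected up front when require_correct_syntax=True
# (the default), with no code executed.
bad = bf.evaluate('+++.]]]]')
print(bad.success, bad.failure_reason)  # False 'syntax-error'
```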
diff --git a/research/brain_coder/WORKSPACE b/research/brain_coder/WORKSPACE deleted file mode 100644 index 7c07b5325..000000000 --- a/research/brain_coder/WORKSPACE +++ /dev/null @@ -1,5 +0,0 @@ -git_repository( - name = "subpar", - remote = "https://github.com/google/subpar", - tag = "1.0.0", -) diff --git a/research/brain_coder/common/BUILD b/research/brain_coder/common/BUILD deleted file mode 100644 index b5f79c250..000000000 --- a/research/brain_coder/common/BUILD +++ /dev/null @@ -1,106 +0,0 @@ -licenses(["notice"]) - -package(default_visibility = [ - "//:__subpackages__", -]) - -py_library( - name = "bf", - srcs = ["bf.py"], -) - -py_test( - name = "bf_test", - srcs = ["bf_test.py"], - deps = [ - ":bf", - # tensorflow dep - ], -) - -py_library( - name = "config_lib", - srcs = ["config_lib.py"], -) - -py_test( - name = "config_lib_test", - srcs = ["config_lib_test.py"], - deps = [ - ":config_lib", - # tensorflow dep - ], -) - -py_library( - name = "reward", - srcs = ["reward.py"], -) - -py_test( - name = "reward_test", - srcs = ["reward_test.py"], - deps = [ - ":reward", - # numpy dep - # tensorflow dep - ], -) - -py_library( - name = "rollout", - srcs = ["rollout.py"], - deps = [ - ":utils", - # numpy dep - # scipy dep - ], -) - -py_test( - name = "rollout_test", - srcs = ["rollout_test.py"], - deps = [ - ":rollout", - # numpy dep - # tensorflow dep - ], -) - -py_library( - name = "schedules", - srcs = ["schedules.py"], - deps = [":config_lib"], -) - -py_test( - name = "schedules_test", - srcs = ["schedules_test.py"], - deps = [ - ":config_lib", - ":schedules", - # numpy dep - # tensorflow dep - ], -) - -py_library( - name = "utils", - srcs = ["utils.py"], - deps = [ - # file dep - # absl dep /logging - # numpy dep - # tensorflow dep - ], -) - -py_test( - name = "utils_test", - srcs = ["utils_test.py"], - deps = [ - ":utils", - # numpy dep - # tensorflow dep - ], -) diff --git a/research/brain_coder/common/bf.py b/research/brain_coder/common/bf.py deleted file mode 100644 index f049c4525..000000000 --- a/research/brain_coder/common/bf.py +++ /dev/null @@ -1,234 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""BrainF**k interpreter. 
- -Language info: https://en.wikipedia.org/wiki/Brainfuck - -Based on public implementation: -https://github.com/pocmo/Python-Brainfuck/blob/master/brainfuck.py -""" - -from collections import namedtuple -import time - - -EvalResult = namedtuple( - 'EvalResult', ['output', 'success', 'failure_reason', 'steps', 'time', - 'memory', 'program_trace']) - - -ExecutionSnapshot = namedtuple( - 'ExecutionSnapshot', - ['codeptr', 'codechar', 'memptr', 'memval', 'memory', 'next_input', - 'output_buffer']) - - -class Status(object): - SUCCESS = 'success' - TIMEOUT = 'timeout' - STEP_LIMIT = 'step-limit' - SYNTAX_ERROR = 'syntax-error' - - -CHARS = INT_TO_CHAR = ['>', '<', '+', '-', '[', ']', '.', ','] -CHAR_TO_INT = dict([(c, i) for i, c in enumerate(INT_TO_CHAR)]) - - -class LookAheadIterator(object): - """Same API as Python iterator, with additional peek method.""" - - def __init__(self, iterable): - self._it = iter(iterable) - self._current_element = None - self._done = False - self._preload_next() - - def _preload_next(self): - try: - self._current_element = next(self._it) - except StopIteration: - self._done = True - - def next(self): - if self._done: - raise StopIteration - element = self._current_element - self._preload_next() - return element - - def peek(self, default_value=None): - if self._done: - if default_value is None: - raise StopIteration - return default_value - return self._current_element - - -def buildbracemap(code): - """Build jump map. - - Args: - code: List or string of BF chars. - - Returns: - bracemap: dict mapping open and close brace positions in the code to their - destination jumps. Specifically, positions of matching open/close braces - if they exist. - correct_syntax: True if all braces match. False if there are unmatched - braces in the code. Even if there are unmatched braces, a bracemap will - be built, and unmatched braces will map to themselves. - """ - bracestack, bracemap = [], {} - - correct_syntax = True - for position, command in enumerate(code): - if command == '[': - bracestack.append(position) - if command == ']': - if not bracestack: # Unmatched closing brace. - bracemap[position] = position # Don't jump to any position. - correct_syntax = False - continue - start = bracestack.pop() - bracemap[start] = position - bracemap[position] = start - if bracestack: # Unmatched opening braces. - for pos in bracestack: - bracemap[pos] = pos # Don't jump to any position. - correct_syntax = False - return bracemap, correct_syntax - - -def evaluate(code, input_buffer=None, init_memory=None, base=256, timeout=1.0, - max_steps=None, require_correct_syntax=True, output_memory=False, - debug=False): - """Execute BF code. - - Args: - code: String or list of BF characters. Any character not in CHARS will be - ignored. - input_buffer: A list of ints which will be used as the program's input - stream. Each read op "," will read an int from this list. 0's will be - read once the end of the list is reached, or if no input buffer is - given. - init_memory: A list of ints. Memory for first k positions will be - initialized to this list (where k = len(init_memory)). Memory positions - are initialized to 0 by default. - base: Integer base for the memory. When a memory value is incremented to - `base` it will overflow to 0. When a memory value is decremented to -1 - it will underflow to `base` - 1. - timeout: Time limit for program execution in seconds. Set to None to - disable. - max_steps: Execution step limit.
An execution step is the execution of one - operation (code character), even if that op has been executed before. - Execution exits when this many steps are reached. Set to None to - disable. Disabled by default. - require_correct_syntax: If True, unmatched braces will cause `evaluate` to - return without executing the code. The failure reason will be - `Status.SYNTAX_ERROR`. If False, unmatched braces are ignored - and execution will continue. - output_memory: If True, the state of the memory at the end of execution is - returned. - debug: If True, then a full program trace will be returned. - - Returns: - EvalResult namedtuple containing - output: List of ints which were written out by the program with the "." - operation. - success: Boolean. Whether execution completed successfully. - failure_reason: One of the attributes of `Status`. Gives extra info - about why execution was not successful. - steps: Number of execution steps the program ran for. - time: Amount of time in seconds the program ran for. - memory: If `output_memory` is True, a list of memory cells up to the last - one written to. otherwise, None. - """ - input_iter = ( - LookAheadIterator(input_buffer) if input_buffer is not None - else LookAheadIterator([])) - - # Null memory value. This is the value of an empty memory. Also the value - # returned by the read operation when the input buffer is empty, or the - # end of the buffer is reached. - null_value = 0 - - code = list(code) - bracemap, correct_syntax = buildbracemap(code) # will modify code list - if require_correct_syntax and not correct_syntax: - return EvalResult([], False, Status.SYNTAX_ERROR, 0, 0.0, - [] if output_memory else None, [] if debug else None) - - output_buffer = [] - - codeptr, cellptr = 0, 0 - - cells = list(init_memory) if init_memory else [0] - - program_trace = [] if debug else None - success = True - reason = Status.SUCCESS - start_time = time.time() - steps = 0 - while codeptr < len(code): - command = code[codeptr] - - if debug: - # Add step to program trace. - program_trace.append(ExecutionSnapshot( - codeptr=codeptr, codechar=command, memptr=cellptr, - memval=cells[cellptr], memory=list(cells), - next_input=input_iter.peek(null_value), - output_buffer=list(output_buffer))) - - if command == '>': - cellptr += 1 - if cellptr == len(cells): cells.append(null_value) - - if command == '<': - cellptr = 0 if cellptr <= 0 else cellptr - 1 - - if command == '+': - cells[cellptr] = cells[cellptr] + 1 if cells[cellptr] < (base - 1) else 0 - - if command == '-': - cells[cellptr] = cells[cellptr] - 1 if cells[cellptr] > 0 else (base - 1) - - if command == '[' and cells[cellptr] == 0: codeptr = bracemap[codeptr] - if command == ']' and cells[cellptr] != 0: codeptr = bracemap[codeptr] - - if command == '.': output_buffer.append(cells[cellptr]) - if command == ',': cells[cellptr] = next(input_iter, null_value) - - codeptr += 1 - steps += 1 - - if timeout is not None and time.time() - start_time > timeout: - success = False - reason = Status.TIMEOUT - break - if max_steps is not None and steps >= max_steps: - success = False - reason = Status.STEP_LIMIT - break - - if debug: - # Add step to program trace. 
- command = code[codeptr] if codeptr < len(code) else '' - program_trace.append(ExecutionSnapshot( - codeptr=codeptr, codechar=command, memptr=cellptr, - memval=cells[cellptr], memory=list(cells), - next_input=input_iter.peek(null_value), - output_buffer=list(output_buffer))) - - return EvalResult( - output=output_buffer, - success=success, - failure_reason=reason, - steps=steps, - time=time.time() - start_time, - memory=cells if output_memory else None, - program_trace=program_trace) - - diff --git a/research/brain_coder/common/bf_test.py b/research/brain_coder/common/bf_test.py deleted file mode 100644 index 2cbf50560..000000000 --- a/research/brain_coder/common/bf_test.py +++ /dev/null @@ -1,137 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for common.bf.""" - -import tensorflow as tf - -from common import bf # brain coder - - -class BfTest(tf.test.TestCase): - - def assertCorrectOutput(self, target_output, eval_result): - self.assertEqual(target_output, eval_result.output) - self.assertTrue(eval_result.success) - self.assertEqual(bf.Status.SUCCESS, eval_result.failure_reason) - - def testBasicOps(self): - self.assertCorrectOutput( - [3, 1, 2], - bf.evaluate('+++.--.+.')) - self.assertCorrectOutput( - [1, 1, 2], - bf.evaluate('+.<.>++.')) - self.assertCorrectOutput( - [0], - bf.evaluate('+,.')) - self.assertCorrectOutput( - [ord(char) for char in 'Hello World!\n'], - bf.evaluate( - '>++++++++[-<+++++++++>]<.>>+>-[+]++>++>+++[>[->+++<<+++>]<<]>-----' - '.>->+++..+++.>-.<<+[>[+>+]>>]<--------------.>>.+++.------.-------' - '-.>+.>+.')) - - def testBase(self): - self.assertCorrectOutput( - [1, 4], - bf.evaluate('+.--.', base=5, input_buffer=[])) - - def testInputBuffer(self): - self.assertCorrectOutput( - [2, 3, 4], - bf.evaluate('>,[>,]<[.<]', input_buffer=[4, 3, 2])) - - def testBadChars(self): - self.assertCorrectOutput( - [2, 3, 4], - bf.evaluate('>,[>,]hello----.[[[[[>+.', - input_buffer=[], - base=10, - require_correct_syntax=False)) - - eval_result = bf.evaluate( - '+++.]]]]>----.[[[[[>+.', - input_buffer=[], - base=10, - require_correct_syntax=True) - self.assertEqual([], eval_result.output) - self.assertFalse(eval_result.success) - self.assertEqual(bf.Status.SYNTAX_ERROR, - eval_result.failure_reason) - - def testTimeout(self): - er = bf.evaluate('+.[].', base=5, input_buffer=[], timeout=0.1) - self.assertEqual( - ([1], False, bf.Status.TIMEOUT), - (er.output, er.success, er.failure_reason)) - self.assertTrue(0.07 < er.time < 0.21) - - er = bf.evaluate('+.[-].', base=5, input_buffer=[], timeout=0.1) - self.assertEqual( - ([1, 0], True, bf.Status.SUCCESS), - (er.output, er.success, er.failure_reason)) - self.assertTrue(er.time < 0.15) - - def testMaxSteps(self): - er = bf.evaluate('+.[].', base=5, input_buffer=[], timeout=None, - max_steps=100) - self.assertEqual( - ([1], False, bf.Status.STEP_LIMIT, 100), - (er.output, er.success, er.failure_reason, er.steps)) - - er = bf.evaluate('+.[-].', base=5, input_buffer=[], timeout=None, - max_steps=100) - self.assertEqual( - ([1, 0], True, bf.Status.SUCCESS), - (er.output, er.success, er.failure_reason)) - self.assertTrue(er.steps < 100) - - def testOutputMemory(self): - er = bf.evaluate('+>++>+++>++++.', base=256, input_buffer=[], - output_memory=True) - self.assertEqual( - ([4], True, bf.Status.SUCCESS), - (er.output, er.success, er.failure_reason)) - self.assertEqual([1, 2, 3, 4], er.memory) - - def testProgramTrace(self): - es = bf.ExecutionSnapshot 
- er = bf.evaluate(',[.>,].', base=256, input_buffer=[2, 1], debug=True) - self.assertEqual( - [es(codeptr=0, codechar=',', memptr=0, memval=0, memory=[0], - next_input=2, output_buffer=[]), - es(codeptr=1, codechar='[', memptr=0, memval=2, memory=[2], - next_input=1, output_buffer=[]), - es(codeptr=2, codechar='.', memptr=0, memval=2, memory=[2], - next_input=1, output_buffer=[]), - es(codeptr=3, codechar='>', memptr=0, memval=2, memory=[2], - next_input=1, output_buffer=[2]), - es(codeptr=4, codechar=',', memptr=1, memval=0, memory=[2, 0], - next_input=1, output_buffer=[2]), - es(codeptr=5, codechar=']', memptr=1, memval=1, memory=[2, 1], - next_input=0, output_buffer=[2]), - es(codeptr=2, codechar='.', memptr=1, memval=1, memory=[2, 1], - next_input=0, output_buffer=[2]), - es(codeptr=3, codechar='>', memptr=1, memval=1, memory=[2, 1], - next_input=0, output_buffer=[2, 1]), - es(codeptr=4, codechar=',', memptr=2, memval=0, memory=[2, 1, 0], - next_input=0, output_buffer=[2, 1]), - es(codeptr=5, codechar=']', memptr=2, memval=0, memory=[2, 1, 0], - next_input=0, output_buffer=[2, 1]), - es(codeptr=6, codechar='.', memptr=2, memval=0, memory=[2, 1, 0], - next_input=0, output_buffer=[2, 1]), - es(codeptr=7, codechar='', memptr=2, memval=0, memory=[2, 1, 0], - next_input=0, output_buffer=[2, 1, 0])], - er.program_trace) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/common/config_lib.py b/research/brain_coder/common/config_lib.py deleted file mode 100644 index 733fa202f..000000000 --- a/research/brain_coder/common/config_lib.py +++ /dev/null @@ -1,337 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Objects for storing configuration and passing config into binaries. - -Config class stores settings and hyperparameters for models, data, and anything -else that may be specific to a particular run. -""" - -import ast -import itertools -from six.moves import xrange - - -class Config(dict): - """Stores model configuration, hyperparameters, or dataset parameters.""" - - def __getattr__(self, attr): - return self[attr] - - def __setattr__(self, attr, value): - self[attr] = value - - def pretty_str(self, new_lines=True, indent=2, final_indent=0): - prefix = (' ' * indent) if new_lines else '' - final_prefix = (' ' * final_indent) if new_lines else '' - kv = ['%s%s=%s' % (prefix, k, - (repr(v) if not isinstance(v, Config) - else v.pretty_str(new_lines=new_lines, - indent=indent+2, - final_indent=indent))) - for k, v in self.items()] - if new_lines: - return 'Config(\n%s\n%s)' % (',\n'.join(kv), final_prefix) - else: - return 'Config(%s)' % ', '.join(kv) - - def _update_iterator(self, *args, **kwargs): - """Convert mixed input into an iterator over (key, value) tuples. - - Follows the dict.update call signature. - - Args: - *args: (Optional) Pass a dict or iterable of (key, value) 2-tuples as - an unnamed argument. Only one unnamed argument allowed. - **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the - argument name is the key and the argument value is the value. - - Returns: - An iterator over (key, value) tuples given in the input. - - Raises: - TypeError: If more than one unnamed argument is given. - """ - if len(args) > 1: - raise TypeError('Expected at most 1 unnamed arguments, got %d' - % len(args)) - obj = args[0] if args else dict() - if isinstance(obj, dict): - return itertools.chain(obj.items(), kwargs.items()) - # Assume obj is an iterable of 2-tuples. 
- return itertools.chain(obj, kwargs.items()) - - def make_default(self, keys=None): - """Convert OneOf objects into their default configs. - - Recursively calls into Config objects. - - Args: - keys: Iterable of key names to check. If None, all keys in self will be - used. - """ - if keys is None: - keys = self.keys() - for k in keys: - # Replace OneOf with its default value. - if isinstance(self[k], OneOf): - self[k] = self[k].default() - # Recursively call into all Config objects, even those that came from - # OneOf objects in the previous code line (for nested OneOf objects). - if isinstance(self[k], Config): - self[k].make_default() - - def update(self, *args, **kwargs): - """Same as dict.update except nested Config objects are updated. - - Args: - *args: (Optional) Pass a dict or list of (key, value) 2-tuples as unnamed - argument. - **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the - argument name is the key and the argument value is the value. - """ - key_set = set(self.keys()) - for k, v in self._update_iterator(*args, **kwargs): - if k in key_set: - key_set.remove(k) # This key is updated so exclude from make_default. - if k in self and isinstance(self[k], Config) and isinstance(v, dict): - self[k].update(v) - elif k in self and isinstance(self[k], OneOf) and isinstance(v, dict): - # Replace OneOf with the chosen config. - self[k] = self[k].update(v) - else: - self[k] = v - self.make_default(key_set) - - def strict_update(self, *args, **kwargs): - """Same as Config.update except keys and types are not allowed to change. - - If a given key is not already in this instance, an exception is raised. If a - given value does not have the same type as the existing value for the same - key, an exception is raised. Use this method to catch config mistakes. - - Args: - *args: (Optional) Pass a dict or list of (key, value) 2-tuples as unnamed - argument. - **kwargs: (Optional) Pass (key, value) pairs as named arguments, where the - argument name is the key and the argument value is the value. - - Raises: - TypeError: If more than one unnamed argument is given. - TypeError: If new value type does not match existing type. - KeyError: If a given key is not already defined in this instance. - """ - key_set = set(self.keys()) - for k, v in self._update_iterator(*args, **kwargs): - if k in self: - key_set.remove(k) # This key is updated so exclude from make_default. - if isinstance(self[k], Config): - if not isinstance(v, dict): - raise TypeError('dict required for Config value, got %s' % type(v)) - self[k].strict_update(v) - elif isinstance(self[k], OneOf): - if not isinstance(v, dict): - raise TypeError('dict required for OneOf value, got %s' % type(v)) - # Replace OneOf with the chosen config. - self[k] = self[k].strict_update(v) - else: - if not isinstance(v, type(self[k])): - raise TypeError('Expecting type %s for key %s, got type %s' - % (type(self[k]), k, type(v))) - self[k] = v - else: - raise KeyError( - 'Key %s does not exist. New key creation not allowed in ' - 'strict_update.' % k) - self.make_default(key_set) - - @staticmethod - def from_str(config_str): - """Inverse of Config.__str__.""" - parsed = ast.literal_eval(config_str) - assert isinstance(parsed, dict) - - def _make_config(dictionary): - for k, v in dictionary.items(): - if isinstance(v, dict): - dictionary[k] = _make_config(v) - return Config(**dictionary) - return _make_config(parsed) - - @staticmethod - def parse(key_val_string): - """Parse hyperparameter string into Config object. 
- - Format is 'key=val,key=val,...' - Values can be any python literal, or another Config object encoded as - 'c(key=val,key=val,...)'. - c(...) expressions can be arbitrarily nested. - - Example: - 'a=1,b=3e-5,c=[1,2,3],d="hello world",e={"a":1,"b":2},f=c(x=1,y=[10,20])' - - Args: - key_val_string: The hyperparameter string. - - Returns: - Config object parsed from the input string. - """ - if not key_val_string.strip(): - return Config() - def _pair_to_kv(pair): - split_index = pair.find('=') - key, val = pair[:split_index].strip(), pair[split_index+1:].strip() - if val.startswith('c(') and val.endswith(')'): - val = Config.parse(val[2:-1]) - else: - val = ast.literal_eval(val) - return key, val - return Config(**dict([_pair_to_kv(pair) - for pair in _comma_iterator(key_val_string)])) - - -class OneOf(object): - """Stores branching config. - - In some cases there may be options which each have their own set of config - params. For example, if specifying config for an environment, each environment - can have custom config options. OneOf is a way to organize branching config. - - Usage example: - one_of = OneOf( - [Config(a=1, b=2), - Config(a=2, c='hello'), - Config(a=3, d=10, e=-10)], - a=1) - config = one_of.strict_update(Config(a=3, d=20)) - config == {'a': 3, 'd': 20, 'e': -10} - """ - - def __init__(self, choices, **kwargs): - """Constructor. - - Usage: OneOf([Config(...), Config(...), ...], attribute=default_value) - - Args: - choices: An iterable of Config objects. When update/strict_update is - called on this OneOf, one of these Config will be selected. - **kwargs: Give exactly one config attribute to branch on. The value of - this attribute during update/strict_update will determine which - Config is used. - - Raises: - ValueError: If kwargs does not contain exactly one entry. Should give one - named argument which is used as the attribute to condition on. - """ - if len(kwargs) != 1: - raise ValueError( - 'Incorrect usage. Must give exactly one named argument. The argument ' - 'name is the config attribute to condition on, and the argument ' - 'value is the default choice. Got %d named arguments.' % len(kwargs)) - key, default_value = kwargs.items()[0] - self.key = key - self.default_value = default_value - - # Make sure each choice is a Config object. - for config in choices: - if not isinstance(config, Config): - raise TypeError('choices must be a list of Config objects. Got %s.' - % type(config)) - - # Map value for key to the config with that value. - self.value_map = {config[key]: config for config in choices} - self.default_config = self.value_map[self.default_value] - - # Make sure there are no duplicate values. - if len(self.value_map) != len(choices): - raise ValueError('Multiple choices given for the same value of %s.' % key) - - # Check that the default value is valid. - if self.default_value not in self.value_map: - raise ValueError( - 'Default value is not an available choice. Got %s=%s. Choices are %s.' - % (key, self.default_value, self.value_map.keys())) - - def default(self): - return self.default_config - - def update(self, other): - """Choose a config and update it. - - If `other` is a Config, one of the config choices is selected and updated. - Otherwise `other` is returned. - - Args: - other: Will update chosen config with this value by calling `update` on - the config. - - Returns: - The chosen config after updating it, or `other` if no config could be - selected. 
- """ - if not isinstance(other, Config): - return other - if self.key not in other or other[self.key] not in self.value_map: - return other - target = self.value_map[other[self.key]] - target.update(other) - return target - - def strict_update(self, config): - """Choose a config and update it. - - `config` must be a Config object. `config` must have the key used to select - among the config choices, and that key must have a value which one of the - config choices has. - - Args: - config: A Config object. the chosen config will be update by calling - `strict_update`. - - Returns: - The chosen config after updating it. - - Raises: - TypeError: If `config` is not a Config instance. - ValueError: If `config` does not have the branching key in its key set. - ValueError: If the value of the config's branching key is not one of the - valid choices. - """ - if not isinstance(config, Config): - raise TypeError('Expecting Config instance, got %s.' % type(config)) - if self.key not in config: - raise ValueError( - 'Branching key %s required but not found in %s' % (self.key, config)) - if config[self.key] not in self.value_map: - raise ValueError( - 'Value %s for key %s is not a possible choice. Choices are %s.' - % (config[self.key], self.key, self.value_map.keys())) - target = self.value_map[config[self.key]] - target.strict_update(config) - return target - - -def _next_comma(string, start_index): - """Finds the position of the next comma not used in a literal collection.""" - paren_count = 0 - for i in xrange(start_index, len(string)): - c = string[i] - if c == '(' or c == '[' or c == '{': - paren_count += 1 - elif c == ')' or c == ']' or c == '}': - paren_count -= 1 - if paren_count == 0 and c == ',': - return i - return -1 - - -def _comma_iterator(string): - index = 0 - while 1: - next_index = _next_comma(string, index) - if next_index == -1: - yield string[index:] - return - yield string[index:next_index] - index = next_index + 1 diff --git a/research/brain_coder/common/config_lib_test.py b/research/brain_coder/common/config_lib_test.py deleted file mode 100644 index cdc96f92d..000000000 --- a/research/brain_coder/common/config_lib_test.py +++ /dev/null @@ -1,425 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for common.config_lib.""" - -import tensorflow as tf - -from common import config_lib # brain coder - - -class ConfigLibTest(tf.test.TestCase): - - def testConfig(self): - config = config_lib.Config(hello='world', foo='bar', num=123, f=56.7) - self.assertEqual('world', config.hello) - self.assertEqual('bar', config['foo']) - config.hello = 'everyone' - config['bar'] = 9000 - self.assertEqual('everyone', config['hello']) - self.assertEqual(9000, config.bar) - self.assertEqual(5, len(config)) - - def testConfigUpdate(self): - config = config_lib.Config(a=1, b=2, c=3) - config.update({'b': 10, 'd': 4}) - self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config) - - config = config_lib.Config(a=1, b=2, c=3) - config.update(b=10, d=4) - self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config) - - config = config_lib.Config(a=1, b=2, c=3) - config.update({'e': 5}, b=10, d=4) - self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4, 'e': 5}, config) - - config = config_lib.Config( - a=1, - b=2, - x=config_lib.Config( - l='a', - y=config_lib.Config(m=1, n=2), - z=config_lib.Config( - q=config_lib.Config(a=10, b=20), - r=config_lib.Config(s=1, t=2)))) - config.update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}}) - 
self.assertEqual( - config_lib.Config( - a=1, b=2, - x=config_lib.Config( - l='a', - y=config_lib.Config(m=10, n=2), - z=config_lib.Config( - q=config_lib.Config(a=10, b=20), - r=config_lib.Config(s=5, t=2)))), - config) - - config = config_lib.Config( - foo='bar', - num=100, - x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), - y=config_lib.Config(qrs=5, tuv=10), - d={'a': 1, 'b': 2}, - l=[1, 2, 3]) - config.update( - config_lib.Config( - foo='hat', - num=50.5, - x={'a': 5, 'z': -10}, - y=config_lib.Config(wxyz=-1)), - d={'a': 10, 'c': 20}, - l=[3, 4, 5, 6]) - self.assertEqual( - config_lib.Config( - foo='hat', - num=50.5, - x=config_lib.Config(a=5, b=2, z=-10, - c=config_lib.Config(h=10, i=20, j=30)), - y=config_lib.Config(qrs=5, tuv=10, wxyz=-1), - d={'a': 10, 'c': 20}, - l=[3, 4, 5, 6]), - config) - self.assertTrue(isinstance(config.x, config_lib.Config)) - self.assertTrue(isinstance(config.x.c, config_lib.Config)) - self.assertTrue(isinstance(config.y, config_lib.Config)) - - config = config_lib.Config( - foo='bar', - num=100, - x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), - y=config_lib.Config(qrs=5, tuv=10), - d={'a': 1, 'b': 2}, - l=[1, 2, 3]) - config.update( - config_lib.Config( - foo=1234, - num='hello', - x={'a': 5, 'z': -10, 'c': {'h': -5, 'k': 40}}, - y=[1, 2, 3, 4], - d='stuff', - l={'a': 1, 'b': 2})) - self.assertEqual( - config_lib.Config( - foo=1234, - num='hello', - x=config_lib.Config(a=5, b=2, z=-10, - c=config_lib.Config(h=-5, i=20, j=30, k=40)), - y=[1, 2, 3, 4], - d='stuff', - l={'a': 1, 'b': 2}), - config) - self.assertTrue(isinstance(config.x, config_lib.Config)) - self.assertTrue(isinstance(config.x.c, config_lib.Config)) - self.assertTrue(isinstance(config.y, list)) - - def testConfigStrictUpdate(self): - config = config_lib.Config(a=1, b=2, c=3) - config.strict_update({'b': 10, 'c': 20}) - self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config) - - config = config_lib.Config(a=1, b=2, c=3) - config.strict_update(b=10, c=20) - self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config) - - config = config_lib.Config(a=1, b=2, c=3, d=4) - config.strict_update({'d': 100}, b=10, a=20) - self.assertEqual({'a': 20, 'b': 10, 'c': 3, 'd': 100}, config) - - config = config_lib.Config( - a=1, - b=2, - x=config_lib.Config( - l='a', - y=config_lib.Config(m=1, n=2), - z=config_lib.Config( - q=config_lib.Config(a=10, b=20), - r=config_lib.Config(s=1, t=2)))) - config.strict_update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}}) - self.assertEqual( - config_lib.Config( - a=1, b=2, - x=config_lib.Config( - l='a', - y=config_lib.Config(m=10, n=2), - z=config_lib.Config( - q=config_lib.Config(a=10, b=20), - r=config_lib.Config(s=5, t=2)))), - config) - - config = config_lib.Config( - foo='bar', - num=100, - x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)), - y=config_lib.Config(qrs=5, tuv=10), - d={'a': 1, 'b': 2}, - l=[1, 2, 3]) - config.strict_update( - config_lib.Config( - foo='hat', - num=50, - x={'a': 5, 'c': {'h': 100}}, - y=config_lib.Config(tuv=-1)), - d={'a': 10, 'c': 20}, - l=[3, 4, 5, 6]) - self.assertEqual( - config_lib.Config( - foo='hat', - num=50, - x=config_lib.Config(a=5, b=2, - c=config_lib.Config(h=100, i=20, j=30)), - y=config_lib.Config(qrs=5, tuv=-1), - d={'a': 10, 'c': 20}, - l=[3, 4, 5, 6]), - config) - - def testConfigStrictUpdateFail(self): - config = config_lib.Config(a=1, b=2, c=3, x=config_lib.Config(a=1, b=2)) - with self.assertRaises(KeyError): - config.strict_update({'b': 10, 'c': 20, 'd': 50}) - 
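Stepping back from the individual assertions, the deleted `config_lib` supports a compact end-to-end pattern: defaults declared in code, overrides supplied as an hparams-style string, and `strict_update` guarding against typos. A minimal hedged sketch follows; the key names and values are arbitrary illustrations, not taken from the deleted code:

```python
from common import config_lib  # brain coder

# Defaults declared in code (illustrative keys only).
config = config_lib.Config(
    lr=0.001, depth=2, opt=config_lib.Config(name='sgd'))

# Config.parse turns 'key=val,...' into a Config; nested c(...)
# expressions become nested Config objects.
overrides = config_lib.Config.parse('lr=3e-4,opt=c(name="adam")')

# strict_update rejects unknown keys and type changes, so a mistyped
# override fails loudly instead of being silently ignored.
config.strict_update(overrides)
assert config.lr == 3e-4 and config.opt.name == 'adam'
```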
with self.assertRaises(KeyError): - config.strict_update(b=10, d=50) - with self.assertRaises(KeyError): - config.strict_update(x={'c': 3}) - with self.assertRaises(TypeError): - config.strict_update(a='string') - with self.assertRaises(TypeError): - config.strict_update(x={'a': 'string'}) - with self.assertRaises(TypeError): - config.strict_update(x=[1, 2, 3]) - - def testConfigFromStr(self): - config = config_lib.Config.from_str("{'c': {'d': 5}, 'b': 2, 'a': 1}") - self.assertEqual( - {'c': {'d': 5}, 'b': 2, 'a': 1}, config) - self.assertTrue(isinstance(config, config_lib.Config)) - self.assertTrue(isinstance(config.c, config_lib.Config)) - - def testConfigParse(self): - config = config_lib.Config.parse( - 'hello="world",num=1234.5,lst=[10,20.5,True,"hi",("a","b","c")],' - 'dct={9:10,"stuff":"qwerty","subdict":{1:True,2:False}},' - 'subconfig=c(a=1,b=[1,2,[3,4]],c=c(f="f",g="g"))') - self.assertEqual( - {'hello': 'world', 'num': 1234.5, - 'lst': [10, 20.5, True, 'hi', ('a', 'b', 'c')], - 'dct': {9: 10, 'stuff': 'qwerty', 'subdict': {1: True, 2: False}}, - 'subconfig': {'a': 1, 'b': [1, 2, [3, 4]], 'c': {'f': 'f', 'g': 'g'}}}, - config) - self.assertTrue(isinstance(config, config_lib.Config)) - self.assertTrue(isinstance(config.subconfig, config_lib.Config)) - self.assertTrue(isinstance(config.subconfig.c, config_lib.Config)) - self.assertFalse(isinstance(config.dct, config_lib.Config)) - self.assertFalse(isinstance(config.dct['subdict'], config_lib.Config)) - self.assertTrue(isinstance(config.lst[4], tuple)) - - def testConfigParseErrors(self): - with self.assertRaises(SyntaxError): - config_lib.Config.parse('a=[1,2,b="hello"') - with self.assertRaises(SyntaxError): - config_lib.Config.parse('a=1,b=c(x="a",y="b"') - with self.assertRaises(SyntaxError): - config_lib.Config.parse('a=1,b=c(x="a")y="b"') - with self.assertRaises(SyntaxError): - config_lib.Config.parse('a=1,b=c(x="a"),y="b",') - - def testOneOf(self): - def make_config(): - return config_lib.Config( - data=config_lib.OneOf( - [config_lib.Config(task=1, a='hello'), - config_lib.Config(task=2, a='world', b='stuff'), - config_lib.Config(task=3, c=1234)], - task=2), - model=config_lib.Config(stuff=1)) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=1,a="hi")')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=1, a='hi'), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=2,a="hi")')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=2, a='hi', b='stuff'), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=3)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=3, c=1234), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=2, a='world', b='stuff'), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=4,d=9999)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=4, d=9999), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=5')) - self.assertEqual( - config_lib.Config( - 
data=5, - model=config_lib.Config(stuff=2)), - config) - - def testOneOfStrict(self): - def make_config(): - return config_lib.Config( - data=config_lib.OneOf( - [config_lib.Config(task=1, a='hello'), - config_lib.Config(task=2, a='world', b='stuff'), - config_lib.Config(task=3, c=1234)], - task=2), - model=config_lib.Config(stuff=1)) - - config = make_config() - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=1,a="hi")')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=1, a='hi'), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=2,a="hi")')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=2, a='hi', b='stuff'), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=3)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=3, c=1234), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config(task=2, a='world', b='stuff'), - model=config_lib.Config(stuff=2)), - config) - - def testNestedOneOf(self): - def make_config(): - return config_lib.Config( - data=config_lib.OneOf( - [config_lib.Config(task=1, a='hello'), - config_lib.Config( - task=2, - a=config_lib.OneOf( - [config_lib.Config(x=1, y=2), - config_lib.Config(x=-1, y=1000, z=4)], - x=1)), - config_lib.Config(task=3, c=1234)], - task=2), - model=config_lib.Config(stuff=1)) - - config = make_config() - config.update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config( - task=2, - a=config_lib.Config(x=-1, y=1000, z=8)), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config( - task=2, - a=config_lib.Config(x=-1, y=1000, z=8)), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.update(config_lib.Config.parse('model=c(stuff=2)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config( - task=2, - a=config_lib.Config(x=1, y=2)), - model=config_lib.Config(stuff=2)), - config) - - config = make_config() - config.strict_update(config_lib.Config.parse('model=c(stuff=2)')) - self.assertEqual( - config_lib.Config( - data=config_lib.Config( - task=2, - a=config_lib.Config(x=1, y=2)), - model=config_lib.Config(stuff=2)), - config) - - def testOneOfStrictErrors(self): - def make_config(): - return config_lib.Config( - data=config_lib.OneOf( - [config_lib.Config(task=1, a='hello'), - config_lib.Config(task=2, a='world', b='stuff'), - config_lib.Config(task=3, c=1234)], - task=2), - model=config_lib.Config(stuff=1)) - - config = make_config() - with self.assertRaises(TypeError): - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=[1,2,3]')) - - config = make_config() - with self.assertRaises(KeyError): - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=3,c=5678,d=9999)')) - - config = make_config() - with self.assertRaises(ValueError): - config.strict_update(config_lib.Config.parse( - 'model=c(stuff=2),data=c(task=4,d=9999)')) - - config = 
make_config()
-    with self.assertRaises(TypeError):
-      config.strict_update(config_lib.Config.parse(
-          'model=c(stuff=2),data=5'))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/brain_coder/common/reward.py b/research/brain_coder/common/reward.py
deleted file mode 100644
index 87e01c9c5..000000000
--- a/research/brain_coder/common/reward.py
+++ /dev/null
@@ -1,390 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Reward functions, distance functions, and reward managers."""
-
-from abc import ABCMeta
-from abc import abstractmethod
-from math import log
-
-
-# All sequences here are assumed to be lists of ints bounded
-# between 0 and `base`-1 (inclusive).
-
-
-#################################
-### Scalar Distance Functions ###
-#################################
-
-
-def abs_diff(a, b, base=0):
-  """Absolute value of difference between scalars.
-
-  abs_diff is symmetric, i.e. `a` and `b` are interchangeable.
-
-  Args:
-    a: First argument. An int.
-    b: Second argument. An int.
-    base: Dummy argument so that the argument signature matches other scalar
-        diff functions. abs_diff is the same in all bases.
-
-  Returns:
-    abs(a - b).
-  """
-  del base  # Unused.
-  return abs(a - b)
-
-
-def mod_abs_diff(a, b, base):
-  """Shortest distance between `a` and `b` in the modular integers base `base`.
-
-  The smallest distance between a and b is returned.
-  Example: mod_abs_diff(1, 99, 100) ==> 2. It is not 98.
-
-  mod_abs_diff is symmetric, i.e. `a` and `b` are interchangeable.
-
-  Args:
-    a: First argument. An int.
-    b: Second argument. An int.
-    base: The modulo base. A positive int.
-
-  Returns:
-    Shortest distance.
-  """
-  diff = abs(a - b)
-  if diff >= base:
-    diff %= base
-  return min(diff, (-diff) + base)
-
-
-###############################
-### List Distance Functions ###
-###############################
-
-
-def absolute_distance(pred, target, base, scalar_diff_fn=abs_diff):
-  """Asymmetric list distance function.
-
-  List distance is the sum of element-wise distances, like Hamming distance,
-  but where `pred` can be longer or shorter than `target`. For each position in
-  both `pred` and `target`, distance between those elements is computed with
-  `scalar_diff_fn`. For missing or extra elements in `pred`, the maximum
-  distance is assigned, which is equal to `base`.
-
-  Distance is 0 when `pred` and `target` are identical, and will be a positive
-  integer when they are not.
-
-  Args:
-    pred: Prediction list. Distance from this list is computed.
-    target: Target list. Distance to this list is computed.
-    base: The integer base to use. For example, a list of chars would use base
-        256.
-    scalar_diff_fn: Element-wise distance function.
-
-  Returns:
-    List distance between `pred` and `target`.
-  """
-  d = 0
-  for i, target_t in enumerate(target):
-    if i >= len(pred):
-      d += base  # A missing slot is worth the max distance.
-    else:
-      # Add element-wise distance for this slot.
-      d += scalar_diff_fn(pred[i], target_t, base)
-  if len(pred) > len(target):
-    # Each extra slot is worth the max distance.
-    d += (len(pred) - len(target)) * base
-  return d
-
-
-def log_absolute_distance(pred, target, base):
-  """Asymmetric list distance function that uses log distance.
-
-  A list distance which computes sum of element-wise distances, similar to
-  `absolute_distance`. Unlike `absolute_distance`, this scales the resulting
-  distance to be a float.
-
-  Element-wise distances are log-scale. Distance between two lists changes
-  relatively less for elements that are far apart, but changes a lot (goes to 0
-  faster) when values get close together.
-
-  Args:
-    pred: List of ints. Computes distance from this list to the target.
-    target: List of ints. This is the "correct" list which the prediction list
-        is trying to match.
-    base: Integer base.
-
-  Returns:
-    Float distance normalized so that when `pred` is at most as long as
-    `target`, the distance is between 0.0 and 1.0. Distance grows unboundedly
-    large as `pred` grows past `target` in length.
-  """
-  if not target:
-    length_normalizer = 1.0
-    if not pred:
-      # Distance between [] and [] is 0.0 since they are equal.
-      return 0.0
-  else:
-    length_normalizer = float(len(target))
-  # max_dist is the maximum element-wise distance, before taking log and
-  # scaling. Since we use `mod_abs_diff`, it would be (base // 2), but we add
-  # 1 to it so that missing or extra positions get the maximum penalty.
-  max_dist = base // 2 + 1
-
-  # The log-distance will be scaled by a factor.
-  # Note: +1 is added to the numerator and denominator to avoid log(0). This
-  # only has a translational effect, i.e. log(dist + 1) / log(max_dist + 1).
-  factor = log(max_dist + 1)
-
-  d = 0.0  # Total distance to be computed.
-  for i, target_t in enumerate(target):
-    if i >= len(pred):
-      # Assign the max element-wise distance for missing positions. This is 1.0
-      # after scaling.
-      d += 1.0
-    else:
-      # Add the log-dist divided by a scaling factor.
-      d += log(mod_abs_diff(pred[i], target_t, base) + 1) / factor
-  if len(pred) > len(target):
-    # Add the max element-wise distance for each extra position.
-    # Since max dist after scaling is 1, this is just the difference in list
-    # lengths.
-    d += (len(pred) - len(target))
-  return d / length_normalizer  # Normalize again by the target length.
-
-
-########################
-### Reward Functions ###
-########################
-
-# Reward functions assign reward based on program output.
-# Warning: only use these functions as the terminal rewards in episodes, i.e.
-# for the "final" programs.
-
-
-def absolute_distance_reward(pred, target, base, scalar_diff_fn=abs_diff):
-  """Reward function based on absolute_distance function.
-
-  Maximum reward, 1.0, is given when the lists are equal. Reward is scaled
-  so that 0.0 reward is given when `pred` is the empty list (assuming `target`
-  is not empty). Reward can go negative when `pred` is longer than `target`.
-
-  This is an asymmetric reward function, so which list is the prediction and
-  which is the target matters.
-
-  Args:
-    pred: Prediction sequence. This should be the sequence outputted by the
-        generated code. List of ints n, where 0 <= n < base.
-    target: Target sequence. The correct sequence that the generated code needs
-        to output. List of ints n, where 0 <= n < base.
-    base: Base of the computation.
-    scalar_diff_fn: Element-wise distance function.
-
-  Returns:
-    Reward computed based on `pred` and `target`. A float.
-  """
-  unit_dist = float(base * len(target))
-  if unit_dist == 0:
-    unit_dist = base
-  dist = absolute_distance(pred, target, base, scalar_diff_fn=scalar_diff_fn)
-  return (unit_dist - dist) / unit_dist
-
-
-def absolute_mod_distance_reward(pred, target, base):
-  """Same as `absolute_distance_reward` but `mod_abs_diff` scalar diff is used.
-
-  Args:
-    pred: Prediction sequence. This should be the sequence outputted by the
-        generated code. List of ints n, where 0 <= n < base.
-    target: Target sequence. The correct sequence that the generated code
-        needs to output. List of ints n, where 0 <= n < base.
-    base: Base of the computation.
-
-  Returns:
-    Reward computed based on `pred` and `target`. A float.
-  """
-  return absolute_distance_reward(pred, target, base, mod_abs_diff)
-
-
-def absolute_log_distance_reward(pred, target, base):
-  """Compute reward using `log_absolute_distance`.
-
-  Maximum reward, 1.0, is given when the lists are equal. Reward is scaled
-  so that 0.0 reward is given when `pred` is the empty list (assuming `target`
-  is not empty). Reward can go negative when `pred` is longer than `target`.
-
-  This is an asymmetric reward function, so which list is the prediction and
-  which is the target matters.
-
-  This reward function has the nice property that much more reward is given
-  for getting the correct value (at each position) than for there being any
-  value at all. For example, in base 100, let's say pred = [1] * 1000
-  and target = [10] * 1000. A lot of reward would be given for being 80%
-  accurate (worst element-wise distance is 50, distances here are 9) using
-  `absolute_distance`. `log_absolute_distance` on the other hand will give
-  greater and greater reward increments the closer each predicted value gets to
-  the target. That makes the reward given for accuracy somewhat independent of
-  the base.
-
-  Args:
-    pred: Prediction sequence. This should be the sequence outputted by the
-        generated code. List of ints n, where 0 <= n < base.
-    target: Target sequence. The correct sequence that the generated code needs
-        to output. List of ints n, where 0 <= n < base.
-    base: Base of the computation.
-
-  Returns:
-    Reward computed based on `pred` and `target`. A float.
-  """
-  return 1.0 - log_absolute_distance(pred, target, base)
-
-
-#######################
-### Reward Managers ###
-#######################
-
-# Reward managers assign reward to many code attempts throughout an episode.
-
-
-class RewardManager(object):
-  """Reward managers administer reward across an episode.
-
-  Reward managers are used for "editor" environments. These are environments
-  where the agent has some way to edit its code over time, and run its code
-  many times in the same episode, so that it can make incremental improvements.
-
-  Reward managers are instantiated with a target sequence, which is the known
-  correct program output. The manager is called on the output from a proposed
-  code, and returns reward. If many proposal outputs are tried, reward may be
-  some stateful function that takes previous tries into account. This is done,
-  in part, so that an agent cannot accumulate unbounded reward just by trying
-  junk programs as often as possible. So reward managers should not give the
-  same reward twice if the next proposal is not better than the last.
-  """
-  __metaclass__ = ABCMeta
-
-  def __init__(self, target, base, distance_fn=absolute_distance):
-    self._target = list(target)
-    self._base = base
-    self._distance_fn = distance_fn
-
-  @abstractmethod
-  def __call__(self, sequence):
-    """Call this reward manager like a function to get reward.
-
-    Calls to reward manager are stateful, and will take previous sequences
-    into account. Repeated calls with the same sequence may produce different
-    rewards.
-
-    Args:
-      sequence: List of integers (each between 0 and base - 1). This is the
-          proposal sequence. Reward will be computed based on the distance
-          from this sequence to the target (distance function and target are
-          given in the constructor), as well as previous sequences tried during
-          the lifetime of this object.
-
-    Returns:
-      Float value. The reward received from this call.
-    """
-    return 0.0
-
-
-class DeltaRewardManager(RewardManager):
-  """Simple reward manager that assigns reward for the net change in distance.
-
-  Given some (possibly asymmetric) list distance function, gives reward for
-  relative changes in prediction distance to the target.
-
-  For example, if on the first call the distance is 3.0, the change in distance
-  is -3 (from starting distance of 0). That relative change will be scaled to
-  produce a negative reward for this step. On the next call, the distance is
-  2.0 which is a +1 change, and that will be scaled to give a positive reward.
-  If the final call has distance 0 (the target is achieved), that is another
-  positive change of +2. The total reward across all 3 calls is then 0, which
-  is the highest possible episode total.
-
-  Reward is scaled so that the maximum element-wise distance is worth 1.0.
-  Maximum total episode reward attainable is 0.
-  """
-
-  def __init__(self, target, base, distance_fn=absolute_distance):
-    super(DeltaRewardManager, self).__init__(target, base, distance_fn)
-    self._last_diff = 0
-
-  def _diff(self, seq):
-    return self._distance_fn(seq, self._target, self._base)
-
-  def _delta_reward(self, seq):
-    # Reward is relative to previous sequence diff.
-    # Reward is scaled so that maximum token difference is worth 1.0.
-    # Reward = (last_diff - this_diff) / self.base.
-    # Reward is positive if this sequence is closer to the target than the
-    # previous sequence, and negative if this sequence is further away.
-    diff = self._diff(seq)
-    reward = (self._last_diff - diff) / float(self._base)
-    self._last_diff = diff
-    return reward
-
-  def __call__(self, seq):
-    return self._delta_reward(seq)
-
-
-class FloorRewardManager(RewardManager):
-  """Assigns positive reward for each step taken closer to the target.
-
-  Given some (possibly asymmetric) list distance function, gives reward
-  whenever a new episode minimum distance is reached. No reward is given if
-  the distance regresses to a higher value, so that the sum of rewards
-  for the episode is positive.
-
-  Reward is scaled so that the maximum element-wise distance is worth 1.0.
-  Maximum total episode reward attainable is len(target).
-
-  If the prediction sequence is longer than the target, a reward of -1 is
-  given. Subsequent predictions which are also longer get 0 reward. The -1
-  penalty will be canceled out with a +1 reward when a prediction is given
-  which is at most the length of the target.
-  """
-
-  def __init__(self, target, base, distance_fn=absolute_distance):
-    super(FloorRewardManager, self).__init__(target, base, distance_fn)
-    self._last_diff = 0
-    self._min_diff = self._max_diff()
-    self._too_long_penality_given = False
-
-  def _max_diff(self):
-    return self._distance_fn([], self._target, self._base)
-
-  def _diff(self, seq):
-    return self._distance_fn(seq, self._target, self._base)
-
-  def _delta_reward(self, seq):
-    # Reward is only given if this sequence is closer to the target than any
-    # previous sequence.
-    # Reward is scaled so that maximum token difference is worth 1.0
-    # Reward = (min_diff - this_diff) / self.base
-    # Reward is always positive.
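-    # Illustrative walk-through (the numbers mirror reward_test below): with
-    # target [1, 2, 3, 4] and base 5, _min_diff starts at
-    # distance([], target) = 20; a first call with [1] has diff 15, so the
-    # reward is (20 - 15) / 5 = 1.0.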
- diff = self._diff(seq) - if diff < self._min_diff: - reward = (self._min_diff - diff) / float(self._base) - self._min_diff = diff - else: - reward = 0.0 - return reward - - def __call__(self, seq): - if len(seq) > len(self._target): # Output is too long. - if not self._too_long_penality_given: - self._too_long_penality_given = True - reward = -1.0 - else: - reward = 0.0 # Don't give this penalty more than once. - return reward - - reward = self._delta_reward(seq) - if self._too_long_penality_given: - reward += 1.0 # Return the subtracted reward. - self._too_long_penality_given = False - return reward - diff --git a/research/brain_coder/common/reward_test.py b/research/brain_coder/common/reward_test.py deleted file mode 100644 index 38a1d4ace..000000000 --- a/research/brain_coder/common/reward_test.py +++ /dev/null @@ -1,311 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for common.reward.""" - -from math import log -import numpy as np -import tensorflow as tf - -from common import reward # brain coder - - -class RewardTest(tf.test.TestCase): - - def testAbsDiff(self): - self.assertEqual(5, reward.abs_diff(15, 20)) - self.assertEqual(5, reward.abs_diff(20, 15)) - - def testModAbsDiff(self): - self.assertEqual(5, reward.mod_abs_diff(15, 20, 25)) - self.assertEqual(5, reward.mod_abs_diff(20, 15, 25)) - self.assertEqual(2, reward.mod_abs_diff(1, 24, 25)) - self.assertEqual(2, reward.mod_abs_diff(24, 1, 25)) - - self.assertEqual(0, reward.mod_abs_diff(0, 0, 5)) - self.assertEqual(1, reward.mod_abs_diff(0, 1, 5)) - self.assertEqual(2, reward.mod_abs_diff(0, 2, 5)) - self.assertEqual(2, reward.mod_abs_diff(0, 3, 5)) - self.assertEqual(1, reward.mod_abs_diff(0, 4, 5)) - - self.assertEqual(0, reward.mod_abs_diff(-1, 4, 5)) - self.assertEqual(1, reward.mod_abs_diff(-5, 4, 5)) - self.assertEqual(1, reward.mod_abs_diff(-7, 4, 5)) - self.assertEqual(1, reward.mod_abs_diff(13, 4, 5)) - self.assertEqual(1, reward.mod_abs_diff(15, 4, 5)) - - def testAbsoluteDistance_AbsDiffMethod(self): - self.assertEqual( - 4, - reward.absolute_distance([0], [4], 5, scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 0, - reward.absolute_distance([4], [4], 5, scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 0, - reward.absolute_distance([], [], 5, scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1], [], 5, scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([], [1], 5, scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 0, - reward.absolute_distance([1, 2, 3], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 1, - reward.absolute_distance([1, 2, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 1, - reward.absolute_distance([1, 2, 2], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1, 2], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1, 2, 3, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - self.assertEqual( - 6, - reward.absolute_distance([4, 4, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.abs_diff)) - - def testAbsoluteDistance_ModDiffMethod(self): - self.assertEqual( - 1, - reward.absolute_distance([0], [4], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 0, - reward.absolute_distance([4], [4], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 0, - 
reward.absolute_distance([], [], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1], [], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([], [1], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 0, - reward.absolute_distance([1, 2, 3], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 1, - reward.absolute_distance([1, 2, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 1, - reward.absolute_distance([1, 2, 2], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1, 2], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([1, 2, 3, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - self.assertEqual( - 5, - reward.absolute_distance([4, 4, 4], [1, 2, 3], 5, - scalar_diff_fn=reward.mod_abs_diff)) - - def testLogAbsoluteDistance(self): - def log_diff(diff, base): - return log(diff + 1) / log(base // 2 + 2) - - self.assertEqual( - log_diff(1, 5), - reward.log_absolute_distance([0], [4], 5)) - self.assertEqual( - log_diff(2, 5), - reward.log_absolute_distance([1], [4], 5)) - self.assertEqual( - log_diff(2, 5), - reward.log_absolute_distance([2], [4], 5)) - self.assertEqual( - log_diff(1, 5), - reward.log_absolute_distance([3], [4], 5)) - self.assertEqual( - log_diff(3, 5), # max_dist = base // 2 + 1 = 3 - reward.log_absolute_distance([], [4], 5)) - self.assertEqual( - 0 + log_diff(3, 5), # max_dist = base // 2 + 1 = 3 - reward.log_absolute_distance([4, 4], [4], 5)) - self.assertEqual( - 0, - reward.log_absolute_distance([4], [4], 5)) - self.assertEqual( - 0, - reward.log_absolute_distance([], [], 5)) - self.assertEqual( - 1, - reward.log_absolute_distance([1], [], 5)) - self.assertEqual( - 1, - reward.log_absolute_distance([], [1], 5)) - - self.assertEqual( - 0, - reward.log_absolute_distance([1, 2, 3], [1, 2, 3], 5)) - self.assertEqual( - log_diff(1, 5) / 3, # divided by target length. - reward.log_absolute_distance([1, 2, 4], [1, 2, 3], 5)) - self.assertEqual( - log_diff(1, 5) / 3, - reward.log_absolute_distance([1, 2, 2], [1, 2, 3], 5)) - self.assertEqual( - log_diff(3, 5) / 3, # max_dist - reward.log_absolute_distance([1, 2], [1, 2, 3], 5)) - self.assertEqual( - log_diff(3, 5) / 3, # max_dist - reward.log_absolute_distance([1, 2, 3, 4], [1, 2, 3], 5)) - # Add log differences for each position. - self.assertEqual( - (log_diff(2, 5) + log_diff(2, 5) + log_diff(1, 5)) / 3, - reward.log_absolute_distance([4, 4, 4], [1, 2, 3], 5)) - - def testAbsoluteDistanceReward(self): - self.assertEqual( - 1, - reward.absolute_distance_reward([1, 2, 3], [1, 2, 3], 5)) - self.assertEqual( - 1 - 1 / (5 * 3.), # 1 - distance / (base * target_len) - reward.absolute_distance_reward([1, 2, 4], [1, 2, 3], 5)) - self.assertEqual( - 1 - 1 / (5 * 3.), - reward.absolute_distance_reward([1, 2, 2], [1, 2, 3], 5)) - self.assertTrue(np.isclose( - 1 - 5 / (5 * 3.), - reward.absolute_distance_reward([1, 2], [1, 2, 3], 5))) - self.assertTrue(np.isclose( - 1 - 5 / (5 * 3.), - reward.absolute_distance_reward([1, 2, 3, 4], [1, 2, 3], 5))) - # Add log differences for each position. 
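-    # (With plain abs_diff the element-wise diffs are |4-1| = 3, |4-2| = 2,
-    # and |4-3| = 1, so the total distance is 6.)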
- self.assertEqual( - 1 - (3 + 2 + 1) / (5 * 3.), - reward.absolute_distance_reward([4, 4, 4], [1, 2, 3], 5)) - self.assertEqual( - 1, - reward.absolute_distance_reward([], [], 5)) - - def testAbsoluteModDistanceReward(self): - self.assertEqual( - 1, - reward.absolute_mod_distance_reward([1, 2, 3], [1, 2, 3], 5)) - self.assertEqual( - 1 - 1 / (5 * 3.), # 1 - distance / (base * target_len) - reward.absolute_mod_distance_reward([1, 2, 4], [1, 2, 3], 5)) - self.assertEqual( - 1 - 1 / (5 * 3.), - reward.absolute_mod_distance_reward([1, 2, 2], [1, 2, 3], 5)) - self.assertTrue(np.isclose( - 1 - 5 / (5 * 3.), - reward.absolute_mod_distance_reward([1, 2], [1, 2, 3], 5))) - self.assertTrue(np.isclose( - 1 - 5 / (5 * 3.), - reward.absolute_mod_distance_reward([1, 2, 3, 4], [1, 2, 3], 5))) - # Add log differences for each position. - self.assertTrue(np.isclose( - 1 - (2 + 2 + 1) / (5 * 3.), - reward.absolute_mod_distance_reward([4, 4, 4], [1, 2, 3], 5))) - self.assertTrue(np.isclose( - 1 - (1 + 2 + 2) / (5 * 3.), - reward.absolute_mod_distance_reward([0, 1, 2], [4, 4, 4], 5))) - self.assertEqual( - 1, - reward.absolute_mod_distance_reward([], [], 5)) - - def testAbsoluteLogDistanceReward(self): - def log_diff(diff, base): - return log(diff + 1) / log(base // 2 + 2) - - self.assertEqual( - 1, - reward.absolute_log_distance_reward([1, 2, 3], [1, 2, 3], 5)) - self.assertEqual( - 1 - log_diff(1, 5) / 3, # divided by target length. - reward.absolute_log_distance_reward([1, 2, 4], [1, 2, 3], 5)) - self.assertEqual( - 1 - log_diff(1, 5) / 3, - reward.absolute_log_distance_reward([1, 2, 2], [1, 2, 3], 5)) - self.assertEqual( - 1 - log_diff(3, 5) / 3, # max_dist - reward.absolute_log_distance_reward([1, 2], [1, 2, 3], 5)) - self.assertEqual( - 1 - log_diff(3, 5) / 3, # max_dist - reward.absolute_log_distance_reward([1, 2, 3, 4], [1, 2, 3], 5)) - # Add log differences for each position. 
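-    # (log_absolute_distance uses mod_abs_diff element-wise: in base 5 the
-    # diff between 4 and 1 is min(3, 2) = 2, between 4 and 2 it is 2, and
-    # between 4 and 3 it is 1.)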
-    self.assertEqual(
-        1 - (log_diff(2, 5) + log_diff(2, 5) + log_diff(1, 5)) / 3,
-        reward.absolute_log_distance_reward([4, 4, 4], [1, 2, 3], 5))
-    self.assertEqual(
-        1 - (log_diff(1, 5) + log_diff(2, 5) + log_diff(2, 5)) / 3,
-        reward.absolute_log_distance_reward([0, 1, 2], [4, 4, 4], 5))
-    self.assertEqual(
-        1,
-        reward.absolute_log_distance_reward([], [], 5))
-
-  def testDeltaRewardManager(self):
-    reward_manager = reward.DeltaRewardManager(
-        [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance)
-    self.assertEqual(-3, reward_manager([1]))
-    self.assertEqual(0, reward_manager([1]))
-    self.assertEqual(4 / 5., reward_manager([1, 3]))
-    self.assertEqual(-4 / 5, reward_manager([1]))
-    self.assertEqual(3, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(-1, reward_manager([1, 2, 3]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4, 3]))
-    self.assertEqual(-1, reward_manager([1, 2, 3, 4, 3, 2]))
-    self.assertEqual(2, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4]))
-
-  def testFloorRewardManager(self):
-    reward_manager = reward.FloorRewardManager(
-        [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance)
-    self.assertEqual(1, reward_manager([1]))
-    self.assertEqual(0, reward_manager([1]))
-    self.assertEqual(4 / 5., reward_manager([1, 3]))
-    self.assertEqual(0, reward_manager([1]))
-    self.assertEqual(1 / 5., reward_manager([1, 2]))
-    self.assertEqual(0, reward_manager([0, 1]))
-    self.assertEqual(0, reward_manager([]))
-    self.assertEqual(0, reward_manager([1, 2]))
-    self.assertEqual(2, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(0, reward_manager([1, 2, 3]))
-    self.assertEqual(-1, reward_manager([1, 2, 3, 4, 3]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4, 3, 2]))
-    self.assertEqual(1, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4]))
-
-    reward_manager = reward.FloorRewardManager(
-        [1, 2, 3, 4], base=5, distance_fn=reward.absolute_distance)
-    self.assertEqual(1, reward_manager([1]))
-    self.assertEqual(-1, reward_manager([1, 0, 0, 0, 0, 0]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4, 0, 0]))
-    self.assertEqual(0, reward_manager([1, 2, 3, 4, 0]))
-    self.assertEqual(1, reward_manager([]))
-    self.assertEqual(0, reward_manager([]))
-    self.assertEqual(0, reward_manager([1]))
-    self.assertEqual(1, reward_manager([1, 2]))
-    self.assertEqual(-1, reward_manager([1, 2, 3, 4, 0, 0]))
-    self.assertEqual(0, reward_manager([1, 1, 1, 1, 1]))
-    self.assertEqual(1 + 2, reward_manager([1, 2, 3, 4]))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/brain_coder/common/rollout.py b/research/brain_coder/common/rollout.py
deleted file mode 100644
index e377aa662..000000000
--- a/research/brain_coder/common/rollout.py
+++ /dev/null
@@ -1,306 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Utilities related to computing training batches from episode rollouts.
-
-Implementations here are based on code from OpenAI:
-https://github.com/openai/universe-starter-agent/blob/master/a3c.py.
-"""
-
-from collections import namedtuple
-import numpy as np
-import scipy.signal
-
-from common import utils  # brain coder
-
-
-class Rollout(object):
-  """Holds a rollout for an episode.
-
-  A rollout is a record of the states observed in some environment and actions
-  taken by the agent to arrive at those states. Other information includes
-  rewards received after each action, values estimated for each state, whether
-  the rollout concluded the episode, and total reward received. Everything
-  should be given in time order.
-
-  At each time t, the agent sees state s_t, takes action a_t, and then receives
-  reward r_t. The agent may optionally estimate a state value V(s_t) for each
-  state.
-
-  For an episode of length T:
-  states = [s_0, ..., s_(T-1)]
-  actions = [a_0, ..., a_(T-1)]
-  rewards = [r_0, ..., r_(T-1)]
-  values = [V(s_0), ..., V(s_(T-1))]
-
-  Note that there is an extra state s_T observed after taking action a_(T-1),
-  but this is not included in the rollout.
-
-  Rollouts have a `terminated` attribute which is True when the rollout is
-  "finalized", i.e. it holds a full episode. terminated will be False when
-  time steps are still being added to it.
-  """
-
-  def __init__(self):
-    self.states = []
-    self.actions = []
-    self.rewards = []
-    self.values = []
-    self.total_reward = 0.0
-    self.terminated = False
-
-  def add(self, state, action, reward, value=0.0, terminated=False):
-    """Add the next timestep to this rollout.
-
-    Args:
-      state: The state observed at the start of this timestep.
-      action: The action taken after observing the given state.
-      reward: The reward received for taking the given action.
-      value: The value estimated for the given state.
-      terminated: Whether this timestep ends the episode.
-
-    Raises:
-      ValueError: If self.terminated is already True, meaning that the episode
-          has already ended.
-    """
-    if self.terminated:
-      raise ValueError(
-          'Trying to add timestep to an already terminal rollout.')
-    self.states += [state]
-    self.actions += [action]
-    self.rewards += [reward]
-    self.values += [value]
-    self.terminated = terminated
-    self.total_reward += reward
-
-  def add_many(self, states, actions, rewards, values=None, terminated=False):
-    """Add many timesteps to this rollout.
-
-    Arguments are the same as `add`, but are lists of equal size.
-
-    Args:
-      states: The states observed.
-      actions: The actions taken.
-      rewards: The rewards received.
-      values: The values estimated for the given states.
-      terminated: Whether this sequence ends the episode.
-
-    Raises:
-      ValueError: If the lengths of all the input lists are not equal.
-      ValueError: If self.terminated is already True, meaning that the episode
-          has already ended.
-    """
-    if len(states) != len(actions):
-      raise ValueError(
-          'Number of states and actions must be the same. Got %d states and '
-          '%d actions' % (len(states), len(actions)))
-    if len(states) != len(rewards):
-      raise ValueError(
-          'Number of states and rewards must be the same. Got %d states and '
-          '%d rewards' % (len(states), len(rewards)))
-    if values is not None and len(states) != len(values):
-      raise ValueError(
-          'Number of states and values must be the same. 
Got %d states and ' - '%d values' % (len(states), len(values))) - if self.terminated: - raise ValueError( - 'Trying to add timesteps to an already terminal rollout.') - self.states += states - self.actions += actions - self.rewards += rewards - self.values += values if values is not None else [0.0] * len(states) - self.terminated = terminated - self.total_reward += sum(rewards) - - def extend(self, other): - """Append another rollout to this rollout.""" - assert not self.terminated - self.states.extend(other.states) - self.actions.extend(other.actions) - self.rewards.extend(other.rewards) - self.values.extend(other.values) - self.terminated = other.terminated - self.total_reward += other.total_reward - - -def discount(x, gamma): - """Returns discounted sums for each value in x, with discount factor gamma. - - This can be used to compute the return (discounted sum of rewards) at each - timestep given a sequence of rewards. See the definitions for return and - REINFORCE in section 3 of https://arxiv.org/pdf/1602.01783.pdf. - - Let g^k mean gamma ** k. - For list [x_0, ..., x_N], the following list of discounted sums is computed: - [x_0 + g^1 * x_1 + g^2 * x_2 + ... g^N * x_N, - x_1 + g^1 * x_2 + g^2 * x_3 + ... g^(N-1) * x_N, - x_2 + g^1 * x_3 + g^2 * x_4 + ... g^(N-2) * x_N, - ..., - x_(N-1) + g^1 * x_N, - x_N] - - Args: - x: List of numbers [x_0, ..., x_N]. - gamma: Float between 0 and 1 (inclusive). This is the discount factor. - - Returns: - List of discounted sums. - """ - return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] - - -def discounted_advantage_and_rewards(rewards, values, gamma, lambda_=1.0): - """Compute advantages and returns (discounted sum of rewards). - - For an episode of length T, rewards = [r_0, ..., r_(T-1)]. - Each reward r_t is observed after taking action a_t at state s_t. A final - state s_T is observed but no reward is given at this state since no action - a_T is taken (otherwise there would be a new state s_(T+1)). - - `rewards` and `values` are for a single episode. Return R_t is the discounted - sum of future rewards starting at time t, where `gamma` is the discount - factor. - R_t = r_t + gamma * r_(t+1) + gamma**2 * r_(t+2) + ... - + gamma**(T-1-t) * r_(T-1) - - Advantage A(a_t, s_t) is approximated by computing A(a_t, s_t) = R_t - V(s_t) - where V(s_t) is an approximation of the value at that state, given in the - `values` list. Returns R_t are needed for all REINFORCE algorithms. Advantage - is used for the advantage actor critic variant of REINFORCE. - See algorithm S3 in https://arxiv.org/pdf/1602.01783.pdf. - - Additionally another parameter `lambda_` controls the bias-variance tradeoff. - See "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438. - lambda_ = 1 reduces to regular advantage. - 0 <= lambda_ < 1 trades off variance for bias, with lambda_ = 0 being the - most biased. - - Bootstrapping is also supported. If an episode does not end in a terminal - state (either because the episode was ended early, or the environment does not - have end states), the true return cannot be computed from the rewards alone. - However, it can be estimated by computing the value (an approximation of - return) of the last state s_T. Thus the `values` list will have an extra item: - values = [V(s_0), ..., V(s_(T-1)), V(s_T)]. - - Args: - rewards: List of observed rewards [r_0, ..., r_(T-1)]. - values: List of estimated values [V(s_0), ..., V(s_(T-1))] with an optional - extra V(s_T) item. - gamma: Discount factor. 
Number between 0 and 1. 1 means no discount.
-        If not 1, gamma is typically near 1, like 0.99.
-    lambda_: Bias-variance tradeoff factor. Between 0 and 1.
-
-  Returns:
-    empirical_values: Returns at each timestep.
-    generalized_advantage: Advantages at each timestep.
-
-  Raises:
-    ValueError: If shapes of `rewards` and `values` are not rank 1.
-    ValueError: If len(values) not in (len(rewards), len(rewards) + 1).
-  """
-  rewards = np.asarray(rewards, dtype=np.float32)
-  values = np.asarray(values, dtype=np.float32)
-  if rewards.ndim != 1:
-    raise ValueError('Single episode only. rewards must be rank 1.')
-  if values.ndim != 1:
-    raise ValueError('Single episode only. values must be rank 1.')
-  if len(values) == len(rewards):
-    # No bootstrapping.
-    values = np.append(values, 0)
-    empirical_values = discount(rewards, gamma)
-  elif len(values) == len(rewards) + 1:
-    # With bootstrapping.
-    # Last value is for the terminal state (final state after last action was
-    # taken).
-    empirical_values = discount(np.append(rewards, values[-1]), gamma)[:-1]
-  else:
-    raise ValueError('values should contain the same number of items or one '
-                     'more item than rewards')
-  delta = rewards + gamma * values[1:] - values[:-1]
-  generalized_advantage = discount(delta, gamma * lambda_)
-
-  # empirical_values is the discounted sum of rewards into the future.
-  # generalized_advantage is the target for each policy update.
-  return empirical_values, generalized_advantage
-
-
-"""Batch holds a minibatch of episodes.
-
-Let bi = batch_index, i.e. the index of each episode in the minibatch.
-Let t = time.
-
-Attributes:
-  states: States for each timestep in each episode. Indexed by states[bi, t].
-  actions: Actions for each timestep in each episode. Indexed by
-      actions[bi, t].
-  discounted_adv: Advantages (computed by discounted_advantage_and_rewards)
-      for each timestep in each episode. Indexed by discounted_adv[bi, t].
-  discounted_r: Returns (discounted sum of rewards computed by
-      discounted_advantage_and_rewards) for each timestep in each episode.
-      Indexed by discounted_r[bi, t].
-  total_rewards: Total reward for each episode, i.e. sum of rewards across all
-      timesteps (not discounted). Indexed by total_rewards[bi].
-  episode_lengths: Number of timesteps in each episode. If an episode has
-      N actions, N rewards, and N states, then its length is N. Indexed by
-      episode_lengths[bi].
-  batch_size: Number of episodes in this minibatch. An integer.
-  max_time: Maximum episode length in the batch. An integer.
-"""  # pylint: disable=pointless-string-statement
-Batch = namedtuple(
-    'Batch',
-    ['states', 'actions', 'discounted_adv', 'discounted_r', 'total_rewards',
-     'episode_lengths', 'batch_size', 'max_time'])
-
-
-def process_rollouts(rollouts, gamma, lambda_=1.0):
-  """Convert a batch of rollouts into tensors ready to be fed into a model.
-
-  Lists from each episode are stacked into 2D tensors and padded with 0s up to
-  the maximum timestep in the batch.
-
-  Args:
-    rollouts: A list of Rollout instances.
-    gamma: The discount factor. A number between 0 and 1 (inclusive). See gamma
-        argument in discounted_advantage_and_rewards.
-    lambda_: See lambda_ argument in discounted_advantage_and_rewards.
-
-  Returns:
-    Batch instance. states, actions, discounted_adv, and discounted_r are
-    numpy arrays with shape (batch_size, max_episode_length). episode_lengths
-    is a list of ints. total_rewards is a list of floats (total reward in each
-    episode). batch_size and max_time are ints.
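-
-  Example (illustrative; assumes `rollouts` holds terminated Rollout
-  instances):
-    batch = process_rollouts(rollouts, gamma=0.99)
-    # batch.states.shape == (batch.batch_size, batch.max_time)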
- - Raises: - ValueError: If any of the rollouts are not terminal. - """ - for ro in rollouts: - if not ro.terminated: - raise ValueError('Can only process terminal rollouts.') - - episode_lengths = [len(ro.states) for ro in rollouts] - batch_size = len(rollouts) - max_time = max(episode_lengths) - - states = utils.stack_pad([ro.states for ro in rollouts], 0, max_time) - actions = utils.stack_pad([ro.actions for ro in rollouts], 0, max_time) - - discounted_rewards = [None] * batch_size - discounted_adv = [None] * batch_size - for i, ro in enumerate(rollouts): - disc_r, disc_adv = discounted_advantage_and_rewards( - ro.rewards, ro.values, gamma, lambda_) - discounted_rewards[i] = disc_r - discounted_adv[i] = disc_adv - discounted_rewards = utils.stack_pad(discounted_rewards, 0, max_time) - discounted_adv = utils.stack_pad(discounted_adv, 0, max_time) - - total_rewards = [sum(ro.rewards) for ro in rollouts] - - return Batch(states=states, - actions=actions, - discounted_adv=discounted_adv, - discounted_r=discounted_rewards, - total_rewards=total_rewards, - episode_lengths=episode_lengths, - batch_size=batch_size, - max_time=max_time) diff --git a/research/brain_coder/common/rollout_test.py b/research/brain_coder/common/rollout_test.py deleted file mode 100644 index 5be4cb0fa..000000000 --- a/research/brain_coder/common/rollout_test.py +++ /dev/null @@ -1,129 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for common.rollout.""" - -import numpy as np -import tensorflow as tf - -from common import rollout as rollout_lib # brain coder - - -class RolloutTest(tf.test.TestCase): - - def MakeRollout(self, states, actions, rewards, values=None, terminated=True): - rollout = rollout_lib.Rollout() - rollout.add_many( - states=states, actions=actions, rewards=rewards, values=values, - terminated=terminated) - return rollout - - def testDiscount(self): - discounted = np.array([1.0 / 2 ** n for n in range(4, -1, -1)]) - discounted[:2] += [1.0 / 2 ** n for n in range(1, -1, -1)] - - self.assertTrue(np.array_equal( - rollout_lib.discount([0.0, 1.0, 0.0, 0.0, 1.0], 0.50), - discounted)) - self.assertTrue(np.array_equal( - rollout_lib.discount(np.array([0.0, 1.0, 0.0, 0.0, 1.0]), 0.50), - discounted)) - - def testDiscountedAdvantageAndRewards(self): - # lambda=1, No bootstrapping. - values = [0.1, 0.5, 0.5, 0.25] - (empirical_values, - generalized_advantage) = rollout_lib.discounted_advantage_and_rewards( - [0.0, 0.0, 0.0, 1.0], - values, - gamma=0.75, - lambda_=1.0) - expected_discounted_r = ( - np.array([1.0 * 0.75 ** n for n in range(3, -1, -1)])) - expected_adv = expected_discounted_r - values - self.assertTrue(np.array_equal(empirical_values, expected_discounted_r)) - self.assertTrue(np.allclose(generalized_advantage, expected_adv)) - - # lambda=1, With bootstrapping. - values = [0.1, 0.5, 0.5, 0.25, 0.75] - (empirical_values, - generalized_advantage) = rollout_lib.discounted_advantage_and_rewards( - [0.0, 0.0, 0.0, 1.0], - values, - gamma=0.75, - lambda_=1.0) - expected_discounted_r = ( - np.array([0.75 * 0.75 ** n for n in range(4, 0, -1)]) - + np.array([1.0 * 0.75 ** n for n in range(3, -1, -1)])) - expected_adv = expected_discounted_r - values[:-1] - self.assertTrue(np.array_equal(empirical_values, expected_discounted_r)) - self.assertTrue(np.allclose(generalized_advantage, expected_adv)) - - # lambda=0.5, With bootstrapping. 
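-    # With 0 < lambda_ < 1, the generalized advantage is the discounted sum
-    # of TD errors, A_t = delta_t + gamma * lambda_ * A_(t+1) with
-    # delta_t = r_t + gamma * V(s_(t+1)) - V(s_t); the loop below builds the
-    # same recursion by hand for comparison.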
-    values = [0.1, 0.5, 0.5, 0.25, 0.75]
-    rewards = [0.0, 0.0, 0.0, 1.0]
-    l = 0.5  # lambda
-    g = 0.75  # gamma
-    (empirical_values,
-     generalized_advantage) = rollout_lib.discounted_advantage_and_rewards(
-         rewards,
-         values,
-         gamma=g,
-         lambda_=l)
-    expected_discounted_r = (
-        np.array([0.75 * g ** n for n in range(4, 0, -1)])
-        + np.array([1.0 * g ** n for n in range(3, -1, -1)]))
-    expected_adv = [0.0] * len(values)
-    for t in range(3, -1, -1):
-      delta_t = rewards[t] + g * values[t + 1] - values[t]
-      expected_adv[t] = delta_t + g * l * expected_adv[t + 1]
-    expected_adv = expected_adv[:-1]
-    self.assertTrue(np.array_equal(empirical_values, expected_discounted_r))
-    self.assertTrue(np.allclose(generalized_advantage, expected_adv))
-
-  def testProcessRollouts(self):
-    g = 0.95
-    rollouts = [
-        self.MakeRollout(
-            states=[3, 6, 9],
-            actions=[1, 2, 3],
-            rewards=[1.0, -1.0, 0.5],
-            values=[0.5, 0.5, 0.1]),
-        self.MakeRollout(
-            states=[10],
-            actions=[5],
-            rewards=[1.0],
-            values=[0.5])]
-    batch = rollout_lib.process_rollouts(rollouts, gamma=g)
-
-    self.assertEqual(2, batch.batch_size)
-    self.assertEqual(3, batch.max_time)
-    self.assertEqual([3, 1], batch.episode_lengths)
-    self.assertEqual([0.5, 1.0], batch.total_rewards)
-    self.assertEqual(
-        [[3, 6, 9], [10, 0, 0]],
-        batch.states.tolist())
-    self.assertEqual(
-        [[1, 2, 3], [5, 0, 0]],
-        batch.actions.tolist())
-
-    rew1, rew2 = rollouts[0].rewards, rollouts[1].rewards
-    expected_discounted_rewards = [
-        [rew1[0] + g * rew1[1] + g * g * rew1[2],
-         rew1[1] + g * rew1[2],
-         rew1[2]],
-        [rew2[0], 0.0, 0.0]]
-    expected_advantages = [
-        [dr - v
-         for dr, v
-         in zip(expected_discounted_rewards[0], rollouts[0].values)],
-        [expected_discounted_rewards[1][0] - rollouts[1].values[0], 0.0, 0.0]]
-    self.assertTrue(
-        np.allclose(expected_discounted_rewards, batch.discounted_r))
-    self.assertTrue(
-        np.allclose(expected_advantages, batch.discounted_adv))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/brain_coder/common/schedules.py b/research/brain_coder/common/schedules.py
deleted file mode 100644
index fff2481e5..000000000
--- a/research/brain_coder/common/schedules.py
+++ /dev/null
@@ -1,301 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Schedule functions for controlling hparams over time."""
-
-from abc import ABCMeta
-from abc import abstractmethod
-import math
-
-from common import config_lib  # brain coder
-
-
-class Schedule(object):
-  """Schedule is a function which sets a hyperparameter's value over time.
-
-  For example, a schedule can be used to decay an hparam, or oscillate it over
-  time.
-
-  This object is constructed with an instance of config_lib.Config (will be
-  specific to each class implementation). For example if this is a decay
-  schedule, the config may specify the rate of decay and decay start time. Then
-  the object instance is called like a function, mapping global step (an
-  integer counting how many calls to the train op have been made) to the hparam
-  value.
-
-  Properties of a schedule function f(t):
-  0) Domain of t is the non-negative integers (t may be 0).
-  1) Range of f is the reals.
-  2) Schedule functions can assume that they will be called in time order. This
-     allows schedules to be stateful.
-  3) Schedule functions should be deterministic. Two schedule instances with
-     the same config must always give the same value for each t, regardless of
-     which t's it was previously called on.
Users may call f(t) on arbitrary - (positive) time jumps. Essentially, multiple schedule instances used in - replica training will behave the same. - 4) Duplicate successive calls on the same time are allowed. - """ - __metaclass__ = ABCMeta - - @abstractmethod - def __init__(self, config): - """Construct this schedule with a config specific to each class impl. - - Args: - config: An instance of config_lib.Config. - """ - pass - - @abstractmethod - def __call__(self, global_step): - """Map `global_step` to a value. - - `global_step` is an integer counting how many calls to the train op have - been made across all replicas (hence why it is global). Implementations - may assume calls to be made in time order, i.e. `global_step` now >= - previous `global_step` values. - - Args: - global_step: Non-negative integer. - - Returns: - Hparam value at this step. A number. - """ - pass - - -class ConstSchedule(Schedule): - """Constant function. - - config: - const: Constant value at every step. - - f(t) = const. - """ - - def __init__(self, config): - super(ConstSchedule, self).__init__(config) - self.const = config.const - - def __call__(self, global_step): - return self.const - - -class LinearDecaySchedule(Schedule): - """Linear decay function. - - config: - initial: Decay starts from this value. - final: Decay ends at this value. - start_time: Step when decay starts. Constant before it. - end_time: When decay ends. Constant after it. - - f(t) is a linear function when start_time <= t <= end_time, with slope of - (final - initial) / (end_time - start_time). f(t) = initial - when t <= start_time. f(t) = final when t >= end_time. - - If start_time == end_time, this becomes a step function. - """ - - def __init__(self, config): - super(LinearDecaySchedule, self).__init__(config) - self.initial = config.initial - self.final = config.final - self.start_time = config.start_time - self.end_time = config.end_time - - if self.end_time < self.start_time: - raise ValueError('start_time must be before end_time.') - - # Linear interpolation. - self._time_diff = float(self.end_time - self.start_time) - self._diff = float(self.final - self.initial) - self._slope = ( - self._diff / self._time_diff if self._time_diff > 0 else float('inf')) - - def __call__(self, global_step): - if global_step <= self.start_time: - return self.initial - if global_step > self.end_time: - return self.final - return self.initial + (global_step - self.start_time) * self._slope - - -class ExponentialDecaySchedule(Schedule): - """Exponential decay function. - - See https://en.wikipedia.org/wiki/Exponential_decay. - - Use this decay function to decay over orders of magnitude. For example, to - decay learning rate from 1e-2 to 1e-6. Exponential decay will decay the - exponent linearly. - - config: - initial: Decay starts from this value. - final: Decay ends at this value. - start_time: Step when decay starts. Constant before it. - end_time: When decay ends. Constant after it. - - f(t) is an exponential decay function when start_time <= t <= end_time. The - decay rate and amplitude are chosen so that f(t) = initial when - t = start_time, and f(t) = final when t = end_time. f(t) is constant for - t < start_time or t > end_time. initial and final must be positive values. - - If start_time == end_time, this becomes a step function. 
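-
-  Example (illustrative values): to decay a learning rate from 1e-2 to 1e-6
-  between steps 1000 and 100000:
-    config_lib.Config(fn='exp_decay', initial=1e-2, final=1e-6,
-                      start_time=1000, end_time=100000)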
- """ - - def __init__(self, config): - super(ExponentialDecaySchedule, self).__init__(config) - self.initial = config.initial - self.final = config.final - self.start_time = config.start_time - self.end_time = config.end_time - - if self.initial <= 0 or self.final <= 0: - raise ValueError('initial and final must be positive numbers.') - - # Linear interpolation in log space. - self._linear_fn = LinearDecaySchedule( - config_lib.Config( - initial=math.log(self.initial), - final=math.log(self.final), - start_time=self.start_time, - end_time=self.end_time)) - - def __call__(self, global_step): - return math.exp(self._linear_fn(global_step)) - - -class SmootherstepDecaySchedule(Schedule): - """Smootherstep decay function. - - A sigmoidal like transition from initial to final values. A smoother - transition than linear and exponential decays, hence the name. - See https://en.wikipedia.org/wiki/Smoothstep. - - config: - initial: Decay starts from this value. - final: Decay ends at this value. - start_time: Step when decay starts. Constant before it. - end_time: When decay ends. Constant after it. - - f(t) is fully defined here: - https://en.wikipedia.org/wiki/Smoothstep#Variations. - - f(t) is smooth, as in its first-derivative exists everywhere. - """ - - def __init__(self, config): - super(SmootherstepDecaySchedule, self).__init__(config) - self.initial = config.initial - self.final = config.final - self.start_time = config.start_time - self.end_time = config.end_time - - if self.end_time < self.start_time: - raise ValueError('start_time must be before end_time.') - - self._time_diff = float(self.end_time - self.start_time) - self._diff = float(self.final - self.initial) - - def __call__(self, global_step): - if global_step <= self.start_time: - return self.initial - if global_step > self.end_time: - return self.final - x = (global_step - self.start_time) / self._time_diff - - # Smootherstep - return self.initial + x * x * x * (x * (x * 6 - 15) + 10) * self._diff - - -class HardOscillatorSchedule(Schedule): - """Hard oscillator function. - - config: - high: Max value of the oscillator. Value at constant plateaus. - low: Min value of the oscillator. Value at constant valleys. - start_time: Global step when oscillation starts. Constant before this. - period: Width of one oscillation, i.e. number of steps over which the - oscillation takes place. - transition_fraction: Fraction of the period spent transitioning between high - and low values. 50% of this time is spent rising, and 50% of this time - is spent falling. 50% of the remaining time is spent constant at the - high value, and 50% of the remaining time is spent constant at the low - value. transition_fraction = 1.0 means the entire period is spent - rising and falling. transition_fraction = 0.0 means no time is spent - rising and falling, i.e. the function jumps instantaneously between - high and low. - - f(t) = high when t < start_time. - f(t) is periodic when t >= start_time, with f(t + period) = f(t). - f(t) is linear with positive slope when rising, and negative slope when - falling. At the start of the period t0, f(t0) = high and begins to descend. - At the middle of the period f is low and is constant until the ascension - begins. f then rises from low to high and is constant again until the period - repeats. - - Note: when transition_fraction is 0, f starts the period low and ends high. 
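-
-  Example (illustrative; the same settings as the unit test): oscillate
-  between 2 and 0 with period 10 from step 100, spending half of each period
-  in transition:
-    config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
-                      period=10, transition_fraction=0.5)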
- """ - - def __init__(self, config): - super(HardOscillatorSchedule, self).__init__(config) - self.high = config.high - self.low = config.low - self.start_time = config.start_time - self.period = float(config.period) - self.transition_fraction = config.transition_fraction - self.half_transition_fraction = config.transition_fraction / 2.0 - - if self.transition_fraction < 0 or self.transition_fraction > 1.0: - raise ValueError('transition_fraction must be between 0 and 1.0') - if self.period <= 0: - raise ValueError('period must be positive') - - self._slope = ( - float(self.high - self.low) / self.half_transition_fraction - if self.half_transition_fraction > 0 else float('inf')) - - def __call__(self, global_step): - if global_step < self.start_time: - return self.high - period_pos = ((global_step - self.start_time) / self.period) % 1.0 - if period_pos >= 0.5: - # ascending - period_pos -= 0.5 - if period_pos < self.half_transition_fraction: - return self.low + period_pos * self._slope - else: - return self.high - else: - # descending - if period_pos < self.half_transition_fraction: - return self.high - period_pos * self._slope - else: - return self.low - - -_NAME_TO_CONFIG = { - 'const': ConstSchedule, - 'linear_decay': LinearDecaySchedule, - 'exp_decay': ExponentialDecaySchedule, - 'smooth_decay': SmootherstepDecaySchedule, - 'hard_osc': HardOscillatorSchedule, -} - - -def make_schedule(config): - """Schedule factory. - - Given `config` containing a `fn` property, a Schedule implementation is - instantiated with `config`. See `_NAME_TO_CONFIG` for `fn` options. - - Args: - config: Config with a `fn` option that specifies which Schedule - implementation to use. `config` is passed into the constructor. - - Returns: - A Schedule impl instance. - """ - schedule_class = _NAME_TO_CONFIG[config.fn] - return schedule_class(config) diff --git a/research/brain_coder/common/schedules_test.py b/research/brain_coder/common/schedules_test.py deleted file mode 100644 index b17022f45..000000000 --- a/research/brain_coder/common/schedules_test.py +++ /dev/null @@ -1,139 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for common.schedules.""" - -from math import exp -from math import sqrt -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from common import config_lib # brain coder -from common import schedules # brain coder - - -class SchedulesTest(tf.test.TestCase): - - def ScheduleTestHelper(self, config, schedule_subtype, io_values): - """Run common checks for schedules. - - Args: - config: Config object which is passed into schedules.make_schedule. - schedule_subtype: The expected schedule type to be instantiated. - io_values: List of (input, output) pairs. Must be in ascending input - order. No duplicate inputs. - """ - - # Check that make_schedule makes the correct type. - f = schedules.make_schedule(config) - self.assertTrue(isinstance(f, schedule_subtype)) - - # Check that multiple instances returned from make_schedule behave the same. - fns = [schedules.make_schedule(config) for _ in xrange(3)] - - # Check that all the inputs map to the right outputs. - for i, o in io_values: - for f in fns: - f_out = f(i) - self.assertTrue( - np.isclose(o, f_out), - 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out)) - - # Check that a subset of the io_values are still correct. 
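-    # (This exercises property 3 from the Schedule docstring: outputs may not
-    # depend on which earlier t's the schedule was called on, so skipping
-    # inputs must not change the values.)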
- f = schedules.make_schedule(config) - subseq = [io_values[i**2] for i in xrange(int(sqrt(len(io_values))))] - if subseq[-1] != io_values[-1]: - subseq.append(io_values[-1]) - for i, o in subseq: - f_out = f(i) - self.assertTrue( - np.isclose(o, f_out), - 'Wrong value at input %d. Expected %s, got %s' % (i, o, f_out)) - - # Check duplicate calls. - f = schedules.make_schedule(config) - for i, o in io_values: - for _ in xrange(3): - f_out = f(i) - self.assertTrue( - np.isclose(o, f_out), - 'Duplicate calls at input %d are not equal. Expected %s, got %s' - % (i, o, f_out)) - - def testConstSchedule(self): - self.ScheduleTestHelper( - config_lib.Config(fn='const', const=5), - schedules.ConstSchedule, - [(0, 5), (1, 5), (10, 5), (20, 5), (100, 5), (1000000, 5)]) - - def testLinearDecaySchedule(self): - self.ScheduleTestHelper( - config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10, - end_time=20), - schedules.LinearDecaySchedule, - [(0, 2), (1, 2), (10, 2), (11, 1.8), (15, 1), (19, 0.2), (20, 0), - (100000, 0)]) - - # Test step function. - self.ScheduleTestHelper( - config_lib.Config(fn='linear_decay', initial=2, final=0, start_time=10, - end_time=10), - schedules.LinearDecaySchedule, - [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)]) - - def testExponentialDecaySchedule(self): - self.ScheduleTestHelper( - config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6), - start_time=10, end_time=20), - schedules.ExponentialDecaySchedule, - [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-1/2. - 1)), - (15, exp(-5/2. - 1)), (19, exp(-9/2. - 1)), (20, exp(-6)), - (100000, exp(-6))]) - - # Test step function. - self.ScheduleTestHelper( - config_lib.Config(fn='exp_decay', initial=exp(-1), final=exp(-6), - start_time=10, end_time=10), - schedules.ExponentialDecaySchedule, - [(0, exp(-1)), (1, exp(-1)), (10, exp(-1)), (11, exp(-6)), - (15, exp(-6))]) - - def testSmootherstepDecaySchedule(self): - self.ScheduleTestHelper( - config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10, - end_time=20), - schedules.SmootherstepDecaySchedule, - [(0, 2), (1, 2), (10, 2), (11, 1.98288), (15, 1), (19, 0.01712), - (20, 0), (100000, 0)]) - - # Test step function. - self.ScheduleTestHelper( - config_lib.Config(fn='smooth_decay', initial=2, final=0, start_time=10, - end_time=10), - schedules.SmootherstepDecaySchedule, - [(0, 2), (1, 2), (10, 2), (11, 0), (15, 0)]) - - def testHardOscillatorSchedule(self): - self.ScheduleTestHelper( - config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100, - period=10, transition_fraction=0.5), - schedules.HardOscillatorSchedule, - [(0, 2), (1, 2), (10, 2), (100, 2), (101, 1.2), (102, 0.4), (103, 0), - (104, 0), (105, 0), (106, 0.8), (107, 1.6), (108, 2), (109, 2), - (110, 2), (111, 1.2), (112, 0.4), (115, 0), (116, 0.8), (119, 2), - (120, 2), (100001, 1.2), (100002, 0.4), (100005, 0), (100006, 0.8), - (100010, 2)]) - - # Test instantaneous step. 
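- # With transition_fraction=0 there are no rising/falling ramps: f jumps to
- # low at the start of each period and back to high at the half period.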
- self.ScheduleTestHelper(
- config_lib.Config(fn='hard_osc', high=2, low=0, start_time=100,
- period=10, transition_fraction=0),
- schedules.HardOscillatorSchedule,
- [(0, 2), (1, 2), (10, 2), (99, 2), (100, 0), (104, 0), (105, 2),
- (106, 2), (109, 2), (110, 0)])
-
-
-if __name__ == '__main__':
- tf.test.main()
diff --git a/research/brain_coder/common/utils.py b/research/brain_coder/common/utils.py
deleted file mode 100644
index fa5f1c507..000000000
--- a/research/brain_coder/common/utils.py
+++ /dev/null
@@ -1,558 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Utility classes and functions."""
-
-import bisect
-from collections import deque
-import cPickle
-import heapq
-import random
-
-from absl import logging
-import numpy as np
-import six
-from six.moves import xrange
-import tensorflow as tf
-
-
-def tuple_to_record(tuple_, record_type):
- return record_type(**dict(zip(record_type.__slots__, tuple_)))
-
-
-def make_record(type_name, attributes, defaults=None):
- """Factory for mutable record classes.
-
- A record acts just like a collections.namedtuple except slots are writable.
- One exception is that record classes are not equivalent to tuples or other
- record classes of the same length.
-
- Note: each call to `make_record` produces a unique type. Two calls will make
- different types even if `type_name` is the same each time.
-
- Args:
- type_name: Name of the record type to create.
- attributes: List of names of each record attribute. The order of the list
- is preserved.
- defaults: (optional) default values for attributes. A dict mapping attribute
- names to values.
-
- Returns:
- A new record type.
-
- Raises:
- ValueError: If,
- `defaults` is not a dict,
- `attributes` contains duplicate names,
- `defaults` keys are not contained in `attributes`.
- """
- if defaults is None:
- defaults = {}
- if not isinstance(defaults, dict):
- raise ValueError('defaults must be a dict.')
- attr_set = set(attributes)
- if len(attr_set) < len(attributes):
- raise ValueError('No duplicate attributes allowed.')
- if not set(defaults.keys()).issubset(attr_set):
- raise ValueError('Default attributes must be given in the attributes list.')
-
- class RecordClass(object):
- """A record type.
-
- Acts like mutable tuple with named slots.
- """
- __slots__ = list(attributes)
- _defaults = dict(defaults)
-
- def __init__(self, *args, **kwargs):
- if len(args) > len(self.__slots__):
- raise ValueError('Too many arguments. %s has length %d.'
- % (type(self).__name__, len(self.__slots__)))
- for attr, val in self._defaults.items():
- setattr(self, attr, val)
- for i, arg in enumerate(args):
- setattr(self, self.__slots__[i], arg)
- for attr, val in kwargs.items():
- setattr(self, attr, val)
- for attr in self.__slots__:
- if not hasattr(self, attr):
- raise ValueError('Required attr "%s" is not set.' % attr)
-
- def __len__(self):
- return len(self.__slots__)
-
- def __iter__(self):
- for attr in self.__slots__:
- yield getattr(self, attr)
-
- def __getitem__(self, index):
- return getattr(self, self.__slots__[index])
-
- def __setitem__(self, index, value):
- return setattr(self, self.__slots__[index], value)
-
- def __eq__(self, other):
- # Types must be equal as well as values.
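- # (make_record creates a distinct class per call, so records produced by
- # different factory calls never compare equal, even with equal values.)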
- return (isinstance(other, type(self)) - and all(a == b for a, b in zip(self, other))) - - def __str__(self): - return '%s(%s)' % ( - type(self).__name__, - ', '.join(attr + '=' + str(getattr(self, attr)) - for attr in self.__slots__)) - - def __repr__(self): - return str(self) - - RecordClass.__name__ = type_name - return RecordClass - - -# Making minibatches. -def stack_pad(tensors, pad_axes=None, pad_to_lengths=None, dtype=np.float32, - pad_value=0): - """Stack tensors along 0-th dim and pad them to be the same shape. - - Args: - tensors: Any list of iterables (python list, numpy array, etc). Can be 1D - or multi-D iterables. - pad_axes: An int or list of ints. Axes to pad along. - pad_to_lengths: Length in each dimension. If pad_axes was an int, this is an - int or None. If pad_axes was a list of ints, this is a list of mixed int - and None types with the same length, or None. A None length means the - maximum length among the given tensors is used. - dtype: Type of output numpy array. Defaults to np.float32. - pad_value: Value to use for padding. Defaults to 0. - - Returns: - Numpy array containing the tensors stacked along the 0-th dimension and - padded along the specified dimensions. - - Raises: - ValueError: If the tensors do not have equal shapes along non-padded - dimensions. - """ - tensors = [np.asarray(t) for t in tensors] - max_lengths = [max(l) for l in zip(*[t.shape for t in tensors])] - same_axes = dict(enumerate(max_lengths)) - if pad_axes is None: - pad_axes = [] - if isinstance(pad_axes, six.integer_types): - if pad_to_lengths is not None: - max_lengths[pad_axes] = pad_to_lengths - del same_axes[pad_axes] - else: - if pad_to_lengths is None: - pad_to_lengths = [None] * len(pad_axes) - for i, l in zip(pad_axes, pad_to_lengths): - if l is not None: - max_lengths[i] = l - del same_axes[i] - same_axes_items = same_axes.items() - dest = np.full([len(tensors)] + max_lengths, pad_value, dtype=dtype) - for i, t in enumerate(tensors): - for j, l in same_axes_items: - if t.shape[j] != l: - raise ValueError( - 'Tensor at index %d does not have size %d along axis %d' - % (i, l, j)) - dest[[i] + [slice(0, d) for d in t.shape]] = t - return dest - - -class RandomQueue(deque): - - def __init__(self, capacity): - super(RandomQueue, self).__init__([], capacity) - self.capacity = capacity - - def random_sample(self, sample_size): - idx = np.random.choice(len(self), sample_size) - return [self[i] for i in idx] - - def push(self, item): - # Append to right. Oldest element will be popped from left. - self.append(item) - - -class MPQItemContainer(object): - """Class for holding an item with its score. - - Defines a comparison function for use in the heap-queue. - """ - - def __init__(self, score, item, extra_data): - self.item = item - self.score = score - self.extra_data = extra_data - - def __cmp__(self, other): - assert isinstance(other, type(self)) - return cmp(self.score, other.score) - - def __iter__(self): - """Allows unpacking like a tuple.""" - yield self.score - yield self.item - yield self.extra_data - - def __repr__(self): - """String representation of this item. - - `extra_data` is not included in the representation. We are assuming that - `extra_data` is not easily interpreted by a human (if it was, it should be - hashable, like a string or tuple). - - Returns: - String representation of `self`. 
- """ - return str((self.score, self.item)) - - def __str__(self): - return repr(self) - - -class MaxUniquePriorityQueue(object): - """A maximum priority queue where duplicates are not added. - - The top items by score remain in the queue. When the capacity is reached, - the lowest scored item in the queue will be dropped. - - This implementation differs from a typical priority queue, in that the minimum - score is popped, instead of the maximum. Largest scores remain stuck in the - queue. This is useful for accumulating the best known items from a population. - - The items used to determine uniqueness must be hashable, but additional - non-hashable data may be stored with each item. - """ - - def __init__(self, capacity): - self.capacity = capacity - self.heap = [] - self.unique_items = set() - - def push(self, score, item, extra_data=None): - """Push an item onto the queue. - - If the queue is at capacity, the item with the smallest score will be - dropped. Note that it is assumed each item has exactly one score. The same - item with a different score will still be dropped. - - Args: - score: Number used to prioritize items in the queue. Largest scores are - kept in the queue. - item: A hashable item to be stored. Duplicates of this item will not be - added to the queue. - extra_data: An extra (possible not hashable) data to store with the item. - """ - if item in self.unique_items: - return - if len(self.heap) >= self.capacity: - _, popped_item, _ = heapq.heappushpop( - self.heap, MPQItemContainer(score, item, extra_data)) - self.unique_items.add(item) - self.unique_items.remove(popped_item) - else: - heapq.heappush(self.heap, MPQItemContainer(score, item, extra_data)) - self.unique_items.add(item) - - def pop(self): - """Pop the item with the lowest score. - - Returns: - score: Item's score. - item: The item that was popped. - extra_data: Any extra data stored with the item. - """ - if not self.heap: - return () - score, item, extra_data = heapq.heappop(self.heap) - self.unique_items.remove(item) - return score, item, extra_data - - def get_max(self): - """Peek at the item with the highest score. - - Returns: - Same as `pop`. - """ - if not self.heap: - return () - score, item, extra_data = heapq.nlargest(1, self.heap)[0] - return score, item, extra_data - - def get_min(self): - """Peek at the item with the lowest score. - - Returns: - Same as `pop`. - """ - if not self.heap: - return () - score, item, extra_data = heapq.nsmallest(1, self.heap)[0] - return score, item, extra_data - - def random_sample(self, sample_size): - """Randomly select items from the queue. - - This does not modify the queue. - - Items are drawn from a uniform distribution, and not weighted by score. - - Args: - sample_size: Number of random samples to draw. The same item can be - sampled multiple times. - - Returns: - List of sampled items (of length `sample_size`). Each element in the list - is a tuple: (item, extra_data). - """ - idx = np.random.choice(len(self.heap), sample_size) - return [(self.heap[i].item, self.heap[i].extra_data) for i in idx] - - def iter_in_order(self): - """Iterate over items in the queue from largest score to smallest. - - Yields: - item: Hashable item. - extra_data: Extra data stored with the item. 
- """ - for _, item, extra_data in heapq.nlargest(len(self.heap), self.heap): - yield item, extra_data - - def __len__(self): - return len(self.heap) - - def __iter__(self): - for _, item, _ in self.heap: - yield item - - def __repr__(self): - return '[' + ', '.join(repr(c) for c in self.heap) + ']' - - def __str__(self): - return repr(self) - - -class RouletteWheel(object): - """Randomly samples stored objects proportionally to their given weights. - - Stores objects and weights. Acts like a roulette wheel where each object is - given a slice of the roulette disk proportional to its weight. - - This can be used as a replay buffer where past experiences are sampled - proportionally to their weights. A good choice of "weight" for reinforcement - learning is exp(reward / temperature) where temperature -> inf makes the - distribution more uniform and temperature -> 0 makes the distribution more - peaky. - - To prevent experiences from being overweighted by appearing in the replay - buffer multiple times, a "unique mode" is supported where duplicate - experiences are ignored. In unique mode, weights can be quickly retrieved from - keys. - """ - - def __init__(self, unique_mode=False, save_file=None): - """Construct empty RouletteWheel. - - If `save_file` is not None, and the file already exists on disk, whatever - is in the file will be loaded into this instance. This allows jobs using - RouletteWheel to resume after preemption. - - Args: - unique_mode: If True, puts this RouletteWheel into unique mode, where - objects are added with hashable keys, so that duplicates are ignored. - save_file: Optional file path to save to. Must be a string containing - an absolute path to a file, or None. File will be Python pickle - format. - """ - self.unique_mode = unique_mode - self.objects = [] - self.weights = [] - self.partial_sums = [] - if self.unique_mode: - self.keys_to_weights = {} - self.save_file = save_file - self.save_to_disk_buffer = [] - - if save_file is not None and tf.gfile.Exists(save_file): - # Load from disk. - with tf.gfile.OpenFast(save_file, 'r') as f: - count = 0 - while 1: - try: - obj, weight, key = cPickle.load(f) - except EOFError: - break - else: - self.add(obj, weight, key) - count += 1 - logging.info('Loaded %d samples from disk.', count) - # Clear buffer since these items are already on disk. - self.save_to_disk_buffer = [] - - def __iter__(self): - return iter(zip(self.objects, self.weights)) - - def __len__(self): - return len(self.objects) - - def is_empty(self): - """Returns whether there is anything in the roulette wheel.""" - return not self.partial_sums - - @property - def total_weight(self): - """Total cumulative weight across all objects.""" - if self.partial_sums: - return self.partial_sums[-1] - return 0.0 - - def has_key(self, key): - if self.unique_mode: - RuntimeError('has_key method can only be called in unique mode.') - return key in self.keys_to_weights - - def get_weight(self, key): - if self.unique_mode: - RuntimeError('get_weight method can only be called in unique mode.') - return self.keys_to_weights[key] - - def add(self, obj, weight, key=None): - """Add one object and its weight to the roulette wheel. - - Args: - obj: Any object to be stored. - weight: A non-negative float. The given object will be drawn with - probability proportional to this weight when sampling. - key: This argument is only used when in unique mode. To allow `obj` to - be an unhashable type, like list, a separate hashable key is given. - Each `key` should be unique to each `obj`. 
`key` is used to check if - `obj` has been added to the roulette wheel before. - - Returns: - True if the object was added, False if it was not added due to it being - a duplicate (this only happens in unique mode). - - Raises: - ValueError: If `weight` is negative. - ValueError: If `key` is not given when in unique mode, or if `key` is - given when not in unique mode. - """ - if weight < 0: - raise ValueError('Weight must be non-negative') - if self.unique_mode: - if key is None: - raise ValueError( - 'Hashable key required for objects when unique mode is enabled.') - if key in self.keys_to_weights: - # Weight updates are not allowed. Ignore the given value of `weight`. - return False - self.keys_to_weights[key] = weight - elif key is not None: - raise ValueError( - 'key argument should not be used when unique mode is disabled.') - self.objects.append(obj) - self.weights.append(weight) - self.partial_sums.append(self.total_weight + weight) - if self.save_file is not None: - # Record new item in buffer. - self.save_to_disk_buffer.append((obj, weight, key)) - return True - - def add_many(self, objs, weights, keys=None): - """Add many object and their weights to the roulette wheel. - - Arguments are the same as the `add` method, except each is a list. Lists - must all be the same length. - - Args: - objs: List of objects to be stored. - weights: List of non-negative floats. See `add` method. - keys: List of hashable keys. This argument is only used when in unique - mode. See `add` method. - - Returns: - Number of objects added. This number will be less than the number of - objects provided if we are in unique mode and some keys are already - in the roulette wheel. - - Raises: - ValueError: If `keys` argument is provided when unique_mode == False, or - is not provided when unique_mode == True. - ValueError: If any of the lists are not the same length. - ValueError: If any of the weights are negative. - """ - if keys is not None and not self.unique_mode: - raise ValueError('Not in unique mode. Do not provide keys.') - elif keys is None and self.unique_mode: - raise ValueError('In unique mode. You must provide hashable keys.') - if keys and len(objs) != len(keys): - raise ValueError('Number of objects does not equal number of keys.') - if len(objs) != len(weights): - raise ValueError('Number of objects does not equal number of weights.') - return sum([self.add(obj, weights[i], key=keys[i] if keys else None) - for i, obj in enumerate(objs)]) - - def sample(self): - """Spin the roulette wheel. - - Randomly select an object with probability proportional to its weight. - - Returns: - object: The selected object. - weight: The weight of the selected object. - - Raises: - RuntimeError: If the roulette wheel is empty. - """ - if self.is_empty(): - raise RuntimeError('Trying to sample from empty roulette wheel.') - spin = random.random() * self.total_weight - - # Binary search. - i = bisect.bisect_right(self.partial_sums, spin) - if i == len(self.partial_sums): - # This should not happen since random.random() will always be strictly - # less than 1.0, and the last partial sum equals self.total_weight(). - # However it may happen due to rounding error. In that case it is easy to - # handle this, just select the last object. 
- i -= 1
-
- return self.objects[i], self.weights[i]
-
- def sample_many(self, count):
- """Spin the roulette wheel `count` times and return the results."""
- if self.is_empty():
- raise RuntimeError('Trying to sample from empty roulette wheel.')
- return [self.sample() for _ in xrange(count)]
-
- def incremental_save(self, log_info=False):
- """Write new entries to disk.
-
- This performs an append operation on the `save_file` given in the
- constructor. Any entries added since the last call to `incremental_save`
- will be appended to the file.
-
- If a new RouletteWheel is constructed with the same `save_file`, all the
- entries written there will be automatically loaded into the instance.
- This is useful when a job resumes after preemption.
-
- Args:
- log_info: If True, info about this operation will be logged.
-
- Raises:
- RuntimeError: If `save_file` given in the constructor is None.
- """
- if self.save_file is None:
- raise RuntimeError('Cannot call incremental_save. `save_file` is None.')
- if log_info:
- logging.info('Saving %d new samples to disk.',
- len(self.save_to_disk_buffer))
- with tf.gfile.FastGFile(self.save_file, 'a') as f:
- for entry in self.save_to_disk_buffer:
- cPickle.dump(entry, f)
- # Clear the buffer.
- self.save_to_disk_buffer = []
diff --git a/research/brain_coder/common/utils_test.py b/research/brain_coder/common/utils_test.py
deleted file mode 100644
index 569c2877d..000000000
--- a/research/brain_coder/common/utils_test.py
+++ /dev/null
@@ -1,382 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tests for common.utils.
-"""
-
-from collections import Counter
-import random
-import tempfile
-import numpy as np
-import tensorflow as tf
-
-from common import utils # brain coder
-
-
-class UtilsTest(tf.test.TestCase):
-
- def testStackPad(self):
- # 1D.
- tensors = [[1, 2, 3], [4, 5, 6, 7, 8], [9]]
- result = utils.stack_pad(tensors, pad_axes=0, pad_to_lengths=6)
- self.assertTrue(np.array_equal(
- result,
- np.asarray([[1, 2, 3, 0, 0, 0],
- [4, 5, 6, 7, 8, 0],
- [9, 0, 0, 0, 0, 0]], dtype=np.float32)))
-
- # 3D.
- tensors = [[[[1, 2, 3], [4, 5, 6]]],
- [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]],
- [[[0, 1, 2]], [[3, 4, 5]]]]
- result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[2, 2])
- self.assertTrue(np.array_equal(
- result,
- np.asarray([[[[1, 2, 3], [4, 5, 6]],
- [[0, 0, 0], [0, 0, 0]]],
- [[[7, 8, 9], [0, 1, 2]],
- [[3, 4, 5], [6, 7, 8]]],
- [[[0, 1, 2], [0, 0, 0]],
- [[3, 4, 5], [0, 0, 0]]]], dtype=np.float32)))
-
- def testStackPadNoAxes(self):
- # 2D.
- tensors = [[[1, 2, 3], [4, 5, 6]],
- [[7, 8, 9], [1, 2, 3]],
- [[4, 5, 6], [7, 8, 9]]]
- result = utils.stack_pad(tensors)
- self.assertTrue(np.array_equal(
- result,
- np.asarray(tensors)))
-
- def testStackPadNoneLength(self):
- # 1D.
- tensors = [[1, 2, 3], [4, 5, 6, 7, 8], [9]]
- result = utils.stack_pad(tensors, pad_axes=0, pad_to_lengths=None)
- self.assertTrue(np.array_equal(
- result,
- np.asarray([[1, 2, 3, 0, 0],
- [4, 5, 6, 7, 8],
- [9, 0, 0, 0, 0]], dtype=np.float32)))
-
- # 3D.
- tensors = [[[[1, 2, 3], [4, 5, 6]]], - [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], - [[[0, 1, 2]], [[3, 4, 5]]]] - result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=None) - self.assertTrue(np.array_equal( - result, - np.asarray([[[[1, 2, 3], [4, 5, 6]], - [[0, 0, 0], [0, 0, 0]]], - [[[7, 8, 9], [0, 1, 2]], - [[3, 4, 5], [6, 7, 8]]], - [[[0, 1, 2], [0, 0, 0]], - [[3, 4, 5], [0, 0, 0]]]], dtype=np.float32))) - - # 3D with partial pad_to_lengths. - tensors = [[[[1, 2, 3], [4, 5, 6]]], - [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], - [[[0, 1, 2]], [[3, 4, 5]]]] - result = utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[None, 3]) - self.assertTrue(np.array_equal( - result, - np.asarray([[[[1, 2, 3], [4, 5, 6], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0], [0, 0, 0]]], - [[[7, 8, 9], [0, 1, 2], [0, 0, 0]], - [[3, 4, 5], [6, 7, 8], [0, 0, 0]]], - [[[0, 1, 2], [0, 0, 0], [0, 0, 0]], - [[3, 4, 5], [0, 0, 0], [0, 0, 0]]]], dtype=np.float32))) - - def testStackPadValueError(self): - # 3D. - tensors = [[[[1, 2, 3], [4, 5, 6]]], - [[[7, 8, 9], [0, 1, 2]], [[3, 4, 5], [6, 7, 8]]], - [[[0, 1, 2]], [[3, 4, 5]]], - [[[1, 2, 3, 4]]]] - - # Not all tensors have the same shape along axis 2. - with self.assertRaises(ValueError): - utils.stack_pad(tensors, pad_axes=[0, 1], pad_to_lengths=[2, 2]) - - def testRecord(self): - my_record = utils.make_record('my_record', ['a', 'b', 'c'], {'b': 55}) - inst = my_record(a=1, b=2, c=3) - self.assertEqual(1, inst.a) - self.assertEqual(2, inst.b) - self.assertEqual(3, inst.c) - self.assertEqual(1, inst[0]) - self.assertEqual(2, inst[1]) - self.assertEqual(3, inst[2]) - self.assertEqual([1, 2, 3], list(iter(inst))) - self.assertEqual(3, len(inst)) - - inst.b = 999 - self.assertEqual(999, inst.b) - self.assertEqual(999, inst[1]) - - inst2 = my_record(1, 999, 3) - self.assertTrue(inst == inst2) - inst2[1] = 3 - self.assertFalse(inst == inst2) - - inst3 = my_record(a=1, c=3) - inst.b = 55 - self.assertEqual(inst, inst3) - - def testRecordUnique(self): - record1 = utils.make_record('record1', ['a', 'b', 'c']) - record2 = utils.make_record('record2', ['a', 'b', 'c']) - self.assertNotEqual(record1(1, 2, 3), record2(1, 2, 3)) - self.assertEqual(record1(1, 2, 3), record1(1, 2, 3)) - - def testTupleToRecord(self): - my_record = utils.make_record('my_record', ['a', 'b', 'c']) - inst = utils.tuple_to_record((5, 6, 7), my_record) - self.assertEqual(my_record(5, 6, 7), inst) - - def testRecordErrors(self): - my_record = utils.make_record('my_record', ['a', 'b', 'c'], {'b': 10}) - - with self.assertRaises(ValueError): - my_record(c=5) # Did not provide required argument 'a'. - with self.assertRaises(ValueError): - my_record(1, 2, 3, 4) # Too many arguments. 
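-
- # RandomQueue keeps only the newest `capacity` items; pushing to a full
- # queue evicts the oldest element, which the next test exercises.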
- - def testRandomQueue(self): - np.random.seed(567890) - queue = utils.RandomQueue(5) - queue.push(5) - queue.push(6) - queue.push(7) - queue.push(8) - queue.push(9) - queue.push(10) - self.assertTrue(5 not in queue) - sample = queue.random_sample(1000) - self.assertEqual(1000, len(sample)) - self.assertEqual([6, 7, 8, 9, 10], sorted(np.unique(sample).tolist())) - - def testMaxUniquePriorityQueue(self): - queue = utils.MaxUniquePriorityQueue(5) - queue.push(1.0, 'string 1') - queue.push(-0.5, 'string 2') - queue.push(0.5, 'string 3') - self.assertEqual((-0.5, 'string 2', None), queue.pop()) - queue.push(0.1, 'string 4') - queue.push(1.5, 'string 5') - queue.push(0.0, 'string 6') - queue.push(0.2, 'string 7') - self.assertEqual((1.5, 'string 5', None), queue.get_max()) - self.assertEqual((0.1, 'string 4', None), queue.get_min()) - self.assertEqual( - [('string 5', None), ('string 1', None), ('string 3', None), - ('string 7', None), ('string 4', None)], - list(queue.iter_in_order())) - - def testMaxUniquePriorityQueue_Duplicates(self): - queue = utils.MaxUniquePriorityQueue(5) - queue.push(0.0, 'string 1') - queue.push(0.0, 'string 2') - queue.push(0.0, 'string 3') - self.assertEqual((0.0, 'string 1', None), queue.pop()) - self.assertEqual((0.0, 'string 2', None), queue.pop()) - self.assertEqual((0.0, 'string 3', None), queue.pop()) - self.assertEqual(0, len(queue)) - queue.push(0.1, 'string 4') - queue.push(1.5, 'string 5') - queue.push(0.3, 'string 6') - queue.push(0.2, 'string 7') - queue.push(0.0, 'string 8') - queue.push(1.5, 'string 5') - queue.push(1.5, 'string 5') - self.assertEqual((1.5, 'string 5', None), queue.get_max()) - self.assertEqual((0.0, 'string 8', None), queue.get_min()) - self.assertEqual( - [('string 5', None), ('string 6', None), ('string 7', None), - ('string 4', None), ('string 8', None)], - list(queue.iter_in_order())) - - def testMaxUniquePriorityQueue_ExtraData(self): - queue = utils.MaxUniquePriorityQueue(5) - queue.push(1.0, 'string 1', [1, 2, 3]) - queue.push(0.5, 'string 2', [4, 5, 6]) - queue.push(0.5, 'string 3', [7, 8, 9]) - queue.push(0.5, 'string 2', [10, 11, 12]) - self.assertEqual((0.5, 'string 2', [4, 5, 6]), queue.pop()) - self.assertEqual((0.5, 'string 3', [7, 8, 9]), queue.pop()) - self.assertEqual((1.0, 'string 1', [1, 2, 3]), queue.pop()) - self.assertEqual(0, len(queue)) - queue.push(0.5, 'string 2', [10, 11, 12]) - self.assertEqual((0.5, 'string 2', [10, 11, 12]), queue.pop()) - - def testRouletteWheel(self): - random.seed(12345678987654321) - r = utils.RouletteWheel() - self.assertTrue(r.is_empty()) - with self.assertRaises(RuntimeError): - r.sample() # Cannot sample when empty. - self.assertEqual(0, r.total_weight) - self.assertEqual(True, r.add('a', 0.1)) - self.assertFalse(r.is_empty()) - self.assertEqual(0.1, r.total_weight) - self.assertEqual(True, r.add('b', 0.01)) - self.assertEqual(0.11, r.total_weight) - self.assertEqual(True, r.add('c', 0.5)) - self.assertEqual(True, r.add('d', 0.1)) - self.assertEqual(True, r.add('e', 0.05)) - self.assertEqual(True, r.add('f', 0.03)) - self.assertEqual(True, r.add('g', 0.001)) - self.assertEqual(0.791, r.total_weight) - self.assertFalse(r.is_empty()) - - # Check that sampling is correct. 
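- # (Each draw must return an (obj, weight) pair that was added above.)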
- obj, weight = r.sample() - self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) - self.assertTrue((obj, weight) in r) - for obj, weight in r.sample_many(100): - self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) - self.assertTrue((obj, weight) in r) - - # Check that sampling distribution is correct. - n = 1000000 - c = Counter(r.sample_many(n)) - for obj, w in r: - estimated_w = c[(obj, w)] / float(n) * r.total_weight - self.assertTrue( - np.isclose(w, estimated_w, atol=1e-3), - 'Expected %s, got %s, for object %s' % (w, estimated_w, obj)) - - def testRouletteWheel_AddMany(self): - random.seed(12345678987654321) - r = utils.RouletteWheel() - self.assertTrue(r.is_empty()) - with self.assertRaises(RuntimeError): - r.sample() # Cannot sample when empty. - self.assertEqual(0, r.total_weight) - count = r.add_many( - ['a', 'b', 'c', 'd', 'e', 'f', 'g'], - [0.1, 0.01, 0.5, 0.1, 0.05, 0.03, 0.001]) - self.assertEqual(7, count) - self.assertFalse(r.is_empty()) - self.assertEqual(0.791, r.total_weight) - - # Adding no items is allowed. - count = r.add_many([], []) - self.assertEqual(0, count) - self.assertFalse(r.is_empty()) - self.assertEqual(0.791, r.total_weight) - - # Check that sampling is correct. - obj, weight = r.sample() - self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) - self.assertTrue((obj, weight) in r) - for obj, weight in r.sample_many(100): - self.assertTrue(isinstance(weight, float), 'Type: %s' % type(weight)) - self.assertTrue((obj, weight) in r) - - # Check that sampling distribution is correct. - n = 1000000 - c = Counter(r.sample_many(n)) - for obj, w in r: - estimated_w = c[(obj, w)] / float(n) * r.total_weight - self.assertTrue( - np.isclose(w, estimated_w, atol=1e-3), - 'Expected %s, got %s, for object %s' % (w, estimated_w, obj)) - - def testRouletteWheel_AddZeroWeights(self): - r = utils.RouletteWheel() - self.assertEqual(True, r.add('a', 0)) - self.assertFalse(r.is_empty()) - self.assertEqual(4, r.add_many(['b', 'c', 'd', 'e'], [0, 0.1, 0, 0])) - self.assertEqual( - [('a', 0.0), ('b', 0.0), ('c', 0.1), ('d', 0.0), ('e', 0.0)], - list(r)) - - def testRouletteWheel_UniqueMode(self): - random.seed(12345678987654321) - r = utils.RouletteWheel(unique_mode=True) - self.assertEqual(True, r.add([1, 2, 3], 1, 'a')) - self.assertEqual(True, r.add([4, 5], 0.5, 'b')) - self.assertEqual(False, r.add([1, 2, 3], 1.5, 'a')) - self.assertEqual( - [([1, 2, 3], 1.0), ([4, 5], 0.5)], - list(r)) - self.assertEqual(1.5, r.total_weight) - self.assertEqual( - 2, - r.add_many( - [[5, 6, 2, 3], [1, 2, 3], [8], [1, 2, 3]], - [0.1, 0.2, 0.1, 2.0], - ['c', 'a', 'd', 'a'])) - self.assertEqual( - [([1, 2, 3], 1.0), ([4, 5], 0.5), ([5, 6, 2, 3], 0.1), ([8], 0.1)], - list(r)) - self.assertTrue(np.isclose(1.7, r.total_weight)) - self.assertEqual(0, r.add_many([], [], [])) # Adding no items is allowed. - with self.assertRaises(ValueError): - # Key not given. - r.add([7, 8, 9], 2.0) - with self.assertRaises(ValueError): - # Keys not given. 
- r.add_many([[7, 8, 9], [10]], [2.0, 2.0]) - self.assertEqual(True, r.has_key('a')) - self.assertEqual(True, r.has_key('b')) - self.assertEqual(False, r.has_key('z')) - self.assertEqual(1.0, r.get_weight('a')) - self.assertEqual(0.5, r.get_weight('b')) - - r = utils.RouletteWheel(unique_mode=False) - self.assertEqual(True, r.add([1, 2, 3], 1)) - self.assertEqual(True, r.add([4, 5], 0.5)) - self.assertEqual(True, r.add([1, 2, 3], 1.5)) - self.assertEqual( - [([1, 2, 3], 1.0), ([4, 5], 0.5), ([1, 2, 3], 1.5)], - list(r)) - self.assertEqual(3, r.total_weight) - self.assertEqual( - 4, - r.add_many( - [[5, 6, 2, 3], [1, 2, 3], [8], [1, 2, 3]], - [0.1, 0.2, 0.1, 0.2])) - self.assertEqual( - [([1, 2, 3], 1.0), ([4, 5], 0.5), ([1, 2, 3], 1.5), - ([5, 6, 2, 3], 0.1), ([1, 2, 3], 0.2), ([8], 0.1), ([1, 2, 3], 0.2)], - list(r)) - self.assertTrue(np.isclose(3.6, r.total_weight)) - with self.assertRaises(ValueError): - # Key is given. - r.add([7, 8, 9], 2.0, 'a') - with self.assertRaises(ValueError): - # Keys are given. - r.add_many([[7, 8, 9], [10]], [2.0, 2.0], ['a', 'b']) - - def testRouletteWheel_IncrementalSave(self): - f = tempfile.NamedTemporaryFile() - r = utils.RouletteWheel(unique_mode=True, save_file=f.name) - entries = [ - ([1, 2, 3], 0.1, 'a'), - ([4, 5], 0.2, 'b'), - ([6], 0.3, 'c'), - ([7, 8, 9, 10], 0.25, 'd'), - ([-1, -2], 0.15, 'e'), - ([-3, -4, -5], 0.5, 'f')] - - self.assertTrue(r.is_empty()) - for i in range(0, len(entries), 2): - r.add(*entries[i]) - r.add(*entries[i + 1]) - r.incremental_save() - - r2 = utils.RouletteWheel(unique_mode=True, save_file=f.name) - self.assertEqual(i + 2, len(r2)) - count = 0 - for j, (obj, weight) in enumerate(r2): - self.assertEqual(entries[j][0], obj) - self.assertEqual(entries[j][1], weight) - self.assertEqual(weight, r2.get_weight(entries[j][2])) - count += 1 - self.assertEqual(i + 2, count) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/single_task/BUILD b/research/brain_coder/single_task/BUILD deleted file mode 100644 index 47e91b12b..000000000 --- a/research/brain_coder/single_task/BUILD +++ /dev/null @@ -1,244 +0,0 @@ -licenses(["notice"]) - -package(default_visibility = [ - "//learning/brain/research/neural_coder:__subpackages__", -]) - -load("@subpar//:subpar.bzl", "par_binary") - -par_binary( - name = "run", - srcs = ["run.py"], - deps = [ - ":defaults", - ":ga_train", - ":pg_train", - # absl dep :app - # absl dep /flags - # absl dep /logging - ], -) - -par_binary( - name = "tune", - srcs = ["tune.py"], - deps = [ - ":defaults", - ":run", - # file dep - # absl dep :app - # absl dep /flags - # absl dep /logging - # numpy dep - # tensorflow dep - ], -) - -py_library( - name = "ga_train", - srcs = ["ga_train.py"], - deps = [ - ":data", - ":defaults", - ":ga_lib", - ":results_lib", - # file dep - # absl dep /flags - # absl dep /logging - # numpy dep - # tensorflow dep - "//common:utils", # project - ], -) - -py_library( - name = "ga_lib", - srcs = ["ga_lib.py"], - deps = [ - ":misc", - # absl dep /flags - # absl dep /logging - # numpy dep - "//common:bf", # project - "//common:utils", # project - ], -) - -py_test( - name = "ga_train_test", - srcs = ["ga_train_test.py"], - deps = [ - ":defaults", - ":run", - # absl dep /flags - # tensorflow dep - ], -) - -py_library( - name = "pg_train", - srcs = ["pg_train.py"], - deps = [ - ":data", - ":defaults", - ":pg_agent", - ":results_lib", - # file dep - # absl dep /flags - # absl dep /logging - # tensorflow dep - # tensorflow internal dep # build_cleaner: keep - ], 
-)
-
-py_library(
- name = "pg_agent",
- srcs = ["pg_agent.py"],
- deps = [
- ":misc",
- # file dep
- # absl dep /logging
- # numpy dep
- # tensorflow dep
- "//common:rollout", # project
- "//common:utils", # project
- ],
-)
-
-py_test(
- name = "pg_agent_test",
- srcs = ["pg_agent_test.py"],
- deps = [
- ":data",
- ":defaults",
- ":misc",
- ":pg_agent",
- ":pg_train",
- # absl dep /logging
- # numpy dep
- # tensorflow dep
- "//common:utils", # project
- ],
-)
-
-py_library(
- name = "defaults",
- srcs = ["defaults.py"],
- deps = [
- # absl dep /logging
- "//common:config_lib", # project
- ],
-)
-
-py_library(
- name = "misc",
- srcs = ["misc.py"],
-)
-
-py_library(
- name = "data",
- srcs = ["data.py"],
- deps = [
- ":code_tasks",
- # absl dep /logging
- ],
-)
-
-py_library(
- name = "code_tasks",
- srcs = ["code_tasks.py"],
- deps = [
- ":misc",
- ":test_tasks",
- # absl dep /logging
- # numpy dep
- "//common:bf", # project
- "//common:reward", # project
- ],
-)
-
-py_test(
- name = "code_tasks_test",
- srcs = ["code_tasks_test.py"],
- deps = [
- ":code_tasks",
- ":defaults",
- # numpy dep
- # tensorflow dep
- ],
-)
-
-py_library(
- name = "test_tasks",
- srcs = ["test_tasks.py"],
- deps = [
- ":misc",
- "//common:reward", # project
- ],
-)
-
-py_test(
- name = "test_tasks_test",
- srcs = ["test_tasks_test.py"],
- deps = [
- ":misc",
- ":test_tasks",
- # numpy dep
- # tensorflow dep
- ],
-)
-
-py_test(
- name = "pg_train_test",
- size = "large",
- srcs = ["pg_train_test.py"],
- deps = [
- ":defaults",
- ":run",
- # absl dep /logging
- # tensorflow dep
- ],
-)
-
-py_library(
- name = "results_lib",
- srcs = ["results_lib.py"],
- deps = [
- # file dep
- # tensorflow dep
- ],
-)
-
-py_test(
- name = "results_lib_test",
- srcs = ["results_lib_test.py"],
- deps = [
- ":results_lib",
- # tensorflow dep
- ],
-)
-
-par_binary(
- name = "aggregate_experiment_results",
- srcs = ["aggregate_experiment_results.py"],
- deps = [
- ":misc",
- ":results_lib",
- # file dep
- # absl dep :app
- # absl dep /flags
- # numpy dep
- # tensorflow dep
- ],
-)
-
-par_binary(
- name = "aggregate_tuning_results",
- srcs = ["aggregate_tuning_results.py"],
- deps = [
- # file dep
- # absl dep :app
- # absl dep /flags
- # tensorflow dep
- ],
-)
diff --git a/research/brain_coder/single_task/README.md b/research/brain_coder/single_task/README.md
deleted file mode 100644
index 69eaabcc6..000000000
--- a/research/brain_coder/single_task/README.md
+++ /dev/null
@@ -1,192 +0,0 @@
-# Experiments for ICLR 2018 paper.
-
-[Neural Program Synthesis with Priority Queue Training](https://arxiv.org/abs/1801.03526).
-
-Runs policy gradient (REINFORCE), priority queue training, genetic algorithm,
-and uniform random search.
-
-Run all examples below out of your top-level repo directory, i.e. where your git
-clone resides.
-
-
-## Just tell me how to run something and see results
-```bash
-# These tasks are the fastest to learn. 'echo' and 'count-down' are very
-# easy. run_eval_tasks.py will do most of the work to run all the jobs.
-# Should take between 10 and 30 minutes.
-
-# How many repetitions each experiment will run. In the paper, we use 25. Fewer
-# reps mean faster experiments, but noisier results.
-REPS=25
-
-# Extra description in the job names for these experiments. Use this description
-# to distinguish between multiple runs of the same experiment.
-DESC="demo"
-
-# The tasks to run.
-TASKS="reverse echo-second-seq"
-
-# The model types and max NPE.
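-# Experiment names follow "<algorithm>-<max NPE>": pg = policy gradient
-# (REINFORCE), topk = priority queue training, ga = genetic algorithm,
-# rand = uniform random search.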
-EXPS=( pg-20M topk-20M ga-20M rand-20M ) - -# Where training data is saved. This is chosen by launch_training.sh. Custom -# implementations of launch_training.sh may use different locations. -MODELS_DIR="/tmp/models" - -# Run run_eval_tasks.py for each experiment name in EXPS. -for exp in "${EXPS[@]}" -do - ./single_task/run_eval_tasks.py \ - --exp "$exp" --tasks $TASKS --desc "$DESC" --reps $REPS -done - -# During training or after completion, run this to aggregate results into a -# table. This is also useful for seeing how much progress has been made. -# Make sure the arguments here match the settings used above. -# Note: This can take a few minutes because it reads from every experiment -# directory. -bazel run single_task:aggregate_experiment_results -- \ - --models_dir="$MODELS_DIR" \ - --max_npe="20M" \ - --task_list="$TASKS" \ - --model_types="[('pg', '$DESC'), ('topk', '$DESC'), ('ga', '$DESC'), - ('rand', '$DESC')]" \ - --csv_file="/tmp/results_table.csv" -``` - - -## Reproduce tuning results in paper -```bash -bazel build -c opt single_task:tune.par - -# PG and TopK Tuning. -MAX_NPE=5000000 -CONFIG=" -env=c(task_cycle=['reverse-tune','remove-tune']), -agent=c( - algorithm='pg', - grad_clip_threshold=50.0,param_init_factor=0.5,entropy_beta=0.05,lr=1e-5, - optimizer='rmsprop',ema_baseline_decay=0.99,topk_loss_hparam=0.0,topk=0, - replay_temperature=1.0,alpha=0.0,eos_token=False), -timestep_limit=50,batch_size=64" - -./single_task/launch_tuning.sh \ - --job_name="iclr_pg_gridsearch.reverse-remove" \ - --config="$CONFIG" \ - --max_npe="$MAX_NPE" \ - --num_workers_per_tuner=1 \ - --num_ps_per_tuner=0 \ - --num_tuners=1 \ - --num_repetitions=50 \ - --hparam_space_type="pg" \ - --stop_on_success=true -./single_task/launch_tuning.sh \ - --job_name="iclr_pg_topk_gridsearch.reverse-remove" \ - --config="$CONFIG" \ - --max_npe="$MAX_NPE" \ - --num_workers_per_tuner=1 \ - --num_ps_per_tuner=0 \ - --num_tuners=1 \ - --num_repetitions=50 \ - --hparam_space_type="pg-topk" \ - --fixed_hparams="topk=10" \ - --stop_on_success=true -./single_task/launch_tuning.sh \ - --job_name="iclr_topk_gridsearch.reverse-remove" \ - --config="$CONFIG" \ - --max_npe="$MAX_NPE" \ - --num_workers_per_tuner=1 \ - --num_ps_per_tuner=0 \ - --num_tuners=1 \ - --num_repetitions=50 \ - --hparam_space_type="topk" \ - --fixed_hparams="topk=10" \ - --stop_on_success=true - -# GA Tuning. -CONFIG=" -env=c(task_cycle=['reverse-tune','remove-char-tune']), -agent=c(algorithm='ga'), -timestep_limit=50" -./single_task/launch_tuning.sh \ - --job_name="iclr_ga_gridsearch.reverse-remove" \ - --config="$CONFIG" \ - --max_npe="$MAX_NPE" \ - --num_workers_per_tuner=25 \ - --num_ps_per_tuner=0 \ - --num_tuners=1 \ - --num_repetitions=50 \ - --hparam_space_type="ga" \ - --stop_on_success=true - -# Aggregate tuning results. Run after tuning jobs complete. -bazel run -c opt single_task:aggregate_tuning_results -- \ - --tuning_dir="$MODELS_DIR/iclr_pg_gridsearch.reverse-remove" -bazel run -c opt single_task:aggregate_tuning_results -- \ - --tuning_dir="$MODELS_DIR/iclr_pg_topk_gridsearch.reverse-remove" -bazel run -c opt single_task:aggregate_tuning_results -- \ - --tuning_dir="$MODELS_DIR/iclr_topk_gridsearch.reverse-remove" -bazel run -c opt single_task:aggregate_tuning_results -- \ - --tuning_dir="$MODELS_DIR/iclr_ga_gridsearch.reverse-remove" -``` - -## Reproduce eval results in paper -```bash -DESC="v0" # Description for each experiment. "Version 0" is a good default. 
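-# Run every model type at both the 5M and 20M max-NPE budgets.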
-EXPS=( pg-5M topk-5M ga-5M rand-5M pg-20M topk-20M ga-20M rand-20M ) -for exp in "${EXPS[@]}" -do - ./single_task/run_eval_tasks.py \ - --exp "$exp" --iclr_tasks --desc "$DESC" -done -``` - -## Run single experiment -```bash -EXP="topk-20M" # Learning algorithm + max-NPE -TASK="reverse" # Coding task -DESC="v0" # Description for each experiment. "Version 0" is a good default. -./single_task/run_eval_tasks.py \ - --exp "$EXP" --task "$TASK" --desc "$DESC" -``` - -## Fetch eval results into a table -```bash -# These arguments should match the settings you used to run the experiments. -MODELS_DIR="/tmp/models" -MAX_NPE="20M" -DESC="v0" # Same description used in the experiments. -# MODEL_TYPES specifies each model type and the description used in their -# experiments. -MODEL_TYPES="[('pg', '$DESC'), ('topk', '$DESC'), - ('ga', '$DESC'), ('rand', '$DESC')]" -TASKS="" # Empty string will default to all ICLR tasks. -# To specify custom task list, give task names separated by spaces. Example: -# TASKS="reverse remove-char" -bazel run single_task:aggregate_experiment_results -- \ - --models_dir="$MODELS_DIR" \ - --max_npe="$MAX_NPE" \ - --task_list="$TASKS" \ - --model_types="$MODEL_TYPES" \ - --csv_file="/tmp/results_table.csv" -``` - -## Reproduce shortest code examples in paper -```bash -# Maximum NPE is higher here. We only do 1 repetition, and the algorithm needs -# time to simplify its solution. -MODELS_DIR="/tmp/models" -NPE="500M" -DESC="short-code" -./single_task/run_eval_tasks.py \ - --exp "simpl-$NPE" --desc "$DESC" --iclr_tasks --reps 1 - -# Aggregate best code strings. Run after training completes. -TASKS="" # Empty string. Will default to all ICLR tasks. -bazel run single_task:aggregate_experiment_results -- \ - --models_dir="$MODELS_DIR" \ - --max_npe="$NPE" \ - --task_list="$TASKS" \ - --model_types="[('topk', '$DESC')]" \ - --data=code -``` diff --git a/research/brain_coder/single_task/aggregate_experiment_results.py b/research/brain_coder/single_task/aggregate_experiment_results.py deleted file mode 100644 index f10625300..000000000 --- a/research/brain_coder/single_task/aggregate_experiment_results.py +++ /dev/null @@ -1,380 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -r"""This script crawls experiment directories for results and aggregates them. 
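-
-Reads the per-run results saved in each experiment folder under --models_dir
-and writes either a task-by-model summary table of success rates or the best
-code strings found, depending on --data.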
-
-
-Usage example:
-
-MODELS_DIR="/tmp/models"
-bazel run single_task:aggregate_experiment_results -- \
- --models_dir="$MODELS_DIR" \
- --max_npe="20M" \
- --task_list="add echo" \
- --model_types="[('topk', 'v0'), ('ga', 'v0')]" \
- --csv_file=/tmp/results_table.csv
-"""
-
-import ast
-from collections import namedtuple
-import csv
-import os
-import re
-import StringIO
-import sys
-
-from absl import app
-from absl import flags
-import numpy as np
-import tensorflow as tf
-
-from single_task import misc # brain coder
-from single_task import results_lib # brain coder
-
-DEFAULT_MODELS = [('pg', 'v0'), ('topk', 'v0'), ('ga', 'v0'), ('rand', 'v0')]
-DEFAULT_TASKS = [
- 'reverse', 'remove-char', 'count-char', 'add', 'bool-logic', 'print-hello',
- 'echo-twice', 'echo-thrice', 'copy-reverse', 'zero-cascade', 'cascade',
- 'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char',
- 'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half', 'length',
- 'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2', 'dedup']
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string(
- 'models_dir', '',
- 'Absolute path where results folders are found.')
-flags.DEFINE_string(
- 'exp_prefix', 'bf_rl_iclr',
- 'Prefix for all experiment folders.')
-flags.DEFINE_string(
- 'max_npe', '5M',
- 'String representation of max NPE of the experiments.')
-flags.DEFINE_spaceseplist(
- 'task_list', DEFAULT_TASKS,
- 'List of task names separated by spaces. If empty string, defaults to '
- '`DEFAULT_TASKS`. These are the rows of the results table.')
-flags.DEFINE_string(
- 'model_types', str(DEFAULT_MODELS),
- 'String representation of a python list of 2-tuples, each a model_type + '
- 'job description pair. Descriptions allow you to choose among different '
- 'runs of the same experiment. These are the columns of the results table.')
-flags.DEFINE_string(
- 'csv_file', '/tmp/results_table.csv',
- 'Where to write results table. Format is CSV.')
-flags.DEFINE_enum(
- 'data', 'success_rates', ['success_rates', 'code'],
- 'What type of data to aggregate.')
-
-
-def make_csv_string(table):
- """Convert 2D list to CSV string."""
- s = StringIO.StringIO()
- writer = csv.writer(s)
- writer.writerows(table)
- value = s.getvalue()
- s.close()
- return value
-
-
-def process_results(metrics):
- """Extract useful information from given metrics.
-
- Args:
- metrics: List of results dicts. These should have been written to disk by
- training jobs.
-
- Returns:
- Dict mapping stats names to values.
-
- Raises:
- ValueError: If max_npe or max_global_repetitions values are inconsistent
- across dicts in the `metrics` list.
- """
- count = len(metrics)
- success_count = 0
- total_npe = 0 # Counting NPE across all runs.
- success_npe = 0 # Counting NPE in successful runs only.
- max_npe = 0
- max_repetitions = 0
- for metric_dict in metrics:
- if not max_npe:
- max_npe = metric_dict['max_npe']
- elif max_npe != metric_dict['max_npe']:
- raise ValueError(
- 'Invalid experiment. Different reps have different max-NPE settings.')
- if not max_repetitions:
- max_repetitions = metric_dict['max_global_repetitions']
- elif max_repetitions != metric_dict['max_global_repetitions']:
- raise ValueError(
- 'Invalid experiment. 
Different reps have different num-repetition '
- 'settings.')
- if metric_dict['found_solution']:
- success_count += 1
- success_npe += metric_dict['npe']
- total_npe += metric_dict['npe']
- stats = {}
- stats['max_npe'] = max_npe
- stats['max_repetitions'] = max_repetitions
- stats['repetitions'] = count
- stats['successes'] = success_count # successful reps
- stats['failures'] = count - success_count # failed reps
- stats['success_npe'] = success_npe
- stats['total_npe'] = total_npe
- if success_count:
- # Only successful runs counted.
- stats['avg_success_npe'] = stats['success_npe'] / float(success_count)
- else:
- stats['avg_success_npe'] = 0.0
- if count:
- stats['success_rate'] = success_count / float(count)
- stats['avg_total_npe'] = stats['total_npe'] / float(count)
- else:
- stats['success_rate'] = 0.0
- stats['avg_total_npe'] = 0.0
-
- return stats
-
-
-ProcessedResults = namedtuple('ProcessedResults', ['metrics', 'processed'])
-
-
-def get_results_for_experiment(
- models_dir, task_name, model_type='pg', max_npe='5M', desc='v0',
- name_prefix='bf_rl_paper', extra_desc=''):
- """Get and process results for a given experiment.
-
- An experiment is a set of runs with the same hyperparameters and environment.
- It is uniquely specified by a (task_name, model_type, max_npe) triple, as
- well as an optional description.
-
- We assume that each experiment has a folder with the same name as the job that
- ran the experiment. The name is computed by
- "%name_prefix%.%desc%.%model_type%-%max_npe%_%task_name%".
-
- Args:
- models_dir: Parent directory containing experiment folders.
- task_name: String name of task (the coding env). See code_tasks.py or
- run_eval_tasks.py.
- model_type: Name of the algorithm, such as 'pg', 'topk', 'ga', 'rand'.
- max_npe: String SI unit representation of the maximum NPE threshold for the
- experiment. For example, "5M" means 5 million.
- desc: Description.
- name_prefix: Prefix of job names. Normally leave this as default.
- extra_desc: Optional extra description at the end of the job name.
-
- Returns:
- ProcessedResults namedtuple instance, containing
- metrics: Raw dicts read from disk.
- processed: Stats computed by `process_results`.
-
- Raises:
- ValueError: If max_npe in the metrics does not match NPE in the experiment
- folder name.
- """
- folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe,
- task_name)
- if extra_desc:
- folder += '.' + extra_desc
-
- results = results_lib.Results(os.path.join(models_dir, folder))
- metrics, _ = results.read_all()
- processed = process_results(metrics)
- if (not np.isclose(processed['max_npe'], misc.si_to_int(max_npe))
- and processed['repetitions']):
- raise ValueError(
- 'Invalid experiment. Max-NPE setting does not match expected max-NPE '
- 'in experiment name.')
- return ProcessedResults(metrics=metrics, processed=processed)
-
-
-BestCodeResults = namedtuple(
- 'BestCodeResults',
- ['code', 'reward', 'npe', 'folder', 'finished', 'error'])
-
-
-class BestCodeResultError(object):
- success = 0
- no_solution_found = 1
- experiment_does_not_exist = 2
-
-
-def get_best_code_for_experiment(
- models_dir, task_name, model_type='pg', max_npe='5M', desc='v0',
- name_prefix='bf_rl_paper', extra_desc=''):
- """Like `get_results_for_experiment`, but fetches the code solutions."""
- folder = name_prefix + '.{0}.{1}-{2}_{3}'.format(desc, model_type, max_npe,
- task_name)
- if extra_desc:
- folder += '.'
+ extra_desc - - log_dir = os.path.join(models_dir, folder, 'logs') - search_regex = r'^solutions_([0-9])+\.txt$' - try: - all_children = tf.gfile.ListDirectory(log_dir) - except tf.errors.NotFoundError: - return BestCodeResults( - code=None, reward=0.0, npe=0, folder=folder, finished=False, - error=BestCodeResultError.experiment_does_not_exist) - solution_files = [ - fname for fname in all_children if re.search(search_regex, fname)] - max_reward = 0.0 - npe = 0 - best_code = None - for fname in solution_files: - with tf.gfile.FastGFile(os.path.join(log_dir, fname), 'r') as reader: - results = [ast.literal_eval(entry) for entry in reader] - for res in results: - if res['reward'] > max_reward: - best_code = res['code'] - max_reward = res['reward'] - npe = res['npe'] - error = ( - BestCodeResultError.success if best_code - else BestCodeResultError.no_solution_found) - try: - # If there is a status.txt file, check if it contains the status of the job. - with tf.gfile.FastGFile(os.path.join(log_dir, 'status.txt'), 'r') as f: - # Job is done, so mark this experiment as finished. - finished = f.read().lower().strip() == 'done' - except tf.errors.NotFoundError: - # No status file has been written, so the experiment is not done. No need to - # report an error here, because we do not require that experiment jobs write - # out a status.txt file until they have finished. - finished = False - return BestCodeResults( - code=best_code, reward=max_reward, npe=npe, folder=folder, - finished=finished, error=error) - - -def make_results_table( - models=None, - tasks=None, - max_npe='5M', - name_prefix='bf_rl_paper', - extra_desc='', - models_dir='/tmp'): - """Creates a table of results: algorithm + version by tasks. - - Args: - models: The table columns. A list of (algorithm, desc) tuples. - tasks: The table rows. List of task names. - max_npe: String SI unit representation of the maximum NPE threshold for the - experiment. For example, "5M" means 5 million. All entries in the table - share the same max-NPE. - name_prefix: Name prefix used in logging directory for the experiment. - extra_desc: Extra description added to name of logging directory for the - experiment. - models_dir: Parent directory containing all experiment folders. - - Returns: - A 2D list holding the table cells. 
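-
- The first row holds the max-NPE setting and the model names; the second row
- holds the per-model column headers (reps, success rate, avg NPE); each
- remaining row holds the stats for one task.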
- """ - if models is None: - models = DEFAULT_MODELS - if tasks is None: - tasks = DEFAULT_TASKS - model_results = {} - for model_type, desc in models: - model_results[model_type] = { - tname: get_results_for_experiment( - models_dir, tname, model_type, max_npe, desc, - name_prefix=name_prefix, extra_desc=extra_desc - ).processed - for tname in tasks} - - def info(stats): - return [str(stats['repetitions']), - '%.2f' % stats['success_rate'], - str(int(stats['avg_total_npe']))] - - rows = [['max NPE: ' + max_npe] - + misc.flatten([['{0} ({1})'.format(m, d), '', ''] - for m, d in models])] - rows.append( - [''] + misc.flatten([['reps', 'success rate', 'avg NPE'] - for _ in models])) - for tname in tasks: - rows.append( - [tname] - + misc.flatten([info(model_results[model][tname]) - for model, _ in models])) - - return rows - - -def print_results_table(results_table): - """Print human readable results table to stdout.""" - print('') - print('=== Results Table ===') - print('Format: # reps [success rate, avg total NPE]') - - def info_str(info_row): - # num_runs (success_rate, avg_total_npe) - if not info_row[0]: - return '0' - return '%s [%s, %s]' % (str(info_row[0]).ljust(2), info_row[1], info_row[2]) - - nc = len(results_table[0]) # num cols - out_table = [ - [results_table[0][0]] + [results_table[0][i] for i in range(1, nc, 3)]] - for row in results_table[2:]: - out_table.append([row[0]] + [info_str(row[i:i+3]) for i in range(1, nc, 3)]) - - nc = len(out_table[0]) # num cols - col_widths = [max(len(row[col]) for row in out_table) for col in range(nc)] - - table_string = '' - for row in out_table: - table_string += ''.join( - [row[c].ljust(col_widths[c] + 2) for c in range(nc)]) + '\n' - - print(table_string) - - -def main(argv): - del argv # Unused. - - name_prefix = FLAGS.exp_prefix - print('Experiments prefix: %s' % name_prefix) - - model_types = ast.literal_eval(FLAGS.model_types) - - if FLAGS.data == 'success_rates': - results_table = make_results_table( - models=model_types, tasks=FLAGS.task_list, max_npe=FLAGS.max_npe, - models_dir=FLAGS.models_dir, - name_prefix=name_prefix, extra_desc='') - with tf.gfile.FastGFile(FLAGS.csv_file, 'w') as f: - f.write(make_csv_string(results_table)) - - print_results_table(results_table) - else: - # Best code - print('* = experiment is still running') - print('') - print('=== Best Synthesized Code ===') - for model_type, desc in model_types: - print('%s (%s)' % (model_type, desc)) - sys.stdout.flush() - for tname in FLAGS.task_list: - res = get_best_code_for_experiment( - FLAGS.models_dir, tname, model_type, FLAGS.max_npe, desc, - name_prefix=name_prefix, extra_desc='') - unfinished_mark = '' if res.finished else ' *' - tname += unfinished_mark - if res.error == BestCodeResultError.success: - print(' %s' % tname) - print(' %s' % res.code) - print(' R=%.6f, NPE=%s' % (res.reward, misc.int_to_si(res.npe))) - elif res.error == BestCodeResultError.experiment_does_not_exist: - print(' Experiment does not exist. 
Check arguments.')
- print(' Experiment folder: %s' % res.folder)
- break
- else:
- print(' %s' % tname)
- print(' (none)')
- sys.stdout.flush()
-
-
-if __name__ == '__main__':
- app.run(main)
diff --git a/research/brain_coder/single_task/aggregate_tuning_results.py b/research/brain_coder/single_task/aggregate_tuning_results.py
deleted file mode 100644
index bb2e008ce..000000000
--- a/research/brain_coder/single_task/aggregate_tuning_results.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-r"""After running tuning, use this script to aggregate the results.
-
-Usage:
-
-OUT_DIR=""
-bazel run -c opt single_task:aggregate_tuning_results -- \
- --alsologtostderr \
- --tuning_dir="$OUT_DIR"
-"""
-
-import ast
-import os
-
-from absl import app
-from absl import flags
-import tensorflow as tf
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string(
- 'tuning_dir', '',
- 'Absolute path where results tuning trial folders are found.')
-
-
-def main(argv):
- del argv # Unused.
-
- try:
- trial_dirs = tf.gfile.ListDirectory(FLAGS.tuning_dir)
- except tf.errors.NotFoundError:
- print('Tuning directory %s does not exist.' % (FLAGS.tuning_dir,))
- return
-
- metrics = []
- for trial_dir in trial_dirs:
- tuning_results_file = os.path.join(
- FLAGS.tuning_dir, trial_dir, 'tuning_results.txt')
- if tf.gfile.Exists(tuning_results_file):
- with tf.gfile.FastGFile(tuning_results_file, 'r') as reader:
- for line in reader:
- metrics.append(ast.literal_eval(line.replace(': nan,', ': 0.0,')))
-
- if not metrics:
- print('No trials found.')
- return
-
- num_trials = [m['num_trials'] for m in metrics]
- assert all(n == num_trials[0] for n in num_trials)
- num_trials = num_trials[0]
- print('Found %d completed trials out of %d' % (len(metrics), num_trials))
-
- # Sort by objective descending.
- sorted_trials = sorted(metrics, key=lambda m: -m['objective'])
-
- for i, metrics in enumerate(sorted_trials):
- hparams = metrics['hparams']
- keys = sorted(hparams.keys())
- print(
- str(i).ljust(4) + ': '
- + '{0:.2f}'.format(metrics['objective']).ljust(10)
- + '['
- + ','.join(['{}={}'.format(k, hparams[k]).ljust(24) for k in keys])
- + ']')
-
-
-if __name__ == '__main__':
- app.run(main)
diff --git a/research/brain_coder/single_task/code_tasks.py b/research/brain_coder/single_task/code_tasks.py
deleted file mode 100644
index 27cc7ecd1..000000000
--- a/research/brain_coder/single_task/code_tasks.py
+++ /dev/null
@@ -1,1381 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tasks for RL."""
-
-import abc
-import copy
-import itertools
-import random
-
-from absl import logging
-import numpy as np
-from six.moves import xrange
-
-from common import bf # brain coder
-from common import reward as r # brain coder
-from single_task import misc # brain coder
-from single_task import test_tasks # brain coder
-
-
-MAX_EXECUTION_STEPS = 5000
-
-
-def make_task(task_name, override_kwargs=None, max_code_length=100,
- require_correct_syntax=False,
- do_code_simplification=False,
- correct_bonus=2.0, code_length_bonus=1.0):
- """Make tasks with the settings used in the paper."""
- logging.info('Making paper-config task.')
- n = 16 # Number of test cases.
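- # Maps task name -> (task class, constructor kwargs). These are the task
- # names accepted by run_eval_tasks.py and the aggregation scripts.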
- task_mapping = { - 'print-hello': ( - PrintTask, dict(base=27, fixed_string=[8, 5, 12, 12, 15])), - 'print': (PrintIntTask, dict(base=256, fixed_string=[1, 2, 3, 4, 5])), - 'echo': (EchoTask, dict(base=27, min_length=1, max_length=6)), - 'remove-char': ( - RemoveCharTask, dict(base=256, n=n, min_len=1, max_len=6)), - 'reverse': ( - ReverseTask, dict(base=256, n=n, min_len=1, max_len=6)), - 'reverse-tune': ( - ReverseTaskV2, dict(base=256, reward_type='static-bylen')), - 'remove-char-tune': (RemoveCharTaskV2, dict(base=27)), - 'prefix': (CommonPrefixTask, dict(base=27)), - 'find': (FindSubStrTask, dict(base=27)), - 'sort3': (SortFixedTaskV2, dict(base=27, n=150, length=3)), - 'count-char': (CountCharTaskV2, dict(n=n, max_len=6)), - 'bool-logic': (BooleanLogicTask, dict()), - 'add': (AddTask, dict(n=9)), - 'echo-twice': (EchoTwiceTask, dict(n=n)), - 'echo-thrice': (EchoThriceTask, dict(n=n)), - 'copy-reverse': (CopyReverseTask, dict(n=n)), - 'zero-cascade': (EchoZeroCascadeTask, dict(n=n)), - 'cascade': (EchoCascadeTask, dict(n=n)), - 'shift-left': (ShiftLeftTask, dict(n=n)), - 'shift-right': (ShiftRightTask, dict(n=n)), - 'riffle': (RiffleTask, dict(n=n)), - 'unriffle': (UnriffleTask, dict(n=n)), - 'middle-char': (MiddleCharTask, dict(n=n)), - 'remove-last': (RemoveLastTask, dict(n=n)), - 'remove-last-two': (RemoveLastTwoTask, dict(n=n)), - 'echo-alternating': (EchoAlternatingTask, dict(n=n)), - 'echo-half': (EchoHalfTask, dict(n=n)), - 'length': (LengthTask, dict(n=n)), - 'echo-second-seq': (EchoSecondSequenceTask, dict(n=n)), - 'echo-nth-seq': (EchoNthSequenceTask, dict(n=n)), - 'substring': (SubstringTask, dict(n=n)), - 'divide-2': (Divide2Task, dict(n=n)), - 'dedup': (DedupTask, dict(n=n)), - 'remove-target-char': (RemoveTargetCharTask, dict(n=n)), - 'list-index': (ListIndexTask, dict(n=n)), - 'fib': (FibonacciTask, dict()), - 'count-down': (BottlesOfBeerTask, dict()), - 'split': (SplitTask, dict()), - 'trim-left': (TrimLeftTask, dict()), - 'circle-route': ( - JudgeRouteCircleTask, dict(n=100, max_len=32)), - 'multiply': (MultiplyTask, dict(n=100)), - 'divmod': (DivModTask, dict(n=100)), - } - - if task_name not in task_mapping: - # Test tasks. - if task_name == 'test-hill-climb': - return test_tasks.BasicTaskManager(test_tasks.HillClimbingTask()) - raise ValueError('Unknown task type "%s"' % task_name) - task_cls, kwargs = task_mapping[task_name] - - if override_kwargs: - if not isinstance(override_kwargs, dict): - raise ValueError( - 'override_kwargs must be a dict, got: %s', override_kwargs) - kwargs.update(override_kwargs) - - task = task_cls(**kwargs) - - reward_fn = r.absolute_distance_reward - # reward_fn = r.absolute_mod_distance_reward - # reward_fn = r.absolute_log_distance_reward - logging.info('Using reward function: %s', reward_fn.__name__) - - # We want reward with and without code simplification to be scaled the same - # way. Without code simplification, give the maximum code length bonus - # every time. 
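-  # For example (illustrative): with max_code_length=100 and
-  # do_code_simplification=True, a correct 20-char program earns
-  # 1 - 20/100 = 0.8 of code_length_bonus via clipped_linear below, while
-  # with do_code_simplification=False every correct program earns the full
-  # bonus, since then min_code_length == max_code_length.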
- min_code_length = 0.0 if do_code_simplification else max_code_length - - return MultiIOTaskManager( - task=task, correct_bonus=correct_bonus, - code_length_bonus=code_length_bonus, - max_code_length=max_code_length, min_code_length=min_code_length, - reward_fn=reward_fn, require_correct_syntax=require_correct_syntax) - - -def concat(lists): - if not lists: - return [] - l = lists[0] - for k in lists[1:]: - l += k - return l - - -def concat_join(lists, sep): - if not lists: - return [] - l = lists[0] - for k in lists[1:]: - l += [sep] + k - return l - - -def clipped_linear(x, x0, y0, slope, y_range): - min_y, max_y = y_range - return min(max(slope * (x - x0) + y0, min_y), max_y) - - -class MultiIOTaskManager(object): - """Supports tasks which test the code with multiple I/O examples.""" - - def __init__(self, task, max_code_length=32, min_code_length=0, - max_execution_steps=MAX_EXECUTION_STEPS, correct_bonus=1.0, - code_length_bonus=1.0, failure_reward=-2.0, reward_fn=None, - require_correct_syntax=False): - assert isinstance(task, BaseTask) - self.task = task - self.max_code_length = max_code_length - self.min_code_length = min_code_length - self.max_execution_steps = max_execution_steps - self.require_correct_syntax = require_correct_syntax - self.correct_bonus = correct_bonus - self.code_length_bonus = code_length_bonus - self.failure_reward = failure_reward - self.time_penalty = ( - 1.0 / (max_code_length - min_code_length) - if max_code_length > min_code_length else 0.0) - if reward_fn is None: - self.reward_fn = r.absolute_distance_reward - else: - self.reward_fn = reward_fn - self.input_type = ( - task.input_type if hasattr(task, 'input_type') else misc.IOType.integer) - self.output_type = ( - task.output_type if hasattr(task, 'output_type') - else misc.IOType.integer) - self._compute_best_reward() - - def _compute_best_reward(self): - io_seqs = self.task.make_io_set() - reward = 0.0 - for _, output_seq in io_seqs: - reward += self.reward_fn(output_seq, output_seq, self.task.base) - reward += self.correct_bonus - reward += self.code_length_bonus # Bonus for shortest code. - self.best_reward = reward - self.good_reward = 0.75 * reward - logging.info('Known best reward: %.4f', self.best_reward) - - def _score_batch(self, code_strings): - return [self._score_code(code) for code in code_strings] - - def _score_code(self, code): - """Run test cases on code and compute reward. - - Args: - code: A single BF code string. - - Returns: - misc.RewardInfo namedtuple instance containing reward and code execution - information, including inputs, expected outputs, code outputs, input - and output types, and reason for the reward obtained. - """ - # Get list of 2-tuples, each containing an input sequence and an output - # sequence. - io_seqs = self.task.make_io_set() - terminal_reward = 0.0 - results = [] - reason = 'correct' - for input_seq, output_seq in io_seqs: - eval_result = bf.evaluate( - code, input_buffer=input_seq, timeout=0.1, - max_steps=self.max_execution_steps, - base=self.task.base, - require_correct_syntax=self.require_correct_syntax) - result, success = eval_result.output, eval_result.success - if not success: - # Code execution timed out. - terminal_reward = self.failure_reward - results = [] - reason = eval_result.failure_reason - break - else: - terminal_reward += self.reward_fn(result, output_seq, self.task.base) - if result == output_seq: - terminal_reward += self.correct_bonus # Bonus for correct answer. - - # Only add additional reward for shorter code. 
-          # Subtracting reward
-          # interferes with the main objective. Only optimize for length once
-          # any solution is found.
-          if self.min_code_length == self.max_code_length:
-            terminal_reward += self.code_length_bonus
-          else:
-            terminal_reward += self.code_length_bonus * clipped_linear(
-                x=len(code), x0=self.min_code_length, y0=1.0,
-                slope=-self.time_penalty, y_range=(0.0, 1.0))
-
-          # reason remains 'correct' if it is already
-        elif reason == 'correct':
-          reason = 'wrong'
-      results.append(result)
-
-    # Return list of rewards, one for each char in the code. All are 0 except
-    # for the terminal reward.
-    terminal_reward /= self.best_reward
-    return misc.RewardInfo(
-        episode_rewards=[0.0] * (len(code) - 1) + [terminal_reward],
-        input_case=misc.IOTuple(i for i, o in io_seqs),
-        correct_output=misc.IOTuple(o for i, o in io_seqs),
-        code_output=misc.IOTuple(results),
-        input_type=self.input_type,
-        output_type=self.output_type,
-        reason=reason)
-
-  def rl_batch(self, batch_size):
-    """Produces list of reward functions. One for each program in the batch."""
-    return [self._score_code] * batch_size
-
-
-def conditional_overwrite(current_value, new_value, allowed_overwrite_values):
-  if current_value in allowed_overwrite_values:
-    return new_value
-  return current_value
-
-
-class BaseTask(object):
-  """A coding task.
-
-  All coding tasks should inherit this class.
-  """
-  __metaclass__ = abc.ABCMeta
-
-  def __init__(self, base=256):
-    self.base = base  # All tasks must set the integer base that they expect.
-
-  @abc.abstractmethod
-  def make_io_set(self):
-    """Generate a set of test cases for the task.
-
-    Returns:
-      List of tuples, where each tuple is (input_case, output_case).
-      input_case and output_case are lists of integers.
-    """
-    pass
-
-
-# ==============================================================================
-# ICLR tasks.
-# ==============================================================================
-
-
-class PrintTask(BaseTask):
-  """Print string coding task.
-
-  Code needs to output a fixed string (given as a hyperparameter to the
-  task constructor). Program input is ignored.
-  """
-
-  def __init__(self, base, fixed_string=None):
-    super(type(self), self).__init__()
-    self.base = base  # base includes EOS
-    self.eos = 0
-    if fixed_string:
-      self.fixed_string = fixed_string
-    else:
-      self.fixed_string = [1, 2, 3, 0]  # ABC
-    self.min_length = self.max_length = len(self.fixed_string)
-
-  def make_io_set(self):
-    return [(list(), list(self.fixed_string))]
-
-
-class RemoveCharTaskV2(BaseTask):
-  """Remove character coding task (version 2).
-
-  Code needs to pipe input to output, but with all the 'A' (value 1) chars
-  removed. 'A' appears exactly once in each input.
-
-  Test cases are hard-coded.
-  """
-
-  def __init__(self, base):
-    super(type(self), self).__init__()
-    self.base = base
-    self.eos = 0
-    self.remove_char = 1
-    assert base >= 27
-
-  def make_io_set(self):
-    rm = self.remove_char
-    return [
-        ([rm, 0], [0]),
-        ([20, rm, 0], [20, 0]),
-        ([rm, 13, 0], [13, 0]),
-        ([6, rm, 17, 0], [6, 17, 0]),
-        ([rm, 11, 24, 0], [11, 24, 0]),
-        ([2, 16, 21, rm, 0], [2, 16, 21, 0]),
-        ([18, rm, 12, 26, 7, 0], [18, 12, 26, 7, 0]),
-        ([9, 10, 22, rm, 4, 0], [9, 10, 22, 4, 0])]
-
-
-class RemoveCharTask(BaseTask):
-  """Remove character coding task.
-
-  Code needs to pipe input to output, but with all the 'A' (value 1) chars
-  removed. 'A' appears at least once in each input.
-
-  Test cases are dynamically generated, allowing for the number of test cases
-  to be a hyperparameter.
- """ - - def __init__(self, base, n, min_len, max_len): - super(type(self), self).__init__() - self.base = base - self.eos = 0 - self.remove_char = 1 - assert base >= 27 - self._io_pairs = self._make_io_examples(n, min_len, max_len) - - def _make_io_examples(self, n, min_len, max_len): - """Generate test cases for the task.""" - rand = random.Random(6849275409234) # Test cases are fixed, but varied. - io_examples = [] - for _ in xrange(n): - length = rand.randrange(min_len, max_len + 1) - rm_char_pos = rand.randrange(0, length) - input_seq = [rand.randrange(1, self.base) for _ in xrange(length)] - input_seq[rm_char_pos] = self.remove_char - output_seq = list(input_seq) - del output_seq[rm_char_pos] - output_seq.append(0) - io_examples.append((input_seq, output_seq)) - return io_examples - - def make_io_set(self): - return copy.deepcopy(self._io_pairs) - - -class ReverseTaskV2(BaseTask): - """Reverse string coding task (version 2). - - Code needs to pipe input to output, but in reverse order. - - Stochastic test case = new test case randomly generated for every run of - `make_io_set`, i.e. different test cases every time code is scored. - - Task supports different types of test cases: - rand-one: Code is scored on one stochastic test case. - rand-many: Code is scored on 5 stochastic test cases. - static-bylen: Code is scored on 5 static test cases. There is one test - case for string lengths 1 through 5. - rand-bylen: Code is scored on 5 stochastic test cases, where there is one - test case for string lengths 1 through 5. - """ - - def __init__(self, base, reward_type): - super(type(self), self).__init__() - self.base = base # base includes EOS - assert base >= 27 - self.eos = 0 - self.io_pair_fn = { - # One random example at a time. - 'rand-one': lambda: self._io_rand(1), - # K randomy examples at a time (any lengths). - 'rand-many': lambda: self._io_rand(5), - # Static examples, one for each length. - 'static-bylen': self._io_static_by_len, - # Random examples, one for each length. - 'rand-bylen': self._io_rand_by_len}[reward_type] - - def _make_io_examples(self, sequences): - outputs = [list(i) for i in sequences] - for o in outputs: - o.reverse() - o.append(0) - inputs = [i + [0] for i in sequences] - return zip(inputs, outputs) - - def _io_rand(self, k): - inputs = [(np.random.choice(26, random.randrange(1, 6)) + 1).tolist() - for _ in xrange(k)] - return self._make_io_examples(inputs) - - def _io_rand_by_len(self, k=5): - inputs = [(np.random.choice(26, length) + 1).tolist() - for length in xrange(1, k + 1)] - return self._make_io_examples(inputs) - - def _io_static_by_len(self): - return [ - ([7, 0], [7, 0]), - ([6, 2, 0], [2, 6, 0]), - ([5, 1, 10, 0], [10, 1, 5, 0]), - ([8, 6, 5, 15, 0], [15, 5, 6, 8, 0]), - ([10, 12, 5, 2, 7, 0], [7, 2, 5, 12, 10, 0])] - - def make_io_set(self): - return self.io_pair_fn() - - -class ReverseTask(BaseTask): - """Reverse string coding task. - - Code needs to pipe input to output, but in reverse order. - - Test cases are dynamically generated, allowing for the number of test cases - to be a hyperparameter. - """ - - def __init__(self, base, n, min_len, max_len): - super(type(self), self).__init__() - self.base = base # base includes EOS - assert base >= 27 - self.eos = 0 - self._io_pairs = self._make_io_examples(n, min_len, max_len) - - def _make_io_examples(self, n, min_len, max_len): - """Generate test cases for the task.""" - rand = random.Random(6849275409234) # Test cases are fixed, but varied. 
-    io_examples = []
-    for _ in xrange(n):
-      length = rand.randrange(min_len, max_len + 1)
-      input_seq = [rand.randrange(1, self.base) for _ in xrange(length)]
-      output_seq = list(input_seq)
-      output_seq.reverse()
-      output_seq.append(0)
-      io_examples.append((input_seq, output_seq))
-    return io_examples
-
-  def make_io_set(self):
-    return copy.deepcopy(self._io_pairs)
-
-
-class CommonPrefixTask(BaseTask):
-  """Common prefix coding task.
-
-  Code needs to output the common prefix between two input lists. Input lists
-  are variable length, where each list ends with a 0. A common prefix is a
-  sequence which both lists start with.
-  """
-
-  def __init__(self, base):
-    super(type(self), self).__init__()
-    assert base >= 27
-    self.base = base
-    self.eos = 0
-
-  def make_io_set(self):
-    return [
-        ([12, 24, 18, 0, 12, 5, 0], [12, 0]),
-        ([1, 2, 3, 0, 1, 2, 17, 14, 0], [1, 2, 0]),
-        ([15, 2, 1, 9, 2, 0, 15, 2, 1, 25, 8, 14, 0], [15, 2, 1, 0]),
-        ([14, 9, 7, 8, 6, 16, 0, 14, 9, 7, 8, 8, 6, 8, 26, 0],
-         [14, 9, 7, 8, 0]),
-        ([12, 4, 16, 22, 1, 17, 0, 12, 4, 16, 22, 1, 8, 10, 0],
-         [12, 4, 16, 22, 1, 0])]
-
-
-class CountCharTask(BaseTask):
-
-  def __init__(self):
-    super(type(self), self).__init__()
-    self.base = 27
-    self.eos = 0
-    self.char = 1
-    self.input_type = misc.IOType.string
-    self.output_type = misc.IOType.integer
-
-  def make_io_set(self):
-    return [
-        ([10, 0], [0]),
-        ([1, 0], [1]),
-        ([1, 1, 0], [2]),
-        ([11, 1, 0], [1]),
-        ([1, 24, 0], [1]),
-        ([13, 6, 0], [0]),
-        ([9, 2, 7, 0], [0]),
-        ([1, 24, 11, 0], [1]),
-        ([19, 1, 1, 0], [2]),
-        ([1, 6, 1, 0], [2]),
-        ([22, 16, 17, 9, 0], [0]),
-        ([1, 1, 1, 19, 0], [3]),
-        ([1, 1, 1, 1, 0], [4]),
-        ([9, 4, 19, 11, 5, 0], [0]),
-        ([24, 11, 26, 1, 15, 0], [1]),
-        ([1, 1, 20, 1, 1, 0], [4]),
-        ([1, 1, 1, 1, 1, 0], [5])]
-
-
-class CountCharTaskV2(BaseTask):
-  """Count char coding task (version 2).
-
-  Code must output the number of occurrences of character 'A' (value 1) in an
-  input string.
-
-  Test cases are dynamically generated, allowing for the number of test cases
-  to be a hyperparameter.
-  """
-
-  def __init__(self, n, max_len):
-    super(type(self), self).__init__()
-    self.base = 27
-    self.eos = 0
-    self.char = 1
-    self.other_chars = [c for c in xrange(self.base)
-                        if c not in (self.eos, self.char)]
-    self.input_type = misc.IOType.string
-    self.output_type = misc.IOType.integer
-    self._io_pairs = self._make_io_examples(n, max_len)
-
-  def _make_io_examples(self, n, max_len):
-    """Generate test cases for the task."""
-    rand = random.Random(6849275409234)  # Test cases are fixed, but varied.
-    io_examples = []
-    io_examples.append(([10, 0], [0]))
-    io_examples.append(([1, 0], [1]))
-    io_examples.append(([1, 1, 0], [2]))
-    io_examples.append(([9, 4, 19, 11, 5, 0], [0]))
-    io_examples.append(([24, 11, 26, 1, 15, 0], [1]))
-    for _ in xrange(n - 5):
-      length = rand.randrange(2, max_len + 1)
-      num_chars = rand.randrange(0, max_len + 1)
-      input_seq = [self.char] * num_chars + [0] * (length - num_chars)
-      rand.shuffle(input_seq)
-      for i in xrange(len(input_seq)):
-        if not input_seq[i]:
-          input_seq[i] = self.other_chars[rand.randrange(len(self.other_chars))]
-      output_seq = [num_chars]
-      io_examples.append((input_seq, output_seq))
-    return io_examples
-
-  def make_io_set(self):
-    return copy.deepcopy(self._io_pairs)
-
-
-class AddTask(BaseTask):
-  """Addition coding task.
-
-  Code needs to read in two integers and output their sum mod the BF base,
-  followed by a terminating 0.
- """ - - def __init__(self, n=16): - super(type(self), self).__init__() - self.base = 256 - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - self._io_pairs = self._make_io_examples(n) - - def _make_io_examples(self, n): - """Generate test cases for the task.""" - rand = random.Random(6849275409234) # Test cases are fixed, but varied. - io_examples = [ - ([4, 0], [4, 0]), - ([0, 5], [5, 0]), - ([1, 2], [3, 0]), - ([67, 21], [88, 0]), - ([55, 56], [111, 0]), - ([128, 33], [161, 0]), - ([221, 251], [216, 0]), - ([130, 127], [1, 0]), - ([255, 1], [0, 0])] - extra_examples = max(n - len(io_examples), 0) - for _ in xrange(extra_examples): - a = rand.randrange(256) - b = rand.randrange(256) - input_seq = [a, b] - output_seq = [(a + b) % 256, 0] - io_examples.append((input_seq, output_seq)) - return io_examples - - def make_io_set(self): - return copy.deepcopy(self._io_pairs) - - -class BooleanLogicTask(BaseTask): - """Boolean logic (truth table) coding task. - - Code needs to memorize a boolean truth table. Specifically, it must encode a - mapping from triple of bools to a single bool. - """ - - def __init__(self): - super(type(self), self).__init__() - self.base = 2 - self.input_type = misc.IOType.boolean - self.output_type = misc.IOType.boolean - # X(~Z) + (~Y)(~Z) + (~X)YZ - self._truth_fn = ( - lambda x, y, z: # pylint: disable=g-long-lambda - (x and not z) or (not y and not z) or (not x and y and z)) - self._test_cases = [ - ([x, y, z], [int(self._truth_fn(x, y, z))]) - for x, y, z in itertools.product(range(2), range(2), range(2))] - - def make_io_set(self): - return copy.deepcopy(self._test_cases) - - -# ------------------------------------------------------------------------------ -# The following tasks are generated from known BF solutions. This guarantees -# that each task can be solved within the maximum code length, and maximum -# execution steps. -# ------------------------------------------------------------------------------ - - -def default_input_fn_factory(min_length=1, max_length=6, base=256): - def _input_gen(rand): - l = rand.randrange(min_length, max_length + 1) - return [rand.randrange(base) for _ in xrange(l)] - return _input_gen - - -class KnownCodeBaseTask(BaseTask): - """These tasks generate their test cases from a known BF solution. - - This ensures that each task has a solution which is under the max character - length, and that it solves the test cases under the max number of execution - steps. - """ - - def __init__(self, code_solution, make_input_fn, n=100, base=256, - max_steps=5000, seed=6849275409234): - super(KnownCodeBaseTask, self).__init__() - # Make sure known solution is less than the code length used in experiments. - assert len(code_solution) < 100 - self.code_solution = code_solution - self.make_input_fn = make_input_fn - self.n = n - self.base = base - self.max_steps = max_steps - self.seed = seed - self._test_cases = list(self._test_case_generator(code_solution)) - - def _test_case_generator(self, code_solution): - rand = random.Random(self.seed) - for _ in xrange(self.n): - input_case = self.make_input_fn(rand) - result = bf.evaluate( - code_solution, input_buffer=input_case, max_steps=self.max_steps, - base=self.base, require_correct_syntax=False) - if not result.success: - raise RuntimeError( - 'Program must succeed. 
Failed on input: %s' % input_case)
-      yield input_case, result.output
-
-  def make_io_set(self):
-    return copy.deepcopy(self._test_cases)
-
-
-class EchoTwiceTask(KnownCodeBaseTask):
-  """Echo twice."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        '>,.[>,.]<[<]>[.>].',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class EchoThriceTask(KnownCodeBaseTask):
-  """Echo three times."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        '>,.[>,.]<[<]>[.>].<[<]>[.>].',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class CopyReverseTask(KnownCodeBaseTask):
-  """Echo forwards, backwards, and then forwards again."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        '>,.[>,.]<[.<].>[.>].',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class EchoZeroCascadeTask(KnownCodeBaseTask):
-  """Print k-th char with k zeros in between (1-indexed)."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        ',[.>[->+>.<<]>+[-<+>]<<,]',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class EchoCascadeTask(KnownCodeBaseTask):
-  """Print k-th char k times (1-indexed)."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        ',>>+<<[>>[-<+>]<[->+<<.>]>+<<,].',
-        default_input_fn_factory(base=20),
-        **kwargs)
-
-
-class ShiftLeftTask(KnownCodeBaseTask):
-  """Circular shift input left."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        ',>,[.,]<.,.',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class ShiftRightTask(KnownCodeBaseTask):
-  """Circular shift input right."""
-
-  def __init__(self, **kwargs):
-    super(type(self), self).__init__(
-        '>,[>,]<.[-]<[<]>[.>].',
-        default_input_fn_factory(),
-        **kwargs)
-
-
-class RiffleTask(KnownCodeBaseTask):
-  """Shuffle like a deck of cards.
-
-  For input of length N, output values in the following index order:
-  N-1, 0, N-2, 1, N-3, 2, ...
- """ - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>,[>,]<[.[-]<[<]>.[-]>[>]<]', - default_input_fn_factory(base=20, max_length=8), - **kwargs) - - -class UnriffleTask(KnownCodeBaseTask): - """Inverse of riffle.""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>,[>,[.[-]],]<[.<].', - default_input_fn_factory(base=20, max_length=8), - **kwargs) - - -class MiddleCharTask(KnownCodeBaseTask): - """Print middle char if length is odd, or 0 if even.""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>,[>,]<<[[>]<[,<[<]>,>[>]][>]<<]>.', - default_input_fn_factory(max_length=10), - **kwargs) - - -class RemoveLastTask(KnownCodeBaseTask): - """Remove last character.""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - ',>,[[<.[-]>[-<+>]],].', - default_input_fn_factory(base=20), - **kwargs) - - -class RemoveLastTwoTask(KnownCodeBaseTask): - """Remove last two characters.""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - ',>,>,[[<<.[-]>[-<+>]>[-<+>]],].', - default_input_fn_factory(base=10), - **kwargs) - - -class EchoAlternatingTask(KnownCodeBaseTask): - # Print even numbered chars first (0-indexed), then odd numbered chars - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>,[.,>,]<<[<]>[.>].', - default_input_fn_factory(base=20, max_length=8), - **kwargs) - - -class EchoHalfTask(KnownCodeBaseTask): - """Echo only first half of the input (round down when odd lengthed).""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>>+>,[[<]>+[>],]<[<]>-[-[-<<+>]<[>]>]<<[->+<]>[[>]>.,<+[<]>-].', - default_input_fn_factory(base=20, max_length=9), - **kwargs) - - -class LengthTask(KnownCodeBaseTask): - """Print length of the input sequence.""" - - def __init__(self, **kwargs): - super(type(self), self).__init__( - '>+>,[[<]>+[>],]<[<]>-.', - default_input_fn_factory(max_length=14), - **kwargs) - - -class EchoSecondSequenceTask(KnownCodeBaseTask): - """Echo second sequence. Sequences are separated by 0.""" - - def __init__(self, **kwargs): - def echo_second_gen(rand): - l = rand.randrange(1, 6) - x = [rand.randrange(256) for _ in xrange(l)] - l = rand.randrange(1, 6) - y = [rand.randrange(256) for _ in xrange(l)] - return x + [0] + y + [0] - super(type(self), self).__init__( - ',[,],[.,].', - echo_second_gen, - **kwargs) - - -class EchoNthSequenceTask(KnownCodeBaseTask): - """Echo n-th sequence (1-indexed). Sequences are separated by 0.""" - - def __init__(self, **kwargs): - def echo_nth_gen(rand): - k = rand.randrange(1, 7) - n = rand.randrange(1, k + 1) - x = [] - for _ in xrange(k): - l = rand.randrange(0, 4) - x += [rand.randrange(256) for _ in xrange(l)] + [0] - return [n] + x - super(type(self), self).__init__( - ',-[->,[,]<],[.,].', - echo_nth_gen, - **kwargs) - - -class SubstringTask(KnownCodeBaseTask): - """Echo substring. - - First two inputs are i and l, where i is the starting index (0-indexed) - and l is the length of the substring. 
- """ - - def __init__(self, **kwargs): - def substring_gen(rand): - l = rand.randrange(2, 16) - i, j = sorted([rand.randrange(l), rand.randrange(l)]) - n = j - i - x = [rand.randrange(256) for _ in xrange(l)] + [0] - return [i, n] + x - super(type(self), self).__init__( - '>,<,>[->,<]>,<<[->>.,<<]', - substring_gen, - **kwargs) - - -class Divide2Task(KnownCodeBaseTask): - """Divide by 2 (integer floor division).""" - - def __init__(self, **kwargs): - def int_input_gen(rand): - return [rand.randrange(256)] - super(type(self), self).__init__( - ',[-[->>+<]>[<]<]>>.', - int_input_gen, - **kwargs) - - -class DedupTask(KnownCodeBaseTask): - """Deduplicate adjacent duplicate chars.""" - - def __init__(self, **kwargs): - def dedup_input_gen(rand): - np_random = np.random.RandomState(rand.randrange(2147483647)) - num_unique = rand.randrange(1, 5) - unique = np_random.choice(6, num_unique, replace=False) + 1 - return [v for v in unique for _ in xrange(rand.randrange(1, 5))] + [0] - super(type(self), self).__init__( - '>>,.[[-<+<+>>],[-<->]<[[-<->]<.>]<[->>+<<]>>]', - dedup_input_gen, - **kwargs) - - -# ============================================================================== -# Extra tasks. -# ============================================================================== - - -class PrintIntTask(BaseTask): - """Print integer coding task. - - Code needs to output a fixed single value (given as a hyperparameter to the - task constructor). Program input is ignored. - """ - - def __init__(self, base, fixed_string): - super(type(self), self).__init__() - self.base = base - self.eos = 0 - self.fixed_string = fixed_string - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - - def make_io_set(self): - return [(list(), list(self.fixed_string))] - - -class EchoTask(BaseTask): - """Echo string coding task. - - Code needs to pipe input to putput (without any modifications). - """ - - def __init__(self, base, min_length=1, max_length=5): - super(type(self), self).__init__() - self.base = base # base includes EOS - self.eos = 0 - self.min_length = min_length - self.max_length = max_length - self._io_pairs = self._make_io_examples(25) - - def _make_io_examples(self, n): - # Test cases are fixed, but varied. - np_random = np.random.RandomState(1234567890) - io_pairs = [] - for _ in xrange(n): - length = np_random.randint(self.min_length, self.max_length + 1) - input_seq = np_random.randint(1, self.base, length).tolist() + [self.eos] - output_seq = list(input_seq) - io_pairs.append((input_seq, output_seq)) - return io_pairs - - def make_io_set(self): - return copy.deepcopy(self._io_pairs) - - -class JudgeRouteCircleTask(BaseTask): - """Judge route circle coding task. - - Code needs to determine if the given route makes a closed loop. - Encoding: U = 1, R = 2, D = 3, L = 4. - - Based on - https://leetcode.com/problems/judge-route-circle/description/ - """ - base = 256 - input_type = misc.IOType.integer - output_type = misc.IOType.integer - - def __init__(self, n, max_len=12): - super(type(self), self).__init__() - self.eos = 0 - self._io_pairs = self._make_io_examples(n, max_len) - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - - def _solve(self, input_seq): - assert input_seq[-1] == 0 - pos = [0, 0] # (x, y) - for move in input_seq[:-1]: - assert 0 < move <= 4 - if move & 1 == 0: # Left or Right. - pos[0] += 3 - move # Add or subtract 1. - else: - pos[1] += 2 - move # Add or subtract 1. 
-    return [int(not pos[0] and not pos[1])]
-
-  def _make_io_examples(self, n, max_len):
-    """Generate test cases for the task."""
-    rand = random.Random(6849275409234)  # Test cases are fixed, but varied.
-    io_examples = []
-    io_examples.append(([0], [1]))
-    io_examples.append(([4, 2, 0], [1]))
-    io_examples.append(([2, 4, 0], [1]))
-    io_examples.append(([3, 1, 0], [1]))
-    io_examples.append(([1, 3, 0], [1]))
-    io_examples.append(([1, 0], [0]))
-    io_examples.append(([2, 0], [0]))
-    io_examples.append(([3, 0], [0]))
-    io_examples.append(([4, 0], [0]))
-    for _ in xrange(n):
-      is_true = rand.randrange(2)
-      length = rand.randrange(1, max_len + 1)
-      if is_true:
-        # Make a true case.
-        length = (length >> 1) << 1  # Make even.
-        partition = (rand.randrange(length + 1) >> 1) << 1
-        a = partition >> 1
-        b = (length - partition) >> 1
-        counts = {1: a, 2: b, 3: a, 4: b}
-      else:
-        # Make a false case.
-        partitions = (
-            [0]
-            + sorted([rand.randrange(length + 1) for _ in range(3)])
-            + [length])
-        counts = {n: partitions[n] - partitions[n - 1] for n in range(1, 5)}
-        if counts[1] == counts[3] and counts[2] == counts[4]:
-          # By chance we sampled a true case. Make it false by exchanging
-          # one count between even and odd pairs.
-          base = 1 + 2 * rand.randrange(2)
-          a, b = (base, base + 1) if rand.randrange(2) else (base + 1, base)
-          if counts[a] == length or counts[b] == 0:
-            # If counts are at their extreme values, then swap who gets
-            # incremented and decremented.
-            a, b = b, a
-          counts[a] += 1
-          counts[b] -= 1
-          assert counts[a] <= length and counts[b] >= 0
-      assert sum(counts.values()) == length
-      input_seq = [n for n in xrange(1, 5) for _ in xrange(counts[n])]
-      rand.shuffle(input_seq)
-      input_seq += [0]
-      output_seq = self._solve(input_seq)
-      assert output_seq[0] == is_true
-      io_examples.append((input_seq, output_seq))
-    return io_examples
-
-  def make_io_set(self):
-    return copy.deepcopy(self._io_pairs)
-
-
-class MultiplyTask(BaseTask):
-  """Multiply coding task.
-
-  Code needs to multiply two ints.
-
-  Solution:
-  http://robl.co/brief-look-at-brainfuck/
-  ,>,><<[->[->+>+<<]>>[-<<+>>]<<<]>>.
-  """
-  base = 512
-  input_type = misc.IOType.integer
-  output_type = misc.IOType.integer
-
-  def __init__(self, n):
-    super(type(self), self).__init__()
-    self.eos = 0
-    self._io_pairs = self._make_io_examples(n)
-    self.input_type = misc.IOType.integer
-    self.output_type = misc.IOType.integer
-
-  def _factors(self, n):
-    return set(i for i in range(1, int(n**0.5) + 1) if n % i == 0)
-
-  def _make_io_examples(self, n):
-    """Generate test cases for the task."""
-    rand = random.Random(6849275409234)  # Test cases are fixed, but varied.
-    io_examples = []
-    for _ in xrange(n):
-      n = rand.randrange(self.base)
-      if n == 0:
-        a, b = 0, rand.randrange(self.base)
-      else:
-        f = list(self._factors(n))
-        a = f[rand.randrange(len(f))]
-        b = n // a
-      if rand.randrange(2):
-        a, b = b, a
-      io_examples.append(([a, b], [n]))
-    return io_examples
-
-  def make_io_set(self):
-    return copy.deepcopy(self._io_pairs)
-
-
-class DivModTask(BaseTask):
-  """Divmod coding task.
-
-  Code needs to take the quotient and remainder of two ints.
- - Solution: - http://robl.co/brief-look-at-brainfuck/ - ,>,><<[>[->+>+<<]>[-<<-[>]>>>[<[-<->]<[>]>>[[-]>>+<]>-<]<<]>>>+<<[-<<+>>]<<<]> - >>>>[-<<<<<+>>>>>]<<<<<.>.> - """ - base = 512 - input_type = misc.IOType.integer - output_type = misc.IOType.integer - - def __init__(self, n): - super(type(self), self).__init__() - self.eos = 0 - self._io_pairs = self._make_io_examples(n) - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - - def _make_io_examples(self, n): - rand = random.Random(6849275409234) # Test cases are fixed, but varied. - io_examples = [] - for _ in xrange(n): - n = rand.randrange(0, self.base) - k = rand.randrange(1, self.base) # Divisor cannot be 0. - io_examples.append(([n, k], list(divmod(n, k)))) - return io_examples - - def make_io_set(self): - return copy.deepcopy(self._io_pairs) - - -class FibonacciTask(BaseTask): - - def __init__(self): - super(type(self), self).__init__() - self.base = 256 - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - - def make_io_set(self): - return [ - ([0], [0, 1]), - ([1], [1, 1]), - ([2], [1, 2]), - ([3], [2, 3]), - ([4], [3, 5]), - ([5], [5, 8]), - ([6], [8, 13]), - ([7], [13, 21]), - ([8], [21, 34]), - ([9], [34, 55]), - ([10], [55, 89]), - ([11], [89, 144]), - ([12], [144, 233]), - ([13], [233, 121])] - - -class FindSubStrTask(BaseTask): - """Find sub-string coding task. - - Code needs to output a bool: True if the input string contains a hard-coded - substring, 'AB' (values [1, 2]). - """ - - def __init__(self, base): - super(type(self), self).__init__() - assert base >= 27 - self.base = base - self.eos = 0 - self.find_str = [1, 2] - self.input_type = misc.IOType.string - self.output_type = misc.IOType.boolean - - def make_io_set(self): - return [ - ([1, 1, 23, 0], [0]), - ([21, 3, 2, 0], [0]), - ([2, 1, 19, 0], [0]), - ([2, 24, 15, 3, 0], [0]), - ([24, 6, 10, 16, 4, 0], [0]), - ([1, 2, 12, 0], [1]), - ([7, 1, 2, 0], [1]), - ([1, 2, 11, 3, 0], [1]), - ([1, 1, 2, 18, 0], [1]), - ([7, 25, 1, 2, 0], [1]), - ([3, 1, 2, 11, 8, 0], [1]), - ([15, 16, 20, 1, 2, 0], [1])] - - -class SortFixedTask(BaseTask): - """Sort list coding task. - - Code needs to output a sorted input list. The task consists of lists of the - same length L, where L is provided to this task's constructor as a - hyperparameter. - """ - - def __init__(self, base, length=3): - super(type(self), self).__init__() - assert base >= 27 - self.base = base - self.eos = 0 - self.length = length - assert length == 3 # More lengths will be supported. - - def make_io_set(self): - if self.length == 3: - return [ - ([1, 20, 6], [1, 6, 20]), - ([13, 6, 7], [6, 7, 13]), - ([24, 2, 23], [2, 23, 24]), - ([16, 12, 3], [3, 12, 16]), - ([11, 24, 4], [4, 11, 24]), - ([10, 1, 19], [1, 10, 19])] - - -class SortFixedTaskV2(BaseTask): - """Sort list coding task (version 2). - - Code needs to output a sorted input list. The task consists of lists of the - same length L, where L is provided to this task's constructor as a - hyperparameter. - - Test cases are dynamically generated, allowing for the number of test cases - to be a hyperparameter. - """ - - def __init__(self, base, n, length=3): - super(type(self), self).__init__() - assert base >= 27 - self.base = base - self.eos = 0 - self._io_pairs = self._make_io_examples(n, length) - self.input_type = misc.IOType.integer - self.output_type = misc.IOType.integer - - def _make_io_examples(self, n, length): - rand = random.Random(6849275409234) # Test cases are fixed, but varied. 
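-    # Illustrative shape of one generated pair for length=3 (actual values
-    # depend on the seed above): input [24, 2, 23] maps to output [2, 23, 24].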
- io_examples = [] - for _ in xrange(n): - input_seq = [rand.randrange(1, self.base) for _ in xrange(length)] - output_seq = sorted(input_seq) - io_examples.append((input_seq, output_seq)) - return io_examples - - def make_io_set(self): - return copy.deepcopy(self._io_pairs) - - -class RemoveTargetCharTask(KnownCodeBaseTask): - """Remove target character from string, where first input is the target. - - Target can appear multiple times. - """ - - def __init__(self, **kwargs): - def randrange_hole(rand, a, hole, b): - x = rand.randrange(a, b - 1) - if x >= hole: - return x + 1 - return x - def remove_target_char_gen(rand): - char = rand.randrange(1, 6) - l = rand.randrange(1, 8) - input_seq = [randrange_hole(rand, 1, char, 256) for _ in xrange(l)] - idx = range(l) - rand.shuffle(idx) - num_targets = rand.randrange(0, l) - for pos in idx[:num_targets]: - input_seq[pos] = char - return [char] + input_seq + [0] - super(type(self), self).__init__( - ',>>>,[<<<[->+>+<<]>>[->->+<<]>[>[-<+>]<.[-]]>[-]<<<[-<+>]>>,].', - remove_target_char_gen, - **kwargs) - - -class ListIndexTask(KnownCodeBaseTask): - """Echo i-th value in the given list.""" - - def __init__(self, **kwargs): - def array_index_gen(rand): - l = rand.randrange(1, 16) - i = rand.randrange(l) - return [i] + [rand.randrange(256) for _ in xrange(l)] + [0] - super(type(self), self).__init__( - ',[->,<]>,.', - array_index_gen, - **kwargs) - - -# ============================================================================== -# Tasks based on primaryobjects paper. -# ============================================================================== - - -def string2tokens(string): - return [ord(c) for c in string] - - -def stringlist2tokens(strings): - return [string2tokens(string) for string in strings] - - -def string2tokens_b27(string): - return [ord(c.lower()) - ord('a') + 1 for c in string] - - -def stringlist2tokens_b27(strings): - return [string2tokens_b27(string) for string in strings] - - -class BottlesOfBeerTask(BaseTask): - """Bottles of beer coding task. - - This is a counting task. Code needs to read in an int N and then output - every int from N to 0, each separated by a 0. - """ - base = 256 - input_type = misc.IOType.integer - output_type = misc.IOType.integer - - def make_io_set(self): - return [ - ([1], [1, 0]), - ([2], [2, 0, 1, 0]), - ([3], [3, 0, 2, 0, 1, 0]), - ([4], [4, 0, 3, 0, 2, 0, 1, 0]), - ([5], [5, 0, 4, 0, 3, 0, 2, 0, 1, 0]), - ([6], [6, 0, 5, 0, 4, 0, 3, 0, 2, 0, 1, 0])] - - -class SplitTask(BaseTask): - """Split coding task. - - Code needs to pipe input strings to output, but insert a 0 after every 3 - characters. This is in essence splitting the string into intervals of length - 3. - """ - base = 28 - input_type = misc.IOType.string - output_type = misc.IOType.integer - - def _splicer(self, lst, insert, interval=3): - for i, item in enumerate(lst): - yield item - if (i + 1) % interval == 0 and i < len(lst) - 1: - yield insert - - def __init__(self): - super(type(self), self).__init__() - inputs = stringlist2tokens_b27( - ['hello', 'orange', 'spaghetti', 'wins', 'one']) - targets = [list(self._splicer(i, 27)) for i in inputs] - self._test_cases = list(zip(inputs, targets)) - - def make_io_set(self): - return copy.deepcopy(self._test_cases) - - -class TrimLeftTask(BaseTask): - """Trim left coding task. - - Code needs to pipe input strings to output, but remove everything before the - first quotation char ("). 
- """ - base = 256 - input_type = misc.IOType.integer - output_type = misc.IOType.integer - - def __init__(self): - super(type(self), self).__init__() - inputs = stringlist2tokens( - ['a "inside" over', 'xy "test" rights', 'ca6 "foresting" service', - 'abc"def"yz.', 'A"B"']) - targets = stringlist2tokens( - ['"inside" over', '"test" rights', '"foresting" service', '"def"yz.', - '"B"']) - self._test_cases = list(zip(inputs, targets)) - - def make_io_set(self): - return copy.deepcopy(self._test_cases) diff --git a/research/brain_coder/single_task/code_tasks_test.py b/research/brain_coder/single_task/code_tasks_test.py deleted file mode 100644 index d3260a1a5..000000000 --- a/research/brain_coder/single_task/code_tasks_test.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for code_tasks.""" - -import numpy as np -import tensorflow as tf - -from single_task import code_tasks # brain coder -from single_task import defaults # brain coder - - -def pad(string, pad_length, pad_char): - return string + pad_char * (pad_length - len(string)) - - -class CodeTasksTest(tf.test.TestCase): - - def assertClose(self, a, b): - self.assertTrue( - np.isclose(a, b, atol=1e-4), - 'Expecting approximately equal values. Got: %s, %s' % (a, b)) - - def testMultiIOTaskManager(self): - maxlen = 100 - padchr = '[' - task = code_tasks.make_paper_task( - 'print', timestep_limit=maxlen, do_code_simplification=False) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose( - r(pad('++++++++.---.+++++++...', maxlen, padchr)).episode_rewards[-1], - 0.2444) - self.assertClose( - r(pad('++++++++.---.+++++++..+++.', - maxlen, padchr)).episode_rewards[-1], - 1.0) - - task = code_tasks.make_paper_task( - 'print', timestep_limit=maxlen, do_code_simplification=True) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose( - r('++++++++.---.+++++++...').episode_rewards[-1], - 0.2444) - self.assertClose( - r('++++++++.---.+++++++..+++.').episode_rewards[-1], - 0.935) - self.assertClose( - r(pad('++++++++.---.+++++++..+++.', - maxlen, padchr)).episode_rewards[-1], - 0.75) - - task = code_tasks.make_paper_task( - 'reverse', timestep_limit=maxlen, do_code_simplification=False) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose( - r(pad('>,>,>,.<.<.<.', maxlen, padchr)).episode_rewards[-1], - 0.1345) - self.assertClose( - r(pad(',[>,]+[,<.]', maxlen, padchr)).episode_rewards[-1], - 1.0) - - task = code_tasks.make_paper_task( - 'reverse', timestep_limit=maxlen, do_code_simplification=True) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose(r('>,>,>,.<.<.<.').episode_rewards[-1], 0.1324) - self.assertClose(r(',[>,]+[,<.]').episode_rewards[-1], 0.9725) - self.assertClose( - r(pad(',[>,]+[,<.]', maxlen, padchr)).episode_rewards[-1], - 0.75) - - def testMakeTask(self): - maxlen = 100 - padchr = '[' - config = defaults.default_config_with_updates( - 'env=c(config_for_iclr=False,fixed_string=[8,5,12,12,15])') - task = code_tasks.make_task(config.env, 'print', timestep_limit=maxlen) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose( - r('++++++++.---.+++++++...').episode_rewards[-1], - 0.2444) - self.assertClose( - r('++++++++.---.+++++++..+++.').episode_rewards[-1], - 0.935) - self.assertClose( - r(pad('++++++++.---.+++++++..+++.', - maxlen, padchr)).episode_rewards[-1], - 0.75) - - def testKnownCodeBaseTask(self): - maxlen = 100 - padchr = '[' - 
task = code_tasks.make_paper_task( - 'shift-left', timestep_limit=maxlen, do_code_simplification=False) - reward_fns = task.rl_batch(1) - r = reward_fns[0] - self.assertClose( - r(pad(',>,[.,]<.,.', maxlen, padchr)).episode_rewards[-1], - 1.0) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/single_task/data.py b/research/brain_coder/single_task/data.py deleted file mode 100644 index 8f34464f5..000000000 --- a/research/brain_coder/single_task/data.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Manage data for pretraining and RL tasks.""" - -import ast -from collections import namedtuple - -from absl import logging - -from single_task import code_tasks # brain coder - - -RLBatch = namedtuple('RLBatch', ['reward_fns', 'batch_size', 'good_reward']) - - -class DataManager(object): - """Interface between environment and model.""" - - def __init__(self, global_config, run_number=None, - do_code_simplification=False): - """Constructs a DataManager. - - Args: - global_config: A config_lib.Config instance containing all config. See - config in defaults.py. - run_number: Which run this is (of the same experiment). This should be set - when a task cycle is defined in the config. A task cycle is a list of - tasks to cycle through repeatedly, and the selected task is a function - of the run number, i.e. 0-th run, 1-st run, 2-nd run, etc... - This can be None if only a single task is set in the config. - do_code_simplification: When global_config.env.config_for_iclr is True, - use this option to create code simplification (code golf) tasks, vs - fixed length coding tasks. If True, a task with code simplification - reward will be constructed. - - Raises: - ValueError: If global_config.env.task and global_config.env.task_cycle - are both set, or both not set. Only one should be given. - ValueError: If global_config.env.task_cycle is set but run_number is None. - """ - env_config = global_config.env - self.batch_size = global_config.batch_size - - if env_config.task_cycle: - if env_config.task: - raise ValueError('Do not set both `task` and `task_cycle`.') - if run_number is None: - raise ValueError('Do not use task_cycle for single-run experiment.') - index = run_number % len(env_config.task_cycle) - self.task_name = env_config.task_cycle[index] - logging.info('run_number: %d, task_cycle index: %d', run_number, index) - logging.info('task_cycle: %s', env_config.task_cycle) - elif env_config.task: - self.task_name = env_config.task - else: - raise ValueError('Either `task` or `task_cycle` must be set.') - logging.info('Task for this run: "%s"', self.task_name) - - logging.info('config_for_iclr=True; do_code_simplification=%s', - do_code_simplification) - self.rl_task = code_tasks.make_task( - task_name=self.task_name, - override_kwargs=ast.literal_eval(env_config.task_kwargs), - max_code_length=global_config.timestep_limit, - require_correct_syntax=env_config.correct_syntax, - do_code_simplification=do_code_simplification, - correct_bonus=env_config.task_manager_config.correct_bonus, - code_length_bonus=env_config.task_manager_config.code_length_bonus) - - def sample_rl_batch(self): - """Create reward functions from the current task. - - Returns: - RLBatch namedtuple instance, which holds functions and information for - a minibatch of episodes. - * reward_fns: A reward function for each episode. Maps code string to - reward. 
-      * batch_size: Number of episodes in this minibatch.
-      * good_reward: Estimated threshold of rewards which indicate the algorithm
-          is starting to solve the task. This is a heuristic that tries to
-          reduce the amount of stuff written to disk.
-    """
-    reward_fns = self.rl_task.rl_batch(self.batch_size)
-    return RLBatch(
-        reward_fns=reward_fns,
-        batch_size=self.batch_size,
-        good_reward=self.rl_task.good_reward)
diff --git a/research/brain_coder/single_task/defaults.py b/research/brain_coder/single_task/defaults.py
deleted file mode 100644
index d9bd8b942..000000000
--- a/research/brain_coder/single_task/defaults.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Default configuration for agent and environment."""
-
-from absl import logging
-
-from common import config_lib  # brain coder
-
-
-def default_config():
-  return config_lib.Config(
-      agent=config_lib.OneOf(
-          [config_lib.Config(
-              algorithm='pg',
-              policy_lstm_sizes=[35,35],
-              # Set value_lstm_sizes to None to share weights with policy.
-              value_lstm_sizes=[35,35],
-              obs_embedding_size=10,
-              grad_clip_threshold=10.0,
-              param_init_factor=1.0,
-              lr=5e-5,
-              pi_loss_hparam=1.0,
-              vf_loss_hparam=0.5,
-              entropy_beta=1e-2,
-              regularizer=0.0,
-              softmax_tr=1.0,  # Reciprocal temperature.
-              optimizer='rmsprop',  # 'adam', 'sgd', 'rmsprop'
-              topk=0,  # Top-k unique codes will be stored.
-              topk_loss_hparam=0.0,  # off policy loss multiplier.
-              # Uniformly sample this many episodes from topk buffer per batch.
-              # If topk is 0, this has no effect.
-              topk_batch_size=1,
-              # Exponential moving average baseline for REINFORCE.
-              # If zero, A2C is used.
-              # If non-zero, should be close to 1, like .99, .999, etc.
-              ema_baseline_decay=0.99,
-              # Whether agent can emit EOS token. If true, agent can emit EOS
-              # token which ends the episode early (ends the sequence).
-              # If false, agent must emit tokens until the timestep limit is
-              # reached. e.g. True means variable length code, False means fixed
-              # length code.
-              # WARNING: Making this false slows things down.
-              eos_token=False,
-              replay_temperature=1.0,
-              # Replay probability. 1 = always replay, 0 = always on policy.
-              alpha=0.0,
-              # Whether to normalize importance weights in each minibatch.
-              iw_normalize=True),
-           config_lib.Config(
-              algorithm='ga',
-              crossover_rate=0.99,
-              mutation_rate=0.086),
-           config_lib.Config(
-              algorithm='rand')],
-          algorithm='pg',
-      ),
-      env=config_lib.Config(
-          # If True, task-specific settings are not needed.
-          task='',  # 'print', 'echo', 'reverse', 'remove', ...
-          task_cycle=[],  # If non-empty, repetitions will cycle through tasks.
-          task_kwargs='{}',  # Python dict literal.
-          task_manager_config=config_lib.Config(
-              # Reward received per test case. These bonuses will be scaled
-              # based on how many test cases there are.
-              correct_bonus=2.0,  # Bonus for code getting correct answer.
-              code_length_bonus=1.0),  # Maximum bonus for short code.
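-          # With the MultiIOTaskManager defaults above, each test case
-          # contributes its distance reward plus correct_bonus plus up to
-          # code_length_bonus, and the total is normalized by the task's best
-          # possible reward.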
- correct_syntax=False, - ), - batch_size=64, - timestep_limit=32) - - -def default_config_with_updates(config_string, do_logging=True): - if do_logging: - logging.info('Config string: "%s"', config_string) - config = default_config() - config.strict_update(config_lib.Config.parse(config_string)) - if do_logging: - logging.info('Config:\n%s', config.pretty_str()) - return config diff --git a/research/brain_coder/single_task/ga_lib.py b/research/brain_coder/single_task/ga_lib.py deleted file mode 100644 index fadb96482..000000000 --- a/research/brain_coder/single_task/ga_lib.py +++ /dev/null @@ -1,472 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Genetic algorithm for BF tasks. - -Inspired by https://github.com/primaryobjects/AI-Programmer. -GA function code borrowed from https://github.com/DEAP/deap. -""" - -from collections import namedtuple -import random - -from absl import flags -from absl import logging -import numpy as np -from six.moves import xrange - -from common import bf # brain coder -from common import utils # brain coder -from single_task import misc # brain coder - -FLAGS = flags.FLAGS - -# Saving reward of previous programs saves computation if a program appears -# again. -USE_REWARD_CACHE = True # Disable this if GA is using up too much memory. -GENES = bf.CHARS -MAX_PROGRAM_STEPS = 500 -STEP_BONUS = True - -ALPHANUM_CHARS = ( - ['_'] + - [chr(ord('a') + i_) for i_ in range(26)] + - [chr(ord('A') + i_) for i_ in range(26)] + - [chr(ord('0') + i_) for i_ in range(10)]) - -Result = namedtuple( - 'Result', - ['reward', 'inputs', 'code_outputs', 'target_outputs', 'type_in', - 'type_out', 'base', 'correct']) - - -class IOType(object): - string = 'string' - integer = 'integer' - - -class CustomType(object): - - def __init__(self, to_str_fn): - self.to_str_fn = to_str_fn - - def __call__(self, obj): - return self.to_str_fn(obj) - - -def tokens_list_repr(tokens, repr_type, base): - """Make human readable representation of program IO.""" - if isinstance(repr_type, CustomType): - return repr_type(tokens) - elif repr_type == IOType.string: - chars = ( - [ALPHANUM_CHARS[t] for t in tokens] if base < len(ALPHANUM_CHARS) - else [chr(t) for t in tokens]) - return ''.join(chars) - elif repr_type == IOType.integer: - return str(tokens) - raise ValueError('No such representation type "%s"', repr_type) - - -def io_repr(result): - """Make human readable representation of test cases.""" - inputs = ','.join( - tokens_list_repr(tokens, result.type_in, result.base) - for tokens in result.inputs) - code_outputs = ','.join( - tokens_list_repr(tokens, result.type_out, result.base) - for tokens in result.code_outputs) - target_outputs = ','.join( - tokens_list_repr(tokens, result.type_out, result.base) - for tokens in result.target_outputs) - return inputs, target_outputs, code_outputs - - -def make_task_eval_fn(task_manager): - """Returns a wrapper that converts an RL task into a GA task. - - Args: - task_manager: Is a task manager object from code_tasks.py - - Returns: - A function that takes as input a single list of a code chars, and outputs - a Result namedtuple instance containing the reward and information about - code execution. - """ - def to_data_list(single_or_tuple): - if isinstance(single_or_tuple, misc.IOTuple): - return list(single_or_tuple) - return [single_or_tuple] - - def to_ga_type(rl_type): - if rl_type == misc.IOType.string: - return IOType.string - return IOType.integer - - # Wrapper function. 
- def evalbf(bf_chars): - result = task_manager._score_code(''.join(bf_chars)) - reward = sum(result.episode_rewards) - correct = result.reason == 'correct' - return Result( - reward=reward, - inputs=to_data_list(result.input_case), - code_outputs=to_data_list(result.code_output), - target_outputs=to_data_list(result.correct_output), - type_in=to_ga_type(result.input_type), - type_out=to_ga_type(result.output_type), - correct=correct, - base=task_manager.task.base) - - return evalbf - - -def debug_str(individual, task_eval_fn): - res = task_eval_fn(individual) - input_str, target_output_str, code_output_str = io_repr(res) - return ( - ''.join(individual) + - ' | ' + input_str + - ' | ' + target_output_str + - ' | ' + code_output_str + - ' | ' + str(res.reward) + - ' | ' + str(res.correct)) - - -def mutate_single(code_tokens, mutation_rate): - """Mutate a single code string. - - Args: - code_tokens: A string/list/Individual of BF code chars. Must end with EOS - symbol '_'. - mutation_rate: Float between 0 and 1 which sets the probability of each char - being mutated. - - Returns: - An Individual instance containing the mutated code string. - - Raises: - ValueError: If `code_tokens` does not end with EOS symbol. - """ - if len(code_tokens) <= 1: - return code_tokens - if code_tokens[-1] == '_': - # Do this check to ensure that the code strings have not been corrupted. - raise ValueError('`code_tokens` must end with EOS symbol.') - else: - cs = Individual(code_tokens) - eos = [] - mutated = False - for pos in range(len(cs)): - if random.random() < mutation_rate: - mutated = True - new_char = GENES[random.randrange(len(GENES))] - x = random.random() - if x < 0.25 and pos != 0 and pos != len(cs) - 1: - # Insertion mutation. - if random.random() < 0.50: - # Shift up. - cs = cs[:pos] + [new_char] + cs[pos:-1] - else: - # Shift down. - cs = cs[1:pos] + [new_char] + cs[pos:] - elif x < 0.50: - # Deletion mutation. - if random.random() < 0.50: - # Shift down. - cs = cs[:pos] + cs[pos + 1:] + [new_char] - else: - # Shift up. - cs = [new_char] + cs[:pos] + cs[pos + 1:] - elif x < 0.75: - # Shift rotate mutation (position invariant). - if random.random() < 0.50: - # Shift down. - cs = cs[1:] + [cs[0]] - else: - # Shift up. - cs = [cs[-1]] + cs[:-1] - else: - # Replacement mutation. - cs = cs[:pos] + [new_char] + cs[pos + 1:] - assert len(cs) + len(eos) == len(code_tokens) - if mutated: - return Individual(cs + eos) - else: - return Individual(code_tokens) - - -def crossover(parent1, parent2): - """Performs crossover mating between two code strings. - - Crossover mating is where a random position is selected, and the chars - after that point are swapped. The resulting new code strings are returned. - - Args: - parent1: First code string. - parent2: Second code string. - - Returns: - A 2-tuple of children, i.e. the resulting code strings after swapping. - """ - max_parent, min_parent = ( - (parent1, parent2) if len(parent1) > len(parent2) - else (parent2, parent1)) - pos = random.randrange(len(max_parent)) - if pos >= len(min_parent): - child1 = max_parent[:pos] - child2 = min_parent + max_parent[pos:] - else: - child1 = max_parent[:pos] + min_parent[pos:] - child2 = min_parent[:pos] + max_parent[pos:] - return Individual(child1), Individual(child2) - - -def _make_even(n): - """Return largest even integer less than or equal to `n`.""" - return (n >> 1) << 1 - - -def mutate_and_crossover(population, mutation_rate, crossover_rate): - """Take a generational step over a population. 
- - Transforms population of parents into population of children (of the same - size) via crossover mating and then mutation on the resulting children. - - Args: - population: Parent population. A list of Individual objects. - mutation_rate: Probability of mutation. See `mutate_single`. - crossover_rate: Probability that two parents will mate. - - Returns: - Child population. A list of Individual objects. - """ - children = [None] * len(population) - for i in xrange(0, _make_even(len(population)), 2): - p1 = population[i] - p2 = population[i + 1] - if random.random() < crossover_rate: - p1, p2 = crossover(p1, p2) - c1 = mutate_single(p1, mutation_rate) - c2 = mutate_single(p2, mutation_rate) - children[i] = c1 - children[i + 1] = c2 - if children[-1] is None: - children[-1] = population[-1] - return children - - -def ga_loop(population, cxpb, mutpb, ngen, task_eval_fn, halloffame=None, - checkpoint_writer=None): - """A bare bones genetic algorithm. - - Similar to chapter 7 of Back, Fogel and Michalewicz, "Evolutionary - Computation 1 : Basic Algorithms and Operators", 2000. - - Args: - population: A list of individuals. - cxpb: The probability of mating two individuals. - mutpb: The probability of mutating a gene. - ngen: The number of generation. Unlimited if zero. - task_eval_fn: A python function which maps an Individual to a Result - namedtuple. - halloffame: (optional) a utils.MaxUniquePriorityQueue object that will be - used to aggregate the best individuals found during search. - checkpoint_writer: (optional) an object that can save and load populations. - Needs to have `write`, `load`, and `has_checkpoint` methods. Used to - periodically save progress. In event of a restart, the population will - be loaded from disk. - - Returns: - GaResult namedtuple instance. This contains information about the GA run, - including the resulting population, best reward (fitness) obtained, and - the best code string found. - """ - - has_checkpoint = False - if checkpoint_writer and checkpoint_writer.has_checkpoint(): - try: - gen, population, halloffame = checkpoint_writer.load() - except EOFError: # Data was corrupted. Start over. - pass - else: - has_checkpoint = True - logging.info( - 'Loaded population from checkpoint. 
Starting at generation %d', gen) - - # Evaluate the individuals with an invalid fitness - invalid_ind = [ind for ind in population if not ind.fitness.valid] - for ind in invalid_ind: - ind.fitness.values = task_eval_fn(ind).reward, - for _, ind in halloffame.iter_in_order(): - ind.fitness.values = task_eval_fn(ind).reward, - - if not has_checkpoint: - # Evaluate the individuals with an invalid fitness - invalid_ind = [ind for ind in population if not ind.fitness.valid] - for ind in invalid_ind: - ind.fitness.values = task_eval_fn(ind).reward, - - if halloffame is not None: - for ind in population: - halloffame.push(ind.fitness.values, tuple(ind), ind) - - logging.info('Initialized new population.') - - gen = 1 - - pop_size = len(population) - program_reward_cache = {} if USE_REWARD_CACHE else None - - # Begin the generational process - while ngen == 0 or gen <= ngen: - # Select the next generation individuals - offspring = roulette_selection(population, pop_size - len(halloffame)) - - # Vary the pool of individuals - # offspring = varAnd(offspring, toolbox, cxpb, mutpb) - offspring = mutate_and_crossover( - offspring, mutation_rate=mutpb, crossover_rate=cxpb) - - # Evaluate the individuals with an invalid fitness - invalid_ind = [ind for ind in offspring if not ind.fitness.valid] - for ind in invalid_ind: - str_repr = ''.join(ind) - if program_reward_cache is not None and str_repr in program_reward_cache: - ind.fitness.values = (program_reward_cache[str_repr],) - else: - eval_result = task_eval_fn(ind) - ind.fitness.values = (eval_result.reward,) - if program_reward_cache is not None: - program_reward_cache[str_repr] = eval_result.reward - - # Replace the current population by the offspring - population = list(offspring) - - # Update the hall of fame with the generated individuals - if halloffame is not None: - for ind in population: - halloffame.push(ind.fitness.values, tuple(ind), ind) - - # elitism - population.extend([ind for _, ind in halloffame.iter_in_order()]) - - if gen % 100 == 0: - top_code = '\n'.join([debug_str(ind, task_eval_fn) - for ind in topk(population, k=4)]) - logging.info('gen: %d\nNPE: %d\n%s\n\n', gen, gen * pop_size, top_code) - - best_code = ''.join(halloffame.get_max()[1]) - res = task_eval_fn(best_code) - - # Write population and hall-of-fame to disk. - if checkpoint_writer: - checkpoint_writer.write(gen, population, halloffame) - - if res.correct: - logging.info('Solution found:\n%s\nreward = %s\n', - best_code, res.reward) - break - - gen += 1 - - best_code = ''.join(halloffame.get_max()[1]) - res = task_eval_fn(best_code) - - return GaResult( - population=population, best_code=best_code, reward=res.reward, - solution_found=res.correct, generations=gen, - num_programs=gen * len(population), - max_generations=ngen, max_num_programs=ngen * len(population)) - - -GaResult = namedtuple( - 'GaResult', - ['population', 'best_code', 'reward', 'generations', 'num_programs', - 'solution_found', 'max_generations', 'max_num_programs']) - - -def reward_conversion(reward): - """Convert real value into positive value.""" - if reward <= 0: - return 0.05 - return reward + 0.05 - - -def roulette_selection(population, k): - """Select `k` individuals with prob proportional to fitness. - - Each of the `k` selections is independent. - - Warning: - The roulette selection by definition cannot be used for minimization - or when the fitness can be smaller or equal to 0. - - Args: - population: A list of Individual objects to select from. - k: The number of individuals to select. 
-
-  Returns:
-    A list of selected individuals.
-  """
-  fitnesses = np.asarray(
-      [reward_conversion(ind.fitness.values[0])
-       for ind in population])
-  assert np.all(fitnesses > 0)
-
-  sum_fits = fitnesses.sum()
-  chosen = [None] * k
-  for i in xrange(k):
-    u = random.random() * sum_fits
-    sum_ = 0
-    for ind, fitness in zip(population, fitnesses):
-      sum_ += fitness
-      if sum_ > u:
-        chosen[i] = Individual(ind)
-        break
-    if chosen[i] is None:
-      chosen[i] = Individual(population[-1])
-
-  return chosen
-
-
-def make_population(make_individual_fn, n):
-  return [make_individual_fn() for _ in xrange(n)]
-
-
-def best(population):
-  best_ind = None
-  for ind in population:
-    if best_ind is None or best_ind.fitness.values < ind.fitness.values:
-      best_ind = ind
-  return best_ind
-
-
-def topk(population, k):
-  q = utils.MaxUniquePriorityQueue(k)
-  for ind in population:
-    q.push(ind.fitness.values, tuple(ind), ind)
-  return [ind for _, ind in q.iter_in_order()]
-
-
-class Fitness(object):
-
-  def __init__(self):
-    self.values = ()
-
-  @property
-  def valid(self):
-    """Assess if a fitness is valid or not."""
-    return bool(self.values)
-
-
-class Individual(list):
-
-  def __init__(self, *args):
-    super(Individual, self).__init__(*args)
-    self.fitness = Fitness()
-
-
-def random_individual(genome_size):
-  return lambda: Individual(np.random.choice(GENES, genome_size).tolist())
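Read together with `reward_conversion` above, the selection pressure here is mild: rewards are shifted up by 0.05 before sampling, so even zero-reward programs keep a small chance of being drawn. An editorial sketch of how these helpers compose (not part of the deleted file; the rewards are made up):

    pop = make_population(random_individual(genome_size=20), n=4)
    for ind, r in zip(pop, [0.0, 0.2, 0.5, 1.0]):
      ind.fitness.values = (r,)

    # reward_conversion maps the rewards to [0.05, 0.25, 0.55, 1.05] (sum 1.90),
    # so the fittest individual is drawn with probability 1.05 / 1.90 ~= 0.55
    # per slot, and the zero-reward one with 0.05 / 1.90 ~= 0.03.
    parents = roulette_selection(pop, k=4)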
diff --git a/research/brain_coder/single_task/ga_train.py b/research/brain_coder/single_task/ga_train.py
deleted file mode 100644
index 630eca427..000000000
--- a/research/brain_coder/single_task/ga_train.py
+++ /dev/null
@@ -1,324 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Genetic algorithm for BF tasks.
-
-Also contains the uniform random search algorithm.
-
-Inspired by https://github.com/primaryobjects/AI-Programmer.
-GA function code borrowed from https://github.com/DEAP/deap.
-"""
-
-import cPickle
-import os
-import sys
-from time import sleep
-
-from absl import flags
-from absl import logging
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-from common import utils  # brain coder
-from single_task import data  # brain coder
-from single_task import defaults  # brain coder
-from single_task import ga_lib  # brain coder
-from single_task import results_lib  # brain coder
-
-FLAGS = flags.FLAGS
-
-
-def define_tuner_hparam_space(hparam_space_type):
-  """Define tunable hparams for grid search."""
-  if hparam_space_type != 'ga':
-    raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
-  return {
-      'population_size': [10, 25, 50, 100, 500],
-      'crossover_rate': [0.2, 0.5, 0.7, 0.9, 0.95],
-      'mutation_rate': [0.01, 0.03, 0.05, 0.1, 0.15]}
-
-
-def write_hparams_to_config(config, hparams, hparam_space_type):
-  """Write hparams given by the tuner into the Config object."""
-  if hparam_space_type != 'ga':
-    raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
-  config.batch_size = hparams.population_size
-  config.agent.crossover_rate = hparams.crossover_rate
-  config.agent.mutation_rate = hparams.mutation_rate
-
-
-class CheckpointWriter(object):
-  """Manages loading and saving GA populations to disk.
-
-  This object is used by the genetic algorithm to save progress periodically
-  so that a recent population can be loaded from disk in the event of a
-  restart.
-  """
-
-  def __init__(self, checkpoint_dir, population_size):
-    self.checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pickle')
-    self.population_size = population_size
-
-  def write(self, gen, population, halloffame):
-    """Write GA state to disk.
-
-    Overwrites previous saved state.
-
-    Args:
-      gen: Generation number.
-      population: List of Individual objects.
-      halloffame: Hall-of-fame buffer. Typically a priority queue.
-    """
-    raw = cPickle.dumps((gen, population, halloffame))
-    with tf.gfile.FastGFile(self.checkpoint_file, 'w') as f:
-      f.write(raw)
-
-  def load(self):
-    """Loads GA state from disk.
-
-    Loads whatever is on disk, which will be whatever the most recent call
-    to `write` wrote.
-
-    Returns:
-      gen: Generation number.
-      population: List of Individual objects.
-      halloffame: Hall-of-fame buffer. Typically a priority queue.
-    """
-    with tf.gfile.FastGFile(self.checkpoint_file, 'r') as f:
-      raw = f.read()
-    objs = cPickle.loads(raw)
-    # Validate data.
-    assert isinstance(objs, tuple) and len(objs) == 3, (
-        'Expecting a 3-tuple, but got %s instead.' % (objs,))
-    gen, population, halloffame = objs
-    assert isinstance(gen, int), (
-        'Expecting `gen` to be an integer, got %s' % (gen,))
-    assert (
-        isinstance(population, list)
-        and len(population) == self.population_size
-    ), (
-        'Expecting `population` to be a list with size %d, got %s'
-        % (self.population_size, population))
-    assert halloffame is None or len(halloffame) == 2, (
-        'Expecting hall-of-fame object to have length two, got length %d'
-        % len(halloffame))
-    logging.info('Loaded pop from checkpoint file: "%s".',
-                 self.checkpoint_file)
-    return gen, population, halloffame
-
-  def has_checkpoint(self):
-    """Checks if a checkpoint exists on disk, and if so returns True."""
-    return tf.gfile.Exists(self.checkpoint_file)
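For reference, `ga_lib.ga_loop` drives this writer in a write-then-maybe-restore cycle. A minimal editorial sketch (the directory and population size are made up):

    writer = CheckpointWriter('/tmp/ga_run_0', population_size=64)

    # Each generation, overwrite the previous snapshot...
    writer.write(gen, population, halloffame)

    # ...and after a restart, resume from the last snapshot on disk.
    if writer.has_checkpoint():
      gen, population, halloffame = writer.load()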
-
-
-def run_training(config=None, tuner=None, logdir=None, trial_name=None,  # pylint: disable=unused-argument
-                 is_chief=True):
-  """Do all training runs.
-
-  This is the top level training function for the genetic algorithm and
-  uniform random search models. Run this from the main function.
-
-  Args:
-    config: config_lib.Config instance containing global config (agent and
-        environment hparams). If None, config will be parsed from FLAGS.config.
-    tuner: (unused) A tuner instance. Leave as None if not tuning.
-    logdir: Parent directory where all data from all runs will be written. If
-        None, FLAGS.logdir will be used.
-    trial_name: (unused) If tuning, set this to a unique string that identifies
-        this trial. If `tuner` is not None, this also must be set.
-    is_chief: True if this worker is the chief.
-
-  Returns:
-    List of results dicts which were written to disk. Each training run gets a
-    results dict. Results dict contains metrics, i.e. (name, value) pairs which
-    give information about the training run.
-
-  Raises:
-    ValueError: If FLAGS.num_workers does not divide FLAGS.num_repetitions.
-    ValueError: If results dicts read from disk contain invalid data.
-  """
-  if not config:
-    # If custom config is not given, get it from flags.
-    config = defaults.default_config_with_updates(FLAGS.config)
-  if not logdir:
-    logdir = FLAGS.logdir
-
-  if FLAGS.num_repetitions % FLAGS.num_workers != 0:
-    raise ValueError('Number of workers must divide number of repetitions')
-  num_local_reps = FLAGS.num_repetitions // FLAGS.num_workers
-  logging.info('Running %d reps globally.', FLAGS.num_repetitions)
-  logging.info('This worker will run %d local reps.', num_local_reps)
-  if FLAGS.max_npe:
-    max_generations = FLAGS.max_npe // config.batch_size
-    logging.info('Max samples per rep: %d', FLAGS.max_npe)
-    logging.info('Max generations per rep: %d', max_generations)
-  else:
-    max_generations = sys.maxint
-    logging.info('Running unlimited generations.')
-
-  assert FLAGS.num_workers > 0
-  logging.info('Starting experiment. Directory: "%s"', logdir)
-  results = results_lib.Results(logdir, FLAGS.task_id)
-  local_results_list = results.read_this_shard()
-  if local_results_list:
-    if local_results_list[0]['max_npe'] != FLAGS.max_npe:
-      raise ValueError(
-          'Cannot resume training. Max-NPE changed. Was %s, now %s'
-          % (local_results_list[0]['max_npe'], FLAGS.max_npe))
-    if local_results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions:
-      raise ValueError(
-          'Cannot resume training. Number of repetitions changed. Was %s, '
-          'now %s'
-          % (local_results_list[0]['max_global_repetitions'],
-             FLAGS.num_repetitions))
-  start_rep = len(local_results_list)
-
-  for rep in xrange(start_rep, num_local_reps):
-    global_rep = num_local_reps * FLAGS.task_id + rep
-    logging.info(
-        'Starting repetition: Rep = %d. (global rep = %d)',
-        rep, global_rep)
-
-    # Data saved for each rep, like checkpoints, goes into a separate folder.
-    run_dir = os.path.join(logdir, 'run_%d' % global_rep)
-
-    if not tf.gfile.IsDirectory(run_dir):
-      tf.gfile.MakeDirs(run_dir)
-    checkpoint_writer = CheckpointWriter(run_dir,
-                                         population_size=config.batch_size)
-
-    data_manager = data.DataManager(config, run_number=global_rep)
-    task_eval_fn = ga_lib.make_task_eval_fn(data_manager.rl_task)
-
-    if config.agent.algorithm == 'rand':
-      logging.info('Running random search.')
-      assert FLAGS.max_npe
-      result = run_random_search(
-          FLAGS.max_npe, run_dir, task_eval_fn, config.timestep_limit)
-    else:
-      assert config.agent.algorithm == 'ga'
-      logging.info('Running genetic algorithm.')
-      pop = ga_lib.make_population(
-          ga_lib.random_individual(config.timestep_limit),
-          n=config.batch_size)
-      hof = utils.MaxUniquePriorityQueue(2)  # Hall of fame.
-      result = ga_lib.ga_loop(
-          pop,
-          cxpb=config.agent.crossover_rate, mutpb=config.agent.mutation_rate,
-          task_eval_fn=task_eval_fn,
-          ngen=max_generations, halloffame=hof,
-          checkpoint_writer=checkpoint_writer)
-
-    logging.info('Finished rep. Num gens: %d', result.generations)
-
-    results_dict = {
-        'max_npe': FLAGS.max_npe,
-        'batch_size': config.batch_size,
-        'max_batches': FLAGS.max_npe // config.batch_size,
-        'npe': result.num_programs,
-        'max_global_repetitions': FLAGS.num_repetitions,
-        'max_local_repetitions': num_local_reps,
-        'code_solution': result.best_code if result.solution_found else '',
-        'best_reward': result.reward,
-        'num_batches': result.generations,
-        'found_solution': result.solution_found,
-        'task': data_manager.task_name,
-        'global_rep': global_rep}
-    logging.info('results_dict: %s', results_dict)
-    results.append(results_dict)
-
-  if is_chief:
-    logging.info(
-        'Worker is chief. 
Waiting for all workers to finish so that results ' - 'can be reported to the tuner.') - - global_results_list, shard_stats = results.read_all( - num_shards=FLAGS.num_workers) - while not all(s.finished for s in shard_stats): - logging.info( - 'Still waiting on these workers: %s', - ', '.join( - ['%d (%d reps left)' - % (i, s.max_local_reps - s.num_local_reps_completed) - for i, s in enumerate(shard_stats) - if not s.finished])) - sleep(60) - global_results_list, shard_stats = results.read_all( - num_shards=FLAGS.num_workers) - - logging.info( - '%d results obtained. Chief worker is exiting the experiment.', - len(global_results_list)) - - return global_results_list - - -def run_random_search(max_num_programs, checkpoint_dir, task_eval_fn, - timestep_limit): - """Run uniform random search routine. - - Randomly samples programs from a uniform distribution until either a valid - program is found, or the maximum NPE is reached. Results are written to disk - and returned. - - Args: - max_num_programs: Maximum NPE (number of programs executed). If no solution - is found after this many programs are tried, the run is stopped and - considered a failure. - checkpoint_dir: Where to save state during the run. - task_eval_fn: Function that maps code string to result containing total - reward and info about success. - timestep_limit: Maximum length of code strings. - - Returns: - ga_lib.GaResult namedtuple instance. This contains the best code and highest - reward found. - """ - checkpoint_file = os.path.join(checkpoint_dir, 'random_search.txt') - num_programs_seen = 0 - found_solution = False - best_code = '' - best_reward = 0.0 - if tf.gfile.Exists(checkpoint_file): - try: - with tf.gfile.FastGFile(checkpoint_file, 'r') as f: - lines = list(f) - num_programs_seen = int(lines[0]) - found_solution = bool(int(lines[1])) - if found_solution: - best_code = lines[2] - best_reward = float(lines[3]) - except: # pylint: disable=bare-except - pass - - while not found_solution and num_programs_seen < max_num_programs: - if num_programs_seen % 1000 == 0: - logging.info('num_programs_seen = %d', num_programs_seen) - with tf.gfile.FastGFile(checkpoint_file, 'w') as f: - f.write(str(num_programs_seen) + '\n') - f.write(str(int(found_solution)) + '\n') - - code = np.random.choice(ga_lib.GENES, timestep_limit).tolist() - res = task_eval_fn(code) - found_solution = res.correct - num_programs_seen += 1 - - if found_solution: - best_code = ''.join(code) - best_reward = res.reward - - logging.info('num_programs_seen = %d', num_programs_seen) - logging.info('found solution: %s', found_solution) - with tf.gfile.FastGFile(checkpoint_file, 'w') as f: - f.write(str(num_programs_seen) + '\n') - f.write(str(int(found_solution)) + '\n') - if found_solution: - f.write(best_code + '\n') - f.write(str(best_reward) + '\n') - - return ga_lib.GaResult( - population=[], best_code=best_code, reward=best_reward, - solution_found=found_solution, generations=num_programs_seen, - num_programs=num_programs_seen, max_generations=max_num_programs, - max_num_programs=max_num_programs) diff --git a/research/brain_coder/single_task/ga_train_test.py b/research/brain_coder/single_task/ga_train_test.py deleted file mode 100644 index ff69ad849..000000000 --- a/research/brain_coder/single_task/ga_train_test.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for ga_train. - -Tests that ga runs for a few generations without crashing. 
-"""
-
-from absl import flags
-import tensorflow as tf
-
-from single_task import defaults  # brain coder
-from single_task import run  # brain coder
-
-FLAGS = flags.FLAGS
-
-
-class GaTest(tf.test.TestCase):
-
-  def RunTrainingSteps(self, config_string, num_steps=10):
-    """Run a few training steps with the given config.
-
-    Just check that nothing crashes.
-
-    Args:
-      config_string: Config encoded in a string. See
-          $REPO_PATH/common/config_lib.py
-      num_steps: Number of training steps to run. Defaults to 10.
-    """
-    config = defaults.default_config_with_updates(config_string)
-    FLAGS.max_npe = num_steps * config.batch_size
-    FLAGS.logdir = tf.test.get_temp_dir()
-    FLAGS.config = config_string
-    run.main(None)
-
-  def testGeneticAlgorithm(self):
-    self.RunTrainingSteps(
-        'env=c(task="reverse"),'
-        'agent=c(algorithm="ga"),'
-        'timestep_limit=40,batch_size=64')
-
-  def testUniformRandomSearch(self):
-    self.RunTrainingSteps(
-        'env=c(task="reverse"),'
-        'agent=c(algorithm="rand"),'
-        'timestep_limit=40,batch_size=64')
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/brain_coder/single_task/launch_training.sh b/research/brain_coder/single_task/launch_training.sh
deleted file mode 100755
index a4a4688ed..000000000
--- a/research/brain_coder/single_task/launch_training.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-# Launches training jobs.
-# Modify this file to launch workers with your preferred cloud API.
-# The following implementation runs each worker as a subprocess on the local
-# machine.
-
-MODELS_DIR="/tmp/models"
-
-# Get command line options.
-OPTS=$(getopt -n "$0" -o "" --long "job_name:,config:,num_workers:,num_ps:,max_npe:,num_repetitions:,stop_on_success:" -- "$@")
-if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
-
-eval set -- "$OPTS"
-
-JOB_NAME=""  # Name of the process and the logs directory.
-CONFIG=""  # Model and environment hparams.
-# NUM_WORKERS: Number of workers to launch for this training job. If using
-# neural networks, each worker will be 1 replica.
-NUM_WORKERS=1
-# NUM_PS: Number of parameter servers to launch for this training job. Only set
-# this if using neural networks. For 1 worker, no parameter servers are needed.
-# For more than 1 worker, at least 1 parameter server is needed to store the
-# global model.
-NUM_PS=0
-# MAX_NPE: Maximum number of programs executed. Training will quit once this
-# threshold is reached. If 0, the threshold is infinite.
-MAX_NPE=0
-NUM_REPETITIONS=1  # How many times to run this experiment.
-STOP_ON_SUCCESS=true  # Whether to halt training when a solution is found.
-
-# Parse options into variables.
-while true; do
-  case "$1" in
-    --job_name ) JOB_NAME="$2"; shift; shift ;;
-    --config ) CONFIG="$2"; shift; shift ;;
-    --num_workers ) NUM_WORKERS="$2"; shift; shift ;;
-    --num_ps ) NUM_PS="$2"; shift; shift ;;
-    --max_npe ) MAX_NPE="$2"; shift; shift ;;
-    --num_repetitions ) NUM_REPETITIONS="$2"; shift; shift ;;
-    --stop_on_success ) STOP_ON_SUCCESS="$2"; shift; shift ;;
-    -- ) shift; break ;;
-    * ) break ;;
-  esac
-done
-
-# Launch jobs.
-# TODO: multi-worker RL training
-
-LOGDIR="$MODELS_DIR/$JOB_NAME"
-mkdir -p $LOGDIR
-
-BIN_DIR="bazel-bin/single_task"
-# NOTE: The original launch loop was garbled in this patch; the loop below is
-# a best-effort reconstruction. The flag names mirror the FLAGS referenced by
-# the Python code in this package and should be treated as placeholders.
-for (( i=0; i<NUM_WORKERS; i++ )); do
-  "$BIN_DIR/run.par" \
-      --config="$CONFIG" \
-      --logdir="$LOGDIR" \
-      --task_id=$i \
-      --num_workers="$NUM_WORKERS" \
-      --max_npe="$MAX_NPE" \
-      --num_repetitions="$NUM_REPETITIONS" \
-      --stop_on_success="$STOP_ON_SUCCESS" \
-      > "$LOGDIR/task_$i.log" 2>&1 &  # Run as subprocess
-  echo "Launched task $i. Logs: $LOGDIR/task_$i.log"
-done
-
-
-# Use "pidof run.par" to find jobs.
-# Kill with "pkill run.par"
diff --git a/research/brain_coder/single_task/launch_tuning.sh b/research/brain_coder/single_task/launch_tuning.sh
deleted file mode 100755
index 97ce51b54..000000000
--- a/research/brain_coder/single_task/launch_tuning.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-# Launches tuning jobs.
-# Modify this file to launch workers with your preferred cloud API.
-# The following implementation runs each worker as a subprocess on the local
-# machine.
-
-MODELS_DIR="/tmp/models"
-
-# Get command line options.
-OPTS=$(getopt -n "$0" -o "" --long "job_name:,config:,num_tuners:,num_workers_per_tuner:,num_ps_per_tuner:,max_npe:,num_repetitions:,stop_on_success:,fixed_hparams:,hparam_space_type:" -- "$@")
-if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
-
-eval set -- "$OPTS"
-
-JOB_NAME=""  # Name of the process and the logs directory.
-CONFIG=""  # Model and environment hparams.
-# NUM_TUNERS: Number of tuning jobs to launch. Each tuning job can train a
-# hparam combination. So more tuners means more hparams tried in parallel.
-NUM_TUNERS=1
-# NUM_WORKERS_PER_TUNER: Number of workers to launch for each tuning job. If
-# using neural networks, each worker will be 1 replica.
-NUM_WORKERS_PER_TUNER=1
-# NUM_PS_PER_TUNER: Number of parameter servers to launch for this tuning job.
-# Only set this if using neural networks. For 1 worker per tuner, no parameter
-# servers are needed. For more than 1 worker per tuner, at least 1 parameter
-# server per tuner is needed to store the global model for each tuner.
-NUM_PS_PER_TUNER=0
-# MAX_NPE: Maximum number of programs executed. Training will quit once this
-# threshold is reached. If 0, the threshold is infinite.
-MAX_NPE=0
-NUM_REPETITIONS=25  # How many times to run this experiment.
-STOP_ON_SUCCESS=true  # Whether to halt training when a solution is found.
-# FIXED_HPARAMS: Hold hparams fixed in the grid search. This reduces the search
-# space.
-FIXED_HPARAMS=""
-# HPARAM_SPACE_TYPE: Specifies the hparam search space. See
-# `define_tuner_hparam_space` functions defined in pg_train.py and ga_train.py.
-HPARAM_SPACE_TYPE="pg"
-
-# Parse options into variables.
-while true; do
-  case "$1" in
-    --job_name ) JOB_NAME="$2"; shift; shift ;;
-    --config ) CONFIG="$2"; shift; shift ;;
-    --num_tuners ) NUM_TUNERS="$2"; shift; shift ;;
-    --num_workers_per_tuner ) NUM_WORKERS_PER_TUNER="$2"; shift; shift ;;
-    --num_ps_per_tuner ) NUM_PS_PER_TUNER="$2"; shift; shift ;;
-    --max_npe ) MAX_NPE="$2"; shift; shift ;;
-    --num_repetitions ) NUM_REPETITIONS="$2"; shift; shift ;;
-    --stop_on_success ) STOP_ON_SUCCESS="$2"; shift; shift ;;
-    --fixed_hparams ) FIXED_HPARAMS="$2"; shift; shift ;;
-    --hparam_space_type ) HPARAM_SPACE_TYPE="$2"; shift; shift ;;
-    -- ) shift; break ;;
-    * ) break ;;
-  esac
-done
-
-# Launch jobs.
-# TODO: multi-worker RL training
-
-LOGDIR="$MODELS_DIR/$JOB_NAME"
-mkdir -p $LOGDIR
-
-BIN_DIR="bazel-bin/single_task"
-# NOTE: The original tuner/worker launch loops were garbled in this patch; the
-# nested loops below are a best-effort reconstruction. The flag names mirror
-# the options parsed above and should be treated as placeholders.
-for (( tuner=0; tuner<NUM_TUNERS; tuner++ )); do
-  for (( i=0; i<NUM_WORKERS_PER_TUNER; i++ )); do
-    "$BIN_DIR/tune.par" \
-        --config="$CONFIG" \
-        --logdir="$LOGDIR" \
-        --task_id=$i \
-        --num_workers_per_tuner="$NUM_WORKERS_PER_TUNER" \
-        --max_npe="$MAX_NPE" \
-        --num_repetitions="$NUM_REPETITIONS" \
-        --stop_on_success="$STOP_ON_SUCCESS" \
-        --fixed_hparams="$FIXED_HPARAMS" \
-        --hparam_space_type="$HPARAM_SPACE_TYPE" \
-        > "$LOGDIR/tuner_$tuner.task_$i.log" 2>&1 &  # Run as subprocess
-    echo "Launched tuner $tuner, task $i. Logs: $LOGDIR/tuner_$tuner.task_$i.log"
-  done
-done
-
-# Use "pidof tune.par" to find jobs.
-# Kill with "pkill tune.par"
diff --git a/research/brain_coder/single_task/misc.py b/research/brain_coder/single_task/misc.py
deleted file mode 100644
index 07061d81c..000000000
--- a/research/brain_coder/single_task/misc.py
+++ /dev/null
@@ -1,149 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Utilities specific to this project."""
-
-from collections import namedtuple
-from six import string_types
-
-
-#####################
-# BF-lang utilities #
-#####################
-
-
-BF_EOS_INT = 0  # Also used as SOS (start of sequence).
-BF_EOS_CHAR = TEXT_EOS_CHAR = '_'
-BF_LANG_INTS = range(1, 9)
-BF_INT_TO_CHAR = [BF_EOS_CHAR, '>', '<', '+', '-', '[', ']', '.', ',']
-BF_CHAR_TO_INT = dict([(c, i) for i, c in enumerate(BF_INT_TO_CHAR)])
-
-
-RewardInfo = namedtuple('RewardInfo', ['episode_rewards', 'input_case',
-                                       'correct_output',
-                                       'code_output', 'reason', 'input_type',
-                                       'output_type'])
-
-
-class IOType(object):
-  string = 'string'
-  integer = 'integer'
-  boolean = 'boolean'
-
-
-class IOTuple(tuple):
-  pass
-
-
-def flatten(lst):
-  return [item for row in lst for item in row]
-
-
-def bf_num_tokens():
-  # BF tokens plus EOS.
-  return len(BF_INT_TO_CHAR)
-
-
-def bf_char2int(bf_char):
-  """Convert BF code char to int token."""
-  return BF_CHAR_TO_INT[bf_char]
-
-
-def bf_int2char(bf_int):
-  """Convert BF int token to code char."""
-  return BF_INT_TO_CHAR[bf_int]
-
-
-def bf_tokens_to_string(bf_tokens, truncate=True):
-  """Convert token list to code string. Will truncate at EOS token.
-
-  Args:
-    bf_tokens: Python list of ints representing the code string.
-    truncate: If true, the output string will end at the first EOS token.
-        If false, the entire token list is converted to string.
-
-  Returns:
-    String representation of the tokens.
-
-  Raises:
-    ValueError: If bf_tokens is not a python list.
-  """
-  if not isinstance(bf_tokens, list):
-    raise ValueError('Only python list supported here.')
-  if truncate:
-    try:
-      eos_index = bf_tokens.index(BF_EOS_INT)
-    except ValueError:
-      eos_index = len(bf_tokens)
-  else:
-    eos_index = len(bf_tokens)
-  return ''.join([BF_INT_TO_CHAR[t] for t in bf_tokens[:eos_index]])
-
-
-def bf_string_to_tokens(bf_string):
-  """Convert string to token list. Will strip and append EOS token."""
-  tokens = [BF_CHAR_TO_INT[char] for char in bf_string.strip()]
-  tokens.append(BF_EOS_INT)
-  return tokens
-
-
-def tokens_to_text(tokens):
-  """Convert token list to human readable text."""
-  return ''.join(
-      [TEXT_EOS_CHAR if t == 0 else chr(t - 1 + ord('A')) for t in tokens])
-
-
-###################################
-# Number representation utilities #
-###################################
-
-
-# https://en.wikipedia.org/wiki/Metric_prefix
-si_magnitudes = {
-    'k': 1e3,
-    'm': 1e6,
-    'g': 1e9}
-
-
-def si_to_int(s):
-  """Convert string ending with SI magnitude to int.
-
-  Examples: 5K ==> 5000, 12M ==> 12000000.
-
-  Args:
-    s: String in the form 'xx..xP' where x is a digit and P is an SI prefix.
-
-  Returns:
-    Integer equivalent to the string.
-  """
-  if isinstance(s, string_types) and s[-1].lower() in si_magnitudes.keys():
-    return int(int(s[:-1]) * si_magnitudes[s[-1].lower()])
-  return int(s)
-
-
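A quick editorial check of the parsing behavior (not part of the deleted file); `int_to_si`, just below, performs the approximate inverse:

    assert si_to_int('5K') == 5000
    assert si_to_int('12m') == 12000000   # The suffix match is case-insensitive.
    assert si_to_int('7') == 7            # No suffix falls through to int().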
-def int_to_si(n):
-  """Convert integer to string with SI magnitude.
-
-  `n` will be truncated.
-
-  Examples: 5432 ==> 5K, 12345678 ==> 12M
-
-  Args:
-    n: Integer to represent as a string.
-
-  Returns:
-    String representation of `n` containing SI magnitude.
-  """
-  m = abs(n)
-  sign = -1 if n < 0 else 1
-  if m < 1e3:
-    return str(n)
-  if m < 1e6:
-    return '{0}K'.format(sign*int(m / 1e3))
-  if m < 1e9:
-    return '{0}M'.format(sign*int(m / 1e6))
-  if m < 1e12:
-    return '{0}G'.format(sign*int(m / 1e9))
-  return str(n)
diff --git a/research/brain_coder/single_task/pg_agent.py b/research/brain_coder/single_task/pg_agent.py
deleted file mode 100644
index 13fc7da2d..000000000
--- a/research/brain_coder/single_task/pg_agent.py
+++ /dev/null
@@ -1,1297 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Language model agent.
-
-Agent outputs code in a sequence just like a language model. Can be trained
-as a language model or using RL, or a combination of the two.
-"""
-
-from collections import namedtuple
-from math import exp
-from math import log
-import time
-
-from absl import logging
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-from common import rollout as rollout_lib  # brain coder
-from common import utils  # brain coder
-from single_task import misc  # brain coder
-
-
-# Experiments in the ICLR 2018 paper used reduce_sum instead of reduce_mean for
-# some losses. We make all losses be batch_size independent, and multiply the
-# changed losses by 64, which was the fixed batch_size when the experiments
-# were run. The loss hyperparameters still match what is reported in the paper.
-MAGIC_LOSS_MULTIPLIER = 64
-
-
-def rshift_time(tensor_2d, fill=misc.BF_EOS_INT):
-  """Right shifts a 2D tensor along the time dimension (axis-1)."""
-  dim_0 = tf.shape(tensor_2d)[0]
-  fill_tensor = tf.fill([dim_0, 1], fill)
-  return tf.concat([fill_tensor, tensor_2d[:, :-1]], axis=1)
-
-
-def join(a, b):
-  # Concat a and b along 0-th dim.
-  if a is None or len(a) == 0:  # pylint: disable=g-explicit-length-test
-    return b
-  if b is None or len(b) == 0:  # pylint: disable=g-explicit-length-test
-    return a
-  return np.concatenate((a, b))
-
-
-def make_optimizer(kind, lr):
-  if kind == 'sgd':
-    return tf.train.GradientDescentOptimizer(lr)
-  elif kind == 'adam':
-    return tf.train.AdamOptimizer(lr)
-  elif kind == 'rmsprop':
-    return tf.train.RMSPropOptimizer(learning_rate=lr, decay=0.99)
-  else:
-    raise ValueError('Optimizer type "%s" not recognized.' % kind)
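For intuition, `rshift_time` is what turns sampled action sequences into next-token inputs: it prepends the EOS/SOS fill value and drops the last column. A small editorial check (TF1 session style, matching this file; not part of the deleted source):

    tokens = tf.constant([[3, 5, 2],
                          [4, 1, 6]], dtype=tf.int32)
    shifted = rshift_time(tokens, fill=misc.BF_EOS_INT)
    with tf.Session() as sess:
      print(sess.run(shifted))  # [[0 3 5]
                                #  [0 4 1]]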
-
-
-class LinearWrapper(tf.contrib.rnn.RNNCell):
-  """RNNCell wrapper that adds a linear layer to the output."""
-
-  def __init__(self, cell, output_size, dtype=tf.float32, suppress_index=None):
-    self.cell = cell
-    self._output_size = output_size
-    self._dtype = dtype
-    self._suppress_index = suppress_index
-    self.smallest_float = -2.4e38
-
-  def __call__(self, inputs, state, scope=None):
-    with tf.variable_scope(type(self).__name__):
-      outputs, state = self.cell(inputs, state, scope=scope)
-      logits = tf.matmul(
-          outputs,
-          tf.get_variable('w_output',
-                          [self.cell.output_size, self.output_size],
-                          dtype=self._dtype))
-      if self._suppress_index is not None:
-        # Replace the target index with -inf, so that it never gets selected.
-        batch_size = tf.shape(logits)[0]
-        logits = tf.concat(
-            [logits[:, :self._suppress_index],
-             tf.fill([batch_size, 1], self.smallest_float),
-             logits[:, self._suppress_index + 1:]],
-            axis=1)
-
-    return logits, state
-
-  @property
-  def output_size(self):
-    return self._output_size
-
-  @property
-  def state_size(self):
-    return self.cell.state_size
-
-  def zero_state(self, batch_size, dtype):
-    return self.cell.zero_state(batch_size, dtype)
-
-
-UpdateStepResult = namedtuple(
-    'UpdateStepResult',
-    ['global_step', 'global_npe', 'summaries_list', 'gradients_dict'])
-
-
-class AttrDict(dict):
-  """Dict with attributes as keys.
-
-  https://stackoverflow.com/a/14620633
-  """
-
-  def __init__(self, *args, **kwargs):
-    super(AttrDict, self).__init__(*args, **kwargs)
-    self.__dict__ = self
-
-
-class LMAgent(object):
-  """Language model agent."""
-  action_space = misc.bf_num_tokens()
-  observation_space = misc.bf_num_tokens()
-
-  def __init__(self, global_config, task_id=0,
-               logging_file=None,
-               experience_replay_file=None,
-               global_best_reward_fn=None,
-               found_solution_op=None,
-               assign_code_solution_fn=None,
-               program_count=None,
-               do_iw_summaries=False,
-               stop_on_success=True,
-               dtype=tf.float32,
-               verbose_level=0,
-               is_local=True):
-    self.config = config = global_config.agent
-    self.logging_file = logging_file
-    self.experience_replay_file = experience_replay_file
-    self.task_id = task_id
-    self.verbose_level = verbose_level
-    self.global_best_reward_fn = global_best_reward_fn
-    self.found_solution_op = found_solution_op
-    self.assign_code_solution_fn = assign_code_solution_fn
-    self.parent_scope_name = tf.get_variable_scope().name
-    self.dtype = dtype
-    self.allow_eos_token = config.eos_token
-    self.stop_on_success = stop_on_success
-    self.pi_loss_hparam = config.pi_loss_hparam
-    self.vf_loss_hparam = config.vf_loss_hparam
-    self.is_local = is_local
-
-    self.top_reward = 0.0
-    self.embeddings_trainable = True
-
-    self.no_op = tf.no_op()
-
-    self.learning_rate = tf.constant(
-        config.lr, dtype=dtype, name='learning_rate')
-    self.initializer = tf.contrib.layers.variance_scaling_initializer(
-        factor=config.param_init_factor,
-        mode='FAN_AVG',
-        uniform=True,
-        dtype=dtype)  # TF's default initializer.
-    tf.get_variable_scope().set_initializer(self.initializer)
-
-    self.a2c = config.ema_baseline_decay == 0
-    if not self.a2c:
-      logging.info('Using exponential moving average REINFORCE baselines.')
-      self.ema_baseline_decay = config.ema_baseline_decay
-      self.ema_by_len = [0.0] * global_config.timestep_limit
-    else:
-      logging.info('Using advantage (a2c) with learned value function.')
-      self.ema_baseline_decay = 0.0
-      self.ema_by_len = None
-
-    # Top-k
-    if config.topk and config.topk_loss_hparam:
-      self.topk_loss_hparam = config.topk_loss_hparam
-      self.topk_batch_size = config.topk_batch_size
-      if self.topk_batch_size <= 0:
-        raise ValueError('topk_batch_size must be a positive integer. Got %s'
-                         % self.topk_batch_size)
-      self.top_episodes = utils.MaxUniquePriorityQueue(config.topk)
-      logging.info('Made max-priority-queue with capacity %d',
-                   self.top_episodes.capacity)
-    else:
-      self.top_episodes = None
-      self.topk_loss_hparam = 0.0
-      logging.info('No max-priority-queue')
-
-    # Experience replay.
- self.replay_temperature = config.replay_temperature - self.num_replay_per_batch = int(global_config.batch_size * config.alpha) - self.num_on_policy_per_batch = ( - global_config.batch_size - self.num_replay_per_batch) - self.replay_alpha = ( - self.num_replay_per_batch / float(global_config.batch_size)) - logging.info('num_replay_per_batch: %d', self.num_replay_per_batch) - logging.info('num_on_policy_per_batch: %d', self.num_on_policy_per_batch) - logging.info('replay_alpha: %s', self.replay_alpha) - if self.num_replay_per_batch > 0: - # Train with off-policy episodes from replay buffer. - start_time = time.time() - self.experience_replay = utils.RouletteWheel( - unique_mode=True, save_file=experience_replay_file) - logging.info('Took %s sec to load replay buffer from disk.', - int(time.time() - start_time)) - logging.info('Replay buffer file location: "%s"', - self.experience_replay.save_file) - else: - # Only train on-policy. - self.experience_replay = None - - if program_count is not None: - self.program_count = program_count - self.program_count_add_ph = tf.placeholder( - tf.int64, [], 'program_count_add_ph') - self.program_count_add_op = self.program_count.assign_add( - self.program_count_add_ph) - - ################################ - # RL policy and value networks # - ################################ - batch_size = global_config.batch_size - logging.info('batch_size: %d', batch_size) - - self.policy_cell = LinearWrapper( - tf.contrib.rnn.MultiRNNCell( - [tf.contrib.rnn.BasicLSTMCell(cell_size) - for cell_size in config.policy_lstm_sizes]), - self.action_space, - dtype=dtype, - suppress_index=None if self.allow_eos_token else misc.BF_EOS_INT) - self.value_cell = LinearWrapper( - tf.contrib.rnn.MultiRNNCell( - [tf.contrib.rnn.BasicLSTMCell(cell_size) - for cell_size in config.value_lstm_sizes]), - 1, - dtype=dtype) - - obs_embedding_scope = 'obs_embed' - with tf.variable_scope( - obs_embedding_scope, - initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0)): - obs_embeddings = tf.get_variable( - 'embeddings', - [self.observation_space, config.obs_embedding_size], - dtype=dtype, trainable=self.embeddings_trainable) - self.obs_embeddings = obs_embeddings - - ################################ - # RL policy and value networks # - ################################ - - initial_state = tf.fill([batch_size], misc.BF_EOS_INT) - def loop_fn(loop_time, cell_output, cell_state, loop_state): - """Function called by tf.nn.raw_rnn to instantiate body of the while_loop. - - See https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn for more - information. - - When time is 0, and cell_output, cell_state, loop_state are all None, - `loop_fn` will create the initial input, internal cell state, and loop - state. When time > 0, `loop_fn` will operate on previous cell output, - state, and loop state. - - Args: - loop_time: A scalar tensor holding the current timestep (zero based - counting). - cell_output: Output of the raw_rnn cell at the current timestep. - cell_state: Cell internal state at the current timestep. - loop_state: Additional loop state. These tensors were returned by the - previous call to `loop_fn`. - - Returns: - elements_finished: Bool tensor of shape [batch_size] which marks each - sequence in the batch as being finished or not finished. - next_input: A tensor containing input to be fed into the cell at the - next timestep. - next_cell_state: Cell internal state to be fed into the cell at the - next timestep. 
-      emit_output: Tensor to be added to the TensorArray returned by raw_rnn
-          as output from the while_loop.
-      next_loop_state: Additional loop state. These tensors will be fed back
-          into the next call to `loop_fn` as `loop_state`.
-      """
-      if cell_output is None:  # 0th time step.
-        next_cell_state = self.policy_cell.zero_state(batch_size, dtype)
-        elements_finished = tf.zeros([batch_size], tf.bool)
-        output_lengths = tf.ones([batch_size], dtype=tf.int32)
-        next_input = tf.gather(obs_embeddings, initial_state)
-        emit_output = None
-        next_loop_state = (
-            tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True),
-            output_lengths,
-            elements_finished
-        )
-      else:
-        scaled_logits = cell_output * config.softmax_tr  # Scale temperature.
-        prev_chosen, prev_output_lengths, prev_elements_finished = loop_state
-        next_cell_state = cell_state
-        chosen_outputs = tf.to_int32(tf.where(
-            tf.logical_not(prev_elements_finished),
-            tf.multinomial(logits=scaled_logits, num_samples=1)[:, 0],
-            tf.zeros([batch_size], dtype=tf.int64)))
-        elements_finished = tf.logical_or(
-            tf.equal(chosen_outputs, misc.BF_EOS_INT),
-            loop_time >= global_config.timestep_limit)
-        output_lengths = tf.where(
-            elements_finished,
-            prev_output_lengths,
-            # length includes EOS token. empty seq has len 1.
-            tf.tile(tf.expand_dims(loop_time + 1, 0), [batch_size])
-        )
-        next_input = tf.gather(obs_embeddings, chosen_outputs)
-        emit_output = scaled_logits
-        next_loop_state = (prev_chosen.write(loop_time - 1, chosen_outputs),
-                           output_lengths,
-                           tf.logical_or(prev_elements_finished,
-                                         elements_finished))
-      return (elements_finished, next_input, next_cell_state, emit_output,
-              next_loop_state)
-
-    with tf.variable_scope('policy'):
-      (decoder_outputs_ta,
-       _,  # decoder_state
-       (sampled_output_ta, output_lengths, _)) = tf.nn.raw_rnn(
-           cell=self.policy_cell,
-           loop_fn=loop_fn)
-      policy_logits = tf.transpose(decoder_outputs_ta.stack(), (1, 0, 2),
-                                   name='policy_logits')
-      sampled_tokens = tf.transpose(sampled_output_ta.stack(), (1, 0),
-                                    name='sampled_tokens')
-    # Add SOS to beginning of the sequence.
-    rshift_sampled_tokens = rshift_time(sampled_tokens, fill=misc.BF_EOS_INT)
-
-    # Initial state is 0, 2nd state is first token.
-    # Note: If value of last state is computed, this will be used as bootstrap.
-    if self.a2c:
-      with tf.variable_scope('value'):
-        value_output, _ = tf.nn.dynamic_rnn(
-            self.value_cell,
-            tf.gather(obs_embeddings, rshift_sampled_tokens),
-            sequence_length=output_lengths,
-            dtype=dtype)
-        value = tf.squeeze(value_output, axis=[2])
-    else:
-      value = tf.zeros([], dtype=dtype)
-
-    # `sampled_batch` holds the tensors used for sampling actions from the
-    # agent, and `given_batch` (below) holds the tensors used for doing
-    # gradient updates on the agent.
-    self.sampled_batch = AttrDict(
-        logits=policy_logits,
-        value=value,
-        tokens=sampled_tokens,
-        episode_lengths=output_lengths,
-        probs=tf.nn.softmax(policy_logits),
-        log_probs=tf.nn.log_softmax(policy_logits))
-
-    # adjusted_lengths can be less than the full length of each episode.
-    # Use this to train on only part of an episode (starting from t=0).
-    self.adjusted_lengths = tf.placeholder(
-        tf.int32, [None], name='adjusted_lengths')
-    self.policy_multipliers = tf.placeholder(
-        dtype,
-        [None, None],
-        name='policy_multipliers')
-    # Empirical value, i.e. discounted sum of observed future rewards from each
-    # time step in the episode.
-    self.empirical_values = tf.placeholder(
-        dtype,
-        [None, None],
-        name='empirical_values')
-
-    # Off-policy training. Just add supervised loss to the RL loss.
- self.off_policy_targets = tf.placeholder( - tf.int32, - [None, None], - name='off_policy_targets') - self.off_policy_target_lengths = tf.placeholder( - tf.int32, [None], name='off_policy_target_lengths') - - self.actions = tf.placeholder(tf.int32, [None, None], name='actions') - # Add SOS to beginning of the sequence. - inputs = rshift_time(self.actions, fill=misc.BF_EOS_INT) - with tf.variable_scope('policy', reuse=True): - logits, _ = tf.nn.dynamic_rnn( - self.policy_cell, tf.gather(obs_embeddings, inputs), - sequence_length=self.adjusted_lengths, - dtype=dtype) - - if self.a2c: - with tf.variable_scope('value', reuse=True): - value_output, _ = tf.nn.dynamic_rnn( - self.value_cell, - tf.gather(obs_embeddings, inputs), - sequence_length=self.adjusted_lengths, - dtype=dtype) - value2 = tf.squeeze(value_output, axis=[2]) - else: - value2 = tf.zeros([], dtype=dtype) - - self.given_batch = AttrDict( - logits=logits, - value=value2, - tokens=sampled_tokens, - episode_lengths=self.adjusted_lengths, - probs=tf.nn.softmax(logits), - log_probs=tf.nn.log_softmax(logits)) - - # Episode masks. - max_episode_length = tf.shape(self.actions)[1] - # range_row shape: [1, max_episode_length] - range_row = tf.expand_dims(tf.range(max_episode_length), 0) - episode_masks = tf.cast( - tf.less(range_row, tf.expand_dims(self.given_batch.episode_lengths, 1)), - dtype=dtype) - episode_masks_3d = tf.expand_dims(episode_masks, 2) - - # Length adjusted episodes. - self.a_probs = a_probs = self.given_batch.probs * episode_masks_3d - self.a_log_probs = a_log_probs = ( - self.given_batch.log_probs * episode_masks_3d) - self.a_value = a_value = self.given_batch.value * episode_masks - self.a_policy_multipliers = a_policy_multipliers = ( - self.policy_multipliers * episode_masks) - if self.a2c: - self.a_empirical_values = a_empirical_values = ( - self.empirical_values * episode_masks) - - # pi_loss is scalar - acs_onehot = tf.one_hot(self.actions, self.action_space, dtype=dtype) - self.acs_onehot = acs_onehot - chosen_masked_log_probs = acs_onehot * a_log_probs - pi_target = tf.expand_dims(a_policy_multipliers, -1) - pi_loss_per_step = chosen_masked_log_probs * pi_target # Maximize. - self.pi_loss = pi_loss = ( - -tf.reduce_mean(tf.reduce_sum(pi_loss_per_step, axis=[1, 2]), axis=0) - * MAGIC_LOSS_MULTIPLIER) # Minimize. - assert len(self.pi_loss.shape) == 0 # pylint: disable=g-explicit-length-test - - # shape: [batch_size, time] - self.chosen_log_probs = tf.reduce_sum(chosen_masked_log_probs, axis=2) - self.chosen_probs = tf.reduce_sum(acs_onehot * a_probs, axis=2) - - # loss of value function - if self.a2c: - vf_loss_per_step = tf.square(a_value - a_empirical_values) - self.vf_loss = vf_loss = ( - tf.reduce_mean(tf.reduce_sum(vf_loss_per_step, axis=1), axis=0) - * MAGIC_LOSS_MULTIPLIER) # Minimize. - assert len(self.vf_loss.shape) == 0 # pylint: disable=g-explicit-length-test - else: - self.vf_loss = vf_loss = 0.0 - - # Maximize entropy regularizer - self.entropy = entropy = ( - -tf.reduce_mean( - tf.reduce_sum(a_probs * a_log_probs, axis=[1, 2]), axis=0) - * MAGIC_LOSS_MULTIPLIER) # Maximize - self.negentropy = -entropy # Minimize negentropy. - assert len(self.negentropy.shape) == 0 # pylint: disable=g-explicit-length-test - - # off-policy loss - self.offp_switch = tf.placeholder(dtype, [], name='offp_switch') - if self.top_episodes is not None: - # Add SOS to beginning of the sequence. 
- offp_inputs = tf.gather(obs_embeddings, - rshift_time(self.off_policy_targets, - fill=misc.BF_EOS_INT)) - with tf.variable_scope('policy', reuse=True): - offp_logits, _ = tf.nn.dynamic_rnn( - self.policy_cell, offp_inputs, self.off_policy_target_lengths, - dtype=dtype) # shape: [batch_size, time, action_space] - topk_loss_per_step = tf.nn.sparse_softmax_cross_entropy_with_logits( - labels=self.off_policy_targets, - logits=offp_logits, - name='topk_loss_per_logit') - # Take mean over batch dimension so that the loss multiplier strength is - # independent of batch size. Sum over time dimension. - topk_loss = tf.reduce_mean( - tf.reduce_sum(topk_loss_per_step, axis=1), axis=0) - assert len(topk_loss.shape) == 0 # pylint: disable=g-explicit-length-test - self.topk_loss = topk_loss * self.offp_switch - logging.info('Including off policy loss.') - else: - self.topk_loss = topk_loss = 0.0 - - self.entropy_hparam = tf.constant( - config.entropy_beta, dtype=dtype, name='entropy_beta') - - self.pi_loss_term = pi_loss * self.pi_loss_hparam - self.vf_loss_term = vf_loss * self.vf_loss_hparam - self.entropy_loss_term = self.negentropy * self.entropy_hparam - self.topk_loss_term = self.topk_loss_hparam * topk_loss - self.loss = ( - self.pi_loss_term - + self.vf_loss_term - + self.entropy_loss_term - + self.topk_loss_term) - - params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, - tf.get_variable_scope().name) - self.trainable_variables = params - self.sync_variables = self.trainable_variables - non_embedding_params = [p for p in params - if obs_embedding_scope not in p.name] - self.non_embedding_params = non_embedding_params - self.params = params - - if config.regularizer: - logging.info('Adding L2 regularizer with scale %.2f.', - config.regularizer) - self.regularizer = config.regularizer * sum( - tf.nn.l2_loss(w) for w in non_embedding_params) - self.loss += self.regularizer - else: - logging.info('Skipping regularizer.') - self.regularizer = 0.0 - - # Only build gradients graph for local model. 
- if self.is_local: - unclipped_grads = tf.gradients(self.loss, params) - self.dense_unclipped_grads = [ - tf.convert_to_tensor(g) for g in unclipped_grads] - self.grads, self.global_grad_norm = tf.clip_by_global_norm( - unclipped_grads, config.grad_clip_threshold) - self.gradients_dict = dict(zip(params, self.grads)) - self.optimizer = make_optimizer(config.optimizer, self.learning_rate) - self.all_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, - tf.get_variable_scope().name) - - self.do_iw_summaries = do_iw_summaries - if self.do_iw_summaries: - b = None - self.log_iw_replay_ph = tf.placeholder(tf.float32, [b], - 'log_iw_replay_ph') - self.log_iw_policy_ph = tf.placeholder(tf.float32, [b], - 'log_iw_policy_ph') - self.log_prob_replay_ph = tf.placeholder(tf.float32, [b], - 'log_prob_replay_ph') - self.log_prob_policy_ph = tf.placeholder(tf.float32, [b], - 'log_prob_policy_ph') - self.log_norm_replay_weights_ph = tf.placeholder( - tf.float32, [b], 'log_norm_replay_weights_ph') - self.iw_summary_op = tf.summary.merge([ - tf.summary.histogram('is/log_iw_replay', self.log_iw_replay_ph), - tf.summary.histogram('is/log_iw_policy', self.log_iw_policy_ph), - tf.summary.histogram('is/log_prob_replay', self.log_prob_replay_ph), - tf.summary.histogram('is/log_prob_policy', self.log_prob_policy_ph), - tf.summary.histogram( - 'is/log_norm_replay_weights', self.log_norm_replay_weights_ph), - ]) - - def make_summary_ops(self): - """Construct summary ops for the model.""" - # size = number of timesteps across entire batch. Number normalized by size - # will not be affected by the amount of padding at the ends of sequences - # in the batch. - size = tf.cast( - tf.reduce_sum(self.given_batch.episode_lengths), dtype=self.dtype) - offp_size = tf.cast(tf.reduce_sum(self.off_policy_target_lengths), - dtype=self.dtype) - scope_prefix = self.parent_scope_name - - def _remove_prefix(prefix, name): - assert name.startswith(prefix) - return name[len(prefix):] - - # RL summaries. 
- self.rl_summary_op = tf.summary.merge( - [tf.summary.scalar('model/policy_loss', self.pi_loss / size), - tf.summary.scalar('model/value_loss', self.vf_loss / size), - tf.summary.scalar('model/topk_loss', self.topk_loss / offp_size), - tf.summary.scalar('model/entropy', self.entropy / size), - tf.summary.scalar('model/loss', self.loss / size), - tf.summary.scalar('model/grad_norm', - tf.global_norm(self.grads)), - tf.summary.scalar('model/unclipped_grad_norm', self.global_grad_norm), - tf.summary.scalar('model/non_embedding_var_norm', - tf.global_norm(self.non_embedding_params)), - tf.summary.scalar('hparams/entropy_beta', self.entropy_hparam), - tf.summary.scalar('hparams/topk_loss_hparam', self.topk_loss_hparam), - tf.summary.scalar('hparams/learning_rate', self.learning_rate), - tf.summary.scalar('model/trainable_var_norm', - tf.global_norm(self.trainable_variables)), - tf.summary.scalar('loss/loss', self.loss), - tf.summary.scalar('loss/entropy', self.entropy_loss_term), - tf.summary.scalar('loss/vf', self.vf_loss_term), - tf.summary.scalar('loss/policy', self.pi_loss_term), - tf.summary.scalar('loss/offp', self.topk_loss_term)] + - [tf.summary.scalar( - 'param_norms/' + _remove_prefix(scope_prefix + '/', p.name), - tf.norm(p)) - for p in self.params] + - [tf.summary.scalar( - 'grad_norms/' + _remove_prefix(scope_prefix + '/', p.name), - tf.norm(g)) - for p, g in zip(self.params, self.grads)] + - [tf.summary.scalar( - 'unclipped_grad_norms/' + _remove_prefix(scope_prefix + '/', - p.name), - tf.norm(g)) - for p, g in zip(self.params, self.dense_unclipped_grads)]) - - self.text_summary_placeholder = tf.placeholder(tf.string, shape=[]) - self.rl_text_summary_op = tf.summary.text('rl', - self.text_summary_placeholder) - - def _rl_text_summary(self, session, step, npe, tot_r, num_steps, - input_case, code_output, code, reason): - """Logs summary about a single episode and creates a text_summary for TB. - - Args: - session: tf.Session instance. - step: Global training step. - npe: Number of programs executed so far. - tot_r: Total reward. - num_steps: Number of timesteps in the episode (i.e. code length). - input_case: Inputs for test cases. - code_output: Outputs produced by running the code on the inputs. - code: String representation of the code. - reason: Reason for the reward assigned by the task. - - Returns: - Serialized text summary data for tensorboard. - """ - if not input_case: - input_case = ' ' - if not code_output: - code_output = ' ' - if not code: - code = ' ' - text = ( - 'Tot R: **%.2f**; Len: **%d**; Reason: **%s**\n\n' - 'Input: **`%s`**; Output: **`%s`**\n\nCode: **`%s`**' - % (tot_r, num_steps, reason, input_case, code_output, code)) - text_summary = session.run(self.rl_text_summary_op, - {self.text_summary_placeholder: text}) - logging.info( - 'Step %d.\t NPE: %d\t Reason: %s.\t Tot R: %.2f.\t Length: %d. ' - '\tInput: %s \tOutput: %s \tProgram: %s', - step, npe, reason, tot_r, num_steps, input_case, - code_output, code) - return text_summary - - def _rl_reward_summary(self, total_rewards): - """Create summary ops that report on episode rewards. - - Creates summaries for average, median, max, and min rewards in the batch. - - Args: - total_rewards: Tensor of shape [batch_size] containing the total reward - from each episode in the batch. - - Returns: - tf.Summary op. 
-    """
-    tr = np.asarray(total_rewards)
-    reward_summary = tf.Summary(value=[
-        tf.Summary.Value(
-            tag='reward/avg',
-            simple_value=np.mean(tr)),
-        tf.Summary.Value(
-            tag='reward/med',
-            simple_value=np.median(tr)),
-        tf.Summary.Value(
-            tag='reward/max',
-            simple_value=np.max(tr)),
-        tf.Summary.Value(
-            tag='reward/min',
-            simple_value=np.min(tr))])
-    return reward_summary
-
-  def _iw_summary(self, session, replay_iw, replay_log_probs,
-                  norm_replay_weights, on_policy_iw,
-                  on_policy_log_probs):
-    """Compute summaries for importance weights at a given batch.
-
-    Args:
-      session: tf.Session instance.
-      replay_iw: Importance weights for episodes from replay buffer.
-      replay_log_probs: Total log probabilities of the replay episodes under
-          the current policy.
-      norm_replay_weights: Normalized replay weights, i.e. values in
-          `replay_iw` divided by the total weight in the entire replay buffer.
-          Note, this is also the probability of selecting each episode from the
-          replay buffer (in a roulette wheel replay buffer).
-      on_policy_iw: Importance weights for episodes sampled from the current
-          policy.
-      on_policy_log_probs: Total log probabilities of the on-policy episodes
-          under the current policy.
-
-    Returns:
-      Serialized TF summaries. Use a summary writer to write these summaries to
-      disk.
-    """
-    return session.run(
-        self.iw_summary_op,
-        {self.log_iw_replay_ph: np.log(replay_iw),
-         self.log_iw_policy_ph: np.log(on_policy_iw),
-         self.log_norm_replay_weights_ph: np.log(norm_replay_weights),
-         self.log_prob_replay_ph: replay_log_probs,
-         self.log_prob_policy_ph: on_policy_log_probs})
-
-  def _compute_iw(self, policy_log_probs, replay_weights):
-    """Compute importance weights for a batch of episodes.
-
-    Arguments are iterables of length batch_size.
-
-    Args:
-      policy_log_probs: Log probability of each episode under the current
-          policy.
-      replay_weights: Weight of each episode in the replay buffer. 0 for
-          episodes not sampled from the replay buffer (i.e. sampled from the
-          policy).
-
-    Returns:
-      Numpy array of shape [batch_size] containing the importance weight for
-      each episode in the batch.
-    """
-    log_total_replay_weight = log(self.experience_replay.total_weight)
-
-    # importance weight
-    # = 1 / [(1 - a) + a * exp(log(replay_weight / total_weight / p))]
-    # = 1 / ((1-a) + a*q/p)
-    a = float(self.replay_alpha)
-    a_com = 1.0 - a  # complement of a
-    importance_weights = np.asarray(
-        [1.0 / (a_com
-                + a * exp((log(replay_weight) - log_total_replay_weight)
-                          - log_p))
-         if replay_weight > 0 else 1.0 / a_com
-         for log_p, replay_weight
-         in zip(policy_log_probs, replay_weights)])
-    return importance_weights
-
-  def update_step(self, session, rl_batch, train_op, global_step_op,
-                  return_gradients=False):
-    """Perform gradient update on the model.
-
-    Args:
-      session: tf.Session instance.
-      rl_batch: RLBatch instance from data.py. Use DataManager to create an
-          RLBatch for each call to update_step. RLBatch contains a batch of
-          tasks.
-      train_op: A TF op which will perform the gradient update. LMAgent does
-          not own its training op, so that trainers can do distributed training
-          and construct a specialized training op.
-      global_step_op: A TF op which will return the current global step when
-          run (should not increment it).
-      return_gradients: If True, the gradients will be saved and returned from
-          this method call. This is useful for testing.
- - Returns: - Results from the update step in a UpdateStepResult namedtuple, including - global step, global NPE, serialized summaries, and optionally gradients. - """ - assert self.is_local - - # Do update for REINFORCE or REINFORCE + replay buffer. - if self.experience_replay is None: - # Train with on-policy REINFORCE. - - # Sample new programs from the policy. - num_programs_from_policy = rl_batch.batch_size - (batch_actions, - batch_values, - episode_lengths) = session.run( - [self.sampled_batch.tokens, self.sampled_batch.value, - self.sampled_batch.episode_lengths]) - if episode_lengths.size == 0: - # This should not happen. - logging.warn( - 'Shapes:\n' - 'batch_actions.shape: %s\n' - 'batch_values.shape: %s\n' - 'episode_lengths.shape: %s\n', - batch_actions.shape, batch_values.shape, episode_lengths.shape) - - # Compute rewards. - code_scores = compute_rewards( - rl_batch, batch_actions, episode_lengths) - code_strings = code_scores.code_strings - batch_tot_r = code_scores.total_rewards - test_cases = code_scores.test_cases - code_outputs = code_scores.code_outputs - reasons = code_scores.reasons - - # Process on-policy samples. - batch_targets, batch_returns = process_episodes( - code_scores.batch_rewards, episode_lengths, a2c=self.a2c, - baselines=self.ema_by_len, - batch_values=batch_values) - batch_policy_multipliers = batch_targets - batch_emp_values = batch_returns if self.a2c else [[]] - adjusted_lengths = episode_lengths - - if self.top_episodes: - assert len(self.top_episodes) > 0 # pylint: disable=g-explicit-length-test - off_policy_targets = [ - item for item, _ - in self.top_episodes.random_sample(self.topk_batch_size)] - off_policy_target_lengths = [len(t) for t in off_policy_targets] - off_policy_targets = utils.stack_pad(off_policy_targets, pad_axes=0, - dtype=np.int32) - offp_switch = 1 - else: - off_policy_targets = [[0]] - off_policy_target_lengths = [1] - offp_switch = 0 - - fetches = { - 'global_step': global_step_op, - 'program_count': self.program_count, - 'summaries': self.rl_summary_op, - 'train_op': train_op, - 'gradients': self.gradients_dict if return_gradients else self.no_op} - fetched = session.run( - fetches, - {self.actions: batch_actions, - self.empirical_values: batch_emp_values, - self.policy_multipliers: batch_policy_multipliers, - self.adjusted_lengths: adjusted_lengths, - self.off_policy_targets: off_policy_targets, - self.off_policy_target_lengths: off_policy_target_lengths, - self.offp_switch: offp_switch}) - - combined_adjusted_lengths = adjusted_lengths - combined_returns = batch_returns - else: - # Train with REINFORCE + off-policy replay buffer by using importance - # sampling. - - # Sample new programs from the policy. - # Note: batch size is constant. A full batch will be sampled, but not all - # programs will be executed and added to the replay buffer. Those which - # are not executed will be discarded and not counted. - batch_actions, batch_values, episode_lengths, log_probs = session.run( - [self.sampled_batch.tokens, self.sampled_batch.value, - self.sampled_batch.episode_lengths, self.sampled_batch.log_probs]) - if episode_lengths.size == 0: - # This should not happen. 
-        logging.warn(
-            'Shapes:\n'
-            'batch_actions.shape: %s\n'
-            'batch_values.shape: %s\n'
-            'episode_lengths.shape: %s\n',
-            batch_actions.shape, batch_values.shape, episode_lengths.shape)
-
-      # Sample from the experience replay buffer.
-      empty_replay_buffer = (
-          self.experience_replay.is_empty()
-          if self.experience_replay is not None else True)
-      num_programs_from_replay_buff = (
-          self.num_replay_per_batch if not empty_replay_buffer else 0)
-      num_programs_from_policy = (
-          rl_batch.batch_size - num_programs_from_replay_buff)
-      if (not empty_replay_buffer) and num_programs_from_replay_buff:
-        result = self.experience_replay.sample_many(
-            num_programs_from_replay_buff)
-        experience_samples, replay_weights = zip(*result)
-        (replay_actions,
-         replay_rewards,
-         _,  # log probs
-         replay_adjusted_lengths) = zip(*experience_samples)
-
-        replay_batch_actions = utils.stack_pad(replay_actions, pad_axes=0,
-                                               dtype=np.int32)
-
-        # Compute log probs for replay samples under the current policy.
-        all_replay_log_probs, = session.run(
-            [self.given_batch.log_probs],
-            {self.actions: replay_batch_actions,
-             self.adjusted_lengths: replay_adjusted_lengths})
-        replay_log_probs = [
-            np.choose(replay_actions[i], all_replay_log_probs[i, :l].T).sum()
-            for i, l in enumerate(replay_adjusted_lengths)]
-      else:
-        # Replay buffer is empty. Do not sample from it.
-        replay_actions = None
-        replay_policy_multipliers = None
-        replay_adjusted_lengths = None
-        replay_log_probs = None
-        replay_weights = None
-        replay_returns = None
-        on_policy_weights = [0] * num_programs_from_replay_buff
-
-      assert not self.a2c  # TODO(danabo): Support A2C with importance sampling.
-
-      # Compute rewards.
-      code_scores = compute_rewards(
-          rl_batch, batch_actions, episode_lengths,
-          batch_size=num_programs_from_policy)
-      code_strings = code_scores.code_strings
-      batch_tot_r = code_scores.total_rewards
-      test_cases = code_scores.test_cases
-      code_outputs = code_scores.code_outputs
-      reasons = code_scores.reasons
-
-      # Process on-policy samples.
-      p = num_programs_from_policy
-      batch_targets, batch_returns = process_episodes(
-          code_scores.batch_rewards, episode_lengths[:p], a2c=False,
-          baselines=self.ema_by_len)
-      batch_policy_multipliers = batch_targets
-      batch_emp_values = [[]]
-      on_policy_returns = batch_returns
-
-      # Process off-policy samples.
-      if (not empty_replay_buffer) and num_programs_from_replay_buff:
-        offp_batch_rewards = [
-            [0.0] * (l - 1) + [r]
-            for l, r in zip(replay_adjusted_lengths, replay_rewards)]
-        assert len(offp_batch_rewards) == num_programs_from_replay_buff
-        assert len(replay_adjusted_lengths) == num_programs_from_replay_buff
-        replay_batch_targets, replay_returns = process_episodes(
-            offp_batch_rewards, replay_adjusted_lengths, a2c=False,
-            baselines=self.ema_by_len)
-        # Convert 2D array back into ragged 2D list.
-        replay_policy_multipliers = [
-            replay_batch_targets[i, :l]
-            for i, l
-            in enumerate(
-                replay_adjusted_lengths[:num_programs_from_replay_buff])]
-
-      adjusted_lengths = episode_lengths[:num_programs_from_policy]
-
-      if self.top_episodes:
-        assert len(self.top_episodes) > 0  # pylint: disable=g-explicit-length-test
-        off_policy_targets = [
-            item for item, _
-            in self.top_episodes.random_sample(self.topk_batch_size)]
-        off_policy_target_lengths = [len(t) for t in off_policy_targets]
-        off_policy_targets = utils.stack_pad(off_policy_targets, pad_axes=0,
-                                             dtype=np.int32)
-        offp_switch = 1
-      else:
-        off_policy_targets = [[0]]
-        off_policy_target_lengths = [1]
-        offp_switch = 0
-
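The `np.choose` call above gathers, at each timestep, the log probability the current policy assigns to the action that was actually taken, then sums over time to get one log probability per episode. A minimal self-contained sketch of that gather; the array names, shapes, and values here are illustrative, not taken from the codebase:

import numpy as np

T, V = 4, 3  # T timesteps, V-way action vocabulary.
step_log_probs = np.log(np.full((T, V), 1.0 / V))  # [T, V] log probs per step.
taken_actions = np.array([0, 2, 1, 2])             # [T] sampled action ids.

# Fancy indexing picks step_log_probs[t, taken_actions[t]] for each t.
episode_log_prob = step_log_probs[np.arange(T), taken_actions].sum()

# np.choose over the transposed array is equivalent (valid for V <= 32).
assert np.isclose(episode_log_prob,
                  np.choose(taken_actions, step_log_probs.T).sum())

-      # On-policy episodes.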
-      if num_programs_from_policy:
-        separate_actions = [
-            batch_actions[i, :l]
-            for i, l in enumerate(adjusted_lengths)]
-        chosen_log_probs = [
-            np.choose(separate_actions[i], log_probs[i, :l].T)
-            for i, l in enumerate(adjusted_lengths)]
-        new_experiences = [
-            (separate_actions[i],
-             batch_tot_r[i],
-             chosen_log_probs[i].sum(), l)
-            for i, l in enumerate(adjusted_lengths)]
-        on_policy_policy_multipliers = [
-            batch_policy_multipliers[i, :l]
-            for i, l in enumerate(adjusted_lengths)]
-        (on_policy_actions,
-         _,  # rewards
-         on_policy_log_probs,
-         on_policy_adjusted_lengths) = zip(*new_experiences)
-      else:
-        new_experiences = []
-        on_policy_policy_multipliers = []
-        on_policy_actions = []
-        on_policy_log_probs = []
-        on_policy_adjusted_lengths = []
-
-      if (not empty_replay_buffer) and num_programs_from_replay_buff:
-        # Look for new experiences in replay buffer. Assign weight if an episode
-        # is in the buffer.
-        on_policy_weights = [0] * num_programs_from_policy
-        for i, cs in enumerate(code_strings):
-          if self.experience_replay.has_key(cs):
-            on_policy_weights[i] = self.experience_replay.get_weight(cs)
-
-      # Randomly select on-policy or off-policy episodes to train on.
-      combined_actions = join(replay_actions, on_policy_actions)
-      combined_policy_multipliers = join(
-          replay_policy_multipliers, on_policy_policy_multipliers)
-      combined_adjusted_lengths = join(
-          replay_adjusted_lengths, on_policy_adjusted_lengths)
-      combined_returns = join(replay_returns, on_policy_returns)
-      combined_actions = utils.stack_pad(combined_actions, pad_axes=0)
-      combined_policy_multipliers = utils.stack_pad(combined_policy_multipliers,
-                                                    pad_axes=0)
-      # P: total log probability of each episode under the current policy.
-      combined_on_policy_log_probs = join(replay_log_probs, on_policy_log_probs)
-      # Q: weight of each episode under the replay buffer distribution.
-      # Assume weight is zero for all sequences sampled from the policy.
-      combined_q_weights = join(replay_weights, on_policy_weights)
-
-      # Importance adjustment. Naive formulation:
-      # E_{x~p}[f(x)] ~= 1/N sum_{x~p}(f(x)) ~= 1/N sum_{x~q}(f(x) * p(x)/q(x)).
-      # p(x) is the policy, and q(x) is the off-policy distribution, i.e. replay
-      # buffer distribution. Importance weight w(x) = p(x) / q(x).
-
-      # Instead of sampling from the replay buffer only, we sample from a
-      # mixture distribution of the policy and replay buffer.
-      # We are sampling from the mixture a*q(x) + (1-a)*p(x), where 0 <= a <= 1.
-      # Thus the importance weight w(x) = p(x) / (a*q(x) + (1-a)*p(x))
-      # = 1 / ((1-a) + a*q(x)/p(x)) where q(x) is 0 for x sampled from the
-      # policy.
-      # Note: a = self.replay_alpha
-      if empty_replay_buffer:
-        # The replay buffer is empty.
-        # Skip the gradient update this step. The replay buffer will have data
-        # in it next time.
-        combined_policy_multipliers *= 0
-      elif not num_programs_from_replay_buff:
-        combined_policy_multipliers = np.ones([len(combined_actions), 1],
-                                              dtype=np.float32)
-      else:
-        # If a < 1, compute importance weights:
-        # importance weight
-        # = 1 / [(1 - a) + a * exp(log(replay_weight / total_weight / p))]
-        # = 1 / ((1-a) + a*q/p)
-        importance_weights = self._compute_iw(combined_on_policy_log_probs,
-                                              combined_q_weights)
-        if self.config.iw_normalize:
-          importance_weights *= (
-              float(rl_batch.batch_size) / importance_weights.sum())
-        combined_policy_multipliers *= importance_weights.reshape(-1, 1)
-
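A standalone sketch of that weight, w(x) = 1 / ((1-a) + a*q(x)/p(x)), computed in log space the way `_compute_iw` does; the alpha, log probabilities, and weights below are made-up numbers:

import numpy as np

a = 0.5                                          # Mixture coefficient alpha.
policy_log_probs = np.array([-3.2, -5.1, -4.0])  # log p(x) per episode.
replay_weights = np.array([2.0, 0.0, 1.5])       # q weights; 0 => on-policy.
log_total_weight = np.log(10.0)                  # Log of buffer total weight.

iw = np.array([
    # q(x)/p(x) = exp(log(w) - log(total_weight) - log_p), so
    # w(x) = 1 / ((1-a) + a * q(x)/p(x)).
    1.0 / ((1 - a) + a * np.exp(np.log(w) - log_total_weight - log_p))
    if w > 0 else 1.0 / (1 - a)  # q(x) = 0 for on-policy samples.
    for log_p, w in zip(policy_log_probs, replay_weights)])

-      # Train on replay batch, top-k MLE.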
-      assert self.program_count is not None
-      fetches = {
-          'global_step': global_step_op,
-          'program_count': self.program_count,
-          'summaries': self.rl_summary_op,
-          'train_op': train_op,
-          'gradients': self.gradients_dict if return_gradients else self.no_op}
-      fetched = session.run(
-          fetches,
-          {self.actions: combined_actions,
-           self.empirical_values: [[]],  # replay_emp_values,
-           self.policy_multipliers: combined_policy_multipliers,
-           self.adjusted_lengths: combined_adjusted_lengths,
-           self.off_policy_targets: off_policy_targets,
-           self.off_policy_target_lengths: off_policy_target_lengths,
-           self.offp_switch: offp_switch})
-
-      # Add to experience replay buffer.
-      self.experience_replay.add_many(
-          objs=new_experiences,
-          weights=[exp(r / self.replay_temperature) for r in batch_tot_r],
-          keys=code_strings)
-
-    # Update program count.
-    session.run(
-        [self.program_count_add_op],
-        {self.program_count_add_ph: num_programs_from_policy})
-
-    # Update EMA baselines on the mini-batch which we just did training on.
-    if not self.a2c:
-      for i in xrange(rl_batch.batch_size):
-        episode_length = combined_adjusted_lengths[i]
-        empirical_returns = combined_returns[i, :episode_length]
-        for j in xrange(episode_length):
-          # Update ema_baselines in place.
-          self.ema_by_len[j] = (
-              self.ema_baseline_decay * self.ema_by_len[j]
-              + (1 - self.ema_baseline_decay) * empirical_returns[j])
-
-    global_step = fetched['global_step']
-    global_npe = fetched['program_count']
-    core_summaries = fetched['summaries']
-    summaries_list = [core_summaries]
-
-    if num_programs_from_policy:
-      s_i = 0
-      text_summary = self._rl_text_summary(
-          session,
-          global_step,
-          global_npe,
-          batch_tot_r[s_i],
-          episode_lengths[s_i], test_cases[s_i],
-          code_outputs[s_i], code_strings[s_i], reasons[s_i])
-      reward_summary = self._rl_reward_summary(batch_tot_r)
-
-      is_best = False
-      if self.global_best_reward_fn:
-        # Save best reward.
-        best_reward = np.max(batch_tot_r)
-        is_best = self.global_best_reward_fn(session, best_reward)
-
-      if self.found_solution_op is not None and 'correct' in reasons:
-        session.run(self.found_solution_op)
-
-      # Save program to disk for record keeping.
-      if self.stop_on_success:
-        solutions = [
-            {'code': code_strings[i], 'reward': batch_tot_r[i],
-             'npe': global_npe}
-            for i in xrange(len(reasons)) if reasons[i] == 'correct']
-      elif is_best:
-        solutions = [
-            {'code': code_strings[np.argmax(batch_tot_r)],
-             'reward': np.max(batch_tot_r),
-             'npe': global_npe}]
-      else:
-        solutions = []
-      if solutions:
-        if self.assign_code_solution_fn:
-          self.assign_code_solution_fn(session, solutions[0]['code'])
-        with tf.gfile.FastGFile(self.logging_file, 'a') as writer:
-          for solution_dict in solutions:
-            writer.write(str(solution_dict) + '\n')
-
-      max_i = np.argmax(batch_tot_r)
-      max_tot_r = batch_tot_r[max_i]
-      if max_tot_r >= self.top_reward:
-        self.top_reward = max_tot_r
-        logging.info('Top code: r=%.2f, \t%s', max_tot_r, code_strings[max_i])
-        if self.top_episodes is not None:
-          self.top_episodes.push(
-              max_tot_r, tuple(batch_actions[max_i, :episode_lengths[max_i]]))
-
-      summaries_list += [text_summary, reward_summary]
-
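The EMA baseline update a few lines above is a per-timestep exponential moving average: each timestep index j keeps its own decayed average of observed returns. A minimal sketch with invented numbers, where `decay` plays the role of `ema_baseline_decay`:

import numpy as np

decay = 0.99
ema_by_len = np.zeros(5)  # One baseline per timestep index.
empirical_returns = np.array([4.0, 3.0, 2.5, 1.0, 0.5])

for j, ret in enumerate(empirical_returns):
  ema_by_len[j] = decay * ema_by_len[j] + (1 - decay) * ret

-    if self.do_iw_summaries and not empty_replay_buffer:
-      # prob of replay samples under replay buffer sampling.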
-      norm_replay_weights = [
-          w / self.experience_replay.total_weight
-          for w in replay_weights]
-      replay_iw = self._compute_iw(replay_log_probs, replay_weights)
-      on_policy_iw = self._compute_iw(on_policy_log_probs, on_policy_weights)
-      summaries_list.append(
-          self._iw_summary(
-              session, replay_iw, replay_log_probs, norm_replay_weights,
-              on_policy_iw, on_policy_log_probs))
-
-    return UpdateStepResult(
-        global_step=global_step,
-        global_npe=global_npe,
-        summaries_list=summaries_list,
-        gradients_dict=fetched['gradients'])
-
-
-def io_to_text(io_case, io_type):
-  if isinstance(io_case, misc.IOTuple):
-    # If there are many strings, join them with ','.
-    return ','.join([io_to_text(e, io_type) for e in io_case])
-  if io_type == misc.IOType.string:
-    # There is one string. Return it.
-    return misc.tokens_to_text(io_case)
-  if (io_type == misc.IOType.integer
-      or io_type == misc.IOType.boolean):
-    if len(io_case) == 1:
-      return str(io_case[0])
-    return str(io_case)
-
-
-CodeScoreInfo = namedtuple(
-    'CodeScoreInfo',
-    ['code_strings', 'batch_rewards', 'total_rewards', 'test_cases',
-     'code_outputs', 'reasons'])
-
-
-def compute_rewards(rl_batch, batch_actions, episode_lengths, batch_size=None):
-  """Compute rewards for each episode in the batch.
-
-  Args:
-    rl_batch: A data.RLBatch instance. This holds information about the task
-      each episode is solving, and a reward function for each episode.
-    batch_actions: Contains batch of episodes. Each sequence of actions will be
-      converted into a BF program and then scored. A numpy array of shape
-      [batch_size, max_sequence_length].
-    episode_lengths: The sequence length of each episode in the batch. Iterable
-      of length batch_size.
-    batch_size: (optional) number of programs to score. Use this to limit the
-      number of programs executed from this batch. For example, when doing
-      importance sampling some of the on-policy episodes will be discarded
-      and they should not be executed. `batch_size` can be less than or equal
-      to the size of the input batch.
-
-  Returns:
-    CodeScoreInfo namedtuple instance. This holds not just the computed rewards,
-    but additional information computed during code execution which can be used
-    for debugging and monitoring. This includes: BF code strings, test cases
-    the code was executed on, code outputs from those test cases, and reasons
-    for success or failure.
-  """
-  code_strings = [
-      ''.join([misc.bf_int2char(a) for a in action_sequence[:l]])
-      for action_sequence, l in zip(batch_actions, episode_lengths)]
-  if batch_size is None:
-    batch_size = len(code_strings)
-  else:
-    assert batch_size <= len(code_strings)
-    code_strings = code_strings[:batch_size]
-
-  if isinstance(rl_batch.reward_fns, (list, tuple)):
-    # reward_fns is a list of functions, same length as code_strings.
-    assert len(rl_batch.reward_fns) >= batch_size
-    r_fn_results = [
-        rl_batch.reward_fns[i](code_strings[i]) for i in xrange(batch_size)]
-  else:
-    # reward_fns is allowed to be one function which processes a batch of code
-    # strings. This is useful for efficiency and batch level computation.
-    r_fn_results = rl_batch.reward_fns(code_strings)
-
-  # Expecting that r_fn returns a list of rewards. Length of list equals
-  # length of the code string (including EOS char).
- - batch_rewards = [r.episode_rewards for r in r_fn_results] - total_rewards = [sum(b) for b in batch_rewards] - test_cases = [io_to_text(r.input_case, r.input_type) for r in r_fn_results] - code_outputs = [io_to_text(r.code_output, r.output_type) - for r in r_fn_results] - reasons = [r.reason for r in r_fn_results] - return CodeScoreInfo( - code_strings=code_strings, - batch_rewards=batch_rewards, - total_rewards=total_rewards, - test_cases=test_cases, - code_outputs=code_outputs, - reasons=reasons) - - -def process_episodes( - batch_rewards, episode_lengths, a2c=False, baselines=None, - batch_values=None): - """Compute REINFORCE targets. - - REINFORCE here takes the form: - grad_t = grad[log(pi(a_t|c_t))*target_t] - where c_t is context: i.e. RNN state or environment state (or both). - - Two types of targets are supported: - 1) Advantage actor critic (a2c). - 2) Vanilla REINFORCE with baseline. - - Args: - batch_rewards: Rewards received in each episode in the batch. A numpy array - of shape [batch_size, max_sequence_length]. Note, these are per-timestep - rewards, not total reward. - episode_lengths: Length of each episode. An iterable of length batch_size. - a2c: A bool. Whether to compute a2c targets (True) or vanilla targets - (False). - baselines: If a2c is False, provide baselines for each timestep. This is a - list (or indexable container) of length max_time. Note: baselines are - shared across all episodes, which is why there is no batch dimension. - It is up to the caller to update baselines accordingly. - batch_values: If a2c is True, provide values computed by a value estimator. - A numpy array of shape [batch_size, max_sequence_length]. - - Returns: - batch_targets: REINFORCE targets for each episode and timestep. A numpy - array of shape [batch_size, max_sequence_length]. - batch_returns: Returns computed for each episode and timestep. This is for - reference, and is not used in the REINFORCE gradient update (but was - used to compute the targets). A numpy array of shape - [batch_size, max_sequence_length]. - """ - num_programs = len(batch_rewards) - assert num_programs <= len(episode_lengths) - batch_returns = [None] * num_programs - batch_targets = [None] * num_programs - for i in xrange(num_programs): - episode_length = episode_lengths[i] - assert len(batch_rewards[i]) == episode_length - # Compute target for each timestep. - # If we are computing A2C: - # target_t = advantage_t = R_t - V(c_t) - # where V(c_t) is a learned value function (provided as `values`). - # Otherwise: - # target_t = R_t - baselines[t] - # where `baselines` are provided. - # In practice we use a more generalized formulation of advantage. See docs - # for `discounted_advantage_and_rewards`. - if a2c: - # Compute advantage. - assert batch_values is not None - episode_values = batch_values[i, :episode_length] - episode_rewards = batch_rewards[i] - emp_val, gen_adv = rollout_lib.discounted_advantage_and_rewards( - episode_rewards, episode_values, gamma=1.0, lambda_=1.0) - batch_returns[i] = emp_val - batch_targets[i] = gen_adv - else: - # Compute return for each timestep. 
See section 3 of - # https://arxiv.org/pdf/1602.01783.pdf - assert baselines is not None - empirical_returns = rollout_lib.discount(batch_rewards[i], gamma=1.0) - targets = [None] * episode_length - for j in xrange(episode_length): - targets[j] = empirical_returns[j] - baselines[j] - batch_returns[i] = empirical_returns - batch_targets[i] = targets - batch_returns = utils.stack_pad(batch_returns, 0) - if num_programs: - batch_targets = utils.stack_pad(batch_targets, 0) - else: - batch_targets = np.array([], dtype=np.float32) - - return (batch_targets, batch_returns) diff --git a/research/brain_coder/single_task/pg_agent_test.py b/research/brain_coder/single_task/pg_agent_test.py deleted file mode 100644 index 503d37eca..000000000 --- a/research/brain_coder/single_task/pg_agent_test.py +++ /dev/null @@ -1,395 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tests for pg_agent.""" - -from collections import Counter - -from absl import logging -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from common import utils # brain coder -from single_task import data # brain coder -from single_task import defaults # brain coder -from single_task import misc # brain coder -from single_task import pg_agent as agent_lib # brain coder -from single_task import pg_train # brain coder - - -# Symmetric mean absolute percentage error (SMAPE). -# https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error -def smape(a, b): - return 2.0 * abs(a - b) / float(a + b) - - -def onehot(dim, num_dims): - value = np.zeros(num_dims, dtype=np.float32) - value[dim] = 1 - return value - - -def random_sequence(max_length, num_tokens, eos=0): - length = np.random.randint(1, max_length - 1) - return np.append(np.random.randint(1, num_tokens, length), eos) - - -def repeat_and_pad(v, rep, total_len): - return [v] * rep + [0.0] * (total_len - rep) - - -class AgentTest(tf.test.TestCase): - - def testProcessEpisodes(self): - batch_size = 3 - - def reward_fn(code_string): - return misc.RewardInfo( - episode_rewards=[float(ord(c)) for c in code_string], - input_case=[], - correct_output=[], - code_output=[], - input_type=misc.IOType.integer, - output_type=misc.IOType.integer, - reason='none') - - rl_batch = data.RLBatch( - reward_fns=[reward_fn for _ in range(batch_size)], - batch_size=batch_size, - good_reward=10.0) - batch_actions = np.asarray([ - [4, 5, 3, 6, 8, 1, 0, 0], - [1, 2, 3, 4, 0, 0, 0, 0], - [8, 7, 6, 5, 4, 3, 2, 1]], dtype=np.int32) - batch_values = np.asarray([ - [0, 1, 2, 1, 0, 1, 1, 0], - [0, 2, 1, 2, 1, 0, 0, 0], - [0, 1, 1, 0, 0, 0, 1, 1]], dtype=np.float32) - episode_lengths = np.asarray([7, 5, 8], dtype=np.int32) - - scores = agent_lib.compute_rewards( - rl_batch, batch_actions, episode_lengths) - batch_targets, batch_returns = agent_lib.process_episodes( - scores.batch_rewards, episode_lengths, a2c=True, - batch_values=batch_values) - self.assertEqual( - [[473.0, 428.0, 337.0, 294.0, 201.0, 157.0, 95.0, 0.0], - [305.0, 243.0, 183.0, 140.0, 95.0, 0.0, 0.0, 0.0], - [484.0, 440.0, 394.0, 301.0, 210.0, 165.0, 122.0, 62.0]], - batch_returns.tolist()) - self.assertEqual( - [[473.0, 427.0, 335.0, 293.0, 201.0, 156.0, 94.0, 0.0], - [305.0, 241.0, 182.0, 138.0, 94.0, 0.0, 0.0, 0.0], - [484.0, 439.0, 393.0, 301.0, 210.0, 165.0, 121.0, 61.0]], - batch_targets.tolist()) - - def testVarUpdates(self): - """Tests that variables get updated as expected. 
- - For the RL update, check that gradients are non-zero and that the global - model gets updated. - """ - config = defaults.default_config_with_updates( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",eos_token=True,optimizer="sgd",lr=1.0)') - lr = config.agent.lr - - tf.reset_default_graph() - trainer = pg_train.AsyncTrainer( - config, task_id=0, ps_tasks=0, num_workers=1) - global_init_op = tf.variables_initializer( - tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global')) - with tf.Session() as sess: - sess.run(global_init_op) # Initialize global copy. - trainer.initialize(sess) - model = trainer.model - global_vars = sess.run(trainer.global_model.trainable_variables) - local_vars = sess.run(model.trainable_variables) - - # Make sure names match. - g_prefix = 'global/' - l_prefix = 'local/' - for g, l in zip(trainer.global_model.trainable_variables, - model.trainable_variables): - self.assertEqual(g.name[len(g_prefix):], l.name[len(l_prefix):]) - - # Assert that shapes and values are the same between global and local - # models. - for g, l in zip(global_vars, local_vars): - self.assertEqual(g.shape, l.shape) - self.assertTrue(np.array_equal(g, l)) - - # Make all gradients dense tensors. - for param, grad in model.gradients_dict.items(): - if isinstance(grad, tf.IndexedSlices): - # Converts to dense tensor. - model.gradients_dict[param] = tf.multiply(grad, 1.0) - - # Perform update. - results = model.update_step( - sess, trainer.data_manager.sample_rl_batch(), trainer.train_op, - trainer.global_step, return_gradients=True) - grads_dict = results.gradients_dict - for grad in grads_dict.values(): - self.assertIsNotNone(grad) - self.assertTrue(np.count_nonzero(grad) > 0) - global_update = sess.run(trainer.global_model.trainable_variables) - for tf_var, var_before, var_after in zip( - model.trainable_variables, local_vars, global_update): - # Check that the params were updated. - self.assertTrue(np.allclose( - var_after, - var_before - grads_dict[tf_var] * lr)) - - # Test that global to local sync works. - sess.run(trainer.sync_op) - global_vars = sess.run(trainer.global_model.trainable_variables) - local_vars = sess.run(model.trainable_variables) - for l, g in zip(local_vars, global_vars): - self.assertTrue(np.allclose(l, g)) - - def testMonteCarloGradients(self): - """Test Monte Carlo estimate of REINFORCE gradient. - - Test that the Monte Carlo estimate of the REINFORCE gradient is - approximately equal to the true gradient. We compute the true gradient for a - toy environment with a very small action space. - - Similar to section 5 of https://arxiv.org/pdf/1505.00521.pdf. - """ - # Test may have different outcome on different machines due to different - # rounding behavior of float arithmetic. - tf.reset_default_graph() - tf.set_random_seed(12345678987654321) - np.random.seed(1294024302) - max_length = 2 - num_tokens = misc.bf_num_tokens() - eos = misc.BF_EOS_INT - assert eos == 0 - def sequence_iterator(max_length): - """Iterates through all sequences up to the given length.""" - yield [eos] - for a in xrange(1, num_tokens): - if max_length > 1: - for sub_seq in sequence_iterator(max_length - 1): - yield [a] + sub_seq - else: - yield [a] - actions = list(sequence_iterator(max_length)) - - # This batch contains all possible episodes up to max_length. 
-    actions_batch = utils.stack_pad(actions, 0)
-    lengths_batch = [len(s) for s in actions]
-
-    reward_map = {tuple(a): np.random.randint(-1, 7) for a in actions_batch}
-    # reward_map = {tuple(a): np.random.normal(3, 1)
-    #               for a in actions_batch}  # normal distribution
-    # reward_map = {tuple(a): 1.0
-    #               for a in actions_batch}  # expected reward is 1
-
-    n = 100000  # MC sample size.
-    config = defaults.default_config_with_updates(
-        'env=c(task="print"),'
-        'agent=c(algorithm="pg",optimizer="sgd",lr=1.0,ema_baseline_decay=0.99,'
-        'entropy_beta=0.0,topk_loss_hparam=0.0,regularizer=0.0,'
-        'policy_lstm_sizes=[10],eos_token=True),'
-        'batch_size='+str(n)+',timestep_limit='+str(max_length))
-
-    dtype = tf.float64
-    trainer = pg_train.AsyncTrainer(
-        config, task_id=0, ps_tasks=0, num_workers=1, dtype=dtype)
-    model = trainer.model
-    actions_ph = model.actions
-    lengths_ph = model.adjusted_lengths
-    multipliers_ph = model.policy_multipliers
-
-    global_init_op = tf.variables_initializer(
-        tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global'))
-    with tf.Session() as sess, sess.graph.as_default():
-      sess.run(global_init_op)  # Initialize global copy.
-      trainer.initialize(sess)
-
-      # Compute exact gradients.
-      # exact_grads = sum(P(a) * grad(log P(a)) * R(a) for a in actions_batch)
-      true_loss_unnormalized = 0.0
-      exact_grads = [np.zeros(v.shape) for v in model.trainable_variables]
-      episode_probs_map = {}
-      grads_map = {}
-      for a_idx in xrange(len(actions_batch)):
-        a = actions_batch[a_idx]
-        grads_result, probs_result, loss = sess.run(
-            [model.dense_unclipped_grads, model.chosen_probs, model.loss],
-            {actions_ph: [a],
-             lengths_ph: [lengths_batch[a_idx]],
-             multipliers_ph: [
-                 repeat_and_pad(reward_map[tuple(a)],
-                                lengths_batch[a_idx],
-                                max_length)]})
-        # Take product over time axis.
-        episode_probs_result = np.prod(probs_result[0, :lengths_batch[a_idx]])
-        for i in range(0, len(exact_grads)):
-          exact_grads[i] += grads_result[i] * episode_probs_result
-        episode_probs_map[tuple(a)] = episode_probs_result
-        grads_map[tuple(a)] = grads_result
-        true_loss_unnormalized += loss
-      # Normalize loss. Since each episode is fed into the model one at a time,
-      # normalization needs to be done manually.
-      true_loss = true_loss_unnormalized / float(len(actions_batch))
-
-      # Compute Monte Carlo gradients.
-      # E_a~P[grad(log P(a)) R(a)] is approximately equal to
-      # sum(grad(log P(a)) R(a) for a in actions_sampled_from_P) / n
-      # where len(actions_sampled_from_P) == n.
-      #
-      # In other words, sample from the policy and compute the gradients of the
-      # log probs weighted by the returns. This will exercise the code in
-      # pg_agent.py.
-      sampled_actions, sampled_lengths = sess.run(
-          [model.sampled_tokens, model.episode_lengths])
-      pi_multipliers = [
-          repeat_and_pad(reward_map[tuple(a)], l, max_length)
-          for a, l in zip(sampled_actions, sampled_lengths)]
-      mc_grads_unnormalized, sampled_probs, mc_loss_unnormalized = sess.run(
-          [model.dense_unclipped_grads, model.chosen_probs, model.loss],
-          {actions_ph: sampled_actions,
-           multipliers_ph: pi_multipliers,
-           lengths_ph: sampled_lengths})
-      # Loss is already normalized across the minibatch, so no normalization
-      # is needed.
-      mc_grads = mc_grads_unnormalized
-      mc_loss = mc_loss_unnormalized
-
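The comparison that follows is a generic Monte Carlo sanity check: enumerate a small distribution exactly, then verify that a large-sample average lands close under SMAPE. A toy standalone version with invented probabilities and values:

import numpy as np

rng = np.random.RandomState(0)
probs = np.array([0.2, 0.5, 0.3])   # Exact distribution over 3 outcomes.
values = np.array([1.0, 4.0, 2.0])

exact = np.dot(probs, values)                  # Exact expectation.
samples = rng.choice(3, size=100000, p=probs)
mc = values[samples].mean()                    # Monte Carlo estimate.

smape_error = 2.0 * abs(exact - mc) / (exact + mc)
assert smape_error < 0.05

-    # Make sure true loss and MC loss are similar.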
-      loss_error = smape(true_loss, mc_loss)
-      self.assertTrue(loss_error < 0.15, msg='actual: %s' % loss_error)
-
-      # Check that probs computed for episodes sampled from the model are the same
-      # as the recorded true probs.
-      for i in range(100):
-        acs = tuple(sampled_actions[i].tolist())
-        sampled_prob = np.prod(sampled_probs[i, :sampled_lengths[i]])
-        self.assertTrue(np.isclose(episode_probs_map[acs], sampled_prob))
-
-      # Make sure MC estimates of true probs are close.
-      counter = Counter(tuple(e) for e in sampled_actions)
-      for acs, count in counter.iteritems():
-        mc_prob = count / float(len(sampled_actions))
-        true_prob = episode_probs_map[acs]
-        error = smape(mc_prob, true_prob)
-        self.assertTrue(
-            error < 0.15,
-            msg='actual: %s; count: %s; mc_prob: %s; true_prob: %s'
-            % (error, count, mc_prob, true_prob))
-
-      # Manually recompute MC gradients and make sure they match MC gradients
-      # computed in TF.
-      mc_grads_recompute = [np.zeros(v.shape) for v in model.trainable_variables]
-      for i in range(n):
-        acs = tuple(sampled_actions[i].tolist())
-        for j in range(0, len(mc_grads_recompute)):
-          mc_grads_recompute[j] += grads_map[acs][j]
-      for i in range(0, len(mc_grads_recompute)):
-        self.assertTrue(np.allclose(mc_grads[i], mc_grads_recompute[i] / n))
-
-      # Check angle between gradients as fraction of pi.
-      for index in range(len(mc_grads)):
-        v1 = mc_grads[index].reshape(-1)
-        v2 = exact_grads[index].reshape(-1)
-        # angle = arccos(v1 . v2 / (|v1|*|v2|))
-        angle_rad = np.arccos(
-            np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
-        logging.info('angle / pi: %s', angle_rad / np.pi)
-        angle_frac = angle_rad / np.pi
-        self.assertTrue(angle_frac < 0.02, msg='actual: %s' % angle_frac)
-      # Check norms.
-      for index in range(len(mc_grads)):
-        v1_norm = np.linalg.norm(mc_grads[index].reshape(-1))
-        v2_norm = np.linalg.norm(exact_grads[index].reshape(-1))
-        error = smape(v1_norm, v2_norm)
-        self.assertTrue(error < 0.02, msg='actual: %s' % error)
-
-      # Check expected rewards.
-      # E_a~P[R(a)] approx eq sum(P(a) * R(a) for a in actions)
-      mc_expected_reward = np.mean(
-          [reward_map[tuple(a)] for a in sampled_actions])
-      exact_expected_reward = np.sum(
-          [episode_probs_map[k] * reward_map[k] for k in reward_map])
-      error = smape(mc_expected_reward, exact_expected_reward)
-      self.assertTrue(error < 0.005, msg='actual: %s' % error)
-
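The test below applies the standard central-difference check from that reference: nudge one parameter by plus and minus epsilon and compare (J(theta+e) - J(theta-e)) / (2*epsilon) with the analytic gradient. A NumPy sketch on a toy quadratic loss; all names and values here are invented:

import numpy as np

def loss(theta):
  return 0.5 * np.sum(theta ** 2)  # Analytic gradient is theta itself.

theta = np.array([0.3, -1.2, 2.0])
epsilon = 1e-4
est_grad = np.zeros_like(theta)
for i in range(theta.size):
  e = np.zeros_like(theta)
  e[i] = epsilon
  est_grad[i] = (loss(theta + e) - loss(theta - e)) / (2 * epsilon)

assert np.allclose(est_grad, theta, atol=1e-6)

-  def testNumericalGradChecking(self):
-    # Similar to
-    # http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization.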
- epsilon = 1e-4 - eos = misc.BF_EOS_INT - self.assertEqual(0, eos) - config = defaults.default_config_with_updates( - 'env=c(task="print"),' - 'agent=c(algorithm="pg",optimizer="sgd",lr=1.0,ema_baseline_decay=0.99,' - 'entropy_beta=0.0,topk_loss_hparam=0.0,policy_lstm_sizes=[10],' - 'eos_token=True),' - 'batch_size=64') - dtype = tf.float64 - tf.reset_default_graph() - tf.set_random_seed(12345678987654321) - np.random.seed(1294024302) - trainer = pg_train.AsyncTrainer( - config, task_id=0, ps_tasks=0, num_workers=1, dtype=dtype) - model = trainer.model - actions_ph = model.actions - lengths_ph = model.adjusted_lengths - multipliers_ph = model.policy_multipliers - loss = model.pi_loss - global_init_op = tf.variables_initializer( - tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global')) - - assign_add_placeholders = [None] * len(model.trainable_variables) - assign_add_ops = [None] * len(model.trainable_variables) - param_shapes = [None] * len(model.trainable_variables) - for i, param in enumerate(model.trainable_variables): - param_shapes[i] = param.get_shape().as_list() - assign_add_placeholders[i] = tf.placeholder(dtype, - np.prod(param_shapes[i])) - assign_add_ops[i] = param.assign_add( - tf.reshape(assign_add_placeholders[i], param_shapes[i])) - - with tf.Session() as sess: - sess.run(global_init_op) # Initialize global copy. - trainer.initialize(sess) - - actions_raw = [random_sequence(10, 9) for _ in xrange(16)] - actions_batch = utils.stack_pad(actions_raw, 0) - lengths_batch = [len(l) for l in actions_raw] - feed = {actions_ph: actions_batch, - multipliers_ph: np.ones_like(actions_batch), - lengths_ph: lengths_batch} - - estimated_grads = [None] * len(model.trainable_variables) - for i, param in enumerate(model.trainable_variables): - param_size = np.prod(param_shapes[i]) - estimated_grads[i] = np.zeros(param_size, dtype=np.float64) - for index in xrange(param_size): - e = onehot(index, param_size) * epsilon - sess.run(assign_add_ops[i], - {assign_add_placeholders[i]: e}) - j_plus = sess.run(loss, feed) - sess.run(assign_add_ops[i], - {assign_add_placeholders[i]: -2 * e}) - j_minus = sess.run(loss, feed) - sess.run(assign_add_ops[i], - {assign_add_placeholders[i]: e}) - estimated_grads[i][index] = (j_plus - j_minus) / (2 * epsilon) - estimated_grads[i] = estimated_grads[i].reshape(param_shapes[i]) - - analytic_grads = sess.run(model.dense_unclipped_grads, feed) - - for g1, g2 in zip(estimated_grads[1:], analytic_grads[1:]): - logging.info('norm (g1-g2): %s', np.abs(g1 - g2).mean()) - self.assertTrue(np.allclose(g1, g2)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/single_task/pg_train.py b/research/brain_coder/single_task/pg_train.py deleted file mode 100644 index fde7cc847..000000000 --- a/research/brain_coder/single_task/pg_train.py +++ /dev/null @@ -1,782 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -r"""Train RL agent on coding tasks.""" - -import contextlib -import cPickle -import cProfile -import marshal -import os -import time - -from absl import flags -from absl import logging -import tensorflow as tf - -# internal session lib import - -from single_task import data # brain coder -from single_task import defaults # brain coder -from single_task import pg_agent as agent_lib # brain coder -from single_task import results_lib # brain coder - - -FLAGS = flags.FLAGS -flags.DEFINE_string( - 'master', '', - 'URL of the TensorFlow master to use.') 
-flags.DEFINE_integer(
-    'ps_tasks', 0,
-    'Number of parameter server tasks. Only set to 0 for '
-    'single worker training.')
-flags.DEFINE_integer(
-    'summary_interval', 10,
-    'How often to write summaries.')
-flags.DEFINE_integer(
-    'summary_tasks', 16,
-    'If greater than 0 only tasks 0 through summary_tasks - 1 '
-    'will write summaries. If 0, all tasks will write '
-    'summaries.')
-flags.DEFINE_bool(
-    'stop_on_success', True,
-    'If True, training will stop as soon as a solution is found. '
-    'If False, training will continue indefinitely until another '
-    'stopping condition is reached.')
-flags.DEFINE_bool(
-    'do_profiling', False,
-    'If True, cProfile profiler will run and results will be '
-    'written to logdir. WARNING: Results will not be written if '
-    'the code crashes. Make sure it exits successfully.')
-flags.DEFINE_integer('model_v', 0, 'Model verbosity level.')
-flags.DEFINE_bool(
-    'delayed_graph_cleanup', True,
-    'If true, container for n-th run will not be reset until the (n+1)-th run '
-    'is complete. This greatly reduces the chance that a worker is still '
-    'using the n-th container when it is cleared.')
-
-
-def define_tuner_hparam_space(hparam_space_type):
-  """Define tunable hparams for grid search."""
-  if hparam_space_type not in ('pg', 'pg-topk', 'topk', 'is'):
-    raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
-
-  # Discrete hparam space is stored as a dict from hparam name to discrete
-  # values.
-  hparam_space = {}
-
-  if hparam_space_type in ('pg', 'pg-topk', 'is'):
-    # Add a floating point parameter named learning rate.
-    hparam_space['lr'] = [1e-5, 1e-4, 1e-3]
-    hparam_space['entropy_beta'] = [0.005, 0.01, 0.05, 0.10]
-  else:  # 'topk'
-    # Add a floating point parameter named learning rate.
-    hparam_space['lr'] = [1e-5, 1e-4, 1e-3]
-    hparam_space['entropy_beta'] = [0.0, 0.005, 0.01, 0.05, 0.10]
-
-  if hparam_space_type in ('topk', 'pg-topk'):
-    # topk tuning will be enabled.
-    hparam_space['topk'] = [10]
-    hparam_space['topk_loss_hparam'] = [1.0, 10.0, 50.0, 200.0]
-
-  elif hparam_space_type == 'is':
-    # importance sampling tuning will be enabled.
-    hparam_space['replay_temperature'] = [0.25, 0.5, 1.0, 2.0]
-    hparam_space['alpha'] = [0.5, 0.75, 63/64.]
-
-  return hparam_space
-
-
-def write_hparams_to_config(config, hparams, hparam_space_type):
-  """Write hparams given by the tuner into the Config object."""
-  if hparam_space_type not in ('pg', 'pg-topk', 'topk', 'is'):
-    raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
-
-  config.agent.lr = hparams.lr
-  config.agent.entropy_beta = hparams.entropy_beta
-
-  if hparam_space_type in ('topk', 'pg-topk'):
-    # topk tuning will be enabled.
-    config.agent.topk = hparams.topk
-    config.agent.topk_loss_hparam = hparams.topk_loss_hparam
-  elif hparam_space_type == 'is':
-    # importance sampling tuning will be enabled.
-    config.agent.replay_temperature = hparams.replay_temperature
-    config.agent.alpha = hparams.alpha
-
-
-def make_initialized_variable(value, name, shape=None, dtype=tf.float32):
-  """Create a tf.Variable with a constant initializer.
-
-  Args:
-    value: Constant value to initialize the variable with. This is the value
-      that the variable starts with.
-    name: Name of the variable in the TF graph.
-    shape: Shape of the variable. If None, variable will be a scalar.
-    dtype: Data type of the variable. Should be a TF dtype. Defaults to
-      tf.float32.
-
-  Returns:
-    tf.Variable instance.
- """ - if shape is None: - shape = [] - return tf.get_variable( - name=name, shape=shape, initializer=tf.constant_initializer(value), - dtype=dtype, trainable=False) - - -class AsyncTrainer(object): - """Manages graph creation and training. - - This async trainer creates a global model on the parameter server, and a local - model (for this worker). Gradient updates are sent to the global model, and - the updated weights are synced to the local copy. - """ - - def __init__(self, config, task_id, ps_tasks, num_workers, is_chief=True, - summary_writer=None, - dtype=tf.float32, - summary_interval=1, - run_number=0, - logging_dir='/tmp', model_v=0): - self.config = config - self.data_manager = data.DataManager( - config, run_number=run_number, - do_code_simplification=not FLAGS.stop_on_success) - self.task_id = task_id - self.ps_tasks = ps_tasks - self.is_chief = is_chief - if ps_tasks == 0: - assert task_id == 0, 'No parameter servers specified. Expecting 1 task.' - assert num_workers == 1, ( - 'No parameter servers specified. Expecting 1 task.') - worker_device = '/job:localhost/replica:%d/task:0/cpu:0' % task_id - # worker_device = '/cpu:0' - # ps_device = '/cpu:0' - else: - assert num_workers > 0, 'There must be at least 1 training worker.' - worker_device = '/job:worker/replica:%d/task:0/cpu:0' % task_id - # ps_device = '/job:ps/replica:0/task:0/cpu:0' - logging.info('worker_device: %s', worker_device) - - logging_file = os.path.join( - logging_dir, 'solutions_%d.txt' % task_id) - experience_replay_file = os.path.join( - logging_dir, 'replay_buffer_%d.pickle' % task_id) - self.topk_file = os.path.join( - logging_dir, 'topk_buffer_%d.pickle' % task_id) - - tf.get_variable_scope().set_use_resource(True) - - # global model - with tf.device(tf.train.replica_device_setter(ps_tasks, - ps_device='/job:ps/replica:0', - worker_device=worker_device)): - with tf.variable_scope('global'): - global_model = agent_lib.LMAgent(config, dtype=dtype, is_local=False) - global_params_dict = {p.name: p - for p in global_model.sync_variables} - self.global_model = global_model - self.global_step = make_initialized_variable( - 0, 'global_step', dtype=tf.int64) - - self.global_best_reward = make_initialized_variable( - -10.0, 'global_best_reward', dtype=tf.float64) - self.is_best_model = make_initialized_variable( - False, 'is_best_model', dtype=tf.bool) - self.reset_is_best_model = self.is_best_model.assign(False) - self.global_best_reward_placeholder = tf.placeholder( - tf.float64, [], name='global_best_reward_placeholder') - self.assign_global_best_reward_op = tf.group( - self.global_best_reward.assign( - self.global_best_reward_placeholder), - self.is_best_model.assign(True)) - def assign_global_best_reward_fn(session, reward): - reward = round(reward, 10) - best_reward = round(session.run(self.global_best_reward), 10) - is_best = reward > best_reward - if is_best: - session.run(self.assign_global_best_reward_op, - {self.global_best_reward_placeholder: reward}) - return is_best - self.assign_global_best_reward_fn = assign_global_best_reward_fn - - # Any worker will set to true when it finds a solution. - self.found_solution_flag = make_initialized_variable( - False, 'found_solution_flag', dtype=tf.bool) - self.found_solution_op = self.found_solution_flag.assign(True) - - self.run_number = make_initialized_variable( - run_number, 'run_number', dtype=tf.int32) - - # Store a solution when found. 
- self.code_solution_variable = tf.get_variable( - 'code_solution', [], tf.string, - initializer=tf.constant_initializer('')) - self.code_solution_ph = tf.placeholder( - tf.string, [], name='code_solution_ph') - self.code_solution_assign_op = self.code_solution_variable.assign( - self.code_solution_ph) - def assign_code_solution_fn(session, code_solution_string): - session.run(self.code_solution_assign_op, - {self.code_solution_ph: code_solution_string}) - self.assign_code_solution_fn = assign_code_solution_fn - - # Count all programs sampled from policy. This does not include - # programs sampled from replay buffer. - # This equals NPE (number of programs executed). Only programs sampled - # from the policy need to be executed. - self.program_count = make_initialized_variable( - 0, 'program_count', dtype=tf.int64) - - # local model - with tf.device(worker_device): - with tf.variable_scope('local'): - self.model = model = agent_lib.LMAgent( - config, - task_id=task_id, - logging_file=logging_file, - experience_replay_file=experience_replay_file, - dtype=dtype, - global_best_reward_fn=self.assign_global_best_reward_fn, - found_solution_op=self.found_solution_op, - assign_code_solution_fn=self.assign_code_solution_fn, - program_count=self.program_count, - stop_on_success=FLAGS.stop_on_success, - verbose_level=model_v) - local_params = model.trainable_variables - local_params_dict = {p.name: p for p in local_params} - - # Pull global params to local model. - def _global_to_local_scope(name): - assert name.startswith('global/') - return 'local' + name[6:] - sync_dict = { - local_params_dict[_global_to_local_scope(p_name)]: p - for p_name, p in global_params_dict.items()} - self.sync_op = tf.group(*[v_local.assign(v_global) - for v_local, v_global - in sync_dict.items()]) - - # Pair local gradients with global params. - grad_var_dict = { - gradient: sync_dict[local_var] - for local_var, gradient in model.gradients_dict.items()} - - # local model - model.make_summary_ops() # Don't put summaries under 'local' scope. - with tf.variable_scope('local'): - self.train_op = model.optimizer.apply_gradients( - grad_var_dict.items(), global_step=self.global_step) - self.local_init_op = tf.variables_initializer( - tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, - tf.get_variable_scope().name)) - - self.local_step = 0 - self.last_summary_time = time.time() - self.summary_interval = summary_interval - self.summary_writer = summary_writer - self.cached_global_step = -1 - self.cached_global_npe = -1 - - logging.info('summary_interval: %d', self.summary_interval) - - # Load top-k buffer. - if self.model.top_episodes is not None and tf.gfile.Exists(self.topk_file): - try: - with tf.gfile.FastGFile(self.topk_file, 'r') as f: - self.model.top_episodes = cPickle.loads(f.read()) - logging.info( - 'Loaded top-k buffer from disk with %d items. Location: "%s"', - len(self.model.top_episodes), self.topk_file) - except (cPickle.UnpicklingError, EOFError) as e: - logging.warn( - 'Failed to load existing top-k buffer from disk. Removing bad file.' - '\nLocation: "%s"\nException: %s', self.topk_file, str(e)) - tf.gfile.Remove(self.topk_file) - - def initialize(self, session): - """Run initialization ops.""" - session.run(self.local_init_op) - session.run(self.sync_op) - self.cached_global_step, self.cached_global_npe = session.run( - [self.global_step, self.program_count]) - - def update_global_model(self, session): - """Run an update step. - - 1) Asynchronously copy global weights to local model. 
- 2) Call into local model's update_step method, which does the following: - a) Sample batch of programs from policy. - b) Compute rewards. - c) Compute gradients and update the global model asynchronously. - 3) Write tensorboard summaries to disk. - - Args: - session: tf.Session instance. - """ - session.run(self.sync_op) # Copy weights from global to local. - - with session.as_default(): - result = self.model.update_step( - session, self.data_manager.sample_rl_batch(), self.train_op, - self.global_step) - global_step = result.global_step - global_npe = result.global_npe - summaries = result.summaries_list - self.cached_global_step = global_step - self.cached_global_npe = global_npe - self.local_step += 1 - - if self.summary_writer and self.local_step % self.summary_interval == 0: - if not isinstance(summaries, (tuple, list)): - summaries = [summaries] - summaries.append(self._local_step_summary()) - if self.is_chief: - (global_best_reward, - found_solution_flag, - program_count) = session.run( - [self.global_best_reward, - self.found_solution_flag, - self.program_count]) - summaries.append( - tf.Summary( - value=[tf.Summary.Value( - tag='model/best_reward', - simple_value=global_best_reward)])) - summaries.append( - tf.Summary( - value=[tf.Summary.Value( - tag='model/solution_found', - simple_value=int(found_solution_flag))])) - summaries.append( - tf.Summary( - value=[tf.Summary.Value( - tag='model/program_count', - simple_value=program_count)])) - for s in summaries: - self.summary_writer.add_summary(s, global_step) - self.last_summary_time = time.time() - - def _local_step_summary(self): - """Compute number of local steps per time increment.""" - dt = time.time() - self.last_summary_time - steps_per_time = self.summary_interval / float(dt) - return tf.Summary(value=[ - tf.Summary.Value( - tag='local_step/per_sec', - simple_value=steps_per_time), - tf.Summary.Value( - tag='local_step/step', - simple_value=self.local_step)]) - - def maybe_save_best_model(self, session, saver, checkpoint_file): - """Check if this model got the highest reward and save to disk if so.""" - if self.is_chief and session.run(self.is_best_model): - logging.info('Saving best model to "%s"', checkpoint_file) - saver.save(session, checkpoint_file) - session.run(self.reset_is_best_model) - - def save_replay_buffer(self): - """Save replay buffer to disk. - - Call this periodically so that training can recover if jobs go down. - """ - if self.model.experience_replay is not None: - logging.info('Saving experience replay buffer to "%s".', - self.model.experience_replay.save_file) - self.model.experience_replay.incremental_save(True) - - def delete_replay_buffer(self): - """Delete replay buffer from disk. - - Call this at the end of training to clean up. Replay buffer can get very - large. - """ - if self.model.experience_replay is not None: - logging.info('Deleting experience replay buffer at "%s".', - self.model.experience_replay.save_file) - tf.gfile.Remove(self.model.experience_replay.save_file) - - def save_topk_buffer(self): - """Save top-k buffer to disk. - - Call this periodically so that training can recover if jobs go down. - """ - if self.model.top_episodes is not None: - logging.info('Saving top-k buffer to "%s".', self.topk_file) - # Overwrite previous data each time. 
-    with tf.gfile.FastGFile(self.topk_file, 'w') as f:
-      f.write(cPickle.dumps(self.model.top_episodes))
-
-
-@contextlib.contextmanager
-def managed_session(sv, master='', config=None,
-                    start_standard_services=True,
-                    close_summary_writer=True,
-                    max_wait_secs=7200):
-  # Same as Supervisor.managed_session, but with configurable timeout.
-  try:
-    sess = sv.prepare_or_wait_for_session(
-        master=master, config=config,
-        start_standard_services=start_standard_services,
-        max_wait_secs=max_wait_secs)
-    yield sess
-  except tf.errors.DeadlineExceededError:
-    raise
-  except Exception as e:  # pylint: disable=broad-except
-    sv.request_stop(e)
-  finally:
-    try:
-      # Request all the threads to stop and wait for them to do so. Any
-      # exception raised by the threads is raised again from stop().
-      # Passing stop_grace_period_secs is for blocked enqueue/dequeue
-      # threads which are not checking for `should_stop()`. They
-      # will be stopped when we close the session further down.
-      sv.stop(close_summary_writer=close_summary_writer)
-    finally:
-      # Close the session to finish up all pending calls. We do not care
-      # about exceptions raised when closing. This takes care of
-      # blocked enqueue/dequeue calls.
-      try:
-        sess.close()
-      except Exception:  # pylint: disable=broad-except
-        # Silently ignore exceptions raised by close().
-        pass
-
-
-def train(config, is_chief, tuner=None, run_dir=None, run_number=0,
-          results_writer=None):
-  """Run training loop.
-
-  Args:
-    config: config_lib.Config instance containing global config (agent and env).
-    is_chief: True if this worker is chief. Chief worker manages writing some
-      data to disk and initialization of the global model.
-    tuner: A tuner instance. If not tuning, leave as None.
-    run_dir: Directory where all data for this run will be written. If None,
-      run_dir = FLAGS.logdir. Set this argument when doing multiple runs.
-    run_number: Which run this is.
-    results_writer: Manages writing training results to disk. Results are a
-      dict of metric names and values.
-
-  Returns:
-    The trainer object used to run training updates.
-  """
-  logging.info('Will run asynchronous training.')
-
-  if run_dir is None:
-    run_dir = FLAGS.logdir
-  train_dir = os.path.join(run_dir, 'train')
-  best_model_checkpoint = os.path.join(train_dir, 'best.ckpt')
-  events_dir = '%s/events_%d' % (run_dir, FLAGS.task_id)
-  logging.info('Events directory: %s', events_dir)
-
-  logging_dir = os.path.join(run_dir, 'logs')
-  if not tf.gfile.Exists(logging_dir):
-    tf.gfile.MakeDirs(logging_dir)
-  status_file = os.path.join(logging_dir, 'status.txt')
-
-  if FLAGS.summary_tasks and FLAGS.task_id < FLAGS.summary_tasks:
-    summary_writer = tf.summary.FileWriter(events_dir)
-  else:
-    summary_writer = None
-
-  # Only profile task 0.
-  if FLAGS.do_profiling:
-    logging.info('Profiling enabled')
-    profiler = cProfile.Profile()
-    profiler.enable()
-  else:
-    profiler = None
-
-  trainer = AsyncTrainer(
-      config, FLAGS.task_id, FLAGS.ps_tasks, FLAGS.num_workers,
-      is_chief=is_chief,
-      summary_interval=FLAGS.summary_interval,
-      summary_writer=summary_writer,
-      logging_dir=logging_dir,
-      run_number=run_number,
-      model_v=FLAGS.model_v)
-
-  variables_to_save = [v for v in tf.global_variables()
-                       if v.name.startswith('global')]
-  global_init_op = tf.variables_initializer(variables_to_save)
-  saver = tf.train.Saver(variables_to_save)
-
-  var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
-                               tf.get_variable_scope().name)
-  logging.info('Trainable vars:')
-  for v in var_list:
-    logging.info('  %s, %s, %s', v.name, v.device, v.get_shape())
-
-  logging.info('All vars:')
-  for v in tf.global_variables():
-    logging.info('  %s, %s, %s', v.name, v.device, v.get_shape())
-
-  def init_fn(unused_sess):
-    logging.info('No checkpoint found. Initialized global params.')
-
-  sv = tf.train.Supervisor(is_chief=is_chief,
-                           logdir=train_dir,
-                           saver=saver,
-                           summary_op=None,
-                           init_op=global_init_op,
-                           init_fn=init_fn,
-                           summary_writer=summary_writer,
-                           ready_op=tf.report_uninitialized_variables(
-                               variables_to_save),
-                           ready_for_local_init_op=None,
-                           global_step=trainer.global_step,
-                           save_model_secs=30,
-                           save_summaries_secs=30)
-
-  # Add a thread that periodically checks if this Trial should stop
-  # based on an early stopping policy.
-  if tuner:
-    sv.Loop(60, tuner.check_for_stop, (sv.coord,))
-
-  last_replay_save_time = time.time()
-
-  global_step = -1
-  logging.info(
-      'Starting session. '
-      'If this hangs, we\'re most likely waiting to connect '
-      'to the parameter server. One common cause is that the parameter '
-      'server DNS name isn\'t resolving yet, or is misspecified.')
-  should_retry = True
-  supervisor_deadline_exceeded = False
-  while should_retry:
-    try:
-      with managed_session(
-          sv, FLAGS.master, max_wait_secs=60) as session, session.as_default():
-        should_retry = False
-        do_training = True
-
-        try:
-          trainer.initialize(session)
-          if session.run(trainer.run_number) != run_number:
-            # If we loaded existing model from disk, and the saved run number is
-            # different, throw an exception.
-            raise RuntimeError(
-                'Expecting to be on run %d, but is actually on run %d. '
-                'run_dir: "%s"'
-                % (run_number, session.run(trainer.run_number), run_dir))
-          global_step = trainer.cached_global_step
-          logging.info('Starting training at step=%d', global_step)
-          while do_training:
-            trainer.update_global_model(session)
-
-            if is_chief:
-              trainer.maybe_save_best_model(
-                  session, saver, best_model_checkpoint)
-            global_step = trainer.cached_global_step
-            global_npe = trainer.cached_global_npe
-
-            if time.time() - last_replay_save_time >= 30:
-              trainer.save_replay_buffer()
-              trainer.save_topk_buffer()
-              last_replay_save_time = time.time()
-
-            # Stopping conditions.
-            if tuner and tuner.should_trial_stop():
-              logging.info('Tuner requested early stopping. Finishing.')
-              do_training = False
-            if is_chief and FLAGS.stop_on_success:
-              found_solution = session.run(trainer.found_solution_flag)
-              if found_solution:
-                do_training = False
-                logging.info('Solution found. Finishing.')
-            if FLAGS.max_npe and global_npe >= FLAGS.max_npe:
-              # Max NPE (number of programs executed) reached.
-              logging.info('Max NPE reached. Finishing.')
-              do_training = False
-            if sv.should_stop():
-              logging.info('Supervisor issued stop. 
Finishing.')
-              do_training = False
-
-        except tf.errors.NotFoundError:
-          # Catch "Error while reading resource variable".
-          # The chief worker likely destroyed the container, so do not retry.
-          logging.info('Caught NotFoundError. Quitting.')
-          do_training = False
-          should_retry = False
-          break
-        except tf.errors.InternalError as e:
-          # Catch "Invalid variable reference."
-          if str(e).startswith('Invalid variable reference.'):
-            # The chief worker likely destroyed the container, so do not
-            # retry.
-            logging.info(
-                'Caught "InternalError: Invalid variable reference.". '
-                'Quitting.')
-            do_training = False
-            should_retry = False
-            break
-          else:
-            # Pass exception through.
-            raise
-
-        # Exited training loop. Write results to disk.
-        if is_chief and results_writer:
-          assert not should_retry
-          with tf.gfile.FastGFile(status_file, 'w') as f:
-            f.write('done')
-          (program_count,
-           found_solution,
-           code_solution,
-           best_reward,
-           global_step) = session.run(
-               [trainer.program_count,
-                trainer.found_solution_flag,
-                trainer.code_solution_variable,
-                trainer.global_best_reward,
-                trainer.global_step])
-          results_dict = {
-              'max_npe': FLAGS.max_npe,
-              'batch_size': config.batch_size,
-              'max_batches': FLAGS.max_npe // config.batch_size,
-              'npe': program_count,
-              'max_global_repetitions': FLAGS.num_repetitions,
-              'max_local_repetitions': FLAGS.num_repetitions,
-              'code_solution': code_solution,
-              'best_reward': best_reward,
-              'num_batches': global_step,
-              'found_solution': found_solution,
-              'task': trainer.data_manager.task_name,
-              'global_rep': run_number}
-          logging.info('results_dict: %s', results_dict)
-          results_writer.append(results_dict)
-
-    except tf.errors.AbortedError:
-      # Catch "Graph handle is not found" error due to preempted jobs.
-      logging.info('Caught AbortedError. Retrying.')
-      should_retry = True
-    except tf.errors.DeadlineExceededError:
-      supervisor_deadline_exceeded = True
-      should_retry = False
-
-  if is_chief:
-    logging.info('This is chief worker. Stopping all workers.')
-    sv.stop()
-
-  if supervisor_deadline_exceeded:
-    logging.info('Supervisor timed out. Quitting.')
-  else:
-    logging.info('Reached %s steps. Worker stopped.', global_step)
-
-  # Dump profiling.
-  """
-  How to use profiling data.
-
-  Download the profiler dump to your local machine, say to PROF_FILE_PATH.
-  In a separate script, run something like the following:
-
-    import pstats
-    p = pstats.Stats(PROF_FILE_PATH)
-    p.strip_dirs().sort_stats('cumtime').print_stats()
-
-  This will sort by 'cumtime', which "is the cumulative time spent in this and
-  all subfunctions (from invocation till exit)."
-  https://docs.python.org/2/library/profile.html#instant-user-s-manual
-  """  # pylint: disable=pointless-string-statement
-  if profiler:
-    prof_file = os.path.join(run_dir, 'task_%d.prof' % FLAGS.task_id)
-    logging.info('Done profiling.\nDumping to "%s".', prof_file)
-    profiler.create_stats()
-    with tf.gfile.Open(prof_file, 'w') as f:
-      f.write(marshal.dumps(profiler.stats))
-
-  return trainer
-
-
-def run_training(config=None, tuner=None, logdir=None, trial_name=None,
-                 is_chief=True):
-  """Do all training runs.
-
-  This is the top level training function for policy gradient based models.
-  Run this from the main function.
-
-  Args:
-    config: config_lib.Config instance containing global config (agent and
-      environment hparams). If None, config will be parsed from FLAGS.config.
-    tuner: A tuner instance. Leave as None if not tuning.
-    logdir: Parent directory where all data from all runs will be written. 
If
-      None, FLAGS.logdir will be used.
-    trial_name: If tuning, set this to a unique string that identifies this
-      trial. If `tuner` is not None, this also must be set.
-    is_chief: True if this worker is the chief.
-
-  Returns:
-    List of results dicts which were written to disk. Each training run gets a
-    results dict. Results dict contains metrics, i.e. (name, value) pairs which
-    give information about the training run.
-
-  Raises:
-    ValueError: If results dicts read from disk contain invalid data.
-  """
-  if not config:
-    # If custom config is not given, get it from flags.
-    config = defaults.default_config_with_updates(FLAGS.config)
-  if not logdir:
-    logdir = FLAGS.logdir
-  if not tf.gfile.Exists(logdir):
-    tf.gfile.MakeDirs(logdir)
-  assert FLAGS.num_repetitions > 0
-  results = results_lib.Results(logdir)
-  results_list, _ = results.read_all()
-
-  logging.info('Starting experiment. Directory: "%s"', logdir)
-
-  if results_list:
-    if results_list[0]['max_npe'] != FLAGS.max_npe:
-      raise ValueError(
-          'Cannot resume training. Max-NPE changed. Was %s, now %s'
-          % (results_list[0]['max_npe'], FLAGS.max_npe))
-    if results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions:
-      raise ValueError(
-          'Cannot resume training. Number of repetitions changed. Was %s, '
-          'now %s'
-          % (results_list[0]['max_global_repetitions'],
-             FLAGS.num_repetitions))
-
-  while len(results_list) < FLAGS.num_repetitions:
-    run_number = len(results_list)
-    rep_container_name = trial_name if trial_name else 'container'
-    if FLAGS.num_repetitions > 1:
-      rep_dir = os.path.join(logdir, 'run_%d' % run_number)
-      rep_container_name = rep_container_name + '_run_' + str(run_number)
-    else:
-      rep_dir = logdir
-
-    logging.info(
-        'Starting repetition %d (%d out of %d)', run_number, run_number + 1,
-        FLAGS.num_repetitions)
-
-    # Train will write result to disk.
-    with tf.container(rep_container_name):
-      trainer = train(config, is_chief, tuner, rep_dir, run_number, results)
-    logging.info('Done training.')
-
-    if is_chief:
-      # Destroy current container immediately (clears current graph).
-      logging.info('Clearing shared variables.')
-      tf.Session.reset(FLAGS.master, containers=[rep_container_name])
-      logging.info('Shared variables cleared.')
-
-      # Delete replay buffer on disk.
-      assert trainer
-      trainer.delete_replay_buffer()
-    else:
-      # Give chief worker time to clean up.
-      sleep_sec = 30.0
-      logging.info('Sleeping for %s sec.', sleep_sec)
-      time.sleep(sleep_sec)
-    tf.reset_default_graph()
-    logging.info('Default graph reset.')
-
-    # Expecting that train wrote new result to disk before returning.
-    results_list, _ = results.read_all()
-  return results_list
diff --git a/research/brain_coder/single_task/pg_train_test.py b/research/brain_coder/single_task/pg_train_test.py
deleted file mode 100644
index 0a562e533..000000000
--- a/research/brain_coder/single_task/pg_train_test.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tests for pg_train.
-
-These tests exercise code paths available through configuration options.
-Training will be run for just a few steps with the goal being to check that
-nothing crashes.
-"""
-
-from absl import flags
-import tensorflow as tf
-
-from single_task import defaults  # brain coder
-from single_task import run  # brain coder
-
-FLAGS = flags.FLAGS
-
-
-class TrainTest(tf.test.TestCase):
-
-  def RunTrainingSteps(self, config_string, num_steps=10):
-    """Run a few training steps with the given config. 
- - Just check that nothing crashes. - - Args: - config_string: Config encoded in a string. See - $REPO_PATH/common/config_lib.py - num_steps: Number of training steps to run. Defaults to 10. - """ - config = defaults.default_config_with_updates(config_string) - FLAGS.master = '' - FLAGS.max_npe = num_steps * config.batch_size - FLAGS.summary_interval = 1 - FLAGS.logdir = tf.test.get_temp_dir() - FLAGS.config = config_string - tf.reset_default_graph() - run.main(None) - - def testVanillaPolicyGradient(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg"),' - 'timestep_limit=90,batch_size=64') - - def testVanillaPolicyGradient_VariableLengthSequences(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",eos_token=False),' - 'timestep_limit=90,batch_size=64') - - def testVanillaActorCritic(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",ema_baseline_decay=0.0),' - 'timestep_limit=90,batch_size=64') - - def testPolicyGradientWithTopK(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",topk_loss_hparam=1.0,topk=10),' - 'timestep_limit=90,batch_size=64') - - def testVanillaActorCriticWithTopK(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",ema_baseline_decay=0.0,topk_loss_hparam=1.0,' - 'topk=10),' - 'timestep_limit=90,batch_size=64') - - def testPolicyGradientWithTopK_VariableLengthSequences(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",topk_loss_hparam=1.0,topk=10,eos_token=False),' - 'timestep_limit=90,batch_size=64') - - def testPolicyGradientWithImportanceSampling(self): - self.RunTrainingSteps( - 'env=c(task="reverse"),' - 'agent=c(algorithm="pg",alpha=0.5),' - 'timestep_limit=90,batch_size=64') - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/single_task/results_lib.py b/research/brain_coder/single_task/results_lib.py deleted file mode 100644 index fd28fdd49..000000000 --- a/research/brain_coder/single_task/results_lib.py +++ /dev/null @@ -1,155 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Results object manages distributed reading and writing of results to disk.""" - -import ast -from collections import namedtuple -import os -import re -from six.moves import xrange -import tensorflow as tf - - -ShardStats = namedtuple( - 'ShardStats', - ['num_local_reps_completed', 'max_local_reps', 'finished']) - - -def ge_non_zero(a, b): - return a >= b and b > 0 - - -def get_shard_id(file_name): - assert file_name[-4:].lower() == '.txt' - return int(file_name[file_name.rfind('_') + 1: -4]) - - -class Results(object): - """Manages reading and writing training results to disk asynchronously. - - Each worker writes to its own file, so that there are no race conditions when - writing happens. However any worker may read any file, as is the case for - `read_all`. Writes are expected to be atomic so that workers will never - read incomplete data, and this is likely to be the case on Unix systems. - Reading out of date data is fine, as workers calling `read_all` will wait - until data from every worker has been written before proceeding. - """ - file_template = 'experiment_results_{0}.txt' - search_regex = r'^experiment_results_([0-9])+\.txt$' - - def __init__(self, log_dir, shard_id=0): - """Construct `Results` instance. - - Args: - log_dir: Where to write results files. 
- shard_id: Unique id for this file (i.e. shard). Each worker that will - be writing results should use a different shard id. If there are - N shards, each shard should be numbered 0 through N-1. - """ - # Use different files for workers so that they can write to disk async. - assert 0 <= shard_id - self.file_name = self.file_template.format(shard_id) - self.log_dir = log_dir - self.results_file = os.path.join(self.log_dir, self.file_name) - - def append(self, metrics): - """Append results to results list on disk.""" - with tf.gfile.FastGFile(self.results_file, 'a') as writer: - writer.write(str(metrics) + '\n') - - def read_this_shard(self): - """Read only from this shard.""" - return self._read_shard(self.results_file) - - def _read_shard(self, results_file): - """Read only from the given shard file.""" - try: - with tf.gfile.FastGFile(results_file, 'r') as reader: - results = [ast.literal_eval(entry) for entry in reader] - except tf.errors.NotFoundError: - # No results written to disk yet. Return empty list. - return [] - return results - - def _get_max_local_reps(self, shard_results): - """Get maximum number of repetitions the given shard needs to complete. - - Worker working on each shard needs to complete a certain number of runs - before it finishes. This method will return that number so that we can - determine which shards are still not done. - - We assume that workers are including a 'max_local_repetitions' value in - their results, which should be the total number of repetitions it needs to - run. - - Args: - shard_results: Dict mapping metric names to values. This should be read - from a shard on disk. - - Returns: - Maximum number of repetitions the given shard needs to complete. - """ - mlrs = [r['max_local_repetitions'] for r in shard_results] - if not mlrs: - return 0 - for n in mlrs[1:]: - assert n == mlrs[0], 'Some reps have different max rep.' - return mlrs[0] - - def read_all(self, num_shards=None): - """Read results across all shards, i.e. get global results list. - - Args: - num_shards: (optional) specifies total number of shards. If the caller - wants information about which shards are incomplete, provide this - argument (so that shards which have yet to be created are still - counted as incomplete shards). Otherwise, no information about - incomplete shards will be returned. - - Returns: - aggregate: Global list of results (across all shards). - shard_stats: List of ShardStats instances, one for each shard. Or None if - `num_shards` is None. - """ - try: - all_children = tf.gfile.ListDirectory(self.log_dir) - except tf.errors.NotFoundError: - if num_shards is None: - return [], None - return [], [[] for _ in xrange(num_shards)] - shard_ids = { - get_shard_id(fname): fname - for fname in all_children if re.search(self.search_regex, fname)} - - if num_shards is None: - aggregate = [] - shard_stats = None - for results_file in shard_ids.values(): - aggregate.extend(self._read_shard( - os.path.join(self.log_dir, results_file))) - else: - results_per_shard = [None] * num_shards - for shard_id in xrange(num_shards): - if shard_id in shard_ids: - results_file = shard_ids[shard_id] - results_per_shard[shard_id] = self._read_shard( - os.path.join(self.log_dir, results_file)) - else: - results_per_shard[shard_id] = [] - - # Compute shard stats. 
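-      # A shard counts as "finished" once it has written at least as many
-      # results as its own max_local_repetitions (see ge_non_zero above);
-      # callers of read_all can use this to wait on incomplete shards.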
-      shard_stats = []
-      for shard_results in results_per_shard:
-        max_local_reps = self._get_max_local_reps(shard_results)
-        shard_stats.append(ShardStats(
-            num_local_reps_completed=len(shard_results),
-            max_local_reps=max_local_reps,
-            finished=ge_non_zero(len(shard_results), max_local_reps)))
-
-      # Compute aggregate.
-      aggregate = [
-          r for shard_results in results_per_shard for r in shard_results]
-
-    return aggregate, shard_stats
diff --git a/research/brain_coder/single_task/results_lib_test.py b/research/brain_coder/single_task/results_lib_test.py
deleted file mode 100644
index 6fe838d74..000000000
--- a/research/brain_coder/single_task/results_lib_test.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tests for results_lib."""
-
-import contextlib
-import os
-import shutil
-import tempfile
-from six.moves import xrange
-import tensorflow as tf
-
-from single_task import results_lib  # brain coder
-
-
-@contextlib.contextmanager
-def temporary_directory(suffix='', prefix='tmp', base_path=None):
-  """A context manager to create a temporary directory and clean up on exit.
-
-  The parameters are the same ones expected by tempfile.mkdtemp.
-  The directory will be securely and atomically created.
-  Everything under it will be removed when exiting the context.
-
-  Args:
-    suffix: optional suffix.
-    prefix: optional prefix.
-    base_path: the base path under which to create the temporary directory.
-  Yields:
-    The absolute path of the new temporary directory.
-  """
-  temp_dir_path = tempfile.mkdtemp(suffix, prefix, base_path)
-  try:
-    yield temp_dir_path
-  finally:
-    try:
-      shutil.rmtree(temp_dir_path)
-    except OSError as e:
-      if e.message == 'Cannot call rmtree on a symbolic link':
-        # Interesting synthetic exception made up by shutil.rmtree.
-        # Means we received a symlink from mkdtemp.
-        # Also means must clean up the symlink instead.
-        os.unlink(temp_dir_path)
-      else:
-        raise
-
-
-def freeze(dictionary):
-  """Convert dict to hashable frozenset."""
-  return frozenset(dictionary.iteritems())
-
-
-class ResultsLibTest(tf.test.TestCase):
-
-  def testResults(self):
-    with temporary_directory() as logdir:
-      results_obj = results_lib.Results(logdir)
-      self.assertEqual(results_obj.read_this_shard(), [])
-      results_obj.append(
-          {'foo': 1.5, 'bar': 2.5, 'baz': 0})
-      results_obj.append(
-          {'foo': 5.5, 'bar': -1, 'baz': 2})
-      self.assertEqual(
-          results_obj.read_this_shard(),
-          [{'foo': 1.5, 'bar': 2.5, 'baz': 0},
-           {'foo': 5.5, 'bar': -1, 'baz': 2}])
-
-  def testShardedResults(self):
-    with temporary_directory() as logdir:
-      n = 4  # Number of shards.
-      results_objs = [
-          results_lib.Results(logdir, shard_id=i) for i in xrange(n)]
-      for i, robj in enumerate(results_objs):
-        robj.append({'foo': i, 'bar': 1 + i * 2})
-      results_list, _ = results_objs[0].read_all()
-
-      # Check results. Order does not matter here.
-      self.assertEqual(
-          set(freeze(r) for r in results_list),
-          set(freeze({'foo': i, 'bar': 1 + i * 2}) for i in xrange(n)))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/brain_coder/single_task/run.py b/research/brain_coder/single_task/run.py
deleted file mode 100644
index 9d8f37c97..000000000
--- a/research/brain_coder/single_task/run.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-r"""Run training.
-
-Choose training algorithm and task(s) and follow these examples.
- -Run synchronous policy gradient training locally: - -CONFIG="agent=c(algorithm='pg'),env=c(task='reverse')" -OUT_DIR="/tmp/bf_pg_local" -rm -rf $OUT_DIR -bazel run -c opt single_task:run -- \ - --alsologtostderr \ - --config="$CONFIG" \ - --max_npe=0 \ - --logdir="$OUT_DIR" \ - --summary_interval=1 \ - --model_v=0 -learning/brain/tensorboard/tensorboard.sh --port 12345 --logdir "$OUT_DIR" - - -Run genetic algorithm locally: - -CONFIG="agent=c(algorithm='ga'),env=c(task='reverse')" -OUT_DIR="/tmp/bf_ga_local" -rm -rf $OUT_DIR -bazel run -c opt single_task:run -- \ - --alsologtostderr \ - --config="$CONFIG" \ - --max_npe=0 \ - --logdir="$OUT_DIR" - - -Run uniform random search locally: - -CONFIG="agent=c(algorithm='rand'),env=c(task='reverse')" -OUT_DIR="/tmp/bf_rand_local" -rm -rf $OUT_DIR -bazel run -c opt single_task:run -- \ - --alsologtostderr \ - --config="$CONFIG" \ - --max_npe=0 \ - --logdir="$OUT_DIR" -""" - -from absl import app -from absl import flags -from absl import logging - -from single_task import defaults # brain coder -from single_task import ga_train # brain coder -from single_task import pg_train # brain coder - -FLAGS = flags.FLAGS -flags.DEFINE_string('config', '', 'Configuration.') -flags.DEFINE_string( - 'logdir', None, 'Absolute path where to write results.') -flags.DEFINE_integer('task_id', 0, 'ID for this worker.') -flags.DEFINE_integer('num_workers', 1, 'How many workers there are.') -flags.DEFINE_integer( - 'max_npe', 0, - 'NPE = number of programs executed. Maximum number of programs to execute ' - 'in each run. Training will complete when this threshold is reached. Set ' - 'to 0 for unlimited training.') -flags.DEFINE_integer( - 'num_repetitions', 1, - 'Number of times the same experiment will be run (globally across all ' - 'workers). Each run is independent.') -flags.DEFINE_string( - 'log_level', 'INFO', - 'The threshold for what messages will be logged. One of DEBUG, INFO, WARN, ' - 'ERROR, or FATAL.') - - -# To register an algorithm: -# 1) Add dependency in the BUILD file to this build rule. -# 2) Import the algorithm's module at the top of this file. -# 3) Add a new entry in the following dict. The key is the algorithm name -# (used to select the algorithm in the config). The value is the module -# defining the expected functions for training and tuning. See the docstring -# for `get_namespace` for further details. -ALGORITHM_REGISTRATION = { - 'pg': pg_train, - 'ga': ga_train, - 'rand': ga_train, -} - - -def get_namespace(config_string): - """Get namespace for the selected algorithm. - - Users who want to add additional algorithm types should modify this function. - The algorithm's namespace should contain the following functions: - run_training: Run the main training loop. - define_tuner_hparam_space: Return the hparam tuning space for the algo. - write_hparams_to_config: Helper for tuning. Write hparams chosen for tuning - to the Config object. - Look at pg_train.py and ga_train.py for function signatures and - implementations. - - Args: - config_string: String representation of a Config object. This will get - parsed into a Config in order to determine what algorithm to use. - - Returns: - algorithm_namespace: The module corresponding to the algorithm given in the - config. - config: The Config object resulting from parsing `config_string`. - - Raises: - ValueError: If config.agent.algorithm is not one of the registered - algorithms. 
- """ - config = defaults.default_config_with_updates(config_string) - if config.agent.algorithm not in ALGORITHM_REGISTRATION: - raise ValueError('Unknown algorithm type "%s"' % (config.agent.algorithm,)) - else: - return ALGORITHM_REGISTRATION[config.agent.algorithm], config - - -def main(argv): - del argv # Unused. - - logging.set_verbosity(FLAGS.log_level) - - flags.mark_flag_as_required('logdir') - if FLAGS.num_workers <= 0: - raise ValueError('num_workers flag must be greater than 0.') - if FLAGS.task_id < 0: - raise ValueError('task_id flag must be greater than or equal to 0.') - if FLAGS.task_id >= FLAGS.num_workers: - raise ValueError( - 'task_id flag must be strictly less than num_workers flag.') - - ns, _ = get_namespace(FLAGS.config) - ns.run_training(is_chief=FLAGS.task_id == 0) - - -if __name__ == '__main__': - app.run(main) diff --git a/research/brain_coder/single_task/run_eval_tasks.py b/research/brain_coder/single_task/run_eval_tasks.py deleted file mode 100755 index eb684c344..000000000 --- a/research/brain_coder/single_task/run_eval_tasks.py +++ /dev/null @@ -1,296 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function - -r"""This script can launch any eval experiments from the paper. - -This is a script. Run with python, not bazel. - -Usage: -./single_task/run_eval_tasks.py \ - --exp EXP --desc DESC [--tuning_tasks] [--iclr_tasks] [--task TASK] \ - [--tasks TASK1 TASK2 ...] - -where EXP is one of the keys in `experiments`, -and DESC is a string description of the set of experiments (such as "v0") - -Set only one of these flags: ---tuning_tasks flag only runs tuning tasks. ---iclr_tasks flag only runs the tasks included in the paper. ---regression_tests flag runs tasks which function as regression tests. ---task flag manually selects a single task to run. ---tasks flag takes a custom list of tasks. - -Other flags: ---reps N specifies N repetitions per experiment, Default is 25. ---training_replicas R specifies that R workers will be launched to train one - task (for neural network algorithms). These workers will update a global - model stored on a parameter server. Defaults to 1. If R > 1, a parameter - server will also be launched. - - -Run everything: -exps=( pg-20M pg-topk-20M topk-20M ga-20M rand-20M ) -BIN_DIR="single_task" -for exp in "${exps[@]}" -do - ./$BIN_DIR/run_eval_tasks.py \ - --exp "$exp" --iclr_tasks -done -""" - -import argparse -from collections import namedtuple -import subprocess - - -S = namedtuple('S', ['length']) -default_length = 100 - - -iclr_tasks = [ - 'reverse', 'remove-char', 'count-char', 'add', 'bool-logic', 'print-hello', - 'echo-twice', 'echo-thrice', 'copy-reverse', 'zero-cascade', 'cascade', - 'shift-left', 'shift-right', 'riffle', 'unriffle', 'middle-char', - 'remove-last', 'remove-last-two', 'echo-alternating', 'echo-half', 'length', - 'echo-second-seq', 'echo-nth-seq', 'substring', 'divide-2', 'dedup'] - - -regression_test_tasks = ['reverse', 'test-hill-climb'] - - -E = namedtuple( - 'E', - ['name', 'method_type', 'config', 'simplify', 'batch_size', 'max_npe']) - - -def make_experiment_settings(name, **kwargs): - # Unpack experiment info from name. 
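-  # e.g. name='pg-topk-20M' splits into method_type='pg-topk' and
-  # max_npe=si_to_int('20M') == 20000000.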
- def split_last(string, char): - i = string.rindex(char) - return string[:i], string[i+1:] - def si_to_int(si_string): - return int( - si_string.upper().replace('K', '0'*3).replace('M', '0'*6) - .replace('G', '0'*9)) - method_type, max_npe = split_last(name, '-') - assert method_type - assert max_npe - return E( - name=name, method_type=method_type, max_npe=si_to_int(max_npe), **kwargs) - - -experiments_set = { - make_experiment_settings( - 'pg-20M', - config='entropy_beta=0.05,lr=0.0001,topk_loss_hparam=0.0,topk=0,' - 'pi_loss_hparam=1.0,alpha=0.0', - simplify=False, - batch_size=64), - make_experiment_settings( - 'pg-topk-20M', - config='entropy_beta=0.01,lr=0.0001,topk_loss_hparam=50.0,topk=10,' - 'pi_loss_hparam=1.0,alpha=0.0', - simplify=False, - batch_size=64), - make_experiment_settings( - 'topk-20M', - config='entropy_beta=0.01,lr=0.0001,topk_loss_hparam=200.0,topk=10,' - 'pi_loss_hparam=0.0,alpha=0.0', - simplify=False, - batch_size=64), - make_experiment_settings( - 'topk-0ent-20M', - config='entropy_beta=0.000,lr=0.0001,topk_loss_hparam=200.0,topk=10,' - 'pi_loss_hparam=0.0,alpha=0.0', - simplify=False, - batch_size=64), - make_experiment_settings( - 'ga-20M', - config='crossover_rate=0.95,mutation_rate=0.15', - simplify=False, - batch_size=100), # Population size. - make_experiment_settings( - 'rand-20M', - config='', - simplify=False, - batch_size=1), - make_experiment_settings( - 'simpl-500M', - config='entropy_beta=0.05,lr=0.0001,topk_loss_hparam=0.5,topk=10,' - 'pi_loss_hparam=1.0,alpha=0.0', - simplify=True, - batch_size=64), -} - -experiments = {e.name: e for e in experiments_set} - - -# pylint: disable=redefined-outer-name -def parse_args(extra_args=()): - """Parse arguments and extract task and experiment info.""" - parser = argparse.ArgumentParser(description='Run all eval tasks.') - parser.add_argument('--exp', required=True) - parser.add_argument('--tuning_tasks', action='store_true') - parser.add_argument('--iclr_tasks', action='store_true') - parser.add_argument('--regression_tests', action='store_true') - parser.add_argument('--desc', default='v0') - parser.add_argument('--reps', default=25) - parser.add_argument('--task') - parser.add_argument('--tasks', nargs='+') - for arg_string, default in extra_args: - parser.add_argument(arg_string, default=default) - args = parser.parse_args() - - print('Running experiment: %s' % (args.exp,)) - if args.desc: - print('Extra description: "%s"' % (args.desc,)) - if args.exp not in experiments: - raise ValueError('Experiment name is not valid') - experiment_name = args.exp - experiment_settings = experiments[experiment_name] - assert experiment_settings.name == experiment_name - - if args.tasks: - print('Launching tasks from args: %s' % (args.tasks,)) - tasks = {t: S(length=default_length) for t in args.tasks} - elif args.task: - print('Launching single task "%s"' % args.task) - tasks = {args.task: S(length=default_length)} - elif args.tuning_tasks: - print('Only running tuning tasks') - tasks = {name: S(length=default_length) - for name in ['reverse-tune', 'remove-char-tune']} - elif args.iclr_tasks: - print('Running eval tasks from ICLR paper.') - tasks = {name: S(length=default_length) for name in iclr_tasks} - elif args.regression_tests: - tasks = {name: S(length=default_length) for name in regression_test_tasks} - print('Tasks: %s' % tasks.keys()) - - print('reps = %d' % (int(args.reps),)) - - return args, tasks, experiment_settings - - -def run(command_string): - subprocess.call(command_string, shell=True) - - -if 
__name__ == '__main__': - LAUNCH_TRAINING_COMMAND = 'single_task/launch_training.sh' - COMPILE_COMMAND = 'bazel build -c opt single_task:run.par' - - args, tasks, experiment_settings = parse_args( - extra_args=(('--training_replicas', 1),)) - - if experiment_settings.method_type in ( - 'pg', 'pg-topk', 'topk', 'topk-0ent', 'simpl'): - # Runs PG and TopK. - - def make_run_cmd(job_name, task, max_npe, num_reps, code_length, - batch_size, do_simplify, custom_config_str): - """Constructs terminal command for launching NN based algorithms. - - The arguments to this function will be used to create config for the - experiment. - - Args: - job_name: Name of the job to launch. Should uniquely identify this - experiment run. - task: Name of the coding task to solve. - max_npe: Maximum number of programs executed. An integer. - num_reps: Number of times to run the experiment. An integer. - code_length: Maximum allowed length of synthesized code. - batch_size: Minibatch size for gradient descent. - do_simplify: Whether to run the experiment in code simplification mode. - A bool. - custom_config_str: Additional config for the model config string. - - Returns: - The terminal command that launches the specified experiment. - """ - config = """ - env=c(task='{0}',correct_syntax=False), - agent=c( - algorithm='pg', - policy_lstm_sizes=[35,35],value_lstm_sizes=[35,35], - grad_clip_threshold=50.0,param_init_factor=0.5,regularizer=0.0, - softmax_tr=1.0,optimizer='rmsprop',ema_baseline_decay=0.99, - eos_token={3},{4}), - timestep_limit={1},batch_size={2} - """.replace(' ', '').replace('\n', '').format( - task, code_length, batch_size, do_simplify, custom_config_str) - num_ps = 0 if args.training_replicas == 1 else 1 - return ( - r'{0} --job_name={1} --config="{2}" --max_npe={3} ' - '--num_repetitions={4} --num_workers={5} --num_ps={6} ' - '--stop_on_success={7}' - .format(LAUNCH_TRAINING_COMMAND, job_name, config, max_npe, num_reps, - args.training_replicas, num_ps, str(not do_simplify).lower())) - - else: - # Runs GA and Rand. - assert experiment_settings.method_type in ('ga', 'rand') - - def make_run_cmd(job_name, task, max_npe, num_reps, code_length, - batch_size, do_simplify, custom_config_str): - """Constructs terminal command for launching GA or uniform random search. - - The arguments to this function will be used to create config for the - experiment. - - Args: - job_name: Name of the job to launch. Should uniquely identify this - experiment run. - task: Name of the coding task to solve. - max_npe: Maximum number of programs executed. An integer. - num_reps: Number of times to run the experiment. An integer. - code_length: Maximum allowed length of synthesized code. - batch_size: Minibatch size for gradient descent. - do_simplify: Whether to run the experiment in code simplification mode. - A bool. - custom_config_str: Additional config for the model config string. - - Returns: - The terminal command that launches the specified experiment. - """ - assert not do_simplify - if custom_config_str: - custom_config_str = ',' + custom_config_str - config = """ - env=c(task='{0}',correct_syntax=False), - agent=c( - algorithm='{4}' - {3}), - timestep_limit={1},batch_size={2} - """.replace(' ', '').replace('\n', '').format( - task, code_length, batch_size, custom_config_str, - experiment_settings.method_type) - num_workers = num_reps # Do each rep in parallel. 
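-      # Unlike the PG path above, each GA/rand repetition runs in its own
-      # worker, so no parameter server is launched (num_ps=0 below).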
- return ( - r'{0} --job_name={1} --config="{2}" --max_npe={3} ' - '--num_repetitions={4} --num_workers={5} --num_ps={6} ' - '--stop_on_success={7}' - .format(LAUNCH_TRAINING_COMMAND, job_name, config, max_npe, num_reps, - num_workers, 0, str(not do_simplify).lower())) - - print('Compiling...') - run(COMPILE_COMMAND) - - print('Launching %d coding tasks...' % len(tasks)) - for task, task_settings in tasks.iteritems(): - name = 'bf_rl_iclr' - desc = '{0}.{1}_{2}'.format(args.desc, experiment_settings.name, task) - job_name = '{}.{}'.format(name, desc) - print('Job name: %s' % job_name) - reps = int(args.reps) if not experiment_settings.simplify else 1 - run_cmd = make_run_cmd( - job_name, task, experiment_settings.max_npe, reps, - task_settings.length, experiment_settings.batch_size, - experiment_settings.simplify, - experiment_settings.config) - print('Running command:\n' + run_cmd) - run(run_cmd) - - print('Done.') -# pylint: enable=redefined-outer-name diff --git a/research/brain_coder/single_task/test_tasks.py b/research/brain_coder/single_task/test_tasks.py deleted file mode 100644 index fb07a1265..000000000 --- a/research/brain_coder/single_task/test_tasks.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Tasks that test correctness of algorithms.""" - -from six.moves import xrange -from common import reward as reward_lib # brain coder -from single_task import misc # brain coder - - -class BasicTaskManager(object): - """Wraps a generic reward function.""" - - def __init__(self, reward_fn): - self.reward_fn = reward_fn - self.good_reward = 1.0 - - def _score_string(self, string): - actions = misc.bf_string_to_tokens(string) - reward, correct = self.reward_fn(actions) - return misc.RewardInfo( - episode_rewards=[0.0] * (len(string) - 1) + [reward], - input_case=None, - correct_output=None, - code_output=actions, - input_type=None, - output_type=misc.IOType.integer, - reason='correct' if correct else 'wrong') - - def rl_batch(self, batch_size): - reward_fns = [self._score_string] * batch_size - return reward_fns - - -class Trie(object): - """Trie for sequences.""" - EOS = () - - def __init__(self): - self.trie = {} - - def insert(self, sequence): - d = self.trie - for e in sequence: - if e not in d: - d[e] = {} - d = d[e] - d[self.EOS] = True # Terminate sequence. - - def prefix_match(self, sequence): - """Return prefix of `sequence` which exists in the trie.""" - d = self.trie - index = 0 - for i, e in enumerate(sequence + [self.EOS]): - index = i - if e in d: - d = d[e] - if e == self.EOS: - return sequence, True - else: - break - return sequence[:index], False - - def next_choices(self, sequence): - d = self.trie - for e in sequence: - if e in d: - d = d[e] - else: - raise ValueError('Sequence not a prefix: %s' % (sequence,)) - return d.keys() - - -class HillClimbingTask(object): - """Simple task that tests reward hill climbing ability. - - There are a set of paths (sequences of tokens) which are rewarded. The total - reward for a path is proportional to its length, so the longest path is the - target. Shorter paths can be dead ends. - """ - - def __init__(self): - # Paths are sequences of sub-sequences. Here we form unique sub-sequences - # out of 3 arbitrary ints. We use sub-sequences instead of single entities - # to make the task harder by making the episodes last longer, i.e. more - # for the agent to remember. 
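-    # The longest rewarded path inserted below is [a, b, c, d, e, f, g, h];
-    # its flattened token sequence is the target (self.correct_sequence).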
-    a = (1, 2, 3)
-    b = (4, 5, 6)
-    c = (7, 8, 7)
-    d = (6, 5, 4)
-    e = (3, 2, 1)
-    f = (8, 5, 1)
-    g = (6, 4, 2)
-    h = (1, 8, 3)
-    self.paths = Trie()
-    self.paths.insert([a, b, h])
-    self.paths.insert([a, b, c, d, e, f, g, h])
-    self.paths.insert([a, b, c, d, e, b, a])
-    self.paths.insert([a, b, g, h])
-    self.paths.insert([a, e, f, g])
-    self.correct_sequence = misc.flatten([a, b, c, d, e, f, g, h])
-
-    def distance_fn(a, b):
-      len_diff = abs(len(a) - len(b))
-      return sum(reward_lib.mod_abs_diff(ai - 1, bi - 1, 8)
-                 for ai, bi in zip(a, b)) + len_diff * 4  # 8 / 2 = 4
-    self.distance_fn = distance_fn
-
-  def __call__(self, actions):
-    # Compute reward for action sequence.
-    actions = [a for a in actions if a > 0]
-    sequence = [tuple(actions[i: i + 3]) for i in xrange(0, len(actions), 3)]
-    prefix, complete = self.paths.prefix_match(sequence)
-    if complete:
-      return float(len(prefix)), actions == self.correct_sequence
-    if len(prefix) == len(sequence):
-      return float(len(prefix)), False
-    next_pred = sequence[len(prefix)]
-    choices = self.paths.next_choices(prefix)
-    if choices == [()]:
-      return (len(prefix) - len(next_pred) / 3.0), False
-    min_dist = min(self.distance_fn(c, next_pred) for c in choices)
-    # +1 reward for each element in the sequence correct, plus fraction towards
-    # closest next element.
-    # Maximum distance possible is num_actions * base / 2 = 3 * 8 / 2 = 12
-    return (len(prefix) + (1 - min_dist / 12.0)), False
diff --git a/research/brain_coder/single_task/test_tasks_test.py b/research/brain_coder/single_task/test_tasks_test.py
deleted file mode 100644
index bc905c693..000000000
--- a/research/brain_coder/single_task/test_tasks_test.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Tests for test_tasks."""
-
-import numpy as np
-import tensorflow as tf
-
-from single_task import misc  # brain coder
-from single_task import test_tasks  # brain coder
-
-
-def get_reward(reward_fn, candidate):
-  return sum(reward_fn(misc.bf_tokens_to_string(candidate)).episode_rewards)
-
-
-class TestTasksTest(tf.test.TestCase):
-
-  def testHillClimbingTask(self):
-    task = test_tasks.BasicTaskManager(test_tasks.HillClimbingTask())
-    reward_fns = task.rl_batch(1)
-    reward_fn = reward_fns[0]
-    self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 0]), 8 / 12.))
-    self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 2, 0]), 11 / 12.))
-    self.assertTrue(np.isclose(get_reward(reward_fn, [1, 2, 3, 0]), 1.0))
-    self.assertTrue(
-        np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 2, 0]), 1.
+ 8 / 12.)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 0]), 2.0)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 0]), 3.0)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 0]), 3.0)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 1, 0]), - 3.0 - 4 / 12.)) - self.assertTrue( - np.isclose( - get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 1, 8, 3, 1, 1, 1, 1, 0]), - 2.0)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 3, 0]), - 3.0 + 1 / 12.)) - self.assertTrue( - np.isclose( - get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, - 8, 5, 1, 6, 4, 2, 1, 8, 3, 0]), - 8.0)) - self.assertTrue( - np.isclose( - get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1, - 8, 5, 1, 6, 4, 2, 1, 8, 3, 1, 1, 0]), - 8.0 - 8 / 12.)) - self.assertTrue( - np.isclose(get_reward(reward_fn, [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, - 2, 1, 8, 5, 1, 6, 4, 2, 1, 8, 3, 1, 1, - 1, 1, 1, 1, 1, 0]), - 7.0)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/brain_coder/single_task/tune.py b/research/brain_coder/single_task/tune.py deleted file mode 100644 index 3473b5e94..000000000 --- a/research/brain_coder/single_task/tune.py +++ /dev/null @@ -1,262 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -r"""Run grid search. - -Look at launch_tuning.sh for details on how to tune at scale. - -Usage example: -Tune with one worker on the local machine. - -CONFIG="agent=c(algorithm='pg')," -CONFIG+="env=c(task_cycle=['reverse-tune', 'remove-tune'])" -HPARAM_SPACE_TYPE="pg" -OUT_DIR="/tmp/bf_pg_tune" -MAX_NPE=5000000 -NUM_REPETITIONS=50 -rm -rf $OUT_DIR -mkdir $OUT_DIR -bazel run -c opt single_task:tune -- \ - --alsologtostderr \ - --config="$CONFIG" \ - --max_npe="$MAX_NPE" \ - --num_repetitions="$NUM_REPETITIONS" \ - --logdir="$OUT_DIR" \ - --summary_interval=1 \ - --model_v=0 \ - --hparam_space="$HPARAM_SPACE_TYPE" \ - --tuner_id=0 \ - --num_tuners=1 \ - 2>&1 >"$OUT_DIR/tuner_0.log" -learning/brain/tensorboard/tensorboard.sh --port 12345 --logdir "$OUT_DIR" -""" - -import ast -import os - -from absl import app -from absl import flags -from absl import logging -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from single_task import defaults # brain coder -from single_task import run as run_lib # brain coder - -FLAGS = flags.FLAGS -flags.DEFINE_integer( - 'tuner_id', 0, - 'The unique ID for this tuning worker.') -flags.DEFINE_integer( - 'num_tuners', 1, - 'How many tuners are there.') -flags.DEFINE_string( - 'hparam_space', 'default', - 'String name which denotes the hparam space to tune over. This is ' - 'algorithm dependent.') -flags.DEFINE_string( - 'fixed_hparams', '', - 'HParams string. Used to fix hparams during tuning.') -flags.DEFINE_float( - 'success_rate_objective_weight', 1.0, - 'How much to weight success rate vs num programs seen. 
By default, only ' - 'success rate is optimized (this is the setting used in the paper).') - - -def parse_hparams_string(hparams_str): - hparams = {} - for term in hparams_str.split(','): - if not term: - continue - name, value = term.split('=') - hparams[name.strip()] = ast.literal_eval(value) - return hparams - - -def int_to_multibase(n, bases): - digits = [0] * len(bases) - for i, b in enumerate(bases): - n, d = divmod(n, b) - digits[i] = d - return digits - - -def hparams_for_index(index, tuning_space): - keys = sorted(tuning_space.keys()) - indices = int_to_multibase(index, [len(tuning_space[k]) for k in keys]) - return tf.contrib.training.HParams( - **{k: tuning_space[k][i] for k, i in zip(keys, indices)}) - - -def run_tuner_loop(ns): - """Run tuning loop for this worker.""" - is_chief = FLAGS.task_id == 0 - tuning_space = ns.define_tuner_hparam_space( - hparam_space_type=FLAGS.hparam_space) - fixed_hparams = parse_hparams_string(FLAGS.fixed_hparams) - for name, value in fixed_hparams.iteritems(): - tuning_space[name] = [value] - tuning_space_size = np.prod([len(values) for values in tuning_space.values()]) - - num_local_trials, remainder = divmod(tuning_space_size, FLAGS.num_tuners) - if FLAGS.tuner_id < remainder: - num_local_trials += 1 - starting_trial_id = ( - num_local_trials * FLAGS.tuner_id + min(remainder, FLAGS.tuner_id)) - - logging.info('tuning_space_size: %d', tuning_space_size) - logging.info('num_local_trials: %d', num_local_trials) - logging.info('starting_trial_id: %d', starting_trial_id) - - for local_trial_index in xrange(num_local_trials): - trial_config = defaults.default_config_with_updates(FLAGS.config) - global_trial_index = local_trial_index + starting_trial_id - trial_name = 'trial_' + str(global_trial_index) - trial_dir = os.path.join(FLAGS.logdir, trial_name) - hparams = hparams_for_index(global_trial_index, tuning_space) - ns.write_hparams_to_config( - trial_config, hparams, hparam_space_type=FLAGS.hparam_space) - - results_list = ns.run_training( - config=trial_config, tuner=None, logdir=trial_dir, is_chief=is_chief, - trial_name=trial_name) - - if not is_chief: - # Only chief worker needs to write tuning results to disk. - continue - - objective, metrics = compute_tuning_objective( - results_list, hparams, trial_name, num_trials=tuning_space_size) - logging.info('metrics:\n%s', metrics) - logging.info('objective: %s', objective) - logging.info('programs_seen_fraction: %s', - metrics['programs_seen_fraction']) - logging.info('success_rate: %s', metrics['success_rate']) - logging.info('success_rate_objective_weight: %s', - FLAGS.success_rate_objective_weight) - - tuning_results_file = os.path.join(trial_dir, 'tuning_results.txt') - with tf.gfile.FastGFile(tuning_results_file, 'a') as writer: - writer.write(str(metrics) + '\n') - - logging.info('Trial %s complete.', trial_name) - - -def compute_tuning_objective(results_list, hparams, trial_name, num_trials): - """Compute tuning objective and metrics given results and trial information. - - Args: - results_list: List of results dicts read from disk. These are written by - workers. - hparams: tf.contrib.training.HParams instance containing the hparams used - in this trial (only the hparams which are being tuned). - trial_name: Name of this trial. Used to create a trial directory. - num_trials: Total number of trials that need to be run. This is saved in the - metrics dict for future reference. - - Returns: - objective: The objective computed for this trial. 
Choose the hparams for the - trial with the largest objective value. - metrics: Information about this trial. A dict. - """ - found_solution = [r['found_solution'] for r in results_list] - successful_program_counts = [ - r['npe'] for r in results_list if r['found_solution']] - - success_rate = sum(found_solution) / float(len(results_list)) - - max_programs = FLAGS.max_npe # Per run. - all_program_counts = [ - r['npe'] if r['found_solution'] else max_programs - for r in results_list] - programs_seen_fraction = ( - float(sum(all_program_counts)) - / (max_programs * len(all_program_counts))) - - # min/max/avg stats are over successful runs. - metrics = { - 'num_runs': len(results_list), - 'num_succeeded': sum(found_solution), - 'success_rate': success_rate, - 'programs_seen_fraction': programs_seen_fraction, - 'avg_programs': np.mean(successful_program_counts), - 'max_possible_programs_per_run': max_programs, - 'global_step': sum([r['num_batches'] for r in results_list]), - 'hparams': hparams.values(), - 'trial_name': trial_name, - 'num_trials': num_trials} - - # Report stats per tasks. - tasks = [r['task'] for r in results_list] - for task in set(tasks): - task_list = [r for r in results_list if r['task'] == task] - found_solution = [r['found_solution'] for r in task_list] - successful_rewards = [ - r['best_reward'] for r in task_list - if r['found_solution']] - successful_num_batches = [ - r['num_batches'] - for r in task_list if r['found_solution']] - successful_program_counts = [ - r['npe'] for r in task_list if r['found_solution']] - metrics_append = { - task + '__num_runs': len(task_list), - task + '__num_succeeded': sum(found_solution), - task + '__success_rate': ( - sum(found_solution) / float(len(task_list)))} - metrics.update(metrics_append) - if any(found_solution): - metrics_append = { - task + '__min_reward': min(successful_rewards), - task + '__max_reward': max(successful_rewards), - task + '__avg_reward': np.median(successful_rewards), - task + '__min_programs': min(successful_program_counts), - task + '__max_programs': max(successful_program_counts), - task + '__avg_programs': np.mean(successful_program_counts), - task + '__min_batches': min(successful_num_batches), - task + '__max_batches': max(successful_num_batches), - task + '__avg_batches': np.mean(successful_num_batches)} - metrics.update(metrics_append) - - # Objective will be maximized. - # Maximize success rate, minimize num programs seen. - # Max objective is always 1. 
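-  # e.g. with weight=1.0 the objective reduces to success_rate alone (the
-  # setting used in the paper); weight=0.5 averages success_rate and
-  # (1 - programs_seen_fraction).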
-  weight = FLAGS.success_rate_objective_weight
-  objective = (
-      weight * success_rate
-      + (1 - weight) * (1 - programs_seen_fraction))
-  metrics['objective'] = objective
-
-  return objective, metrics
-
-
-def main(argv):
-  del argv
-
-  logging.set_verbosity(FLAGS.log_level)
-
-  if not FLAGS.logdir:
-    raise ValueError('logdir flag must be provided.')
-  if FLAGS.num_workers <= 0:
-    raise ValueError('num_workers flag must be greater than 0.')
-  if FLAGS.task_id < 0:
-    raise ValueError('task_id flag must be greater than or equal to 0.')
-  if FLAGS.task_id >= FLAGS.num_workers:
-    raise ValueError(
-        'task_id flag must be strictly less than num_workers flag.')
-  if FLAGS.num_tuners <= 0:
-    raise ValueError('num_tuners flag must be greater than 0.')
-  if FLAGS.tuner_id < 0:
-    raise ValueError('tuner_id flag must be greater than or equal to 0.')
-  if FLAGS.tuner_id >= FLAGS.num_tuners:
-    raise ValueError(
-        'tuner_id flag must be strictly less than num_tuners flag.')
-
-  ns, _ = run_lib.get_namespace(FLAGS.config)
-  run_tuner_loop(ns)
-
-
-if __name__ == '__main__':
-  app.run(main)
diff --git a/research/cognitive_mapping_and_planning/.gitignore b/research/cognitive_mapping_and_planning/.gitignore
deleted file mode 100644
index cbc6a8f02..000000000
--- a/research/cognitive_mapping_and_planning/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-deps
-*.pyc
-lib*.so
-lib*.so*
diff --git a/research/cognitive_mapping_and_planning/README.md b/research/cognitive_mapping_and_planning/README.md
deleted file mode 100644
index 4457bafbb..000000000
--- a/research/cognitive_mapping_and_planning/README.md
+++ /dev/null
@@ -1,127 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Cognitive Mapping and Planning for Visual Navigation
-**Saurabh Gupta, James Davidson, Sergey Levine, Rahul Sukthankar, Jitendra Malik**
-
-**Computer Vision and Pattern Recognition (CVPR) 2017.**
-
-**[ArXiv](https://arxiv.org/abs/1702.03920),
-[Project Website](https://sites.google.com/corp/view/cognitive-mapping-and-planning/)**
-
-### Citing
-If you find this code base and models useful in your research, please consider
-citing the following paper:
-  ```
-  @inproceedings{gupta2017cognitive,
-    title={Cognitive Mapping and Planning for Visual Navigation},
-    author={Gupta, Saurabh and Davidson, James and Levine, Sergey and
-      Sukthankar, Rahul and Malik, Jitendra},
-    booktitle={CVPR},
-    year={2017}
-  }
-  ```
-
-### Contents
-1. [Requirements: software](#requirements-software)
-2. [Requirements: data](#requirements-data)
-3. [Test Pre-trained Models](#test-pre-trained-models)
-4. [Train Your Own Models](#train-your-own-models)
-
-### Requirements: software
-1. Python Virtual Env Setup: All code is implemented in Python but depends on a
-   small number of python packages and a couple of C libraries. We recommend
-   using a virtual environment for installing these python packages and python
-   bindings for these C libraries.
-   ```Shell
-   VENV_DIR=venv
-   pip install virtualenv
-   virtualenv $VENV_DIR
-   source $VENV_DIR/bin/activate
-
-   # You may need to upgrade pip for installing opencv-python.
-   pip install --upgrade pip
-   # Install simple dependencies.
-   pip install -r requirements.txt
-
-   # Patch bugs in dependencies.
-   sh patches/apply_patches.sh
-   ```
-
-2. Install [Tensorflow](https://www.tensorflow.org/) inside this virtual
-   environment. You will need to use one of the latest nightly builds
-   (see instructions [here](https://github.com/tensorflow/tensorflow#installation)).
-
-3. Swiftshader: We use
-   [Swiftshader](https://github.com/google/swiftshader.git), a CPU-based
-   renderer, to render the meshes. It is possible to use other renderers;
-   replace `SwiftshaderRenderer` in `render/swiftshader_renderer.py` with
-   bindings to your renderer.
-   ```Shell
-   mkdir -p deps
-   git clone --recursive https://github.com/google/swiftshader.git deps/swiftshader-src
-   cd deps/swiftshader-src && git checkout 91da6b00584afd7dcaed66da88e2b617429b3950
-   git submodule update
-   mkdir build && cd build && cmake .. && make -j 16 libEGL libGLESv2
-   cd ../../../
-   cp deps/swiftshader-src/build/libEGL* libEGL.so.1
-   cp deps/swiftshader-src/build/libGLESv2* libGLESv2.so.2
-   ```
-
-4. PyAssimp: We use [PyAssimp](https://github.com/assimp/assimp.git) to load
-   meshes. It is possible to use other libraries to load meshes; replace
-   `Shape` in `render/swiftshader_renderer.py` with bindings to your library
-   for loading meshes.
-   ```Shell
-   mkdir -p deps
-   git clone https://github.com/assimp/assimp.git deps/assimp-src
-   cd deps/assimp-src
-   git checkout 2afeddd5cb63d14bc77b53740b38a54a97d94ee8
-   cmake CMakeLists.txt -G 'Unix Makefiles' && make -j 16
-   cd port/PyAssimp && python setup.py install
-   cd ../../../..
-   cp deps/assimp-src/lib/libassimp* .
-   ```
-
-5. graph-tool: We use the [graph-tool](https://git.skewed.de/count0/graph-tool)
-   library for graph processing.
-   ```Shell
-   mkdir -p deps
-   # If the following git clone command fails, you can also download the source
-   # from https://downloads.skewed.de/graph-tool/graph-tool-2.2.44.tar.bz2
-   git clone https://git.skewed.de/count0/graph-tool deps/graph-tool-src
-   cd deps/graph-tool-src && git checkout 178add3a571feb6666f4f119027705d95d2951ab
-   bash autogen.sh
-   ./configure --disable-cairo --disable-sparsehash --prefix=$HOME/.local
-   make -j 16
-   make install
-   cd ../../
-   ```
-
-### Requirements: data
-1. Download the Stanford 3D Indoor Spaces Dataset (S3DIS Dataset) and ImageNet
-   Pre-trained models for initializing different models. Follow instructions in
-   `data/README.md`
-
-### Test Pre-trained Models
-1. Download pre-trained models. See `output/README.md`.
-
-2. Test models using `scripts/script_test_pretrained_models.sh`.
-
-### Train Your Own Models
-All models were trained asynchronously with 16 workers, each worker using data
-from a single floor. The default hyper-parameters correspond to this setting.
-See [distributed training with
-Tensorflow](https://www.tensorflow.org/deploy/distributed) for setting up
-distributed training. Training with a single worker is possible with the
-current code base but will require some minor changes to allow each worker to
-load all training environments.
-
-### Contact
-For questions or issues open an issue on the tensorflow/models [issues
-tracker](https://github.com/tensorflow/models/issues). Please assign issues to
-@s-gupta.
-
-### Credits
-This code was written by Saurabh Gupta (@s-gupta).
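The 16-worker asynchronous recipe in the README above is standard TF1
between-graph replication. As a rough sketch only (the cluster addresses, the
single parameter server, and the toy loss below are assumptions for
illustration, not code from this repository), each worker process would be
wired up along these lines:

```python
# Hypothetical sketch of TF1 between-graph replication with 16 workers.
# Host names, ports, and the model are placeholders.
import tensorflow as tf

cluster = tf.train.ClusterSpec({
    'ps': ['ps0.example.com:2222'],
    'worker': ['worker%d.example.com:2222' % i for i in range(16)],
})
task_index = 0  # Set differently for each of the 16 workers.
server = tf.train.Server(cluster, job_name='worker', task_index=task_index)

# Variables live on the parameter server; compute stays on this worker.
with tf.device(tf.train.replica_device_setter(cluster=cluster)):
  global_step = tf.train.get_or_create_global_step()
  w = tf.get_variable('w', shape=[4], initializer=tf.zeros_initializer())
  loss = tf.reduce_sum(tf.square(w - 1.0))
  train_op = tf.train.AdamOptimizer(1e-3).minimize(
      loss, global_step=global_step)

hooks = [tf.train.StopAtStepHook(last_step=100000)]
with tf.train.MonitoredTrainingSession(
    master=server.target, is_chief=(task_index == 0), hooks=hooks) as sess:
  while not sess.should_stop():
    sess.run(train_op)
```

The chief (task 0) handles checkpointing, mirroring the
`is_chief=FLAGS.task_id == 0` plumbing in `single_task/run.py` above.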
diff --git a/research/cognitive_mapping_and_planning/__init__.py b/research/cognitive_mapping_and_planning/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/cfgs/__init__.py b/research/cognitive_mapping_and_planning/cfgs/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/cfgs/config_cmp.py b/research/cognitive_mapping_and_planning/cfgs/config_cmp.py deleted file mode 100644 index 715eee2b9..000000000 --- a/research/cognitive_mapping_and_planning/cfgs/config_cmp.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import os, sys -import numpy as np -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -import logging -import src.utils as utils -import cfgs.config_common as cc - - -import tensorflow as tf - - -rgb_resnet_v2_50_path = 'data/init_models/resnet_v2_50/model.ckpt-5136169' -d_resnet_v2_50_path = 'data/init_models/distill_rgb_to_d_resnet_v2_50/model.ckpt-120002' - -def get_default_args(): - summary_args = utils.Foo(display_interval=1, test_iters=26, - arop_full_summary_iters=14) - - control_args = utils.Foo(train=False, test=False, - force_batchnorm_is_training_at_test=False, - reset_rng_seed=False, only_eval_when_done=False, - test_mode=None) - return summary_args, control_args - -def get_default_cmp_args(): - batch_norm_param = {'center': True, 'scale': True, - 'activation_fn':tf.nn.relu} - - mapper_arch_args = utils.Foo( - dim_reduce_neurons=64, - fc_neurons=[1024, 1024], - fc_out_size=8, - fc_out_neurons=64, - encoder='resnet_v2_50', - deconv_neurons=[64, 32, 16, 8, 4, 2], - deconv_strides=[2, 2, 2, 2, 2, 2], - deconv_layers_per_block=2, - deconv_kernel_size=4, - fc_dropout=0.5, - combine_type='wt_avg_logits', - batch_norm_param=batch_norm_param) - - readout_maps_arch_args = utils.Foo( - num_neurons=[], - strides=[], - kernel_size=None, - layers_per_block=None) - - arch_args = utils.Foo( - vin_val_neurons=8, vin_action_neurons=8, vin_ks=3, vin_share_wts=False, - pred_neurons=[64, 64], pred_batch_norm_param=batch_norm_param, - conv_on_value_map=0, fr_neurons=16, fr_ver='v2', fr_inside_neurons=64, - fr_stride=1, crop_remove_each=30, value_crop_size=4, - action_sample_type='sample', action_sample_combine_type='one_or_other', - sample_gt_prob_type='inverse_sigmoid_decay', dagger_sample_bn_false=True, - vin_num_iters=36, isd_k=750., use_agent_loc=False, multi_scale=True, - readout_maps=False, rom_arch=readout_maps_arch_args) - - return arch_args, mapper_arch_args - -def get_arch_vars(arch_str): - if arch_str == '': vals = [] - else: vals = arch_str.split('_') - ks = ['var1', 'var2', 'var3'] - ks = ks[:len(vals)] - - # Exp Ver. - if len(vals) == 0: ks.append('var1'); vals.append('v0') - # custom arch. 
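-  # (e.g. arch_str='lmap_Msc' fills var1 and var2 explicitly and leaves var3
-  # at its default 'fr2'.)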
- if len(vals) == 1: ks.append('var2'); vals.append('') - # map scape for projection baseline. - if len(vals) == 2: ks.append('var3'); vals.append('fr2') - - assert(len(vals) == 3) - - vars = utils.Foo() - for k, v in zip(ks, vals): - setattr(vars, k, v) - - logging.error('arch_vars: %s', vars) - return vars - -def process_arch_str(args, arch_str): - # This function modifies args. - args.arch, args.mapper_arch = get_default_cmp_args() - - arch_vars = get_arch_vars(arch_str) - - args.navtask.task_params.outputs.ego_maps = True - args.navtask.task_params.outputs.ego_goal_imgs = True - args.navtask.task_params.outputs.egomotion = True - args.navtask.task_params.toy_problem = False - - if arch_vars.var1 == 'lmap': - args = process_arch_learned_map(args, arch_vars) - - elif arch_vars.var1 == 'pmap': - args = process_arch_projected_map(args, arch_vars) - - else: - logging.fatal('arch_vars.var1 should be lmap or pmap, but is %s', arch_vars.var1) - assert(False) - - return args - -def process_arch_learned_map(args, arch_vars): - # Multiscale vision based system. - args.navtask.task_params.input_type = 'vision' - args.navtask.task_params.outputs.images = True - - if args.navtask.camera_param.modalities[0] == 'rgb': - args.solver.pretrained_path = rgb_resnet_v2_50_path - elif args.navtask.camera_param.modalities[0] == 'depth': - args.solver.pretrained_path = d_resnet_v2_50_path - - if arch_vars.var2 == 'Ssc': - sc = 1./args.navtask.task_params.step_size - args.arch.vin_num_iters = 40 - args.navtask.task_params.map_scales = [sc] - max_dist = args.navtask.task_params.max_dist * \ - args.navtask.task_params.num_goals - args.navtask.task_params.map_crop_sizes = [2*max_dist] - - args.arch.fr_stride = 1 - args.arch.vin_action_neurons = 8 - args.arch.vin_val_neurons = 3 - args.arch.fr_inside_neurons = 32 - - args.mapper_arch.pad_map_with_zeros_each = [24] - args.mapper_arch.deconv_neurons = [64, 32, 16] - args.mapper_arch.deconv_strides = [1, 2, 1] - - elif (arch_vars.var2 == 'Msc' or arch_vars.var2 == 'MscROMms' or - arch_vars.var2 == 'MscROMss' or arch_vars.var2 == 'MscNoVin'): - # Code for multi-scale planner. - args.arch.vin_num_iters = 8 - args.arch.crop_remove_each = 4 - args.arch.value_crop_size = 8 - - sc = 1./args.navtask.task_params.step_size - max_dist = args.navtask.task_params.max_dist * \ - args.navtask.task_params.num_goals - n_scales = np.log2(float(max_dist) / float(args.arch.vin_num_iters)) - n_scales = int(np.ceil(n_scales)+1) - - args.navtask.task_params.map_scales = \ - list(sc*(0.5**(np.arange(n_scales))[::-1])) - args.navtask.task_params.map_crop_sizes = [16 for x in range(n_scales)] - - args.arch.fr_stride = 1 - args.arch.vin_action_neurons = 8 - args.arch.vin_val_neurons = 3 - args.arch.fr_inside_neurons = 32 - - args.mapper_arch.pad_map_with_zeros_each = [0 for _ in range(n_scales)] - args.mapper_arch.deconv_neurons = [64*n_scales, 32*n_scales, 16*n_scales] - args.mapper_arch.deconv_strides = [1, 2, 1] - - if arch_vars.var2 == 'MscNoVin': - # No planning version. - args.arch.fr_stride = [1, 2, 1, 2] - args.arch.vin_action_neurons = None - args.arch.vin_val_neurons = 16 - args.arch.fr_inside_neurons = 32 - - args.arch.crop_remove_each = 0 - args.arch.value_crop_size = 4 - args.arch.vin_num_iters = 0 - - elif arch_vars.var2 == 'MscROMms' or arch_vars.var2 == 'MscROMss': - # Code with read outs, MscROMms flattens and reads out, - # MscROMss does not flatten and produces output at multiple scales. 
- args.navtask.task_params.outputs.readout_maps = True - args.navtask.task_params.map_resize_method = 'antialiasing' - args.arch.readout_maps = True - - if arch_vars.var2 == 'MscROMms': - args.arch.rom_arch.num_neurons = [64, 1] - args.arch.rom_arch.kernel_size = 4 - args.arch.rom_arch.strides = [2,2] - args.arch.rom_arch.layers_per_block = 2 - - args.navtask.task_params.readout_maps_crop_sizes = [64] - args.navtask.task_params.readout_maps_scales = [sc] - - elif arch_vars.var2 == 'MscROMss': - args.arch.rom_arch.num_neurons = \ - [64, len(args.navtask.task_params.map_scales)] - args.arch.rom_arch.kernel_size = 4 - args.arch.rom_arch.strides = [1,1] - args.arch.rom_arch.layers_per_block = 1 - - args.navtask.task_params.readout_maps_crop_sizes = \ - args.navtask.task_params.map_crop_sizes - args.navtask.task_params.readout_maps_scales = \ - args.navtask.task_params.map_scales - - else: - logging.fatal('arch_vars.var2 not one of Msc, MscROMms, MscROMss, MscNoVin.') - assert(False) - - map_channels = args.mapper_arch.deconv_neurons[-1] / \ - (2*len(args.navtask.task_params.map_scales)) - args.navtask.task_params.map_channels = map_channels - - return args - -def process_arch_projected_map(args, arch_vars): - # Single scale vision based system which does not use a mapper but instead - # uses an analytically estimated map. - ds = int(arch_vars.var3[2]) - args.navtask.task_params.input_type = 'analytical_counts' - args.navtask.task_params.outputs.analytical_counts = True - - assert(args.navtask.task_params.modalities[0] == 'depth') - args.navtask.camera_param.img_channels = None - - analytical_counts = utils.Foo(map_sizes=[512/ds], - xy_resolution=[5.*ds], - z_bins=[[-10, 10, 150, 200]], - non_linearity=[arch_vars.var2]) - args.navtask.task_params.analytical_counts = analytical_counts - - sc = 1./ds - args.arch.vin_num_iters = 36 - args.navtask.task_params.map_scales = [sc] - args.navtask.task_params.map_crop_sizes = [512/ds] - - args.arch.fr_stride = [1,2] - args.arch.vin_action_neurons = 8 - args.arch.vin_val_neurons = 3 - args.arch.fr_inside_neurons = 32 - - map_channels = len(analytical_counts.z_bins[0]) + 1 - args.navtask.task_params.map_channels = map_channels - args.solver.freeze_conv = False - - return args - -def get_args_for_config(config_name): - args = utils.Foo() - - args.summary, args.control = get_default_args() - - exp_name, mode_str = config_name.split('+') - arch_str, solver_str, navtask_str = exp_name.split('.') - logging.error('config_name: %s', config_name) - logging.error('arch_str: %s', arch_str) - logging.error('navtask_str: %s', navtask_str) - logging.error('solver_str: %s', solver_str) - logging.error('mode_str: %s', mode_str) - - args.solver = cc.process_solver_str(solver_str) - args.navtask = cc.process_navtask_str(navtask_str) - - args = process_arch_str(args, arch_str) - args.arch.isd_k = args.solver.isd_k - - # Train, test, etc. - mode, imset = mode_str.split('_') - args = cc.adjust_args_for_mode(args, mode) - args.navtask.building_names = args.navtask.dataset.get_split(imset) - args.control.test_name = '{:s}_on_{:s}'.format(mode, imset) - - # Log the arguments - logging.error('%s', args) - return args diff --git a/research/cognitive_mapping_and_planning/cfgs/config_common.py b/research/cognitive_mapping_and_planning/cfgs/config_common.py deleted file mode 100644 index 440bf5b72..000000000 --- a/research/cognitive_mapping_and_planning/cfgs/config_common.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import os -import numpy as np -import logging -import src.utils as utils -import datasets.nav_env_config as nec -from datasets import factory - -def adjust_args_for_mode(args, mode): - if mode == 'train': - args.control.train = True - - elif mode == 'val1': - # Same settings as for training, to make sure nothing wonky is happening - # there. - args.control.test = True - args.control.test_mode = 'val' - args.navtask.task_params.batch_size = 32 - - elif mode == 'val2': - # No data augmentation, not sampling but taking the argmax action, not - # sampling from the ground truth at all. - args.control.test = True - args.arch.action_sample_type = 'argmax' - args.arch.sample_gt_prob_type = 'zero' - args.navtask.task_params.data_augment = \ - utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False, - relight_fast=False, structured=False) - args.control.test_mode = 'val' - args.navtask.task_params.batch_size = 32 - - elif mode == 'bench': - # Actually testing the agent in settings that are kept same between - # different runs. - args.navtask.task_params.batch_size = 16 - args.control.test = True - args.arch.action_sample_type = 'argmax' - args.arch.sample_gt_prob_type = 'zero' - args.navtask.task_params.data_augment = \ - utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False, - relight_fast=False, structured=False) - args.summary.test_iters = 250 - args.control.only_eval_when_done = True - args.control.reset_rng_seed = True - args.control.test_mode = 'test' - else: - logging.fatal('Unknown mode: %s.', mode) - assert(False) - return args - -def get_solver_vars(solver_str): - if solver_str == '': vals = []; - else: vals = solver_str.split('_') - ks = ['clip', 'dlw', 'long', 'typ', 'isdk', 'adam_eps', 'init_lr']; - ks = ks[:len(vals)] - - # Gradient clipping or not. - if len(vals) == 0: ks.append('clip'); vals.append('noclip'); - # data loss weight. - if len(vals) == 1: ks.append('dlw'); vals.append('dlw20') - # how long to train for. - if len(vals) == 2: ks.append('long'); vals.append('nolong') - # Adam - if len(vals) == 3: ks.append('typ'); vals.append('adam2') - # reg loss wt - if len(vals) == 4: ks.append('rlw'); vals.append('rlw1') - # isd_k - if len(vals) == 5: ks.append('isdk'); vals.append('isdk415') # 415, inflexion at 2.5k. 
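-  # ('isdk415' is later parsed by process_solver_str as isd_k = float('415').)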
- # adam eps - if len(vals) == 6: ks.append('adam_eps'); vals.append('aeps1en8') - # init lr - if len(vals) == 7: ks.append('init_lr'); vals.append('lr1en3') - - assert(len(vals) == 8) - - vars = utils.Foo() - for k, v in zip(ks, vals): - setattr(vars, k, v) - logging.error('solver_vars: %s', vars) - return vars - -def process_solver_str(solver_str): - solver = utils.Foo( - seed=0, learning_rate_decay=None, clip_gradient_norm=None, max_steps=None, - initial_learning_rate=None, momentum=None, steps_per_decay=None, - logdir=None, sync=False, adjust_lr_sync=True, wt_decay=0.0001, - data_loss_wt=None, reg_loss_wt=None, freeze_conv=True, num_workers=1, - task=0, ps_tasks=0, master='local', typ=None, momentum2=None, - adam_eps=None) - - # Clobber with overrides from solver str. - solver_vars = get_solver_vars(solver_str) - - solver.data_loss_wt = float(solver_vars.dlw[3:].replace('x', '.')) - solver.adam_eps = float(solver_vars.adam_eps[4:].replace('x', '.').replace('n', '-')) - solver.initial_learning_rate = float(solver_vars.init_lr[2:].replace('x', '.').replace('n', '-')) - solver.reg_loss_wt = float(solver_vars.rlw[3:].replace('x', '.')) - solver.isd_k = float(solver_vars.isdk[4:].replace('x', '.')) - - long = solver_vars.long - if long == 'long': - solver.steps_per_decay = 40000 - solver.max_steps = 120000 - elif long == 'long2': - solver.steps_per_decay = 80000 - solver.max_steps = 120000 - elif long == 'nolong' or long == 'nol': - solver.steps_per_decay = 20000 - solver.max_steps = 60000 - else: - logging.fatal('solver_vars.long should be long, long2, nolong or nol.') - assert(False) - - clip = solver_vars.clip - if clip == 'noclip' or clip == 'nocl': - solver.clip_gradient_norm = 0 - elif clip[:4] == 'clip': - solver.clip_gradient_norm = float(clip[4:].replace('x', '.')) - else: - logging.fatal('Unknown solver_vars.clip: %s', clip) - assert(False) - - typ = solver_vars.typ - if typ == 'adam': - solver.typ = 'adam' - solver.momentum = 0.9 - solver.momentum2 = 0.999 - solver.learning_rate_decay = 1.0 - elif typ == 'adam2': - solver.typ = 'adam' - solver.momentum = 0.9 - solver.momentum2 = 0.999 - solver.learning_rate_decay = 0.1 - elif typ == 'sgd': - solver.typ = 'sgd' - solver.momentum = 0.99 - solver.momentum2 = None - solver.learning_rate_decay = 0.1 - else: - logging.fatal('Unknown solver_vars.typ: %s', typ) - assert(False) - - logging.error('solver: %s', solver) - return solver - -def get_navtask_vars(navtask_str): - if navtask_str == '': vals = [] - else: vals = navtask_str.split('_') - - ks_all = ['dataset_name', 'modality', 'task', 'history', 'max_dist', - 'num_steps', 'step_size', 'n_ori', 'aux_views', 'data_aug'] - ks = ks_all[:len(vals)] - - # All data or not. - if len(vals) == 0: ks.append('dataset_name'); vals.append('sbpd') - # modality - if len(vals) == 1: ks.append('modality'); vals.append('rgb') - # semantic task? - if len(vals) == 2: ks.append('task'); vals.append('r2r') - # number of history frames. - if len(vals) == 3: ks.append('history'); vals.append('h0') - # max steps - if len(vals) == 4: ks.append('max_dist'); vals.append('32') - # num steps - if len(vals) == 5: ks.append('num_steps'); vals.append('40') - # step size - if len(vals) == 6: ks.append('step_size'); vals.append('8') - # n_ori - if len(vals) == 7: ks.append('n_ori'); vals.append('4') - # Auxiliary views. - if len(vals) == 8: ks.append('aux_views'); vals.append('nv0') - # Normal data augmentation as opposed to structured data augmentation (if set - # to straug. 
- if len(vals) == 9: ks.append('data_aug'); vals.append('straug') - - assert(len(vals) == 10) - for i in range(len(ks)): - assert(ks[i] == ks_all[i]) - - vars = utils.Foo() - for k, v in zip(ks, vals): - setattr(vars, k, v) - logging.error('navtask_vars: %s', vals) - return vars - -def process_navtask_str(navtask_str): - navtask = nec.nav_env_base_config() - - # Clobber with overrides from strings. - navtask_vars = get_navtask_vars(navtask_str) - - navtask.task_params.n_ori = int(navtask_vars.n_ori) - navtask.task_params.max_dist = int(navtask_vars.max_dist) - navtask.task_params.num_steps = int(navtask_vars.num_steps) - navtask.task_params.step_size = int(navtask_vars.step_size) - navtask.task_params.data_augment.delta_xy = int(navtask_vars.step_size)/2. - n_aux_views_each = int(navtask_vars.aux_views[2]) - aux_delta_thetas = np.concatenate((np.arange(n_aux_views_each) + 1, - -1 -np.arange(n_aux_views_each))) - aux_delta_thetas = aux_delta_thetas*np.deg2rad(navtask.camera_param.fov) - navtask.task_params.aux_delta_thetas = aux_delta_thetas - - if navtask_vars.data_aug == 'aug': - navtask.task_params.data_augment.structured = False - elif navtask_vars.data_aug == 'straug': - navtask.task_params.data_augment.structured = True - else: - logging.fatal('Unknown navtask_vars.data_aug %s.', navtask_vars.data_aug) - assert(False) - - navtask.task_params.num_history_frames = int(navtask_vars.history[1:]) - navtask.task_params.n_views = 1+navtask.task_params.num_history_frames - - navtask.task_params.goal_channels = int(navtask_vars.n_ori) - - if navtask_vars.task == 'hard': - navtask.task_params.type = 'rng_rejection_sampling_many' - navtask.task_params.rejection_sampling_M = 2000 - navtask.task_params.min_dist = 10 - elif navtask_vars.task == 'r2r': - navtask.task_params.type = 'room_to_room_many' - elif navtask_vars.task == 'ST': - # Semantic task at hand. - navtask.task_params.goal_channels = \ - len(navtask.task_params.semantic_task.class_map_names) - navtask.task_params.rel_goal_loc_dim = \ - len(navtask.task_params.semantic_task.class_map_names) - navtask.task_params.type = 'to_nearest_obj_acc' - else: - logging.fatal('navtask_vars.task: should be hard or r2r, ST') - assert(False) - - if navtask_vars.modality == 'rgb': - navtask.camera_param.modalities = ['rgb'] - navtask.camera_param.img_channels = 3 - elif navtask_vars.modality == 'd': - navtask.camera_param.modalities = ['depth'] - navtask.camera_param.img_channels = 2 - - navtask.task_params.img_height = navtask.camera_param.height - navtask.task_params.img_width = navtask.camera_param.width - navtask.task_params.modalities = navtask.camera_param.modalities - navtask.task_params.img_channels = navtask.camera_param.img_channels - navtask.task_params.img_fov = navtask.camera_param.fov - - navtask.dataset = factory.get_dataset(navtask_vars.dataset_name) - return navtask diff --git a/research/cognitive_mapping_and_planning/cfgs/config_distill.py b/research/cognitive_mapping_and_planning/cfgs/config_distill.py deleted file mode 100644 index 53be2f8a5..000000000 --- a/research/cognitive_mapping_and_planning/cfgs/config_distill.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import pprint -import copy -import os -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -import logging -import src.utils as utils -import cfgs.config_common as cc - - -import tensorflow as tf - -rgb_resnet_v2_50_path = 'cache/resnet_v2_50_inception_preprocessed/model.ckpt-5136169' - -def get_default_args(): - robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120, - camera_elevation_degree=-15) - - camera_param = utils.Foo(width=225, height=225, z_near=0.05, z_far=20.0, - fov=60., modalities=['rgb', 'depth']) - - env = utils.Foo(padding=10, resolution=5, num_point_threshold=2, - valid_min=-10, valid_max=200, n_samples_per_face=200) - - data_augment = utils.Foo(lr_flip=0, delta_angle=1, delta_xy=4, relight=False, - relight_fast=False, structured=False) - - task_params = utils.Foo(num_actions=4, step_size=4, num_steps=0, - batch_size=32, room_seed=0, base_class='Building', - task='mapping', n_ori=6, data_augment=data_augment, - output_transform_to_global_map=False, - output_canonical_map=False, - output_incremental_transform=False, - output_free_space=False, move_type='shortest_path', - toy_problem=0) - - buildinger_args = utils.Foo(building_names=['area1_gates_wingA_floor1_westpart'], - env_class=None, robot=robot, - task_params=task_params, env=env, - camera_param=camera_param) - - solver_args = utils.Foo(seed=0, learning_rate_decay=0.1, - clip_gradient_norm=0, max_steps=120000, - initial_learning_rate=0.001, momentum=0.99, - steps_per_decay=40000, logdir=None, sync=False, - adjust_lr_sync=True, wt_decay=0.0001, - data_loss_wt=1.0, reg_loss_wt=1.0, - num_workers=1, task=0, ps_tasks=0, master='local') - - summary_args = utils.Foo(display_interval=1, test_iters=100) - - control_args = utils.Foo(train=False, test=False, - force_batchnorm_is_training_at_test=False) - - arch_args = utils.Foo(rgb_encoder='resnet_v2_50', d_encoder='resnet_v2_50') - - return utils.Foo(solver=solver_args, - summary=summary_args, control=control_args, arch=arch_args, - buildinger=buildinger_args) - -def get_vars(config_name): - vars = config_name.split('_') - if len(vars) == 1: # All data or not. 
- vars.append('noall') - if len(vars) == 2: # n_ori - vars.append('4') - logging.error('vars: %s', vars) - return vars - -def get_args_for_config(config_name): - args = get_default_args() - config_name, mode = config_name.split('+') - vars = get_vars(config_name) - - logging.info('config_name: %s, mode: %s', config_name, mode) - - args.buildinger.task_params.n_ori = int(vars[2]) - args.solver.freeze_conv = True - args.solver.pretrained_path = rgb_resnet_v2_50_path - args.buildinger.task_params.img_channels = 5 - args.solver.data_loss_wt = 0.00001 - - if vars[0] == 'v0': - None - else: - logging.error('config_name: %s undefined', config_name) - - args.buildinger.task_params.height = args.buildinger.camera_param.height - args.buildinger.task_params.width = args.buildinger.camera_param.width - args.buildinger.task_params.modalities = args.buildinger.camera_param.modalities - - if vars[1] == 'all': - args = cc.get_args_for_mode_building_all(args, mode) - elif vars[1] == 'noall': - args = cc.get_args_for_mode_building(args, mode) - - # Log the arguments - logging.error('%s', args) - return args diff --git a/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py b/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py deleted file mode 100644 index 3cc64fe59..000000000 --- a/research/cognitive_mapping_and_planning/cfgs/config_vision_baseline.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import pprint -import os -import numpy as np -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -import logging -import src.utils as utils -import cfgs.config_common as cc -import datasets.nav_env_config as nec - - -import tensorflow as tf - -FLAGS = flags.FLAGS - -get_solver_vars = cc.get_solver_vars -get_navtask_vars = cc.get_navtask_vars - - -rgb_resnet_v2_50_path = 'data/init_models/resnet_v2_50/model.ckpt-5136169' -d_resnet_v2_50_path = 'data/init_models/distill_rgb_to_d_resnet_v2_50/model.ckpt-120002' - -def get_default_args(): - summary_args = utils.Foo(display_interval=1, test_iters=26, - arop_full_summary_iters=14) - - control_args = utils.Foo(train=False, test=False, - force_batchnorm_is_training_at_test=False, - reset_rng_seed=False, only_eval_when_done=False, - test_mode=None) - return summary_args, control_args - -def get_default_baseline_args(): - batch_norm_param = {'center': True, 'scale': True, - 'activation_fn':tf.nn.relu} - arch_args = utils.Foo( - pred_neurons=[], goal_embed_neurons=[], img_embed_neurons=[], - batch_norm_param=batch_norm_param, dim_reduce_neurons=64, combine_type='', - encoder='resnet_v2_50', action_sample_type='sample', - action_sample_combine_type='one_or_other', - sample_gt_prob_type='inverse_sigmoid_decay', dagger_sample_bn_false=True, - isd_k=750., use_visit_count=False, lstm_output=False, lstm_ego=False, - lstm_img=False, fc_dropout=0.0, embed_goal_for_state=False, - lstm_output_init_state_from_goal=False) - return arch_args - -def get_arch_vars(arch_str): - if arch_str == '': vals = [] - else: vals = arch_str.split('_') - - ks = ['ver', 'lstm_dim', 'dropout'] - - # Exp Ver - if len(vals) == 0: vals.append('v0') - # LSTM dimentsions - if len(vals) == 1: vals.append('lstm2048') - # Dropout - if len(vals) == 2: vals.append('noDO') - - assert(len(vals) == 3) - - vars = utils.Foo() - for k, v in zip(ks, vals): - setattr(vars, k, v) - - logging.error('arch_vars: %s', vars) - return vars - -def process_arch_str(args, arch_str): - # This function modifies args. - args.arch = get_default_baseline_args() - arch_vars = get_arch_vars(arch_str) - - args.navtask.task_params.outputs.rel_goal_loc = True - args.navtask.task_params.input_type = 'vision' - args.navtask.task_params.outputs.images = True - - if args.navtask.camera_param.modalities[0] == 'rgb': - args.solver.pretrained_path = rgb_resnet_v2_50_path - elif args.navtask.camera_param.modalities[0] == 'depth': - args.solver.pretrained_path = d_resnet_v2_50_path - else: - logging.fatal('Neither of rgb or d') - - if arch_vars.dropout == 'DO': - args.arch.fc_dropout = 0.5 - - args.tfcode = 'B' - - exp_ver = arch_vars.ver - if exp_ver == 'v0': - # Multiplicative interaction between goal loc and image features. - args.arch.combine_type = 'multiply' - args.arch.pred_neurons = [256, 256] - args.arch.goal_embed_neurons = [64, 8] - args.arch.img_embed_neurons = [1024, 512, 256*8] - - elif exp_ver == 'v1': - # Additive interaction between goal and image features. - args.arch.combine_type = 'add' - args.arch.pred_neurons = [256, 256] - args.arch.goal_embed_neurons = [64, 256] - args.arch.img_embed_neurons = [1024, 512, 256] - - elif exp_ver == 'v2': - # LSTM at the output on top of multiple interactions. 
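# (A worked note, inferred from the surrounding code: v2 keeps the v0
# multiplicative combination and adds an output LSTM whose width is parsed
# from arch_vars.lstm_dim below, e.g. the default 'lstm2048' gives
# int('lstm2048'[4:]) == 2048.)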
- args.arch.combine_type = 'multiply' - args.arch.goal_embed_neurons = [64, 8] - args.arch.img_embed_neurons = [1024, 512, 256*8] - args.arch.lstm_output = True - args.arch.lstm_output_dim = int(arch_vars.lstm_dim[4:]) - args.arch.pred_neurons = [256] # The other is inside the LSTM. - - elif exp_ver == 'v0blind': - # LSTM only on the goal location. - args.arch.combine_type = 'goalonly' - args.arch.goal_embed_neurons = [64, 256] - args.arch.img_embed_neurons = [2] # I dont know what it will do otherwise. - args.arch.lstm_output = True - args.arch.lstm_output_dim = 256 - args.arch.pred_neurons = [256] # The other is inside the LSTM. - - else: - logging.fatal('exp_ver: %s undefined', exp_ver) - assert(False) - - # Log the arguments - logging.error('%s', args) - return args - -def get_args_for_config(config_name): - args = utils.Foo() - - args.summary, args.control = get_default_args() - - exp_name, mode_str = config_name.split('+') - arch_str, solver_str, navtask_str = exp_name.split('.') - logging.error('config_name: %s', config_name) - logging.error('arch_str: %s', arch_str) - logging.error('navtask_str: %s', navtask_str) - logging.error('solver_str: %s', solver_str) - logging.error('mode_str: %s', mode_str) - - args.solver = cc.process_solver_str(solver_str) - args.navtask = cc.process_navtask_str(navtask_str) - - args = process_arch_str(args, arch_str) - args.arch.isd_k = args.solver.isd_k - - # Train, test, etc. - mode, imset = mode_str.split('_') - args = cc.adjust_args_for_mode(args, mode) - args.navtask.building_names = args.navtask.dataset.get_split(imset) - args.control.test_name = '{:s}_on_{:s}'.format(mode, imset) - - # Log the arguments - logging.error('%s', args) - return args diff --git a/research/cognitive_mapping_and_planning/data/.gitignore b/research/cognitive_mapping_and_planning/data/.gitignore deleted file mode 100644 index 2b6d5e466..000000000 --- a/research/cognitive_mapping_and_planning/data/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -stanford_building_parser_dataset_raw -stanford_building_parser_dataset -init_models diff --git a/research/cognitive_mapping_and_planning/data/README.md b/research/cognitive_mapping_and_planning/data/README.md deleted file mode 100644 index a89283453..000000000 --- a/research/cognitive_mapping_and_planning/data/README.md +++ /dev/null @@ -1,33 +0,0 @@ -This directory contains the data needed for training and benchmarking various -navigation models. - -1. Download the data from the [dataset website] - (http://buildingparser.stanford.edu/dataset.html). - 1. [Raw meshes](https://goo.gl/forms/2YSPaO2UKmn5Td5m2). We need the meshes - which are in the noXYZ folder. Download the tar files and place them in - the `stanford_building_parser_dataset_raw` folder. You need to download - `area_1_noXYZ.tar`, `area_3_noXYZ.tar`, `area_5a_noXYZ.tar`, - `area_5b_noXYZ.tar`, `area_6_noXYZ.tar` for training and - `area_4_noXYZ.tar` for evaluation. - 2. [Annotations](https://goo.gl/forms/4SoGp4KtH1jfRqEj2) for setting up - tasks. We will need the file called `Stanford3dDataset_v1.2.zip`. Place - the file in the directory `stanford_building_parser_dataset_raw`. - -2. Preprocess the data. - 1. Extract meshes using `scripts/script_preprocess_meshes_S3DIS.sh`. After - this `ls data/stanford_building_parser_dataset/mesh` should have 6 - folders `area1`, `area3`, `area4`, `area5a`, `area5b`, `area6`, with - textures and obj files within each directory. - 2. 
Extract room information and semantics from the zip file using
-    `scripts/script_preprocess_annoations_S3DIS.sh`. After this there should
-    be `room-dimension` and `class-maps` folders in
-    `data/stanford_building_parser_dataset`. (If this script crashes with an
-    exception in np.loadtxt while processing
-    `Area_5/office_19/Annotations/ceiling_1.txt`, there is a special
-    character on line 323474 that should be removed manually.)
-
-3. Download the ImageNet pre-trained models. We used ResNet-v2-50 for
-   representing images. For RGB images this is pre-trained on ImageNet. For
-   depth images we [distill](https://arxiv.org/abs/1507.00448) the RGB model
-   to depth images using paired RGB-D images. Both these models are available
-   through `scripts/script_download_init_models.sh`.
diff --git a/research/cognitive_mapping_and_planning/datasets/__init__.py b/research/cognitive_mapping_and_planning/datasets/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/cognitive_mapping_and_planning/datasets/factory.py b/research/cognitive_mapping_and_planning/datasets/factory.py
deleted file mode 100644
index 3f7b5c0a6..000000000
--- a/research/cognitive_mapping_and_planning/datasets/factory.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""Wrapper for selecting the navigation environment that we want to train and
-test on.
-""" -import numpy as np -import os, glob -import platform - -import logging -from tensorflow.python.platform import app -from tensorflow.python.platform import flags - -import render.swiftshader_renderer as renderer -import src.file_utils as fu -import src.utils as utils - -def get_dataset(dataset_name): - if dataset_name == 'sbpd': - dataset = StanfordBuildingParserDataset(dataset_name) - else: - logging.fatal('Not one of sbpd') - return dataset - -class Loader(): - def get_data_dir(): - pass - - def get_meta_data(self, file_name, data_dir=None): - if data_dir is None: - data_dir = self.get_data_dir() - full_file_name = os.path.join(data_dir, 'meta', file_name) - assert(fu.exists(full_file_name)), \ - '{:s} does not exist'.format(full_file_name) - ext = os.path.splitext(full_file_name)[1] - if ext == '.txt': - ls = [] - with fu.fopen(full_file_name, 'r') as f: - for l in f: - ls.append(l.rstrip()) - elif ext == '.pkl': - ls = utils.load_variables(full_file_name) - return ls - - def load_building(self, name, data_dir=None): - if data_dir is None: - data_dir = self.get_data_dir() - out = {} - out['name'] = name - out['data_dir'] = data_dir - out['room_dimension_file'] = os.path.join(data_dir, 'room-dimension', - name+'.pkl') - out['class_map_folder'] = os.path.join(data_dir, 'class-maps') - return out - - def load_building_meshes(self, building): - dir_name = os.path.join(building['data_dir'], 'mesh', building['name']) - mesh_file_name = glob.glob1(dir_name, '*.obj')[0] - mesh_file_name_full = os.path.join(dir_name, mesh_file_name) - logging.error('Loading building from obj file: %s', mesh_file_name_full) - shape = renderer.Shape(mesh_file_name_full, load_materials=True, - name_prefix=building['name']+'_') - return [shape] - -class StanfordBuildingParserDataset(Loader): - def __init__(self, ver): - self.ver = ver - self.data_dir = None - - def get_data_dir(self): - if self.data_dir is None: - self.data_dir = 'data/stanford_building_parser_dataset/' - return self.data_dir - - def get_benchmark_sets(self): - return self._get_benchmark_sets() - - def get_split(self, split_name): - if self.ver == 'sbpd': - return self._get_split(split_name) - else: - logging.fatal('Unknown version.') - - def _get_benchmark_sets(self): - sets = ['train1', 'val', 'test'] - return sets - - def _get_split(self, split_name): - train = ['area1', 'area5a', 'area5b', 'area6'] - train1 = ['area1'] - val = ['area3'] - test = ['area4'] - - sets = {} - sets['train'] = train - sets['train1'] = train1 - sets['val'] = val - sets['test'] = test - sets['all'] = sorted(list(set(train + val + test))) - return sets[split_name] diff --git a/research/cognitive_mapping_and_planning/datasets/nav_env.py b/research/cognitive_mapping_and_planning/datasets/nav_env.py deleted file mode 100644 index 5710e26dc..000000000 --- a/research/cognitive_mapping_and_planning/datasets/nav_env.py +++ /dev/null @@ -1,1465 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -r"""Navidation Environment. Includes the following classes along with some -helper functions. - Building: Loads buildings, computes traversibility, exposes functionality for - rendering images. - - GridWorld: Base class which implements functionality for moving an agent on a - grid world. - - NavigationEnv: Base class which generates navigation problems on a grid world. - - VisualNavigationEnv: Builds upon NavigationEnv and Building to provide - interface that is used externally to train the agent. - - MeshMapper: Class used for distilling the model, testing the mapper. - - BuildingMultiplexer: Wrapper class that instantiates a VisualNavigationEnv for - each building and multiplexes between them as needed. -""" - -import numpy as np -import os -import re -import matplotlib.pyplot as plt - -import graph_tool as gt -import graph_tool.topology - -from tensorflow.python.platform import gfile -import logging -import src.file_utils as fu -import src.utils as utils -import src.graph_utils as gu -import src.map_utils as mu -import src.depth_utils as du -import render.swiftshader_renderer as sru -from render.swiftshader_renderer import SwiftshaderRenderer -import cv2 - -label_nodes_with_class = gu.label_nodes_with_class -label_nodes_with_class_geodesic = gu.label_nodes_with_class_geodesic -get_distance_node_list = gu.get_distance_node_list -convert_to_graph_tool = gu.convert_to_graph_tool -generate_graph = gu.generate_graph -get_hardness_distribution = gu.get_hardness_distribution -rng_next_goal_rejection_sampling = gu.rng_next_goal_rejection_sampling -rng_next_goal = gu.rng_next_goal -rng_room_to_room = gu.rng_room_to_room -rng_target_dist_field = gu.rng_target_dist_field - -compute_traversibility = mu.compute_traversibility -make_map = mu.make_map -resize_maps = mu.resize_maps -pick_largest_cc = mu.pick_largest_cc -get_graph_origin_loc = mu.get_graph_origin_loc -generate_egocentric_maps = mu.generate_egocentric_maps -generate_goal_images = mu.generate_goal_images -get_map_to_predict = mu.get_map_to_predict - -bin_points = du.bin_points -make_geocentric = du.make_geocentric -get_point_cloud_from_z = du.get_point_cloud_from_z -get_camera_matrix = du.get_camera_matrix - -def _get_semantic_maps(folder_name, building_name, map, flip): - # Load file from the cache. - file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl' - file_name = file_name.format(building_name, map.size[0], map.size[1], - map.origin[0], map.origin[1], map.resolution, - flip) - file_name = os.path.join(folder_name, file_name) - logging.info('Loading semantic maps from %s.', file_name) - - if fu.exists(file_name): - a = utils.load_variables(file_name) - maps = a['maps'] #HxWx#C - cats = a['cats'] - else: - logging.error('file_name: %s not found.', file_name) - maps = None - cats = None - return maps, cats - -def _select_classes(all_maps, all_cats, cats_to_use): - inds = [] - for c in cats_to_use: - ind = all_cats.index(c) - inds.append(ind) - out_maps = all_maps[:,:,inds] - return out_maps - -def _get_room_dimensions(file_name, resolution, origin, flip=False): - if fu.exists(file_name): - a = utils.load_variables(file_name)['room_dimension'] - names = a.keys() - dims = np.concatenate(a.values(), axis=0).reshape((-1,6)) - ind = np.argsort(names) - dims = dims[ind,:] - names = [names[x] for x in ind] - if flip: - dims_new = dims*1 - dims_new[:,1] = -dims[:,4] - dims_new[:,4] = -dims[:,1] - dims = dims_new*1 - - dims = dims*100. 
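# (Presumably a meters-to-centimeters conversion; the origin shift and the
# division by resolution below then express the room corners in map cells,
# e.g. with the 5 cm resolution used elsewhere in these configs, a corner at
# 3.2 m lands at (320 - origin)/5 cells.)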
- dims[:,0] = dims[:,0] - origin[0] - dims[:,1] = dims[:,1] - origin[1] - dims[:,3] = dims[:,3] - origin[0] - dims[:,4] = dims[:,4] - origin[1] - dims = dims / resolution - out = {'names': names, 'dims': dims} - else: - out = None - return out - -def _filter_rooms(room_dims, room_regex): - pattern = re.compile(room_regex) - ind = [] - for i, name in enumerate(room_dims['names']): - if pattern.match(name): - ind.append(i) - new_room_dims = {} - new_room_dims['names'] = [room_dims['names'][i] for i in ind] - new_room_dims['dims'] = room_dims['dims'][ind,:]*1 - return new_room_dims - -def _label_nodes_with_room_id(xyt, room_dims): - # Label the room with the ID into things. - node_room_id = -1*np.ones((xyt.shape[0], 1)) - dims = room_dims['dims'] - for x, name in enumerate(room_dims['names']): - all_ = np.concatenate((xyt[:,[0]] >= dims[x,0], - xyt[:,[0]] <= dims[x,3], - xyt[:,[1]] >= dims[x,1], - xyt[:,[1]] <= dims[x,4]), axis=1) - node_room_id[np.all(all_, axis=1), 0] = x - return node_room_id - -def get_path_ids(start_node_id, end_node_id, pred_map): - id = start_node_id - path = [id] - while id != end_node_id: - id = pred_map[id] - path.append(id) - return path - -def image_pre(images, modalities): - # Assumes images are ...xHxWxC. - # We always assume images are RGB followed by Depth. - if 'depth' in modalities: - d = images[...,-1][...,np.newaxis]*1. - d[d < 0.01] = np.NaN; isnan = np.isnan(d); - d = 100./d; d[isnan] = 0.; - images = np.concatenate((images[...,:-1], d, isnan), axis=images.ndim-1) - if 'rgb' in modalities: - images[...,:3] = images[...,:3]*1. - 128 - return images - -def _get_relative_goal_loc(goal_loc, loc, theta): - r = np.sqrt(np.sum(np.square(goal_loc - loc), axis=1)) - t = np.arctan2(goal_loc[:,1] - loc[:,1], goal_loc[:,0] - loc[:,0]) - t = t-theta[:,0] + np.pi/2 - return np.expand_dims(r,axis=1), np.expand_dims(t, axis=1) - -def _gen_perturbs(rng, batch_size, num_steps, lr_flip, delta_angle, delta_xy, - structured): - perturbs = [] - for i in range(batch_size): - # Doing things one by one for each episode in this batch. This way this - # remains replicatable even when we change the batch size. - p = np.zeros((num_steps+1, 4)) - if lr_flip: - # Flip the whole trajectory. - p[:,3] = rng.rand(1)-0.5 - if delta_angle > 0: - if structured: - p[:,2] = (rng.rand(1)-0.5)* delta_angle - else: - p[:,2] = (rng.rand(p.shape[0])-0.5)* delta_angle - if delta_xy > 0: - if structured: - p[:,:2] = (rng.rand(1, 2)-0.5)*delta_xy - else: - p[:,:2] = (rng.rand(p.shape[0], 2)-0.5)*delta_xy - perturbs.append(p) - return perturbs - -def get_multiplexer_class(args, task_number): - assert(args.task_params.base_class == 'Building') - logging.info('Returning BuildingMultiplexer') - R = BuildingMultiplexer(args, task_number) - return R - -class GridWorld(): - def __init__(self): - """Class members that will be assigned by any class that actually uses this - class.""" - self.restrict_to_largest_cc = None - self.robot = None - self.env = None - self.category_list = None - self.traversible = None - - def get_loc_axis(self, node, delta_theta, perturb=None): - """Based on the node orientation returns X, and Y axis. Used to sample the - map in egocentric coordinate frame. 
- """ - if type(node) == tuple: - node = np.array([node]) - if perturb is None: - perturb = np.zeros((node.shape[0], 4)) - xyt = self.to_actual_xyt_vec(node) - x = xyt[:,[0]] + perturb[:,[0]] - y = xyt[:,[1]] + perturb[:,[1]] - t = xyt[:,[2]] + perturb[:,[2]] - theta = t*delta_theta - loc = np.concatenate((x,y), axis=1) - x_axis = np.concatenate((np.cos(theta), np.sin(theta)), axis=1) - y_axis = np.concatenate((np.cos(theta+np.pi/2.), np.sin(theta+np.pi/2.)), - axis=1) - # Flip the sampled map where need be. - y_axis[np.where(perturb[:,3] > 0)[0], :] *= -1. - return loc, x_axis, y_axis, theta - - def to_actual_xyt(self, pqr): - """Converts from node to location on the map.""" - (p, q, r) = pqr - if self.task.n_ori == 6: - out = (p - q * 0.5 + self.task.origin_loc[0], - q * np.sqrt(3.) / 2. + self.task.origin_loc[1], r) - elif self.task.n_ori == 4: - out = (p + self.task.origin_loc[0], - q + self.task.origin_loc[1], r) - return out - - def to_actual_xyt_vec(self, pqr): - """Converts from node array to location array on the map.""" - p = pqr[:,0][:, np.newaxis] - q = pqr[:,1][:, np.newaxis] - r = pqr[:,2][:, np.newaxis] - if self.task.n_ori == 6: - out = np.concatenate((p - q * 0.5 + self.task.origin_loc[0], - q * np.sqrt(3.) / 2. + self.task.origin_loc[1], - r), axis=1) - elif self.task.n_ori == 4: - out = np.concatenate((p + self.task.origin_loc[0], - q + self.task.origin_loc[1], - r), axis=1) - return out - - def raw_valid_fn_vec(self, xyt): - """Returns if the given set of nodes is valid or not.""" - height = self.traversible.shape[0] - width = self.traversible.shape[1] - x = np.round(xyt[:,[0]]).astype(np.int32) - y = np.round(xyt[:,[1]]).astype(np.int32) - is_inside = np.all(np.concatenate((x >= 0, y >= 0, - x < width, y < height), axis=1), axis=1) - x = np.minimum(np.maximum(x, 0), width-1) - y = np.minimum(np.maximum(y, 0), height-1) - ind = np.ravel_multi_index((y,x), self.traversible.shape) - is_traversible = self.traversible.ravel()[ind] - - is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), - axis=1), axis=1) - return is_valid - - - def valid_fn_vec(self, pqr): - """Returns if the given set of nodes is valid or not.""" - xyt = self.to_actual_xyt_vec(np.array(pqr)) - height = self.traversible.shape[0] - width = self.traversible.shape[1] - x = np.round(xyt[:,[0]]).astype(np.int32) - y = np.round(xyt[:,[1]]).astype(np.int32) - is_inside = np.all(np.concatenate((x >= 0, y >= 0, - x < width, y < height), axis=1), axis=1) - x = np.minimum(np.maximum(x, 0), width-1) - y = np.minimum(np.maximum(y, 0), height-1) - ind = np.ravel_multi_index((y,x), self.traversible.shape) - is_traversible = self.traversible.ravel()[ind] - - is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), - axis=1), axis=1) - return is_valid - - def get_feasible_actions(self, node_ids): - """Returns the feasible set of actions from the current node.""" - a = np.zeros((len(node_ids), self.task_params.num_actions), dtype=np.int32) - gtG = self.task.gtG - next_node = [] - for i, c in enumerate(node_ids): - neigh = gtG.vertex(c).out_neighbours() - neigh_edge = gtG.vertex(c).out_edges() - nn = {} - for n, e in zip(neigh, neigh_edge): - _ = gtG.ep['action'][e] - a[i,_] = 1 - nn[_] = int(n) - next_node.append(nn) - return a, next_node - - def take_action(self, current_node_ids, action): - """Returns the new node after taking the action action. 
Stays at the current - node if the action is invalid.""" - actions, next_node_ids = self.get_feasible_actions(current_node_ids) - new_node_ids = [] - for i, (c,a) in enumerate(zip(current_node_ids, action)): - if actions[i,a] == 1: - new_node_ids.append(next_node_ids[i][a]) - else: - new_node_ids.append(c) - return new_node_ids - - def set_r_obj(self, r_obj): - """Sets the SwiftshaderRenderer object used for rendering.""" - self.r_obj = r_obj - -class Building(GridWorld): - def __init__(self, building_name, robot, env, - category_list=None, small=False, flip=False, logdir=None, - building_loader=None): - - self.restrict_to_largest_cc = True - self.robot = robot - self.env = env - self.logdir = logdir - - # Load the building meta data. - building = building_loader.load_building(building_name) - if small: - building['mesh_names'] = building['mesh_names'][:5] - - # New code. - shapess = building_loader.load_building_meshes(building) - if flip: - for shapes in shapess: - shapes.flip_shape() - - vs = [] - for shapes in shapess: - vs.append(shapes.get_vertices()[0]) - vs = np.concatenate(vs, axis=0) - map = make_map(env.padding, env.resolution, vertex=vs, sc=100.) - map = compute_traversibility( - map, robot.base, robot.height, robot.radius, env.valid_min, - env.valid_max, env.num_point_threshold, shapess=shapess, sc=100., - n_samples_per_face=env.n_samples_per_face) - - room_dims = _get_room_dimensions(building['room_dimension_file'], - env.resolution, map.origin, flip=flip) - class_maps, class_map_names = _get_semantic_maps( - building['class_map_folder'], building_name, map, flip) - - self.class_maps = class_maps - self.class_map_names = class_map_names - self.building = building - self.shapess = shapess - self.map = map - self.traversible = map.traversible*1 - self.building_name = building_name - self.room_dims = room_dims - self.flipped = flip - self.renderer_entitiy_ids = [] - - if self.restrict_to_largest_cc: - self.traversible = pick_largest_cc(self.traversible) - - def load_building_into_scene(self): - # Loads the scene. - self.renderer_entitiy_ids += self.r_obj.load_shapes(self.shapess) - # Free up memory, we dont need the mesh or the materials anymore. - self.shapess = None - - def add_entity_at_nodes(self, nodes, height, shape): - xyt = self.to_actual_xyt_vec(nodes) - nxy = xyt[:,:2]*1. - nxy = nxy * self.map.resolution - nxy = nxy + self.map.origin - Ts = np.concatenate((nxy, nxy[:,:1]), axis=1) - Ts[:,2] = height; Ts = Ts / 100.; - - # Merge all the shapes into a single shape and add that shape. 
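# (Ts stacks one (x, y, z) placement per node; nxy above is in the map's
# centimeter units (sc=100. in make_map), so the division by 100 plausibly
# converts the placements to meters for the renderer, with z fixed at the
# requested height.)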
- shape.replicate_shape(Ts) - entity_ids = self.r_obj.load_shapes([shape]) - self.renderer_entitiy_ids += entity_ids - return entity_ids - - def add_shapes(self, shapes): - scene = self.r_obj.viz.scene() - for shape in shapes: - scene.AddShape(shape) - - def add_materials(self, materials): - scene = self.r_obj.viz.scene() - for material in materials: - scene.AddOrUpdateMaterial(material) - - def set_building_visibility(self, visibility): - self.r_obj.set_entity_visible(self.renderer_entitiy_ids, visibility) - - def render_nodes(self, nodes, perturb=None, aux_delta_theta=0.): - self.set_building_visibility(True) - if perturb is None: - perturb = np.zeros((len(nodes), 4)) - - imgs = [] - r = 2 - elevation_z = r * np.tan(np.deg2rad(self.robot.camera_elevation_degree)) - - for i in range(len(nodes)): - xyt = self.to_actual_xyt(nodes[i]) - lookat_theta = 3.0 * np.pi / 2.0 - (xyt[2]+perturb[i,2]+aux_delta_theta) * (self.task.delta_theta) - nxy = np.array([xyt[0]+perturb[i,0], xyt[1]+perturb[i,1]]).reshape(1, -1) - nxy = nxy * self.map.resolution - nxy = nxy + self.map.origin - camera_xyz = np.zeros((1, 3)) - camera_xyz[...] = [nxy[0, 0], nxy[0, 1], self.robot.sensor_height] - camera_xyz = camera_xyz / 100. - lookat_xyz = np.array([-r * np.sin(lookat_theta), - -r * np.cos(lookat_theta), elevation_z]) - lookat_xyz = lookat_xyz + camera_xyz[0, :] - self.r_obj.position_camera(camera_xyz[0, :].tolist(), - lookat_xyz.tolist(), [0.0, 0.0, 1.0]) - img = self.r_obj.render(take_screenshot=True, output_type=0) - img = [x for x in img if x is not None] - img = np.concatenate(img, axis=2).astype(np.float32) - if perturb[i,3]>0: - img = img[:,::-1,:] - imgs.append(img) - - self.set_building_visibility(False) - return imgs - - -class MeshMapper(Building): - def __init__(self, robot, env, task_params, building_name, category_list, - flip, logdir=None, building_loader=None): - Building.__init__(self, building_name, robot, env, category_list, - small=task_params.toy_problem, flip=flip, logdir=logdir, - building_loader=building_loader) - self.task_params = task_params - self.task = None - self._preprocess_for_task(self.task_params.building_seed) - - def _preprocess_for_task(self, seed): - if self.task is None or self.task.seed != seed: - rng = np.random.RandomState(seed) - origin_loc = get_graph_origin_loc(rng, self.traversible) - self.task = utils.Foo(seed=seed, origin_loc=origin_loc, - n_ori=self.task_params.n_ori) - G = generate_graph(self.valid_fn_vec, - self.task_params.step_size, self.task.n_ori, - (0, 0, 0)) - gtG, nodes, nodes_to_id = convert_to_graph_tool(G) - self.task.gtG = gtG - self.task.nodes = nodes - self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) 
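# (Worked value: with the navtask default n_ori=4 from get_navtask_vars,
# delta_theta = 2*pi/4 = pi/2, i.e. adjacent orientations are 90 degrees
# apart.)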
- self.task.nodes_to_id = nodes_to_id - logging.info('Building %s, #V=%d, #E=%d', self.building_name, - self.task.nodes.shape[0], self.task.gtG.num_edges()) - - if self.logdir is not None: - write_traversible = cv2.applyColorMap(self.traversible.astype(np.uint8)*255, cv2.COLORMAP_JET) - img_path = os.path.join(self.logdir, - '{:s}_{:d}_graph.png'.format(self.building_name, - seed)) - node_xyt = self.to_actual_xyt_vec(self.task.nodes) - plt.set_cmap('jet'); - fig, ax = utils.subplot(plt, (1,1), (12,12)) - ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') - ax.imshow(self.traversible, origin='lower'); - ax.set_axis_off(); ax.axis('equal'); - ax.set_title('{:s}, {:d}, {:d}'.format(self.building_name, - self.task.nodes.shape[0], - self.task.gtG.num_edges())) - if self.room_dims is not None: - for i, r in enumerate(self.room_dims['dims']*1): - min_ = r[:3]*1 - max_ = r[3:]*1 - xmin, ymin, zmin = min_ - xmax, ymax, zmax = max_ - - ax.plot([xmin, xmax, xmax, xmin, xmin], - [ymin, ymin, ymax, ymax, ymin], 'g') - with fu.fopen(img_path, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - plt.close(fig) - - - def _gen_rng(self, rng): - # instances is a list of list of node_ids. - if self.task_params.move_type == 'circle': - _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, - self.task.gtG, rng, 0, 1, - compute_path=True) - instances_ = paths - - instances = [] - for instance_ in instances_: - instance = instance_ - for i in range(self.task_params.num_steps): - instance.append(self.take_action([instance[-1]], [1])[0]) - instances.append(instance) - - elif self.task_params.move_type == 'shortest_path': - _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, - self.task.gtG, rng, - self.task_params.num_steps, - self.task_params.num_steps+1, - compute_path=True) - instances = paths - - elif self.task_params.move_type == 'circle+forward': - _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, - self.task.gtG, rng, 0, 1, - compute_path=True) - instances_ = paths - instances = [] - for instance_ in instances_: - instance = instance_ - for i in range(self.task_params.n_ori-1): - instance.append(self.take_action([instance[-1]], [1])[0]) - while len(instance) <= self.task_params.num_steps: - while self.take_action([instance[-1]], [3])[0] == instance[-1] and len(instance) <= self.task_params.num_steps: - instance.append(self.take_action([instance[-1]], [2])[0]) - if len(instance) <= self.task_params.num_steps: - instance.append(self.take_action([instance[-1]], [3])[0]) - instances.append(instance) - - # Do random perturbation if needed. - perturbs = _gen_perturbs(rng, self.task_params.batch_size, - self.task_params.num_steps, - self.task_params.data_augment.lr_flip, - self.task_params.data_augment.delta_angle, - self.task_params.data_augment.delta_xy, - self.task_params.data_augment.structured) - return instances, perturbs - - def worker(self, instances, perturbs): - # Output the images and the free space. - - # Make the instances be all the same length. 
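# (A hypothetical trace of the padding below: with num_steps=3, an instance
# [a, b] becomes [a, b, b, b] by repeating its last node, i.e. num_steps+1
# entries; perturb arrays shorter than num_steps+1 rows are likewise padded
# with their last row.)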
- for i in range(len(instances)): - for j in range(self.task_params.num_steps - len(instances[i]) + 1): - instances[i].append(instances[i][-1]) - if perturbs[i].shape[0] < self.task_params.num_steps+1: - p = np.zeros((self.task_params.num_steps+1, 4)) - p[:perturbs[i].shape[0], :] = perturbs[i] - p[perturbs[i].shape[0]:, :] = perturbs[i][-1,:] - perturbs[i] = p - - instances_ = [] - for instance in instances: - instances_ = instances_ + instance - perturbs_ = np.concatenate(perturbs, axis=0) - - instances_nodes = self.task.nodes[instances_,:] - instances_nodes = [tuple(x) for x in instances_nodes] - - imgs_ = self.render_nodes(instances_nodes, perturbs_) - imgs = []; next = 0; - for instance in instances: - img_i = [] - for _ in instance: - img_i.append(imgs_[next]) - next = next+1 - imgs.append(img_i) - imgs = np.array(imgs) - - # Render out the maps in the egocentric view for all nodes and not just the - # last node. - all_nodes = [] - for x in instances: - all_nodes = all_nodes + x - all_perturbs = np.concatenate(perturbs, axis=0) - loc, x_axis, y_axis, theta = self.get_loc_axis( - self.task.nodes[all_nodes, :]*1, delta_theta=self.task.delta_theta, - perturb=all_perturbs) - fss = None - valids = None - loc_on_map = None - theta_on_map = None - cum_fs = None - cum_valid = None - incremental_locs = None - incremental_thetas = None - - if self.task_params.output_free_space: - fss, valids = get_map_to_predict(loc, x_axis, y_axis, - map=self.traversible*1., - map_size=self.task_params.map_size) - fss = np.array(fss) > 0.5 - fss = np.reshape(fss, [self.task_params.batch_size, - self.task_params.num_steps+1, - self.task_params.map_size, - self.task_params.map_size]) - valids = np.reshape(np.array(valids), fss.shape) - - if self.task_params.output_transform_to_global_map: - # Output the transform to the global map. - loc_on_map = np.reshape(loc*1, [self.task_params.batch_size, - self.task_params.num_steps+1, -1]) - # Converting to location wrt to first location so that warping happens - # properly. - theta_on_map = np.reshape(theta*1, [self.task_params.batch_size, - self.task_params.num_steps+1, -1]) - - if self.task_params.output_incremental_transform: - # Output the transform to the global map. 
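# (The two sums below implement a 2-D rotation of each world-frame step
# (dx, dy) into the agent's egocentric frame, with t the shifted heading used
# in the code: x' = dx*cos(t) + dy*sin(t) and
# y' = dx*cos(t + pi/2) + dy*sin(t + pi/2) = -dx*sin(t) + dy*cos(t).)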
- incremental_locs_ = np.reshape(loc*1, [self.task_params.batch_size, - self.task_params.num_steps+1, -1]) - incremental_locs_[:,1:,:] -= incremental_locs_[:,:-1,:] - t0 = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, - self.task_params.num_steps+1, -1]) - t = t0*1 - incremental_locs = incremental_locs_*1 - incremental_locs[:,:,0] = np.sum(incremental_locs_ * np.concatenate((np.cos(t), np.sin(t)), axis=-1), axis=-1) - incremental_locs[:,:,1] = np.sum(incremental_locs_ * np.concatenate((np.cos(t+np.pi/2), np.sin(t+np.pi/2)), axis=-1), axis=-1) - incremental_locs[:,0,:] = incremental_locs_[:,0,:] - # print incremental_locs_[0,:,:], incremental_locs[0,:,:], t0[0,:,:] - - incremental_thetas = np.reshape(theta*1, [self.task_params.batch_size, - self.task_params.num_steps+1, - -1]) - incremental_thetas[:,1:,:] += -incremental_thetas[:,:-1,:] - - if self.task_params.output_canonical_map: - loc_ = loc[0::(self.task_params.num_steps+1), :] - x_axis = np.zeros_like(loc_); x_axis[:,1] = 1 - y_axis = np.zeros_like(loc_); y_axis[:,0] = -1 - cum_fs, cum_valid = get_map_to_predict(loc_, x_axis, y_axis, - map=self.traversible*1., - map_size=self.task_params.map_size) - cum_fs = np.array(cum_fs) > 0.5 - cum_fs = np.reshape(cum_fs, [self.task_params.batch_size, 1, - self.task_params.map_size, - self.task_params.map_size]) - cum_valid = np.reshape(np.array(cum_valid), cum_fs.shape) - - - inputs = {'fs_maps': fss, - 'valid_maps': valids, - 'imgs': imgs, - 'loc_on_map': loc_on_map, - 'theta_on_map': theta_on_map, - 'cum_fs_maps': cum_fs, - 'cum_valid_maps': cum_valid, - 'incremental_thetas': incremental_thetas, - 'incremental_locs': incremental_locs} - return inputs - - def pre(self, inputs): - inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities) - if inputs['loc_on_map'] is not None: - inputs['loc_on_map'] = inputs['loc_on_map'] - inputs['loc_on_map'][:,[0],:] - if inputs['theta_on_map'] is not None: - inputs['theta_on_map'] = np.pi/2. 
- inputs['theta_on_map'] - return inputs - -def _nav_env_reset_helper(type, rng, nodes, batch_size, gtG, max_dist, - num_steps, num_goals, data_augment, **kwargs): - """Generates and returns a new episode.""" - max_compute = max_dist + 4*num_steps - if type == 'general': - start_node_ids, end_node_ids, dist, pred_map, paths = \ - rng_target_dist_field(batch_size, gtG, rng, max_dist, max_compute, - nodes=nodes, compute_path=False) - target_class = None - - elif type == 'room_to_room_many': - goal_node_ids = []; dists = []; - node_room_ids = kwargs['node_room_ids'] - # Sample the first one - start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( - batch_size, gtG, rng, max_dist, max_compute, - node_room_ids=node_room_ids, nodes=nodes) - start_node_ids = start_node_ids_ - goal_node_ids.append(end_node_ids_) - dists.append(dist_) - for n in range(num_goals-1): - start_node_ids_, end_node_ids_, dist_, _, _ = rng_next_goal( - goal_node_ids[n], batch_size, gtG, rng, max_dist, - max_compute, node_room_ids=node_room_ids, nodes=nodes, - dists_from_start_node=dists[n]) - goal_node_ids.append(end_node_ids_) - dists.append(dist_) - target_class = None - - elif type == 'rng_rejection_sampling_many': - num_goals = num_goals - goal_node_ids = []; dists = []; - - n_ori = kwargs['n_ori'] - step_size = kwargs['step_size'] - min_dist = kwargs['min_dist'] - sampling_distribution = kwargs['sampling_distribution'] - target_distribution = kwargs['target_distribution'] - rejection_sampling_M = kwargs['rejection_sampling_M'] - distribution_bins = kwargs['distribution_bins'] - - for n in range(num_goals): - if n == 0: input_nodes = None - else: input_nodes = goal_node_ids[n-1] - start_node_ids_, end_node_ids_, dist_, _, _, _, _ = rng_next_goal_rejection_sampling( - input_nodes, batch_size, gtG, rng, max_dist, min_dist, - max_compute, sampling_distribution, target_distribution, nodes, - n_ori, step_size, distribution_bins, rejection_sampling_M) - if n == 0: start_node_ids = start_node_ids_ - goal_node_ids.append(end_node_ids_) - dists.append(dist_) - target_class = None - - elif type == 'room_to_room_back': - num_goals = num_goals - assert(num_goals == 2), 'num_goals must be 2.' - goal_node_ids = []; dists = []; - node_room_ids = kwargs['node_room_ids'] - # Sample the first one. - start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( - batch_size, gtG, rng, max_dist, max_compute, - node_room_ids=node_room_ids, nodes=nodes) - start_node_ids = start_node_ids_ - goal_node_ids.append(end_node_ids_) - dists.append(dist_) - - # Set second goal to be starting position, and compute distance to the start node. - goal_node_ids.append(start_node_ids) - dist = [] - for i in range(batch_size): - dist_ = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=True), - source=gtG.vertex(start_node_ids[i]), target=None) - dist_ = np.array(dist_.get_array()) - dist.append(dist_) - dists.append(dist) - target_class = None - - elif type[:14] == 'to_nearest_obj': - # Generate an episode by sampling one of the target classes (with - # probability proportional to the number of nodes in the world). - # With the sampled class sample a node that is within some distance from - # the sampled class. - class_nodes = kwargs['class_nodes'] - sampling = kwargs['sampling'] - dist_to_class = kwargs['dist_to_class'] - - assert(num_goals == 1), 'Only supports a single goal.' 
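# (In the 'uniform' branch below, cnts histograms nodes by their distance to
# the sampled class; p_each = 1/(cnts*(max_dist+1)) then gives every
# non-empty distance in {0, ..., max_dist} the same total probability after
# the final renormalization, with nodes at the same distance sampled
# uniformly.)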
- ind = rng.choice(class_nodes.shape[0], size=batch_size) - target_class = class_nodes[ind,1] - start_node_ids = []; dists = []; goal_node_ids = []; - - for t in target_class: - if sampling == 'uniform': - max_dist = max_dist - cnts = np.bincount(dist_to_class[t], minlength=max_dist+1)*1. - cnts[max_dist+1:] = 0 - p_each = 1./ cnts / (max_dist+1.) - p_each[cnts == 0] = 0 - p = p_each[dist_to_class[t]]*1.; p = p/np.sum(p) - start_node_id = rng.choice(p.shape[0], size=1, p=p)[0] - else: - logging.fatal('Sampling not one of uniform.') - start_node_ids.append(start_node_id) - dists.append(dist_to_class[t]) - # Dummy goal node, same as the start node, so that vis is better. - goal_node_ids.append(start_node_id) - dists = [dists] - goal_node_ids = [goal_node_ids] - - return start_node_ids, goal_node_ids, dists, target_class - - -class NavigationEnv(GridWorld, Building): - """Wrapper around GridWorld which sets up navigation tasks. - """ - def _debug_save_hardness(self, seed): - out_path = os.path.join(self.logdir, '{:s}_{:d}_hardness.png'.format(self.building_name, seed)) - batch_size = 4000 - rng = np.random.RandomState(0) - start_node_ids, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists = \ - rng_next_goal_rejection_sampling( - None, batch_size, self.task.gtG, rng, self.task_params.max_dist, - self.task_params.min_dist, self.task_params.max_dist, - self.task.sampling_distribution, self.task.target_distribution, - self.task.nodes, self.task_params.n_ori, self.task_params.step_size, - self.task.distribution_bins, self.task.rejection_sampling_M) - bins = self.task.distribution_bins - n_bins = self.task.n_bins - with plt.style.context('ggplot'): - fig, axes = utils.subplot(plt, (1,2), (10,10)) - ax = axes[0] - _ = ax.hist(hardnesss, bins=bins, weights=np.ones_like(hardnesss)/len(hardnesss)) - ax.plot(bins[:-1]+0.5/n_bins, self.task.target_distribution, 'g') - ax.plot(bins[:-1]+0.5/n_bins, self.task.sampling_distribution, 'b') - ax.grid('on') - - ax = axes[1] - _ = ax.hist(gt_dists, bins=np.arange(self.task_params.max_dist+1)) - ax.grid('on') - ax.set_title('Mean: {:0.2f}, Median: {:0.2f}'.format(np.mean(gt_dists), - np.median(gt_dists))) - with fu.fopen(out_path, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - - def _debug_save_map_nodes(self, seed): - """Saves traversible space along with nodes generated on the graph. Takes - the seed as input.""" - img_path = os.path.join(self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed)) - node_xyt = self.to_actual_xyt_vec(self.task.nodes) - plt.set_cmap('jet'); - fig, ax = utils.subplot(plt, (1,1), (12,12)) - ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') - ax.set_axis_off(); ax.axis('equal'); - - if self.room_dims is not None: - for i, r in enumerate(self.room_dims['dims']*1): - min_ = r[:3]*1 - max_ = r[3:]*1 - xmin, ymin, zmin = min_ - xmax, ymax, zmax = max_ - - ax.plot([xmin, xmax, xmax, xmin, xmin], - [ymin, ymin, ymax, ymax, ymin], 'g') - ax.imshow(self.traversible, origin='lower'); - with fu.fopen(img_path, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - - def _debug_semantic_maps(self, seed): - """Saves traversible space along with nodes generated on the graph. Takes - the seed as input.""" - for i, cls in enumerate(self.task_params.semantic_task.class_map_names): - img_path = os.path.join(self.logdir, '{:s}_flip{:d}_{:s}_graph.png'.format(self.building_name, seed, cls)) - maps = self.traversible*1. 
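# (The debug overlay below encodes free space as 1.0 and adds 0.5 where the
# dilated class map is set; (maps + 1)/3 * 255 then maps the possible levels
# 0, 0.5, 1 and 1.5 to grey values of roughly 85, 128, 170 and 213.)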
- maps += 0.5*(self.task.class_maps_dilated[:,:,i]) - write_traversible = (maps*1.+1.)/3.0 - write_traversible = (write_traversible*255.).astype(np.uint8)[:,:,np.newaxis] - write_traversible = write_traversible + np.zeros((1,1,3), dtype=np.uint8) - fu.write_image(img_path, write_traversible[::-1,:,:]) - - def _preprocess_for_task(self, seed): - """Sets up the task field for doing navigation on the grid world.""" - if self.task is None or self.task.seed != seed: - rng = np.random.RandomState(seed) - origin_loc = get_graph_origin_loc(rng, self.traversible) - self.task = utils.Foo(seed=seed, origin_loc=origin_loc, - n_ori=self.task_params.n_ori) - G = generate_graph(self.valid_fn_vec, self.task_params.step_size, - self.task.n_ori, (0, 0, 0)) - gtG, nodes, nodes_to_id = convert_to_graph_tool(G) - self.task.gtG = gtG - self.task.nodes = nodes - self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) - self.task.nodes_to_id = nodes_to_id - - logging.info('Building %s, #V=%d, #E=%d', self.building_name, - self.task.nodes.shape[0], self.task.gtG.num_edges()) - type = self.task_params.type - if type == 'general': - # Do nothing - _ = None - - elif type == 'room_to_room_many' or type == 'room_to_room_back': - if type == 'room_to_room_back': - assert(self.task_params.num_goals == 2), 'num_goals must be 2.' - - self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) - xyt = self.to_actual_xyt_vec(self.task.nodes) - self.task.node_room_ids = _label_nodes_with_room_id(xyt, self.room_dims) - self.task.reset_kwargs = {'node_room_ids': self.task.node_room_ids} - - elif type == 'rng_rejection_sampling_many': - n_bins = 20 - rejection_sampling_M = self.task_params.rejection_sampling_M - min_dist = self.task_params.min_dist - bins = np.arange(n_bins+1)/(n_bins*1.) - target_d = np.zeros(n_bins); target_d[...] = 1./n_bins; - - sampling_d = get_hardness_distribution( - self.task.gtG, self.task_params.max_dist, self.task_params.min_dist, - np.random.RandomState(0), 4000, bins, self.task.nodes, - self.task_params.n_ori, self.task_params.step_size) - - self.task.reset_kwargs = {'distribution_bins': bins, - 'target_distribution': target_d, - 'sampling_distribution': sampling_d, - 'rejection_sampling_M': rejection_sampling_M, - 'n_bins': n_bins, - 'n_ori': self.task_params.n_ori, - 'step_size': self.task_params.step_size, - 'min_dist': self.task_params.min_dist} - self.task.n_bins = n_bins - self.task.distribution_bins = bins - self.task.target_distribution = target_d - self.task.sampling_distribution = sampling_d - self.task.rejection_sampling_M = rejection_sampling_M - - if self.logdir is not None: - self._debug_save_hardness(seed) - - elif type[:14] == 'to_nearest_obj': - self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) - xyt = self.to_actual_xyt_vec(self.task.nodes) - - self.class_maps = _select_classes(self.class_maps, - self.class_map_names, - self.task_params.semantic_task.class_map_names)*1 - self.class_map_names = self.task_params.semantic_task.class_map_names - nodes_xyt = self.to_actual_xyt_vec(np.array(self.task.nodes)) - - tt = utils.Timer(); tt.tic(); - if self.task_params.type == 'to_nearest_obj_acc': - self.task.class_maps_dilated, self.task.node_class_label = label_nodes_with_class_geodesic( - nodes_xyt, self.class_maps, - self.task_params.semantic_task.pix_distance+8, self.map.traversible, - ff_cost=1., fo_cost=1., oo_cost=4., connectivity=8.) 
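# (dists below holds, for each semantic class, the geodesic distance from
# every graph node to the nearest node labeled with that class
# (direction='to'); these arrays back the dist_to_class sampling used by the
# 'to_nearest_obj' episodes in _nav_env_reset_helper above.)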
- - dists = [] - for i in range(len(self.class_map_names)): - class_nodes_ = np.where(self.task.node_class_label[:,i])[0] - dists.append(get_distance_node_list(gtG, source_nodes=class_nodes_, direction='to')) - self.task.dist_to_class = dists - a_, b_ = np.where(self.task.node_class_label) - self.task.class_nodes = np.concatenate((a_[:,np.newaxis], b_[:,np.newaxis]), axis=1) - - if self.logdir is not None: - self._debug_semantic_maps(seed) - - self.task.reset_kwargs = {'sampling': self.task_params.semantic_task.sampling, - 'class_nodes': self.task.class_nodes, - 'dist_to_class': self.task.dist_to_class} - - if self.logdir is not None: - self._debug_save_map_nodes(seed) - - def reset(self, rngs): - rng = rngs[0]; rng_perturb = rngs[1]; - nodes = self.task.nodes - tp = self.task_params - - start_node_ids, goal_node_ids, dists, target_class = \ - _nav_env_reset_helper(tp.type, rng, self.task.nodes, tp.batch_size, - self.task.gtG, tp.max_dist, tp.num_steps, - tp.num_goals, tp.data_augment, - **(self.task.reset_kwargs)) - - start_nodes = [tuple(nodes[_,:]) for _ in start_node_ids] - goal_nodes = [[tuple(nodes[_,:]) for _ in __] for __ in goal_node_ids] - data_augment = tp.data_augment - perturbs = _gen_perturbs(rng_perturb, tp.batch_size, - (tp.num_steps+1)*tp.num_goals, - data_augment.lr_flip, data_augment.delta_angle, - data_augment.delta_xy, data_augment.structured) - perturbs = np.array(perturbs) # batch x steps x 4 - end_perturbs = perturbs[:,-(tp.num_goals):,:]*1 # fixed perturb for the goal. - perturbs = perturbs[:,:-(tp.num_goals),:]*1 - - history = -np.ones((tp.batch_size, tp.num_steps*tp.num_goals), dtype=np.int32) - self.episode = utils.Foo( - start_nodes=start_nodes, start_node_ids=start_node_ids, - goal_nodes=goal_nodes, goal_node_ids=goal_node_ids, dist_to_goal=dists, - perturbs=perturbs, goal_perturbs=end_perturbs, history=history, - target_class=target_class, history_frames=[]) - return start_node_ids - - def take_action(self, current_node_ids, action, step_number): - """In addition to returning the action, also returns the reward that the - agent receives.""" - goal_number = step_number / self.task_params.num_steps - new_node_ids = GridWorld.take_action(self, current_node_ids, action) - rewards = [] - for i, n in enumerate(new_node_ids): - reward = 0 - if n == self.episode.goal_node_ids[goal_number][i]: - reward = self.task_params.reward_at_goal - reward = reward - self.task_params.reward_time_penalty - rewards.append(reward) - return new_node_ids, rewards - - - def get_optimal_action(self, current_node_ids, step_number): - """Returns the optimal action from the current node.""" - goal_number = step_number / self.task_params.num_steps - gtG = self.task.gtG - a = np.zeros((len(current_node_ids), self.task_params.num_actions), dtype=np.int32) - d_dict = self.episode.dist_to_goal[goal_number] - for i, c in enumerate(current_node_ids): - neigh = gtG.vertex(c).out_neighbours() - neigh_edge = gtG.vertex(c).out_edges() - ds = np.array([d_dict[i][int(x)] for x in neigh]) - ds_min = np.min(ds) - for i_, e in enumerate(neigh_edge): - if ds[i_] == ds_min: - _ = gtG.ep['action'][e] - a[i, _] = 1 - return a - - def get_targets(self, current_node_ids, step_number): - """Returns the target actions from the current node.""" - action = self.get_optimal_action(current_node_ids, step_number) - action = np.expand_dims(action, axis=1) - return vars(utils.Foo(action=action)) - - def get_targets_name(self): - """Returns the list of names of the targets.""" - return ['action'] - - def cleanup(self): - 
self.episode = None - -class VisualNavigationEnv(NavigationEnv): - """Class for doing visual navigation in environments. Functions for computing - features on states, etc. - """ - def __init__(self, robot, env, task_params, category_list=None, - building_name=None, flip=False, logdir=None, - building_loader=None, r_obj=None): - tt = utils.Timer() - tt.tic() - Building.__init__(self, building_name, robot, env, category_list, - small=task_params.toy_problem, flip=flip, logdir=logdir, - building_loader=building_loader) - - self.set_r_obj(r_obj) - self.task_params = task_params - self.task = None - self.episode = None - self._preprocess_for_task(self.task_params.building_seed) - if hasattr(self.task_params, 'map_scales'): - self.task.scaled_maps = resize_maps( - self.traversible.astype(np.float32)*1, self.task_params.map_scales, - self.task_params.map_resize_method) - else: - logging.fatal('VisualNavigationEnv does not support scale_f anymore.') - self.task.readout_maps_scaled = resize_maps( - self.traversible.astype(np.float32)*1, - self.task_params.readout_maps_scales, - self.task_params.map_resize_method) - tt.toc(log_at=1, log_str='VisualNavigationEnv __init__: ') - - def get_weight(self): - return self.task.nodes.shape[0] - - def get_common_data(self): - goal_nodes = self.episode.goal_nodes - start_nodes = self.episode.start_nodes - perturbs = self.episode.perturbs - goal_perturbs = self.episode.goal_perturbs - target_class = self.episode.target_class - - goal_locs = []; rel_goal_locs = []; - for i in range(len(goal_nodes)): - end_nodes = goal_nodes[i] - goal_loc, _, _, goal_theta = self.get_loc_axis( - np.array(end_nodes), delta_theta=self.task.delta_theta, - perturb=goal_perturbs[:,i,:]) - - # Compute the relative location to all goals from the starting location. - loc, _, _, theta = self.get_loc_axis(np.array(start_nodes), - delta_theta=self.task.delta_theta, - perturb=perturbs[:,0,:]) - r_goal, t_goal = _get_relative_goal_loc(goal_loc*1., loc, theta) - rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), - np.cos(goal_theta-theta), - np.sin(goal_theta-theta)), axis=1) - rel_goal_locs.append(np.expand_dims(rel_goal_loc, axis=1)) - goal_locs.append(np.expand_dims(goal_loc, axis=1)) - - map = self.traversible*1. - maps = np.repeat(np.expand_dims(np.expand_dims(map, axis=0), axis=0), - self.task_params.batch_size, axis=0)*1 - if self.task_params.type[:14] == 'to_nearest_obj': - for i in range(self.task_params.batch_size): - maps[i,0,:,:] += 0.5*(self.task.class_maps_dilated[:,:,target_class[i]]) - - rel_goal_locs = np.concatenate(rel_goal_locs, axis=1) - goal_locs = np.concatenate(goal_locs, axis=1) - maps = np.expand_dims(maps, axis=-1) - - if self.task_params.type[:14] == 'to_nearest_obj': - rel_goal_locs = np.zeros((self.task_params.batch_size, 1, - len(self.task_params.semantic_task.class_map_names)), - dtype=np.float32) - goal_locs = np.zeros((self.task_params.batch_size, 1, 2), - dtype=np.float32) - for i in range(self.task_params.batch_size): - t = target_class[i] - rel_goal_locs[i,0,t] = 1. 
- goal_locs[i,0,0] = t - goal_locs[i,0,1] = np.NaN - - return vars(utils.Foo(orig_maps=maps, goal_loc=goal_locs, - rel_goal_loc_at_start=rel_goal_locs)) - - def pre_common_data(self, inputs): - return inputs - - - def get_features(self, current_node_ids, step_number): - task_params = self.task_params - goal_number = step_number / self.task_params.num_steps - end_nodes = self.task.nodes[self.episode.goal_node_ids[goal_number],:]*1 - current_nodes = self.task.nodes[current_node_ids,:]*1 - end_perturbs = self.episode.goal_perturbs[:,goal_number,:][:,np.newaxis,:] - perturbs = self.episode.perturbs - target_class = self.episode.target_class - - # Append to history. - self.episode.history[:,step_number] = np.array(current_node_ids) - - # Render out the images from current node. - outs = {} - - if self.task_params.outputs.images: - imgs_all = [] - imgs = self.render_nodes([tuple(x) for x in current_nodes], - perturb=perturbs[:,step_number,:]) - imgs_all.append(imgs) - aux_delta_thetas = self.task_params.aux_delta_thetas - for i in range(len(aux_delta_thetas)): - imgs = self.render_nodes([tuple(x) for x in current_nodes], - perturb=perturbs[:,step_number,:], - aux_delta_theta=aux_delta_thetas[i]) - imgs_all.append(imgs) - imgs_all = np.array(imgs_all) # A x B x H x W x C - imgs_all = np.transpose(imgs_all, axes=[1,0,2,3,4]) - imgs_all = np.expand_dims(imgs_all, axis=1) # B x N x A x H x W x C - if task_params.num_history_frames > 0: - if step_number == 0: - # Append the same frame 4 times - for i in range(task_params.num_history_frames+1): - self.episode.history_frames.insert(0, imgs_all*1.) - self.episode.history_frames.insert(0, imgs_all) - self.episode.history_frames.pop() - imgs_all_with_history = np.concatenate(self.episode.history_frames, axis=2) - else: - imgs_all_with_history = imgs_all - outs['imgs'] = imgs_all_with_history # B x N x A x H x W x C - - if self.task_params.outputs.node_ids: - outs['node_ids'] = np.array(current_node_ids).reshape((-1,1,1)) - outs['perturbs'] = np.expand_dims(perturbs[:,step_number, :]*1., axis=1) - - if self.task_params.outputs.analytical_counts: - assert(self.task_params.modalities == ['depth']) - d = image_pre(outs['imgs']*1., self.task_params.modalities) - cm = get_camera_matrix(self.task_params.img_width, - self.task_params.img_height, - self.task_params.img_fov) - XYZ = get_point_cloud_from_z(100./d[...,0], cm) - XYZ = make_geocentric(XYZ*100., self.robot.sensor_height, - self.robot.camera_elevation_degree) - for i in range(len(self.task_params.analytical_counts.map_sizes)): - non_linearity = self.task_params.analytical_counts.non_linearity[i] - count, isvalid = bin_points(XYZ*1., - map_size=self.task_params.analytical_counts.map_sizes[i], - xy_resolution=self.task_params.analytical_counts.xy_resolution[i], - z_bins=self.task_params.analytical_counts.z_bins[i]) - assert(count.shape[2] == 1), 'only works for n_views equal to 1.' - count = count[:,:,0,:,:,:] - isvalid = isvalid[:,:,0,:,:,:] - if non_linearity == 'none': - None - elif non_linearity == 'min10': - count = np.minimum(count, 10.) - elif non_linearity == 'sqrt': - count = np.sqrt(count) - else: - logging.fatal('Undefined non_linearity.') - outs['analytical_counts_{:d}'.format(i)] = count - - # Compute the goal location in the cordinate frame of the robot. 
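# A sketch of what the _get_relative_goal_loc helper invoked below returns
# (shapes assumed, since the helper lives elsewhere in this file: goal_loc
# and loc are Nx2 arrays in map units, theta is Nx1 in radians): the goal's
# polar coordinates, range and bearing, in the robot's egocentric frame.
import numpy as np

def _get_relative_goal_loc_sketch(goal_loc, loc, theta):
  d = goal_loc - loc                            # world-frame offset to goal
  r = np.linalg.norm(d, axis=1, keepdims=True)  # range to goal
  t = np.arctan2(d[:, 1:2], d[:, 0:1]) - theta  # bearing relative to heading
  return r, t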
- if self.task_params.outputs.rel_goal_loc: - if self.task_params.type[:14] != 'to_nearest_obj': - loc, _, _, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - goal_loc, _, _, goal_theta = self.get_loc_axis(end_nodes, - delta_theta=self.task.delta_theta, - perturb=end_perturbs[:,0,:]) - r_goal, t_goal = _get_relative_goal_loc(goal_loc, loc, theta) - - rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), - np.cos(goal_theta-theta), - np.sin(goal_theta-theta)), axis=1) - outs['rel_goal_loc'] = np.expand_dims(rel_goal_loc, axis=1) - elif self.task_params.type[:14] == 'to_nearest_obj': - rel_goal_loc = np.zeros((self.task_params.batch_size, 1, - len(self.task_params.semantic_task.class_map_names)), - dtype=np.float32) - for i in range(self.task_params.batch_size): - t = target_class[i] - rel_goal_loc[i,0,t] = 1. - outs['rel_goal_loc'] = rel_goal_loc - - # Location on map to plot the trajectory during validation. - if self.task_params.outputs.loc_on_map: - loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - outs['loc_on_map'] = np.expand_dims(loc, axis=1) - - # Compute gt_dist to goal - if self.task_params.outputs.gt_dist_to_goal: - gt_dist_to_goal = np.zeros((len(current_node_ids), 1), dtype=np.float32) - for i, n in enumerate(current_node_ids): - gt_dist_to_goal[i,0] = self.episode.dist_to_goal[goal_number][i][n] - outs['gt_dist_to_goal'] = np.expand_dims(gt_dist_to_goal, axis=1) - - # Free space in front of you, map and goal as images. - if self.task_params.outputs.ego_maps: - loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - maps = generate_egocentric_maps(self.task.scaled_maps, - self.task_params.map_scales, - self.task_params.map_crop_sizes, loc, - x_axis, y_axis, theta) - - for i in range(len(self.task_params.map_scales)): - outs['ego_maps_{:d}'.format(i)] = \ - np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) - - if self.task_params.outputs.readout_maps: - loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - maps = generate_egocentric_maps(self.task.readout_maps_scaled, - self.task_params.readout_maps_scales, - self.task_params.readout_maps_crop_sizes, - loc, x_axis, y_axis, theta) - for i in range(len(self.task_params.readout_maps_scales)): - outs['readout_maps_{:d}'.format(i)] = \ - np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) - - # Images for the goal. 
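# A simplified, single-scale version of the goal images constructed below
# (hypothetical helper; the real generate_goal_images additionally emits one
# crop per map scale and per relative-orientation channel): a crop-sized
# grid with a one-hot pixel at the goal's egocentric location.
import numpy as np

def goal_image_sketch(crop_size, goal_dist, goal_theta):
  img = np.zeros((crop_size, crop_size), dtype=np.float32)
  x = int(round(crop_size / 2. + goal_dist * np.cos(goal_theta)))
  y = int(round(crop_size / 2. + goal_dist * np.sin(goal_theta)))
  if 0 <= x < crop_size and 0 <= y < crop_size:
    img[y, x] = 1.
  return img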
- if self.task_params.outputs.ego_goal_imgs: - if self.task_params.type[:14] != 'to_nearest_obj': - loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - goal_loc, _, _, _ = self.get_loc_axis(end_nodes, - delta_theta=self.task.delta_theta, - perturb=end_perturbs[:,0,:]) - rel_goal_orientation = np.mod( - np.int32(current_nodes[:,2:] - end_nodes[:,2:]), self.task_params.n_ori) - goal_dist, goal_theta = _get_relative_goal_loc(goal_loc, loc, theta) - goals = generate_goal_images(self.task_params.map_scales, - self.task_params.map_crop_sizes, - self.task_params.n_ori, goal_dist, - goal_theta, rel_goal_orientation) - for i in range(len(self.task_params.map_scales)): - outs['ego_goal_imgs_{:d}'.format(i)] = np.expand_dims(goals[i], axis=1) - - elif self.task_params.type[:14] == 'to_nearest_obj': - for i in range(len(self.task_params.map_scales)): - num_classes = len(self.task_params.semantic_task.class_map_names) - outs['ego_goal_imgs_{:d}'.format(i)] = np.zeros((self.task_params.batch_size, 1, - self.task_params.map_crop_sizes[i], - self.task_params.map_crop_sizes[i], - self.task_params.goal_channels)) - for i in range(self.task_params.batch_size): - t = target_class[i] - for j in range(len(self.task_params.map_scales)): - outs['ego_goal_imgs_{:d}'.format(j)][i,:,:,:,t] = 1. - - # Incremental locs and theta (for map warping), always in the original scale - # of the map, the subequent steps in the tf code scale appropriately. - # Scaling is done by just multiplying incremental_locs appropriately. - if self.task_params.outputs.egomotion: - if step_number == 0: - # Zero Ego Motion - incremental_locs = np.zeros((self.task_params.batch_size, 1, 2), dtype=np.float32) - incremental_thetas = np.zeros((self.task_params.batch_size, 1, 1), dtype=np.float32) - else: - previous_nodes = self.task.nodes[self.episode.history[:,step_number-1], :]*1 - loc, _, _, theta = self.get_loc_axis(current_nodes, - delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number,:]) - previous_loc, _, _, previous_theta = self.get_loc_axis( - previous_nodes, delta_theta=self.task.delta_theta, - perturb=perturbs[:,step_number-1,:]) - - incremental_locs_ = np.reshape(loc-previous_loc, [self.task_params.batch_size, 1, -1]) - - t = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, 1, -1]) - incremental_locs = incremental_locs_*1 - incremental_locs[:,:,0] = np.sum(incremental_locs_ * - np.concatenate((np.cos(t), np.sin(t)), - axis=-1), axis=-1) - incremental_locs[:,:,1] = np.sum(incremental_locs_ * - np.concatenate((np.cos(t+np.pi/2), - np.sin(t+np.pi/2)), - axis=-1), axis=-1) - incremental_thetas = np.reshape(theta-previous_theta, - [self.task_params.batch_size, 1, -1]) - outs['incremental_locs'] = incremental_locs - outs['incremental_thetas'] = incremental_thetas - - if self.task_params.outputs.visit_count: - # Output the visit count for this state, how many times has the current - # state been visited, and how far in the history was the last visit - # (except this one) - visit_count = np.zeros((self.task_params.batch_size, 1), dtype=np.int32) - last_visit = -np.ones((self.task_params.batch_size, 1), dtype=np.int32) - if step_number >= 1: - h = self.episode.history[:,:(step_number)] - visit_count[:,0] = np.sum(h == np.array(current_node_ids).reshape([-1,1]), - axis=1) - last_visit[:,0] = np.argmax(h[:,::-1] == np.array(current_node_ids).reshape([-1,1]), - axis=1) + 1 - last_visit[visit_count == 0] = -1 # -1 if not visited. 
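# Worked example of the bookkeeping above for one batch element: with past
# history h = [3, 7, 3] and current node 3 at step 3, visit_count comes out
# as 2, and last_visit as 1 (argmax over the reversed history finds the most
# recent prior visit, and the +1 turns the index into a steps-back count).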
-      outs['visit_count'] = np.expand_dims(visit_count, axis=1)
-      outs['last_visit'] = np.expand_dims(last_visit, axis=1)
-    return outs
-
-  def get_features_name(self):
-    f = []
-    if self.task_params.outputs.images:
-      f.append('imgs')
-    if self.task_params.outputs.rel_goal_loc:
-      f.append('rel_goal_loc')
-    if self.task_params.outputs.loc_on_map:
-      f.append('loc_on_map')
-    if self.task_params.outputs.gt_dist_to_goal:
-      f.append('gt_dist_to_goal')
-    if self.task_params.outputs.ego_maps:
-      for i in range(len(self.task_params.map_scales)):
-        f.append('ego_maps_{:d}'.format(i))
-    if self.task_params.outputs.readout_maps:
-      for i in range(len(self.task_params.readout_maps_scales)):
-        f.append('readout_maps_{:d}'.format(i))
-    if self.task_params.outputs.ego_goal_imgs:
-      for i in range(len(self.task_params.map_scales)):
-        f.append('ego_goal_imgs_{:d}'.format(i))
-    if self.task_params.outputs.egomotion:
-      f.append('incremental_locs')
-      f.append('incremental_thetas')
-    if self.task_params.outputs.visit_count:
-      f.append('visit_count')
-      f.append('last_visit')
-    if self.task_params.outputs.analytical_counts:
-      for i in range(len(self.task_params.analytical_counts.map_sizes)):
-        f.append('analytical_counts_{:d}'.format(i))
-    if self.task_params.outputs.node_ids:
-      f.append('node_ids')
-      f.append('perturbs')
-    return f
-
-  def pre_features(self, inputs):
-    if self.task_params.outputs.images:
-      inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities)
-    return inputs
-
-class BuildingMultiplexer():
-  def __init__(self, args, task_number):
-    params = vars(args)
-    for k in params.keys():
-      setattr(self, k, params[k])
-    self.task_number = task_number
-    self._pick_data(task_number)
-    logging.info('Env Class: %s.', self.env_class)
-    if self.task_params.task == 'planning':
-      self._setup_planner()
-    elif self.task_params.task == 'mapping':
-      self._setup_mapper()
-    elif self.task_params.task == 'map+plan':
-      self._setup_mapper()
-    else:
-      logging.error('Undefined task: %s', self.task_params.task)
-
-  def _pick_data(self, task_number):
-    logging.error('Input Building Names: %s', self.building_names)
-    self.flip = [np.mod(task_number / len(self.building_names), 2) == 1]
-    id = np.mod(task_number, len(self.building_names))
-    self.building_names = [self.building_names[id]]
-    self.task_params.building_seed = task_number
-    logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names)
-    self.building_names = self.building_names[0].split('+')
-    self.flip = [self.flip[0] for _ in self.building_names]
-    logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names)
-    logging.error('BuildingMultiplexer: Flipping Buildings: %s', self.flip)
-    logging.error('BuildingMultiplexer: Set building_seed: %d', self.task_params.building_seed)
-    self.num_buildings = len(self.building_names)
-    logging.error('BuildingMultiplexer: Num buildings: %d', self.num_buildings)
-
-  def _setup_planner(self):
-    # Load building env class.
-    self.buildings = []
-    for i, building_name in enumerate(self.building_names):
-      b = self.env_class(robot=self.robot, env=self.env,
-                         task_params=self.task_params,
-                         building_name=building_name, flip=self.flip[i],
-                         logdir=self.logdir, building_loader=self.dataset)
-      self.buildings.append(b)
-
-  def _setup_mapper(self):
-    # Set up the renderer.
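# Note on the setup that follows: a single SwiftshaderRenderer is created
# once and shared by every building; each building's meshes are loaded into
# the scene up front and merely toggled visible per episode, and buildings
# are later sampled with probability proportional to their node-count
# weights (building_sampling_weights).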
- cp = self.camera_param - rgb_shader, d_shader = sru.get_shaders(cp.modalities) - r_obj = SwiftshaderRenderer() - r_obj.init_display(width=cp.width, height=cp.height, fov=cp.fov, - z_near=cp.z_near, z_far=cp.z_far, rgb_shader=rgb_shader, - d_shader=d_shader) - self.r_obj = r_obj - r_obj.clear_scene() - - # Load building env class. - self.buildings = [] - wt = [] - for i, building_name in enumerate(self.building_names): - b = self.env_class(robot=self.robot, env=self.env, - task_params=self.task_params, - building_name=building_name, flip=self.flip[i], - logdir=self.logdir, building_loader=self.dataset, - r_obj=r_obj) - wt.append(b.get_weight()) - b.load_building_into_scene() - b.set_building_visibility(False) - self.buildings.append(b) - wt = np.array(wt).astype(np.float32) - wt = wt / np.sum(wt+0.0001) - self.building_sampling_weights = wt - - def sample_building(self, rng): - if self.num_buildings == 1: - building_id = rng.choice(range(len(self.building_names))) - else: - building_id = rng.choice(self.num_buildings, - p=self.building_sampling_weights) - b = self.buildings[building_id] - instances = b._gen_rng(rng) - self._building_id = building_id - return self.buildings[building_id], instances - - def sample_env(self, rngs): - rng = rngs[0]; - if self.num_buildings == 1: - building_id = rng.choice(range(len(self.building_names))) - else: - building_id = rng.choice(self.num_buildings, - p=self.building_sampling_weights) - return self.buildings[building_id] - - def pre(self, inputs): - return self.buildings[self._building_id].pre(inputs) - - def __del__(self): - self.r_obj.clear_scene() - logging.error('Clearing scene.') diff --git a/research/cognitive_mapping_and_planning/datasets/nav_env_config.py b/research/cognitive_mapping_and_planning/datasets/nav_env_config.py deleted file mode 100644 index 3d71c5767..000000000 --- a/research/cognitive_mapping_and_planning/datasets/nav_env_config.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Configs for stanford navigation environment. - -Base config for stanford navigation enviornment. -""" -import numpy as np -import src.utils as utils -import datasets.nav_env as nav_env - -def nav_env_base_config(): - """Returns the base config for stanford navigation environment. - - Returns: - Base config for stanford navigation environment. 
- """ - robot = utils.Foo(radius=15, - base=10, - height=140, - sensor_height=120, - camera_elevation_degree=-15) - - env = utils.Foo(padding=10, - resolution=5, - num_point_threshold=2, - valid_min=-10, - valid_max=200, - n_samples_per_face=200) - - camera_param = utils.Foo(width=225, - height=225, - z_near=0.05, - z_far=20.0, - fov=60., - modalities=['rgb'], - img_channels=3) - - data_augment = utils.Foo(lr_flip=0, - delta_angle=0.5, - delta_xy=4, - relight=True, - relight_fast=False, - structured=False) # if True, uses the same perturb for the whole episode. - - outputs = utils.Foo(images=True, - rel_goal_loc=False, - loc_on_map=True, - gt_dist_to_goal=True, - ego_maps=False, - ego_goal_imgs=False, - egomotion=False, - visit_count=False, - analytical_counts=False, - node_ids=True, - readout_maps=False) - - # class_map_names=['board', 'chair', 'door', 'sofa', 'table'] - class_map_names = ['chair', 'door', 'table'] - semantic_task = utils.Foo(class_map_names=class_map_names, pix_distance=16, - sampling='uniform') - - # time per iteration for cmp is 0.82 seconds per episode with 3.4s overhead per batch. - task_params = utils.Foo(max_dist=32, - step_size=8, - num_steps=40, - num_actions=4, - batch_size=4, - building_seed=0, - num_goals=1, - img_height=None, - img_width=None, - img_channels=None, - modalities=None, - outputs=outputs, - map_scales=[1.], - map_crop_sizes=[64], - rel_goal_loc_dim=4, - base_class='Building', - task='map+plan', - n_ori=4, - type='room_to_room_many', - data_augment=data_augment, - room_regex='^((?!hallway).)*$', - toy_problem=False, - map_channels=1, - gt_coverage=False, - input_type='maps', - full_information=False, - aux_delta_thetas=[], - semantic_task=semantic_task, - num_history_frames=0, - node_ids_dim=1, - perturbs_dim=4, - map_resize_method='linear_noantialiasing', - readout_maps_channels=1, - readout_maps_scales=[], - readout_maps_crop_sizes=[], - n_views=1, - reward_time_penalty=0.1, - reward_at_goal=1., - discount_factor=0.99, - rejection_sampling_M=100, - min_dist=None) - - navtask_args = utils.Foo( - building_names=['area1_gates_wingA_floor1_westpart'], - env_class=nav_env.VisualNavigationEnv, - robot=robot, - task_params=task_params, - env=env, - camera_param=camera_param, - cache_rooms=True) - return navtask_args - diff --git a/research/cognitive_mapping_and_planning/matplotlibrc b/research/cognitive_mapping_and_planning/matplotlibrc deleted file mode 100644 index ed5097572..000000000 --- a/research/cognitive_mapping_and_planning/matplotlibrc +++ /dev/null @@ -1 +0,0 @@ -backend : agg diff --git a/research/cognitive_mapping_and_planning/output/.gitignore b/research/cognitive_mapping_and_planning/output/.gitignore deleted file mode 100644 index a767cafbb..000000000 --- a/research/cognitive_mapping_and_planning/output/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/research/cognitive_mapping_and_planning/output/README.md b/research/cognitive_mapping_and_planning/output/README.md deleted file mode 100644 index 7518c3874..000000000 --- a/research/cognitive_mapping_and_planning/output/README.md +++ /dev/null @@ -1,16 +0,0 @@ -### Pre-Trained Models - -We provide the following pre-trained models: - -Config Name | Checkpoint | Mean Dist. | 50%ile Dist. | 75%ile Dist. 
| Success %age |
-:-: | :-: | :-: | :-: | :-: | :-: |
-cmp.lmap_Msc.clip5.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r.tar) | 4.79 | 0 | 1 | 78.9 |
-cmp.lmap_Msc.clip5.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_r2r.tar) | 7.74 | 0 | 14 | 62.4 |
-cmp.lmap_Msc.clip5.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_ST.tar) | 10.67 | 9 | 19 | 39.7 |
-cmp.lmap_Msc.clip5.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_rgb_ST.tar) | 11.27 | 10 | 19 | 35.6 |
-cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80 | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80.tar) | 11.6 | 0 | 19 | 66.9 |
-bl.v2.noclip.sbpd_d_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r.tar) | 5.90 | 0 | 6 | 71.2 |
-bl.v2.noclip.sbpd_rgb_r2r | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_r2r.tar) | 10.21 | 1 | 21 | 53.4 |
-bl.v2.noclip.sbpd_d_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_ST.tar) | 13.29 | 14 | 23 | 28.0 |
-bl.v2.noclip.sbpd_rgb_ST | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_rgb_ST.tar) | 13.37 | 13 | 20 | 24.2 |
-bl.v2.noclip.sbpd_d_r2r_h0_64_80 | [ckpt](http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/bl.v2.noclip.sbpd_d_r2r_h0_64_80.tar) | 15.30 | 0 | 29 | 57.9 |
diff --git a/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch b/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch
deleted file mode 100644
index de1be442d..000000000
--- a/research/cognitive_mapping_and_planning/patches/GLES2_2_0.py.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-10c10
-< from OpenGL import platform, constant, arrays
----
-> from OpenGL import platform, constant, arrays, contextdata
-249a250
-> from OpenGL._bytes import _NULL_8_BYTE
-399c400
-< array = ArrayDatatype.asArray( pointer, type )
----
-> array = arrays.ArrayDatatype.asArray( pointer, type )
-405c406
-< ArrayDatatype.voidDataPointer( array )
----
-> arrays.ArrayDatatype.voidDataPointer( array )
diff --git a/research/cognitive_mapping_and_planning/patches/apply_patches.sh b/research/cognitive_mapping_and_planning/patches/apply_patches.sh
deleted file mode 100644
index 4a7860582..000000000
--- a/research/cognitive_mapping_and_planning/patches/apply_patches.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -echo $VIRTUAL_ENV -patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/GLES2/VERSION/GLES2_2_0.py patches/GLES2_2_0.py.patch -patch $VIRTUAL_ENV/local/lib/python2.7/site-packages/OpenGL/platform/ctypesloader.py patches/ctypesloader.py.patch diff --git a/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch b/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch deleted file mode 100644 index 27dd43b18..000000000 --- a/research/cognitive_mapping_and_planning/patches/ctypesloader.py.patch +++ /dev/null @@ -1,15 +0,0 @@ -45c45,46 -< return dllType( name, mode ) ---- -> print './' + name -> return dllType( './' + name, mode ) -47,48c48,53 -< err.args += (name,fullName) -< raise ---- -> try: -> print name -> return dllType( name, mode ) -> except: -> err.args += (name,fullName) -> raise diff --git a/research/cognitive_mapping_and_planning/render/__init__.py b/research/cognitive_mapping_and_planning/render/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp b/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp deleted file mode 100644 index 23e93d27f..000000000 --- a/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.fp +++ /dev/null @@ -1,30 +0,0 @@ -// This shader computes per-pixel depth (-z coordinate in the camera space, or -// orthogonal distance to the camera plane). The result is multiplied by the -// `kFixedPointFraction` constant and is encoded to RGB channels as an integer -// (R being the least significant byte). - -#ifdef GL_ES -#ifdef GL_FRAGMENT_PRECISION_HIGH -precision highp float; -#else -precision mediump float; -#endif -#endif - -const float kFixedPointFraction = 1000.0; - -varying float vDepth; - -void main(void) { - float d = vDepth; - - // Encode the depth to RGB. - d *= (kFixedPointFraction / 255.0); - gl_FragColor.r = mod(d, 1.0); - d = (d - gl_FragColor.r) / 255.0; - gl_FragColor.g = mod(d, 1.0); - d = (d - gl_FragColor.g) / 255.0; - gl_FragColor.b = mod(d, 1.0); - - gl_FragColor.a = 1.0; -} diff --git a/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp b/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp deleted file mode 100644 index 2db74f14a..000000000 --- a/research/cognitive_mapping_and_planning/render/depth_rgb_encoded.vp +++ /dev/null @@ -1,15 +0,0 @@ -uniform mat4 uViewMatrix; -uniform mat4 uProjectionMatrix; - -attribute vec3 aPosition; - -varying float vDepth; - -void main(void) { - vec4 worldPosition = vec4(aPosition, 1.0); - vec4 viewPosition = uViewMatrix * worldPosition; - gl_Position = uProjectionMatrix * viewPosition; - - // Orthogonal depth is simply -z in the camera space. 
- vDepth = -viewPosition.z; -} diff --git a/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp b/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp deleted file mode 100644 index c8c24d761..000000000 --- a/research/cognitive_mapping_and_planning/render/rgb_flat_color.fp +++ /dev/null @@ -1,11 +0,0 @@ -precision highp float; -varying vec4 vColor; -varying vec2 vTextureCoord; - -uniform sampler2D uTexture; - -void main(void) { - vec4 color = vColor; - color = texture2D(uTexture, vTextureCoord); - gl_FragColor = color; -} diff --git a/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp b/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp deleted file mode 100644 index ebc791734..000000000 --- a/research/cognitive_mapping_and_planning/render/rgb_flat_color.vp +++ /dev/null @@ -1,18 +0,0 @@ -uniform mat4 uViewMatrix; -uniform mat4 uProjectionMatrix; -uniform vec4 uColor; - -attribute vec4 aColor; -attribute vec3 aPosition; -attribute vec2 aTextureCoord; - -varying vec4 vColor; -varying vec2 vTextureCoord; - -void main(void) { - vec4 worldPosition = vec4(aPosition, 1.0); - gl_Position = uProjectionMatrix * (uViewMatrix * worldPosition); - - vColor = aColor * uColor; - vTextureCoord = aTextureCoord; -} diff --git a/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py b/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py deleted file mode 100644 index 74b1be72c..000000000 --- a/research/cognitive_mapping_and_planning/render/swiftshader_renderer.py +++ /dev/null @@ -1,427 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Implements loading and rendering of meshes. Contains 2 classes: - Shape: Class that exposes high level functions for loading and manipulating - shapes. This currently is bound to assimp - (https://github.com/assimp/assimp). If you want to interface to a different - library, reimplement this class with bindings to your mesh loading library. - - SwiftshaderRenderer: Class that renders Shapes. Currently this uses python - bindings to OpenGL (EGL), bindings to an alternate renderer may be implemented - here. 
-""" - -import numpy as np, os -import cv2, ctypes, logging, os, numpy as np -import pyassimp as assimp -from OpenGL.GLES2 import * -from OpenGL.EGL import * -import src.rotation_utils as ru - -__version__ = 'swiftshader_renderer' - -def get_shaders(modalities): - rgb_shader = 'rgb_flat_color' if 'rgb' in modalities else None - d_shader = 'depth_rgb_encoded' if 'depth' in modalities else None - return rgb_shader, d_shader - -def sample_points_on_faces(vs, fs, rng, n_samples_per_face): - idx = np.repeat(np.arange(fs.shape[0]), n_samples_per_face) - - r = rng.rand(idx.size, 2) - r1 = r[:,:1]; r2 = r[:,1:]; sqrt_r1 = np.sqrt(r1); - - v1 = vs[fs[idx, 0], :]; v2 = vs[fs[idx, 1], :]; v3 = vs[fs[idx, 2], :]; - pts = (1-sqrt_r1)*v1 + sqrt_r1*(1-r2)*v2 + sqrt_r1*r2*v3 - - v1 = vs[fs[:,0], :]; v2 = vs[fs[:, 1], :]; v3 = vs[fs[:, 2], :]; - ar = 0.5*np.sqrt(np.sum(np.cross(v1-v3, v2-v3)**2, 1)) - - return pts, ar, idx - -class Shape(): - def get_pyassimp_load_options(self): - load_flags = assimp.postprocess.aiProcess_Triangulate; - load_flags = load_flags | assimp.postprocess.aiProcess_SortByPType; - load_flags = load_flags | assimp.postprocess.aiProcess_OptimizeMeshes; - load_flags = load_flags | assimp.postprocess.aiProcess_RemoveRedundantMaterials; - load_flags = load_flags | assimp.postprocess.aiProcess_FindDegenerates; - load_flags = load_flags | assimp.postprocess.aiProcess_GenSmoothNormals; - load_flags = load_flags | assimp.postprocess.aiProcess_JoinIdenticalVertices; - load_flags = load_flags | assimp.postprocess.aiProcess_ImproveCacheLocality; - load_flags = load_flags | assimp.postprocess.aiProcess_GenUVCoords; - load_flags = load_flags | assimp.postprocess.aiProcess_FindInvalidData; - return load_flags - - def __init__(self, obj_file, material_file=None, load_materials=True, - name_prefix='', name_suffix=''): - if material_file is not None: - logging.error('Ignoring material file input, reading them off obj file.') - load_flags = self.get_pyassimp_load_options() - scene = assimp.load(obj_file, processing=load_flags) - filter_ind = self._filter_triangles(scene.meshes) - self.meshes = [scene.meshes[i] for i in filter_ind] - for m in self.meshes: - m.name = name_prefix + m.name + name_suffix - - dir_name = os.path.dirname(obj_file) - # Load materials - materials = None - if load_materials: - materials = [] - for m in self.meshes: - file_name = os.path.join(dir_name, m.material.properties[('file', 1)]) - assert(os.path.exists(file_name)), \ - 'Texture file {:s} foes not exist.'.format(file_name) - img_rgb = cv2.imread(file_name)[::-1,:,::-1] - if img_rgb.shape[0] != img_rgb.shape[1]: - logging.warn('Texture image not square.') - sz = np.maximum(img_rgb.shape[0], img_rgb.shape[1]) - sz = int(np.power(2., np.ceil(np.log2(sz)))) - img_rgb = cv2.resize(img_rgb, (sz,sz), interpolation=cv2.INTER_LINEAR) - else: - sz = img_rgb.shape[0] - sz_ = int(np.power(2., np.ceil(np.log2(sz)))) - if sz != sz_: - logging.warn('Texture image not square of power of 2 size. 
' + - 'Changing size from %d to %d.', sz, sz_) - sz = sz_ - img_rgb = cv2.resize(img_rgb, (sz,sz), interpolation=cv2.INTER_LINEAR) - materials.append(img_rgb) - self.scene = scene - self.materials = materials - - def _filter_triangles(self, meshes): - select = [] - for i in range(len(meshes)): - if meshes[i].primitivetypes == 4: - select.append(i) - return select - - def flip_shape(self): - for m in self.meshes: - m.vertices[:,1] = -m.vertices[:,1] - bb = m.faces*1 - bb[:,1] = m.faces[:,2] - bb[:,2] = m.faces[:,1] - m.faces = bb - # m.vertices[:,[0,1]] = m.vertices[:,[1,0]] - - def get_vertices(self): - vs = [] - for m in self.meshes: - vs.append(m.vertices) - vss = np.concatenate(vs, axis=0) - return vss, vs - - def get_faces(self): - vs = [] - for m in self.meshes: - v = m.faces - vs.append(v) - return vs - - def get_number_of_meshes(self): - return len(self.meshes) - - def scale(self, sx=1., sy=1., sz=1.): - pass - - def sample_points_on_face_of_shape(self, i, n_samples_per_face, sc): - v = self.meshes[i].vertices*sc - f = self.meshes[i].faces - p, face_areas, face_idx = sample_points_on_faces( - v, f, np.random.RandomState(0), n_samples_per_face) - return p, face_areas, face_idx - - def __del__(self): - scene = self.scene - assimp.release(scene) - -class SwiftshaderRenderer(): - def __init__(self): - self.entities = {} - - def init_display(self, width, height, fov, z_near, z_far, rgb_shader, - d_shader): - self.init_renderer_egl(width, height) - dir_path = os.path.dirname(os.path.realpath(__file__)) - if d_shader is not None and rgb_shader is not None: - logging.fatal('Does not support setting both rgb_shader and d_shader.') - - if d_shader is not None: - assert rgb_shader is None - shader = d_shader - self.modality = 'depth' - - if rgb_shader is not None: - assert d_shader is None - shader = rgb_shader - self.modality = 'rgb' - - self.create_shaders(os.path.join(dir_path, shader+'.vp'), - os.path.join(dir_path, shader + '.fp')) - aspect = width*1./(height*1.) 
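# Worked example for the projection set up next: with fov_vertical=60
# degrees, z_near=0.05 and aspect=1, set_camera derives a near-plane window
# of 2*tan(30 deg)*0.05 ~= 0.0577 units on a side and fills the standard
# OpenGL perspective matrix from it.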
- self.set_camera(fov, z_near, z_far, aspect) - - def init_renderer_egl(self, width, height): - major,minor = ctypes.c_long(),ctypes.c_long() - logging.info('init_renderer_egl: EGL_DEFAULT_DISPLAY: %s', EGL_DEFAULT_DISPLAY) - - egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY) - logging.info('init_renderer_egl: egl_display: %s', egl_display) - - eglInitialize(egl_display, major, minor) - logging.info('init_renderer_egl: EGL_OPENGL_API, EGL_OPENGL_ES_API: %s, %s', - EGL_OPENGL_API, EGL_OPENGL_ES_API) - eglBindAPI(EGL_OPENGL_ES_API) - - num_configs = ctypes.c_long() - configs = (EGLConfig*1)() - local_attributes = [EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, - EGL_DEPTH_SIZE, 16, EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, - EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE,] - logging.error('init_renderer_egl: local attributes: %s', local_attributes) - local_attributes = arrays.GLintArray.asArray(local_attributes) - success = eglChooseConfig(egl_display, local_attributes, configs, 1, num_configs) - logging.error('init_renderer_egl: eglChooseConfig success, num_configs: %d, %d', success, num_configs.value) - egl_config = configs[0] - - - context_attributes = [EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE] - context_attributes = arrays.GLintArray.asArray(context_attributes) - egl_context = eglCreateContext(egl_display, egl_config, EGL_NO_CONTEXT, context_attributes) - - buffer_attributes = [EGL_WIDTH, width, EGL_HEIGHT, height, EGL_NONE] - buffer_attributes = arrays.GLintArray.asArray(buffer_attributes) - egl_surface = eglCreatePbufferSurface(egl_display, egl_config, buffer_attributes) - - - eglMakeCurrent(egl_display, egl_surface, egl_surface, egl_context) - logging.error("init_renderer_egl: egl_display: %s egl_surface: %s, egl_config: %s", egl_display, egl_surface, egl_context) - - glViewport(0, 0, width, height); - - self.egl_display = egl_display - self.egl_surface = egl_surface - self.egl_config = egl_config - self.egl_mapping = {} - self.render_timer = None - self.load_timer = None - self.height = height - self.width = width - - def create_shaders(self, v_shader_file, f_shader_file): - v_shader = glCreateShader(GL_VERTEX_SHADER) - with open(v_shader_file, 'r') as f: - ls = '' - for l in f: - ls = ls + l - glShaderSource(v_shader, ls) - glCompileShader(v_shader); - assert(glGetShaderiv(v_shader, GL_COMPILE_STATUS) == 1) - - f_shader = glCreateShader(GL_FRAGMENT_SHADER) - with open(f_shader_file, 'r') as f: - ls = '' - for l in f: - ls = ls + l - glShaderSource(f_shader, ls) - glCompileShader(f_shader); - assert(glGetShaderiv(f_shader, GL_COMPILE_STATUS) == 1) - - egl_program = glCreateProgram(); - assert(egl_program) - glAttachShader(egl_program, v_shader) - glAttachShader(egl_program, f_shader) - glLinkProgram(egl_program); - assert(glGetProgramiv(egl_program, GL_LINK_STATUS) == 1) - glUseProgram(egl_program) - - glBindAttribLocation(egl_program, 0, "aPosition") - glBindAttribLocation(egl_program, 1, "aColor") - glBindAttribLocation(egl_program, 2, "aTextureCoord") - - self.egl_program = egl_program - self.egl_mapping['vertexs'] = 0 - self.egl_mapping['vertexs_color'] = 1 - self.egl_mapping['vertexs_tc'] = 2 - - glClearColor(0.0, 0.0, 0.0, 1.0); - # glEnable(GL_CULL_FACE); glCullFace(GL_BACK); - glEnable(GL_DEPTH_TEST); - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - def set_camera(self, fov_vertical, z_near, z_far, aspect): - width = 2*np.tan(np.deg2rad(fov_vertical)/2.0)*z_near*aspect; - height = 2*np.tan(np.deg2rad(fov_vertical)/2.0)*z_near; - egl_program = self.egl_program - c 
= np.eye(4, dtype=np.float32) - c[3,3] = 0 - c[3,2] = -1 - c[2,2] = -(z_near+z_far)/(z_far-z_near) - c[2,3] = -2.0*(z_near*z_far)/(z_far-z_near) - c[0,0] = 2.0*z_near/width - c[1,1] = 2.0*z_near/height - c = c.T - - projection_matrix_o = glGetUniformLocation(egl_program, 'uProjectionMatrix') - projection_matrix = np.eye(4, dtype=np.float32) - projection_matrix[...] = c - projection_matrix = np.reshape(projection_matrix, (-1)) - glUniformMatrix4fv(projection_matrix_o, 1, GL_FALSE, projection_matrix) - - - def load_default_object(self): - v = np.array([[0.0, 0.5, 0.0, 1.0, 1.0, 0.0, 1.0], - [-0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0], - [0.5, -0.5, 0.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.float32) - v = np.concatenate((v,v+0.1), axis=0) - v = np.ascontiguousarray(v, dtype=np.float32) - - vbo = glGenBuffers(1) - glBindBuffer (GL_ARRAY_BUFFER, vbo) - glBufferData (GL_ARRAY_BUFFER, v.dtype.itemsize*v.size, v, GL_STATIC_DRAW) - glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(0)) - glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 28, ctypes.c_void_p(12)) - glEnableVertexAttribArray(0); - glEnableVertexAttribArray(1); - - self.num_to_render = 6; - - def _actual_render(self): - for entity_id, entity in self.entities.iteritems(): - if entity['visible']: - vbo = entity['vbo'] - tbo = entity['tbo'] - num = entity['num'] - - glBindBuffer(GL_ARRAY_BUFFER, vbo) - glVertexAttribPointer(self.egl_mapping['vertexs'], 3, GL_FLOAT, GL_FALSE, - 20, ctypes.c_void_p(0)) - glVertexAttribPointer(self.egl_mapping['vertexs_tc'], 2, GL_FLOAT, - GL_FALSE, 20, ctypes.c_void_p(12)) - glEnableVertexAttribArray(self.egl_mapping['vertexs']); - glEnableVertexAttribArray(self.egl_mapping['vertexs_tc']); - - glBindTexture(GL_TEXTURE_2D, tbo) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - glDrawArrays(GL_TRIANGLES, 0, num) - - def render(self, take_screenshot=False, output_type=0): - # self.render_timer.tic() - self._actual_render() - # self.render_timer.toc(log_at=1000, log_str='render timer', type='time') - - np_rgb_img = None - np_d_img = None - c = 1000. 
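# The constant c undoes the fixed-point encoding written by
# depth_rgb_encoded.fp (kFixedPointFraction = 1000): reading the RGBA bytes
# back, depth in scene units is (R + 255*G + 255*255*B) / 1000, which is
# exactly the weighted per-channel sum applied to np_d_img below.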
- if take_screenshot: - if self.modality == 'rgb': - screenshot_rgba = np.zeros((self.height, self.width, 4), dtype=np.uint8) - glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE, screenshot_rgba) - np_rgb_img = screenshot_rgba[::-1,:,:3]; - - if self.modality == 'depth': - screenshot_d = np.zeros((self.height, self.width, 4), dtype=np.uint8) - glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_UNSIGNED_BYTE, screenshot_d) - np_d_img = screenshot_d[::-1,:,:3]; - np_d_img = np_d_img[:,:,2]*(255.*255./c) + np_d_img[:,:,1]*(255./c) + np_d_img[:,:,0]*(1./c) - np_d_img = np_d_img.astype(np.float32) - np_d_img[np_d_img == 0] = np.NaN - np_d_img = np_d_img[:,:,np.newaxis] - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - return np_rgb_img, np_d_img - - def _load_mesh_into_gl(self, mesh, material): - vvt = np.concatenate((mesh.vertices, mesh.texturecoords[0,:,:2]), axis=1) - vvt = np.ascontiguousarray(vvt[mesh.faces.reshape((-1)),:], dtype=np.float32) - num = vvt.shape[0] - vvt = np.reshape(vvt, (-1)) - - vbo = glGenBuffers(1) - glBindBuffer(GL_ARRAY_BUFFER, vbo) - glBufferData(GL_ARRAY_BUFFER, vvt.dtype.itemsize*vvt.size, vvt, GL_STATIC_DRAW) - - tbo = glGenTextures(1) - glBindTexture(GL_TEXTURE_2D, tbo) - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, material.shape[1], - material.shape[0], 0, GL_RGB, GL_UNSIGNED_BYTE, - np.reshape(material, (-1))) - return num, vbo, tbo - - def load_shapes(self, shapes): - entities = self.entities - entity_ids = [] - for i, shape in enumerate(shapes): - for j in range(len(shape.meshes)): - name = shape.meshes[j].name - assert name not in entities, '{:s} entity already exists.'.format(name) - num, vbo, tbo = self._load_mesh_into_gl(shape.meshes[j], shape.materials[j]) - entities[name] = {'num': num, 'vbo': vbo, 'tbo': tbo, 'visible': False} - entity_ids.append(name) - return entity_ids - - def set_entity_visible(self, entity_ids, visibility): - for entity_id in entity_ids: - self.entities[entity_id]['visible'] = visibility - - def position_camera(self, camera_xyz, lookat_xyz, up): - camera_xyz = np.array(camera_xyz) - lookat_xyz = np.array(lookat_xyz) - up = np.array(up) - lookat_to = lookat_xyz - camera_xyz - lookat_from = np.array([0, 1., 0.]) - up_from = np.array([0, 0., 1.]) - up_to = up * 1. 
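# The rotation solved for below takes the renderer's default pose (camera
# looking along +y with +z up, per lookat_from and up_from above) onto the
# requested pose; composed with the translation by -camera_xyz and the y/z
# axis flip, this yields a conventional OpenGL view matrix.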
- # np.set_printoptions(precision=2, suppress=True) - # print up_from, lookat_from, up_to, lookat_to - r = ru.rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to) - R = np.eye(4, dtype=np.float32) - R[:3,:3] = r - - t = np.eye(4, dtype=np.float32) - t[:3,3] = -camera_xyz - - view_matrix = np.dot(R.T, t) - flip_yz = np.eye(4, dtype=np.float32) - flip_yz[1,1] = 0; flip_yz[2,2] = 0; flip_yz[1,2] = 1; flip_yz[2,1] = -1; - view_matrix = np.dot(flip_yz, view_matrix) - view_matrix = view_matrix.T - # print np.concatenate((R, t, view_matrix), axis=1) - view_matrix = np.reshape(view_matrix, (-1)) - view_matrix_o = glGetUniformLocation(self.egl_program, 'uViewMatrix') - glUniformMatrix4fv(view_matrix_o, 1, GL_FALSE, view_matrix) - return None, None #camera_xyz, q - - def clear_scene(self): - keys = self.entities.keys() - for entity_id in keys: - entity = self.entities.pop(entity_id, None) - vbo = entity['vbo'] - tbo = entity['tbo'] - num = entity['num'] - glDeleteBuffers(1, [vbo]) - glDeleteTextures(1, [tbo]) - - def __del__(self): - self.clear_scene() - eglMakeCurrent(self.egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT) - eglDestroySurface(self.egl_display, self.egl_surface) - eglTerminate(self.egl_display) diff --git a/research/cognitive_mapping_and_planning/requirements.txt b/research/cognitive_mapping_and_planning/requirements.txt deleted file mode 100644 index 306c807a6..000000000 --- a/research/cognitive_mapping_and_planning/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -numpy -pillow -PyOpenGL -PyOpenGL-accelerate -six -networkx -scikit-image -scipy -opencv-python diff --git a/research/cognitive_mapping_and_planning/scripts/__init__.py b/research/cognitive_mapping_and_planning/scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/scripts/script_distill.py b/research/cognitive_mapping_and_planning/scripts/script_distill.py deleted file mode 100644 index 010c69041..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_distill.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r""" Script to setup the grid moving agent. 
- -blaze build --define=ION_GFX_OGLES20=1 -c opt --copt=-mavx --config=cuda_clang \ - learning/brain/public/tensorflow_std_server{,_gpu} \ - experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill.par \ - experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill - - -./blaze-bin/experimental/users/saurabhgupta/navigation/cmp/scripts/script_distill \ - --logdir=/cns/iq-d/home/saurabhgupta/output/stanford-distill/local/v0/ \ - --config_name 'v0+train' --gfs_user robot-intelligence-gpu - -""" -import sys, os, numpy as np -import copy -import argparse, pprint -import time -import cProfile - - -import tensorflow as tf -from tensorflow.contrib import slim -from tensorflow.python.framework import ops -from tensorflow.contrib.framework.python.ops import variables - -import logging -from tensorflow.python.platform import gfile -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -from cfgs import config_distill -from tfcode import tf_utils -import src.utils as utils -import src.file_utils as fu -import tfcode.distillation as distill -import datasets.nav_env as nav_env - -FLAGS = flags.FLAGS - -flags.DEFINE_string('master', 'local', - 'The name of the TensorFlow master to use.') -flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the ' - 'value is 0, then the parameters are handled locally by ' - 'the worker.') -flags.DEFINE_integer('task', 0, 'The Task ID. This value is used when training ' - 'with multiple workers to identify each worker.') - -flags.DEFINE_integer('num_workers', 1, '') - -flags.DEFINE_string('config_name', '', '') - -flags.DEFINE_string('logdir', '', '') - -def main(_): - args = config_distill.get_args_for_config(FLAGS.config_name) - args.logdir = FLAGS.logdir - args.solver.num_workers = FLAGS.num_workers - args.solver.task = FLAGS.task - args.solver.ps_tasks = FLAGS.ps_tasks - args.solver.master = FLAGS.master - - args.buildinger.env_class = nav_env.MeshMapper - fu.makedirs(args.logdir) - args.buildinger.logdir = args.logdir - R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task) - - if False: - pr = cProfile.Profile() - pr.enable() - rng = np.random.RandomState(0) - for i in range(1): - b, instances_perturbs = R.sample_building(rng) - inputs = b.worker(*(instances_perturbs)) - for j in range(inputs['imgs'].shape[0]): - p = os.path.join('tmp', '{:d}.png'.format(j)) - img = inputs['imgs'][j,0,:,:,:3]*1 - img = (img).astype(np.uint8) - fu.write_image(p, img) - print(inputs['imgs'].shape) - inputs = R.pre(inputs) - pr.disable() - pr.print_stats(2) - - if args.control.train: - if not gfile.Exists(args.logdir): - gfile.MakeDirs(args.logdir) - - m = utils.Foo() - m.tf_graph = tf.Graph() - - config = tf.ConfigProto() - config.device_count['GPU'] = 1 - config.gpu_options.allow_growth = True - config.gpu_options.per_process_gpu_memory_fraction = 0.8 - - with m.tf_graph.as_default(): - with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks)): - m = distill.setup_to_run(m, args, is_training=True, - batch_norm_is_training=True) - - train_step_kwargs = distill.setup_train_step_kwargs_mesh( - m, R, os.path.join(args.logdir, 'train'), - rng_seed=args.solver.task, is_chief=args.solver.task==0, iters=1, - train_display_interval=args.summary.display_interval) - - final_loss = slim.learning.train( - train_op=m.train_op, - logdir=args.logdir, - master=args.solver.master, - is_chief=args.solver.task == 0, - number_of_steps=args.solver.max_steps, - train_step_fn=tf_utils.train_step_custom, - 
train_step_kwargs=train_step_kwargs, - global_step=m.global_step_op, - init_op=m.init_op, - init_fn=m.init_fn, - sync_optimizer=m.sync_optimizer, - saver=m.saver_op, - summary_op=None, session_config=config) - - if args.control.test: - m = utils.Foo() - m.tf_graph = tf.Graph() - checkpoint_dir = os.path.join(format(args.logdir)) - with m.tf_graph.as_default(): - m = distill.setup_to_run(m, args, is_training=False, - batch_norm_is_training=args.control.force_batchnorm_is_training_at_test) - - train_step_kwargs = distill.setup_train_step_kwargs_mesh( - m, R, os.path.join(args.logdir, args.control.test_name), - rng_seed=args.solver.task+1, is_chief=args.solver.task==0, - iters=args.summary.test_iters, train_display_interval=None) - - sv = slim.learning.supervisor.Supervisor( - graph=ops.get_default_graph(), logdir=None, init_op=m.init_op, - summary_op=None, summary_writer=None, global_step=None, saver=m.saver_op) - - last_checkpoint = None - while True: - last_checkpoint = slim.evaluation.wait_for_new_checkpoint(checkpoint_dir, last_checkpoint) - checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) - start = time.time() - logging.info('Starting evaluation at %s using checkpoint %s.', - time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), - last_checkpoint) - - config = tf.ConfigProto() - config.device_count['GPU'] = 1 - config.gpu_options.allow_growth = True - config.gpu_options.per_process_gpu_memory_fraction = 0.8 - - with sv.managed_session(args.solver.master,config=config, - start_standard_services=False) as sess: - sess.run(m.init_op) - sv.saver.restore(sess, last_checkpoint) - sv.start_queue_runners(sess) - vals, _ = tf_utils.train_step_custom( - sess, None, m.global_step_op, train_step_kwargs, mode='val') - if checkpoint_iter >= args.solver.max_steps: - break - -if __name__ == '__main__': - app.run() diff --git a/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh b/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh deleted file mode 100644 index 1900bd0b0..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_download_init_models.sh +++ /dev/null @@ -1,18 +0,0 @@ -# Script to download models to initialize the RGB and D models for training.We -# use ResNet-v2-50 for both modalities. - -mkdir -p data/init_models -cd data/init_models - -# RGB Models are initialized by pre-training on ImageNet. -mkdir -p resnet_v2_50 -RGB_URL="http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz" -wget $RGB_URL -tar -xf resnet_v2_50_2017_04_14.tar.gz -C resnet_v2_50 - -# Depth models are initialized by distilling the RGB model to D images using -# Cross-Modal Distillation (https://arxiv.org/abs/1507.00448). -mkdir -p distill_rgb_to_d_resnet_v2_50 -D_URL="http://download.tensorflow.org/models/cognitive_mapping_and_planning/2017_04_16/distill_rgb_to_d_resnet_v2_50.tar" -wget $D_URL -tar -xf distill_rgb_to_d_resnet_v2_50.tar -C distill_rgb_to_d_resnet_v2_50 diff --git a/research/cognitive_mapping_and_planning/scripts/script_env_vis.py b/research/cognitive_mapping_and_planning/scripts/script_env_vis.py deleted file mode 100644 index 3690ff484..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_env_vis.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A simple python function to walk in the enviornments that we have created. -PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_env_vis.py \ - --dataset_name sbpd --building_name area3 -""" -import sys -import numpy as np -import matplotlib -matplotlib.use('TkAgg') -from PIL import ImageTk, Image -import Tkinter as tk -import logging -from tensorflow.python.platform import app -from tensorflow.python.platform import flags - -import datasets.nav_env_config as nec -import datasets.nav_env as nav_env -import cv2 -from datasets import factory -import render.swiftshader_renderer as renderer - -SwiftshaderRenderer = renderer.SwiftshaderRenderer -VisualNavigationEnv = nav_env.VisualNavigationEnv - -FLAGS = flags.FLAGS -flags.DEFINE_string('dataset_name', 'sbpd', 'Name of the dataset.') -flags.DEFINE_float('fov', 60., 'Field of view') -flags.DEFINE_integer('image_size', 512, 'Size of the image.') -flags.DEFINE_string('building_name', '', 'Name of the building.') - -def get_args(): - navtask = nec.nav_env_base_config() - navtask.task_params.type = 'rng_rejection_sampling_many' - navtask.task_params.rejection_sampling_M = 2000 - navtask.task_params.min_dist = 10 - sz = FLAGS.image_size - navtask.camera_param.fov = FLAGS.fov - navtask.camera_param.height = sz - navtask.camera_param.width = sz - navtask.task_params.img_height = sz - navtask.task_params.img_width = sz - - # navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table'] - # navtask.task_params.type = 'to_nearest_obj_acc' - - logging.info('navtask: %s', navtask) - return navtask - -def load_building(dataset_name, building_name): - dataset = factory.get_dataset(dataset_name) - - navtask = get_args() - cp = navtask.camera_param - rgb_shader, d_shader = renderer.get_shaders(cp.modalities) - r_obj = SwiftshaderRenderer() - r_obj.init_display(width=cp.width, height=cp.height, - fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far, - rgb_shader=rgb_shader, d_shader=d_shader) - r_obj.clear_scene() - b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env, - task_params=navtask.task_params, - building_name=building_name, flip=False, - logdir=None, building_loader=dataset, - r_obj=r_obj) - b.load_building_into_scene() - b.set_building_visibility(False) - return b - -def walk_through(b): - # init agent at a random location in the environment. - init_env_state = b.reset([np.random.RandomState(0), np.random.RandomState(0)]) - - global current_node - rng = np.random.RandomState(0) - current_node = rng.choice(b.task.nodes.shape[0]) - - root = tk.Tk() - image = b.render_nodes(b.task.nodes[[current_node],:])[0] - print(image.shape) - image = image.astype(np.uint8) - im = Image.fromarray(image) - im = ImageTk.PhotoImage(im) - panel = tk.Label(root, image=im) - - map_size = b.traversible.shape - sc = np.max(map_size)/256. 
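# A self-contained sketch of the Tk display idiom used in walk_through here
# and in the refresh() callback just below: a Label shows a numpy image, and
# every redraw must both configure the Label and keep a Python reference to
# the new PhotoImage, otherwise Tk garbage-collects it and the panel goes
# blank. Assumes only numpy and PIL; no navigation environment is involved.
import numpy as np
from PIL import Image, ImageTk
try:
  import Tkinter as tk          # Python 2, as in this codebase
except ImportError:
  import tkinter as tk          # Python 3

root = tk.Tk()
frame_id = [0]

def render(i):
  img = np.zeros((128, 128, 3), dtype=np.uint8)
  img[:, :, i % 3] = 255        # cycle a solid R -> G -> B frame
  return ImageTk.PhotoImage(Image.fromarray(img))

photo = render(0)
panel = tk.Label(root, image=photo)
panel.pack()

def on_key(event):
  frame_id[0] += 1
  new_photo = render(frame_id[0])
  panel.configure(image=new_photo)
  panel.image = new_photo       # keep a reference so Tk does not drop it

panel.bind('<Key>', on_key)
panel.focus_set()
root.mainloop()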
- loc = np.array([[map_size[1]/2., map_size[0]/2.]]) - x_axis = np.zeros_like(loc); x_axis[:,1] = sc - y_axis = np.zeros_like(loc); y_axis[:,0] = -sc - cum_fs, cum_valid = nav_env.get_map_to_predict(loc, x_axis, y_axis, - map=b.traversible*1., - map_size=256) - cum_fs = cum_fs[0] - cum_fs = cv2.applyColorMap((cum_fs*255).astype(np.uint8), cv2.COLORMAP_JET) - im = Image.fromarray(cum_fs) - im = ImageTk.PhotoImage(im) - panel_overhead = tk.Label(root, image=im) - - def refresh(): - global current_node - image = b.render_nodes(b.task.nodes[[current_node],:])[0] - image = image.astype(np.uint8) - im = Image.fromarray(image) - im = ImageTk.PhotoImage(im) - panel.configure(image=im) - panel.image = im - - def left_key(event): - global current_node - current_node = b.take_action([current_node], [2], 1)[0][0] - refresh() - - def up_key(event): - global current_node - current_node = b.take_action([current_node], [3], 1)[0][0] - refresh() - - def right_key(event): - global current_node - current_node = b.take_action([current_node], [1], 1)[0][0] - refresh() - - def quit(event): - root.destroy() - - panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1, - sticky=tk.W+tk.E+tk.N+tk.S) - panel.bind('', left_key) - panel.bind('', up_key) - panel.bind('', right_key) - panel.bind('q', quit) - panel.focus_set() - panel.grid(row=0, column=0, rowspan=5, columnspan=5, - sticky=tk.W+tk.E+tk.N+tk.S) - root.mainloop() - -def simple_window(): - root = tk.Tk() - - image = np.zeros((128, 128, 3), dtype=np.uint8) - image[32:96, 32:96, 0] = 255 - im = Image.fromarray(image) - im = ImageTk.PhotoImage(im) - - image = np.zeros((128, 128, 3), dtype=np.uint8) - image[32:96, 32:96, 1] = 255 - im2 = Image.fromarray(image) - im2 = ImageTk.PhotoImage(im2) - - panel = tk.Label(root, image=im) - - def left_key(event): - panel.configure(image=im2) - panel.image = im2 - - def quit(event): - sys.exit() - - panel.bind('', left_key) - panel.bind('', left_key) - panel.bind('', left_key) - panel.bind('q', quit) - panel.focus_set() - panel.pack(side = "bottom", fill = "both", expand = "yes") - root.mainloop() - -def main(_): - b = load_building(FLAGS.dataset_name, FLAGS.building_name) - walk_through(b) - -if __name__ == '__main__': - app.run() diff --git a/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py b/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py deleted file mode 100644 index dab2819a6..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_nav_agent_release.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r""" Script to train and test the grid navigation agent. -Usage: - 1. Testing a model. - CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \ - PYTHONPATH='.' 
PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ - --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r - - 2. Training a model (locally). - CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \ - PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ - --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_ - - 3. Training a model (distributed). - # See https://www.tensorflow.org/deploy/distributed on how to setup distributed - # training. - CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 \ - PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_nav_agent_release.py \ - --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+train_train \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_ \ - --ps_tasks $num_ps --master $master_name --task $worker_id -""" - -import sys, os, numpy as np -import copy -import argparse, pprint -import time -import cProfile -import platform - - -import tensorflow as tf -from tensorflow.contrib import slim -from tensorflow.python.framework import ops -from tensorflow.contrib.framework.python.ops import variables - -import logging -from tensorflow.python.platform import gfile -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -from cfgs import config_cmp -from cfgs import config_vision_baseline -import datasets.nav_env as nav_env -import src.file_utils as fu -import src.utils as utils -import tfcode.cmp as cmp -from tfcode import tf_utils -from tfcode import vision_baseline_lstm - -FLAGS = flags.FLAGS - -flags.DEFINE_string('master', '', - 'The address of the tensorflow master') -flags.DEFINE_integer('ps_tasks', 0, 'The number of parameter servers. If the ' - 'value is 0, then the parameters are handled locally by ' - 'the worker.') -flags.DEFINE_integer('task', 0, 'The Task ID. 
This value is used when training ' - 'with multiple workers to identify each worker.') - -flags.DEFINE_integer('num_workers', 1, '') - -flags.DEFINE_string('config_name', '', '') - -flags.DEFINE_string('logdir', '', '') - -flags.DEFINE_integer('solver_seed', 0, '') - -flags.DEFINE_integer('delay_start_iters', 20, '') - -logging.basicConfig(level=logging.INFO) - -def main(_): - _launcher(FLAGS.config_name, FLAGS.logdir) - -def _launcher(config_name, logdir): - args = _setup_args(config_name, logdir) - - fu.makedirs(args.logdir) - - if args.control.train: - _train(args) - - if args.control.test: - _test(args) - -def get_args_for_config(config_name): - configs = config_name.split('.') - type = configs[0] - config_name = '.'.join(configs[1:]) - if type == 'cmp': - args = config_cmp.get_args_for_config(config_name) - args.setup_to_run = cmp.setup_to_run - args.setup_train_step_kwargs = cmp.setup_train_step_kwargs - - elif type == 'bl': - args = config_vision_baseline.get_args_for_config(config_name) - args.setup_to_run = vision_baseline_lstm.setup_to_run - args.setup_train_step_kwargs = vision_baseline_lstm.setup_train_step_kwargs - - else: - logging.fatal('Unknown type: {:s}'.format(type)) - return args - -def _setup_args(config_name, logdir): - args = get_args_for_config(config_name) - args.solver.num_workers = FLAGS.num_workers - args.solver.task = FLAGS.task - args.solver.ps_tasks = FLAGS.ps_tasks - args.solver.master = FLAGS.master - args.solver.seed = FLAGS.solver_seed - args.logdir = logdir - args.navtask.logdir = None - return args - -def _train(args): - container_name = "" - - R = lambda: nav_env.get_multiplexer_class(args.navtask, args.solver.task) - m = utils.Foo() - m.tf_graph = tf.Graph() - - config = tf.ConfigProto() - config.device_count['GPU'] = 1 - - with m.tf_graph.as_default(): - with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks, - merge_devices=True)): - with tf.container(container_name): - m = args.setup_to_run(m, args, is_training=True, - batch_norm_is_training=True, summary_mode='train') - - train_step_kwargs = args.setup_train_step_kwargs( - m, R(), os.path.join(args.logdir, 'train'), rng_seed=args.solver.task, - is_chief=args.solver.task==0, - num_steps=args.navtask.task_params.num_steps*args.navtask.task_params.num_goals, iters=1, - train_display_interval=args.summary.display_interval, - dagger_sample_bn_false=args.arch.dagger_sample_bn_false) - - delay_start = (args.solver.task*(args.solver.task+1))/2 * FLAGS.delay_start_iters - logging.error('delaying start for task %d by %d steps.', - args.solver.task, delay_start) - - additional_args = {} - final_loss = slim.learning.train( - train_op=m.train_op, - logdir=args.logdir, - master=args.solver.master, - is_chief=args.solver.task == 0, - number_of_steps=args.solver.max_steps, - train_step_fn=tf_utils.train_step_custom_online_sampling, - train_step_kwargs=train_step_kwargs, - global_step=m.global_step_op, - init_op=m.init_op, - init_fn=m.init_fn, - sync_optimizer=m.sync_optimizer, - saver=m.saver_op, - startup_delay_steps=delay_start, - summary_op=None, session_config=config, **additional_args) - -def _test(args): - args.solver.master = '' - container_name = "" - checkpoint_dir = os.path.join(format(args.logdir)) - logging.error('Checkpoint_dir: %s', args.logdir) - - config = tf.ConfigProto(); - config.device_count['GPU'] = 1; - - m = utils.Foo() - m.tf_graph = tf.Graph() - - rng_data_seed = 0; rng_action_seed = 0; - R = lambda: nav_env.get_multiplexer_class(args.navtask, rng_data_seed) - with 
m.tf_graph.as_default(): - with tf.container(container_name): - m = args.setup_to_run( - m, args, is_training=False, - batch_norm_is_training=args.control.force_batchnorm_is_training_at_test, - summary_mode=args.control.test_mode) - train_step_kwargs = args.setup_train_step_kwargs( - m, R(), os.path.join(args.logdir, args.control.test_name), - rng_seed=rng_data_seed, is_chief=True, - num_steps=args.navtask.task_params.num_steps*args.navtask.task_params.num_goals, - iters=args.summary.test_iters, train_display_interval=None, - dagger_sample_bn_false=args.arch.dagger_sample_bn_false) - - saver = slim.learning.tf_saver.Saver(variables.get_variables_to_restore()) - - sv = slim.learning.supervisor.Supervisor( - graph=ops.get_default_graph(), logdir=None, init_op=m.init_op, - summary_op=None, summary_writer=None, global_step=None, saver=m.saver_op) - - last_checkpoint = None - reported = False - while True: - last_checkpoint_ = None - while last_checkpoint_ is None: - last_checkpoint_ = slim.evaluation.wait_for_new_checkpoint( - checkpoint_dir, last_checkpoint, seconds_to_sleep=10, timeout=60) - if last_checkpoint_ is None: break - - last_checkpoint = last_checkpoint_ - checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) - - logging.info('Starting evaluation at %s using checkpoint %s.', - time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), - last_checkpoint) - - if (args.control.only_eval_when_done == False or - checkpoint_iter >= args.solver.max_steps): - start = time.time() - logging.info('Starting evaluation at %s using checkpoint %s.', - time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()), - last_checkpoint) - - with sv.managed_session(args.solver.master, config=config, - start_standard_services=False) as sess: - sess.run(m.init_op) - sv.saver.restore(sess, last_checkpoint) - sv.start_queue_runners(sess) - if args.control.reset_rng_seed: - train_step_kwargs['rng_data'] = [np.random.RandomState(rng_data_seed), - np.random.RandomState(rng_data_seed)] - train_step_kwargs['rng_action'] = np.random.RandomState(rng_action_seed) - vals, _ = tf_utils.train_step_custom_online_sampling( - sess, None, m.global_step_op, train_step_kwargs, - mode=args.control.test_mode) - should_stop = False - - if checkpoint_iter >= args.solver.max_steps: - should_stop = True - - if should_stop: - break - -if __name__ == '__main__': - app.run() diff --git a/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py b/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py deleted file mode 100644 index 08273a83b..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r""" -Code for plotting trajectories in the top view, and also plot first person views -from saved trajectories. 
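# A small sketch of the RNG-reset idea in _test() above: evaluation reseeds
# fresh np.random.RandomState objects before every checkpoint so that each
# checkpoint is benchmarked on the identical episode sequence.
# `sample_episode` is a hypothetical stand-in for the environment's sampler.
import numpy as np

def sample_episode(rng):
  return rng.randint(0, 1000, size=3)   # e.g. (start, goal, room) ids

def eval_with_reset(seed, num_episodes=4):
  rng = np.random.RandomState(seed)     # fresh state per evaluation run
  return [sample_episode(rng).tolist() for _ in range(num_episodes)]

assert eval_with_reset(0) == eval_with_reset(0)  # identical across checkpoints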
Does not run the network but only loads the mesh data -to plot the view points. - CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 - PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \ - --first_person --num_steps 40 \ - --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \ - --imset test --alsologtostderr --base_dir output --out_dir vis - -""" -import os, sys, numpy as np, copy -import matplotlib -matplotlib.use("Agg") -import matplotlib.pyplot as plt -import matplotlib.animation as animation -from matplotlib.gridspec import GridSpec - -import tensorflow as tf -from tensorflow.contrib import slim -import cv2 -import logging -from tensorflow.python.platform import gfile -from tensorflow.python.platform import app -from tensorflow.python.platform import flags - -from datasets import nav_env -import scripts.script_nav_agent_release as sna -import src.file_utils as fu -from src import graph_utils -from src import utils -FLAGS = flags.FLAGS - -flags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output') -flags.DEFINE_string('type', '', 'Optional type.') -flags.DEFINE_bool('first_person', False, 'Visualize the first person view.') -flags.DEFINE_bool('top_view', False, 'Visualize the trajectory in the top view.') -flags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.') -flags.DEFINE_string('imset', 'test', '') -flags.DEFINE_string('base_dir', 'output', 'Cache directory.') - -def _get_suffix_str(): - return '' - - -def _load_trajectory(): - base_dir = FLAGS.base_dir - config_name = FLAGS.config_name+_get_suffix_str() - - dir_name = os.path.join(base_dir, FLAGS.type, config_name) - logging.info('Waiting for snapshot in directory %s.', dir_name) - last_checkpoint = slim.evaluation.wait_for_new_checkpoint(dir_name, None) - checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1]) - - # Load the distances. - a = utils.load_variables(os.path.join(dir_name, 'bench_on_'+FLAGS.imset, - 'all_locs_at_t_{:d}.pkl'.format(checkpoint_iter))) - return a - -def _compute_hardness(): - # Load the stanford data to compute the hardness. - if FLAGS.type == '': - args = sna.get_args_for_config(FLAGS.config_name+'+bench_'+FLAGS.imset) - else: - args = sna.get_args_for_config(FLAGS.type+'.'+FLAGS.config_name+'+bench_'+FLAGS.imset) - - args.navtask.logdir = None - R = lambda: nav_env.get_multiplexer_class(args.navtask, 0) - R = R() - - rng_data = [np.random.RandomState(0), np.random.RandomState(0)] - - # Sample a room. - h_dists = [] - gt_dists = [] - for i in range(250): - e = R.sample_env(rng_data) - nodes = e.task.nodes - - # Initialize the agent. 
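# A compact numpy sketch of the hardness statistic gathered just below: the
# heuristic distance h is a lower bound on the geodesic distance gt, so
# hardness = 1 - h/gt is 0 for unobstructed episodes and approaches 1 when
# the true shortest path is much longer than the heuristic suggests. The
# values here are illustrative only.
import numpy as np

h_dists = np.array([10., 12., 5., 30.])    # heuristic distances (lower bounds)
gt_dists = np.array([10., 24., 20., 40.])  # geodesic distances on the graph
hardness = 1. - h_dists / gt_dists
print(hardness)                            # [0.   0.5  0.75 0.25]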
- init_env_state = e.reset(rng_data) - - gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s] - for j, s in enumerate(e.episode.start_node_ids)] - - for j in range(args.navtask.task_params.batch_size): - start_node_id = e.episode.start_node_ids[j] - end_node_id =e.episode.goal_node_ids[0][j] - h_dist = graph_utils.heuristic_fn_vec( - nodes[[start_node_id],:], nodes[[end_node_id], :], - n_ori=args.navtask.task_params.n_ori, - step_size=args.navtask.task_params.step_size)[0][0] - gt_dist = e.episode.dist_to_goal[0][j][start_node_id] - h_dists.append(h_dist) - gt_dists.append(gt_dist) - - h_dists = np.array(h_dists) - gt_dists = np.array(gt_dists) - e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)]) - input = e.get_common_data() - orig_maps = input['orig_maps'][0,0,:,:,0] - return h_dists, gt_dists, orig_maps - -def plot_trajectory_first_person(dt, orig_maps, out_dir): - out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(), - FLAGS.imset) - fu.makedirs(out_dir) - - # Load the model so that we can render. - plt.set_cmap('gray') - samples_per_action = 8; wait_at_action = 0; - - Writer = animation.writers['mencoder'] - writer = Writer(fps=3*(samples_per_action+wait_at_action), - metadata=dict(artist='anonymous'), bitrate=1800) - - args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset) - args.navtask.logdir = None - navtask_ = copy.deepcopy(args.navtask) - navtask_.camera_param.modalities = ['rgb'] - navtask_.task_params.modalities = ['rgb'] - sz = 512 - navtask_.camera_param.height = sz - navtask_.camera_param.width = sz - navtask_.task_params.img_height = sz - navtask_.task_params.img_width = sz - R = lambda: nav_env.get_multiplexer_class(navtask_, 0) - R = R() - b = R.buildings[0] - - f = [0 for _ in range(wait_at_action)] + \ - [float(_)/samples_per_action for _ in range(samples_per_action)]; - - # Generate things for it to render. - inds_to_do = [] - inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390] - - for i in inds_to_do: - fig = plt.figure(figsize=(10,8)) - gs = GridSpec(3,4) - gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97, right=1.0, bottom=0.) - ax = fig.add_subplot(gs[:,:-1]) - ax1 = fig.add_subplot(gs[0,-1]) - ax2 = fig.add_subplot(gs[1,-1]) - ax3 = fig.add_subplot(gs[2,-1]) - axes = [ax, ax1, ax2, ax3] - # ax = fig.add_subplot(gs[:,:]) - # axes = [ax] - for ax in axes: - ax.set_axis_off() - - node_ids = dt['all_node_ids'][i, :, 0]*1 - # Prune so that last node is not repeated more than 3 times? - if np.all(node_ids[-4:] == node_ids[-1]): - while node_ids[-4] == node_ids[-1]: - node_ids = node_ids[:-1] - num_steps = np.minimum(FLAGS.num_steps, len(node_ids)) - - xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids]) - xyt_diff = xyt[1:,:] - xyt[:-1:,:] - xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4) - ind = np.where(xyt_diff[:,2] == 3)[0] - xyt_diff[ind, 2] = -1 - xyt_diff = np.expand_dims(xyt_diff, axis=1) - to_cat = [xyt_diff*_ for _ in f] - perturbs_all = np.concatenate(to_cat, axis=1) - perturbs_all = np.concatenate([perturbs_all, np.zeros_like(perturbs_all[:,:,:1])], axis=2) - node_ids_all = np.expand_dims(node_ids, axis=1)*1 - node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1) - node_ids_all = np.reshape(node_ids_all[:-1,:], -1) - perturbs_all = np.reshape(perturbs_all, [-1, 4]) - imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all) - - # Get action at each node. 
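# A small sketch of the pose-interpolation trick used above to render smooth
# first-person video: consecutive (x, y, theta) states are differenced, the
# orientation delta is wrapped so that 3 becomes -1 (one turn right instead of
# three turns left), and each delta is scaled by fractions in [0, 1) to get
# in-between render poses. Toy values; the real code also appends a zero
# translation column before rendering.
import numpy as np

xyt = np.array([[0., 0., 0.], [1., 0., 0.], [1., 0., 3.]])  # x, y, orientation
samples_per_action = 4
f = np.arange(samples_per_action) / float(samples_per_action)

diff = xyt[1:] - xyt[:-1]
diff[:, 2] = np.mod(diff[:, 2], 4)
diff[diff[:, 2] == 3, 2] = -1              # take the shortest turn direction

# One interpolated pose per (step, fraction) pair.
interp = xyt[:-1, None, :] + diff[:, None, :] * f[None, :, None]
print(interp.reshape(-1, 3))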
- actions = [] - _, action_to_nodes = b.get_feasible_actions(node_ids) - for j in range(num_steps-1): - action_to_node = action_to_nodes[j] - node_to_action = dict(zip(action_to_node.values(), action_to_node.keys())) - actions.append(node_to_action[node_ids[j+1]]) - - def init_fn(): - return fig, - gt_dist_to_goal = [] - - # Render trajectories. - def worker(j): - # Plot the image. - step_number = j/(samples_per_action + wait_at_action) - img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off(); - img = img.astype(np.uint8); ax.imshow(img); - tt = ax.set_title( - "First Person View\n" + - "Top corners show diagnostics (distance, agents' action) not input to agent.", - fontsize=12) - plt.setp(tt, color='white') - - # Distance to goal. - t = 'Dist to Goal:\n{:2d} steps'.format(int(dt['all_d_at_t'][i, step_number])) - t = ax.text(0.01, 0.99, t, - horizontalalignment='left', - verticalalignment='top', - fontsize=20, color='red', - transform=ax.transAxes, alpha=1.0) - t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1)) - - # Action to take. - action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', r'$\Uparrow$ '] - t = ax.text(0.99, 0.99, action_latex[actions[step_number]], - horizontalalignment='right', - verticalalignment='top', - fontsize=40, color='green', - transform=ax.transAxes, alpha=1.0) - t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1)) - - - # Plot the map top view. - ax = axes[-1] - if j == 0: - # Plot the map - locs = dt['all_locs'][i,:num_steps,:] - goal_loc = dt['all_goal_locs'][i,:,:] - xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0)) - xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0)) - xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24) - xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24) - - ax.set_axis_on() - ax.patch.set_facecolor((0.333, 0.333, 0.333)) - ax.set_xticks([]); ax.set_yticks([]); - ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0) - ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12) - - locs = dt['all_locs'][i,:1,:] - ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12) - - ax.set_xlim([xy1[0], xy2[0]]) - ax.set_ylim([xy1[1], xy2[1]]) - - locs = dt['all_locs'][i,step_number,:] - locs = np.expand_dims(locs, axis=0) - ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4) - tt = ax.set_title('Trajectory in topview', fontsize=14) - plt.setp(tt, color='white') - return fig, - - line_ani = animation.FuncAnimation(fig, worker, - (num_steps-1)*(wait_at_action+samples_per_action), - interval=500, blit=True, init_func=init_fn) - tmp_file_name = 'tmp.mp4' - line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'}) - out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i)) - print(out_file_name) - - if fu.exists(out_file_name): - gfile.Remove(out_file_name) - gfile.Copy(tmp_file_name, out_file_name) - gfile.Remove(tmp_file_name) - plt.close(fig) - -def plot_trajectory(dt, hardness, orig_maps, out_dir): - out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(), - FLAGS.imset) - fu.makedirs(out_dir) - out_file = os.path.join(out_dir, 'all_locs_at_t.pkl') - dt['hardness'] = hardness - utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True) - - #Plot trajectories onto the maps - plt.set_cmap('gray') - for i in range(4000): - goal_loc = dt['all_goal_locs'][i, :, :] - locs = np.concatenate((dt['all_locs'][i,:,:], - dt['all_locs'][i,:,:]), axis=0) - xymin = np.minimum(np.min(goal_loc, axis=0), 
np.min(locs, axis=0)) - xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0)) - xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24) - xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24) - - fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6)) - ax.set_axis_on() - ax.patch.set_facecolor((0.333, 0.333, 0.333)) - ax.set_xticks([]) - ax.set_yticks([]) - - all_locs = dt['all_locs'][i,:,:]*1 - uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1 - uniq = np.sort(uniq).tolist() - uniq.insert(0,0) - uniq = np.array(uniq) - all_locs = all_locs[uniq, :] - - ax.plot(dt['all_locs'][i, 0, 0], - dt['all_locs'][i, 0, 1], 'b.', markersize=24) - ax.plot(dt['all_goal_locs'][i, 0, 0], - dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19) - ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2) - ax.scatter(all_locs[:,0], all_locs[:,1], - c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0], - cmap='Reds', s=30, linewidth=0) - ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal') - ax.set_xlim([xy1[0], xy2[0]]) - ax.set_ylim([xy1[1], xy2[1]]) - - file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i)) - print(file_name) - with fu.fopen(file_name, 'w') as f: - plt.savefig(f) - plt.close(fig) - - -def main(_): - a = _load_trajectory() - h_dists, gt_dists, orig_maps = _compute_hardness() - hardness = 1.-h_dists*1./ gt_dists - - if FLAGS.top_view: - plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir) - - if FLAGS.first_person: - plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir) - -if __name__ == '__main__': - app.run() diff --git a/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py b/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py deleted file mode 100644 index 58f32d121..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import os -import glob -import numpy as np -import logging -import cPickle -from datasets import nav_env -from datasets import factory -from src import utils -from src import map_utils as mu - -logging.basicConfig(level=logging.INFO) -DATA_DIR = 'data/stanford_building_parser_dataset_raw/' - -mkdir_if_missing = utils.mkdir_if_missing -save_variables = utils.save_variables - -def _get_semantic_maps(building_name, transform, map_, flip, cats): - rooms = get_room_in_building(building_name) - maps = [] - for cat in cats: - maps.append(np.zeros((map_.size[1], map_.size[0]))) - - for r in rooms: - room = load_room(building_name, r, category_list=cats) - classes = room['class_id'] - for i, cat in enumerate(cats): - c_ind = cats.index(cat) - ind = [_ for _, c in enumerate(classes) if c == c_ind] - if len(ind) > 0: - vs = [room['vertexs'][x]*1 for x in ind] - vs = np.concatenate(vs, axis=0) - if transform: - vs = np.array([vs[:,1], vs[:,0], vs[:,2]]).T - vs[:,0] = -vs[:,0] - vs[:,1] += 4.20 - vs[:,0] += 6.20 - vs = vs*100. - if flip: - vs[:,1] = -vs[:,1] - maps[i] = maps[i] + \ - mu._project_to_map(map_, vs, ignore_points_outside_map=True) - return maps - -def _map_building_name(building_name): - b = int(building_name.split('_')[0][4]) - out_name = 'Area_{:d}'.format(b) - if b == 5: - if int(building_name.split('_')[0][5]) == 1: - transform = True - else: - transform = False - else: - transform = False - return out_name, transform - -def get_categories(): - cats = ['beam', 'board', 'bookcase', 'ceiling', 'chair', 'clutter', 'column', - 'door', 'floor', 'sofa', 'table', 'wall', 'window'] - return cats - -def _write_map_files(b_in, b_out, transform): - cats = get_categories() - - env = utils.Foo(padding=10, resolution=5, num_point_threshold=2, - valid_min=-10, valid_max=200, n_samples_per_face=200) - robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120, - camera_elevation_degree=-15) - - building_loader = factory.get_dataset('sbpd') - for flip in [False, True]: - b = nav_env.Building(b_out, robot, env, flip=flip, - building_loader=building_loader) - logging.info("building_in: %s, building_out: %s, transform: %d", b_in, - b_out, transform) - maps = _get_semantic_maps(b_in, transform, b.map, flip, cats) - maps = np.transpose(np.array(maps), axes=[1,2,0]) - - # Load file from the cache. - file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl' - file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1], - b.map.origin[0], b.map.origin[1], - b.map.resolution, flip) - out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name) - logging.info('Writing semantic maps to %s.', out_file) - save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True) - -def _transform_area5b(room_dimension): - for a in room_dimension.keys(): - r = room_dimension[a]*1 - r[[0,1,3,4]] = r[[1,0,4,3]] - r[[0,3]] = -r[[3,0]] - r[[1,4]] += 4.20 - r[[0,3]] += 6.20 - room_dimension[a] = r - return room_dimension - -def collect_room(building_name, room_name): - room_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name, - room_name, 'Annotations') - files = glob.glob1(room_dir, '*.txt') - files = sorted(files, key=lambda s: s.lower()) - vertexs = []; colors = []; - for f in files: - file_name = os.path.join(room_dir, f) - logging.info(' %s', file_name) - a = np.loadtxt(file_name) - vertex = a[:,:3]*1. 
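# A minimal sketch of the Area-5 alignment applied in _get_semantic_maps and
# _transform_area5b above: one half of Stanford's Area_5 sits in a different
# frame, so its vertices swap x/y, negate the new x, and shift by the fixed
# (6.20, 4.20) offsets hard-coded in this file before scaling by 100
# (presumably meters to centimeters, given the 5 cm map resolution). Toy
# coordinates below.
import numpy as np

vs = np.array([[1.0, 2.0, 0.5]])                  # (x, y, z) vertices
vs = np.array([vs[:, 1], vs[:, 0], vs[:, 2]]).T   # swap x and y
vs[:, 0] = -vs[:, 0]                              # flip the new x axis
vs[:, 1] += 4.20                                  # translate into common frame
vs[:, 0] += 6.20
vs = vs * 100.                                    # scale to map units
print(vs)                                         # [[420., 520., 50.]]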
- color = a[:,3:]*1 - color = color.astype(np.uint8) - vertexs.append(vertex) - colors.append(color) - files = [f.split('.')[0] for f in files] - out = {'vertexs': vertexs, 'colors': colors, 'names': files} - return out - -def load_room(building_name, room_name, category_list=None): - room = collect_room(building_name, room_name) - room['building_name'] = building_name - room['room_name'] = room_name - instance_id = range(len(room['names'])) - room['instance_id'] = instance_id - if category_list is not None: - name = [r.split('_')[0] for r in room['names']] - class_id = [] - for n in name: - if n in category_list: - class_id.append(category_list.index(n)) - else: - class_id.append(len(category_list)) - room['class_id'] = class_id - room['category_list'] = category_list - return room - -def get_room_in_building(building_name): - building_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name) - rn = os.listdir(building_dir) - rn = [x for x in rn if os.path.isdir(os.path.join(building_dir, x))] - rn = sorted(rn, key=lambda s: s.lower()) - return rn - -def write_room_dimensions(b_in, b_out, transform): - rooms = get_room_in_building(b_in) - room_dimension = {} - for r in rooms: - room = load_room(b_in, r, category_list=None) - vertex = np.concatenate(room['vertexs'], axis=0) - room_dimension[r] = np.concatenate((np.min(vertex, axis=0), np.max(vertex, axis=0)), axis=0) - if transform == 1: - room_dimension = _transform_area5b(room_dimension) - - out_file = os.path.join(DATA_DIR, 'processing', 'room-dimension', b_out+'.pkl') - save_variables(out_file, [room_dimension], ['room_dimension'], overwrite=True) - -def write_room_dimensions_all(I): - mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'room-dimension')) - bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6'] - bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6'] - transforms = [0, 0, 0, 0, 0, 1, 0] - - for i in I: - b_in = bs_in[i] - b_out = bs_out[i] - t = transforms[i] - write_room_dimensions(b_in, b_out, t) - -def write_class_maps_all(I): - mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'class-maps')) - bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6'] - bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6'] - transforms = [0, 0, 0, 0, 0, 1, 0] - - for i in I: - b_in = bs_in[i] - b_out = bs_out[i] - t = transforms[i] - _write_map_files(b_in, b_out, t) - - -if __name__ == '__main__': - write_room_dimensions_all([0, 2, 3, 4, 5, 6]) - write_class_maps_all([0, 2, 3, 4, 5, 6]) - diff --git a/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh b/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh deleted file mode 100644 index 1384fabe6..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.sh +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -cd data/stanford_building_parser_dataset_raw -unzip Stanford3dDataset_v1.2.zip -cd ../../ -PYOPENGL_PLATFORM=egl PYTHONPATH='.' python scripts/script_preprocess_annoations_S3DIS.py - -mv data/stanford_building_parser_dataset_raw/processing/room-dimension data/stanford_building_parser_dataset/. -mv data/stanford_building_parser_dataset_raw/processing/class-maps data/stanford_building_parser_dataset/. - -echo "You may now delete data/stanford_building_parser_dataset_raw if needed." diff --git a/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh b/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh deleted file mode 100644 index 557a4dde6..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_preprocess_meshes_S3DIS.sh +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -mkdir -p data/stanford_building_parser_dataset -mkdir -p data/stanford_building_parser_dataset/mesh -cd data/stanford_building_parser_dataset_raw - -# Untar the files and extract the meshes. -for t in "1" "3" "4" "5a" "5b" "6"; do - tar -xf area_"$t"_noXYZ.tar area_$t/3d/rgb_textures - mv area_$t/3d/rgb_textures ../stanford_building_parser_dataset/mesh/area$t - rmdir area_$t/3d - rmdir area_$t -done - -cd ../../ - -# Preprocess meshes to remove the group and chunk information. -cd data/stanford_building_parser_dataset/ -for t in "1" "3" "4" "5a" "5b" "6"; do - obj_name=`ls mesh/area$t/*.obj` - cp $obj_name "$obj_name".bck - cat $obj_name.bck | grep -v '^g' | grep -v '^o' > $obj_name -done -cd ../../ diff --git a/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh b/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh deleted file mode 100644 index a4299fff5..000000000 --- a/research/cognitive_mapping_and_planning/scripts/script_test_pretrained_models.sh +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Test CMP models. 
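# The test commands below repeat one invocation per configuration; this is a
# hedged Python sketch of an equivalent launcher that loops over config names
# and shells out. The config names and environment variables mirror the
# commands below; the python executable name is an assumption, and the
# machine-specific LD_LIBRARY_PATH / GPU settings are omitted.
import os
import subprocess

configs = [
    'cmp.lmap_Msc.clip5.sbpd_d_r2r',
    'cmp.lmap_Msc.clip5.sbpd_rgb_r2r',
    'bl.v2.noclip.sbpd_d_r2r',
]
env = dict(os.environ,
           CUDA_VISIBLE_DEVICES='0',
           PYTHONPATH='.',
           PYOPENGL_PLATFORM='egl')
for c in configs:
  subprocess.check_call(
      ['python', 'scripts/script_nav_agent_release.py',
       '--config_name', c + '+bench_test',
       '--logdir', 'output/' + c],
      env=env)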
-CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_rgb_r2r+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_rgb_r2r - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_ST+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_ST - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_rgb_ST+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_rgb_ST - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80+bench_test \ - --logdir output/cmp.lmap_Msc.clip5.sbpd_d_r2r_h0_64_80 - -# Test LSTM baseline models. -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_r2r+bench_test \ - --logdir output/bl.v2.noclip.sbpd_d_r2r - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_rgb_r2r+bench_test \ - --logdir output/bl.v2.noclip.sbpd_rgb_r2r - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_ST+bench_test \ - --logdir output/bl.v2.noclip.sbpd_d_ST - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_rgb_ST+bench_test \ - --logdir output/bl.v2.noclip.sbpd_rgb_ST - -CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' PYOPENGL_PLATFORM=egl \ - python scripts/script_nav_agent_release.py --config_name bl.v2.noclip.sbpd_d_r2r_h0_64_80+bench_test \ - --logdir output/bl.v2.noclip.sbpd_d_r2r_h0_64_80 - -# Visualize test trajectories in top view. -# CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64 PYTHONPATH='.' 
PYOPENGL_PLATFORM=egl \ -# python scripts/script_plot_trajectory.py \ -# --first_person --num_steps 40 \ -# --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \ -# --imset test --alsologtostderr diff --git a/research/cognitive_mapping_and_planning/src/__init__.py b/research/cognitive_mapping_and_planning/src/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/src/depth_utils.py b/research/cognitive_mapping_and_planning/src/depth_utils.py deleted file mode 100644 index 35f14fc7c..000000000 --- a/research/cognitive_mapping_and_planning/src/depth_utils.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utilities for processing depth images. -""" -import numpy as np -import src.rotation_utils as ru -import src.utils as utils - -def get_camera_matrix(width, height, fov): - """Returns a camera matrix from image size and fov.""" - xc = (width-1.) / 2. - zc = (height-1.) / 2. - f = (width / 2.) / np.tan(np.deg2rad(fov / 2.)) - camera_matrix = utils.Foo(xc=xc, zc=zc, f=f) - return camera_matrix - -def get_point_cloud_from_z(Y, camera_matrix): - """Projects the depth image Y into a 3D point cloud. - Inputs: - Y is ...xHxW - camera_matrix - Outputs: - X is positive going right - Y is positive into the image - Z is positive up in the image - XYZ is ...xHxWx3 - """ - x, z = np.meshgrid(np.arange(Y.shape[-1]), - np.arange(Y.shape[-2]-1, -1, -1)) - for i in range(Y.ndim-2): - x = np.expand_dims(x, axis=0) - z = np.expand_dims(z, axis=0) - X = (x-camera_matrix.xc) * Y / camera_matrix.f - Z = (z-camera_matrix.zc) * Y / camera_matrix.f - XYZ = np.concatenate((X[...,np.newaxis], Y[...,np.newaxis], - Z[...,np.newaxis]), axis=X.ndim) - return XYZ - -def make_geocentric(XYZ, sensor_height, camera_elevation_degree): - """Transforms the point cloud into geocentric coordinate frame. - Input: - XYZ : ...x3 - sensor_height : height of the sensor - camera_elevation_degree : camera elevation to rectify. - Output: - XYZ : ...x3 - """ - R = ru.get_r_matrix([1.,0.,0.], angle=np.deg2rad(camera_elevation_degree)) - XYZ = np.matmul(XYZ.reshape(-1,3), R.T).reshape(XYZ.shape) - XYZ[...,2] = XYZ[...,2] + sensor_height - return XYZ - -def bin_points(XYZ_cms, map_size, z_bins, xy_resolution): - """Bins points into xy-z bins - XYZ_cms is ... x H x W x3 - Outputs is ... x map_size x map_size x (len(z_bins)+1) - """ - sh = XYZ_cms.shape - XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]]) - n_z_bins = len(z_bins)+1 - map_center = (map_size-1.)/2. 
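# A short usage sketch for the depth helpers defined in this file: build the
# pinhole camera model, back-project a synthetic depth image into a
# camera-frame point cloud, then rotate and lift it into the geocentric frame.
# Shapes are the point; the constants are illustrative.
import numpy as np
import src.depth_utils as du

depth = np.ones((480, 640)) * 2.5                    # H x W depth image
cam = du.get_camera_matrix(width=640, height=480, fov=60.)
xyz = du.get_point_cloud_from_z(depth, cam)          # -> 480 x 640 x 3
xyz = du.make_geocentric(xyz, sensor_height=1.2,
                         camera_elevation_degree=-15.)
print(xyz.shape)                                     # (480, 640, 3)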
- counts = [] - isvalids = [] - for XYZ_cm in XYZ_cms: - isnotnan = np.logical_not(np.isnan(XYZ_cm[:,:,0])) - X_bin = np.round(XYZ_cm[:,:,0] / xy_resolution + map_center).astype(np.int32) - Y_bin = np.round(XYZ_cm[:,:,1] / xy_resolution + map_center).astype(np.int32) - Z_bin = np.digitize(XYZ_cm[:,:,2], bins=z_bins).astype(np.int32) - - isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size, - Z_bin >= 0, Z_bin < n_z_bins, isnotnan]) - isvalid = np.all(isvalid, axis=0) - - ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin - ind[np.logical_not(isvalid)] = 0 - count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32), - minlength=map_size*map_size*n_z_bins) - count = np.reshape(count, [map_size, map_size, n_z_bins]) - counts.append(count) - isvalids.append(isvalid) - counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size, n_z_bins]) - isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1]) - return counts, isvalids diff --git a/research/cognitive_mapping_and_planning/src/file_utils.py b/research/cognitive_mapping_and_planning/src/file_utils.py deleted file mode 100644 index b386236ca..000000000 --- a/research/cognitive_mapping_and_planning/src/file_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utilities for manipulating files. -""" -import os -import numpy as np -import PIL -from tensorflow.python.platform import gfile -import cv2 - -exists = lambda path: gfile.Exists(path) -fopen = lambda path, mode: gfile.Open(path, mode) -makedirs = lambda path: gfile.MakeDirs(path) -listdir = lambda path: gfile.ListDir(path) -copyfile = lambda a, b, o: gfile.Copy(a,b,o) - -def write_image(image_path, rgb): - ext = os.path.splitext(image_path)[1] - with gfile.GFile(image_path, 'w') as f: - img_str = cv2.imencode(ext, rgb[:,:,::-1])[1].tostring() - f.write(img_str) - -def read_image(image_path, type='rgb'): - with fopen(image_path, 'r') as f: - I = PIL.Image.open(f) - II = np.array(I) - if type == 'rgb': - II = II[:,:,:3] - return II diff --git a/research/cognitive_mapping_and_planning/src/graph_utils.py b/research/cognitive_mapping_and_planning/src/graph_utils.py deleted file mode 100644 index cd99fd22a..000000000 --- a/research/cognitive_mapping_and_planning/src/graph_utils.py +++ /dev/null @@ -1,552 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Various function to manipulate graphs for computing distances. -""" -import skimage.morphology -import numpy as np -import networkx as nx -import itertools -import logging -from datasets.nav_env import get_path_ids -import graph_tool as gt -import graph_tool.topology -import graph_tool.generation -import src.utils as utils - -# Compute shortest path from all nodes to or from all source nodes -def get_distance_node_list(gtG, source_nodes, direction, weights=None): - gtG_ = gt.Graph(gtG) - v = gtG_.add_vertex() - - if weights is not None: - weights = gtG_.edge_properties[weights] - - for s in source_nodes: - e = gtG_.add_edge(s, int(v)) - if weights is not None: - weights[e] = 0. - - if direction == 'to': - dist = gt.topology.shortest_distance( - gt.GraphView(gtG_, reversed=True), source=gtG_.vertex(int(v)), - target=None, weights=weights) - elif direction == 'from': - dist = gt.topology.shortest_distance( - gt.GraphView(gtG_, reversed=False), source=gtG_.vertex(int(v)), - target=None, weights=weights) - dist = np.array(dist.get_array()) - dist = dist[:-1] - if weights is None: - dist = dist-1 - return dist - -# Functions for semantically labelling nodes in the traversal graph. -def generate_lattice(sz_x, sz_y): - """Generates a lattice with sz_x vertices along x and sz_y vertices along y - direction Each of these vertices is step_size distance apart. Origin is at - (0,0). """ - g = gt.generation.lattice([sz_x, sz_y]) - x, y = np.meshgrid(np.arange(sz_x), np.arange(sz_y)) - x = np.reshape(x, [-1,1]); y = np.reshape(y, [-1,1]); - nodes = np.concatenate((x,y), axis=1) - return g, nodes - -def add_diagonal_edges(g, nodes, sz_x, sz_y, edge_len): - offset = [sz_x+1, sz_x-1] - for o in offset: - s = np.arange(nodes.shape[0]-o-1) - t = s + o - ind = np.all(np.abs(nodes[s,:] - nodes[t,:]) == np.array([[1,1]]), axis=1) - s = s[ind][:,np.newaxis] - t = t[ind][:,np.newaxis] - st = np.concatenate((s,t), axis=1) - for i in range(st.shape[0]): - e = g.add_edge(st[i,0], st[i,1], add_missing=False) - g.ep['wts'][e] = edge_len - -def convert_traversible_to_graph(traversible, ff_cost=1., fo_cost=1., - oo_cost=1., connectivity=4): - assert(connectivity == 4 or connectivity == 8) - - sz_x = traversible.shape[1] - sz_y = traversible.shape[0] - g, nodes = generate_lattice(sz_x, sz_y) - - # Assign costs. - edge_wts = g.new_edge_property('float') - g.edge_properties['wts'] = edge_wts - wts = np.ones(g.num_edges(), dtype=np.float32) - edge_wts.get_array()[:] = wts - - if connectivity == 8: - add_diagonal_edges(g, nodes, sz_x, sz_y, np.sqrt(2.)) - - se = np.array([[int(e.source()), int(e.target())] for e in g.edges()]) - s_xy = nodes[se[:,0]] - t_xy = nodes[se[:,1]] - s_t = np.ravel_multi_index((s_xy[:,1], s_xy[:,0]), traversible.shape) - t_t = np.ravel_multi_index((t_xy[:,1], t_xy[:,0]), traversible.shape) - s_t = traversible.ravel()[s_t] - t_t = traversible.ravel()[t_t] - - wts = np.zeros(g.num_edges(), dtype=np.float32) - wts[np.logical_and(s_t == True, t_t == True)] = ff_cost - wts[np.logical_and(s_t == False, t_t == False)] = oo_cost - wts[np.logical_xor(s_t, t_t)] = fo_cost - - edge_wts = g.edge_properties['wts'] - for i, e in enumerate(g.edges()): - edge_wts[e] = edge_wts[e] * wts[i] - # d = edge_wts.get_array()*1. 
- # edge_wts.get_array()[:] = d*wts - return g, nodes - -def label_nodes_with_class(nodes_xyt, class_maps, pix): - """ - Returns: - class_maps__: one-hot class_map for each class. - node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes - """ - # Assign each pixel to a node. - selem = skimage.morphology.disk(pix) - class_maps_ = class_maps*1. - for i in range(class_maps.shape[2]): - class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem) - class_maps__ = np.argmax(class_maps_, axis=2) - class_maps__[np.max(class_maps_, axis=2) == 0] = -1 - - # For each node pick out the label from this class map. - x = np.round(nodes_xyt[:,[0]]).astype(np.int32) - y = np.round(nodes_xyt[:,[1]]).astype(np.int32) - ind = np.ravel_multi_index((y,x), class_maps__.shape) - node_class_label = class_maps__.ravel()[ind][:,0] - - # Convert to one hot versions. - class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool) - node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool) - for i in range(class_maps.shape[2]): - class_maps_one_hot[:,:,i] = class_maps__ == i - node_class_label_one_hot[:,i] = node_class_label == i - return class_maps_one_hot, node_class_label_one_hot - -def label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible, - ff_cost=1., fo_cost=1., oo_cost=1., - connectivity=4): - """Labels nodes in nodes_xyt with class labels using geodesic distance as - defined by traversible from class_maps. - Inputs: - nodes_xyt - class_maps: counts for each class. - pix: distance threshold to consider close enough to target. - traversible: binary map of whether traversible or not. - Output: - labels: For each node in nodes_xyt returns a label of the class or -1 is - unlabelled. - """ - g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost, - fo_cost=fo_cost, oo_cost=oo_cost, - connectivity=connectivity) - - class_dist = np.zeros_like(class_maps*1.) - n_classes = class_maps.shape[2] - if False: - # Assign each pixel to a class based on number of points. - selem = skimage.morphology.disk(pix) - class_maps_ = class_maps*1. - class_maps__ = np.argmax(class_maps_, axis=2) - class_maps__[np.max(class_maps_, axis=2) == 0] = -1 - - # Label nodes with classes. - for i in range(n_classes): - # class_node_ids = np.where(class_maps__.ravel() == i)[0] - class_node_ids = np.where(class_maps[:,:,i].ravel() > 0)[0] - dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts') - class_dist[:,:,i] = np.reshape(dist_i, class_dist[:,:,i].shape) - class_map_geodesic = (class_dist <= pix) - class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes]) - - # For each node pick out the label from this class map. 
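# A tiny sketch of the node-labelling lookup performed at this step: round
# each node's (x, y) to the nearest pixel, flatten to linear indices with
# np.ravel_multi_index (row-major, so (y, x) order), and read the per-pixel
# class id out of the map. Toy map and nodes below.
import numpy as np

class_map = -1 * np.ones((8, 8), dtype=np.int32)   # -1 = unlabelled
class_map[2:5, 2:5] = 3                            # a 'chair' blob, say
nodes_xyt = np.array([[3.2, 2.7, 0.], [7.0, 0.0, 1.]])  # (x, y, theta)

x = np.round(nodes_xyt[:, 0]).astype(np.int32)
y = np.round(nodes_xyt[:, 1]).astype(np.int32)
ind = np.ravel_multi_index((y, x), class_map.shape)
labels = class_map.ravel()[ind]
print(labels)                                      # [ 3 -1]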
- x = np.round(nodes_xyt[:,[0]]).astype(np.int32) - y = np.round(nodes_xyt[:,[1]]).astype(np.int32) - ind = np.ravel_multi_index((y,x), class_dist[:,:,0].shape) - node_class_label = class_map_geodesic[ind[:,0],:] - class_map_geodesic = class_dist <= pix - return class_map_geodesic, node_class_label - -def _get_next_nodes_undirected(n, sc, n_ori): - nodes_to_add = [] - nodes_to_validate = [] - (p, q, r) = n - nodes_to_add.append((n, (p, q, r), 0)) - if n_ori == 4: - for _ in [1, 2, 3, 4]: - if _ == 1: - v = (p - sc, q, r) - elif _ == 2: - v = (p + sc, q, r) - elif _ == 3: - v = (p, q - sc, r) - elif _ == 4: - v = (p, q + sc, r) - nodes_to_validate.append((n, v, _)) - return nodes_to_add, nodes_to_validate - -def _get_next_nodes(n, sc, n_ori): - nodes_to_add = [] - nodes_to_validate = [] - (p, q, r) = n - for r_, a_ in zip([-1, 0, 1], [1, 0, 2]): - nodes_to_add.append((n, (p, q, np.mod(r+r_, n_ori)), a_)) - - if n_ori == 6: - if r == 0: - v = (p + sc, q, r) - elif r == 1: - v = (p + sc, q + sc, r) - elif r == 2: - v = (p, q + sc, r) - elif r == 3: - v = (p - sc, q, r) - elif r == 4: - v = (p - sc, q - sc, r) - elif r == 5: - v = (p, q - sc, r) - elif n_ori == 4: - if r == 0: - v = (p + sc, q, r) - elif r == 1: - v = (p, q + sc, r) - elif r == 2: - v = (p - sc, q, r) - elif r == 3: - v = (p, q - sc, r) - nodes_to_validate.append((n,v,3)) - - return nodes_to_add, nodes_to_validate - -def generate_graph(valid_fn_vec=None, sc=1., n_ori=6, - starting_location=(0, 0, 0), vis=False, directed=True): - timer = utils.Timer() - timer.tic() - if directed: G = nx.DiGraph(directed=True) - else: G = nx.Graph() - G.add_node(starting_location) - new_nodes = G.nodes() - while len(new_nodes) != 0: - nodes_to_add = [] - nodes_to_validate = [] - for n in new_nodes: - if directed: - na, nv = _get_next_nodes(n, sc, n_ori) - else: - na, nv = _get_next_nodes_undirected(n, sc, n_ori) - nodes_to_add = nodes_to_add + na - if valid_fn_vec is not None: - nodes_to_validate = nodes_to_validate + nv - else: - node_to_add = nodes_to_add + nv - - # Validate nodes. 
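# A usage sketch for the expansion helpers defined above, assuming they are in
# scope: for a directed graph with n_ori=4, each state (x, y, theta) can
# rotate in place (actions 1 and 2), stay (action 0), or move one step along
# its current heading (action 3, which is what the validity check below
# screens against obstacles).
node = (0, 0, 0)                    # x, y, orientation in {0, 1, 2, 3}
to_add, to_validate = _get_next_nodes(node, sc=1, n_ori=4)
for src, dst, action in to_add:
  print('add     ', src, '->', dst, 'action', action)
for src, dst, action in to_validate:
  print('validate', src, '->', dst, 'action', action)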
- vs = [_[1] for _ in nodes_to_validate] - valids = valid_fn_vec(vs) - - for nva, valid in zip(nodes_to_validate, valids): - if valid: - nodes_to_add.append(nva) - - new_nodes = [] - for n,v,a in nodes_to_add: - if not G.has_node(v): - new_nodes.append(v) - G.add_edge(n, v, action=a) - - timer.toc(average=True, log_at=1, log_str='src.graph_utils.generate_graph') - return (G) - -def vis_G(G, ax, vertex_color='r', edge_color='b', r=None): - if edge_color is not None: - for e in G.edges(): - XYT = zip(*e) - x = XYT[-3] - y = XYT[-2] - t = XYT[-1] - if r is None or t[0] == r: - ax.plot(x, y, edge_color) - if vertex_color is not None: - XYT = zip(*G.nodes()) - x = XYT[-3] - y = XYT[-2] - t = XYT[-1] - ax.plot(x, y, vertex_color + '.') - -def convert_to_graph_tool(G): - timer = utils.Timer() - timer.tic() - gtG = gt.Graph(directed=G.is_directed()) - gtG.ep['action'] = gtG.new_edge_property('int') - - nodes_list = G.nodes() - nodes_array = np.array(nodes_list) - - nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64) - - for i in range(nodes_array.shape[0]): - v = gtG.add_vertex() - nodes_id[i] = int(v) - - # d = {key: value for (key, value) in zip(nodes_list, nodes_id)} - d = dict(itertools.izip(nodes_list, nodes_id)) - - for src, dst, data in G.edges_iter(data=True): - e = gtG.add_edge(d[src], d[dst]) - gtG.ep['action'][e] = data['action'] - nodes_to_id = d - timer.toc(average=True, log_at=1, log_str='src.graph_utils.convert_to_graph_tool') - return gtG, nodes_array, nodes_to_id - - -def _rejection_sampling(rng, sampling_d, target_d, bins, hardness, M): - bin_ind = np.digitize(hardness, bins)-1 - i = 0 - ratio = target_d[bin_ind] / (M*sampling_d[bin_ind]) - while i < ratio.size and rng.rand() > ratio[i]: - i = i+1 - return i - -def heuristic_fn_vec(n1, n2, n_ori, step_size): - # n1 is a vector and n2 is a single point. - dx = (n1[:,0] - n2[0,0])/step_size - dy = (n1[:,1] - n2[0,1])/step_size - dt = n1[:,2] - n2[0,2] - dt = np.mod(dt, n_ori) - dt = np.minimum(dt, n_ori-dt) - - if n_ori == 6: - if dx*dy > 0: - d = np.maximum(np.abs(dx), np.abs(dy)) - else: - d = np.abs(dy-dx) - elif n_ori == 4: - d = np.abs(dx) + np.abs(dy) - - return (d + dt).reshape((-1,1)) - -def get_hardness_distribution(gtG, max_dist, min_dist, rng, trials, bins, nodes, - n_ori, step_size): - heuristic_fn = lambda node_ids, node_id: \ - heuristic_fn_vec(nodes[node_ids, :], nodes[[node_id], :], n_ori, step_size) - num_nodes = gtG.num_vertices() - gt_dists = []; h_dists = []; - for i in range(trials): - end_node_id = rng.choice(num_nodes) - gt_dist = gt.topology.shortest_distance(gt.GraphView(gtG, reversed=True), - source=gtG.vertex(end_node_id), - target=None, max_dist=max_dist) - gt_dist = np.array(gt_dist.get_array()) - ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0] - gt_dist = gt_dist[ind] - h_dist = heuristic_fn(ind, end_node_id)[:,0] - gt_dists.append(gt_dist) - h_dists.append(h_dist) - gt_dists = np.concatenate(gt_dists) - h_dists = np.concatenate(h_dists) - hardness = 1. 
- h_dists*1./gt_dists - hist, _ = np.histogram(hardness, bins) - hist = hist.astype(np.float64) - hist = hist / np.sum(hist) - return hist - -def rng_next_goal_rejection_sampling(start_node_ids, batch_size, gtG, rng, - max_dist, min_dist, max_dist_to_compute, - sampling_d, target_d, - nodes, n_ori, step_size, bins, M): - sample_start_nodes = start_node_ids is None - dists = []; pred_maps = []; end_node_ids = []; start_node_ids_ = []; - hardnesss = []; gt_dists = []; - num_nodes = gtG.num_vertices() - for i in range(batch_size): - done = False - while not done: - if sample_start_nodes: - start_node_id = rng.choice(num_nodes) - else: - start_node_id = start_node_ids[i] - - gt_dist = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=False), source=start_node_id, target=None, - max_dist=max_dist) - gt_dist = np.array(gt_dist.get_array()) - ind = np.where(np.logical_and(gt_dist <= max_dist, gt_dist >= min_dist))[0] - ind = rng.permutation(ind) - gt_dist = gt_dist[ind]*1. - h_dist = heuristic_fn_vec(nodes[ind, :], nodes[[start_node_id], :], - n_ori, step_size)[:,0] - hardness = 1. - h_dist / gt_dist - sampled_ind = _rejection_sampling(rng, sampling_d, target_d, bins, - hardness, M) - if sampled_ind < ind.size: - # print sampled_ind - end_node_id = ind[sampled_ind] - hardness = hardness[sampled_ind] - gt_dist = gt_dist[sampled_ind] - done = True - - # Compute distance from end node to all nodes, to return. - dist, pred_map = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=True), source=end_node_id, target=None, - max_dist=max_dist_to_compute, pred_map=True) - dist = np.array(dist.get_array()) - pred_map = np.array(pred_map.get_array()) - - hardnesss.append(hardness); dists.append(dist); pred_maps.append(pred_map); - start_node_ids_.append(start_node_id); end_node_ids.append(end_node_id); - gt_dists.append(gt_dist); - paths = None - return start_node_ids_, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists - - -def rng_next_goal(start_node_ids, batch_size, gtG, rng, max_dist, - max_dist_to_compute, node_room_ids, nodes=None, - compute_path=False, dists_from_start_node=None): - # Compute the distance field from the starting location, and then pick a - # destination in another room if possible otherwise anywhere outside this - # room. - dists = []; pred_maps = []; paths = []; end_node_ids = []; - for i in range(batch_size): - room_id = node_room_ids[start_node_ids[i]] - # Compute distances. - if dists_from_start_node == None: - dist, pred_map = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=False), source=gtG.vertex(start_node_ids[i]), - target=None, max_dist=max_dist_to_compute, pred_map=True) - dist = np.array(dist.get_array()) - else: - dist = dists_from_start_node[i] - - # Randomly sample nodes which are within max_dist. - near_ids = dist <= max_dist - near_ids = near_ids[:, np.newaxis] - # Check to see if there is a non-negative node which is close enough. 
- non_same_room_ids = node_room_ids != room_id - non_hallway_ids = node_room_ids != -1 - good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids)) - good2_ids = np.logical_and(near_ids, non_hallway_ids) - good3_ids = near_ids - if np.any(good1_ids): - end_node_id = rng.choice(np.where(good1_ids)[0]) - elif np.any(good2_ids): - end_node_id = rng.choice(np.where(good2_ids)[0]) - elif np.any(good3_ids): - end_node_id = rng.choice(np.where(good3_ids)[0]) - else: - logging.error('Did not find any good nodes.') - - # Compute distance to this new goal for doing distance queries. - dist, pred_map = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id), - target=None, max_dist=max_dist_to_compute, pred_map=True) - dist = np.array(dist.get_array()) - pred_map = np.array(pred_map.get_array()) - - dists.append(dist) - pred_maps.append(pred_map) - end_node_ids.append(end_node_id) - - path = None - if compute_path: - path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map) - paths.append(path) - - return start_node_ids, end_node_ids, dists, pred_maps, paths - - -def rng_room_to_room(batch_size, gtG, rng, max_dist, max_dist_to_compute, - node_room_ids, nodes=None, compute_path=False): - # Sample one of the rooms, compute the distance field. Pick a destination in - # another room if possible otherwise anywhere outside this room. - dists = []; pred_maps = []; paths = []; start_node_ids = []; end_node_ids = []; - room_ids = np.unique(node_room_ids[node_room_ids[:,0] >= 0, 0]) - for i in range(batch_size): - room_id = rng.choice(room_ids) - end_node_id = rng.choice(np.where(node_room_ids[:,0] == room_id)[0]) - end_node_ids.append(end_node_id) - - # Compute distances. - dist, pred_map = gt.topology.shortest_distance( - gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_id), - target=None, max_dist=max_dist_to_compute, pred_map=True) - dist = np.array(dist.get_array()) - pred_map = np.array(pred_map.get_array()) - dists.append(dist) - pred_maps.append(pred_map) - - # Randomly sample nodes which are within max_dist. - near_ids = dist <= max_dist - near_ids = near_ids[:, np.newaxis] - - # Check to see if there is a non-negative node which is close enough. - non_same_room_ids = node_room_ids != room_id - non_hallway_ids = node_room_ids != -1 - good1_ids = np.logical_and(near_ids, np.logical_and(non_same_room_ids, non_hallway_ids)) - good2_ids = np.logical_and(near_ids, non_hallway_ids) - good3_ids = near_ids - if np.any(good1_ids): - start_node_id = rng.choice(np.where(good1_ids)[0]) - elif np.any(good2_ids): - start_node_id = rng.choice(np.where(good2_ids)[0]) - elif np.any(good3_ids): - start_node_id = rng.choice(np.where(good3_ids)[0]) - else: - logging.error('Did not find any good nodes.') - - start_node_ids.append(start_node_id) - - path = None - if compute_path: - path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map) - paths.append(path) - - return start_node_ids, end_node_ids, dists, pred_maps, paths - - -def rng_target_dist_field(batch_size, gtG, rng, max_dist, max_dist_to_compute, - nodes=None, compute_path=False): - # Sample a single node, compute distance to all nodes less than max_dist, - # sample nodes which are a particular distance away. 
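  # These samplers all rely on get_path_ids, defined elsewhere in this file.
  # Presumably it walks the predecessor map that graph-tool returns for the
  # reversed graph (a shortest-path tree rooted at the goal), along these
  # lines (sketch, not the verbatim implementation):
  #
  #   def get_path_ids(start_node_id, end_node_id, pred_map):
  #     path = [start_node_id]
  #     while path[-1] != end_node_id:
  #       path.append(pred_map[path[-1]])
  #     return path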
-  dists = []; pred_maps = []; paths = []; start_node_ids = []
-  end_node_ids = rng.choice(gtG.num_vertices(), size=(batch_size,),
-                            replace=False).tolist()
-
-  for i in range(batch_size):
-    dist, pred_map = gt.topology.shortest_distance(
-        gt.GraphView(gtG, reversed=True), source=gtG.vertex(end_node_ids[i]),
-        target=None, max_dist=max_dist_to_compute, pred_map=True)
-    dist = np.array(dist.get_array())
-    pred_map = np.array(pred_map.get_array())
-    dists.append(dist)
-    pred_maps.append(pred_map)
-
-    # Randomly sample nodes which are within max_dist.
-    near_ids = np.where(dist <= max_dist)[0]
-    start_node_id = rng.choice(near_ids, size=(1,), replace=False)[0]
-    start_node_ids.append(start_node_id)
-
-    path = None
-    if compute_path:
-      path = get_path_ids(start_node_ids[i], end_node_ids[i], pred_map)
-    paths.append(path)
-
-  return start_node_ids, end_node_ids, dists, pred_maps, paths
diff --git a/research/cognitive_mapping_and_planning/src/map_utils.py b/research/cognitive_mapping_and_planning/src/map_utils.py
deleted file mode 100644
index 6756131a9..000000000
--- a/research/cognitive_mapping_and_planning/src/map_utils.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Various functions to compute the ground truth map for training etc.
-"""
-import copy
-import skimage.morphology
-import logging
-import numpy as np
-import scipy.ndimage
-import matplotlib.pyplot as plt
-import PIL
-
-import src.utils as utils
-import cv2
-
-def _get_xy_bounding_box(vertex, padding):
-  """Returns the xy bounding box of the environment."""
-  min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int)
-  max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int)
-  return min_, max_
-
-def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
-  """Projects points to map, returns how many points are present at each
-  location."""
-  num_points = np.zeros((map.size[1], map.size[0]))
-  vertex_ = vertex[:, :2] - map.origin
-  vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
-  if ignore_points_outside_map:
-    good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
-                                vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
-                      axis=0)
-    vertex_ = vertex_[good_ind, :]
-    if wt is not None:
-      wt = wt[good_ind, :]
-  if wt is None:
-    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
-  else:
-    assert(wt.shape[0] == vertex.shape[0]), \
-      'number of weights should be same as vertices.'
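    # np.add.at is an unbuffered scatter-add, so multiple vertices that land
    # on the same pixel all contribute; plain fancy-indexed += would count
    # repeated indices only once. For example:
    #
    #   a = np.zeros(3); np.add.at(a, [0, 0, 2], 1.)   # a -> [2., 0., 1.]
    #   b = np.zeros(3); b[[0, 0, 2]] += 1.            # b -> [1., 0., 1.]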
-    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
-  return num_points
-
-def make_map(padding, resolution, vertex=None, sc=1.):
-  """Returns a map structure."""
-  min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding)
-  sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
-  max_ = min_ + sz * resolution - 1
-  map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
-                  padding=padding)
-  return map
-
-def _fill_holes(img, thresh):
-  """Fills holes less than thresh area (assumes 4 connectivity when computing
-  hole area)."""
-  l, n = scipy.ndimage.label(np.logical_not(img))
-  img_ = img == True
-  cnts = np.bincount(l.reshape(-1))
-  for i, cnt in enumerate(cnts):
-    if cnt < thresh:
-      l[l == i] = -1
-  img_[l == -1] = True
-  return img_
-
-def compute_traversibility(map, robot_base, robot_height, robot_radius,
-                           valid_min, valid_max, num_point_threshold, shapess,
-                           sc=100., n_samples_per_face=200):
-  """Returns a bitmap marking which pixels are traversible. As long as the
-  robot center stays inside this volume we are good; collisions can be
-  detected by doing a line search, by walking from the current location to
-  the final location in the bitmap, or by doing bwlabel on the
-  traversibility map."""
-
-  tt = utils.Timer()
-  tt.tic()
-  num_obstcale_points = np.zeros((map.size[1], map.size[0]))
-  num_points = np.zeros((map.size[1], map.size[0]))
-
-  for i, shapes in enumerate(shapess):
-    for j in range(shapes.get_number_of_meshes()):
-      p, face_areas, face_idx = shapes.sample_points_on_face_of_shape(
-          j, n_samples_per_face, sc)
-      wt = face_areas[face_idx]/n_samples_per_face
-
-      ind = np.all(np.concatenate(
-          (p[:, [2]] > robot_base,
-           p[:, [2]] < robot_base + robot_height), axis=1), axis=1)
-      num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind])
-
-      ind = np.all(np.concatenate(
-          (p[:, [2]] > valid_min,
-           p[:, [2]] < valid_max), axis=1), axis=1)
-      num_points += _project_to_map(map, p[ind, :], wt[ind])
-
-  selem = skimage.morphology.disk(robot_radius / map.resolution)
-  obstacle_free = skimage.morphology.binary_dilation(
-      _fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True
-  valid_space = _fill_holes(num_points > num_point_threshold, 20)
-  traversible = np.all(np.concatenate((obstacle_free[...,np.newaxis],
-                                       valid_space[...,np.newaxis]), axis=2),
-                       axis=2)
-  # plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1))
-  # plt.show()
-
-  map_out = copy.deepcopy(map)
-  map_out.num_obstcale_points = num_obstcale_points
-  map_out.num_points = num_points
-  map_out.traversible = traversible
-  map_out.obstacle_free = obstacle_free
-  map_out.valid_space = valid_space
-  tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ')
-  return map_out
-
-
-def resize_maps(map, map_scales, resize_method):
-  scaled_maps = []
-  for i, sc in enumerate(map_scales):
-    if resize_method == 'antialiasing':
-      # Resize using OpenCV so that we can compute the output size.
-      # Use PIL resize to get its anti-aliasing feature.
-      map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
-      w = map_.shape[1]; h = map_.shape[0]
-
-      map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
-      map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
-      map_ = np.asarray(map__img).astype(np.float32)
-      map_ = map_/255.
- map_ = np.minimum(map_, 1.0) - map_ = np.maximum(map_, 0.0) - elif resize_method == 'linear_noantialiasing': - map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR) - else: - logging.error('Unknown resizing method') - scaled_maps.append(map_) - return scaled_maps - - -def pick_largest_cc(traversible): - out = scipy.ndimage.label(traversible)[0] - cnt = np.bincount(out.reshape(-1))[1:] - return out == np.argmax(cnt) + 1 - -def get_graph_origin_loc(rng, traversible): - """Erode the traversibility mask so that we get points in the bulk of the - graph, and not end up with a situation where the graph is localized in the - corner of a cramped room. Output Locs is in the coordinate frame of the - map.""" - - aa = pick_largest_cc(skimage.morphology.binary_erosion(traversible == True, - selem=np.ones((15,15)))) - y, x = np.where(aa > 0) - ind = rng.choice(y.size) - locs = np.array([x[ind], y[ind]]) - locs = locs + rng.rand(*(locs.shape)) - 0.5 - return locs - - -def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc, - x_axis, y_axis, theta): - maps = [] - for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)): - maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_, - map_crop_size, - interpolation=cv2.INTER_LINEAR)[0]) - maps_i[np.isnan(maps_i)] = 0 - maps.append(maps_i) - return maps - -def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist, - goal_theta, rel_goal_orientation): - goal_dist = goal_dist[:,0] - goal_theta = goal_theta[:,0] - rel_goal_orientation = rel_goal_orientation[:,0] - - goals = []; - # Generate the map images. - for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)): - goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori), - dtype=np.float32) - x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2. - y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2. 
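  # The loop below splats each goal into its four neighbouring cells with
  # bilinear weights that sum to one. Worked example: a goal at continuous
  # (x, y) = (2.25, 3.5) has x0, x1 = 2, 3 and y0, y1 = 3, 4, giving
  #   (y0, x0): (x1-x)*(y1-y) = 0.75*0.5 = 0.375
  #   (y1, x0): (x1-x)*(y-y0) = 0.75*0.5 = 0.375
  #   (y0, x1): (x-x0)*(y1-y) = 0.25*0.5 = 0.125
  #   (y1, x1): (x-x0)*(y-y0) = 0.25*0.5 = 0.125
  # which degenerates to a one-hot image when (x, y) is integral.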
- - for j in range(goal_dist.shape[0]): - gc = rel_goal_orientation[j] - x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1; - y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1; - if x0 >= 0 and x0 <= map_crop_size-1: - if y0 >= 0 and y0 <= map_crop_size-1: - goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j]) - if y1 >= 0 and y1 <= map_crop_size-1: - goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0) - - if x1 >= 0 and x1 <= map_crop_size-1: - if y0 >= 0 and y0 <= map_crop_size-1: - goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j]) - if y1 >= 0 and y1 <= map_crop_size-1: - goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0) - - goals.append(goal_i) - return goals - -def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size, - interpolation=cv2.INTER_LINEAR): - fss = [] - valids = [] - - center = (map_size-1.0)/2.0 - dst_theta = np.pi/2.0 - dst_loc = np.array([center, center]) - dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)]) - dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)]) - - def compute_points(center, x_axis, y_axis): - points = np.zeros((3,2),dtype=np.float32) - points[0,:] = center - points[1,:] = center + x_axis - points[2,:] = center + y_axis - return points - - dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis) - for i in range(src_locs.shape[0]): - src_loc = src_locs[i,:] - src_x_axis = src_x_axiss[i,:] - src_y_axis = src_y_axiss[i,:] - src_points = compute_points(src_loc, src_x_axis, src_y_axis) - M = cv2.getAffineTransform(src_points, dst_points) - - fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation, - borderValue=np.NaN) - valid = np.invert(np.isnan(fs)) - valids.append(valid) - fss.append(fs) - return fss, valids - diff --git a/research/cognitive_mapping_and_planning/src/rotation_utils.py b/research/cognitive_mapping_and_planning/src/rotation_utils.py deleted file mode 100644 index 8d6d4f3cb..000000000 --- a/research/cognitive_mapping_and_planning/src/rotation_utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utilities for generating and applying rotation matrices. 
-""" -import numpy as np - -ANGLE_EPS = 0.001 - - -def normalize(v): - return v / np.linalg.norm(v) - - -def get_r_matrix(ax_, angle): - ax = normalize(ax_) - if np.abs(angle) > ANGLE_EPS: - S_hat = np.array( - [[0.0, -ax[2], ax[1]], [ax[2], 0.0, -ax[0]], [-ax[1], ax[0], 0.0]], - dtype=np.float32) - R = np.eye(3) + np.sin(angle)*S_hat + \ - (1-np.cos(angle))*(np.linalg.matrix_power(S_hat, 2)) - else: - R = np.eye(3) - return R - - -def r_between(v_from_, v_to_): - v_from = normalize(v_from_) - v_to = normalize(v_to_) - ax = normalize(np.cross(v_from, v_to)) - angle = np.arccos(np.dot(v_from, v_to)) - return get_r_matrix(ax, angle) - - -def rotate_camera_to_point_at(up_from, lookat_from, up_to, lookat_to): - inputs = [up_from, lookat_from, up_to, lookat_to] - for i in range(4): - inputs[i] = normalize(np.array(inputs[i]).reshape((-1,))) - up_from, lookat_from, up_to, lookat_to = inputs - r1 = r_between(lookat_from, lookat_to) - - new_x = np.dot(r1, np.array([1, 0, 0]).reshape((-1, 1))).reshape((-1)) - to_x = normalize(np.cross(lookat_to, up_to)) - angle = np.arccos(np.dot(new_x, to_x)) - if angle > ANGLE_EPS: - if angle < np.pi - ANGLE_EPS: - ax = normalize(np.cross(new_x, to_x)) - flip = np.dot(lookat_to, ax) - if flip > 0: - r2 = get_r_matrix(lookat_to, angle) - elif flip < 0: - r2 = get_r_matrix(lookat_to, -1. * angle) - else: - # Angle of rotation is too close to 180 degrees, direction of rotation - # does not matter. - r2 = get_r_matrix(lookat_to, angle) - else: - r2 = np.eye(3) - return np.dot(r2, r1) - diff --git a/research/cognitive_mapping_and_planning/src/utils.py b/research/cognitive_mapping_and_planning/src/utils.py deleted file mode 100644 index a1b9e4426..000000000 --- a/research/cognitive_mapping_and_planning/src/utils.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Generaly Utilities. -""" - -import numpy as np, cPickle, os, time -from six.moves import xrange -import src.file_utils as fu -import logging - -class Timer(): - def __init__(self): - self.calls = 0. - self.start_time = 0. - self.time_per_call = 0. - self.total_time = 0. - self.last_log_time = 0. - - def tic(self): - self.start_time = time.time() - - def toc(self, average=True, log_at=-1, log_str='', type='calls'): - if self.start_time == 0: - logging.error('Timer not started by calling tic().') - t = time.time() - diff = time.time() - self.start_time - self.total_time += diff - self.calls += 1. 
- self.time_per_call = self.total_time/self.calls - - if type == 'calls' and log_at > 0 and np.mod(self.calls, log_at) == 0: - _ = [] - logging.info('%s: %f seconds.', log_str, self.time_per_call) - elif type == 'time' and log_at > 0 and t - self.last_log_time >= log_at: - _ = [] - logging.info('%s: %f seconds.', log_str, self.time_per_call) - self.last_log_time = t - - if average: - return self.time_per_call - else: - return diff - -class Foo(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - def __str__(self): - str_ = '' - for v in vars(self).keys(): - a = getattr(self, v) - if True: #isinstance(v, object): - str__ = str(a) - str__ = str__.replace('\n', '\n ') - else: - str__ = str(a) - str_ += '{:s}: {:s}'.format(v, str__) - str_ += '\n' - return str_ - - -def dict_equal(dict1, dict2): - assert(set(dict1.keys()) == set(dict2.keys())), "Sets of keys between 2 dictionaries are different." - for k in dict1.keys(): - assert(type(dict1[k]) == type(dict2[k])), "Type of key '{:s}' if different.".format(k) - if type(dict1[k]) == np.ndarray: - assert(dict1[k].dtype == dict2[k].dtype), "Numpy Type of key '{:s}' if different.".format(k) - assert(np.allclose(dict1[k], dict2[k])), "Value for key '{:s}' do not match.".format(k) - else: - assert(dict1[k] == dict2[k]), "Value for key '{:s}' do not match.".format(k) - return True - -def subplot(plt, Y_X, sz_y_sz_x = (10, 10)): - Y,X = Y_X - sz_y, sz_x = sz_y_sz_x - plt.rcParams['figure.figsize'] = (X*sz_x, Y*sz_y) - fig, axes = plt.subplots(Y, X) - plt.subplots_adjust(wspace=0.1, hspace=0.1) - return fig, axes - -def tic_toc_print(interval, string): - global tic_toc_print_time_old - if 'tic_toc_print_time_old' not in globals(): - tic_toc_print_time_old = time.time() - print(string) - else: - new_time = time.time() - if new_time - tic_toc_print_time_old > interval: - tic_toc_print_time_old = new_time; - print(string) - -def mkdir_if_missing(output_dir): - if not fu.exists(output_dir): - fu.makedirs(output_dir) - -def save_variables(pickle_file_name, var, info, overwrite = False): - if fu.exists(pickle_file_name) and overwrite == False: - raise Exception('{:s} exists and over write is false.'.format(pickle_file_name)) - # Construct the dictionary - assert(type(var) == list); assert(type(info) == list); - d = {} - for i in xrange(len(var)): - d[info[i]] = var[i] - with fu.fopen(pickle_file_name, 'w') as f: - cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL) - -def load_variables(pickle_file_name): - if fu.exists(pickle_file_name): - with fu.fopen(pickle_file_name, 'r') as f: - d = cPickle.load(f) - return d - else: - raise Exception('{:s} does not exists.'.format(pickle_file_name)) - -def voc_ap(rec, prec): - rec = rec.reshape((-1,1)) - prec = prec.reshape((-1,1)) - z = np.zeros((1,1)) - o = np.ones((1,1)) - mrec = np.vstack((z, rec, o)) - mpre = np.vstack((z, prec, z)) - for i in range(len(mpre)-2, -1, -1): - mpre[i] = max(mpre[i], mpre[i+1]) - - I = np.where(mrec[1:] != mrec[0:-1])[0]+1; - ap = 0; - for i in I: - ap = ap + (mrec[i] - mrec[i-1])*mpre[i]; - return ap - -def tight_imshow_figure(plt, figsize=None): - fig = plt.figure(figsize=figsize) - ax = plt.Axes(fig, [0,0,1,1]) - ax.set_axis_off() - fig.add_axes(ax) - return fig, ax - -def calc_pr(gt, out, wt=None): - if wt is None: - wt = np.ones((gt.size,1)) - - gt = gt.astype(np.float64).reshape((-1,1)) - wt = wt.astype(np.float64).reshape((-1,1)) - out = out.astype(np.float64).reshape((-1,1)) - - gt = gt*wt - tog = np.concatenate([gt, wt, out], axis=1)*1. 
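  # Worked example with unit weights: gt = [1, 0, 1], out = [0.9, 0.8, 0.1].
  # Sorted by score, cumulative precision is [1/1, 1/2, 2/3] and recall is
  # [0.5, 0.5, 1.0]; voc_ap above then makes precision non-increasing from
  # the right before summing it over the points where recall changes.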
- ind = np.argsort(tog[:,2], axis=0)[::-1] - tog = tog[ind,:] - cumsumsortgt = np.cumsum(tog[:,0]) - cumsumsortwt = np.cumsum(tog[:,1]) - prec = cumsumsortgt / cumsumsortwt - rec = cumsumsortgt / np.sum(tog[:,0]) - - ap = voc_ap(rec, prec) - return ap, rec, prec diff --git a/research/cognitive_mapping_and_planning/tfcode/__init__.py b/research/cognitive_mapping_and_planning/tfcode/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/cognitive_mapping_and_planning/tfcode/cmp.py b/research/cognitive_mapping_and_planning/tfcode/cmp.py deleted file mode 100644 index 228ef90fd..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/cmp.py +++ /dev/null @@ -1,553 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Code for setting up the network for CMP. - -Sets up the mapper and the planner. -""" - -import sys, os, numpy as np -import matplotlib.pyplot as plt -import copy -import argparse, pprint -import time - - -import tensorflow as tf - -from tensorflow.contrib import slim -from tensorflow.contrib.slim import arg_scope - -import logging -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -from src import utils -import src.file_utils as fu -import tfcode.nav_utils as nu -import tfcode.cmp_utils as cu -import tfcode.cmp_summary as cmp_s -from tfcode import tf_utils - -value_iteration_network = cu.value_iteration_network -rotate_preds = cu.rotate_preds -deconv = cu.deconv -get_visual_frustum = cu.get_visual_frustum -fr_v2 = cu.fr_v2 - -setup_train_step_kwargs = nu.default_train_step_kwargs -compute_losses_multi_or = nu.compute_losses_multi_or - -get_repr_from_image = nu.get_repr_from_image - -_save_d_at_t = nu.save_d_at_t -_save_all = nu.save_all -_eval_ap = nu.eval_ap -_eval_dist = nu.eval_dist -_plot_trajectories = nu.plot_trajectories - -_vis_readout_maps = cmp_s._vis_readout_maps -_vis = cmp_s._vis -_summary_vis = cmp_s._summary_vis -_summary_readout_maps = cmp_s._summary_readout_maps -_add_summaries = cmp_s._add_summaries - -def _inputs(problem): - # Set up inputs. - with tf.name_scope('inputs'): - inputs = [] - inputs.append(('orig_maps', tf.float32, - (problem.batch_size, 1, None, None, 1))) - inputs.append(('goal_loc', tf.float32, - (problem.batch_size, problem.num_goals, 2))) - common_input_data, _ = tf_utils.setup_inputs(inputs) - - inputs = [] - if problem.input_type == 'vision': - # Multiple images from an array of cameras. 
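    # Each triple below is (name, dtype, shape); tf_utils.setup_inputs
    # (defined in tfcode/tf_utils.py) presumably turns such a list into named
    # placeholders, roughly (sketch under that assumption, not the verbatim
    # helper):
    #
    #   def setup_inputs(specs):
    #     tensors = {}
    #     for name, dtype, shape in specs:
    #       tensors[name] = tf.placeholder(dtype, shape=shape, name=name)
    #     return tensors, None
    #
    # The time axis is left as None so the same graph serves single-step
    # rollouts and multi-step training; the image tensor declared next is
    # batch x time x n_views x H x W x C.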
- inputs.append(('imgs', tf.float32, - (problem.batch_size, None, len(problem.aux_delta_thetas)+1, - problem.img_height, problem.img_width, - problem.img_channels))) - elif problem.input_type == 'analytical_counts': - for i in range(len(problem.map_crop_sizes)): - inputs.append(('analytical_counts_{:d}'.format(i), tf.float32, - (problem.batch_size, None, problem.map_crop_sizes[i], - problem.map_crop_sizes[i], problem.map_channels))) - - if problem.outputs.readout_maps: - for i in range(len(problem.readout_maps_crop_sizes)): - inputs.append(('readout_maps_{:d}'.format(i), tf.float32, - (problem.batch_size, None, - problem.readout_maps_crop_sizes[i], - problem.readout_maps_crop_sizes[i], - problem.readout_maps_channels))) - - for i in range(len(problem.map_crop_sizes)): - inputs.append(('ego_goal_imgs_{:d}'.format(i), tf.float32, - (problem.batch_size, None, problem.map_crop_sizes[i], - problem.map_crop_sizes[i], problem.goal_channels))) - for s in ['sum_num', 'sum_denom', 'max_denom']: - inputs.append(('running_'+s+'_{:d}'.format(i), tf.float32, - (problem.batch_size, 1, problem.map_crop_sizes[i], - problem.map_crop_sizes[i], problem.map_channels))) - - inputs.append(('incremental_locs', tf.float32, - (problem.batch_size, None, 2))) - inputs.append(('incremental_thetas', tf.float32, - (problem.batch_size, None, 1))) - inputs.append(('step_number', tf.int32, (1, None, 1))) - inputs.append(('node_ids', tf.int32, (problem.batch_size, None, - problem.node_ids_dim))) - inputs.append(('perturbs', tf.float32, (problem.batch_size, None, - problem.perturbs_dim))) - - # For plotting result plots - inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2))) - inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1))) - - step_input_data, _ = tf_utils.setup_inputs(inputs) - - inputs = [] - inputs.append(('action', tf.int32, (problem.batch_size, None, problem.num_actions))) - train_data, _ = tf_utils.setup_inputs(inputs) - train_data.update(step_input_data) - train_data.update(common_input_data) - return common_input_data, step_input_data, train_data - -def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block, - kernel_size, batch_norm_is_training_op, wt_decay): - multi_scale_belief = tf.stop_gradient(multi_scale_belief) - with tf.variable_scope('readout_maps_deconv'): - x, outs = deconv(multi_scale_belief, batch_norm_is_training_op, - wt_decay=wt_decay, neurons=num_neurons, strides=strides, - layers_per_block=layers_per_block, kernel_size=kernel_size, - conv_fn=slim.conv2d_transpose, offset=0, - name='readout_maps_deconv') - probs = tf.sigmoid(x) - return x, probs - - -def running_combine(fss_logits, confs_probs, incremental_locs, - incremental_thetas, previous_sum_num, previous_sum_denom, - previous_max_denom, map_size, num_steps): - # fss_logits is B x N x H x W x C - # confs_logits is B x N x H x W x C - # incremental_locs is B x N x 2 - # incremental_thetas is B x N x 1 - # previous_sum_num etc is B x 1 x H x W x C - - with tf.name_scope('combine_{:d}'.format(num_steps)): - running_sum_nums_ = []; running_sum_denoms_ = []; - running_max_denoms_ = []; - - fss_logits_ = tf.unstack(fss_logits, axis=1, num=num_steps) - confs_probs_ = tf.unstack(confs_probs, axis=1, num=num_steps) - incremental_locs_ = tf.unstack(incremental_locs, axis=1, num=num_steps) - incremental_thetas_ = tf.unstack(incremental_thetas, axis=1, num=num_steps) - running_sum_num = tf.unstack(previous_sum_num, axis=1, num=1)[0] - running_sum_denom = tf.unstack(previous_sum_denom, 
axis=1, num=1)[0] - running_max_denom = tf.unstack(previous_max_denom, axis=1, num=1)[0] - - for i in range(num_steps): - # Rotate the previous running_num and running_denom - running_sum_num, running_sum_denom, running_max_denom = rotate_preds( - incremental_locs_[i], incremental_thetas_[i], map_size, - [running_sum_num, running_sum_denom, running_max_denom], - output_valid_mask=False)[0] - # print i, num_steps, running_sum_num.get_shape().as_list() - running_sum_num = running_sum_num + fss_logits_[i] * confs_probs_[i] - running_sum_denom = running_sum_denom + confs_probs_[i] - running_max_denom = tf.maximum(running_max_denom, confs_probs_[i]) - running_sum_nums_.append(running_sum_num) - running_sum_denoms_.append(running_sum_denom) - running_max_denoms_.append(running_max_denom) - - running_sum_nums = tf.stack(running_sum_nums_, axis=1) - running_sum_denoms = tf.stack(running_sum_denoms_, axis=1) - running_max_denoms = tf.stack(running_max_denoms_, axis=1) - return running_sum_nums, running_sum_denoms, running_max_denoms - -def get_map_from_images(imgs, mapper_arch, task_params, freeze_conv, wt_decay, - is_training, batch_norm_is_training_op, num_maps, - split_maps=True): - # Hit image with a resnet. - n_views = len(task_params.aux_delta_thetas) + 1 - out = utils.Foo() - - images_reshaped = tf.reshape(imgs, - shape=[-1, task_params.img_height, - task_params.img_width, - task_params.img_channels], name='re_image') - - x, out.vars_to_restore = get_repr_from_image( - images_reshaped, task_params.modalities, task_params.data_augment, - mapper_arch.encoder, freeze_conv, wt_decay, is_training) - - # Reshape into nice things so that these can be accumulated over time steps - # for faster backprop. - sh_before = x.get_shape().as_list() - out.encoder_output = tf.reshape(x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:]) - x = tf.reshape(out.encoder_output, shape=[-1] + sh_before[1:]) - - # Add a layer to reduce dimensions for a fc layer. - if mapper_arch.dim_reduce_neurons > 0: - ks = 1; neurons = mapper_arch.dim_reduce_neurons; - init_var = np.sqrt(2.0/(ks**2)/neurons) - batch_norm_param = mapper_arch.batch_norm_param - batch_norm_param['is_training'] = batch_norm_is_training_op - out.conv_feat = slim.conv2d(x, neurons, kernel_size=ks, stride=1, - normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_param, - padding='SAME', scope='dim_reduce', - weights_regularizer=slim.l2_regularizer(wt_decay), - weights_initializer=tf.random_normal_initializer(stddev=init_var)) - reshape_conv_feat = slim.flatten(out.conv_feat) - sh = reshape_conv_feat.get_shape().as_list() - out.reshape_conv_feat = tf.reshape(reshape_conv_feat, shape=[-1, sh[1]*n_views]) - - with tf.variable_scope('fc'): - # Fully connected layers to compute the representation in top-view space. - fc_batch_norm_param = {'center': True, 'scale': True, - 'activation_fn':tf.nn.relu, - 'is_training': batch_norm_is_training_op} - f = out.reshape_conv_feat - out_neurons = (mapper_arch.fc_out_size**2)*mapper_arch.fc_out_neurons - neurons = mapper_arch.fc_neurons + [out_neurons] - f, _ = tf_utils.fc_network(f, neurons=neurons, wt_decay=wt_decay, - name='fc', offset=0, - batch_norm_param=fc_batch_norm_param, - is_training=is_training, - dropout_ratio=mapper_arch.fc_dropout) - f = tf.reshape(f, shape=[-1, mapper_arch.fc_out_size, - mapper_arch.fc_out_size, - mapper_arch.fc_out_neurons], name='re_fc') - - # Use pool5 to predict the free space map via deconv layers. 
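  # The deconv stack below upsamples this fc_out_size x fc_out_size seed grid
  # by the product of deconv_strides. With hypothetical values fc_out_size = 8
  # and deconv_strides = [2, 2, 2], the predicted egocentric map per view is
  # 8 * (2*2*2) = 64 pixels on a side.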
- with tf.variable_scope('deconv'): - x, outs = deconv(f, batch_norm_is_training_op, wt_decay=wt_decay, - neurons=mapper_arch.deconv_neurons, - strides=mapper_arch.deconv_strides, - layers_per_block=mapper_arch.deconv_layers_per_block, - kernel_size=mapper_arch.deconv_kernel_size, - conv_fn=slim.conv2d_transpose, offset=0, name='deconv') - - # Reshape x the right way. - sh = x.get_shape().as_list() - x = tf.reshape(x, shape=[task_params.batch_size, -1] + sh[1:]) - out.deconv_output = x - - # Separate out the map and the confidence predictions, pass the confidence - # through a sigmoid. - if split_maps: - with tf.name_scope('split'): - out_all = tf.split(value=x, axis=4, num_or_size_splits=2*num_maps) - out.fss_logits = out_all[:num_maps] - out.confs_logits = out_all[num_maps:] - with tf.name_scope('sigmoid'): - out.confs_probs = [tf.nn.sigmoid(x) for x in out.confs_logits] - return out - -def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode): - assert(args.arch.multi_scale), 'removed support for old single scale code.' - # Set up the model. - tf.set_random_seed(args.solver.seed) - task_params = args.navtask.task_params - - batch_norm_is_training_op = \ - tf.placeholder_with_default(batch_norm_is_training, shape=[], - name='batch_norm_is_training_op') - - # Setup the inputs - m.input_tensors = {} - m.train_ops = {} - m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \ - _inputs(task_params) - - m.init_fn = None - - if task_params.input_type == 'vision': - m.vision_ops = get_map_from_images( - m.input_tensors['step']['imgs'], args.mapper_arch, - task_params, args.solver.freeze_conv, - args.solver.wt_decay, is_training, batch_norm_is_training_op, - num_maps=len(task_params.map_crop_sizes)) - - # Load variables from snapshot if needed. - if args.solver.pretrained_path is not None: - m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path, - m.vision_ops.vars_to_restore) - - # Set up caching of vision features if needed. - if args.solver.freeze_conv: - m.train_ops['step_data_cache'] = [m.vision_ops.encoder_output] - else: - m.train_ops['step_data_cache'] = [] - - # Set up blobs that are needed for the computation in rest of the graph. - m.ego_map_ops = m.vision_ops.fss_logits - m.coverage_ops = m.vision_ops.confs_probs - - # Zero pad these to make them same size as what the planner expects. 
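    # paddings below is the 5 x 2 [before, after] matrix that tf.pad expects,
    # one row per axis of the B x T x H x W x C map tensor; a pad of 4 yields
    #   [[0, 0], [0, 0], [4, 4], [4, 4], [0, 0]]
    # so only H and W grow, each by 8.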
- for i in range(len(m.ego_map_ops)): - if args.mapper_arch.pad_map_with_zeros_each[i] > 0: - paddings = np.zeros((5,2), dtype=np.int32) - paddings[2:4,:] = args.mapper_arch.pad_map_with_zeros_each[i] - paddings_op = tf.constant(paddings, dtype=tf.int32) - m.ego_map_ops[i] = tf.pad(m.ego_map_ops[i], paddings=paddings_op) - m.coverage_ops[i] = tf.pad(m.coverage_ops[i], paddings=paddings_op) - - elif task_params.input_type == 'analytical_counts': - m.ego_map_ops = []; m.coverage_ops = [] - for i in range(len(task_params.map_crop_sizes)): - ego_map_op = m.input_tensors['step']['analytical_counts_{:d}'.format(i)] - coverage_op = tf.cast(tf.greater_equal( - tf.reduce_max(ego_map_op, reduction_indices=[4], - keep_dims=True), 1), tf.float32) - coverage_op = tf.ones_like(ego_map_op) * coverage_op - m.ego_map_ops.append(ego_map_op) - m.coverage_ops.append(coverage_op) - m.train_ops['step_data_cache'] = [] - - num_steps = task_params.num_steps - num_goals = task_params.num_goals - - map_crop_size_ops = [] - for map_crop_size in task_params.map_crop_sizes: - map_crop_size_ops.append(tf.constant(map_crop_size, dtype=tf.int32, shape=(2,))) - - with tf.name_scope('check_size'): - is_single_step = tf.equal(tf.unstack(tf.shape(m.ego_map_ops[0]), num=5)[1], 1) - - fr_ops = []; value_ops = []; - fr_intermediate_ops = []; value_intermediate_ops = []; - crop_value_ops = []; - resize_crop_value_ops = []; - confs = []; occupancys = []; - - previous_value_op = None - updated_state = []; state_names = []; - - for i in range(len(task_params.map_crop_sizes)): - map_crop_size = task_params.map_crop_sizes[i] - with tf.variable_scope('scale_{:d}'.format(i)): - # Accumulate the map. - fn = lambda ns: running_combine( - m.ego_map_ops[i], - m.coverage_ops[i], - m.input_tensors['step']['incremental_locs'] * task_params.map_scales[i], - m.input_tensors['step']['incremental_thetas'], - m.input_tensors['step']['running_sum_num_{:d}'.format(i)], - m.input_tensors['step']['running_sum_denom_{:d}'.format(i)], - m.input_tensors['step']['running_max_denom_{:d}'.format(i)], - map_crop_size, ns) - - running_sum_num, running_sum_denom, running_max_denom = \ - tf.cond(is_single_step, lambda: fn(1), lambda: fn(num_steps*num_goals)) - updated_state += [running_sum_num, running_sum_denom, running_max_denom] - state_names += ['running_sum_num_{:d}'.format(i), - 'running_sum_denom_{:d}'.format(i), - 'running_max_denom_{:d}'.format(i)] - - # Concat the accumulated map and goal - occupancy = running_sum_num / tf.maximum(running_sum_denom, 0.001) - conf = running_max_denom - # print occupancy.get_shape().as_list() - - # Concat occupancy, how much occupied and goal. - with tf.name_scope('concat'): - sh = [-1, map_crop_size, map_crop_size, task_params.map_channels] - occupancy = tf.reshape(occupancy, shape=sh) - conf = tf.reshape(conf, shape=sh) - - sh = [-1, map_crop_size, map_crop_size, task_params.goal_channels] - goal = tf.reshape(m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)], shape=sh) - to_concat = [occupancy, conf, goal] - - if previous_value_op is not None: - to_concat.append(previous_value_op) - - x = tf.concat(to_concat, 3) - - # Pass the map, previous rewards and the goal through a few convolutional - # layers to get fR. 
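        # fr_v2 below fuses the accumulated map, the goal image and the
        # upsampled value map from the coarser scale into a reward-like map
        # fR; value_iteration_network then iterates, with one learned kernel
        # K_a per abstract action,
        #   Q_a <- K_a * concat([V, fR]),    V <- max_a Q_a
        # which is what the conv + channel max-pool pair in
        # value_iteration_network (tfcode/cmp_utils.py) implements.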
- fr_op, fr_intermediate_op = fr_v2( - x, output_neurons=args.arch.fr_neurons, - inside_neurons=args.arch.fr_inside_neurons, - is_training=batch_norm_is_training_op, name='fr', - wt_decay=args.solver.wt_decay, stride=args.arch.fr_stride) - - # Do Value Iteration on the fR - if args.arch.vin_num_iters > 0: - value_op, value_intermediate_op = value_iteration_network( - fr_op, num_iters=args.arch.vin_num_iters, - val_neurons=args.arch.vin_val_neurons, - action_neurons=args.arch.vin_action_neurons, - kernel_size=args.arch.vin_ks, share_wts=args.arch.vin_share_wts, - name='vin', wt_decay=args.solver.wt_decay) - else: - value_op = fr_op - value_intermediate_op = [] - - # Crop out and upsample the previous value map. - remove = args.arch.crop_remove_each - if remove > 0: - crop_value_op = value_op[:, remove:-remove, remove:-remove,:] - else: - crop_value_op = value_op - crop_value_op = tf.reshape(crop_value_op, shape=[-1, args.arch.value_crop_size, - args.arch.value_crop_size, - args.arch.vin_val_neurons]) - if i < len(task_params.map_crop_sizes)-1: - # Reshape it to shape of the next scale. - previous_value_op = tf.image.resize_bilinear(crop_value_op, - map_crop_size_ops[i+1], - align_corners=True) - resize_crop_value_ops.append(previous_value_op) - - occupancys.append(occupancy) - confs.append(conf) - value_ops.append(value_op) - crop_value_ops.append(crop_value_op) - fr_ops.append(fr_op) - fr_intermediate_ops.append(fr_intermediate_op) - - m.value_ops = value_ops - m.value_intermediate_ops = value_intermediate_ops - m.fr_ops = fr_ops - m.fr_intermediate_ops = fr_intermediate_ops - m.final_value_op = crop_value_op - m.crop_value_ops = crop_value_ops - m.resize_crop_value_ops = resize_crop_value_ops - m.confs = confs - m.occupancys = occupancys - - sh = [-1, args.arch.vin_val_neurons*((args.arch.value_crop_size)**2)] - m.value_features_op = tf.reshape(m.final_value_op, sh, name='reshape_value_op') - - # Determine what action to take. 
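  # The action head flattens the final cropped value map; with hypothetical
  # sizes vin_val_neurons = 8 and value_crop_size = 9, that is 8*9*9 = 648
  # features per step feeding the fc layers in 'action_pred' below.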
- with tf.variable_scope('action_pred'): - batch_norm_param = args.arch.pred_batch_norm_param - if batch_norm_param is not None: - batch_norm_param['is_training'] = batch_norm_is_training_op - m.action_logits_op, _ = tf_utils.fc_network( - m.value_features_op, neurons=args.arch.pred_neurons, - wt_decay=args.solver.wt_decay, name='pred', offset=0, - num_pred=task_params.num_actions, - batch_norm_param=batch_norm_param) - m.action_prob_op = tf.nn.softmax(m.action_logits_op) - - init_state = tf.constant(0., dtype=tf.float32, shape=[ - task_params.batch_size, 1, map_crop_size, map_crop_size, - task_params.map_channels]) - - m.train_ops['state_names'] = state_names - m.train_ops['updated_state'] = updated_state - m.train_ops['init_state'] = [init_state for _ in updated_state] - - m.train_ops['step'] = m.action_prob_op - m.train_ops['common'] = [m.input_tensors['common']['orig_maps'], - m.input_tensors['common']['goal_loc']] - m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op - m.loss_ops = []; m.loss_ops_names = []; - - if args.arch.readout_maps: - with tf.name_scope('readout_maps'): - all_occupancys = tf.concat(m.occupancys + m.confs, 3) - readout_maps, probs = readout_general( - all_occupancys, num_neurons=args.arch.rom_arch.num_neurons, - strides=args.arch.rom_arch.strides, - layers_per_block=args.arch.rom_arch.layers_per_block, - kernel_size=args.arch.rom_arch.kernel_size, - batch_norm_is_training_op=batch_norm_is_training_op, - wt_decay=args.solver.wt_decay) - - gt_ego_maps = [m.input_tensors['step']['readout_maps_{:d}'.format(i)] - for i in range(len(task_params.readout_maps_crop_sizes))] - m.readout_maps_gt = tf.concat(gt_ego_maps, 4) - gt_shape = tf.shape(m.readout_maps_gt) - m.readout_maps_logits = tf.reshape(readout_maps, gt_shape) - m.readout_maps_probs = tf.reshape(probs, gt_shape) - - # Add a loss op - m.readout_maps_loss_op = tf.losses.sigmoid_cross_entropy( - tf.reshape(m.readout_maps_gt, [-1, len(task_params.readout_maps_crop_sizes)]), - tf.reshape(readout_maps, [-1, len(task_params.readout_maps_crop_sizes)]), - scope='loss') - m.readout_maps_loss_op = 10.*m.readout_maps_loss_op - - ewma_decay = 0.99 if is_training else 0.0 - weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32, - name='weight') - m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \ - compute_losses_multi_or(m.action_logits_op, - m.input_tensors['train']['action'], weights=weight, - num_actions=task_params.num_actions, - data_loss_wt=args.solver.data_loss_wt, - reg_loss_wt=args.solver.reg_loss_wt, - ewma_decay=ewma_decay) - - if args.arch.readout_maps: - m.total_loss_op = m.total_loss_op + m.readout_maps_loss_op - m.loss_ops += [m.readout_maps_loss_op] - m.loss_ops_names += ['readout_maps_loss'] - - m.loss_ops += [m.reg_loss_op, m.data_loss_op, m.total_loss_op] - m.loss_ops_names += ['reg_loss', 'data_loss', 'total_loss'] - - if args.solver.freeze_conv: - vars_to_optimize = list(set(tf.trainable_variables()) - - set(m.vision_ops.vars_to_restore)) - else: - vars_to_optimize = None - - m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \ - m.sync_optimizer = tf_utils.setup_training( - m.total_loss_op, - args.solver.initial_learning_rate, - args.solver.steps_per_decay, - args.solver.learning_rate_decay, - args.solver.momentum, - args.solver.max_steps, - args.solver.sync, - args.solver.adjust_lr_sync, - args.solver.num_workers, - args.solver.task, - vars_to_optimize=vars_to_optimize, - clip_gradient_norm=args.solver.clip_gradient_norm, - 
typ=args.solver.typ, momentum2=args.solver.momentum2, - adam_eps=args.solver.adam_eps) - - if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay': - m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k, - m.global_step_op) - elif args.arch.sample_gt_prob_type == 'zero': - m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32) - - elif args.arch.sample_gt_prob_type.split('_')[0] == 'step': - step = int(args.arch.sample_gt_prob_type.split('_')[1]) - m.sample_gt_prob_op = tf_utils.step_gt_prob( - step, m.input_tensors['step']['step_number'][0,0,0]) - - m.sample_action_type = args.arch.action_sample_type - m.sample_action_combine_type = args.arch.action_sample_combine_type - - m.summary_ops = { - summary_mode: _add_summaries(m, args, summary_mode, - args.summary.arop_full_summary_iters)} - - m.init_op = tf.group(tf.global_variables_initializer(), - tf.local_variables_initializer()) - m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4, - write_version=tf.train.SaverDef.V2) - return m diff --git a/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py b/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py deleted file mode 100644 index 55313bfbd..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/cmp_summary.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Code for setting up summaries for CMP. -""" - -import sys, os, numpy as np -import matplotlib.pyplot as plt - - -import tensorflow as tf - -from tensorflow.contrib import slim -from tensorflow.contrib.slim import arg_scope - -import logging -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -from src import utils -import src.file_utils as fu -import tfcode.nav_utils as nu - -def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N): - # outputs is [gt_map, pred_map]: - if N >= 0: - outputs = outputs[:N] - N = len(outputs) - - plt.set_cmap('jet') - fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5)) - axes = axes.ravel()[::-1].tolist() - for i in range(N): - gt_map, pred_map = outputs[i] - for j in [0]: - for k in range(gt_map.shape[4]): - # Display something like the midpoint of the trajectory. - id = np.int(gt_map.shape[1]/2) - - ax = axes.pop(); - ax.imshow(gt_map[j,id,:,:,k], origin='lower', interpolation='none', - vmin=0., vmax=1.) - ax.set_axis_off(); - if i == 0: ax.set_title('gt_map') - - ax = axes.pop(); - ax.imshow(pred_map[j,id,:,:,k], origin='lower', interpolation='none', - vmin=0., vmax=1.) 
- ax.set_axis_off(); - if i == 0: ax.set_title('pred_map') - - file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - plt.close(fig) - -def _vis(outputs, global_step, output_dir, metric_summary, N): - # Plot the value map, goal for various maps to see what if the model is - # learning anything useful. - # - # outputs is [values, goals, maps, occupancy, conf]. - # - if N >= 0: - outputs = outputs[:N] - N = len(outputs) - - plt.set_cmap('jet') - fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*5), (5,5)) - axes = axes.ravel()[::-1].tolist() - for i in range(N): - values, goals, maps, occupancy, conf = outputs[i] - for j in [0]: - for k in range(values.shape[4]): - # Display something like the midpoint of the trajectory. - id = np.int(values.shape[1]/2) - - ax = axes.pop(); - ax.imshow(goals[j,id,:,:,k], origin='lower', interpolation='none') - ax.set_axis_off(); - if i == 0: ax.set_title('goal') - - ax = axes.pop(); - ax.imshow(occupancy[j,id,:,:,k], origin='lower', interpolation='none') - ax.set_axis_off(); - if i == 0: ax.set_title('occupancy') - - ax = axes.pop(); - ax.imshow(conf[j,id,:,:,k], origin='lower', interpolation='none', - vmin=0., vmax=1.) - ax.set_axis_off(); - if i == 0: ax.set_title('conf') - - ax = axes.pop(); - ax.imshow(values[j,id,:,:,k], origin='lower', interpolation='none') - ax.set_axis_off(); - if i == 0: ax.set_title('value') - - ax = axes.pop(); - ax.imshow(maps[j,id,:,:,k], origin='lower', interpolation='none') - ax.set_axis_off(); - if i == 0: ax.set_title('incr map') - - file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - plt.close(fig) - -def _summary_vis(m, batch_size, num_steps, arop_full_summary_iters): - arop = []; arop_summary_iters = []; arop_eval_fns = []; - vis_value_ops = []; vis_goal_ops = []; vis_map_ops = []; - vis_occupancy_ops = []; vis_conf_ops = []; - for i, val_op in enumerate(m.value_ops): - vis_value_op = tf.reduce_mean(tf.abs(val_op), axis=3, keep_dims=True) - vis_value_ops.append(vis_value_op) - - vis_occupancy_op = tf.reduce_mean(tf.abs(m.occupancys[i]), 3, True) - vis_occupancy_ops.append(vis_occupancy_op) - - vis_conf_op = tf.reduce_max(tf.abs(m.confs[i]), axis=3, keep_dims=True) - vis_conf_ops.append(vis_conf_op) - - ego_goal_imgs_i_op = m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)] - vis_goal_op = tf.reduce_max(ego_goal_imgs_i_op, 4, True) - vis_goal_ops.append(vis_goal_op) - - vis_map_op = tf.reduce_mean(tf.abs(m.ego_map_ops[i]), 4, True) - vis_map_ops.append(vis_map_op) - - vis_goal_ops = tf.concat(vis_goal_ops, 4) - vis_map_ops = tf.concat(vis_map_ops, 4) - vis_value_ops = tf.concat(vis_value_ops, 3) - vis_occupancy_ops = tf.concat(vis_occupancy_ops, 3) - vis_conf_ops = tf.concat(vis_conf_ops, 3) - - sh = tf.unstack(tf.shape(vis_value_ops))[1:] - vis_value_ops = tf.reshape(vis_value_ops, shape=[batch_size, -1] + sh) - - sh = tf.unstack(tf.shape(vis_conf_ops))[1:] - vis_conf_ops = tf.reshape(vis_conf_ops, shape=[batch_size, -1] + sh) - - sh = tf.unstack(tf.shape(vis_occupancy_ops))[1:] - vis_occupancy_ops = tf.reshape(vis_occupancy_ops, shape=[batch_size,-1] + sh) - - # Save memory, only return time steps that need to be visualized, factor of - # 32 CPU memory saving. 
- id = np.int(num_steps/2) - vis_goal_ops = tf.expand_dims(vis_goal_ops[:,id,:,:,:], axis=1) - vis_map_ops = tf.expand_dims(vis_map_ops[:,id,:,:,:], axis=1) - vis_value_ops = tf.expand_dims(vis_value_ops[:,id,:,:,:], axis=1) - vis_conf_ops = tf.expand_dims(vis_conf_ops[:,id,:,:,:], axis=1) - vis_occupancy_ops = tf.expand_dims(vis_occupancy_ops[:,id,:,:,:], axis=1) - - arop += [[vis_value_ops, vis_goal_ops, vis_map_ops, vis_occupancy_ops, - vis_conf_ops]] - arop_summary_iters += [arop_full_summary_iters] - arop_eval_fns += [_vis] - return arop, arop_summary_iters, arop_eval_fns - -def _summary_readout_maps(m, num_steps, arop_full_summary_iters): - arop = []; arop_summary_iters = []; arop_eval_fns = []; - id = np.int(num_steps-1) - vis_readout_maps_gt = m.readout_maps_gt - vis_readout_maps_prob = tf.reshape(m.readout_maps_probs, - shape=tf.shape(vis_readout_maps_gt)) - vis_readout_maps_gt = tf.expand_dims(vis_readout_maps_gt[:,id,:,:,:], 1) - vis_readout_maps_prob = tf.expand_dims(vis_readout_maps_prob[:,id,:,:,:], 1) - arop += [[vis_readout_maps_gt, vis_readout_maps_prob]] - arop_summary_iters += [arop_full_summary_iters] - arop_eval_fns += [_vis_readout_maps] - return arop, arop_summary_iters, arop_eval_fns - -def _add_summaries(m, args, summary_mode, arop_full_summary_iters): - task_params = args.navtask.task_params - - summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op] + \ - m.loss_ops + m.acc_ops - summarize_names = ['lr', 'global_step', 'sample_gt_prob_op'] + \ - m.loss_ops_names + ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))] - to_aggregate = [0, 0, 0] + [1]*len(m.loss_ops_names) + [1]*len(m.acc_ops) - - scope_name = 'summary' - with tf.name_scope(scope_name): - s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters, - summarize_ops, summarize_names, - to_aggregate, m.action_prob_op, - m.input_tensors, scope_name=scope_name) - if summary_mode == 'val': - arop, arop_summary_iters, arop_eval_fns = _summary_vis( - m, task_params.batch_size, task_params.num_steps, - arop_full_summary_iters) - s_ops.additional_return_ops += arop - s_ops.arop_summary_iters += arop_summary_iters - s_ops.arop_eval_fns += arop_eval_fns - - if args.arch.readout_maps: - arop, arop_summary_iters, arop_eval_fns = _summary_readout_maps( - m, task_params.num_steps, arop_full_summary_iters) - s_ops.additional_return_ops += arop - s_ops.arop_summary_iters += arop_summary_iters - s_ops.arop_eval_fns += arop_eval_fns - - return s_ops diff --git a/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py b/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py deleted file mode 100644 index 6d87c697b..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/cmp_utils.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utility functions for setting up the CMP graph. 
-""" - -import os, numpy as np -import matplotlib.pyplot as plt - - -import tensorflow as tf - -from tensorflow.contrib import slim -from tensorflow.contrib.slim import arg_scope -import logging -from src import utils -import src.file_utils as fu -from tfcode import tf_utils - -resnet_v2 = tf_utils.resnet_v2 -custom_residual_block = tf_utils.custom_residual_block - -def value_iteration_network( - fr, num_iters, val_neurons, action_neurons, kernel_size, share_wts=False, - name='vin', wt_decay=0.0001, activation_fn=None, shape_aware=False): - """ - Constructs a Value Iteration Network, convolutions and max pooling across - channels. - Input: - fr: NxWxHxC - val_neurons: Number of channels for maintaining the value. - action_neurons: Computes action_neurons * val_neurons at each iteration to - max pool over. - Output: - value image: NxHxWx(val_neurons) - """ - init_var = np.sqrt(2.0/(kernel_size**2)/(val_neurons*action_neurons)) - vals = [] - with tf.variable_scope(name) as varscope: - if shape_aware == False: - fr_shape = tf.unstack(tf.shape(fr)) - val_shape = tf.stack(fr_shape[:-1] + [val_neurons]) - val = tf.zeros(val_shape, name='val_init') - else: - val = tf.expand_dims(tf.zeros_like(fr[:,:,:,0]), dim=-1) * \ - tf.constant(0., dtype=tf.float32, shape=[1,1,1,val_neurons]) - val_shape = tf.shape(val) - vals.append(val) - for i in range(num_iters): - if share_wts: - # The first Value Iteration maybe special, so it can have its own - # paramterss. - scope = 'conv' - if i == 0: scope = 'conv_0' - if i > 1: varscope.reuse_variables() - else: - scope = 'conv_{:d}'.format(i) - val = slim.conv2d(tf.concat([val, fr], 3, name='concat_{:d}'.format(i)), - num_outputs=action_neurons*val_neurons, - kernel_size=kernel_size, stride=1, activation_fn=activation_fn, - scope=scope, normalizer_fn=None, - weights_regularizer=slim.l2_regularizer(wt_decay), - weights_initializer=tf.random_normal_initializer(stddev=init_var), - biases_initializer=tf.zeros_initializer()) - val = tf.reshape(val, [-1, action_neurons*val_neurons, 1, 1], - name='re_{:d}'.format(i)) - val = slim.max_pool2d(val, kernel_size=[action_neurons,1], - stride=[action_neurons,1], padding='VALID', - scope='val_{:d}'.format(i)) - val = tf.reshape(val, val_shape, name='unre_{:d}'.format(i)) - vals.append(val) - return val, vals - - -def rotate_preds(loc_on_map, relative_theta, map_size, preds, - output_valid_mask): - with tf.name_scope('rotate'): - flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size) - if type(preds) != list: - rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op, - output_valid_mask) - else: - rotated_preds = [] ;valid_mask_warps = [] - for pred in preds: - rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op, - output_valid_mask) - rotated_preds.append(rotated_pred) - valid_mask_warps.append(valid_mask_warp) - return rotated_preds, valid_mask_warps - -def get_visual_frustum(map_size, shape_like, expand_dims=[0,0]): - with tf.name_scope('visual_frustum'): - l = np.tril(np.ones(map_size)) ;l = l + l[:,::-1] - l = (l == 2).astype(np.float32) - for e in expand_dims: - l = np.expand_dims(l, axis=e) - confs_probs = tf.constant(l, dtype=tf.float32) - confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs - return confs_probs - -def deconv(x, is_training, wt_decay, neurons, strides, layers_per_block, - kernel_size, conv_fn, name, offset=0): - """Generates a up sampling network with residual connections. 
- """ - batch_norm_param = {'center': True, 'scale': True, - 'activation_fn': tf.nn.relu, - 'is_training': is_training} - outs = [] - for i, (neuron, stride) in enumerate(zip(neurons, strides)): - for s in range(layers_per_block): - scope = '{:s}_{:d}_{:d}'.format(name, i+1+offset,s+1) - x = custom_residual_block(x, neuron, kernel_size, stride, scope, - is_training, wt_decay, use_residual=True, - residual_stride_conv=True, conv_fn=conv_fn, - batch_norm_param=batch_norm_param) - stride = 1 - outs.append((x,True)) - return x, outs - -def fr_v2(x, output_neurons, inside_neurons, is_training, name='fr', - wt_decay=0.0001, stride=1, updates_collections=tf.GraphKeys.UPDATE_OPS): - """Performs fusion of information between the map and the reward map. - Inputs - x: NxHxWxC1 - - Outputs - fr map: NxHxWx(output_neurons) - """ - if type(stride) != list: - stride = [stride] - with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope( - is_training=is_training, weight_decay=wt_decay)): - with slim.arg_scope([slim.batch_norm], updates_collections=updates_collections) as arg_sc: - # Change the updates_collections for the conv normalizer_params to None - for i in range(len(arg_sc.keys())): - if 'convolution' in arg_sc.keys()[i]: - arg_sc.values()[i]['normalizer_params']['updates_collections'] = updates_collections - with slim.arg_scope(arg_sc): - bottleneck = resnet_v2.bottleneck - blocks = [] - for i, s in enumerate(stride): - b = resnet_v2.resnet_utils.Block( - 'block{:d}'.format(i + 1), bottleneck, [{ - 'depth': output_neurons, - 'depth_bottleneck': inside_neurons, - 'stride': stride[i] - }]) - blocks.append(b) - x, outs = resnet_v2.resnet_v2(x, blocks, num_classes=None, global_pool=False, - output_stride=None, include_root_block=False, - reuse=False, scope=name) - return x, outs diff --git a/research/cognitive_mapping_and_planning/tfcode/nav_utils.py b/research/cognitive_mapping_and_planning/tfcode/nav_utils.py deleted file mode 100644 index 2f764f33d..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/nav_utils.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Various losses for training navigation agents. - -Defines various loss functions for navigation agents, -compute_losses_multi_or. -""" - -import os, numpy as np -import matplotlib.pyplot as plt - - -import tensorflow as tf - -from tensorflow.contrib import slim -from tensorflow.contrib.slim import arg_scope -from tensorflow.contrib.slim.nets import resnet_v2 -from tensorflow.python.training import moving_averages -import logging -from src import utils -import src.file_utils as fu -from tfcode import tf_utils - - -def compute_losses_multi_or(logits, actions_one_hot, weights=None, - num_actions=-1, data_loss_wt=1., reg_loss_wt=1., - ewma_decay=0.99, reg_loss_op=None): - assert(num_actions > 0), 'num_actions must be specified and must be > 0.' 
-
- with tf.name_scope('loss'):
- if weights is None:
- weights = tf.ones_like(actions_one_hot, dtype=tf.float32, name='weight')
-
- actions_one_hot = tf.cast(tf.reshape(actions_one_hot, [-1, num_actions],
- 're_actions_one_hot'), tf.float32)
- weights = tf.reduce_sum(tf.reshape(weights, [-1, num_actions], 're_weight'),
- reduction_indices=1)
- total = tf.reduce_sum(weights)
-
- action_prob = tf.nn.softmax(logits)
- action_prob = tf.reduce_sum(tf.multiply(action_prob, actions_one_hot),
- reduction_indices=1)
- example_loss = -tf.log(tf.maximum(tf.constant(1e-4), action_prob))
-
- data_loss_op = tf.reduce_sum(example_loss * weights) / total
- if reg_loss_op is None:
- if reg_loss_wt > 0:
- reg_loss_op = tf.add_n(tf.losses.get_regularization_losses())
- else:
- reg_loss_op = tf.constant(0.)
-
- if reg_loss_wt > 0:
- total_loss_op = data_loss_wt*data_loss_op + reg_loss_wt*reg_loss_op
- else:
- total_loss_op = data_loss_wt*data_loss_op
-
- is_correct = tf.cast(tf.greater(action_prob, 0.5, name='pred_class'), tf.float32)
- acc_op = tf.reduce_sum(is_correct*weights) / total
-
- ewma_acc_op = moving_averages.weighted_moving_average(
- acc_op, ewma_decay, weight=total, name='ewma_acc')
-
- acc_ops = [ewma_acc_op]
-
- return reg_loss_op, data_loss_op, total_loss_op, acc_ops
-
-
-def get_repr_from_image(images_reshaped, modalities, data_augment, encoder,
- freeze_conv, wt_decay, is_training):
- # Pass the image through the convolutional encoder to obtain pool5 features.
- if modalities == ['rgb']:
- with tf.name_scope('pre_rgb'):
- x = (images_reshaped + 128.) / 255. # Convert to brightness between 0 and 1.
- if data_augment.relight and is_training:
- x = tf_utils.distort_image(x, fast_mode=data_augment.relight_fast)
- x = (x-0.5)*2.0
- scope_name = encoder
- elif modalities == ['depth']:
- with tf.name_scope('pre_d'):
- d_image = images_reshaped
- x = 2*(d_image[...,0] - 80.0)/100.0
- y = d_image[...,1]
- d_image = tf.concat([tf.expand_dims(x, -1), tf.expand_dims(y, -1)], 3)
- x = d_image
- scope_name = 'd_'+encoder
-
- resnet_is_training = is_training and (not freeze_conv)
- with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(resnet_is_training)):
- fn = getattr(tf_utils, encoder)
- x, end_points = fn(x, num_classes=None, global_pool=False,
- output_stride=None, reuse=None,
- scope=scope_name)
- vars_ = slim.get_variables_to_restore()
-
- conv_feat = x
- return conv_feat, vars_
-
-def default_train_step_kwargs(m, obj, logdir, rng_seed, is_chief, num_steps,
- iters, train_display_interval,
- dagger_sample_bn_false):
- train_step_kwargs = {}
- train_step_kwargs['obj'] = obj
- train_step_kwargs['m'] = m
-
- # rng_data has 2 independent rngs, one for sampling episodes and one for
- # sampling perturbs (so that we can make results reproducible).
- train_step_kwargs['rng_data'] = [np.random.RandomState(rng_seed),
- np.random.RandomState(rng_seed)]
- train_step_kwargs['rng_action'] = np.random.RandomState(rng_seed)
- if is_chief:
- train_step_kwargs['writer'] = tf.summary.FileWriter(logdir) #, m.tf_graph)
- else:
- train_step_kwargs['writer'] = None
- train_step_kwargs['iters'] = iters
- train_step_kwargs['train_display_interval'] = train_display_interval
- train_step_kwargs['num_steps'] = num_steps
- train_step_kwargs['logdir'] = logdir
- train_step_kwargs['dagger_sample_bn_false'] = dagger_sample_bn_false
- return train_step_kwargs
-
-# Utilities for visualizing and analysing validation output.
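-# All eval functions below share one calling convention: `outputs` is a list
-# with one entry per validation iteration, each entry being the result of
-# sess.run on one group of additional_return_ops. The functions concatenate
-# these per-iteration arrays along the batch axis, then write figures and
-# pickles into `output_dir`, tagged with the global step.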
-def save_d_at_t(outputs, global_step, output_dir, metric_summary, N): - """Save distance to goal at all time steps. - - Args: - outputs : [gt_dist_to_goal]. - global_step : number of iterations. - output_dir : output directory. - metric_summary : to append scalars to summary. - N : number of outputs to process. - - """ - d_at_t = np.concatenate(map(lambda x: x[0][:,:,0]*1, outputs), axis=0) - fig, axes = utils.subplot(plt, (1,1), (5,5)) - axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.') - axes.set_xlabel('time step') - axes.set_ylabel('dist to next goal') - axes.grid('on') - file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step)) - utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True) - plt.close(fig) - return None - -def save_all(outputs, global_step, output_dir, metric_summary, N): - """Save numerous statistics. - - Args: - outputs : [locs, goal_loc, gt_dist_to_goal, node_ids, perturbs] - global_step : number of iterations. - output_dir : output directory. - metric_summary : to append scalars to summary. - N : number of outputs to process. - """ - all_locs = np.concatenate(map(lambda x: x[0], outputs), axis=0) - all_goal_locs = np.concatenate(map(lambda x: x[1], outputs), axis=0) - all_d_at_t = np.concatenate(map(lambda x: x[2][:,:,0]*1, outputs), axis=0) - all_node_ids = np.concatenate(map(lambda x: x[3], outputs), axis=0) - all_perturbs = np.concatenate(map(lambda x: x[4], outputs), axis=0) - - file_name = os.path.join(output_dir, 'all_locs_at_t_{:d}.pkl'.format(global_step)) - vars = [all_locs, all_goal_locs, all_d_at_t, all_node_ids, all_perturbs] - var_names = ['all_locs', 'all_goal_locs', 'all_d_at_t', 'all_node_ids', 'all_perturbs'] - utils.save_variables(file_name, vars, var_names, overwrite=True) - return None - -def eval_ap(outputs, global_step, output_dir, metric_summary, N, num_classes=4): - """Processes the collected outputs to compute AP for action prediction. - - Args: - outputs : [logits, labels] - global_step : global_step. - output_dir : where to store results. - metric_summary : summary object to add summaries to. - N : number of outputs to process. - num_classes : number of classes to compute AP over, and to reshape tensors. - """ - if N >= 0: - outputs = outputs[:N] - logits = np.concatenate(map(lambda x: x[0], outputs), axis=0).reshape((-1, num_classes)) - labels = np.concatenate(map(lambda x: x[1], outputs), axis=0).reshape((-1, num_classes)) - aps = [] - for i in range(logits.shape[1]): - ap, rec, prec = utils.calc_pr(labels[:,i], logits[:,i]) - ap = ap[0] - tf_utils.add_value_to_summary(metric_summary, 'aps/ap_{:d}: '.format(i), ap) - aps.append(ap) - return aps - -def eval_dist(outputs, global_step, output_dir, metric_summary, N): - """Processes the collected outputs during validation to - 1. Plot the distance over time curve. - 2. Compute mean and median distances. - 3. Plots histogram of end distances. - - Args: - outputs : [locs, goal_loc, gt_dist_to_goal]. - global_step : global_step. - output_dir : where to store results. - metric_summary : summary object to add summaries to. - N : number of outputs to process. - """ - SUCCESS_THRESH = 3 - if N >= 0: - outputs = outputs[:N] - - # Plot distance at time t. 
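- # gt_dist_to_goal has shape (batch, time, 1); averaging over the batch gives
- # the mean distance-to-goal as a function of time step, which is what the
- # figure below shows.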
- d_at_t = [] - for i in range(len(outputs)): - locs, goal_loc, gt_dist_to_goal = outputs[i] - d_at_t.append(gt_dist_to_goal[:,:,0]*1) - - # Plot the distance. - fig, axes = utils.subplot(plt, (1,1), (5,5)) - d_at_t = np.concatenate(d_at_t, axis=0) - axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.') - axes.set_xlabel('time step') - axes.set_ylabel('dist to next goal') - axes.grid('on') - file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step)) - utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True) - plt.close(fig) - - # Plot the trajectories and the init_distance and final distance. - d_inits = [] - d_ends = [] - for i in range(len(outputs)): - locs, goal_loc, gt_dist_to_goal = outputs[i] - d_inits.append(gt_dist_to_goal[:,0,0]*1) - d_ends.append(gt_dist_to_goal[:,-1,0]*1) - - # Plot the distance. - fig, axes = utils.subplot(plt, (1,1), (5,5)) - d_inits = np.concatenate(d_inits, axis=0) - d_ends = np.concatenate(d_ends, axis=0) - axes.plot(d_inits+np.random.rand(*(d_inits.shape))-0.5, - d_ends+np.random.rand(*(d_ends.shape))-0.5, '.', mec='red', mew=1.0) - axes.set_xlabel('init dist'); axes.set_ylabel('final dist'); - axes.grid('on'); axes.axis('equal'); - title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}' - title_str = title_str.format( - np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75), - 100*(np.mean(d_ends <= SUCCESS_THRESH))) - axes.set_title(title_str) - file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - - file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step)) - utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'], - overwrite=True) - plt.close(fig) - - # Plot the histogram of the end_distance. - with plt.style.context('seaborn-white'): - d_ends_ = np.sort(d_ends) - d_inits_ = np.sort(d_inits) - leg = []; - fig, ax = utils.subplot(plt, (1,1), (5,5)) - ax.grid('on') - ax.set_xlabel('Distance from goal'); ax.xaxis.label.set_fontsize(16); - ax.set_ylabel('Fraction of data'); ax.yaxis.label.set_fontsize(16); - ax.plot(d_ends_, np.arange(d_ends_.size)*1./d_ends_.size, 'r') - ax.plot(d_inits_, np.arange(d_inits_.size)*1./d_inits_.size, 'k') - leg.append('Final'); leg.append('Init'); - ax.legend(leg, fontsize='x-large'); - ax.set_axis_on() - title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}' - title_str = title_str.format( - np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75), - 100*(np.mean(d_ends <= SUCCESS_THRESH))) - ax.set_title(title_str) - file_name = os.path.join(output_dir, 'dist_hist_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - - # Log distance metrics. 
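- # An episode counts as a success when the distance to goal is within
- # SUCCESS_THRESH; mean, median and 75th-percentile distances are logged for
- # both the initial and the final position so the two distributions can be
- # compared directly.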
- tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ', - 100*(np.mean(d_inits <= SUCCESS_THRESH))) - tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ', - 100*(np.mean(d_ends <= SUCCESS_THRESH))) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ', - np.percentile(d_inits, q=75)) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ', - np.percentile(d_ends, q=75)) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ', - np.median(d_inits)) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ', - np.median(d_ends)) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ', - np.mean(d_inits)) - tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ', - np.mean(d_ends)) - return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \ - np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \ - 100*(np.mean(d_inits) <= SUCCESS_THRESH), 100*(np.mean(d_ends) <= SUCCESS_THRESH) - -def plot_trajectories(outputs, global_step, output_dir, metric_summary, N): - """Processes the collected outputs during validation to plot the trajectories - in the top view. - - Args: - outputs : [locs, orig_maps, goal_loc]. - global_step : global_step. - output_dir : where to store results. - metric_summary : summary object to add summaries to. - N : number of outputs to process. - """ - if N >= 0: - outputs = outputs[:N] - N = len(outputs) - - plt.set_cmap('gray') - fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5,5)) - axes = axes.ravel()[::-1].tolist() - for i in range(N): - locs, orig_maps, goal_loc = outputs[i] - is_semantic = np.isnan(goal_loc[0,0,1]) - for j in range(orig_maps.shape[0]): - ax = axes.pop(); - ax.plot(locs[j,0,0], locs[j,0,1], 'ys') - # Plot one by one, so that they come in different colors. - for k in range(goal_loc.shape[1]): - if not is_semantic: - ax.plot(goal_loc[j,k,0], goal_loc[j,k,1], 's') - if False: - ax.plot(locs[j,:,0], locs[j,:,1], 'r.', ms=3) - ax.imshow(orig_maps[j,0,:,:,0], origin='lower') - ax.set_axis_off(); - else: - ax.scatter(locs[j,:,0], locs[j,:,1], c=np.arange(locs.shape[1]), - cmap='jet', s=10, lw=0) - ax.imshow(orig_maps[j,0,:,:,0], origin='lower', vmin=-1.0, vmax=2.0) - if not is_semantic: - xymin = np.minimum(np.min(goal_loc[j,:,:], axis=0), np.min(locs[j,:,:], axis=0)) - xymax = np.maximum(np.max(goal_loc[j,:,:], axis=0), np.max(locs[j,:,:], axis=0)) - else: - xymin = np.min(locs[j,:,:], axis=0) - xymax = np.max(locs[j,:,:], axis=0) - xy1 = (xymax+xymin)/2. - np.maximum(np.max(xymax-xymin), 12) - xy2 = (xymax+xymin)/2. + np.maximum(np.max(xymax-xymin), 12) - ax.set_xlim([xy1[0], xy2[0]]) - ax.set_ylim([xy1[1], xy2[1]]) - ax.set_axis_off() - file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step)) - with fu.fopen(file_name, 'w') as f: - fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) - plt.close(fig) - return None - -def add_default_summaries(mode, arop_full_summary_iters, summarize_ops, - summarize_names, to_aggregate, action_prob_op, - input_tensors, scope_name): - assert(mode == 'train' or mode == 'val' or mode == 'test'), \ - 'add_default_summaries mode is neither train or val or test.' 
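- # The three modes differ in what is aggregated across iterations and which
- # eval functions consume the collected outputs:
- #   train: plain scalar summaries, nothing aggregated.
- #   val:   aggregated scalars, plus action AP, distance curves and
- #          trajectory plots (eval_ap, eval_dist, plot_trajectories).
- #   test:  no scalar summaries; distance metrics and raw dumps
- #          (eval_dist, save_d_at_t, save_all, plot_trajectories).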
- - s_ops = tf_utils.get_default_summary_ops() - - if mode == 'train': - s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ - arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( - summarize_ops, summarize_names, mode, to_aggregate=False, - scope_name=scope_name) - s_ops.additional_return_ops += additional_return_ops - s_ops.arop_summary_iters += arop_summary_iters - s_ops.arop_eval_fns += arop_eval_fns - elif mode == 'val': - s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ - arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( - summarize_ops, summarize_names, mode, to_aggregate=to_aggregate, - scope_name=scope_name) - s_ops.additional_return_ops += additional_return_ops - s_ops.arop_summary_iters += arop_summary_iters - s_ops.arop_eval_fns += arop_eval_fns - - elif mode == 'test': - s_ops.summary_ops, s_ops.print_summary_ops, additional_return_ops, \ - arop_summary_iters, arop_eval_fns = tf_utils.simple_summaries( - [], [], mode, to_aggregate=[], scope_name=scope_name) - s_ops.additional_return_ops += additional_return_ops - s_ops.arop_summary_iters += arop_summary_iters - s_ops.arop_eval_fns += arop_eval_fns - - - if mode == 'val': - arop = s_ops.additional_return_ops - arop += [[action_prob_op, input_tensors['train']['action']]] - arop += [[input_tensors['step']['loc_on_map'], - input_tensors['common']['goal_loc'], - input_tensors['step']['gt_dist_to_goal']]] - arop += [[input_tensors['step']['loc_on_map'], - input_tensors['common']['orig_maps'], - input_tensors['common']['goal_loc']]] - s_ops.arop_summary_iters += [-1, arop_full_summary_iters, - arop_full_summary_iters] - s_ops.arop_eval_fns += [eval_ap, eval_dist, plot_trajectories] - - elif mode == 'test': - arop = s_ops.additional_return_ops - arop += [[input_tensors['step']['loc_on_map'], - input_tensors['common']['goal_loc'], - input_tensors['step']['gt_dist_to_goal']]] - arop += [[input_tensors['step']['gt_dist_to_goal']]] - arop += [[input_tensors['step']['loc_on_map'], - input_tensors['common']['goal_loc'], - input_tensors['step']['gt_dist_to_goal'], - input_tensors['step']['node_ids'], - input_tensors['step']['perturbs']]] - arop += [[input_tensors['step']['loc_on_map'], - input_tensors['common']['orig_maps'], - input_tensors['common']['goal_loc']]] - s_ops.arop_summary_iters += [-1, -1, -1, arop_full_summary_iters] - s_ops.arop_eval_fns += [eval_dist, save_d_at_t, save_all, - plot_trajectories] - return s_ops - - diff --git a/research/cognitive_mapping_and_planning/tfcode/tf_utils.py b/research/cognitive_mapping_and_planning/tfcode/tf_utils.py deleted file mode 100644 index 5f96d8ff5..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/tf_utils.py +++ /dev/null @@ -1,840 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import numpy as np -import sys -import tensorflow as tf -import src.utils as utils -import logging -from tensorflow.contrib import slim -from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops -from tensorflow.contrib.slim import arg_scope -from tensorflow.contrib.slim.nets import resnet_v2 -from tensorflow.python.framework import dtypes -from tensorflow.python.ops import array_ops -from tensorflow.python.ops import check_ops -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import variable_scope -sys.path.insert(0, '../slim') -from preprocessing import inception_preprocessing as ip - -resnet_v2_50 = resnet_v2.resnet_v2_50 - - -def custom_residual_block(x, neurons, kernel_size, stride, name, is_training, - wt_decay=0.0001, use_residual=True, - residual_stride_conv=True, conv_fn=slim.conv2d, - batch_norm_param=None): - - # batch norm x and relu - init_var = np.sqrt(2.0/(kernel_size**2)/neurons) - with arg_scope([conv_fn], - weights_regularizer=slim.l2_regularizer(wt_decay), - weights_initializer=tf.random_normal_initializer(stddev=init_var), - biases_initializer=tf.zeros_initializer()): - - if batch_norm_param is None: - batch_norm_param = {'center': True, 'scale': False, - 'activation_fn':tf.nn.relu, - 'is_training': is_training} - - y = slim.batch_norm(x, scope=name+'_bn', **batch_norm_param) - - y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size, stride=stride, - activation_fn=None, scope=name+'_1', - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_param) - - y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size, - stride=1, activation_fn=None, scope=name+'_2') - - if use_residual: - if stride != 1 or x.get_shape().as_list()[-1] != neurons: - batch_norm_param_ = dict(batch_norm_param) - batch_norm_param_['activation_fn'] = None - x = conv_fn(x, num_outputs=neurons, kernel_size=1, - stride=stride if residual_stride_conv else 1, - activation_fn=None, scope=name+'_0_1x1', - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_param_) - if not residual_stride_conv: - x = slim.avg_pool2d(x, 1, stride=stride, scope=name+'_0_avg') - - y = tf.add(x, y, name=name+'_add') - - return y - -def step_gt_prob(step, step_number_op): - # Change samping probability from 1 to -1 at step steps. - with tf.name_scope('step_gt_prob'): - out = tf.cond(tf.less(step_number_op, step), - lambda: tf.constant(1.), lambda: tf.constant(-1.)) - return out - -def inverse_sigmoid_decay(k, global_step_op): - with tf.name_scope('inverse_sigmoid_decay'): - k = tf.constant(k, dtype=tf.float32) - tmp = k*tf.exp(-tf.cast(global_step_op, tf.float32)/k) - tmp = tmp / (1. + tmp) - return tmp - -def dense_resample(im, flow_im, output_valid_mask, name='dense_resample'): - """ Resample reward at particular locations. - Args: - im: ...xHxWxC matrix to sample from. - flow_im: ...xHxWx2 matrix, samples the image using absolute offsets as given - by the flow_im. - """ - with tf.name_scope(name): - valid_mask = None - - x, y = tf.unstack(flow_im, axis=-1) - x = tf.cast(tf.reshape(x, [-1]), tf.float32) - y = tf.cast(tf.reshape(y, [-1]), tf.float32) - - # constants - shape = tf.unstack(tf.shape(im)) - channels = shape[-1] - width = shape[-2] - height = shape[-3] - num_batch = tf.cast(tf.reduce_prod(tf.stack(shape[:-3])), 'int32') - zero = tf.constant(0, dtype=tf.int32) - - # Round up and down. 
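- # Standard bilinear interpolation: take the four integer neighbours
- # (x0, y0) .. (x1, y1) of every sample location and blend their pixel values
- # with area weights, e.g. the top-left pixel (x0, y0) gets weight
- # (x1 - x) * (y1 - y). Indices are clipped into range below; samples that
- # fall outside the image are reported via valid_mask instead.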
- x0 = tf.cast(tf.floor(x), 'int32'); x1 = x0 + 1; - y0 = tf.cast(tf.floor(y), 'int32'); y1 = y0 + 1; - - if output_valid_mask: - valid_mask = tf.logical_and( - tf.logical_and(tf.less_equal(x, tf.cast(width, tf.float32)-1.), tf.greater_equal(x, 0.)), - tf.logical_and(tf.less_equal(y, tf.cast(height, tf.float32)-1.), tf.greater_equal(y, 0.))) - valid_mask = tf.reshape(valid_mask, shape=shape[:-1] + [1]) - - x0 = tf.clip_by_value(x0, zero, width-1) - x1 = tf.clip_by_value(x1, zero, width-1) - y0 = tf.clip_by_value(y0, zero, height-1) - y1 = tf.clip_by_value(y1, zero, height-1) - - dim2 = width; dim1 = width * height; - - # Create base index - base = tf.reshape(tf.range(num_batch) * dim1, shape=[-1,1]) - base = tf.reshape(tf.tile(base, [1, height*width]), shape=[-1]) - - base_y0 = base + y0 * dim2 - base_y1 = base + y1 * dim2 - idx_a = base_y0 + x0 - idx_b = base_y1 + x0 - idx_c = base_y0 + x1 - idx_d = base_y1 + x1 - - # use indices to lookup pixels in the flat image and restore channels dim - sh = tf.stack([tf.constant(-1,dtype=tf.int32), channels]) - im_flat = tf.cast(tf.reshape(im, sh), dtype=tf.float32) - pixel_a = tf.gather(im_flat, idx_a) - pixel_b = tf.gather(im_flat, idx_b) - pixel_c = tf.gather(im_flat, idx_c) - pixel_d = tf.gather(im_flat, idx_d) - - # and finally calculate interpolated values - x1_f = tf.to_float(x1) - y1_f = tf.to_float(y1) - - wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1) - wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1) - wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1) - wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1) - - output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d]) - output = tf.reshape(output, shape=tf.shape(im)) - return output, valid_mask - -def get_flow(t, theta, map_size, name_scope='gen_flow'): - """ - Rotates the map by theta and translates the rotated map by t. - - Assume that the robot rotates by an angle theta and then moves forward by - translation t. This function returns the flow field field. For every pixel in - the new image it tells us which pixel in the original image it came from: - NewI(x, y) = OldI(flow_x(x,y), flow_y(x,y)). - - Assume there is a point p in the original image. Robot rotates by R and moves - forward by t. p1 = Rt*p; p2 = p1 - t; (the world moves in opposite direction. - So, p2 = Rt*p - t, thus p2 came from R*(p2+t), which is what this function - calculates. - - t: ... x 2 (translation for B batches of N motions each). - theta: ... x 1 (rotation for B batches of N motions each). - - Output: ... 
x map_size x map_size x 2 - """ - - with tf.name_scope(name_scope): - tx, ty = tf.unstack(tf.reshape(t, shape=[-1, 1, 1, 1, 2]), axis=4) - theta = tf.reshape(theta, shape=[-1, 1, 1, 1]) - c = tf.constant((map_size-1.)/2., dtype=tf.float32) - - x, y = np.meshgrid(np.arange(map_size), np.arange(map_size)) - x = tf.constant(x[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='x', - shape=[1, map_size, map_size, 1]) - y = tf.constant(y[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='y', - shape=[1,map_size, map_size, 1]) - - x = x-(-tx+c) - y = y-(-ty+c) - - sin_theta = tf.sin(theta) - cos_theta = tf.cos(theta) - xr = cos_theta*x - sin_theta*y - yr = sin_theta*x + cos_theta*y - - xr = xr + c - yr = yr + c - - flow = tf.stack([xr, yr], axis=-1) - sh = tf.unstack(tf.shape(t), axis=0) - sh = tf.stack(sh[:-1]+[tf.constant(_, dtype=tf.int32) for _ in [map_size, map_size, 2]]) - flow = tf.reshape(flow, shape=sh) - return flow - -def distort_image(im, fast_mode=False): - # All images in the same batch are transformed the same way, but over - # iterations you see different distortions. - # im should be float with values between 0 and 1. - im_ = tf.reshape(im, shape=(-1,1,3)) - im_ = ip.apply_with_random_selector( - im_, lambda x, ordering: ip.distort_color(x, ordering, fast_mode), - num_cases=4) - im_ = tf.reshape(im_, tf.shape(im)) - return im_ - -def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0, - batch_norm_param=None, dropout_ratio=0.0, is_training=None): - if dropout_ratio > 0: - assert(is_training is not None), \ - 'is_training needs to be defined when trainnig with dropout.' - - repr = [] - for i, neuron in enumerate(neurons): - init_var = np.sqrt(2.0/neuron) - if batch_norm_param is not None: - x = slim.fully_connected(x, neuron, activation_fn=None, - weights_initializer=tf.random_normal_initializer(stddev=init_var), - weights_regularizer=slim.l2_regularizer(wt_decay), - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_param, - biases_initializer=tf.zeros_initializer(), - scope='{:s}_{:d}'.format(name, offset+i)) - else: - x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu, - weights_initializer=tf.random_normal_initializer(stddev=init_var), - weights_regularizer=slim.l2_regularizer(wt_decay), - biases_initializer=tf.zeros_initializer(), - scope='{:s}_{:d}'.format(name, offset+i)) - if dropout_ratio > 0: - x = slim.dropout(x, keep_prob=1-dropout_ratio, is_training=is_training, - scope='{:s}_{:d}'.format('dropout_'+name, offset+i)) - repr.append(x) - - if num_pred is not None: - init_var = np.sqrt(2.0/num_pred) - x = slim.fully_connected(x, num_pred, - weights_regularizer=slim.l2_regularizer(wt_decay), - weights_initializer=tf.random_normal_initializer(stddev=init_var), - biases_initializer=tf.zeros_initializer(), - activation_fn=None, - scope='{:s}_pred'.format(name)) - return x, repr - -def concat_state_x_list(f, names): - af = {} - for i, k in enumerate(names): - af[k] = np.concatenate([x[i] for x in f], axis=1) - return af - -def concat_state_x(f, names): - af = {} - for k in names: - af[k] = np.concatenate([x[k] for x in f], axis=1) - # af[k] = np.swapaxes(af[k], 0, 1) - return af - -def sample_action(rng, action_probs, optimal_action, sample_gt_prob, - type='sample', combine_type='one_or_other'): - optimal_action_ = optimal_action/np.sum(optimal_action+0., 1, keepdims=True) - action_probs_ = action_probs/np.sum(action_probs+0.001, 1, keepdims=True) - batch_size = action_probs_.shape[0] - - action = np.zeros((batch_size), dtype=np.int32) 
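- # Two ways of mixing the expert and the policy, chosen by combine_type:
- #   'one_or_other': with probability sample_gt_prob use the expert's whole
- #                   distribution, otherwise the policy's.
- #   'add':          blend the two distributions pointwise and renormalize.
- # action_sample_wt is the importance weight policy_prob / mixture_prob of the
- # sampled action, which can be used to importance-correct the loss toward the
- # policy distribution.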
- action_sample_wt = np.zeros((batch_size), dtype=np.float32) - if combine_type == 'add': - sample_gt_prob_ = np.minimum(np.maximum(sample_gt_prob, 0.), 1.) - - for i in range(batch_size): - if combine_type == 'one_or_other': - sample_gt = rng.rand() < sample_gt_prob - if sample_gt: distr_ = optimal_action_[i,:]*1. - else: distr_ = action_probs_[i,:]*1. - elif combine_type == 'add': - distr_ = optimal_action_[i,:]*sample_gt_prob_ + \ - (1.-sample_gt_prob_)*action_probs_[i,:] - distr_ = distr_ / np.sum(distr_) - - if type == 'sample': - action[i] = np.argmax(rng.multinomial(1, distr_, size=1)) - elif type == 'argmax': - action[i] = np.argmax(distr_) - action_sample_wt[i] = action_probs_[i, action[i]] / distr_[action[i]] - return action, action_sample_wt - -def train_step_custom_online_sampling(sess, train_op, global_step, - train_step_kwargs, mode='train'): - m = train_step_kwargs['m'] - obj = train_step_kwargs['obj'] - rng_data = train_step_kwargs['rng_data'] - rng_action = train_step_kwargs['rng_action'] - writer = train_step_kwargs['writer'] - iters = train_step_kwargs['iters'] - num_steps = train_step_kwargs['num_steps'] - logdir = train_step_kwargs['logdir'] - dagger_sample_bn_false = train_step_kwargs['dagger_sample_bn_false'] - train_display_interval = train_step_kwargs['train_display_interval'] - if 'outputs' not in m.train_ops: - m.train_ops['outputs'] = [] - - s_ops = m.summary_ops[mode] - val_additional_ops = [] - - # Print all variables here. - if False: - v = tf.get_collection(tf.GraphKeys.VARIABLES) - v_op = [_.value() for _ in v] - v_op_value = sess.run(v_op) - - filter = lambda x, y: 'Adam' in x.name - # filter = lambda x, y: np.is_any_nan(y) - ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)] - v = [v[i] for i in ind] - v_op_value = [v_op_value[i] for i in ind] - - for i in range(len(v)): - logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.', - v[i].name, np.any(np.isnan(v_op_value[i])), - np.linalg.norm(v_op_value[i])) - - tt = utils.Timer() - for i in range(iters): - tt.tic() - # Sample a room. - e = obj.sample_env(rng_data) - - # Initialize the agent. - init_env_state = e.reset(rng_data) - - # Get and process the common data. 
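- # One training iteration is one on-policy rollout: fetch the episode-level
- # tensors once, step the policy for num_steps while mixing in expert actions
- # according to sample_gt_prob (DAgger-style), then run a single gradient
- # update on everything collected along the way.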
- input = e.get_common_data() - input = e.pre_common_data(input) - feed_dict = prepare_feed_dict(m.input_tensors['common'], input) - if dagger_sample_bn_false: - feed_dict[m.train_ops['batch_norm_is_training_op']] = False - common_data = sess.run(m.train_ops['common'], feed_dict=feed_dict) - - states = [] - state_features = [] - state_targets = [] - net_state_to_input = [] - step_data_cache = [] - executed_actions = [] - rewards = [] - action_sample_wts = [] - states.append(init_env_state) - - net_state = sess.run(m.train_ops['init_state'], feed_dict=feed_dict) - net_state = dict(zip(m.train_ops['state_names'], net_state)) - net_state_to_input.append(net_state) - for j in range(num_steps): - f = e.get_features(states[j], j) - f = e.pre_features(f) - f.update(net_state) - f['step_number'] = np.ones((1,1,1), dtype=np.int32)*j - state_features.append(f) - - feed_dict = prepare_feed_dict(m.input_tensors['step'], state_features[-1]) - optimal_action = e.get_optimal_action(states[j], j) - for x, v in zip(m.train_ops['common'], common_data): - feed_dict[x] = v - if dagger_sample_bn_false: - feed_dict[m.train_ops['batch_norm_is_training_op']] = False - outs = sess.run([m.train_ops['step'], m.sample_gt_prob_op, - m.train_ops['step_data_cache'], - m.train_ops['updated_state'], - m.train_ops['outputs']], feed_dict=feed_dict) - action_probs = outs[0] - sample_gt_prob = outs[1] - step_data_cache.append(dict(zip(m.train_ops['step_data_cache'], outs[2]))) - net_state = outs[3] - if hasattr(e, 'update_state'): - outputs = outs[4] - outputs = dict(zip(m.train_ops['output_names'], outputs)) - e.update_state(outputs, j) - state_targets.append(e.get_targets(states[j], j)) - - if j < num_steps-1: - # Sample from action_probs and optimal action. - action, action_sample_wt = sample_action( - rng_action, action_probs, optimal_action, sample_gt_prob, - m.sample_action_type, m.sample_action_combine_type) - next_state, reward = e.take_action(states[j], action, j) - executed_actions.append(action) - states.append(next_state) - rewards.append(reward) - action_sample_wts.append(action_sample_wt) - net_state = dict(zip(m.train_ops['state_names'], net_state)) - net_state_to_input.append(net_state) - - # Concatenate things together for training. 
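- # The per-step lists are stacked into batch-major arrays (hence the
- # transposes) and merged with the cached step data and the initial network
- # state into one feed dict for the train op.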
- rewards = np.array(rewards).T - action_sample_wts = np.array(action_sample_wts).T - executed_actions = np.array(executed_actions).T - all_state_targets = concat_state_x(state_targets, e.get_targets_name()) - all_state_features = concat_state_x(state_features, - e.get_features_name()+['step_number']) - # all_state_net = concat_state_x(net_state_to_input, - # m.train_ops['state_names']) - all_step_data_cache = concat_state_x(step_data_cache, - m.train_ops['step_data_cache']) - - dict_train = dict(input) - dict_train.update(all_state_features) - dict_train.update(all_state_targets) - # dict_train.update(all_state_net) - dict_train.update(net_state_to_input[0]) - dict_train.update(all_step_data_cache) - dict_train.update({'rewards': rewards, - 'action_sample_wts': action_sample_wts, - 'executed_actions': executed_actions}) - feed_dict = prepare_feed_dict(m.input_tensors['train'], dict_train) - for x in m.train_ops['step_data_cache']: - feed_dict[x] = all_step_data_cache[x] - if mode == 'train': - n_step = sess.run(global_step) - - if np.mod(n_step, train_display_interval) == 0: - total_loss, np_global_step, summary, print_summary = sess.run( - [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops], - feed_dict=feed_dict) - logging.error("") - else: - total_loss, np_global_step, summary = sess.run( - [train_op, global_step, s_ops.summary_ops], feed_dict=feed_dict) - - if writer is not None and summary is not None: - writer.add_summary(summary, np_global_step) - - should_stop = sess.run(m.should_stop_op) - - if mode != 'train': - arop = [[] for j in range(len(s_ops.additional_return_ops))] - for j in range(len(s_ops.additional_return_ops)): - if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]: - arop[j] = s_ops.additional_return_ops[j] - val = sess.run(arop, feed_dict=feed_dict) - val_additional_ops.append(val) - tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters), - type='time') - - if mode != 'train': - # Write the default val summaries. - summary, print_summary, np_global_step = sess.run( - [s_ops.summary_ops, s_ops.print_summary_ops, global_step]) - if writer is not None and summary is not None: - writer.add_summary(summary, np_global_step) - - # write custom validation ops - val_summarys = [] - val_additional_ops = zip(*val_additional_ops) - if len(s_ops.arop_eval_fns) > 0: - val_metric_summary = tf.summary.Summary() - for i in range(len(s_ops.arop_eval_fns)): - val_summary = None - if s_ops.arop_eval_fns[i] is not None: - val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i], - np_global_step, logdir, - val_metric_summary, - s_ops.arop_summary_iters[i]) - val_summarys.append(val_summary) - if writer is not None: - writer.add_summary(val_metric_summary, np_global_step) - - # Return the additional val_ops - total_loss = (val_additional_ops, val_summarys) - should_stop = None - - return total_loss, should_stop - -def train_step_custom_v2(sess, train_op, global_step, train_step_kwargs, - mode='train'): - m = train_step_kwargs['m'] - obj = train_step_kwargs['obj'] - rng = train_step_kwargs['rng'] - writer = train_step_kwargs['writer'] - iters = train_step_kwargs['iters'] - logdir = train_step_kwargs['logdir'] - train_display_interval = train_step_kwargs['train_display_interval'] - - s_ops = m.summary_ops[mode] - val_additional_ops = [] - - # Print all variables here. 
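- # Disabled debug block: when enabled it dumps the norm of every Adam variable
- # and flags NaNs, which helps diagnose diverging runs.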
- if False: - v = tf.get_collection(tf.GraphKeys.VARIABLES) - v_op = [_.value() for _ in v] - v_op_value = sess.run(v_op) - - filter = lambda x, y: 'Adam' in x.name - # filter = lambda x, y: np.is_any_nan(y) - ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)] - v = [v[i] for i in ind] - v_op_value = [v_op_value[i] for i in ind] - - for i in range(len(v)): - logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.', - v[i].name, np.any(np.isnan(v_op_value[i])), - np.linalg.norm(v_op_value[i])) - - tt = utils.Timer() - for i in range(iters): - tt.tic() - e = obj.sample_env(rng) - rngs = e.gen_rng(rng) - input_data = e.gen_data(*rngs) - input_data = e.pre_data(input_data) - feed_dict = prepare_feed_dict(m.input_tensors, input_data) - - if mode == 'train': - n_step = sess.run(global_step) - - if np.mod(n_step, train_display_interval) == 0: - total_loss, np_global_step, summary, print_summary = sess.run( - [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops], - feed_dict=feed_dict) - else: - total_loss, np_global_step, summary = sess.run( - [train_op, global_step, s_ops.summary_ops], - feed_dict=feed_dict) - - if writer is not None and summary is not None: - writer.add_summary(summary, np_global_step) - - should_stop = sess.run(m.should_stop_op) - - if mode != 'train': - arop = [[] for j in range(len(s_ops.additional_return_ops))] - for j in range(len(s_ops.additional_return_ops)): - if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]: - arop[j] = s_ops.additional_return_ops[j] - val = sess.run(arop, feed_dict=feed_dict) - val_additional_ops.append(val) - tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters), - type='time') - - if mode != 'train': - # Write the default val summaries. - summary, print_summary, np_global_step = sess.run( - [s_ops.summary_ops, s_ops.print_summary_ops, global_step]) - if writer is not None and summary is not None: - writer.add_summary(summary, np_global_step) - - # write custom validation ops - val_summarys = [] - val_additional_ops = zip(*val_additional_ops) - if len(s_ops.arop_eval_fns) > 0: - val_metric_summary = tf.summary.Summary() - for i in range(len(s_ops.arop_eval_fns)): - val_summary = None - if s_ops.arop_eval_fns[i] is not None: - val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i], - np_global_step, logdir, - val_metric_summary, - s_ops.arop_summary_iters[i]) - val_summarys.append(val_summary) - if writer is not None: - writer.add_summary(val_metric_summary, np_global_step) - - # Return the additional val_ops - total_loss = (val_additional_ops, val_summarys) - should_stop = None - - return total_loss, should_stop - -def train_step_custom(sess, train_op, global_step, train_step_kwargs, - mode='train'): - m = train_step_kwargs['m'] - params = train_step_kwargs['params'] - rng = train_step_kwargs['rng'] - writer = train_step_kwargs['writer'] - iters = train_step_kwargs['iters'] - gen_rng = train_step_kwargs['gen_rng'] - logdir = train_step_kwargs['logdir'] - gen_data = train_step_kwargs['gen_data'] - pre_data = train_step_kwargs['pre_data'] - train_display_interval = train_step_kwargs['train_display_interval'] - - val_additional_ops = [] - # Print all variables here. 
- if False: - v = tf.get_collection(tf.GraphKeys.VARIABLES) - for _ in v: - val = sess.run(_.value()) - logging.info('variable: %30s, is_any_nan: %5s, norm: %f.', _.name, - np.any(np.isnan(val)), np.linalg.norm(val)) - - for i in range(iters): - rngs = gen_rng(params, rng) - input_data = gen_data(params, *rngs) - input_data = pre_data(params, input_data) - feed_dict = prepare_feed_dict(m.input_tensors, input_data) - - if mode == 'train': - n_step = sess.run(global_step) - - if np.mod(n_step, train_display_interval) == 0: - total_loss, np_global_step, summary, print_summary = sess.run( - [train_op, global_step, m.summary_op[mode], m.print_summary_op[mode]], - feed_dict=feed_dict) - else: - total_loss, np_global_step, summary = sess.run( - [train_op, global_step, m.summary_op[mode]], - feed_dict=feed_dict) - - if writer is not None: - writer.add_summary(summary, np_global_step) - - should_stop = sess.run(m.should_stop_op) - - if mode == 'val': - val = sess.run(m.agg_update_op[mode] + m.additional_return_op[mode], - feed_dict=feed_dict) - val_additional_ops.append(val[len(m.agg_update_op[mode]):]) - - if mode == 'val': - summary, print_summary, np_global_step = sess.run( - [m.summary_op[mode], m.print_summary_op[mode], global_step]) - if writer is not None: - writer.add_summary(summary, np_global_step) - sess.run([m.agg_reset_op[mode]]) - - # write custom validation ops - if m.eval_metrics_fn[mode] is not None: - val_metric_summary = m.eval_metrics_fn[mode](val_additional_ops, - np_global_step, logdir) - if writer is not None: - writer.add_summary(val_metric_summary, np_global_step) - - total_loss = val_additional_ops - should_stop = None - - return total_loss, should_stop - -def setup_training(loss_op, initial_learning_rate, steps_per_decay, - learning_rate_decay, momentum, max_steps, - sync=False, adjust_lr_sync=True, - num_workers=1, replica_id=0, vars_to_optimize=None, - clip_gradient_norm=0, typ=None, momentum2=0.999, - adam_eps=1e-8): - if sync and adjust_lr_sync: - initial_learning_rate = initial_learning_rate * num_workers - max_steps = np.int(max_steps / num_workers) - steps_per_decay = np.int(steps_per_decay / num_workers) - - global_step_op = slim.get_or_create_global_step() - lr_op = tf.train.exponential_decay(initial_learning_rate, - global_step_op, steps_per_decay, learning_rate_decay, staircase=True) - if typ == 'sgd': - optimizer = tf.train.MomentumOptimizer(lr_op, momentum) - elif typ == 'adam': - optimizer = tf.train.AdamOptimizer(learning_rate=lr_op, beta1=momentum, - beta2=momentum2, epsilon=adam_eps) - - if sync: - - sync_optimizer = tf.train.SyncReplicasOptimizer(optimizer, - replicas_to_aggregate=num_workers, - replica_id=replica_id, - total_num_replicas=num_workers) - train_op = slim.learning.create_train_op(loss_op, sync_optimizer, - variables_to_train=vars_to_optimize, - clip_gradient_norm=clip_gradient_norm) - else: - sync_optimizer = None - train_op = slim.learning.create_train_op(loss_op, optimizer, - variables_to_train=vars_to_optimize, - clip_gradient_norm=clip_gradient_norm) - should_stop_op = tf.greater_equal(global_step_op, max_steps) - return lr_op, global_step_op, train_op, should_stop_op, optimizer, sync_optimizer - -def add_value_to_summary(metric_summary, tag, val, log=True, tag_str=None): - """Adds a scalar summary to the summary object. 
Optionally also logs to - logging.""" - new_value = metric_summary.value.add(); - new_value.tag = tag - new_value.simple_value = val - if log: - if tag_str is None: - tag_str = tag + '%f' - logging.info(tag_str, val) - -def add_scalar_summary_op(tensor, name=None, - summary_key='summaries', print_summary_key='print_summaries', prefix=''): - collections = [] - op = tf.summary.scalar(name, tensor, collections=collections) - if summary_key != print_summary_key: - tf.add_to_collection(summary_key, op) - - op = tf.Print(op, [tensor], ' {:-<25s}: '.format(name) + prefix) - tf.add_to_collection(print_summary_key, op) - return op - -def setup_inputs(inputs): - input_tensors = {} - input_shapes = {} - for (name, typ, sz) in inputs: - _ = tf.placeholder(typ, shape=sz, name=name) - input_tensors[name] = _ - input_shapes[name] = sz - return input_tensors, input_shapes - -def prepare_feed_dict(input_tensors, inputs): - feed_dict = {} - for n in input_tensors.keys(): - feed_dict[input_tensors[n]] = inputs[n].astype(input_tensors[n].dtype.as_numpy_dtype) - return feed_dict - -def simple_add_summaries(summarize_ops, summarize_names, - summary_key='summaries', - print_summary_key='print_summaries', prefix=''): - for op, name, in zip(summarize_ops, summarize_names): - add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) - - summary_op = tf.summary.merge_all(summary_key) - print_summary_op = tf.summary.merge_all(print_summary_key) - return summary_op, print_summary_op - -def add_summary_ops(m, summarize_ops, summarize_names, to_aggregate=None, - summary_key='summaries', - print_summary_key='print_summaries', prefix=''): - if type(to_aggregate) != list: - to_aggregate = [to_aggregate for _ in summarize_ops] - - # set up aggregating metrics - if np.any(to_aggregate): - agg_ops = [] - for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate): - if to_agg: - # agg_ops.append(slim.metrics.streaming_mean(op, return_reset_op=True)) - agg_ops.append(tf.contrib.metrics.streaming_mean(op)) - # agg_ops.append(tf.contrib.metrics.streaming_mean(op, return_reset_op=True)) - else: - agg_ops.append([None, None, None]) - - # agg_values_op, agg_update_op, agg_reset_op = zip(*agg_ops) - # agg_update_op = [x for x in agg_update_op if x is not None] - # agg_reset_op = [x for x in agg_reset_op if x is not None] - agg_values_op, agg_update_op = zip(*agg_ops) - agg_update_op = [x for x in agg_update_op if x is not None] - agg_reset_op = [tf.no_op()] - else: - agg_values_op = [None for _ in to_aggregate] - agg_update_op = [tf.no_op()] - agg_reset_op = [tf.no_op()] - - for op, name, to_agg, agg_op in zip(summarize_ops, summarize_names, to_aggregate, agg_values_op): - if to_agg: - add_scalar_summary_op(agg_op, name, summary_key, print_summary_key, prefix) - else: - add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) - - summary_op = tf.summary.merge_all(summary_key) - print_summary_op = tf.summary.merge_all(print_summary_key) - return summary_op, print_summary_op, agg_update_op, agg_reset_op - - - -def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N): - """Processes the collected outputs to compute AP for action prediction. - - Args: - outputs : List of scalar ops to summarize. - names : Name of the scalar ops. - global_step : global_step. - output_dir : where to store results. - metric_summary : summary object to add summaries to. - N : number of outputs to process. 
- """ - outs = [] - if N >= 0: - outputs = outputs[:N] - for i in range(len(outputs[0])): - scalar = np.array(map(lambda x: x[i], outputs)) - assert(scalar.ndim == 1) - add_value_to_summary(metric_summary, names[i], np.mean(scalar), - tag_str='{:>27s}: [{:s}]: %f'.format(names[i], '')) - outs.append(np.mean(scalar)) - return outs - -def get_default_summary_ops(): - return utils.Foo(summary_ops=None, print_summary_ops=None, - additional_return_ops=[], arop_summary_iters=[], - arop_eval_fns=[]) - - -def simple_summaries(summarize_ops, summarize_names, mode, to_aggregate=False, - scope_name='summary'): - - if type(to_aggregate) != list: - to_aggregate = [to_aggregate for _ in summarize_ops] - - summary_key = '{:s}_summaries'.format(mode) - print_summary_key = '{:s}_print_summaries'.format(mode) - prefix=' [{:s}]: '.format(mode) - - # Default ops for things that dont need to be aggregated. - if not np.all(to_aggregate): - for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate): - if not to_agg: - add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix) - summary_ops = tf.summary.merge_all(summary_key) - print_summary_ops = tf.summary.merge_all(print_summary_key) - else: - summary_ops = tf.no_op() - print_summary_ops = tf.no_op() - - # Default ops for things that dont need to be aggregated. - if np.any(to_aggregate): - additional_return_ops = [[summarize_ops[i] - for i, x in enumerate(to_aggregate )if x]] - arop_summary_iters = [-1] - s_names = ['{:s}/{:s}'.format(scope_name, summarize_names[i]) - for i, x in enumerate(to_aggregate) if x] - fn = lambda outputs, global_step, output_dir, metric_summary, N: \ - accum_val_ops(outputs, s_names, global_step, output_dir, metric_summary, - N) - arop_eval_fns = [fn] - else: - additional_return_ops = [] - arop_summary_iters = [] - arop_eval_fns = [] - return summary_ops, print_summary_ops, additional_return_ops, \ - arop_summary_iters, arop_eval_fns diff --git a/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py b/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py deleted file mode 100644 index ccf3ab23b..000000000 --- a/research/cognitive_mapping_and_planning/tfcode/vision_baseline_lstm.py +++ /dev/null @@ -1,533 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import numpy as np - - -import tensorflow as tf - -from tensorflow.contrib import slim - -import logging -from tensorflow.python.platform import app -from tensorflow.python.platform import flags -from src import utils -import src.file_utils as fu -import tfcode.nav_utils as nu -from tfcode import tf_utils - -setup_train_step_kwargs = nu.default_train_step_kwargs -compute_losses_multi_or = nu.compute_losses_multi_or -get_repr_from_image = nu.get_repr_from_image - -_save_d_at_t = nu.save_d_at_t -_save_all = nu.save_all -_eval_ap = nu.eval_ap -_eval_dist = nu.eval_dist -_plot_trajectories = nu.plot_trajectories - -def lstm_online(cell_fn, num_steps, inputs, state, varscope): - # inputs is B x num_steps x C, C channels. - # state is 2 tuple with B x 1 x C1, B x 1 x C2 - # Output state is always B x 1 x C - inputs = tf.unstack(inputs, axis=1, num=num_steps) - state = tf.unstack(state, axis=1, num=1)[0] - outputs = [] - - if num_steps > 1: - varscope.reuse_variables() - - for s in range(num_steps): - output, state = cell_fn(inputs[s], state) - outputs.append(output) - outputs = tf.stack(outputs, axis=1) - state = tf.stack([state], axis=1) - return outputs, state - -def _inputs(problem, lstm_states, lstm_state_dims): - # Set up inputs. - with tf.name_scope('inputs'): - n_views = problem.n_views - - inputs = [] - inputs.append(('orig_maps', tf.float32, - (problem.batch_size, 1, None, None, 1))) - inputs.append(('goal_loc', tf.float32, - (problem.batch_size, problem.num_goals, 2))) - - # For initing LSTM. - inputs.append(('rel_goal_loc_at_start', tf.float32, - (problem.batch_size, problem.num_goals, - problem.rel_goal_loc_dim))) - common_input_data, _ = tf_utils.setup_inputs(inputs) - - inputs = [] - inputs.append(('imgs', tf.float32, (problem.batch_size, None, n_views, - problem.img_height, problem.img_width, - problem.img_channels))) - # Goal location as a tuple of delta location and delta theta. 
- inputs.append(('rel_goal_loc', tf.float32, (problem.batch_size, None,
- problem.rel_goal_loc_dim)))
- if problem.outputs.visit_count:
- inputs.append(('visit_count', tf.int32, (problem.batch_size, None, 1)))
- inputs.append(('last_visit', tf.int32, (problem.batch_size, None, 1)))
-
- for i, (state, dim) in enumerate(zip(lstm_states, lstm_state_dims)):
- inputs.append((state, tf.float32, (problem.batch_size, 1, dim)))
-
- if problem.outputs.egomotion:
- inputs.append(('incremental_locs', tf.float32,
- (problem.batch_size, None, 2)))
- inputs.append(('incremental_thetas', tf.float32,
- (problem.batch_size, None, 1)))
-
- inputs.append(('step_number', tf.int32, (1, None, 1)))
- inputs.append(('node_ids', tf.int32, (problem.batch_size, None,
- problem.node_ids_dim)))
- inputs.append(('perturbs', tf.float32, (problem.batch_size, None,
- problem.perturbs_dim)))
-
- # Inputs used only for plotting result figures.
- inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2)))
- inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1)))
- step_input_data, _ = tf_utils.setup_inputs(inputs)
-
- inputs = []
- inputs.append(('executed_actions', tf.int32, (problem.batch_size, None)))
- inputs.append(('rewards', tf.float32, (problem.batch_size, None)))
- inputs.append(('action_sample_wts', tf.float32, (problem.batch_size, None)))
- inputs.append(('action', tf.int32, (problem.batch_size, None,
- problem.num_actions)))
- train_data, _ = tf_utils.setup_inputs(inputs)
- train_data.update(step_input_data)
- train_data.update(common_input_data)
- return common_input_data, step_input_data, train_data
-
-
-def _add_summaries(m, summary_mode, arop_full_summary_iters):
- summarize_ops = [m.lr_op, m.global_step_op, m.sample_gt_prob_op,
- m.total_loss_op, m.data_loss_op, m.reg_loss_op] + m.acc_ops
- summarize_names = ['lr', 'global_step', 'sample_gt_prob_op', 'total_loss',
- 'data_loss', 'reg_loss'] + \
- ['acc_{:d}'.format(i) for i in range(len(m.acc_ops))]
- to_aggregate = [0, 0, 0, 1, 1, 1] + [1]*len(m.acc_ops)
-
- scope_name = 'summary'
- with tf.name_scope(scope_name):
- s_ops = nu.add_default_summaries(summary_mode, arop_full_summary_iters,
- summarize_ops, summarize_names,
- to_aggregate, m.action_prob_op,
- m.input_tensors, scope_name=scope_name)
- m.summary_ops = {summary_mode: s_ops}
-
-# is_training is needed by the dropout in the embedding network below.
-def visit_count_fc(visit_count, last_visit, embed_neurons, wt_decay, fc_dropout,
- is_training=None):
- with tf.variable_scope('embed_visit_count'):
- visit_count = tf.reshape(visit_count, shape=[-1])
- last_visit = tf.reshape(last_visit, shape=[-1])
-
- visit_count = tf.clip_by_value(visit_count, clip_value_min=-1,
- clip_value_max=15)
- last_visit = tf.clip_by_value(last_visit, clip_value_min=-1,
- clip_value_max=15)
- visit_count = tf.one_hot(visit_count, depth=16, axis=1, dtype=tf.float32,
- on_value=10., off_value=0.)
- last_visit = tf.one_hot(last_visit, depth=16, axis=1, dtype=tf.float32,
- on_value=10., off_value=0.)
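- # Counts are clipped to [-1, 15] and one-hot embedded with depth 16 and
- # on_value=10.; a clipped value of -1 (never visited) falls outside [0, 16)
- # and therefore embeds to all zeros.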
- f = tf.concat([visit_count, last_visit], 1) - x, _ = tf_utils.fc_network( - f, neurons=embed_neurons, wt_decay=wt_decay, name='visit_count_embed', - offset=0, batch_norm_param=None, dropout_ratio=fc_dropout, - is_training=is_training) - return x - -def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out, - num_steps, state_input_op): - # returns state_name, state_init_op, updated_state_op, out_op - with tf.name_scope('reshape_'+name): - sh = x.get_shape().as_list() - x = tf.reshape(x, shape=[batch_size, -1, sh[-1]]) - - with tf.variable_scope(name) as varscope: - cell = tf.contrib.rnn.LSTMCell( - num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False, - num_proj=lstm_out, use_peepholes=True, - initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0), - cell_clip=None, proj_clip=None) - - sh = [batch_size, 1, lstm_dim+lstm_out] - state_init_op = tf.constant(0., dtype=tf.float32, shape=sh) - - fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope) - out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1), lambda: - fn(num_steps)) - - return name, state_init_op, updated_state_op, out_op - -def combine_setup(name, combine_type, embed_img, embed_goal, num_img_neuorons=None, - num_goal_neurons=None): - with tf.name_scope(name + '_' + combine_type): - if combine_type == 'add': - # Simple concat features from goal and image - out = embed_img + embed_goal - - elif combine_type == 'multiply': - # Multiply things together - re_embed_img = tf.reshape( - embed_img, shape=[-1, num_img_neuorons / num_goal_neurons, - num_goal_neurons]) - re_embed_goal = tf.reshape(embed_goal, shape=[-1, num_goal_neurons, 1]) - x = tf.matmul(re_embed_img, re_embed_goal, transpose_a=False, transpose_b=False) - out = slim.flatten(x) - elif combine_type == 'none' or combine_type == 'imgonly': - out = embed_img - elif combine_type == 'goalonly': - out = embed_goal - else: - logging.fatal('Undefined combine_type: %s', combine_type) - return out - - -def preprocess_egomotion(locs, thetas): - with tf.name_scope('pre_ego'): - pre_ego = tf.concat([locs, tf.sin(thetas), tf.cos(thetas)], 2) - sh = pre_ego.get_shape().as_list() - pre_ego = tf.reshape(pre_ego, [-1, sh[-1]]) - return pre_ego - -def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode): - # Set up the model. - tf.set_random_seed(args.solver.seed) - task_params = args.navtask.task_params - num_steps = task_params.num_steps - num_goals = task_params.num_goals - num_actions = task_params.num_actions - num_actions_ = num_actions - - n_views = task_params.n_views - - batch_norm_is_training_op = \ - tf.placeholder_with_default(batch_norm_is_training, shape=[], - name='batch_norm_is_training_op') - # Setup the inputs - m.input_tensors = {} - lstm_states = []; lstm_state_dims = []; - state_names = []; updated_state_ops = []; init_state_ops = []; - if args.arch.lstm_output: - lstm_states += ['lstm_output'] - lstm_state_dims += [args.arch.lstm_output_dim+task_params.num_actions] - if args.arch.lstm_ego: - lstm_states += ['lstm_ego'] - lstm_state_dims += [args.arch.lstm_ego_dim + args.arch.lstm_ego_out] - lstm_states += ['lstm_img'] - lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out] - elif args.arch.lstm_img: - # An LSTM only on the image - lstm_states += ['lstm_img'] - lstm_state_dims += [args.arch.lstm_img_dim + args.arch.lstm_img_out] - else: - # No LSTMs involved here. 
-    pass
-
-  m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \
-      _inputs(task_params, lstm_states, lstm_state_dims)
-
-  with tf.name_scope('check_size'):
-    is_single_step = tf.equal(tf.unstack(tf.shape(m.input_tensors['step']['imgs']),
-                                         num=6)[1], 1)
-
-  images_reshaped = tf.reshape(m.input_tensors['step']['imgs'],
-      shape=[-1, task_params.img_height, task_params.img_width,
-             task_params.img_channels], name='re_image')
-
-  rel_goal_loc_reshaped = tf.reshape(m.input_tensors['step']['rel_goal_loc'],
-      shape=[-1, task_params.rel_goal_loc_dim], name='re_rel_goal_loc')
-
-  x, vars_ = get_repr_from_image(
-      images_reshaped, task_params.modalities, task_params.data_augment,
-      args.arch.encoder, args.solver.freeze_conv, args.solver.wt_decay,
-      is_training)
-
-  # Reshape so that the features can be accumulated over time steps for
-  # faster backprop.
-  sh_before = x.get_shape().as_list()
-  m.encoder_output = tf.reshape(
-      x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:])
-  x = tf.reshape(m.encoder_output, shape=[-1] + sh_before[1:])
-
-  # Add a 1x1 conv layer to reduce dimensions before the fc layers.
-  if args.arch.dim_reduce_neurons > 0:
-    ks = 1
-    neurons = args.arch.dim_reduce_neurons
-    init_var = np.sqrt(2.0 / (ks**2) / neurons)
-    batch_norm_param = args.arch.batch_norm_param
-    batch_norm_param['is_training'] = batch_norm_is_training_op
-    m.conv_feat = slim.conv2d(
-        x, neurons, kernel_size=ks, stride=1, normalizer_fn=slim.batch_norm,
-        normalizer_params=batch_norm_param, padding='SAME', scope='dim_reduce',
-        weights_regularizer=slim.l2_regularizer(args.solver.wt_decay),
-        weights_initializer=tf.random_normal_initializer(stddev=init_var))
-    reshape_conv_feat = slim.flatten(m.conv_feat)
-    sh = reshape_conv_feat.get_shape().as_list()
-    m.reshape_conv_feat = tf.reshape(reshape_conv_feat,
-                                     shape=[-1, sh[1] * n_views])
-
-  # Restore the conv weights from a pretrained checkpoint, if provided.
-  if args.solver.pretrained_path is not None:
-    m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path,
-                                               vars_)
-  else:
-    m.init_fn = None
-
-  # Hit the goal_location with a bunch of fully connected layers, to embed it
-  # into some space.
-  with tf.variable_scope('embed_goal'):
-    batch_norm_param = args.arch.batch_norm_param
-    batch_norm_param['is_training'] = batch_norm_is_training_op
-    m.embed_goal, _ = tf_utils.fc_network(
-        rel_goal_loc_reshaped, neurons=args.arch.goal_embed_neurons,
-        wt_decay=args.solver.wt_decay, name='goal_embed', offset=0,
-        batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout,
-        is_training=is_training)
-
-  if args.arch.embed_goal_for_state:
-    with tf.variable_scope('embed_goal_for_state'):
-      batch_norm_param = args.arch.batch_norm_param
-      batch_norm_param['is_training'] = batch_norm_is_training_op
-      m.embed_goal_for_state, _ = tf_utils.fc_network(
-          m.input_tensors['common']['rel_goal_loc_at_start'][:,0,:],
-          neurons=args.arch.goal_embed_neurons, wt_decay=args.solver.wt_decay,
-          name='goal_embed', offset=0, batch_norm_param=batch_norm_param,
-          dropout_ratio=args.arch.fc_dropout, is_training=is_training)
-
-  # Hit the image features with a bunch of fully connected layers, to embed
-  # them into some space.
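-  # (this mirrors the goal embedding above, applied to the conv features
-  # flattened and concatenated across the n_views camera views).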
- with tf.variable_scope('embed_img'): - batch_norm_param = args.arch.batch_norm_param - batch_norm_param['is_training'] = batch_norm_is_training_op - m.embed_img, _ = tf_utils.fc_network( - m.reshape_conv_feat, neurons=args.arch.img_embed_neurons, - wt_decay=args.solver.wt_decay, name='img_embed', offset=0, - batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout, - is_training=is_training) - - # For lstm_ego, and lstm_image, embed the ego motion, accumulate it into an - # LSTM, combine with image features and accumulate those in an LSTM. Finally - # combine what you get from the image LSTM with the goal to output an action. - if args.arch.lstm_ego: - ego_reshaped = preprocess_egomotion(m.input_tensors['step']['incremental_locs'], - m.input_tensors['step']['incremental_thetas']) - with tf.variable_scope('embed_ego'): - batch_norm_param = args.arch.batch_norm_param - batch_norm_param['is_training'] = batch_norm_is_training_op - m.embed_ego, _ = tf_utils.fc_network( - ego_reshaped, neurons=args.arch.ego_embed_neurons, - wt_decay=args.solver.wt_decay, name='ego_embed', offset=0, - batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout, - is_training=is_training) - - state_name, state_init_op, updated_state_op, out_op = lstm_setup( - 'lstm_ego', m.embed_ego, task_params.batch_size, is_single_step, - args.arch.lstm_ego_dim, args.arch.lstm_ego_out, num_steps*num_goals, - m.input_tensors['step']['lstm_ego']) - state_names += [state_name] - init_state_ops += [state_init_op] - updated_state_ops += [updated_state_op] - - # Combine the output with the vision features. - m.img_ego_op = combine_setup('img_ego', args.arch.combine_type_ego, - m.embed_img, out_op, - args.arch.img_embed_neurons[-1], - args.arch.lstm_ego_out) - - # LSTM on these vision features. - state_name, state_init_op, updated_state_op, out_op = lstm_setup( - 'lstm_img', m.img_ego_op, task_params.batch_size, is_single_step, - args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals, - m.input_tensors['step']['lstm_img']) - state_names += [state_name] - init_state_ops += [state_init_op] - updated_state_ops += [updated_state_op] - - m.img_for_goal = out_op - num_img_for_goal_neurons = args.arch.lstm_img_out - - elif args.arch.lstm_img: - # LSTM on just the image features. - state_name, state_init_op, updated_state_op, out_op = lstm_setup( - 'lstm_img', m.embed_img, task_params.batch_size, is_single_step, - args.arch.lstm_img_dim, args.arch.lstm_img_out, num_steps*num_goals, - m.input_tensors['step']['lstm_img']) - state_names += [state_name] - init_state_ops += [state_init_op] - updated_state_ops += [updated_state_op] - m.img_for_goal = out_op - num_img_for_goal_neurons = args.arch.lstm_img_out - - else: - m.img_for_goal = m.embed_img - num_img_for_goal_neurons = args.arch.img_embed_neurons[-1] - - - if args.arch.use_visit_count: - m.embed_visit_count = visit_count_fc( - m.input_tensors['step']['visit_count'], - m.input_tensors['step']['last_visit'], args.arch.goal_embed_neurons, - args.solver.wt_decay, args.arch.fc_dropout, is_training=is_training) - m.embed_goal = m.embed_goal + m.embed_visit_count - - m.combined_f = combine_setup('img_goal', args.arch.combine_type, - m.img_for_goal, m.embed_goal, - num_img_for_goal_neurons, - args.arch.goal_embed_neurons[-1]) - - # LSTM on the combined representation. - if args.arch.lstm_output: - name = 'lstm_output' - # A few fully connected layers here. 
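-    # (reduce the combined image/goal features to pred_neurons before feeding
-    # them to the output LSTM).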
- with tf.variable_scope('action_pred'): - batch_norm_param = args.arch.batch_norm_param - batch_norm_param['is_training'] = batch_norm_is_training_op - x, _ = tf_utils.fc_network( - m.combined_f, neurons=args.arch.pred_neurons, - wt_decay=args.solver.wt_decay, name='pred', offset=0, - batch_norm_param=batch_norm_param, dropout_ratio=args.arch.fc_dropout) - - if args.arch.lstm_output_init_state_from_goal: - # Use the goal embedding to initialize the LSTM state. - # UGLY CLUGGY HACK: if this is doing computation for a single time step - # then this will not involve back prop, so we can use the state input from - # the feed dict, otherwise we compute the state representation from the - # goal and feed that in. Necessary for using goal location to generate the - # state representation. - m.embed_goal_for_state = tf.expand_dims(m.embed_goal_for_state, dim=1) - state_op = tf.cond(is_single_step, lambda: m.input_tensors['step'][name], - lambda: m.embed_goal_for_state) - state_name, state_init_op, updated_state_op, out_op = lstm_setup( - name, x, task_params.batch_size, is_single_step, - args.arch.lstm_output_dim, - num_actions_, - num_steps*num_goals, state_op) - init_state_ops += [m.embed_goal_for_state] - else: - state_op = m.input_tensors['step'][name] - state_name, state_init_op, updated_state_op, out_op = lstm_setup( - name, x, task_params.batch_size, is_single_step, - args.arch.lstm_output_dim, - num_actions_, num_steps*num_goals, state_op) - init_state_ops += [state_init_op] - - state_names += [state_name] - updated_state_ops += [updated_state_op] - - out_op = tf.reshape(out_op, shape=[-1, num_actions_]) - if num_actions_ > num_actions: - m.action_logits_op = out_op[:,:num_actions] - m.baseline_op = out_op[:,num_actions:] - else: - m.action_logits_op = out_op - m.baseline_op = None - m.action_prob_op = tf.nn.softmax(m.action_logits_op) - - else: - # A few fully connected layers here. - with tf.variable_scope('action_pred'): - batch_norm_param = args.arch.batch_norm_param - batch_norm_param['is_training'] = batch_norm_is_training_op - out_op, _ = tf_utils.fc_network( - m.combined_f, neurons=args.arch.pred_neurons, - wt_decay=args.solver.wt_decay, name='pred', offset=0, - num_pred=num_actions_, - batch_norm_param=batch_norm_param, - dropout_ratio=args.arch.fc_dropout, is_training=is_training) - if num_actions_ > num_actions: - m.action_logits_op = out_op[:,:num_actions] - m.baseline_op = out_op[:,num_actions:] - else: - m.action_logits_op = out_op - m.baseline_op = None - m.action_prob_op = tf.nn.softmax(m.action_logits_op) - - m.train_ops = {} - m.train_ops['step'] = m.action_prob_op - m.train_ops['common'] = [m.input_tensors['common']['orig_maps'], - m.input_tensors['common']['goal_loc'], - m.input_tensors['common']['rel_goal_loc_at_start']] - m.train_ops['state_names'] = state_names - m.train_ops['init_state'] = init_state_ops - m.train_ops['updated_state'] = updated_state_ops - m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op - - # Flat list of ops which cache the step data. 
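-  # (when the conv tower is frozen the encoder output is cached, so the image
-  # forward pass is not recomputed during backprop; see the branch below.)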
- m.train_ops['step_data_cache'] = [tf.no_op()] - - if args.solver.freeze_conv: - m.train_ops['step_data_cache'] = [m.encoder_output] - else: - m.train_ops['step_data_cache'] = [] - - ewma_decay = 0.99 if is_training else 0.0 - weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32, - name='weight') - - m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \ - compute_losses_multi_or( - m.action_logits_op, m.input_tensors['train']['action'], - weights=weight, num_actions=num_actions, - data_loss_wt=args.solver.data_loss_wt, - reg_loss_wt=args.solver.reg_loss_wt, ewma_decay=ewma_decay) - - - if args.solver.freeze_conv: - vars_to_optimize = list(set(tf.trainable_variables()) - set(vars_)) - else: - vars_to_optimize = None - - m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \ - m.sync_optimizer = tf_utils.setup_training( - m.total_loss_op, - args.solver.initial_learning_rate, - args.solver.steps_per_decay, - args.solver.learning_rate_decay, - args.solver.momentum, - args.solver.max_steps, - args.solver.sync, - args.solver.adjust_lr_sync, - args.solver.num_workers, - args.solver.task, - vars_to_optimize=vars_to_optimize, - clip_gradient_norm=args.solver.clip_gradient_norm, - typ=args.solver.typ, momentum2=args.solver.momentum2, - adam_eps=args.solver.adam_eps) - - - if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay': - m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k, - m.global_step_op) - elif args.arch.sample_gt_prob_type == 'zero': - m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32) - elif args.arch.sample_gt_prob_type.split('_')[0] == 'step': - step = int(args.arch.sample_gt_prob_type.split('_')[1]) - m.sample_gt_prob_op = tf_utils.step_gt_prob( - step, m.input_tensors['step']['step_number'][0,0,0]) - - m.sample_action_type = args.arch.action_sample_type - m.sample_action_combine_type = args.arch.action_sample_combine_type - _add_summaries(m, summary_mode, args.summary.arop_full_summary_iters) - - m.init_op = tf.group(tf.global_variables_initializer(), - tf.local_variables_initializer()) - m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4, - write_version=tf.train.SaverDef.V2) - - return m diff --git a/research/compression/README.md b/research/compression/README.md deleted file mode 100644 index 7f431b5ea..000000000 --- a/research/compression/README.md +++ /dev/null @@ -1,19 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Compression with Neural Networks - -This is a [TensorFlow](http://www.tensorflow.org/) model repo containing -research on compression with neural networks. This repo currently contains -code for the following papers: - -[Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148) - -## Organization -[Image Encoder](image_encoder/): Encoding and decoding images into their binary representation. - -[Entropy Coder](entropy_coder/): Lossless compression of the binary representation. - -## Contact Info -Model repository maintained by Nick Johnston ([nmjohn](https://github.com/nmjohn)). 
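A note on the navigation trainer above: `tf_utils.inverse_sigmoid_decay` is
not included in this patch, so the exact schedule behind `sample_gt_prob_op`
cannot be verified here. A minimal sketch of the usual inverse-sigmoid
scheduled-sampling schedule, assuming that is what the helper implements:

    import numpy as np

    def inverse_sigmoid_decay(k, step):
      # Probability of sampling the ground-truth action; starts near 1.0 and
      # decays towards 0.0 as training progresses, more slowly for larger k.
      # (Assumed form; the real tf_utils helper is not shown in this patch.)
      return k / (k + np.exp(step / float(k)))

    # For example, with k=100: p(0) ~= 0.99, p(500) ~= 0.40, p(1000) ~= 0.005.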
diff --git a/research/compression/entropy_coder/README.md b/research/compression/entropy_coder/README.md
deleted file mode 100644
index 59e889990..000000000
--- a/research/compression/entropy_coder/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# Neural net based entropy coding
-
-This is a [TensorFlow](http://www.tensorflow.org/) model for additional
-lossless compression of bitstreams generated by neural net based image
-encoders as described in
-[https://arxiv.org/abs/1703.10114](https://arxiv.org/abs/1703.10114).
-
-More specifically, the entropy coder aims at further compressing binary
-codes which have a 3D tensor structure with:
-
-* the first two dimensions of the tensors corresponding to the height and
-the width of the binary codes,
-* the last dimension being the depth of the codes. The last dimension can be
-sliced into N groups of K, where each additional group is used by the image
-decoder to add more details to the reconstructed image.
-
-The code in this directory only contains the underlying code probability model
-but does not perform the actual compression using arithmetic coding.
-The code probability model is enough to compute the theoretical compression
-ratio.
-
-
-## Prerequisites
-The only software requirement for running the encoder and decoder is having
-Tensorflow installed.
-
-You will also need to add the top level source directory of the entropy coder
-to your `PYTHONPATH`, for example:
-
-`export PYTHONPATH=${PYTHONPATH}:/tmp/models/compression`
-
-
-## Training the entropy coder
-
-### Synthetic dataset
-If you do not have a training dataset, there is a simple code generative model
-that you can use to generate a dataset and play with the entropy coder.
-The generative model is located under dataset/gen\_synthetic\_dataset.py. Note
-that this simple generative model is not going to give good results on real
-images as it is not supposed to be close to the statistics of the binary
-representation of encoded images. Consider it as a toy dataset, no more, no
-less.
-
-To generate a synthetic dataset with 20000 samples:
-
-`mkdir -p /tmp/dataset`
-
-`python ./dataset/gen_synthetic_dataset.py --dataset_dir=/tmp/dataset/
---count=20000`
-
-Note that the generator has not been optimized at all, so generating the
-synthetic dataset is currently pretty slow.
-
-### Training
-
-If you just want to play with the entropy coder trainer, here is the command
-line that can be used to train the entropy coder on the synthetic dataset:
-
-`mkdir -p /tmp/entropy_coder_train`
-
-`python ./core/entropy_coder_train.py --task=0
---train_dir=/tmp/entropy_coder_train/
---model=progressive
---model_config=./configs/synthetic/model_config.json
---train_config=./configs/synthetic/train_config.json
---input_config=./configs/synthetic/input_config.json
-`
-
-Training is configured using 3 JSON files:
-
-* One file is used to configure the underlying entropy coder model.
-  Currently, only the *progressive* model is supported.
-  This model takes 2 mandatory parameters and an optional one:
-  * `layer_depth`: the number of bits per layer (a.k.a. iteration).
-    Background: the image decoder takes each layer to add more detail
-    to the image.
-  * `layer_count`: the maximum number of layers that should be supported
-    by the model. This should be equal to or greater than the maximum number
-    of layers in the input binary codes.
-  * `coded_layer_count`: This can be used to consider only partial codes,
-    keeping only the first `coded_layer_count` layers and ignoring the
-    remaining layers. If left empty, the binary codes are left unchanged.
-* One file to configure the training, including the batch size, learning
-  rate, and decay schedule. The meaning of the parameters is pretty
-  straightforward. Note that this file is only used during training and is
-  not needed during inference.
-* One file to specify the input dataset to use during training.
-  The dataset is formatted using tf.RecordIO.
-
-
-## Inference: file size after entropy coding.
-
-### Using a synthetic sample
-
-Here is the command line to generate a single synthetic sample formatted
-in the same way as what is provided by the image encoder:
-
-`python ./dataset/gen_synthetic_single.py
---sample_filename=/tmp/dataset/sample_0000.npz`
-
-To actually compute the additional compression ratio using the entropy coder
-trained in the previous step:
-
-`python ./core/entropy_coder_single.py
---model=progressive
---model_config=./configs/synthetic/model_config.json
---input_codes=/tmp/dataset/sample_0000.npz
---checkpoint=/tmp/entropy_coder_train/model.ckpt-209078`
-
-where the checkpoint number should be adjusted accordingly.
diff --git a/research/compression/entropy_coder/__init__.py b/research/compression/entropy_coder/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/compression/entropy_coder/all_models/__init__.py b/research/compression/entropy_coder/all_models/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/compression/entropy_coder/all_models/all_models.py b/research/compression/entropy_coder/all_models/all_models.py
deleted file mode 100644
index e376dac73..000000000
--- a/research/compression/entropy_coder/all_models/all_models.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Import and register all the entropy coder models."""
-
-# pylint: disable=unused-import
-from entropy_coder.progressive import progressive
diff --git a/research/compression/entropy_coder/all_models/all_models_test.py b/research/compression/entropy_coder/all_models/all_models_test.py
deleted file mode 100644
index b8aff504a..000000000
--- a/research/compression/entropy_coder/all_models/all_models_test.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Basic test of all registered models.""" - -import tensorflow as tf - -# pylint: disable=unused-import -import all_models -# pylint: enable=unused-import -from entropy_coder.model import model_factory - - -class AllModelsTest(tf.test.TestCase): - - def testBuildModelForTraining(self): - factory = model_factory.GetModelRegistry() - model_names = factory.GetAvailableModels() - - for m in model_names: - tf.reset_default_graph() - - global_step = tf.Variable(tf.zeros([], dtype=tf.int64), - trainable=False, - name='global_step') - - optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1) - - batch_size = 3 - height = 40 - width = 20 - depth = 5 - binary_codes = tf.placeholder(dtype=tf.float32, - shape=[batch_size, height, width, depth]) - - # Create a model with the default configuration. - print('Creating model: {}'.format(m)) - model = factory.CreateModel(m) - model.Initialize(global_step, - optimizer, - model.GetConfigStringForUnitTest()) - self.assertTrue(model.loss is None, 'model: {}'.format(m)) - self.assertTrue(model.train_op is None, 'model: {}'.format(m)) - self.assertTrue(model.average_code_length is None, 'model: {}'.format(m)) - - # Build the Tensorflow graph corresponding to the model. - model.BuildGraph(binary_codes) - self.assertTrue(model.loss is not None, 'model: {}'.format(m)) - self.assertTrue(model.average_code_length is not None, - 'model: {}'.format(m)) - if model.train_op is None: - print('Model {} is not trainable'.format(m)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/configs/gru_prime3/model_config.json b/research/compression/entropy_coder/configs/gru_prime3/model_config.json deleted file mode 100644 index cf63a4c45..000000000 --- a/research/compression/entropy_coder/configs/gru_prime3/model_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "layer_count": 16, - "layer_depth": 32 -} diff --git a/research/compression/entropy_coder/configs/synthetic/input_config.json b/research/compression/entropy_coder/configs/synthetic/input_config.json deleted file mode 100644 index 18455e651..000000000 --- a/research/compression/entropy_coder/configs/synthetic/input_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "data": "/tmp/dataset/synthetic_dataset", - "unique_code_size": true -} diff --git a/research/compression/entropy_coder/configs/synthetic/model_config.json b/research/compression/entropy_coder/configs/synthetic/model_config.json deleted file mode 100644 index c6f1f3e11..000000000 --- a/research/compression/entropy_coder/configs/synthetic/model_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "layer_depth": 2, - "layer_count": 8 -} diff --git a/research/compression/entropy_coder/configs/synthetic/train_config.json b/research/compression/entropy_coder/configs/synthetic/train_config.json deleted file mode 100644 index 79e4909fd..000000000 --- a/research/compression/entropy_coder/configs/synthetic/train_config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "batch_size": 4, - "learning_rate": 0.1, - "decay_rate": 0.9, - "samples_per_decay": 20000 -} diff --git a/research/compression/entropy_coder/core/code_loader.py b/research/compression/entropy_coder/core/code_loader.py deleted file mode 100644 index 603ab724a..000000000 --- a/research/compression/entropy_coder/core/code_loader.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2017 The 
TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Load binary codes stored as tf.Example in a TFRecord table.""" - -import tensorflow as tf - - -def ReadFirstCode(dataset): - """Read the first example from a binary code RecordIO table.""" - for record in tf.python_io.tf_record_iterator(dataset): - tf_example = tf.train.Example() - tf_example.ParseFromString(record) - break - return tf_example - - -def LoadBinaryCode(input_config, batch_size): - """Load a batch of binary codes from a tf.Example dataset. - - Args: - input_config: An InputConfig proto containing the input configuration. - batch_size: Output batch size of examples. - - Returns: - A batched tensor of binary codes. - """ - data = input_config.data - - # TODO: Possibly use multiple files (instead of just one). - file_list = [data] - filename_queue = tf.train.string_input_producer(file_list, - capacity=4) - reader = tf.TFRecordReader() - _, values = reader.read(filename_queue) - - serialized_example = tf.reshape(values, shape=[1]) - serialized_features = { - 'code_shape': tf.FixedLenFeature([3], - dtype=tf.int64), - 'code': tf.VarLenFeature(tf.float32), - } - example = tf.parse_example(serialized_example, serialized_features) - - # 3D shape: height x width x binary_code_depth - z = example['code_shape'] - code_shape = tf.reshape(tf.cast(z, tf.int32), [3]) - # Un-flatten the binary codes. - code = tf.reshape(tf.sparse_tensor_to_dense(example['code']), code_shape) - - queue_size = 10 - queue = tf.PaddingFIFOQueue( - queue_size + 3 * batch_size, - dtypes=[code.dtype], - shapes=[[None, None, None]]) - enqueue_op = queue.enqueue([code]) - dequeue_code = queue.dequeue_many(batch_size) - queue_runner = tf.train.queue_runner.QueueRunner(queue, [enqueue_op]) - tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, queue_runner) - - return dequeue_code diff --git a/research/compression/entropy_coder/core/config_helper.py b/research/compression/entropy_coder/core/config_helper.py deleted file mode 100644 index a7d949e32..000000000 --- a/research/compression/entropy_coder/core/config_helper.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Helper functions used in both train and inference.""" - -import json -import os.path - -import tensorflow as tf - - -def GetConfigString(config_file): - config_string = '' - if config_file is not None: - config_string = open(config_file).read() - return config_string - - -class InputConfig(object): - - def __init__(self, config_string): - config = json.loads(config_string) - self.data = config["data"] - self.unique_code_size = config["unique_code_size"] - - -class TrainConfig(object): - - def __init__(self, config_string): - config = json.loads(config_string) - self.batch_size = config["batch_size"] - self.learning_rate = config["learning_rate"] - self.decay_rate = config["decay_rate"] - self.samples_per_decay = config["samples_per_decay"] - - -def SaveConfig(directory, filename, config_string): - path = os.path.join(directory, filename) - with tf.gfile.Open(path, mode='w') as f: - f.write(config_string) diff --git a/research/compression/entropy_coder/core/entropy_coder_single.py b/research/compression/entropy_coder/core/entropy_coder_single.py deleted file mode 100644 index 8a61b488b..000000000 --- a/research/compression/entropy_coder/core/entropy_coder_single.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Compute the additional compression ratio after entropy coding.""" - -import io -import os - -import numpy as np -import tensorflow as tf - -import config_helper - -# pylint: disable=unused-import -from entropy_coder.all_models import all_models -# pylint: enable=unused-import -from entropy_coder.model import model_factory - - -# Checkpoint used to restore the model parameters. -tf.app.flags.DEFINE_string('checkpoint', None, - """Model checkpoint.""") - -# Model selection and configuration. -tf.app.flags.DEFINE_string('model', None, """Underlying encoder model.""") -tf.app.flags.DEFINE_string('model_config', None, - """Model config protobuf given as text file.""") - -# File holding the binary codes. 
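-# The file is expected to be an .npz archive with a 'shape' entry (the shape
-# of the unpacked code tensor) and a 'codes' entry (the codes bit-packed with
-# np.packbits), matching what dataset/gen_synthetic_single.py writes out.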
-tf.flags.DEFINE_string('input_codes', None, 'Location of binary code file.')
-
-FLAGS = tf.flags.FLAGS
-
-
-def main(_):
-  if (FLAGS.input_codes is None or FLAGS.model is None):
-    print ('\nUsage: python entropy_coder_single.py --model=progressive '
-           '--model_config=model_config.json '
-           '--iteration=15\n\n')
-    return
-
-  #if FLAGS.iteration < -1 or FLAGS.iteration > 15:
-  #  print ('\n--iteration must be between 0 and 15 inclusive, or -1 to infer '
-  #         'from file.\n')
-  #  return
-  #iteration = FLAGS.iteration
-
-  if not tf.gfile.Exists(FLAGS.input_codes):
-    print('\nInput codes not found.\n')
-    return
-
-  with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
-    contents = code_file.read()
-    loaded_codes = np.load(io.BytesIO(contents))
-    assert 'codes' in loaded_codes.files and 'shape' in loaded_codes.files
-    loaded_shape = loaded_codes['shape']
-    loaded_array = loaded_codes['codes']
-
-  # Unpack and recover code shapes.
-  unpacked_codes = np.reshape(np.unpackbits(loaded_array)
-                              [:np.prod(loaded_shape)],
-                              loaded_shape)
-
-  numpy_int_codes = unpacked_codes.transpose([1, 2, 3, 0, 4])
-  numpy_int_codes = numpy_int_codes.reshape([numpy_int_codes.shape[0],
-                                             numpy_int_codes.shape[1],
-                                             numpy_int_codes.shape[2],
-                                             -1])
-  numpy_codes = numpy_int_codes.astype(np.float32) * 2.0 - 1.0
-
-  with tf.Graph().as_default() as graph:
-    # TF tensor to hold the binary codes to losslessly compress.
-    batch_size = 1
-    codes = tf.placeholder(tf.float32, shape=numpy_codes.shape)
-
-    # Create the entropy coder model.
-    global_step = None
-    optimizer = None
-    model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)
-    model_config_string = config_helper.GetConfigString(FLAGS.model_config)
-    model.Initialize(global_step, optimizer, model_config_string)
-    model.BuildGraph(codes)
-
-    saver = tf.train.Saver(sharded=True, keep_checkpoint_every_n_hours=12.0)
-
-    with tf.Session(graph=graph) as sess:
-      # Initialize local variables.
-      sess.run(tf.local_variables_initializer())
-
-      # Restore model variables.
-      saver.restore(sess, FLAGS.checkpoint)
-
-      tf_tensors = {
-          'code_length': model.average_code_length
-      }
-      feed_dict = {codes: numpy_codes}
-      np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)
-
-      print('Additional compression ratio: {}'.format(
-          np_tensors['code_length']))
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/compression/entropy_coder/core/entropy_coder_train.py b/research/compression/entropy_coder/core/entropy_coder_train.py
deleted file mode 100644
index 27c489037..000000000
--- a/research/compression/entropy_coder/core/entropy_coder_train.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Train an entropy coder model.""" - -import time - -import tensorflow as tf - -import code_loader -import config_helper - -# pylint: disable=unused-import -from entropy_coder.all_models import all_models -# pylint: enable=unused-import -from entropy_coder.model import model_factory - - -FLAGS = tf.app.flags.FLAGS - -# Hardware resources configuration. -tf.app.flags.DEFINE_string('master', '', - """Name of the TensorFlow master to use.""") -tf.app.flags.DEFINE_string('train_dir', None, - """Directory where to write event logs.""") -tf.app.flags.DEFINE_integer('task', None, - """Task id of the replica running the training.""") -tf.app.flags.DEFINE_integer('ps_tasks', 0, """Number of tasks in the ps job. - If 0 no ps job is used.""") - -# Model selection and configuration. -tf.app.flags.DEFINE_string('model', None, """Underlying encoder model.""") -tf.app.flags.DEFINE_string('model_config', None, - """Model config protobuf given as text file.""") - -# Training data and parameters configuration. -tf.app.flags.DEFINE_string('input_config', None, - """Path to the training input config file.""") -tf.app.flags.DEFINE_string('train_config', None, - """Path to the training experiment config file.""") - - -def train(): - if FLAGS.train_dir is None: - raise ValueError('Parameter train_dir must be provided') - if FLAGS.task is None: - raise ValueError('Parameter task must be provided') - if FLAGS.model is None: - raise ValueError('Parameter model must be provided') - - input_config_string = config_helper.GetConfigString(FLAGS.input_config) - input_config = config_helper.InputConfig(input_config_string) - - # Training parameters. - train_config_string = config_helper.GetConfigString(FLAGS.train_config) - train_config = config_helper.TrainConfig(train_config_string) - - batch_size = train_config.batch_size - initial_learning_rate = train_config.learning_rate - decay_rate = train_config.decay_rate - samples_per_decay = train_config.samples_per_decay - - # Parameters for learning-rate decay. - # The formula is decay_rate ** floor(steps / decay_steps). - decay_steps = samples_per_decay / batch_size - decay_steps = max(decay_steps, 1) - - first_code = code_loader.ReadFirstCode(input_config.data) - first_code_height = ( - first_code.features.feature['code_shape'].int64_list.value[0]) - first_code_width = ( - first_code.features.feature['code_shape'].int64_list.value[1]) - max_bit_depth = ( - first_code.features.feature['code_shape'].int64_list.value[2]) - print('Maximum code depth: {}'.format(max_bit_depth)) - - with tf.Graph().as_default(): - ps_ops = ["Variable", "VariableV2", "AutoReloadVariable", "VarHandleOp"] - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks, - ps_ops=ps_ops)): - codes = code_loader.LoadBinaryCode( - input_config=input_config, - batch_size=batch_size) - if input_config.unique_code_size: - print('Input code size: {} x {}'.format(first_code_height, - first_code_width)) - codes.set_shape( - [batch_size, first_code_height, first_code_width, max_bit_depth]) - else: - codes.set_shape([batch_size, None, None, max_bit_depth]) - codes_effective_shape = tf.shape(codes) - - global_step = tf.contrib.framework.create_global_step() - - # Apply learning-rate decay. 
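-      # With staircase=True this computes
-      # initial_learning_rate * decay_rate ** floor(global_step / decay_steps).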
- learning_rate = tf.train.exponential_decay( - learning_rate=initial_learning_rate, - global_step=global_step, - decay_steps=decay_steps, - decay_rate=decay_rate, - staircase=True) - tf.summary.scalar('Learning Rate', learning_rate) - optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, - epsilon=1.0) - - # Create the entropy coder model. - model = model_factory.GetModelRegistry().CreateModel(FLAGS.model) - model_config_string = config_helper.GetConfigString(FLAGS.model_config) - model.Initialize(global_step, optimizer, model_config_string) - model.BuildGraph(codes) - - summary_op = tf.summary.merge_all() - - # Verify that the model can actually be trained. - if model.train_op is None: - raise ValueError('Input model {} is not trainable'.format(FLAGS.model)) - - # We disable the summary thread run by Supervisor class by passing - # summary_op=None. We still pass save_summaries_secs because it is used by - # the global step counter thread. - is_chief = (FLAGS.task == 0) - sv = tf.train.Supervisor(logdir=FLAGS.train_dir, - is_chief=is_chief, - global_step=global_step, - # saver=model.saver, - summary_op=None, - save_summaries_secs=120, - save_model_secs=600, - recovery_wait_secs=30) - - sess = sv.PrepareSession(FLAGS.master) - sv.StartQueueRunners(sess) - - step = sess.run(global_step) - print('Trainer initial step: {}.'.format(step)) - - # Once everything has been setup properly, save the configs. - if is_chief: - config_helper.SaveConfig(FLAGS.train_dir, 'input_config.json', - input_config_string) - config_helper.SaveConfig(FLAGS.train_dir, 'model_config.json', - model_config_string) - config_helper.SaveConfig(FLAGS.train_dir, 'train_config.json', - train_config_string) - - # Train the model. - next_summary_time = time.time() - while not sv.ShouldStop(): - feed_dict = None - - # Once in a while, update the summaries on the chief worker. - if is_chief and next_summary_time < time.time(): - summary_str = sess.run(summary_op, feed_dict=feed_dict) - sv.SummaryComputed(sess, summary_str) - next_summary_time = time.time() + sv.save_summaries_secs - else: - tf_tensors = { - 'train': model.train_op, - 'code_length': model.average_code_length - } - np_tensors = sess.run(tf_tensors, feed_dict=feed_dict) - print(np_tensors['code_length']) - - sv.Stop() - - -def main(argv=None): # pylint: disable=unused-argument - train() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py b/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py deleted file mode 100644 index de60aee32..000000000 --- a/research/compression/entropy_coder/dataset/gen_synthetic_dataset.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Generate a synthetic dataset."""
-
-import os
-
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-import synthetic_model
-
-
-FLAGS = tf.app.flags.FLAGS
-
-tf.app.flags.DEFINE_string(
-    'dataset_dir', None,
-    """Directory where to write the dataset and the configs.""")
-tf.app.flags.DEFINE_integer(
-    'count', 1000,
-    """Number of samples to generate.""")
-
-
-def int64_feature(values):
-  """Returns a TF-Feature of int64s.
-
-  Args:
-    values: A scalar or list of values.
-
-  Returns:
-    A TF-Feature.
-  """
-  if not isinstance(values, (tuple, list)):
-    values = [values]
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
-
-
-def float_feature(values):
-  """Returns a TF-Feature of floats.
-
-  Args:
-    values: A scalar or list of values.
-
-  Returns:
-    A TF-Feature.
-  """
-  if not isinstance(values, (tuple, list)):
-    values = [values]
-  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
-
-
-def AddToTFRecord(code, tfrecord_writer):
-  example = tf.train.Example(features=tf.train.Features(feature={
-      'code_shape': int64_feature(code.shape),
-      'code': float_feature(code.flatten().tolist()),
-  }))
-  tfrecord_writer.write(example.SerializeToString())
-
-
-def GenerateDataset(filename, count, code_shape):
-  with tf.python_io.TFRecordWriter(filename) as tfrecord_writer:
-    for _ in xrange(count):
-      code = synthetic_model.GenerateSingleCode(code_shape)
-      # Convert {0,1} codes to {-1,+1} codes.
-      code = 2.0 * code - 1.0
-      AddToTFRecord(code, tfrecord_writer)
-
-
-def main(argv=None):  # pylint: disable=unused-argument
-  GenerateDataset(os.path.join(FLAGS.dataset_dir, 'synthetic_dataset'),
-                  FLAGS.count,
-                  [35, 48, 8])
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/compression/entropy_coder/dataset/gen_synthetic_single.py b/research/compression/entropy_coder/dataset/gen_synthetic_single.py
deleted file mode 100644
index b8c3821c3..000000000
--- a/research/compression/entropy_coder/dataset/gen_synthetic_single.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Generate a single synthetic sample."""
-
-import io
-import os
-
-import numpy as np
-import tensorflow as tf
-
-import synthetic_model
-
-
-FLAGS = tf.app.flags.FLAGS
-
-tf.app.flags.DEFINE_string(
-    'sample_filename', None,
-    """Output file to store the generated binary code.""")
-
-
-def GenerateSample(filename, code_shape, layer_depth):
-  # {0, +1} binary codes.
-  # No conversion since the output file is expected to store
-  # codes using {0, +1} codes (and not {-1, +1}).
-  code = synthetic_model.GenerateSingleCode(code_shape)
-  code = np.round(code)
-
-  # Reformat the code so as to be compatible with what is generated
-  # by the image encoder.
- # The image encoder generates a tensor of size: - # iteration_count x batch_size x height x width x iteration_depth. - # Here: batch_size = 1 - if code_shape[-1] % layer_depth != 0: - raise ValueError('Number of layers is not an integer') - height = code_shape[0] - width = code_shape[1] - code = code.reshape([1, height, width, -1, layer_depth]) - code = np.transpose(code, [3, 0, 1, 2, 4]) - - int_codes = code.astype(np.int8) - exported_codes = np.packbits(int_codes.reshape(-1)) - - output = io.BytesIO() - np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes) - with tf.gfile.FastGFile(filename, 'wb') as code_file: - code_file.write(output.getvalue()) - - -def main(argv=None): # pylint: disable=unused-argument - # Note: the height and the width is different from the training dataset. - # The main purpose is to show that the entropy coder model is fully - # convolutional and can be used on any image size. - layer_depth = 2 - GenerateSample(FLAGS.sample_filename, [31, 36, 8], layer_depth) - - -if __name__ == '__main__': - tf.app.run() - diff --git a/research/compression/entropy_coder/dataset/synthetic_model.py b/research/compression/entropy_coder/dataset/synthetic_model.py deleted file mode 100644 index 9cccb64a1..000000000 --- a/research/compression/entropy_coder/dataset/synthetic_model.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Binary code sample generator.""" - -import numpy as np -from six.moves import xrange - - -_CRC_LINE = [ - [0, 1, 0], - [1, 1, 0], - [1, 0, 0] -] - -_CRC_DEPTH = [1, 1, 0, 1] - - -def ComputeLineCrc(code, width, y, x, d): - crc = 0 - for dy in xrange(len(_CRC_LINE)): - i = y - 1 - dy - if i < 0: - continue - for dx in xrange(len(_CRC_LINE[dy])): - j = x - 2 + dx - if j < 0 or j >= width: - continue - crc += 1 if (code[i, j, d] != _CRC_LINE[dy][dx]) else 0 - return crc - - -def ComputeDepthCrc(code, y, x, d): - crc = 0 - for delta in xrange(len(_CRC_DEPTH)): - k = d - 1 - delta - if k < 0: - continue - crc += 1 if (code[y, x, k] != _CRC_DEPTH[delta]) else 0 - return crc - - -def GenerateSingleCode(code_shape): - code = np.zeros(code_shape, dtype=np.int) - - keep_value_proba = 0.8 - - height = code_shape[0] - width = code_shape[1] - depth = code_shape[2] - - for d in xrange(depth): - for y in xrange(height): - for x in xrange(width): - v1 = ComputeLineCrc(code, width, y, x, d) - v2 = ComputeDepthCrc(code, y, x, d) - v = 1 if (v1 + v2 >= 6) else 0 - if np.random.rand() < keep_value_proba: - code[y, x, d] = v - else: - code[y, x, d] = 1 - v - - return code diff --git a/research/compression/entropy_coder/lib/__init__.py b/research/compression/entropy_coder/lib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/compression/entropy_coder/lib/block_base.py b/research/compression/entropy_coder/lib/block_base.py deleted file mode 100644 index 615dff828..000000000 --- a/research/compression/entropy_coder/lib/block_base.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Base class for Tensorflow building blocks.""" - -import collections -import contextlib -import itertools - -import tensorflow as tf - -_block_stacks = collections.defaultdict(lambda: []) - - -class BlockBase(object): - """Base class for transform wrappers of Tensorflow. - - To implement a Tensorflow transform block, inherit this class. - - 1. To create a variable, use NewVar() method. Do not overload this method! - For example, use as follows. - a_variable = self.NewVar(initial_value) - - 2. All Tensorflow-related code must be done inside 'with self._BlockScope().' - Otherwise, name scoping and block hierarchy will not work. An exception - is _Apply() method, which is already called inside the context manager - by __call__() method. - - 3. Override and implement _Apply() method. This method is called by - __call__() method. - - The users would use blocks like the following. - nn1 = NN(128, bias=Bias(0), act=tf.nn.relu) - y = nn1(x) - - Some things to consider. - - - Use lazy-initialization if possible. That is, initialize at first Apply() - rather than at __init__(). 
- - Note: if needed, the variables can be created on a specific parameter - server by creating blocks in a scope like: - with g.device(device): - linear = Linear(...) - """ - - def __init__(self, name): - self._variables = [] - self._subblocks = [] - self._called = False - - # Intentionally distinguishing empty string and None. - # If name is an empty string, then do not use name scope. - self.name = name if name is not None else self.__class__.__name__ - self._graph = tf.get_default_graph() - - if self.name: - # Capture the scope string at the init time. - with self._graph.name_scope(self.name) as scope: - self._scope_str = scope - else: - self._scope_str = '' - - # Maintain hierarchy structure of blocks. - self._stack = _block_stacks[self._graph] - if self.__class__ is BlockBase: - # This code is only executed to create the root, which starts in the - # initialized state. - assert not self._stack - self._parent = None - self._called = True # The root is initialized. - return - - # Create a fake root if a root is not already present. - if not self._stack: - self._stack.append(BlockBase('NoOpRoot')) - - self._parent = self._stack[-1] - self._parent._subblocks.append(self) # pylint: disable=protected-access - - def __repr__(self): - return '"{}" ({})'.format(self._scope_str, self.__class__.__name__) - - @contextlib.contextmanager - def _OptionalNameScope(self, scope_str): - if scope_str: - with self._graph.name_scope(scope_str): - yield - else: - yield - - @contextlib.contextmanager - def _BlockScope(self): - """Context manager that handles graph, namescope, and nested blocks.""" - self._stack.append(self) - - try: - with self._graph.as_default(): - with self._OptionalNameScope(self._scope_str): - yield self - finally: # Pop from the stack no matter exception is raised or not. - # The following line is executed when leaving 'with self._BlockScope()' - self._stack.pop() - - def __call__(self, *args, **kwargs): - assert self._stack is _block_stacks[self._graph] - - with self._BlockScope(): - ret = self._Apply(*args, **kwargs) - - self._called = True - return ret - - def _Apply(self, *args, **kwargs): - """Implementation of __call__().""" - raise NotImplementedError() - - # Redirect all variable creation to this single function, so that we can - # switch to better variable creation scheme. - def NewVar(self, value, **kwargs): - """Creates a new variable. - - This function creates a variable, then returns a local copy created by - Identity operation. To get the Variable class object, use LookupRef() - method. - - Note that each time Variable class object is used as an input to an - operation, Tensorflow will create a new Send/Recv pair. This hurts - performance. - - If not for assign operations, use the local copy returned by this method. - - Args: - value: Initialization value of the variable. The shape and the data type - of the variable is determined by this initial value. - **kwargs: Extra named arguments passed to Variable.__init__(). - - Returns: - A local copy of the new variable. - """ - v = tf.Variable(value, **kwargs) - - self._variables.append(v) - return v - - @property - def initialized(self): - """Returns bool if the block is initialized. - - By default, BlockBase assumes that a block is initialized when __call__() - is executed for the first time. If this is an incorrect assumption for some - subclasses, override this property in those subclasses. - - Returns: - True if initialized, False otherwise. 
- """ - return self._called - - def AssertInitialized(self): - """Asserts initialized property.""" - if not self.initialized: - raise RuntimeError('{} has not been initialized.'.format(self)) - - def VariableList(self): - """Returns the list of all tensorflow variables used inside this block.""" - variables = list(itertools.chain( - itertools.chain.from_iterable( - t.VariableList() for t in self._subblocks), - self._VariableList())) - return variables - - def _VariableList(self): - """Returns the list of all tensorflow variables owned by this block.""" - self.AssertInitialized() - return self._variables - - def CreateWeightLoss(self): - """Returns L2 loss list of (almost) all variables used inside this block. - - When this method needs to be overridden, there are two choices. - - 1. Override CreateWeightLoss() to change the weight loss of all variables - that belong to this block, both directly and indirectly. - 2. Override _CreateWeightLoss() to change the weight loss of all - variables that directly belong to this block but not to the sub-blocks. - - Returns: - A Tensor object or None. - """ - losses = list(itertools.chain( - itertools.chain.from_iterable( - t.CreateWeightLoss() for t in self._subblocks), - self._CreateWeightLoss())) - return losses - - def _CreateWeightLoss(self): - """Returns weight loss list of variables that belong to this block.""" - self.AssertInitialized() - with self._BlockScope(): - return [tf.nn.l2_loss(v) for v in self._variables] - - def CreateUpdateOps(self): - """Creates update operations for this block and its sub-blocks.""" - ops = list(itertools.chain( - itertools.chain.from_iterable( - t.CreateUpdateOps() for t in self._subblocks), - self._CreateUpdateOps())) - return ops - - def _CreateUpdateOps(self): - """Creates update operations for this block.""" - self.AssertInitialized() - return [] - - def MarkAsNonTrainable(self): - """Mark all the variables of this block as non-trainable. - - All the variables owned directly or indirectly (through subblocks) are - marked as non trainable. - - This function along with CheckpointInitOp can be used to load a pretrained - model that consists in only one part of the whole graph. - """ - assert self._called - - all_variables = self.VariableList() - collection = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES) - for v in all_variables: - if v in collection: - collection.remove(v) - - -def CreateWeightLoss(): - """Returns all weight losses from the blocks in the graph.""" - stack = _block_stacks[tf.get_default_graph()] - if not stack: - return [] - return stack[0].CreateWeightLoss() - - -def CreateBlockUpdates(): - """Combines all updates from the blocks in the graph.""" - stack = _block_stacks[tf.get_default_graph()] - if not stack: - return [] - return stack[0].CreateUpdateOps() diff --git a/research/compression/entropy_coder/lib/block_util.py b/research/compression/entropy_coder/lib/block_util.py deleted file mode 100644 index 80479cc66..000000000 --- a/research/compression/entropy_coder/lib/block_util.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Utility functions for blocks."""
-
-from __future__ import division
-from __future__ import unicode_literals
-
-import math
-
-import numpy as np
-import six
-import tensorflow as tf
-
-
-class RsqrtInitializer(object):
-  """Gaussian initializer with standard deviation 1/sqrt(n).
-
-  Note that tf.truncated_normal is used internally. Therefore any random sample
-  outside two-sigma will be discarded and re-sampled.
-  """
-
-  def __init__(self, dims=(0,), **kwargs):
-    """Creates an initializer.
-
-    Args:
-      dims: Dimension(s) index to compute standard deviation:
-        1.0 / sqrt(product(shape[dims]))
-      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
-    """
-    if isinstance(dims, six.integer_types):
-      self._dims = [dims]
-    else:
-      self._dims = dims
-    self._kwargs = kwargs
-
-  def __call__(self, shape, dtype):
-    stddev = 1.0 / np.sqrt(np.prod([shape[x] for x in self._dims]))
-    return tf.truncated_normal(
-        shape=shape, dtype=dtype, stddev=stddev, **self._kwargs)
-
-
-class RectifierInitializer(object):
-  """Gaussian initializer with standard deviation sqrt(2/fan_in).
-
-  Note that tf.random_normal is used internally to ensure the expected weight
-  distribution. This is intended to be used with ReLU activations, especially
-  in ResNets.
-
-  For details please refer to:
-  Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet
-  Classification
-  """
-
-  def __init__(self, dims=(0,), scale=2.0, **kwargs):
-    """Creates an initializer.
-
-    Args:
-      dims: Dimension(s) index to compute standard deviation:
-        sqrt(scale / product(shape[dims]))
-      scale: A constant scaling for the initialization used as
-        sqrt(scale / product(shape[dims])).
-      **kwargs: Extra keyword arguments to pass to tf.random_normal.
-    """
-    if isinstance(dims, six.integer_types):
-      self._dims = [dims]
-    else:
-      self._dims = dims
-    self._kwargs = kwargs
-    self._scale = scale
-
-  def __call__(self, shape, dtype):
-    stddev = np.sqrt(self._scale / np.prod([shape[x] for x in self._dims]))
-    return tf.random_normal(
-        shape=shape, dtype=dtype, stddev=stddev, **self._kwargs)
-
-
-class GaussianInitializer(object):
-  """Gaussian initializer with a given standard deviation.
-
-  Note that tf.truncated_normal is used internally. Therefore any random sample
-  outside two-sigma will be discarded and re-sampled.
-  """
-
-  def __init__(self, stddev=1.0):
-    self._stddev = stddev
-
-  def __call__(self, shape, dtype):
-    return tf.truncated_normal(shape=shape, dtype=dtype, stddev=self._stddev)
diff --git a/research/compression/entropy_coder/lib/blocks.py b/research/compression/entropy_coder/lib/blocks.py
deleted file mode 100644
index 002384eb0..000000000
--- a/research/compression/entropy_coder/lib/blocks.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from block_base import * -from block_util import * -from blocks_binarizer import * -from blocks_entropy_coding import * -from blocks_lstm import * -from blocks_masked_conv2d import * -from blocks_masked_conv2d_lstm import * -from blocks_operator import * -from blocks_std import * diff --git a/research/compression/entropy_coder/lib/blocks_binarizer.py b/research/compression/entropy_coder/lib/blocks_binarizer.py deleted file mode 100644 index 820673161..000000000 --- a/research/compression/entropy_coder/lib/blocks_binarizer.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Activation and weight binarizer implementations.""" - -import math - -import numpy as np -import tensorflow as tf - - -def ConvertSignCodeToZeroOneCode(x): - """Conversion from codes {-1, +1} to codes {0, 1}.""" - return 0.5 * (x + 1.0) - - -def ConvertZeroOneCodeToSignCode(x): - """Convert from codes {0, 1} to codes {-1, +1}.""" - return 2.0 * x - 1.0 - - -def CheckZeroOneCode(x): - return tf.reduce_all(tf.equal(x * (x - 1.0), 0)) diff --git a/research/compression/entropy_coder/lib/blocks_entropy_coding.py b/research/compression/entropy_coder/lib/blocks_entropy_coding.py deleted file mode 100644 index 6ee5d9792..000000000 --- a/research/compression/entropy_coder/lib/blocks_entropy_coding.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Set of blocks related to entropy coding.""" - -import math - -import tensorflow as tf - -import block_base - -# pylint does not recognize block_base.BlockBase.__call__(). -# pylint: disable=not-callable - - -class CodeLength(block_base.BlockBase): - """Theoretical bound for a code length given a probability distribution. 
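-
-  For binary codes c in {0, 1} with p = P(code == 1), the value computed
-  by _Apply is the average cross-entropy in bits:
-    E[-((1 - c) * log2(1 - p) + c * log2(p))]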
- """ - - def __init__(self, name=None): - super(CodeLength, self).__init__(name) - - def _Apply(self, c, p): - """Theoretical bound of the coded length given a probability distribution. - - Args: - c: The binary codes. Belong to {0, 1}. - p: The probability of: P(code==+1) - - Returns: - The average code length. - Note: the average code length can be greater than 1 bit (e.g. when - encoding the least likely symbol). - """ - entropy = ((1.0 - c) * tf.log(1.0 - p) + c * tf.log(p)) / (-math.log(2)) - entropy = tf.reduce_mean(entropy) - return entropy diff --git a/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py b/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py deleted file mode 100644 index 5209865f5..000000000 --- a/research/compression/entropy_coder/lib/blocks_entropy_coding_test.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for basic tensorflow blocks_entropy_coding.""" - -from __future__ import division -from __future__ import unicode_literals - -import math - -import numpy as np -import tensorflow as tf - -import blocks_entropy_coding - - -class BlocksEntropyCodingTest(tf.test.TestCase): - - def testCodeLength(self): - shape = [2, 4] - proba_feed = [[0.65, 0.25, 0.70, 0.10], - [0.28, 0.20, 0.44, 0.54]] - symbol_feed = [[1.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]] - mean_code_length = - ( - (math.log(0.65) + math.log(0.75) + math.log(0.70) + math.log(0.90) + - math.log(0.72) + math.log(0.80) + math.log(0.56) + math.log(0.54)) / - math.log(2.0)) / (shape[0] * shape[1]) - - symbol = tf.placeholder(dtype=tf.float32, shape=shape) - proba = tf.placeholder(dtype=tf.float32, shape=shape) - code_length_calculator = blocks_entropy_coding.CodeLength() - code_length = code_length_calculator(symbol, proba) - - with self.test_session(): - tf.global_variables_initializer().run() - code_length_eval = code_length.eval( - feed_dict={symbol: symbol_feed, proba: proba_feed}) - - self.assertAllClose(mean_code_length, code_length_eval) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/lib/blocks_lstm.py b/research/compression/entropy_coder/lib/blocks_lstm.py deleted file mode 100644 index 6e474e3e3..000000000 --- a/research/compression/entropy_coder/lib/blocks_lstm.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Blocks of LSTM and its variants.""" - -import numpy as np -import tensorflow as tf - -import block_base -import block_util -import blocks_std - -# pylint does not recognize block_base.BlockBase.__call__(). -# pylint: disable=not-callable - - -def LSTMBiasInit(shape, dtype): - """Returns ones for forget-gate, and zeros for the others.""" - shape = np.array(shape) - - # Check internal consistencies. - assert shape.shape == (1,), shape - assert shape[0] % 4 == 0, shape - - n = shape[0] // 4 - ones = tf.fill([n], tf.constant(1, dtype=dtype)) - zeros = tf.fill([3 * n], tf.constant(0, dtype=dtype)) - return tf.concat([ones, zeros], 0) - - -class LSTMBase(block_base.BlockBase): - """Base class for LSTM implementations. - - These LSTM implementations use the pattern found in [1]. No peephole - connection, i.e., cell content is not used in recurrence computation. - Hidden units are also output units. - - [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, - 2015. arxiv:1409.2329. - """ - - def __init__(self, output_shape, name): - """Initializes LSTMBase class object. - - Args: - output_shape: List representing the LSTM output shape. This argument - does not include batch dimension. For example, if the LSTM output has - shape [batch, depth], then pass [depth]. - name: Name of this block. - """ - super(LSTMBase, self).__init__(name) - - with self._BlockScope(): - self._output_shape = [None] + list(output_shape) - self._hidden = None - self._cell = None - - @property - def hidden(self): - """Returns the hidden units of this LSTM.""" - return self._hidden - - @hidden.setter - def hidden(self, value): - """Assigns to the hidden units of this LSTM. - - Args: - value: The new value for the hidden units. If None, the hidden units are - considered to be filled with zeros. - """ - if value is not None: - value.get_shape().assert_is_compatible_with(self._output_shape) - self._hidden = value - - @property - def cell(self): - """Returns the cell units of this LSTM.""" - return self._cell - - @cell.setter - def cell(self, value): - """Assigns to the cell units of this LSTM. - - Args: - value: The new value for the cell units. If None, the cell units are - considered to be filled with zeros. - """ - if value is not None: - value.get_shape().assert_is_compatible_with(self._output_shape) - self._cell = value - - # Consider moving bias terms to the base, and require this method to be - # linear. - def _TransformInputs(self, _): - """Transforms the input units to (4 * depth) units. - - The forget-gate, input-gate, output-gate, and cell update is computed as - f, i, j, o = T(h) + R(x) - where h is hidden units, x is input units, and T, R are transforms of - h, x, respectively. - - This method implements R. Note that T is strictly linear, so if LSTM is - going to use bias, this method must include the bias to the transformation. - - Subclasses must implement this method. See _Apply() for more details. - """ - raise NotImplementedError() - - def _TransformHidden(self, _): - """Transforms the hidden units to (4 * depth) units. - - The forget-gate, input-gate, output-gate, and cell update is computed as - f, i, j, o = T(h) + R(x) - where h is hidden units, x is input units, and T, R are transforms of - h, x, respectively. - - This method implements T in the equation. 
The method must implement a - strictly linear transformation. For example, it may use MatMul or Conv2D, - but must not add bias. This is because when hidden units are zeros, then - the LSTM implementation will skip calling this method, instead of passing - zeros to this function. - - Subclasses must implement this method. See _Apply() for more details. - """ - raise NotImplementedError() - - def _Apply(self, *args): - xtransform = self._TransformInputs(*args) - depth_axis = len(self._output_shape) - 1 - - if self.hidden is not None: - htransform = self._TransformHidden(self.hidden) - f, i, j, o = tf.split( - value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis) - else: - f, i, j, o = tf.split( - value=xtransform, num_or_size_splits=4, axis=depth_axis) - - if self.cell is not None: - self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j) - else: - self.cell = tf.sigmoid(i) * tf.tanh(j) - - self.hidden = tf.sigmoid(o) * tf.tanh(self.cell) - return self.hidden - - -class LSTM(LSTMBase): - """Efficient LSTM implementation used in [1]. - - [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, - 2015. arxiv:1409.2329. - """ - - def __init__(self, - depth, - bias=LSTMBiasInit, - initializer=block_util.RsqrtInitializer(), - name=None): - super(LSTM, self).__init__([depth], name) - - with self._BlockScope(): - self._depth = depth - self._nn = blocks_std.NN( - 4 * depth, bias=bias, act=None, initializer=initializer) - self._hidden_linear = blocks_std.Linear( - 4 * depth, initializer=initializer) - - def _TransformInputs(self, *args): - return self._nn(*args) - - def _TransformHidden(self, h): - return self._hidden_linear(h) - - -class Conv2DLSTM(LSTMBase): - """Convolutional LSTM implementation with optimizations inspired by [1]. - - Note that when using the batch normalization feature, the bias initializer - will not be used, since BN effectively cancels its effect out. - - [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, - 2015. arxiv:1409.2329. 
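-
-  As in the base class, a single fused projection is split into the four
-  gate pre-activations, and the state is updated as:
-    f, i, j, o = split(input_conv(x) + bias + hidden_conv(h))
-    c' = sigmoid(f) * c + sigmoid(i) * tanh(j)
-    h' = sigmoid(o) * tanh(c')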
- """ - - def __init__(self, - depth, - filter_size, - hidden_filter_size, - strides, - padding, - bias=LSTMBiasInit, - initializer=block_util.RsqrtInitializer(dims=(0, 1, 2)), - use_moving_average=False, - name=None): - super(Conv2DLSTM, self).__init__([None, None, depth], name) - self._iter = 0 - - with self._BlockScope(): - self._input_conv = blocks_std.Conv2D( - 4 * depth, - filter_size, - strides, - padding, - bias=None, - act=None, - initializer=initializer, - name='input_conv2d') - - self._hidden_conv = blocks_std.Conv2D( - 4 * depth, - hidden_filter_size, - [1, 1], - 'SAME', - bias=None, - act=None, - initializer=initializer, - name='hidden_conv2d') - - if bias is not None: - self._bias = blocks_std.BiasAdd(bias, name='biases') - else: - self._bias = blocks_std.PassThrough() - - def _TransformInputs(self, x): - return self._bias(self._input_conv(x)) - - def _TransformHidden(self, h): - return self._hidden_conv(h) - - def _Apply(self, *args): - xtransform = self._TransformInputs(*args) - depth_axis = len(self._output_shape) - 1 - - if self.hidden is not None: - htransform = self._TransformHidden(self.hidden) - f, i, j, o = tf.split( - value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis) - else: - f, i, j, o = tf.split( - value=xtransform, num_or_size_splits=4, axis=depth_axis) - - if self.cell is not None: - self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j) - else: - self.cell = tf.sigmoid(i) * tf.tanh(j) - - self.hidden = tf.sigmoid(o) * tf.tanh(self.cell) - - self._iter += 1 - return self.hidden diff --git a/research/compression/entropy_coder/lib/blocks_lstm_test.py b/research/compression/entropy_coder/lib/blocks_lstm_test.py deleted file mode 100644 index 03c32dc13..000000000 --- a/research/compression/entropy_coder/lib/blocks_lstm_test.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests for LSTM tensorflow blocks.""" -from __future__ import division - -import numpy as np -import tensorflow as tf - -import block_base -import blocks_std -import blocks_lstm - - -class BlocksLSTMTest(tf.test.TestCase): - - def CheckUnary(self, y, op_type): - self.assertEqual(op_type, y.op.type) - self.assertEqual(1, len(y.op.inputs)) - return y.op.inputs[0] - - def CheckBinary(self, y, op_type): - self.assertEqual(op_type, y.op.type) - self.assertEqual(2, len(y.op.inputs)) - return y.op.inputs - - def testLSTM(self): - lstm = blocks_lstm.LSTM(10) - lstm.hidden = tf.zeros(shape=[10, 10], dtype=tf.float32) - lstm.cell = tf.zeros(shape=[10, 10], dtype=tf.float32) - x = tf.placeholder(dtype=tf.float32, shape=[10, 11]) - y = lstm(x) - - o, tanhc = self.CheckBinary(y, 'Mul') - self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'LSTM/split:3') - - self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh')) - fc, ij = self.CheckBinary(lstm.cell, 'Add') - - f, _ = self.CheckBinary(fc, 'Mul') - self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'LSTM/split:0') - - i, j = self.CheckBinary(ij, 'Mul') - self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'LSTM/split:1') - j = self.CheckUnary(j, 'Tanh') - self.assertEqual(j.name, 'LSTM/split:2') - - def testLSTMBiasInit(self): - lstm = blocks_lstm.LSTM(9) - x = tf.placeholder(dtype=tf.float32, shape=[15, 7]) - lstm(x) - b = lstm._nn._bias - - with self.test_session(): - tf.global_variables_initializer().run() - bias_var = b._bias.eval() - - comp = ([1.0] * 9) + ([0.0] * 27) - self.assertAllEqual(bias_var, comp) - - def testConv2DLSTM(self): - lstm = blocks_lstm.Conv2DLSTM(depth=10, - filter_size=[1, 1], - hidden_filter_size=[1, 1], - strides=[1, 1], - padding='SAME') - lstm.hidden = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32) - lstm.cell = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32) - x = tf.placeholder(dtype=tf.float32, shape=[10, 11, 11, 1]) - y = lstm(x) - - o, tanhc = self.CheckBinary(y, 'Mul') - self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'Conv2DLSTM/split:3') - - self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh')) - fc, ij = self.CheckBinary(lstm.cell, 'Add') - - f, _ = self.CheckBinary(fc, 'Mul') - self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'Conv2DLSTM/split:0') - - i, j = self.CheckBinary(ij, 'Mul') - self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'Conv2DLSTM/split:1') - j = self.CheckUnary(j, 'Tanh') - self.assertEqual(j.name, 'Conv2DLSTM/split:2') - - def testConv2DLSTMBiasInit(self): - lstm = blocks_lstm.Conv2DLSTM(9, 1, 1, [1, 1], 'SAME') - x = tf.placeholder(dtype=tf.float32, shape=[1, 7, 7, 7]) - lstm(x) - b = lstm._bias - - with self.test_session(): - tf.global_variables_initializer().run() - bias_var = b._bias.eval() - - comp = ([1.0] * 9) + ([0.0] * 27) - self.assertAllEqual(bias_var, comp) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/lib/blocks_masked_conv2d.py b/research/compression/entropy_coder/lib/blocks_masked_conv2d.py deleted file mode 100644 index 3f562384a..000000000 --- a/research/compression/entropy_coder/lib/blocks_masked_conv2d.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Define some typical masked 2D convolutions.""" - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import block_util -import blocks_std - -# pylint does not recognize block_base.BlockBase.__call__(). -# pylint: disable=not-callable - - -class RasterScanConv2D(blocks_std.Conv2DBase): - """Conv2D with no dependency on future pixels (in raster scan order). - - For example, assuming a 5 x 5 kernel, the kernel is applied a spatial mask: - T T T T T - T T T T T - T T x F F - F F F F F - F F F F F - where 'T' are pixels which are available when computing the convolution - for pixel 'x'. All the pixels marked with 'F' are not available. - 'x' itself is not available if strict_order is True, otherwise, it is - available. - """ - - def __init__(self, depth, filter_size, strides, padding, - strict_order=True, - bias=None, act=None, initializer=None, name=None): - super(RasterScanConv2D, self).__init__( - depth, filter_size, strides, padding, bias, act, name=name) - - if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: - raise ValueError('Kernel size should be odd.') - - with self._BlockScope(): - if initializer is None: - initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) - self._initializer = initializer - self._strict_order = strict_order - - def _CreateKernel(self, shape, dtype): - init = self._initializer(shape, dtype) - kernel = self.NewVar(init) - - mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype) - center = shape[:2] // 2 - mask[center[0] + 1:, :] = 0 - if not self._strict_order: - mask[center[0], center[1] + 1:] = 0 - else: - mask[center[0], center[1]:] = 0 - mask = mask.reshape(mask.shape + (1, 1)) - - return tf.convert_to_tensor(mask, dtype) * kernel - - -class DepthOrderConv2D(blocks_std.Conv2DBase): - """Conv2D with no dependency on higher depth dimensions. - - More precisely, the output depth #n has only dependencies on input depths #k - for k < n (if strict_order is True) or for k <= n (if strict_order is False). - """ - - def __init__(self, depth, filter_size, strides, padding, - strict_order=True, - bias=None, act=None, initializer=None, name=None): - super(DepthOrderConv2D, self).__init__( - depth, filter_size, strides, padding, bias, act, name=name) - - with self._BlockScope(): - if initializer is None: - initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) - self._initializer = initializer - self._strict_order = strict_order - - def _CreateKernel(self, shape, dtype): - init = self._initializer(shape, dtype) - kernel = self.NewVar(init) - - mask = np.ones(shape[2:], dtype=dtype.as_numpy_dtype) - depth_output = shape[3] - for d in xrange(depth_output): - if self._strict_order: - mask[d:, d] = 0 - else: - mask[d + 1:, d] = 0 - mask = mask.reshape((1, 1) + mask.shape) - - return tf.convert_to_tensor(mask, dtype) * kernel - - -class GroupRasterScanConv2D(blocks_std.Conv2DBase): - """Conv2D with no dependency on future pixels (in raster scan order). - - This version only introduces dependencies on previous pixels in raster scan - order. 
It can also introduce some dependencies on previous depth positions - of the current pixel (current pixel = center pixel of the kernel) in the - following way: - the depth dimension of the input is split into Ki groups of size - |input_group_size|, the output dimension is split into Ko groups of size - |output_group_size| (usually Ki == Ko). Each output group ko of the current - pixel position can only depend on previous input groups ki - (i.e. ki < ko if strict_order is True or ki <= ko if strict_order is False). - - Notes: - - Block RasterScanConv2D is a special case of GroupRasterScanConv2D - where Ki == Ko == 1 (i.e. input_group_size == input_depth and - output_group_size == output_depth). - - For 1x1 convolution, block DepthOrderConv2D is a special case of - GroupRasterScanConv2D where input_group_size == 1 and - output_group_size == 1. - """ - - def __init__(self, depth, filter_size, strides, padding, - strict_order=True, - input_group_size=1, - output_group_size=1, - bias=None, act=None, initializer=None, name=None): - super(GroupRasterScanConv2D, self).__init__( - depth, filter_size, strides, padding, bias, act, name=name) - - if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: - raise ValueError('Kernel size should be odd.') - - with self._BlockScope(): - if initializer is None: - initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) - self._initializer = initializer - self._input_group_size = input_group_size - self._output_group_size = output_group_size - self._strict_order = strict_order - - if depth % self._output_group_size != 0: - raise ValueError( - 'Invalid depth group size: {} for depth {}'.format( - self._output_group_size, depth)) - self._output_group_count = depth // self._output_group_size - - def _CreateKernel(self, shape, dtype): - init = self._initializer(shape, dtype) - kernel = self.NewVar(init) - - depth_input = shape[2] - if depth_input % self._input_group_size != 0: - raise ValueError( - 'Invalid depth group size: {} for depth {}'.format( - self._input_group_size, depth_input)) - input_group_count = depth_input // self._input_group_size - output_group_count = self._output_group_count - - # Set the mask to 0 for future pixels in raster scan order. - center = shape[:2] // 2 - mask = np.ones([shape[0], shape[1], - input_group_count, self._input_group_size, - output_group_count, self._output_group_size], - dtype=dtype.as_numpy_dtype) - mask[center[0] + 1:, :, :, :, :, :] = 0 - mask[center[0], center[1] + 1:, :, :, :, :] = 0 - - # Adjust the mask for the current position (the center position). - depth_output = shape[3] - for d in xrange(output_group_count): - mask[center[0], center[1], d + 1:, :, d:d + 1, :] = 0 - if self._strict_order: - mask[center[0], center[1], d, :, d:d + 1, :] = 0 - - mask = mask.reshape([shape[0], shape[1], depth_input, depth_output]) - return tf.convert_to_tensor(mask, dtype) * kernel - - -class InFillingConv2D(blocks_std.Conv2DBase): - """Conv2D with kernel having no dependency on the current pixel. - - For example, assuming a 5 x 5 kernel, the kernel is applied a spatial mask: - T T T T T - T T T T T - T T x T T - T T T T T - T T T T T - where 'T' marks a pixel which is available when computing the convolution - for pixel 'x'. 'x' itself is not available. 
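-
-  Since the output at 'x' never depends on the input at 'x', this block can
-  be used to predict a pixel purely from its surrounding spatial context.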
- """ - - def __init__(self, depth, filter_size, strides, padding, - bias=None, act=None, initializer=None, name=None): - super(InFillingConv2D, self).__init__( - depth, filter_size, strides, padding, bias, act, name=name) - - if (filter_size[0] % 2) != 1 or (filter_size[1] % 2) != 1: - raise ValueError('Kernel size should be odd.') - if filter_size[0] == 1 and filter_size[1] == 1: - raise ValueError('Kernel size should be larger than 1x1.') - - with self._BlockScope(): - if initializer is None: - initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) - self._initializer = initializer - - def _CreateKernel(self, shape, dtype): - init = self._initializer(shape, dtype) - kernel = self.NewVar(init) - - mask = np.ones(shape[:2], dtype=dtype.as_numpy_dtype) - center = shape[:2] // 2 - mask[center[0], center[1]] = 0 - mask = mask.reshape(mask.shape + (1, 1)) - - return tf.convert_to_tensor(mask, dtype) * kernel diff --git a/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py b/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py deleted file mode 100644 index 2d6dfeffc..000000000 --- a/research/compression/entropy_coder/lib/blocks_masked_conv2d_lstm.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Masked conv2d LSTM.""" - -import block_base -import block_util -import blocks_masked_conv2d -import blocks_lstm -import blocks_std - -# pylint: disable=not-callable - - -class RasterScanConv2DLSTM(blocks_lstm.LSTMBase): - """Convolutional LSTM implementation with optimizations inspired by [1]. - - Note that when using the batch normalization feature, the bias initializer - will not be used, since BN effectively cancels its effect out. - - [1] Zaremba, Sutskever, Vinyals. Recurrent Neural Network Regularization, - 2015. arxiv:1409.2329. 
- """ - - def __init__(self, - depth, - filter_size, - hidden_filter_size, - strides, - padding, - bias=blocks_lstm.LSTMBiasInit, - initializer=block_util.RsqrtInitializer(dims=(0, 1, 2)), - name=None): - super(RasterScanConv2DLSTM, self).__init__([None, None, depth], name) - - with self._BlockScope(): - self._input_conv = blocks_masked_conv2d.RasterScanConv2D( - 4 * depth, - filter_size, - strides, - padding, - strict_order=False, - bias=None, - act=None, - initializer=initializer, - name='input_conv2d') - - self._hidden_conv = blocks_std.Conv2D( - 4 * depth, - hidden_filter_size, - [1, 1], - 'SAME', - bias=None, - act=None, - initializer=initializer, - name='hidden_conv2d') - - if bias is not None: - self._bias = blocks_std.BiasAdd(bias, name='biases') - else: - self._bias = blocks_std.PassThrough() - - def _TransformInputs(self, x): - return self._bias(self._input_conv(x)) - - def _TransformHidden(self, h): - return self._hidden_conv(h) diff --git a/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py b/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py deleted file mode 100644 index 1d284ebff..000000000 --- a/research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests of the 2D masked convolution blocks.""" - -from __future__ import division -from __future__ import unicode_literals - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import blocks_masked_conv2d - - -class MaskedConv2DTest(tf.test.TestCase): - - def testRasterScanKernel(self): - kernel_size = 5 - input_depth = 1 - output_depth = 1 - kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] - - # pylint: disable=bad-whitespace - kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 13.0, 14.0, 15.0], - [16.0, 17.0, 18.0, 19.0, 20.0], - [21.0, 22.0, 23.0, 24.0, 25.0]] - kernel_feed = np.reshape(kernel_feed, kernel_shape) - kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 0.0, 0.0, 0.0], - [ 0.0, 0.0, 0.0, 0.0, 0.0], - [ 0.0, 0.0, 0.0, 0.0, 0.0]] - kernel_expected = np.reshape(kernel_expected, kernel_shape) - # pylint: enable=bad-whitespace - - init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) - masked_conv2d = blocks_masked_conv2d.RasterScanConv2D( - output_depth, [kernel_size] * 2, [1] * 2, 'SAME', - initializer=init_kernel) - x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth]) - _ = masked_conv2d(x) - - with self.test_session(): - tf.global_variables_initializer().run() - kernel_value = masked_conv2d._kernel.eval() - - self.assertAllEqual(kernel_expected, kernel_value) - - def testDepthOrderKernel(self): - kernel_size = 1 - input_depth = 7 - output_depth = input_depth - kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] - - kernel_feed = np.ones(kernel_shape) - x_shape = [5] * 3 + [input_depth] - x_feed = np.ones(x_shape) - y_expected = np.zeros(x_shape[0:3] + [output_depth]) - y_expected[:, :, :] = np.arange(output_depth) - - init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) - masked_conv2d = blocks_masked_conv2d.DepthOrderConv2D( - output_depth, [kernel_size] * 2, [1] * 2, 'SAME', - strict_order=True, - initializer=init_kernel) - x = tf.placeholder(dtype=tf.float32, shape=x_shape) - y = masked_conv2d(x) - - with self.test_session(): - tf.global_variables_initializer().run() - y_value = y.eval(feed_dict={x: x_feed}) - - self.assertAllEqual(y_expected, y_value) - - def testGroupRasterScanKernel(self): - kernel_size = 3 - input_depth = 4 - input_group_size = 2 - output_depth = 2 - output_group_size = 1 - kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] - kernel_feed = np.ones(shape=kernel_shape) - - height = 5 - width = 5 - x_shape = [1, height, width, input_depth] - x_feed = np.ones(shape=x_shape) - - # pylint: disable=bad-whitespace - y_expected = [ - [[ 0, 2], [ 4, 6], [ 4, 6], [ 4, 6], [ 4, 6]], - [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], - [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], - [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], - [[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]], - ] - y_expected = np.reshape(y_expected, [1, height, width, output_depth]) - # pylint: enable=bad-whitespace - - init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) - masked_conv2d = blocks_masked_conv2d.GroupRasterScanConv2D( - output_depth, [kernel_size] * 2, [1] * 2, 'SAME', - strict_order=True, - input_group_size=input_group_size, - output_group_size=output_group_size, - initializer=init_kernel) - x = tf.placeholder(dtype=tf.float32, shape=x_shape) - y = 
masked_conv2d(x) - - with self.test_session(): - tf.global_variables_initializer().run() - y_value = y.eval(feed_dict={x: x_feed}) - - self.assertAllEqual(y_expected, y_value) - - def testInFillingKernel(self): - kernel_size = 5 - input_depth = 1 - output_depth = 1 - kernel_shape = [kernel_size, kernel_size, input_depth, output_depth] - - # pylint: disable=bad-whitespace - kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 13.0, 14.0, 15.0], - [16.0, 17.0, 18.0, 19.0, 20.0], - [21.0, 22.0, 23.0, 24.0, 25.0]] - kernel_feed = np.reshape(kernel_feed, kernel_shape) - kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0], - [ 6.0, 7.0, 8.0, 9.0, 10.0], - [11.0, 12.0, 0.0, 14.0, 15.0], - [16.0, 17.0, 18.0, 19.0, 20.0], - [21.0, 22.0, 23.0, 24.0, 25.0]] - kernel_expected = np.reshape(kernel_expected, kernel_shape) - # pylint: enable=bad-whitespace - - init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s) - masked_conv2d = blocks_masked_conv2d.InFillingConv2D( - output_depth, [kernel_size] * 2, [1] * 2, 'SAME', - initializer=init_kernel) - x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth]) - _ = masked_conv2d(x) - - with self.test_session(): - tf.global_variables_initializer().run() - kernel_value = masked_conv2d._kernel.eval() - - self.assertAllEqual(kernel_expected, kernel_value) - - def testConv2DMaskedNumerics(self): - kernel_size = 5 - input_shape = [1, 10, 10, 1] - filter_shape = [kernel_size, kernel_size, 1, 1] - strides = [1, 1, 1, 1] - output_shape = [1, 10, 10, 1] - - conv = blocks_masked_conv2d.RasterScanConv2D( - depth=filter_shape[-1], - filter_size=filter_shape[0:2], - strides=strides[1:3], - padding='SAME', - initializer=tf.constant_initializer(value=1.0)) - x = tf.placeholder(dtype=tf.float32, shape=input_shape) - y = conv(x) - - x_feed = - np.ones(input_shape, dtype=float) - y_expected = np.ones(output_shape, dtype=float) - for i in xrange(input_shape[1]): - for j in xrange(input_shape[2]): - x_feed[0, i, j, 0] = 10 * (j + 1) + i - v = 0 - ki_start = max(i - kernel_size // 2, 0) - kj_start = max(j - kernel_size // 2, 0) - kj_end = min(j + kernel_size // 2, input_shape[2] - 1) - for ki in range(ki_start, i + 1): - for kj in range(kj_start, kj_end + 1): - if ki > i: - continue - if ki == i and kj >= j: - continue - v += 10 * (kj + 1) + ki - y_expected[0, i, j, 0] = v - - with self.test_session(): - tf.global_variables_initializer().run() - y_value = y.eval(feed_dict={x: x_feed}) - - self.assertAllEqual(y_expected, y_value) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/lib/blocks_operator.py b/research/compression/entropy_coder/lib/blocks_operator.py deleted file mode 100644 index e35e37b27..000000000 --- a/research/compression/entropy_coder/lib/blocks_operator.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================

-"""Common blocks which work as operators on other blocks."""
-
-import tensorflow as tf
-
-import block_base
-
-# pylint: disable=not-callable
-
-
-class CompositionOperator(block_base.BlockBase):
-  """Composition of several blocks."""
-
-  def __init__(self, block_list, name=None):
-    """Initialization of the composition operator.
-
-    Args:
-      block_list: List of blocks.BlockBase that are chained to create
-        a new blocks.BlockBase.
-      name: Name of this block.
-    """
-    super(CompositionOperator, self).__init__(name)
-    self._blocks = block_list
-
-  def _Apply(self, x):
-    """Apply successively all the blocks on the given input tensor."""
-    h = x
-    for layer in self._blocks:
-      h = layer(h)
-    return h
-
-
-class LineOperator(block_base.BlockBase):
-  """Repeat the same block over all the lines of an input tensor."""
-
-  def __init__(self, block, name=None):
-    super(LineOperator, self).__init__(name)
-    self._block = block
-
-  def _Apply(self, x):
-    height = x.get_shape()[1].value
-    if height is None:
-      raise ValueError('Unknown tensor height')
-    all_line_x = tf.split(value=x, num_or_size_splits=height, axis=1)
-
-    y = []
-    for line_x in all_line_x:
-      y.append(self._block(line_x))
-    y = tf.concat(values=y, axis=1)
-
-    return y
-
-
-class TowerOperator(block_base.BlockBase):
-  """Parallel execution with concatenation of several blocks."""
-
-  def __init__(self, block_list, dim=3, name=None):
-    """Initialization of the parallel exec + concat (Tower).
-
-    Args:
-      block_list: List of blocks.BlockBase that are applied in parallel
-        to the same input and whose outputs are concatenated.
-      dim: the dimension on which to concat.
-      name: Name of this block.
-    """
-    super(TowerOperator, self).__init__(name)
-    self._blocks = block_list
-    self._concat_dim = dim
-
-  def _Apply(self, x):
-    """Apply all the blocks in parallel and concatenate their outputs."""
-    outputs = [layer(x) for layer in self._blocks]
-    return tf.concat(outputs, self._concat_dim)
diff --git a/research/compression/entropy_coder/lib/blocks_operator_test.py b/research/compression/entropy_coder/lib/blocks_operator_test.py
deleted file mode 100644
index 8b6d80da1..000000000
--- a/research/compression/entropy_coder/lib/blocks_operator_test.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Tests of the block operators.""" - -import numpy as np -import tensorflow as tf - -import block_base -import blocks_operator - - -class AddOneBlock(block_base.BlockBase): - - def __init__(self, name=None): - super(AddOneBlock, self).__init__(name) - - def _Apply(self, x): - return x + 1.0 - - -class SquareBlock(block_base.BlockBase): - - def __init__(self, name=None): - super(SquareBlock, self).__init__(name) - - def _Apply(self, x): - return x * x - - -class BlocksOperatorTest(tf.test.TestCase): - - def testComposition(self): - x_value = np.array([[1.0, 2.0, 3.0], - [-1.0, -2.0, -3.0]]) - y_expected_value = np.array([[4.0, 9.0, 16.0], - [0.0, 1.0, 4.0]]) - - x = tf.placeholder(dtype=tf.float32, shape=[2, 3]) - complex_block = blocks_operator.CompositionOperator( - [AddOneBlock(), - SquareBlock()]) - y = complex_block(x) - - with self.test_session(): - y_value = y.eval(feed_dict={x: x_value}) - - self.assertAllClose(y_expected_value, y_value) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/lib/blocks_std.py b/research/compression/entropy_coder/lib/blocks_std.py deleted file mode 100644 index 2c6174853..000000000 --- a/research/compression/entropy_coder/lib/blocks_std.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Basic blocks for building tensorflow models.""" - -import numpy as np -import tensorflow as tf - -import block_base -import block_util - -# pylint does not recognize block_base.BlockBase.__call__(). -# pylint: disable=not-callable - - -def HandleConvPaddingModes(x, padding, kernel_shape, strides): - """Returns an updated tensor and padding type for REFLECT and SYMMETRIC. - - Args: - x: A 4D tensor with shape [batch_size, height, width, depth]. - padding: Padding mode (SAME, VALID, REFLECT, or SYMMETRIC). - kernel_shape: Shape of convolution kernel that will be applied. - strides: Convolution stride that will be used. - - Returns: - x and padding after adjustments for REFLECT and SYMMETRIC. - """ - # For 1x1 convolution, all padding modes are the same. - if np.all(kernel_shape[:2] == 1): - return x, 'VALID' - - if padding == 'REFLECT' or padding == 'SYMMETRIC': - # We manually compute the number of paddings as if 'SAME'. - # From Tensorflow kernel, the formulas are as follows. - # output_shape = ceil(input_shape / strides) - # paddings = (output_shape - 1) * strides + filter_size - input_shape - # Let x, y, s be a shorthand notations for input_shape, output_shape, and - # strides, respectively. Let (x - 1) = sn + r where 0 <= r < s. Note that - # y - 1 = ceil(x / s) - 1 = floor((x - 1) / s) = n - # provided that x > 0. Therefore - # paddings = n * s + filter_size - (sn + r + 1) - # = filter_size - r - 1. 
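-    # As a concrete check: with input width x = 10, stride s = 2 and
-    # filter_size 5, r = (10 - 1) % 2 = 1 and paddings = 5 - 1 - 1 = 3,
-    # split below as 1 column before and 2 columns after.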
- input_shape = x.get_shape() # shape at graph construction time - img_shape = tf.shape(x)[1:3] # image shape (no batch) at run time - remainder = tf.mod(img_shape - 1, strides[1:3]) - pad_sizes = kernel_shape[:2] - remainder - 1 - - pad_rows = pad_sizes[0] - pad_cols = pad_sizes[1] - pad = tf.stack([[0, 0], tf.stack([pad_rows // 2, (pad_rows + 1) // 2]), - tf.stack([pad_cols // 2, (pad_cols + 1) // 2]), [0, 0]]) - - # Manually pad the input and switch the padding mode to 'VALID'. - x = tf.pad(x, pad, mode=padding) - x.set_shape([input_shape[0], x.get_shape()[1], - x.get_shape()[2], input_shape[3]]) - padding = 'VALID' - - return x, padding - - -class PassThrough(block_base.BlockBase): - """A dummy transform block that does nothing.""" - - def __init__(self): - # Pass an empty string to disable name scoping. - super(PassThrough, self).__init__(name='') - - def _Apply(self, inp): - return inp - - @property - def initialized(self): - """Always returns True.""" - return True - - -class Bias(object): - """An initialization helper class for BiasAdd block below.""" - - def __init__(self, value=0): - self.value = value - - -class BiasAdd(block_base.BlockBase): - """A tf.nn.bias_add wrapper. - - This wrapper may act as a PassThrough block depending on the initializer - provided, to make easier optional bias applications in NN blocks, etc. - See __init__() for the details. - """ - - def __init__(self, initializer=Bias(0), name=None): - """Initializes Bias block. - - |initializer| parameter have two special cases. - - 1. If initializer is None, then this block works as a PassThrough. - 2. If initializer is a Bias class object, then tf.constant_initializer is - used with the stored value. - - Args: - initializer: An initializer for the bias variable. - name: Name of this block. - """ - super(BiasAdd, self).__init__(name) - - with self._BlockScope(): - if isinstance(initializer, Bias): - self._initializer = tf.constant_initializer(value=initializer.value) - else: - self._initializer = initializer - - self._bias = None - - def _Apply(self, x): - if not self._bias: - init = self._initializer([int(x.get_shape()[-1])], x.dtype) - self._bias = self.NewVar(init) - - return tf.nn.bias_add(x, self._bias) - - def CreateWeightLoss(self): - return [] - - -class LinearBase(block_base.BlockBase): - """A matmul wrapper. - - Returns input * W, where matrix W can be customized through derivation. - """ - - def __init__(self, depth, name=None): - super(LinearBase, self).__init__(name) - - with self._BlockScope(): - self._depth = depth - self._matrix = None - - def _CreateKernel(self, shape, dtype): - raise NotImplementedError('This method must be sub-classed.') - - def _Apply(self, x): - if not self._matrix: - shape = [int(x.get_shape()[-1]), self._depth] - self._matrix = self._CreateKernel(shape, x.dtype) - - return tf.matmul(x, self._matrix) - - -class Linear(LinearBase): - """A matmul wrapper. - - Returns input * W, where matrix W is learned. - """ - - def __init__(self, - depth, - initializer=block_util.RsqrtInitializer(), - name=None): - super(Linear, self).__init__(depth, name) - - with self._BlockScope(): - self._initializer = initializer - - def _CreateKernel(self, shape, dtype): - init = self._initializer(shape, dtype) - return self.NewVar(init) - - -class NN(block_base.BlockBase): - """A neural network layer wrapper. - - Returns act(input * W + b), where matrix W, bias b are learned, and act is an - optional activation function (i.e., nonlinearity). - - This transform block can handle multiple inputs. 
If x_1, x_2, ..., x_m are - the inputs, then returns act(x_1 * W_1 + ... + x_m * W_m + b). - - Attributes: - nunits: The dimension of the output. - """ - - def __init__(self, - depth, - bias=Bias(0), - act=None, # e.g., tf.nn.relu - initializer=block_util.RsqrtInitializer(), - linear_block_factory=(lambda d, i: Linear(d, initializer=i)), - name=None): - """Initializes NN block. - - Args: - depth: The depth of the output. - bias: An initializer for the bias, or a Bias class object. If None, there - will be no bias term for this NN block. See BiasAdd block. - act: Optional activation function. If None, no activation is applied. - initializer: The initialization method for the matrix weights. - linear_block_factory: A function used to create a linear block. - name: The name of this block. - """ - super(NN, self).__init__(name) - - with self._BlockScope(): - self._linear_block_factory = linear_block_factory - self._depth = depth - self._initializer = initializer - self._matrices = None - - self._bias = BiasAdd(bias) if bias else PassThrough() - self._act = act if act else PassThrough() - - def _Apply(self, *args): - if not self._matrices: - self._matrices = [ - self._linear_block_factory(self._depth, self._initializer) - for _ in args] - - if len(self._matrices) != len(args): - raise ValueError('{} expected {} inputs, but observed {} inputs'.format( - self.name, len(self._matrices), len(args))) - - if len(args) > 1: - y = tf.add_n([m(x) for m, x in zip(self._matrices, args)]) - else: - y = self._matrices[0](args[0]) - - return self._act(self._bias(y)) - - -class Conv2DBase(block_base.BlockBase): - """A tf.nn.conv2d operator.""" - - def __init__(self, depth, filter_size, strides, padding, - bias=None, act=None, atrous_rate=None, conv=tf.nn.conv2d, - name=None): - """Initializes a Conv2DBase block. - - Arguments: - depth: The output depth of the block (i.e. #filters); if negative, the - output depth will be set to be the same as the input depth. - filter_size: The size of the 2D filter. If it's specified as an integer, - it's going to create a square filter. Otherwise, this is a tuple - specifying the height x width of the filter. - strides: A tuple specifying the y and x stride. - padding: One of the valid padding modes allowed by tf.nn.conv2d, or - 'REFLECT'/'SYMMETRIC' for mirror padding. - bias: An initializer for the bias, or a Bias class object. If None, there - will be no bias in this block. See BiasAdd block. - act: Optional activation function applied to the output. - atrous_rate: optional input rate for ATrous convolution. If not None, this - will be used and the strides will be ignored. - conv: The convolution function to use (e.g. tf.nn.conv2d). - name: The name for this conv2d op. - """ - super(Conv2DBase, self).__init__(name) - - with self._BlockScope(): - self._act = act if act else PassThrough() - self._bias = BiasAdd(bias) if bias else PassThrough() - - self._kernel_shape = np.zeros((4,), dtype=np.int32) - self._kernel_shape[:2] = filter_size - self._kernel_shape[3] = depth - - self._strides = np.ones((4,), dtype=np.int32) - self._strides[1:3] = strides - self._strides = list(self._strides) - - self._padding = padding - - self._kernel = None - self._conv = conv - - self._atrous_rate = atrous_rate - - def _CreateKernel(self, shape, dtype): - raise NotImplementedError('This method must be sub-classed') - - def _Apply(self, x): - """Apply the self._conv op. - - Arguments: - x: input tensor. It needs to be a 4D tensor of the form - [batch, height, width, channels]. 
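-        Note that for REFLECT and SYMMETRIC padding, x is first padded
-        explicitly with tf.pad (see HandleConvPaddingModes) and the
-        convolution then runs with VALID padding.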
- Returns: - The output of the convolution of x with the current convolutional - kernel. - Raises: - ValueError: if number of channels is not defined at graph construction. - """ - input_shape = x.get_shape().with_rank(4) - input_shape[3:].assert_is_fully_defined() # channels must be defined - if self._kernel is None: - assert self._kernel_shape[2] == 0, self._kernel_shape - self._kernel_shape[2] = input_shape[3].value - if self._kernel_shape[3] < 0: - # Make output depth be the same as input depth. - self._kernel_shape[3] = self._kernel_shape[2] - self._kernel = self._CreateKernel(self._kernel_shape, x.dtype) - - x, padding = HandleConvPaddingModes( - x, self._padding, self._kernel_shape, self._strides) - if self._atrous_rate is None: - x = self._conv(x, self._kernel, strides=self._strides, padding=padding) - else: - x = self._conv(x, self._kernel, rate=self._atrous_rate, padding=padding) - - if self._padding != 'VALID': - # Manually update shape. Known shape information can be lost by tf.pad(). - height = (1 + (input_shape[1].value - 1) // self._strides[1] - if input_shape[1].value else None) - width = (1 + (input_shape[2].value - 1) // self._strides[2] - if input_shape[2].value else None) - shape = x.get_shape() - x.set_shape([shape[0], height, width, shape[3]]) - - return self._act(self._bias(x)) - - -class Conv2D(Conv2DBase): - """A tf.nn.conv2d operator.""" - - def __init__(self, depth, filter_size, strides, padding, - bias=None, act=None, initializer=None, name=None): - """Initializes a Conv2D block. - - Arguments: - depth: The output depth of the block (i.e., #filters) - filter_size: The size of the 2D filter. If it's specified as an integer, - it's going to create a square filter. Otherwise, this is a tuple - specifying the height x width of the filter. - strides: A tuple specifying the y and x stride. - padding: One of the valid padding modes allowed by tf.nn.conv2d, or - 'REFLECT'/'SYMMETRIC' for mirror padding. - bias: An initializer for the bias, or a Bias class object. If None, there - will be no bias in this block. See BiasAdd block. - act: Optional activation function applied to the output. - initializer: Optional initializer for weights. - name: The name for this conv2d op. - """ - super(Conv2D, self).__init__(depth, filter_size, strides, padding, bias, - act, conv=tf.nn.conv2d, name=name) - - with self._BlockScope(): - if initializer is None: - initializer = block_util.RsqrtInitializer(dims=(0, 1, 2)) - self._initializer = initializer - - def _CreateKernel(self, shape, dtype): - return self.NewVar(self._initializer(shape, dtype)) diff --git a/research/compression/entropy_coder/lib/blocks_std_test.py b/research/compression/entropy_coder/lib/blocks_std_test.py deleted file mode 100644 index 328ebc9d2..000000000 --- a/research/compression/entropy_coder/lib/blocks_std_test.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests for basic tensorflow blocks_std.""" - -from __future__ import division -from __future__ import unicode_literals - -import math -import os - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import blocks_std - - -def _NumpyConv2D(x, f, strides, padding, rate=1): - assert strides[0] == 1 and strides[3] == 1, strides - - if rate > 1: - f_shape = f.shape - expand_f = np.zeros([f_shape[0], ((f_shape[1] - 1) * rate + 1), - f_shape[2], f_shape[3]]) - expand_f[:, [y * rate for y in range(f_shape[1])], :, :] = f - f = np.zeros([((f_shape[0] - 1) * rate + 1), expand_f.shape[1], - f_shape[2], f_shape[3]]) - f[[y * rate for y in range(f_shape[0])], :, :, :] = expand_f - - if padding != 'VALID': - assert x.shape[1] > 0 and x.shape[2] > 0, x.shape - # Compute the number of padded rows and cols. - # See Conv2D block comments for a math explanation. - remainder = ((x.shape[1] - 1) % strides[1], (x.shape[2] - 1) % strides[2]) - pad_rows = f.shape[0] - remainder[0] - 1 - pad_cols = f.shape[1] - remainder[1] - 1 - pad = ((0, 0), - (pad_rows // 2, (pad_rows + 1) // 2), - (pad_cols // 2, (pad_cols + 1) // 2), - (0, 0)) - - # Pad the input using numpy.pad(). - mode = None - if padding == 'SAME': - mode = str('constant') - if padding == 'REFLECT': - mode = str('reflect') - if padding == 'SYMMETRIC': - mode = str('symmetric') - x = np.pad(x, pad, mode=mode) - - # Since x is now properly padded, proceed as if padding mode is VALID. - x_window = np.empty( - (x.shape[0], - int(math.ceil((x.shape[1] - f.shape[0] + 1) / strides[1])), - int(math.ceil((x.shape[2] - f.shape[1] + 1) / strides[2])), - np.prod(f.shape[:3]))) - - # The output at pixel location (i, j) is the result of linear transformation - # applied to the window whose top-left corner is at - # (i * row_stride, j * col_stride). 
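-  # In other words, this is an im2col construction: each window is flattened
-  # into a row, so the whole convolution reduces to the single matrix product
-  # computed by tensordot below.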
- for i in xrange(x_window.shape[1]): - k = i * strides[1] - for j in xrange(x_window.shape[2]): - l = j * strides[2] - x_window[:, i, j, :] = x[:, - k:(k + f.shape[0]), - l:(l + f.shape[1]), - :].reshape((x_window.shape[0], -1)) - - y = np.tensordot(x_window, f.reshape((-1, f.shape[3])), axes=1) - return y - - -class BlocksStdTest(tf.test.TestCase): - - def CheckUnary(self, y, op_type): - self.assertEqual(op_type, y.op.type) - self.assertEqual(1, len(y.op.inputs)) - return y.op.inputs[0] - - def CheckBinary(self, y, op_type): - self.assertEqual(op_type, y.op.type) - self.assertEqual(2, len(y.op.inputs)) - return y.op.inputs - - def testPassThrough(self): - p = blocks_std.PassThrough() - x = tf.placeholder(dtype=tf.float32, shape=[1]) - self.assertIs(p(x), x) - - def CheckBiasAdd(self, y, b): - x, u = self.CheckBinary(y, 'BiasAdd') - self.assertIs(u, b._bias.value()) - self.assertEqual(x.dtype, u.dtype.base_dtype) - return x - - def testBiasAdd(self): - b = blocks_std.BiasAdd() - x = tf.placeholder(dtype=tf.float32, shape=[4, 8]) - y = b(x) - self.assertEqual(b._bias.get_shape(), x.get_shape()[-1:]) - self.assertIs(x, self.CheckBiasAdd(y, b)) - - def testBiasRankTest(self): - b = blocks_std.BiasAdd() - x = tf.placeholder(dtype=tf.float32, shape=[10]) - with self.assertRaises(ValueError): - b(x) - - def CheckLinear(self, y, m): - x, w = self.CheckBinary(y, 'MatMul') - self.assertIs(w, m._matrix.value()) - self.assertEqual(x.dtype, w.dtype.base_dtype) - return x - - def testLinear(self): - m = blocks_std.Linear(10) - x = tf.placeholder(dtype=tf.float32, shape=[8, 9]) - y = m(x) - self.assertEqual(m._matrix.get_shape(), [9, 10]) - self.assertIs(x, self.CheckLinear(y, m)) - - def testLinearShared(self): - # Create a linear map which is applied twice on different inputs - # (i.e. the weights of the map are shared). - linear_map = blocks_std.Linear(6) - x1 = tf.random_normal(shape=[1, 5]) - x2 = tf.random_normal(shape=[1, 5]) - xs = x1 + x2 - - # Apply the transform with the same weights. - y1 = linear_map(x1) - y2 = linear_map(x2) - ys = linear_map(xs) - - with self.test_session() as sess: - # Initialize all the variables of the graph. 
- tf.global_variables_initializer().run() - - y1_res, y2_res, ys_res = sess.run([y1, y2, ys]) - self.assertAllClose(y1_res + y2_res, ys_res) - - def CheckNN(self, y, nn, act=None): - if act: - pre_act = self.CheckUnary(y, act) - else: - pre_act = y - - if not isinstance(nn._bias, blocks_std.PassThrough): - pre_bias = self.CheckBiasAdd(pre_act, nn._bias) - else: - pre_bias = pre_act - - if len(nn._matrices) > 1: - self.assertEqual('AddN', pre_bias.op.type) - pre_bias = pre_bias.op.inputs - else: - pre_bias = [pre_bias] - - self.assertEqual(len(pre_bias), len(nn._matrices)) - return [self.CheckLinear(u, m) for u, m in zip(pre_bias, nn._matrices)] - - def testNNWithoutActWithoutBias(self): - nn = blocks_std.NN(10, act=None, bias=None) - x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) - y = nn(x) - self.assertIs(x, self.CheckNN(y, nn)[0]) - - def testNNWithoutBiasWithAct(self): - nn = blocks_std.NN(10, act=tf.nn.relu, bias=None) - x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) - y = nn(x) - self.assertIs(x, self.CheckNN(y, nn, 'Relu')[0]) - - def testNNWithBiasWithoutAct(self): - nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=None) - x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) - y = nn(x) - self.assertIs(x, self.CheckNN(y, nn)[0]) - - def testNNWithBiasWithAct(self): - nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.square) - x = tf.placeholder(dtype=tf.float32, shape=[5, 7]) - y = nn(x) - self.assertIs(x, self.CheckNN(y, nn, 'Square')[0]) - - def testNNMultipleInputs(self): - nn = blocks_std.NN(10, bias=blocks_std.Bias(0), act=tf.tanh) - x = [tf.placeholder(dtype=tf.float32, shape=[5, 7]), - tf.placeholder(dtype=tf.float32, shape=[5, 3]), - tf.placeholder(dtype=tf.float32, shape=[5, 5])] - y = nn(*x) - xs = self.CheckNN(y, nn, 'Tanh') - self.assertEqual(len(x), len(xs)) - for u, v in zip(x, xs): - self.assertIs(u, v) - - def testConv2DSAME(self): - np.random.seed(142536) - - x_shape = [4, 16, 11, 5] - f_shape = [4, 3, 5, 6] - strides = [1, 2, 2, 1] - padding = 'SAME' - - conv = blocks_std.Conv2D(depth=f_shape[-1], - filter_size=f_shape[0:2], - strides=strides[1:3], - padding=padding, - act=None, - bias=None) - x_value = np.random.normal(size=x_shape) - x = tf.convert_to_tensor(x_value, dtype=tf.float32) - y = conv(x) - - with self.test_session(): - tf.global_variables_initializer().run() - f_value = conv._kernel.eval() - y_value = y.eval() - - y_expected = _NumpyConv2D(x_value, f_value, - strides=strides, padding=padding) - self.assertAllClose(y_expected, y_value) - - def testConv2DValid(self): - np.random.seed(253647) - - x_shape = [4, 11, 12, 5] - f_shape = [5, 2, 5, 5] - strides = [1, 2, 2, 1] - padding = 'VALID' - - conv = blocks_std.Conv2D(depth=f_shape[-1], - filter_size=f_shape[0:2], - strides=strides[1:3], - padding=padding, - act=None, - bias=None) - x_value = np.random.normal(size=x_shape) - x = tf.convert_to_tensor(x_value, dtype=tf.float32) - y = conv(x) - - with self.test_session(): - tf.global_variables_initializer().run() - f_value = conv._kernel.eval() - y_value = y.eval() - - y_expected = _NumpyConv2D(x_value, f_value, - strides=strides, padding=padding) - self.assertAllClose(y_expected, y_value) - - def testConv2DSymmetric(self): - np.random.seed(364758) - - x_shape = [4, 10, 12, 6] - f_shape = [3, 4, 6, 5] - strides = [1, 1, 1, 1] - padding = 'SYMMETRIC' - - conv = blocks_std.Conv2D(depth=f_shape[-1], - filter_size=f_shape[0:2], - strides=strides[1:3], - padding=padding, - act=None, - bias=None) - x_value = np.random.normal(size=x_shape) - x = 
tf.convert_to_tensor(x_value, dtype=tf.float32) - y = conv(x) - - with self.test_session(): - tf.global_variables_initializer().run() - f_value = conv._kernel.eval() - y_value = y.eval() - - y_expected = _NumpyConv2D(x_value, f_value, - strides=strides, padding=padding) - self.assertAllClose(y_expected, y_value) - - def testConv2DReflect(self): - np.random.seed(768798) - - x_shape = [4, 10, 12, 6] - f_shape = [3, 4, 6, 5] - strides = [1, 2, 2, 1] - padding = 'REFLECT' - - conv = blocks_std.Conv2D(depth=f_shape[-1], - filter_size=f_shape[0:2], - strides=strides[1:3], - padding=padding, - act=None, - bias=None) - x_value = np.random.normal(size=x_shape) - x = tf.convert_to_tensor(x_value, dtype=tf.float32) - y = conv(x) - - with self.test_session(): - tf.global_variables_initializer().run() - f_value = conv._kernel.eval() - y_value = y.eval() - - y_expected = _NumpyConv2D(x_value, f_value, - strides=strides, padding=padding) - self.assertAllClose(y_expected, y_value) - - def testConv2DBias(self): - input_shape = [19, 14, 14, 64] - filter_shape = [3, 7, 64, 128] - strides = [1, 2, 2, 1] - output_shape = [19, 6, 4, 128] - - conv = blocks_std.Conv2D(depth=filter_shape[-1], - filter_size=filter_shape[0:2], - strides=strides[1:3], - padding='VALID', - act=None, - bias=blocks_std.Bias(1)) - x = tf.placeholder(dtype=tf.float32, shape=input_shape) - - y = conv(x) - self.CheckBiasAdd(y, conv._bias) - self.assertEqual(output_shape, y.get_shape().as_list()) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/compression/entropy_coder/model/__init__.py b/research/compression/entropy_coder/model/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/compression/entropy_coder/model/entropy_coder_model.py b/research/compression/entropy_coder/model/entropy_coder_model.py deleted file mode 100644 index 67f7eb5bc..000000000 --- a/research/compression/entropy_coder/model/entropy_coder_model.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Entropy coder model.""" - - -class EntropyCoderModel(object): - """Entropy coder model.""" - - def __init__(self): - # Loss used for training the model. - self.loss = None - - # Tensorflow op to run to train the model. - self.train_op = None - - # Tensor corresponding to the average code length of the input bit field - # tensor. The average code length is a number of output bits per input bit. - # To get an effective compression, this number should be between 0.0 - # and 1.0 (1.0 corresponds to no compression). - self.average_code_length = None - - def Initialize(self, global_step, optimizer, config_string): - raise NotImplementedError() - - def BuildGraph(self, input_codes): - """Build the Tensorflow graph corresponding to the entropy coder model. 
-
-    Args:
-      input_codes: Tensor of size: batch_size x height x width x bit_depth
-        corresponding to the codes to compress.
-        The input codes are {-1, +1} codes.
-    """
-    # TODO:
-    # - consider switching to {0, 1} codes.
-    # - consider passing an extra tensor which gives, for each (b, y, x)
-    #   location, the actual depth (which would allow using more or fewer
-    #   bits for each (y, x) location).
-    raise NotImplementedError()
-
-  def GetConfigStringForUnitTest(self):
-    """Returns a default model configuration to be used for unit tests."""
-    return None
diff --git a/research/compression/entropy_coder/model/model_factory.py b/research/compression/entropy_coder/model/model_factory.py
deleted file mode 100644
index e6f9902f3..000000000
--- a/research/compression/entropy_coder/model/model_factory.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Entropy coder model registrar."""
-
-
-class ModelFactory(object):
-  """Factory of encoder/decoder models."""
-
-  def __init__(self):
-    self._model_dictionary = dict()
-
-  def RegisterModel(self,
-                    entropy_coder_model_name,
-                    entropy_coder_model_factory):
-    self._model_dictionary[entropy_coder_model_name] = (
-        entropy_coder_model_factory)
-
-  def CreateModel(self, model_name):
-    current_model_factory = self._model_dictionary[model_name]
-    return current_model_factory()
-
-  def GetAvailableModels(self):
-    return self._model_dictionary.keys()
-
-
-_model_registry = ModelFactory()
-
-
-def GetModelRegistry():
-  return _model_registry
-
-
-class RegisterEntropyCoderModel(object):
-  """Decorator that registers an entropy coder model factory by name."""
-
-  def __init__(self, model_name):
-    self._model_name = model_name
-
-  def __call__(self, f):
-    _model_registry.RegisterModel(self._model_name, f)
-    return f
diff --git a/research/compression/entropy_coder/progressive/__init__.py b/research/compression/entropy_coder/progressive/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/compression/entropy_coder/progressive/progressive.py b/research/compression/entropy_coder/progressive/progressive.py
deleted file mode 100644
index 7b03a07db..000000000
--- a/research/compression/entropy_coder/progressive/progressive.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Code probability model used for entropy coding.""" - -import json - -from six.moves import xrange -import tensorflow as tf - -from entropy_coder.lib import blocks -from entropy_coder.model import entropy_coder_model -from entropy_coder.model import model_factory - -# pylint: disable=not-callable - - -class BrnnPredictor(blocks.BlockBase): - """BRNN prediction applied on one layer.""" - - def __init__(self, code_depth, name=None): - super(BrnnPredictor, self).__init__(name) - - with self._BlockScope(): - hidden_depth = 2 * code_depth - - # What is coming from the previous layer/iteration - # is going through a regular Conv2D layer as opposed to the binary codes - # of the current layer/iteration which are going through a masked - # convolution. - self._adaptation0 = blocks.RasterScanConv2D( - hidden_depth, [7, 7], [1, 1], 'SAME', - strict_order=True, - bias=blocks.Bias(0), act=tf.tanh) - self._adaptation1 = blocks.Conv2D( - hidden_depth, [3, 3], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh) - self._predictor = blocks.CompositionOperator([ - blocks.LineOperator( - blocks.RasterScanConv2DLSTM( - depth=hidden_depth, - filter_size=[1, 3], - hidden_filter_size=[1, 3], - strides=[1, 1], - padding='SAME')), - blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh), - blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh) - ]) - - def _Apply(self, x, s): - # Code estimation using both: - # - the state from the previous iteration/layer, - # - the binary codes that are before in raster scan order. - h = tf.concat(values=[self._adaptation0(x), self._adaptation1(s)], axis=3) - - estimated_codes = self._predictor(h) - - return estimated_codes - - -class LayerPrediction(blocks.BlockBase): - """Binary code prediction for one layer.""" - - def __init__(self, layer_count, code_depth, name=None): - super(LayerPrediction, self).__init__(name) - - self._layer_count = layer_count - - # No previous layer. - self._layer_state = None - self._current_layer = 0 - - with self._BlockScope(): - # Layers used to do the conditional code prediction. - self._brnn_predictors = [] - for _ in xrange(layer_count): - self._brnn_predictors.append(BrnnPredictor(code_depth)) - - # Layers used to generate the input of the LSTM operating on the - # iteration/depth domain. - hidden_depth = 2 * code_depth - self._state_blocks = [] - for _ in xrange(layer_count): - self._state_blocks.append(blocks.CompositionOperator([ - blocks.Conv2D( - hidden_depth, [3, 3], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh), - blocks.Conv2D( - code_depth, [3, 3], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh) - ])) - - # Memory of the RNN is equivalent to the size of 2 layers of binary - # codes. - hidden_depth = 2 * code_depth - self._layer_rnn = blocks.CompositionOperator([ - blocks.Conv2DLSTM( - depth=hidden_depth, - filter_size=[1, 1], - hidden_filter_size=[1, 1], - strides=[1, 1], - padding='SAME'), - blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh), - blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME', - bias=blocks.Bias(0), act=tf.tanh) - ]) - - def _Apply(self, x): - assert self._current_layer < self._layer_count - - # Layer state is set to 0 when there is no previous iteration. 
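-    # (tf.zeros_like below gives the initial state the same shape as the
-    # incoming codes x.)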
- if self._layer_state is None: - self._layer_state = tf.zeros_like(x, dtype=tf.float32) - - # Code estimation using both: - # - the state from the previous iteration/layer, - # - the binary codes that are before in raster scan order. - estimated_codes = self._brnn_predictors[self._current_layer]( - x, self._layer_state) - - # Compute the updated layer state. - h = self._state_blocks[self._current_layer](x) - self._layer_state = self._layer_rnn(h) - self._current_layer += 1 - - return estimated_codes - - -class ProgressiveModel(entropy_coder_model.EntropyCoderModel): - """Progressive BRNN entropy coder model.""" - - def __init__(self): - super(ProgressiveModel, self).__init__() - - def Initialize(self, global_step, optimizer, config_string): - if config_string is None: - raise ValueError('The progressive model requires a configuration.') - config = json.loads(config_string) - if 'coded_layer_count' not in config: - config['coded_layer_count'] = 0 - - self._config = config - self._optimizer = optimizer - self._global_step = global_step - - def BuildGraph(self, input_codes): - """Build the graph corresponding to the progressive BRNN model.""" - layer_depth = self._config['layer_depth'] - layer_count = self._config['layer_count'] - - code_shape = input_codes.get_shape() - code_depth = code_shape[-1].value - if self._config['coded_layer_count'] > 0: - prefix_depth = self._config['coded_layer_count'] * layer_depth - if code_depth < prefix_depth: - raise ValueError('Invalid prefix depth: {} VS {}'.format( - prefix_depth, code_depth)) - input_codes = input_codes[:, :, :, :prefix_depth] - - code_shape = input_codes.get_shape() - code_depth = code_shape[-1].value - if code_depth % layer_depth != 0: - raise ValueError( - 'Code depth must be a multiple of the layer depth: {} vs {}'.format( - code_depth, layer_depth)) - code_layer_count = code_depth // layer_depth - if code_layer_count > layer_count: - raise ValueError('Input codes have too many layers: {}, max={}'.format( - code_layer_count, layer_count)) - - # Block used to estimate binary codes. - layer_prediction = LayerPrediction(layer_count, layer_depth) - - # Block used to compute code lengths. - code_length_block = blocks.CodeLength() - - # Loop over all the layers. - code_length = [] - code_layers = tf.split( - value=input_codes, num_or_size_splits=code_layer_count, axis=3) - for k in xrange(code_layer_count): - x = code_layers[k] - predicted_x = layer_prediction(x) - # Saturate the prediction to avoid infinite code length. - epsilon = 0.001 - predicted_x = tf.clip_by_value( - predicted_x, -1 + epsilon, +1 - epsilon) - code_length.append(code_length_block( - blocks.ConvertSignCodeToZeroOneCode(x), - blocks.ConvertSignCodeToZeroOneCode(predicted_x))) - tf.summary.scalar('code_length_layer_{:02d}'.format(k), code_length[-1]) - code_length = tf.stack(code_length) - self.loss = tf.reduce_mean(code_length) - tf.summary.scalar('loss', self.loss) - - # Loop over all the remaining layers just to make sure they are - # instantiated. Otherwise, loading model params could fail. - dummy_x = tf.zeros_like(code_layers[0]) - for _ in xrange(layer_count - code_layer_count): - dummy_predicted_x = layer_prediction(dummy_x) - - # Average bitrate over total_line_count. 
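-    # (Here that is simply the mean code length across the coded layers.)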
-    self.average_code_length = tf.reduce_mean(code_length)
-
-    if self._optimizer:
-      optim_op = self._optimizer.minimize(self.loss,
-                                          global_step=self._global_step)
-      block_updates = blocks.CreateBlockUpdates()
-      if block_updates:
-        with tf.get_default_graph().control_dependencies([optim_op]):
-          self.train_op = tf.group(*block_updates)
-      else:
-        self.train_op = optim_op
-    else:
-      self.train_op = None
-
-  def GetConfigStringForUnitTest(self):
-    s = '{\n'
-    s += '"layer_depth": 1,\n'
-    s += '"layer_count": 8\n'
-    s += '}\n'
-    return s
-
-
-@model_factory.RegisterEntropyCoderModel('progressive')
-def CreateProgressiveModel():
-  return ProgressiveModel()
diff --git a/research/compression/image_encoder/README.md b/research/compression/image_encoder/README.md
deleted file mode 100644
index a47da977a..000000000
--- a/research/compression/image_encoder/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
-# Image Compression with Neural Networks
-
-This is a [TensorFlow](http://www.tensorflow.org/) model for compressing and
-decompressing images using an already trained Residual GRU model as described
-in [Full Resolution Image Compression with Recurrent Neural Networks](https://arxiv.org/abs/1608.05148).
-Please consult the paper for more details on the architecture and compression
-results.
-
-This code allows you to perform lossy compression with a model already
-trained for compression. It does not currently contain the Entropy Coding
-portions of our paper.
-
-
-## Prerequisites
-The only software requirement for running the encoder and decoder is having
-TensorFlow installed. You will also need to [download](http://download.tensorflow.org/models/compression_residual_gru-2016-08-23.tar.gz)
-and extract the model residual_gru.pb.
-
-If you want to generate the perceptual similarity metric MS-SSIM, you will
-also need to [Install SciPy](https://www.scipy.org/install.html).
-
-## Encoding
-The Residual GRU network is fully convolutional, but requires the image's
-height and width in pixels to be a multiple of 32. There is a 768x1024 image
-in this folder called example.png if one is needed for testing. We also rely
-on TensorFlow's built-in decoding ops, which support only PNG and JPEG at the
-time of release.
-
-To encode an image, simply run the following command:
-
-`python encoder.py --input_image=/your/image/here.png
---output_codes=output_codes.npz --iteration=15
---model=/path/to/model/residual_gru.pb
-`
-
-The iteration parameter specifies the lossy quality to target for compression.
-The quality can be [0-15], where 0 corresponds to a target of 1/8 bits per
-pixel (bpp) and every increment results in an additional 1/8 bpp.
-
-| Iteration | BPP | Compression Ratio |
-|---: |---: |---: |
-|0 | 0.125 | 192:1|
-|1 | 0.250 | 96:1|
-|2 | 0.375 | 64:1|
-|3 | 0.500 | 48:1|
-|4 | 0.625 | 38.4:1|
-|5 | 0.750 | 32:1|
-|6 | 0.875 | 27.4:1|
-|7 | 1.000 | 24:1|
-|8 | 1.125 | 21.3:1|
-|9 | 1.250 | 19.2:1|
-|10 | 1.375 | 17.4:1|
-|11 | 1.500 | 16:1|
-|12 | 1.625 | 14.7:1|
-|13 | 1.750 | 13.7:1|
-|14 | 1.875 | 12.8:1|
-|15 | 2.000 | 12:1|
-
-The output_codes file contains the numpy shape and a flattened, bit-packed
-array of the codes. These can be inspected in Python using numpy.load(), as
-shown in the sketch after the Decoding section.
-
-
-## Decoding
-After generating codes for an image, the lossy reconstructions for that image
-can be generated as follows:
-
-`python decoder.py --input_codes=codes.npz --output_directory=/tmp/decoded/
---model=residual_gru.pb`
-
-The output_directory will contain images decoded at each quality level.
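-
-As a minimal sketch of how to inspect a codes file (assuming it was produced
-by encoder.py in this directory; the unpacking mirrors what decoder.py does):
-
-`import numpy as np
-
-loaded = np.load('output_codes.npz')
-# 'codes' is a flat, bit-packed uint8 array; 'shape' is the shape of the
-# code tensor before packing.
-unpacked = np.reshape(
-    np.unpackbits(loaded['codes'])[:np.prod(loaded['shape'])],
-    loaded['shape'])
-# Recover the {-1, +1} codes the network emits.
-sign_codes = unpacked.astype(np.float32) * 2 - 1`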
-
-
-## Comparing Similarity
-One of our primary metrics for comparing how similar two images are
-is MS-SSIM.
-
-To generate these metrics on your images you can run:
-`python msssim.py --original_image=/path/to/your/image.png
---compared_image=/tmp/decoded/image_15.png`
-
-
-## Results
-CSV results containing the post-entropy bitrates and MS-SSIM over the Kodak
-dataset are available for reference. Each row of the CSV corresponds to one
-of the Kodak images, in dataset order (1-24). Each column corresponds to one
-iteration of the model (1-16).
-
-[Post Entropy Bitrates](https://storage.googleapis.com/compression-ml/residual_gru_results/bitrate.csv)
-
-[MS-SSIM](https://storage.googleapis.com/compression-ml/residual_gru_results/msssim.csv)
-
-
-## FAQ
-
-#### How do I train my own compression network?
-We currently don't provide the code to build and train a compression
-graph from scratch.
-
-#### I get an InvalidArgumentError: Incompatible shapes.
-This is usually because our network only supports images whose height and
-width are both divisible by 32 pixels. Try padding your images to 32-pixel
-boundaries.
-
-
-## Contact Info
-Model repository maintained by Nick Johnston ([nmjohn](https://github.com/nmjohn)).
diff --git a/research/compression/image_encoder/decoder.py b/research/compression/image_encoder/decoder.py
deleted file mode 100644
index 75bc18cad..000000000
--- a/research/compression/image_encoder/decoder.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-r"""Neural Network Image Compression Decoder.
-
-Decompresses an image from the numpy npz format generated by the encoder.
-
-Example usage:
-python decoder.py --input_codes=output_codes.pkl --iteration=15 \
---output_directory=/tmp/compression_output/ --model=residual_gru.pb
-"""
-import io
-import os
-
-import numpy as np
-import tensorflow as tf
-
-tf.flags.DEFINE_string('input_codes', None, 'Location of binary code file.')
-tf.flags.DEFINE_integer('iteration', -1, 'The max quality level of '
-                        'the images to output. Use -1 to infer from loaded '
-                        'codes.')
-tf.flags.DEFINE_string('output_directory', None, 'Directory to save decoded '
-                       'images.')
-tf.flags.DEFINE_string('model', None, 'Location of compression model.')
-
-FLAGS = tf.flags.FLAGS
-
-
-def get_input_tensor_names():
-  name_list = ['GruBinarizer/SignBinarizer/Sign:0']
-  for i in range(1, 16):
-    name_list.append('GruBinarizer/SignBinarizer/Sign_{}:0'.format(i))
-  return name_list
-
-
-def get_output_tensor_names():
-  return ['loop_{0:02d}/add:0'.format(i) for i in range(0, 16)]
-
-
-def main(_):
-  if (FLAGS.input_codes is None or FLAGS.output_directory is None or
-      FLAGS.model is None):
-    print('\nUsage: python decoder.py --input_codes=output_codes.pkl '
-          '--iteration=15 --output_directory=/tmp/compression_output/ '
-          '--model=residual_gru.pb\n\n')
-    return
-
-  if FLAGS.iteration < -1 or FLAGS.iteration > 15:
-    print('\n--iteration must be between 0 and 15 inclusive, or -1 to infer '
-          'from file.\n')
-    return
-  iteration = FLAGS.iteration
-
-  if not tf.gfile.Exists(FLAGS.output_directory):
-    tf.gfile.MkDir(FLAGS.output_directory)
-
-  if not tf.gfile.Exists(FLAGS.input_codes):
-    print('\nInput codes not found.\n')
-    return
-
-  contents = ''
-  with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:
-    contents = code_file.read()
-  loaded_codes = np.load(io.BytesIO(contents))
-  # Both the bit-packed codes and their original shape must be present.
-  assert set(['codes', 'shape']) <= set(loaded_codes.files)
-  loaded_shape = loaded_codes['shape']
-  loaded_array = loaded_codes['codes']
-
-  # Unpack and recover code shapes.
-  unpacked_codes = np.reshape(np.unpackbits(loaded_array)
-                              [:np.prod(loaded_shape)],
-                              loaded_shape)
-
-  numpy_int_codes = np.split(unpacked_codes, len(unpacked_codes))
-  if iteration == -1:
-    iteration = len(unpacked_codes) - 1
-  # Convert back to float and recover scale.
-  numpy_codes = [np.squeeze(x.astype(np.float32), 0) * 2 - 1 for x in
                 numpy_int_codes]
-
-  with tf.Graph().as_default() as graph:
-    # Load the inference model for decoding.
-    with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file:
-      graph_def = tf.GraphDef()
-      graph_def.ParseFromString(model_file.read())
-    _ = tf.import_graph_def(graph_def, name='')
-
-    # For encoding the tensors into PNGs.
-    input_image = tf.placeholder(tf.uint8)
-    encoded_image = tf.image.encode_png(input_image)
-
-    input_tensors = [graph.get_tensor_by_name(name) for name in
-                     get_input_tensor_names()][0:iteration+1]
-    outputs = [graph.get_tensor_by_name(name) for name in
-               get_output_tensor_names()][0:iteration+1]
-
-    feed_dict = {key: value for (key, value) in zip(input_tensors,
-                                                    numpy_codes)}
-
-    with tf.Session(graph=graph) as sess:
-      results = sess.run(outputs, feed_dict=feed_dict)
-
-      for index, result in enumerate(results):
-        img = np.uint8(np.clip(result + 0.5, 0, 255))
-        img = img.squeeze()
-        png_img = sess.run(encoded_image, feed_dict={input_image: img})
-
-        with tf.gfile.FastGFile(os.path.join(FLAGS.output_directory,
-                                             'image_{0:02d}.png'.format(index)),
-                                'w') as output_image:
-          output_image.write(png_img)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/compression/image_encoder/encoder.py b/research/compression/image_encoder/encoder.py
deleted file mode 100644
index 27754bdae..000000000
--- a/research/compression/image_encoder/encoder.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -r"""Neural Network Image Compression Encoder. - -Compresses an image to a binarized numpy array. The image must be padded to a -multiple of 32 pixels in height and width. - -Example usage: -python encoder.py --input_image=/your/image/here.png \ ---output_codes=output_codes.pkl --iteration=15 --model=residual_gru.pb -""" -import io -import os - -import numpy as np -import tensorflow as tf - -tf.flags.DEFINE_string('input_image', None, 'Location of input image. We rely ' - 'on tf.image to decode the image, so only PNG and JPEG ' - 'formats are currently supported.') -tf.flags.DEFINE_integer('iteration', 15, 'Quality level for encoding image. ' - 'Must be between 0 and 15 inclusive.') -tf.flags.DEFINE_string('output_codes', None, 'File to save output encoding.') -tf.flags.DEFINE_string('model', None, 'Location of compression model.') - -FLAGS = tf.flags.FLAGS - - -def get_output_tensor_names(): - name_list = ['GruBinarizer/SignBinarizer/Sign:0'] - for i in range(1, 16): - name_list.append('GruBinarizer/SignBinarizer/Sign_{}:0'.format(i)) - return name_list - - -def main(_): - if (FLAGS.input_image is None or FLAGS.output_codes is None or - FLAGS.model is None): - print('\nUsage: python encoder.py --input_image=/your/image/here.png ' - '--output_codes=output_codes.pkl --iteration=15 ' - '--model=residual_gru.pb\n\n') - return - - if FLAGS.iteration < 0 or FLAGS.iteration > 15: - print('\n--iteration must be between 0 and 15 inclusive.\n') - return - - with tf.gfile.FastGFile(FLAGS.input_image, 'rb') as input_image: - input_image_str = input_image.read() - - with tf.Graph().as_default() as graph: - # Load the inference model for encoding. - with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file: - graph_def = tf.GraphDef() - graph_def.ParseFromString(model_file.read()) - _ = tf.import_graph_def(graph_def, name='') - - input_tensor = graph.get_tensor_by_name('Placeholder:0') - outputs = [graph.get_tensor_by_name(name) for name in - get_output_tensor_names()] - - input_image = tf.placeholder(tf.string) - _, ext = os.path.splitext(FLAGS.input_image) - if ext == '.png': - decoded_image = tf.image.decode_png(input_image, channels=3) - elif ext == '.jpeg' or ext == '.jpg': - decoded_image = tf.image.decode_jpeg(input_image, channels=3) - else: - assert False, 'Unsupported file format {}'.format(ext) - decoded_image = tf.expand_dims(decoded_image, 0) - - with tf.Session(graph=graph) as sess: - img_array = sess.run(decoded_image, feed_dict={input_image: - input_image_str}) - results = sess.run(outputs, feed_dict={input_tensor: img_array}) - - results = results[0:FLAGS.iteration + 1] - int_codes = np.asarray([x.astype(np.int8) for x in results]) - - # Convert int codes to binary. 
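-  # The codes come out of the graph as {-1, +1}; map them to {0, 1} and
-  # bit-pack eight codes per byte (decoder.py reverses this with
-  # np.unpackbits).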
-  int_codes = (int_codes + 1)//2
-  export = np.packbits(int_codes.reshape(-1))
-
-  output = io.BytesIO()
-  np.savez_compressed(output, shape=int_codes.shape, codes=export)
-  with tf.gfile.FastGFile(FLAGS.output_codes, 'w') as code_file:
-    code_file.write(output.getvalue())
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/compression/image_encoder/example.png b/research/compression/image_encoder/example.png
deleted file mode 100644
index d3409b01a557fe8c3058fad21ed969f8af28cb97..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3155141
[base85-encoded binary data for the deleted example.png omitted]
zz&j{m_L%B&%Li#Ly-&B#@!&oRS8t`~1+ z`~gcLQV4*ty>BhbvI~wfB)rOe)$dNbdh;4$NUvQE(MNmKn1&s#`Qfuzk7%8+fQjt?U&ppj#kK_)3%M3*r} zg^dUwi{NZLZ<&!ooB!kYV5Gjr->Mi(#^$}U+f1*2=@c-&3<7mP1vqFaxg0tMm)i~?xxM_9$L;-^2t7=W)p2WL1WMhLMkz1S>}SmK^Hd}D(CippQ7 z^F+HBRH33qGaUztD05Narkm3>fRpyB)sTumUoLF~(^NpI?9J%&xUlGj{K1lKVqjXr zFV2-!g|WcrVS~^I${g$i-GzKIjM@`!!+LB*WYwQ;B0pERkc3!%m9VHTUvnO6@XQLr zG|pAL1mYgFbYnP2SG{;4|5AvVUuHg$>yeQ{p1cm!I5Tk|w&J3AxrwqNKlM+OX6{~aXm!}R5Ykd=Bmhh z;N{$b-*JFoM2JsbLo&0impPg8%1{L0HP|C42TUx&Fw{6Pca5cm(6tw0jSyC}Z)#@g)2O|#ULg$#{*1?eMtht9@rXHAY zCk=23U<{8ku^w8>dA2;y)1EHuU%V9Cvq$hq0mMT&;(Pfx50y%dG4YYb@)qs{UBgR3 zc)-T%T+!Ek(Vsle$5}u3MVIiien&WkpDdFli35DXd-1Iz=@6iB1*4N_IQV}5<{ezi zd$BLy@10yL29fmOzdRiT9OQr+7KT(4ho9k zqSp{!IB?`CMP%O`UZVj!qk+F5^Q`>O3goQuK8C@}bO)Sj&;p0YwWak-fw>XhuDb3`>fj=njCQ44sY1uc#8Eo7E*44Olyw4e9{UNWW)(uCxF!$9`eRHunt z!#-ihm7zl!9g+h+(j)=j1M?{dK*rUbmcsx|2LUU}o&4XVtL5w4H-8zJD$YA9YBU-?) z^Ypj0jR5urUJ)vROV?Sj9x@0H!kMIxA&#^3?BH2?j$k7%AA!f~9D+o<0C-*ES0`5J zaJ>P>gT!qfh}KRCB_Vo>u{kYA6>1sa1P`{&$@~4o)IB*5f#iWX^~+5v=v16NKo{<& z7QAk*k3?up##pMxg6jnIFsw=>$=efCVA^k!ql=iOEg6gn>ygzG6Va`}ktrxBxB%Zo zHCQu8#5EyGp0aA#T#j=2Nvy$`^Um%~ly|CNxc>IdTWJFi_5{mqvG^YCd5(#@F`BX4 zM^QftgvgjAsldd1s|c;;1~BHc66N;pD_je~L_9Wou&`K+KTJ-QIp~3g1;DH=6R+3T zNhSk^lfNE}_wdD&U_7C$BM|K`6@&u3h9v5G&mM>?A%6bZ0qguGp6;Xc_M3NzA64=A zAEfVo`|AvRc$^+RL1AFJjzT}Gdbq|IMGLTmfD1((*!U&mym|z4fOx|rEpWjT&qO#z zyw&%kaBC1q04lx_YxZn;=LN~14?g`k{q28r0ghbr3Rh6Ly2X!vdQ?}pUXjZ*c>ZDP zA3slbAJGnoxZye8wLTtCn|&LP^E{nOx55J(m7p}_7M^;1XSOq;>PW@5vDQnw2-qqL zjQIt1h!VFJN-;53z49>q7Sp^`uC_MCH!X0O9^ORAEDa@63}H|TVFoki+w;;p1-T=@ zpf#RTD2u~988l=polttIX4J{#??Jj#{R-+%HLU@&$?k(-)+%<@GSL<28nc7X!|;FxbxCn0Ps%UMS)TXQ z5W=w|$B3-QGhhN=8vGf~_?hhV#fD3FTFeQXk+&f+STw;#aI$05O8I`g6OFhzll$Oyu~Q;uwWqc$pm26}{P;Gk!XvZ67V-HT5{Cpt%rqN((n?`#(w^gtg7(Qb6f|fZA6?@l?a#?2lZWax}NZ@as1zmC*Di}`P=?~MgLpO&4`4>d* zr$r3(5_~z2vJYO0Cn{q30?Inm|DXo78~z zymf#KIEN7xo+WZMG%kd==c2(rU(osmIBGDe5X*a10y!e^TKIc8@K&A_I)S*(wT-X< zM?LlcC-{~*p}d;}cnY#G*jGkbuMq_TF{{v|vd(qqFqE7^7!D-x$wZP+tl0zGWS=NN zoS_k%DKM7ygFkXFjXRY{TTDg2MfT|Z-4I6-=i<&>yYL1b;-Q?1KDk_9g)Tg`tjAm= zD?wH6U>ULnc!uzir-65TPN!1vHtwTtqY@}is7WaM{JTz%;t5d?d#1p&2I*o{?8FAMc}UWVvbXxzW}+2|h4nD8?@vd4@&QX*Fa~mE*N@;I$iH z1LhXY*fmAmDd24Kg$4y=M#}Y=zj%I|V2oqL_KrYQkN`V!qKTMAv}Uvc z|Gpo4wIhYf`wSUr(W(WSt=0y6qX>tpUW>mho>KpkuQ$nxi1t(Hd3H+ZbqVIq&@XPE4AJM@Bq)~QB4$5wMoG7E5 zl_qp<$Y)~|Ja}XArEl&~Ej+=%RzGOi!i)E!x`KC{zB;Sd12=R=$u}10^8D(CJ*P`AK^Jw|8!_S9N-$4)AhlF~{?cUd|bH)0w2s z%Uz=&YX;(|lxmFUO+aMf%Na;P@^=Y>};sxiH~gQk((kDFifT zGYd9z3<8fK5K-P`nsg?SXccKe$!Jx!G%{pfTAO9sRt)KzIq9GXut7`R`&e3*qg#a` zfJ~%{2b*%lT_oTETqiWhU=5`k3X;I>S*TSmBZz=C2RkP9qCdKzDwrf#ykHXmZ8#{x zfgExmKGzm7T4*u|If6_+^Zp}%*x!h7)|359u5Z+&ly1FxC*8X9W#8$EruD72)7wAA zTx%;krymeCA=!-3j)GeOYipJZ6)L98Xld#}c3PC<;#v#|SfgbUSP@%|jmJV@6nL+u zV`hDKPgr+3HK-t|QoK>6s-{7umE(#va_aRT(0Yh4Kib_`ZFC*EdVl*n(Q~YGLa$SjmggjvuIN2C!&?=?kp0T$ByzzdjP&T~(aF4` zz?^%9g&Is2q6u$;SO5dzGa_VoT!5qSa$WAxv;o6r!K6_>%6OT-`|d~SR~~2c}E!8<^ zm@ViVo==rq+d8=@n;=V2_H8ZDHfUx=A55dwa?b;Vj*J0?fOXes{nWx+hrbcLCSt}{ zj>Ojm?~f!{l~Qrh^b}yTLJ`7z<##+Cz(OU^BNvWETfHF6!%_v$8!d;S(iVes8@(IC zQM`@MaYu1fyz@&s_D{Wo7UEoE!q?VD4R3{W-toAzrcDxX*RLVWxke9$Ub1%|SJ`4t ze4|Cm8+cJIfmbj(-pRd}=`9*I=Sxp98V*pRiLlZ3hx%!uMB;o(Uv=%*KVys}#xYC| zc8vOONElBqSAh%gQ~UuQfkWQWrJ5=s98tEDEumyg{_PnMCT9{|_BTKH9A!Ox#*_@` zReqzAKy)XsHyIDt6nvi1iz zIwQe!HKW>>@U%E#Y70oix6X(-3m&&xL<3PxIb)3hd{A-jaV?cuZl_R_ZWYP}`0I?;5_*wrPVHD&d5=6v z15*W6!C_T-gKAovg$H%Hbk;QGeOKw7Mrx5F@H$Qc&$vR<)DARYk%kWdW^mC;f7b@{ z3%(8aj!0C+QOfKkA4((d(R0aKRBWv5^JqcoH00P;Bx;lAA7@f;5NSl&so@>Bm|{T^ zy!;q#DE2pBk;W*TIWJ5R_K#qv>Ac2NjK3x2rOvvm2y0uBn7&h>?Z|MTmlJyCoV@N5 
z79zh?5x88Q}b4dG;k&`Mhb#wVstb-dQ@~R|iR3Q^pw&SQ&LX4`2S8@YAxk^9I zUo#LqYUN+yUhoi<>M85g$)+;vSYwY*kFukSrPPsyNW70rA;0-A-wCC{Gbv(R6rlrO zWLNCXJ%Sq;c78TXTY$3Ji!wp9Qn68i_H`Gm!5YywM--RolL)!iiB>F$icLs{*E=a4 zsN6)QEI1aEz+`~H3#bL2DsPr}kj46_Ko)P^7vJ_REuwR{242RH$Y@IT>O(liYy7Tm z5_E#h(itbMx>BV}oGb6;AD2c+gyx(c84vJ8I%RqMR!)%J_y*C`nUB0lvPs7dWd^Q-f=@K@{kyOnNFP(4<%e`Lu!2 zWJFq;)z~wo3^a_r7*Q*M1Ar*jTO>Qd?k7eQUCqJKDV&_l8jNinSw3tpXqOKiN--gm zVzz2!2d4oo4y*PFg|@)q3Dv(V4D$i!Bh3{6swuDXo+i<7#pratp5|0LFN;h^v&e_d zsmKaC4I%ZF0Jg-h~7MSbT|FfmwqmAL><>CoyMrTt`SeqS{>4&-7L+?K|&|s4^BW{S7ro0 z@^eM{q)rN>4&tiaC05W_(fsHJ3yxk?eLGCyYAQl2>UI7MojJcst0$!`cte7>Jd}7>Gx36IbU# zU}A%3>G2oe!kD0%`r6N@E!sA%wTYYrBcc~0eJ2}KQ0-tb?(ct+j*lKws^38vL5Wza zY&j9I{}C@sMPa*vcE{5-9O`25Qyz8fSrnk9%d_7Z50J5*&9D0c&mbghk71( zENwMp6h_aJ*O!=-=K)Xm5riXyR@oM&AhYm}FJKM9>qXH5UxIH&Cr)VV^5paHgaIHF z{_+P1ogSzDlkcTxkDjFy6oL*-wR=PZ#$}ZDx+_J|!jsUT#Q~rPKM*c0gr3e_7V6e? znh^X0?}b%{tF}y006U>4@B(bH`jK?_z?~9|*VA$I9x7PiQYJPOZsM0-e)gjf;+-o5 ztrVUKNQf4==rsx>il63YI>y!YrfAd*Gr-7FXsRfn5AfOSS}G>-;Aa_wPA`_;R3A%n zomK8yEVK%yq( zNCBBUcw^qUaZ^krT#6EszQi})Y8W^5V(In9R=O6Q;Bc+E_XTi310rYx#t0LEj6eZ~wMA)sC4`&Y1vD&)>D`k!_%K zO~~iI?4Y7@y=f1@qLIuZqCqzu0XpY8DOe@AJVz;+Vtjfo;&+Z~8TI7P*pAAOq!6BQ z6^6in6;$vLFSZa^!DnVvyosDE1UVoG1)Svo1s4t;lgeY77)+VeBk+xTW$*$YpauJf zTCHloW{nVYx5Fz5U-O-KZCFFb2O*J+<-r>iIIcs*!1Mw6q5H~1`&V8~Fh9dfVa;;H zO?@o0kaGp8Nc>5cpPe>cLaV0Aidu zI6)PmxMrkQ8V7VJ@}}9aW-OzKK9i0U!!>w6APMJ~7_ZmS{+Qye!oEhHy-I?qNk*!4 zNaRtf2u4vyl1;y>kQS9AjY0nWwf{K$uPMFE1>jIMs>11}^1yl)%j=x^I{SdH<&Q&o z_)|W_Ik;G$5d$YkedB0Q%nrZ@Jk0DdCM7DXNEdW?jYg5Mm3o7BisY=-HLt86M4_DP zK;dV2+fj<82X@2XZ~)-MvcUmpyL>?fIL<<9kt)>AtWa`z6@?1!fbJo^3d0F<#`+#{ z*3?FK>GM8xB>8J*L}yA>@OR|6_pruKIi!5^Jt3!p7+&HyI4C}fPpXPzaX=ZS+~$3L zt9z8Wo(>NH7S}aOC?u5a!ok&W2k7DD=lM*e)A5@3H$o=9@1h)^aGok<#*bZl)GDca zA-=ONDnyrwM!QF4jOjev*BBxIuPq`gHrCCC;Cz*(Y<5VbdVu#|KC?!jUZW)IjKaNO zaFB*?q@{2`nd1STg9ljyxr#Si91(sImADsCZuIEY7;KLtsJWEIl$!Q5zFw z*0ECLj4MRs^;$2`2OSN(#F4q!q>UpzFf}5Q9Sd;&Sm`{ZI(DS4$k|$BGr%|xSgUZq zgukhNR@YlKE38VS*Wvg_E+T({Eh>IEGL|?{e5e}^fu%fAS?$8Tmyu)Ba5=W%iBXHh zeKiCkjZ{7d+|?|Exnz~YP#z#>AtvYpe8&-!XK$wT$Vb#g!w~hF>e+l~S=dZZ9@5?9 z>@n%|o9NWmZu$`KHQ=)LYjawu@l60f-Fo)?DFD6V#Zx3hqU=~!& zp`Ru_7E(kHq7hy5jiDFGNEx$&+1xEgPwi4Fyf{97?ksAu%V}yS%Eb>ApdQMj9z4QN z%Huj>tYpPap&aIsC-$yhsR`JuxdhB61v(FJ?KAR>FO=f9Ib5GVTm9ib{9jl9^dJ8& zguLe@v+warQ_@5dLe3aZ1$PJ1t@efOcX#hWn8y=^*^~NFibHK`YIzaUU*On$Ft3=l zUBvhV6nBtOQ2H(+9yTrZS7bAck4P zmmw%~9*7Q7EbyK%z4dmwQg>}$e!co@|NejIb~%XupRe}rPgbvf@b^|f{QAdQ#k8;R zmsjH4dF>bLymbg+s8#RTgZJQ#b~ihYTdW0Zp0HXq6jt2ttZ)KqM>z8ioZagbEyp!( zP;epEFs52 Ui@I9S8s`iNG5hmy@1EO^w!&K8dZV>v~`lHAjZ0BHaT!>lpE8f;*WM&xVoZ5%V@M0bu_p=DaNa%?5qtbd|IY`V)OCmv z>@pN@PrHUYw|O048X%wiIi> zWOn8Hr%*SWnqXb6?g|gzTYrp|=Go6+c?4ry8rjb9lk|J85&ryhtaE0zFkTPOHyp z9MBrLr^waDYCZ3x^q@I_!CoZ92VY!QK6naV`_ob2hx*68bl5~5`O_D?IOVq5vpnH% z-z<&qqm7~|HwiBAD{9gHOo|E^x4#C3*zhMfH@-hRV4(XJZxTAeSo^uE6$Y@ClbX=BKrGFU)QERCWE^(i+v<8{yTW+a@=l18AVn_Zku6>wSTsrAd z$^=1w&fA}Wzai+az%|d}F~jaG7#H1|{> zeo{1Qt8#u>%am}+G|j7hn|dK!zq*<;IqaiIpzDyL`znC%$lO>@ygs~22D*Fhk5;b0 z>O$+fqNh`w>ANDL$+uDy;uVcIxa>1j;W;@NwZoBxP>S9z&I{W+fbep4TSynJ6#}LgsA5fxRH0BGGu2BO@2|<4H7R0yCo+aTB4C;X|GA!EJB9Y zck>uIe&;D3nbv2m*B-{pt3?b4hNRunFu^rG)11F85}oL)^3&Lhtz+shD`Q(X%V7KS6YMMzVGYf95m+oO8` z^ApDv%BS0==rHK8c zjN#Mip0VNAP6cb8Bj0@=TKy}TkA@~y$+LPTPkUZ9D2a=0HF_UByr;o|8oT(v`@^L< zS=QD4D~&JR`52R|muQZq#)!YX)%vKT!>c7|^l&s7RDgetA#aXX97682xiKCWojH~2 zYJ(NO^%fIWVEOFn>ZKb4MhZwr(nc6rPA@T~SMsSY6RK8Qk=vfh98iu9vQj9wF}QWB zUKg%fhxDbfgwQ8afX*q%rZh^}qeq&{3cEl4w-~H~-ntuW|rN5vOY9+|*re}NpyslQyfBKJCKmPGgS6@jjdhnngg~G8h 
zKZ;RR^=+&{jiYnReaafI+wK$H0u;R+I$a|x@cjJcD2h)-CK5=8V6|y)t|?Q&CB&Uv zLVGxcQW#g=K}>b#HlAj}Jcg1NV?19T+g?O)*tIDy#zin}t?hdXLPXLvHN9PoE#KQG zKn7xX;aR=vT{UNZqr4je_7DEq_gCNlga2do^S|+*`L;1zrRRLJ-pi}sT|N1;zb9Js z3cTSM=6o%u@s|0m6X+WhfU`pM&C2m#5EKW7sj05!Bz{(6t8_Mjyov zyo1dkGLUp#QLh+8Ina6dBG%o!^a#4VzDtT&Ts>H(NQOJXr2d{^@<~jq0eRN6cjv)X zYx4d>_c_pDSK5fRRaAcFB%+wf2AJGYhO*d3pVheO7u;-F_g;7y#{CM(wu|0l49x84t|z!>q5q^hxvXCwTVU z#8|%nAI+yJTHIcPo8Y)4-<-xuQqOq8JZqO09SHmw7W$B;-we5UrdUQ#aAEJ+QZdo{6wfec;qD< zUT)6eK*FvErUbhH&BD4pM+lp1e}C(Q*u{V9OmdCC-Y{S{M69+Pe$d*Yk$7gbb4__= zxqGhp>7bJmoWWGe)Hqk*_I0_=p3CI|7J2zKICs+0L>VHlrj379r;wAT6qtb7XXAma zYe+fra9%R3A7A@EA>E0SXKF3xFdkif>LuNH?HiEzt>%P0Oewax_;%?yuCatO26_9o zA-yRYDgCP=+iS{ShJDIy^=9=ltSGWWXo|qRmFjv0N5Z2WH3(}(L2XF2Ag86t*}E+U ze(8?q#=mOc+858Qi%k=3A5yl9994S}%!CWUDJRu8h3h*MK-DEMF(ktqIH4pT~zy$%g zv|~=P2NzmQ;qX6n36bR0^4Pl;WI@(%Q0OEF{aRT&)1Mj~SLy6!NxKRAsJ0 z92Pr_GiIB&@_jA{3&_4(cPv5G!w9<`+kYf`kbn1_gs67eTjE)nqm{l{+< z&;ISzZ~VrOR=@nqzapiA_uXbRzMt9r5gW7xc(l-W(ir*&4^Mo8tFCA_cXm)#H^8@@}BG|?28+lqxcp7_OcXLvh^ z;jZw})2Bb4y?FDs<5#6DsUO&Y^w;l%#?qk@3=p$dhEvLH!Yq5YUbbIZ`6IDvcL zC)9d&2pGH;2rpwq7>jkhAl@9_VD7y#rc$0Gt`zW?Wji#js|cSrJg)l`h2Qx<|JLef zfAv2$KF^;k8b8x>ZS`BLSBJl&O&*UsV!fh3?8=k8^-###JojksTZ#tXynvTe+aP6$ zVY|(1Za3jliVJ1XXd5~ctjkNid(Tn1Laev66f7(2oG`A9Tg)b-=f=9j*WH96Lcd%0 zC-5dRXI`nRi=xYza^G0?h@-6ue}Y}D#!@0r(M$M@GYpJJD8jr8_`WC-V2FuaX=q=dchm<@*WugKIW?tDCM-fR!uN1| z*Y}n+JR`o*`gWzj<&-JUdcV$Z=7K~F-qG3)j3D3|JL9jeH~Pc2YR@SpXMtJs&Y=|J zO39i6Jwv3B#=AyAwNX=y=kA$Yu#Yz2bcY0VVpdVao1Q_Vs7B!0y}8oL|G&6If5tcK zQRu2ci4UaQ0n!q^^LO1?cJ`$4S=%*OP?J5=Fv?awR)Wr`#P!~O-UB`TD*x8PMDo1C>IlZ`nJ zy)JDne-7{H^km}|9)4F-c4JCW+0+!FrX4xCs;7V|^UR{fJ-gHAa-?MFELeoj88_j5 zihNEzlk{ zL(yB!8@4(Bwxy+I5X?Hj@j-9%a}VDo68%|mx&gsa2PHJxaqv^6hOJYfgOwll0uW{cW_54(9slPk$oK z{O9L%*&}nPi`b!};8Q#=u5>A~x1AGbu&yy%>+;vq0v}w=cfkz?qxnYx=@FYT`G7qZ zf=RcqON{fY7oSV9c(Qs)k&2B@6u=|K2(yqcP4X}RnS3G4!rr%}0G0Z)Cr7;$m+lGh z5q&_f*fLLOA+Ix5V2V{~Ess>Y+>sw&PJJZzhUfm}E4|&d+q5@cDIO51h1rm4DaG=P zAmU&ZM-J4~Dgqs5m4Gmh3M^W}bV8ua zRZKPGxFKNnlf`)f!QYN#&$`Q*+2>nEQNd^UgfXPkVIkkV$-L$@6}3ri^{^<%%k`uID);JR{@5tcdC z|8-9E(JexV#SdgR=Wl3c# zhC{+eS8OnekgUP~gH#Y=qcb8Hb73;v8W>`h*ad|#JVZkT#CR=3QlF3UW!;St9S$wR z=h`fGOR#zf)rbB?q zCRP9O(Vg$C{`B(CT&qS$gad{s$9-S;w!A~!l=9!oyzzs%`+k|>f)mSrx2wyYAnW_E zFXg{`O7{sz7FueZ3&w!Sylce9dg3od@2k&!9^44TF)GYq<{NGiy#DptQLAt)%?C^I zH7|~nz%JhUUUlIqxRm~rvbyP{n$mtcVs=%rFIsB9*C^UWiMHOAwgy)C)(xZX%=>pv zVY=!}fpA@0y{{?%9aq^I9SMx+HDl}!11lx>6cdlflxH~#_|OP?8fFW>EI8%W1FQg+ z7&h9d_1?7Ew4Q|6h57gd55;3Bvp1M*Ij4ElIsvDGwmiC1C}*rZ`k_uF*zgJC@jUx3 zFL%mF5sw&Lovs$#aB*~)5{3q=r--IUX>r}8O~7r8VjvwoWhgF9AFPAWO@Zy%3F5rY z{y>L>BPB6@(3+ru&l7f(ZfQ?@QtwDId(mXA$vx#d52(nI1f4HNU<*sf{d`yed zm7?96jd>u|kw1xiNg>YzOy%JPEZ;S6eo>1(sc!Z0-U9s6{wfldG7Vqgv*r0Uc$2a} zMW4p*HIW#+Z*hR|`kGfpL!Rj1l;T_pROgJeue0CwXQfEF?l)u>e^HRyqGUBH}~A z|TS(Va5boQnEZAJM6F)is8GlW%nCp^d5n;-CQIRneMtMdf#ty;b z|8ucl=`EASk^E@Qz;Z@TY%{p8DaBhloSkVZ@RkE2k9{WB<-6B9DoW*)7JZkVbkKPU zaPHpwI^>+bw>o?BmQ1y0;8#27qQ0j^nlw1LbX>6txz)$tWsDZ+1c>C-y0oh;PP5g? 
zW3+7#rJ7ZUy~wegQ5=Lv9Jyvt%FCA2+BL<#=QQ=M-R1bWdy4@8ChcMM2ix}eLi*b` z{xyZe__%r9Lg#g4D}r;au(bl)BgmI?Vmh{QXvN1VL+C3b!NeH0$I|j{4#_x+J9_@P zbW15u&Ve~$m|lW;&Xn}J_+EPDfpqpGXA9(1s7`7ew&5l|_}0l=Z)KGf!Hak0W#29< zpj0_Lq$UL?=DU(MxaMc$j2`9Q&f9P~omEAf6sd{qK#H87=`XcdFU6=mO`(qfOD~K^ zPLv2<#?^CV7quH_Cz#!#BOki1`@XGdjplkqAL+U2)1^2rH6eFa|L`CEu`CnLGxMtT zeJ6zF#7W-A%Y6*~V4NP-b9?Xm=9Gh8JW(5ecoDs1In9?zUwimp|K@N0^%zT&J~zM# z<>^m<@-J6E`uh3m8DUr}A~ZLaZ(fc=Nc5G-zXP2M0ye^1x5$0o`wu_(kOHLx1n%8r z-DY9{M#x5_3FpJ|zZ6^5Ng&1gHtQ(_zvp~sD_|KNPcd*uz7+;u2+LC}Y!InMn)tLE zGj^2p>g5~AvGh(I??s&ms_?}Vm-gu=k%Xg6O<42EIfBE0~cUHIF`$R;cLf#Kn-~HZiuHL`< zZuR+}{TU_B+^wY}Max&qvs4??2r-3khY=MrBv^Nx!M(|?m-2o~4Zzmk2j)fS#XH*r zis5?c4DMZCFk-yJP|!7bk!O#q&>uLyot9OT3n`M)Gh=WSRW6}_b8G)*okj$o_8|*+ zSKUX~^rU_=hzwxZg)n^J5AdO2g!HCAjL*R zC*QlZy8q$+>X&}yGsN%$eE+HI{!oP)5)=i0Z!sha6E9t~Qedo& zU|`~!!(iw9PGFxhoKFbK1r|5wq3h^X(E-~RAw ztdyNr+8$8KC(z#~1^l1-{hH$O1l$yH&U4Y;T&*L7`VN&m1XWLw&Y(5pbIRS@O$MIn`~%rrM+Gh>HHibK0Xb z#g3N@nDfrCr*WnD!9YJ?Ot@X+d5faF&_eI){bpl@3RFyf)@}^#{i6 zyXk^t>yHo3t>S8P&#=u?ID*C=)NQ8R@c42MYupT*Ude=bKdvd~q8|1^u*^@EQnqPYhu5}bM_fcXfR`Hm2FG;YK^_TLC)u*+d#6$SEDBkdrVn0z1#$SqmpNG?vPS`WLK4rN#V^CW$oJ-m5 zHHWlmLa?nn)b8<;_kQhAq?w?J7CkO9JqEThdUl3L@JaqeYmGgQI^$*Rc5e!Qa}8qs z9Sk9bxwPA8&f4PT0bp4-R|H+9`zD&y>U-b)?n~}0^leUtih`4O3(kjM_VpW_>aKWn zd(yvjif>=>jr{3F@jCb9kbFb7TSvD?&J}v- z(9+$az6hpz&ho-fH9hO!1J_WFf`)TWpF|gkE863Ru8xEfHD_A*Rv$wLEaHuN%m`04+q{G@pcbuZ57m zQGAWj^M(l;|66#DM+}vnCm=A~pZoNK)o=Wje{T?R7CR%YRv&!l*AUd%>c@ZD0c@vyk&Vlzq5^_u-boH6ax>0l6(%(Hq&9^=QK?yWxirQ56T{%dM0e&LRM z!hf-PclCR#4Sno4$qmGE&2pKuD=|^{*3+`6<*J9vJv;E`&1((V-*}G_ zAPLGs)Ze~Jk4l(C59Jfqmwir#1gBH*@?Q%H@9v#qGMawgK3MI`rG7x+kCnoD0>OD2V6l_n3Knyy zSB*XwMWx*5;IJt5=GgcYn7sCj+qasDrCnk$U5tTtpnFp3;loWvZU0f6UEA-qDH2vd z=C{h1)w}VeVrXDcxW20oi%6P35kHFhDTVem2xy__8y?qpa*iY1QC3W`7Q?Sa6fPMVE1}?P9;G@&g|Dr7 zmvVSZ!Fb-|@IRq{SXjnu40!v|zNbXyb&fYh!|mlMygMUgJDiIhijS4n7JTfx@#&O! 
zrFr!e48u$NUrJ)_0^hxc$Husn*Ips>nxDF06gq}h_IvSwO-CLdM2o$K{`?(FnEY>e z(^ow+e&Au=hj-s73KX1_t>u(UcpdLe2~9TO3E^$VPe1YEI=zkZ=b99#o)=Fn%eJ~$9L&k)JO4`oHjkU@8L+% zDR@~yuvNp^aL~%Ew{CPPUctzU;T*nDq@jNaWwaX{QesP|IOnnMfUG(SHfCPkcv>kG zGd36bHRW?p+F!Xs^58XGvB8TOGf7|`7o6>U@J_pv;y3kL!WU}sQ676Tt z?B5ui?#oLXe`t?09GccUU9DapI$OkZn^#@zYGu1$s%Ga0(F01`+H?@*!F#;&4zf(4 z9KGCFrw+inp^SDmOl*|i@A`}euB$l{2M=hzo?p-S@z86>dEnIYZFp{at2fV{tzJH% z)C1Y&Hhj5l|DzGI==LqC$x?JD1&VAv(>AozZQ~V3YCO-R^R}irn*pDD!%2G%@1v_y zeh=w?wIMA)v?Jn4!+q8f9v@#)FvL8cL(Gq2d5zfc=f!Dmz418o~oTW72T^ay940r z9M-yeW;*CS88K}_`gAxw#*EM7>PJqDe(>NuMZ$iZTNJLv%X514N`|y%mkzyR0rn8B z*vK*P(M$X>zPjLice(iBzdQVIHjlY^qv*8lpM3qr>L)5lU#dD9Eyh!l0!f{>w(h0v ze1~J4La(TBgF9zmqWS)vOeCgO)`9VQS}kVkw^whm%iap9LTo=;|ih47eeS1 zo7_gDp^e0EhTp!nSHJpi{ulC39VDcYf*C>KA{*Ng2QT{_3OqxgOqWc_i0N zBjsfL!cQA1ZJfmOuIE>UI8r3{9nSQY+x6y~hWN$9bJf(Y;1suNUyG?7XgGXVobJ}0 zLO@ARKjz#xQF;z5yUw%v0$0uwFeWa2&ibGeH3@Ta^*kSUfhTdr)*AliCOG569cwkt zst516yp-HmZCCF5sw0Daco?{Y)EO9G&~%ano5k~U<+-HfVMhIG8+DgY5(jLLlCT47 z-(5`wq`9+N>eYB>&l&9F;x_>=x`lt9>v{WYLUFV#dWyf3Xxy5EvuXIaVJ%r!f7MUc z!IwV!f!{y$J4>N)s?vQl@B6z~`?0(;9=Me!1VelV?sH$ib76oN$4q;gRW-@H19h=+ zpPsUk4!Ql?hcnht?!|h^c2w)l8-3>-oN>%mRG4vBo`jaABTc4<;TtW;-WU_xNo&tl z6GuFD=!UJb?|W!Tyw@^1P$2C+jg^i~3UOfrPrRnQtoz(3-6i+V9uFApXl_P$lfN#@r!Hnkj*iYs z2s&vAX=34KoT>Tb(wT(Z_>j5weXpSfxYt7C38UGdjV!+@jDn*Eo%i#`rB!04@g?Dx zm-lI9o)Jwi2OOlGxX~QOdtkb?9vhzE^YI+mt2ENA=3TIh4!z9-JTj$_!pzBs`cRPU zVN1m{N!qk1>vJUa8Vj{#=8fAbwS~57p;cA?8ya5E-ai1PowNfQ1#ijCg-ZHOZb-aw z`Ilu@?u&g#JB1}maE^x3yz)Tg>FJeaz3!OPEc#u=YCCHW2X++&UFbX98sq&t#=fk0 z!x#fc{5LD$oe)LqY+e(>(oxOd4y;^V?TXrzBsj9&!10 zauLxe1m+v#2A8zY;cD^FR)%Nh0vI)^%y;^N-h{VumgF*qo9_ zn``j4M(nwq;Y@OFa=GAemgE?Vo;`fXXW>a&O`1fXB^#Igs=O2N*Ie-SElGfXlMMz}M4$LDFHY3u0{7yhol>f|?AC53-=y!Udot4jKIN$YfuGj8TAfsS;S zG!*8bon&A<+qiz$p02ck!dtygXj<2K?S=MNYg~7~v zJeI*V+T387C%am2Ibu`9Vvxp|;z4pWkOA^&!qd(TO{L4^;abar0Q0dv(C>cOT#j=JH-m?(_11eYCNzXYMPS^O=z7 z^UuFTKSDq}2y68Z1>3C)xz=PsCvk0|sf%*FHZaEI_{~!j{$R0Qz~IgYk5-2-Uh1qu z<`s@9j2dr(_lI&O-=6;&uL#W@O`L1{6I@55X4P$TdB4B&nfxZWv0s1mKO<|kX#D&I zT6Iz>rdHwUy3qZ-4zAgIuzIEE_Mt8d&3)Z7YOhw#&zimMv&Ea1PFKo?0txF12c-zn zv9tx!b>LVlz&r1y|L~p(ja2;}P4Cj5(v?a#IiUT&qp81q{>q7|+zxW=OLLu9{g{GZ z1%&6164WttPoFs|m_mA7?oLd}cOm{U;W=h=z<^-IymlyXg)CBjO17F4#oUk*R35}a ztcc$Ga*twAEw(YoXmMQ|QmkkgN5pHKPH9Ma520P@cg&%9WA65tj7{}>!U!>ofZ3H@ zXTvh{2EO1_Z^e#?O*#Ajdp-Exf3*5fKKsiQgc?9oyO{qMjE0|l|MymJp8x5b_OvY# z43zvp2tyr63``ly)J@EuP34&VDyXLM`^}igF6^;!x8BUJ`CydH`}cQOXP@st)zY$u;Yq$5 zA(tu|dA7IhYHBK2qmTRCYpV|K?|aZ_EMvLN=_|JWR}Gm%%59>0>AClrWCB!++MZO3mf{Y-anl6tgU_K zv(Notpz|s(szM$Ln7`WxEArD{x%qQ7-t&xazfJLf?PpOkb#!AwaSr&qXDx~9*FJ4` zDRioRuf?2G{x2n}-dyTSdUX$enjPcl`E$zu>p~0Bu(iu!?4-WyxAM3!^mDCuPHFf{ z>zNd%a^4mf?epi13 zuI26xe~NVvOwB7M(>>u`c-8M-@vmsDzjLRCPM4eUQ}e8E z=~T8EKR)Q!2$k;5oiuq>Jr9n>TS`tI!{3Y(>P@_g$7D%)ZCjl}Wd14kg?QuZ6@;bW z7h6vW%5`4bFht-Pj3VpKU4J9LDA`zl&ofE7hfnbhxptV6LbMzs^&Hg##XPn>C#AbC z6B`PGrXa#z+?4w;@WG4iX$tl1bva|!P%9KvId0{Q@$v~fHoH@o*4G`V;a_XcsPO=6 zY3*&s^E8_(n^;&DBZEpVAH^RCixkWaK_dE5m z31;}j(>my8s0RGmEz|0wJHcwLr05mZam_?m1A|IAST&s%^O!-&U;+S?X&SS9!zxB-=xC7LS7!zITf?U z3ID*;TVWzR6G^=qztPOUMB;t&j?`ogvs(PBA><;8}lsVn0a_g%+Ihj;ZvU57vA z3tify9if{}2t3xGFNLgURE0SqI$Gh<=hMWZljwy}GGQ>VJGUq40Ljjn$QZ`qv**O0 zqu1K+4dAVF3LXrVa$m|pI!-w#(FAS5K6AvF!xRRRJJS!^IOz&;;%VRIeNPN`;sXE_ zC`VSTTT{Gdvmy(hWn@>yxleB0-+r)qDEIf|?CWu^8o+mYnm44<5>yX6(te zJAA^85Fhnjw3H4SZtMx0bbtBjcQ~l$SYUfqKh2jfwL5bCiB2s~6i2p?IX#nIV?0Ol zco^_d+O@-?D~!8tD2AN=7T$q|y{`<#EKbf zlwxm`?8W1}p9U`xvIj?mCDrC>Kf;)ce{^~VajFW#1{-%yUcr!#v#bL`0-8X`0amk+ zCiI*_TD)`=Z2$`xAwWpkS<{pF$2FmXG7ki6ZI=}EC|9zxO<%_f}j$(1kmiXXDu(N1i|Xddidhb>cdYStiJksX&V!f 
z6!#hi?*4%TaFt`P&BFKc+2136B4>F3{kB-L7+N~d9 zbKfB0uC*6<{CIEm-JhrAi7_7BC5T-2(Py_;kH4b@)GgLI#v^l#$fy-fj4QH`GvytU znU&@J<~#9KvxGCee-3C6%Jv0s^F2Mvs`S~FOuMTk z%i0`eN@(j#2wOaDN#5SH#`e1PhX?P(K#Lvb6}c*31Ac~otv~O`;17I@m}|$8qFbE$ zQZw>VPK=SkS_C7*IwZ?@D)-~&x~i&7odi=#7Xcx;w^uC&$`8tWT;WH!*xwo6dF@gF zsl=;KK6;OFeJn8%Y|+e;$HQ1LkJqZ64=5d-|8CZvgSjdi@Gw6`1-?PbQSu)SjD6@{ zOl<8BUA5rvvKRdo#e%<)sLS4$?B6SIYhDZVpcPjvp>?0GDe(2GE5%0YJ*my@VwmLyc1Md z>&zo1sJM0pX|GX!to2){r-%+c8KZwGF#D2B^Q^|Ra(eWN$A+WPb+44O@ruLQyt?Hf zWR%3)Yq3;LN3_@6@<7+lY9eLk-jFHBz`5F_Xbj-M9J?50rc;g(wCe~vM#I)!YqGA6 z?v4}H1)<0Ymw~kgGy7As($oOc%b}AH&%1ub*t)XD@V53PReIOH>smEs0{hgSPn77u z^Wb0WeE&F1IS&+1m*hqC>m_&nC^L*doqy8I7u<97C=>CFe4% zu&{wGaLv91%kVw=?KwpS+JE5j9vv!Wt_W@ezs)Ggn|iw@|K%w4MDnNzVXIDL9Ui`> zurJP&V|*aE1iv~M+}MLXUdo3%zoa6Q0ez$AGrmx6L8z0WnL?B@nhqnzXRZ_%6KcOZ z@L=~&&C_vEo0tA~a=1P?eFBc*!nQUF>#FT9x0{DCEj0XCDlx;G1-Da#sz{w)yqg>LKhzr1Pf48T@ zujwsCWrI~RAEP(N4Cr7`p$z)=aYldpo5Qeb)bc$>;WA>MBYKbk4>&ryKod2`*0ZcE zoGFJ3JiNUT?MPt^CR=#lefc)uee^Rt?oKImdRF^hJF-Vw*ggD&tTEy13n!^w-&@_t zVN8j=R3N9abMUnYWrj&c))?W8r=o^G{K1b`544H9_o=x}!hbRi{yU-X?CNXy$vN>( z^13-yXnDRbvSjU5rI)wW8tN%~JqL><|5P-?qZ^gHXV;&Pv&limBGgWbbm+r4N*SwX z@6av$=)~4T>q$}HdlW5!fetYhZl?4eJ(HME?)A(|^crjjU^xnA*XRWV{>4^2o7N+e z@m4?iaBYW<5o_F1JaHkqH1dU9j9>H$H{Lpopi{&un0Wf^`S92rK=GN$#Wqg7EeA~w z*&_#3HP`mP%JlNF%EL)KKEJk@gLnRp4l80+WeNU9q8lU zxJQhUE2pf@4KSziixy;*Es+8vhNO@$^r}tP4}SFAX`o-wWf(YQhs|JMEr##HGZ*8d zwHt;5(VWp){Avt5_#FP0rO<%~n?HT>bdG3ziAnT;D@0!h@XwNXS zpoCj3HMSA`F@)*3Q8;2_7V9hPxiEEQ*j0;#m;v*hO^2z2An%0mJ0dRzJMA5saI?0- z3BVXn7WlOM0L{iuIY{}6QI~3Tofq7z6B-h>c|Ln(IgX)0sKpEt=)IalfM~-a%Fdte z*=PxItU;{i(I`b( z0LwEmXo>ALFR!;(^Io3eIU$pEuCvT7j&yrYR6DCsgOvI3 z@!9IH{M8RvAODQX)rSwAfEL`1<@x)B_`Xo#F6BMtKN85~X)OuR&KBW=AT(5oj>4%r z*T3d+jED~@4##?-o$<76^H5aTdq=YGy}Ou-1n>2Yd#fXRbxP5`R;CmVUvDTlV7y6R zq`GDR^sgspa%gX$-{=`yNKr3jS-7&@TVRIqnqD4d;=(=cYLP1n3`S73d#CAK@^0tl z@ARE;Fnp>==EB(F_t9HM9Q@kWHloy*!k`^f?6E^P6+cXL#(ZMJrx;Z*NePUwcQMJ2 zKYHY|v(=+Jdhp&iA1OI`dh^s?y2h-`kXOhU%d6jTdj9Br_zr(&1-ydu#Vh0gfzR8A z;1Y8tl)=6}on23j@e>yG9UT1T@PLsZt`pdH)ocI%YV~?;>03)&yAC#)e>nB-TB<^& z3gJ~u>w=MTdcd>q=UjLfO`TI-PMrQ!zu)M4U9rP;X*ax#XK=P2hlj5y|MsGtC=vCL z@TOCU0N_`YkLYFxEUPZfQ=3=1rsOH~9Tn`W)d>$z0_2T0G;d!sl<}@S+hJf%k7-v? 
z!aSuUy2=TW@GTjBPoYk&VAfNrX=TTh|CEh1shV}CMCFa#kmlH0Q{duZ2_nDd+Klog z>2rxTb~gP2`#h1oOX(>xw?rv}VG4Y_tX;?>UAwQ-8M|xUkuexSd!>N2%SF+`k7zz6 zsOZ!!=~6{>k4}WJJBiPA&38S|xG_dKrOeg`IhY)ZyylDux041FS;afHRqk^Z`a#d& zTJ?X1MR*jx&P<~VpG*QV`0X&nw;13zl$UoBpeNNP4<*BGA|8!f`iT8VPUMMxErECx zeMj#Iug@lT1^(l4=f1^?2s+W+9N?zw@eNI8NX94lhb zD+3~ePr(YlwKl3M6k`#nB1j4(}L6fTCQ{ zflJKFm^mcuWo=E~+x^JgycfsYPVv$bll-QN?r_Q$3oo;BL@P&oK?y61Gwu&8)@DO1fZz8Ee9;}oxCOBfjq zrvNQQ81b)Yja*72zZ4NczbVMMDWa`-AC&W%QR_Zo4I9zDYfjd@&KYT_xi=U!kGNUj4<3CvQHW@>vCSiTZ(f4p z(u|5~6v<))sHA;-`#we3T#a+raikx9_&$SH8e>Kb*c^S4&c-v&L9s8LwO+1mWQUBD z;xlVL{=5qxa#9ci?{^rEY!m`Re&sKU%$e`Lz!{KLgV^wNWd*F82%`OqF7+TI-5&jaaB@daQS{AwS+k})y@mXLj^b3-cG$sxGIgHoJw1f>9% z+MLn$62HDwf!yRq7X|AxMgW!(sLVU(RMH2(N^qz1+2`8)bbT-@!jXdi;o}b}+nqp3 z=SsJAO`iQ;(Vr2Gts>j^;ZFFq1D7M@_H^2e;g#vdHT_S~@7eLtV3fR1Q4b&AQvsZV zJLr~@7Cz{^2?RGMs7kn$Zo_-ll_|V^4dmU5G+`fjZe)_*=T1Fy9*mUKH2+& zTZKV*-w}4qxm^6|Wr+w5<Wlk!rG^xGnJ9qI`rhsH zyy&zI870I5`&~})q+IwSp*tsw0Vu9Tc(7nckeI7{yZg@SE^TIZXJ*>Y7?ibqg6K3G|3&gl%)gAl^x< zdQao~(O@7?9_rU=^nz77AT@wiYhSFv^h~ql$oskq>hV z>P-c5``2H=ZBb@1fB5s^M?Chb?tbA{zK4Nf#^(J(81Iw;_~h$zRWwe&5E}oLfBn|# zSAOHs>eHWju=?=!cbq5^GK1yyPv8;b&L(UU`Zq%Ec?;W_U=?#+W|6XyDk_ZVn{(}C zUg_a?gxR0+{8xorI|(WAtLt)sokDb_)@7%@ZbqrTpmjo1k7AT~<_pYFL#Fw@rx727 z?%wL_>n|sw&^|{8r#u$Zm(ALnPc01!@8$t6cRA1V2*>7vp>$eNX+R0uU|H3j4Trnx z7AS{RHQs1}Q`?vX`zD-kgKX#E5R-|19kr_p_rZ-rEZ`%VVg7wy#399OF$5O)28S3%fA#s_ zwHJLN6~py6s}H?Lcm85_`H_;g`r%xEv-&x|&2jaK&%fs?hD2>h{H#`A<0lRb0ozg^ z!lJ^b39?IT4wsJX-{}b@y0o#l+SNlIe|sz~=!#L0;LdZOqTi`s1~9HU8Yge-1sIRu zQF(esh{gEdbwsU8U&(cyKC$*II?tmYznNo#DNne@z;NH^yD4SnQ=V~0ZlBBPTu*^d zA@s;|(SaKZNQ%OV2vZJPd#gSAyNzTDMdvJ%)94@`{oeuUn)Dz!=w`QYkA>}89paq>n*qi zE-N@#lxkNGPUQzro)(O}9&>Sh!cAC6DAdD|Qgr{>=rb7Nmp z9x`O|{^j73*U50f*X>bN*hOZ>==ZRcC$5(J5g9CE;aaG^#)*HboG_}%&H z*-zn-6Z$UklsoqxtsZ^ya2yWXa?(q+-2jVP4{fUYE#)}4hp$I3-Gf){?N{YXMrb3K zv7g5u0IbJP5m*kyHFSDK;AD8LlaH4OqgGg_B2hNT8e9WkOM&S?i5vY%t9g1SQu@W4 zcVrdC9=}YeRohm_1&n`cF4DU)B+?~1*8A+WXONfW(YNoV^qpwo6MtTZtKnvC7*BZe zb0X4t83P@A>$`MQJC5_hO=;^mSU&Yvm78%WFL;ghYZe~bk!hkIePh}s7LptSH|06(dvbRCU&1b7wNyd zdT{@N4kw(RaB9+yg1a>oQCV<$@%(4VbX598yR0vsbwRh?d=0`E20p1d_eaF0JDmGoU(ty@<;&nfHRBk_|>Io2VsM3gq6j$ zO<9Te9Rp~KE2L8Yq#^*Ph`x40o)RtljbIUL-$giRYzejUst$u}8h~@iXU`4b{Vu%KX9@3$iuybT5|hd!kwTJSOu&Xf=QgXE3?nq@@k*I{VbD&$ut(sC z{S(6ZlP6WgSpE0sFBh5P@?oR((`UV2tv>&~|9SO$zyJB_ix*d`+izm|Keb53myqS* zClyntFeG4&U+M?O*6C%fA!VdGgW&g?GQTHWwCfZs%XeQ&-k(n-SnuC)2*D@c;UPGZ z&-O#-ZcFJhPE5Bj=Q(7VHcJ)WLB@=q_8@|pS4#^IjGy;mJVtQkKn~((Kl=%R?>@&B ze*Cdi6R8j@=^q&&r)OXBcuVnl|7!IMzpRJqXOCCkz5AzhWHNmnoJy76a`PKU4<(V2BZa4}=hGz<6iq!=pC7ArHJaNX> z8ilIVAF+E(JkM6^Xdm=$gS$IwbyiLqO7XrJPb<%c#iVxKl3XQA-I(8Buk;z97g;9>Kex2M13n#2SN52^w07hq%7?G5Gdq)BjS!5;O6k z@${He>zF;V=bc2BV%{9b5Hg<;9DRn?y1G4|VruiPA^s8(07Li||I35!!j|H-?Ax-p zjBR{^?B~E}SAA=qp5#r&Wx3~9t0zDH>3FL+*Uigrc@wSJvbiqh@|Yr$A-#)7wzi&? 
zQjzi+pHIlwEI);4%NU~=xu*0Tms^W*^U}BHJoK9k!0yRNFWURw{m1ZGWW_-#`)YjN zywC_i9`U3&VQdGYhbf0=dW&u|05d$U)Dx6aMW+KFf;?qAJC!oh{KMRO1+Kd;&T;{l z+Uq%@*x02NZmO|bC!01XL7N>9CG{yt)fOl^9Vd`=rW~f^rGN&)A_)@_@r;}Td4((F zEj2Rk8Js7&9bD|uDa9&pYYI^R9B*4gD=8u;+E>+LCgU~P z6wEf|OI*qqzeWe8B$$}F#*>m`loRwcN4GjHvQ50v$p6zqzZ-7CK9jO95+~ z$;%FQZc4W1-31yrXl!#jmcDYi{l)6h@y9-ouY!y3!f^l^n3{$uB+G%rKJ{HPE&}j~ zk%4;@%Go4Q=J;t&+F;Ue`KTU zrUjxkPOnP3MNsOR=9EKVnOoyI8Lw;v|AMIh(LJ_SA1M;ng@5%)CuyxzTf>!el<2jFhKX|L~#rooB*1MSj8{LeWsE{^Z6o}K@M674wNXOzb~ zTGu$TcastyZ?A4<^ZX&YFb9#B)P;=E8%kvb(M8RN|1|e#0e`B-;2Pt(U~pD&{sV1t zpc6T64fbUthZ}sUp0Gmnyx!%~=S1Jqvh7S7U*7yPPT~!xe75fy2Lptl0kG*5*Ux#$MfV%5Vl z*_%Unj5h|!o|MIm!W#SM?8`7HvmzsH43K0L9H>6C)ao~Hwfym{GHBxUJPIhHC?z@d z{MoEy(u}@=VTTXIGbV~_P2o(d9w!2xl1%!x89{T}qgR(D5F(cvnzO4+oUdLSu71v3 z>RbB$y}NiZN2|F`b^#zCk(KG{%`pSC8nz6C=x^u_eHW?TJf}pS#K24h9uNxhTtWYt zhrXH(w08@IAsA$nR>lt}xdjC$&T7M6rHZME22)I^c6Z=2l zjnD?&%EXtdt-y1st0`uhL+A6Nh2xBvO-N5W&TDLF44 zjd!!Ip}KUJHD2O1&EoGre2mFK1cOwezb=)a6E+e+mn!vTctOJQld*D4uCVK#y&2|~ zu;}44TgJnk$EyRKhHl;d@#;GtJ>~^-Mt2rD<~k7;T%ba^gkB!-)|^1eNLpagUNoA$ z^Cp&>hp^LprZ5*n)qZoli6^J;Ru4XA6;lT4v3XC?@1sw)S08+KYxUjteg)foUCn#kX%0a&emgg6*E#3%CdO~=ZDum5+{a|#*t3!@YV;01ek`u;?n>I*wwNY`}A zOiEt6@lqtOB7p^Lh)-{~9p25&j}Rq6%iDQ+@ze3>m#-ZH zl}2XP-j{~PkoJCC7(9F%+Cdnej0d6%IGkz{+{)FjXtz-9v9RDR{oROIUXDAugTNQ- zy$e=*t52<9QTFTjQ@;M%>WO>9#aK}+ylS0{#voh@!Iyr5dKA;U4CPqx+3L;p4|Ow1 zaAP)du=m#5w+e<`x%S+3KY*1#b_apyvr&AFOXXQT$M_2`Pwurn$~%@)c*(Fienp`< zm5+a37a+O*7}zvoW=y5b7gdRgPYM7S*X%k*n@~MNuciN#JKUN&D!OhenS2?e3G;Ad z=tJoFj-Lae^`ZCXh(RA=)GuGXmP0DUiusPGHU$JppZE^&r)cjEAsa*{@@JoeU=fpG zHQp8sC0wyKP-_0sx5(>eMqE7S42%4oAa9%5s7d*9YV-+8UcQ_?s-o5Y? z3{!{`td?Q-;^W~op5YLLo!vf%qfjQ|F_AXJ;$LQ@laNDdM)RE z!)ZyE)=;r=is_P?8vINgjZXon`Nt!saNK97(iSQZ?_h)+IT`e$;G(nLJDzcu@Z?GF z829!wJ{bJNp*aC5K1ms|;87}YXYx!lhi&ubH9Y5CpQKAQFnOtuH8!u;X$oHHrx`B6 zFkxL6vWj^D#r#taT-V=M_QpIh{=wlqkrQF@%yWD$88LgyupEBl(xQDS^W|V?6t-V0 z(KHwOyrNEg6pT8;G`u3&DShU{c@sU>QKo$zXAk@y93vMqUbjnE;`Kj?#~LriW-9}& zC_>1VvTCh)i`(BK#5X*O8BwL|CXf3*-WF|yv!3Mpc#r+C?oNNaq_m%0I|5hV)_pnj z>v_bRGiAo2+<^kqD~!`vlUA3)w0sjKq6@OAlp+gQk6-yTM@_g~v3L6!hcb&mW5BS# z6b^Ht6XeC7wa@;Nk$99zqZu>_$E&$YA#c1K3N=HiehBE$F+C+2X?-R)xIdW+UF>=B z?;g4sCAw-xVDc;`m>Of<-fm6KV5Y6uT;EvPswu-j+ z*rSaHg@id7W|r%6*;zizR9sB8{ISU0l3lOE1QhV99+r4OM6thTv3kZdo6Ob zrgrYVod;tW^OdjOUcBM)*Y!sRX!=vkg+?~n9S?ZTTV^R-)ngnul;Fx~cBl2oe4Rsv zqHI6T_4vJb%Xqg}HL#e22jKtUf}O}%KZCcO4EOep^iIZUy@s(V5X%^Y3*a#|OzAVRfa)4;~F;|1IFyJTkbkFSQsiz5>4hM(4%o|p`o^)u$<>FsH#8qO?P z9wLVAv*_6l@BX^EPdy7bcTKwAkqm*~`6s_`&EJ2v`&+Bkk_&LP`pbXmFPcF{iXA4W zYW@PdJuX?mk%)H`@6G`+Ov2Vpnw7CP-%%X1Aj+$XIL-m$E$>B61FOQs7KEPJ_7MPu zzMU3UI4lNRkw?T-$TsV)T7VSC<2Vu^eEW)lFICE&f4(S76Dlk3({)*o&8_YyA=4;8 zLCL2rqDA$71SLRay?5P3M=WM$`opar30o{S1~rxVAq3`CbMKhXP%H#XXl4zj{LX%2 z5=*#m@$y8ZDRR4%?lvJ7k|zkb5RkQa#v&X?9rN564Z%W){fqB^x%%zj`rlfOxgiLr zV*G`_^zZO=)pD^{_cIb+{NvTX{N3MP{q8^c{^|!$u2*jm*SQcL_s!~yAKS>wpO|w> zPeP5AdQYAtm^a3rV})ZT71W-?=F3azR@FsNc&|=e0}e6VJDO~M_3O9J2*+%92#hNXxO}^cDob;iXpT92TQrqIYPG{Sfw^UZi*-7k zqa6F4TaZ!y>WjA&(XA!r=jiC=6eiah;%9zkfAzt~3T`F`|HBy|K>*_hS=U5I`YXX7 zlXU1YgAfetQA}kzN(dES>6&q3j}G$!Qxr~kmJ9n`gF`6}!=m76ieoTOm_8L*xT8Mc zkVT(>EF|4&Kx6DAU*PjQ9$=4WRl@~Bt@e{Cm%}KGbc1nTOEb#g-J;xtYa?`#lG=~l?0rLw*op}w$EqJ{4GEffUSe;JqI4mO z5gw%s)DqE9!!J|#cb!1E7?(Z^e69qLi!h&;dz+a!|Eo%?D zr{}~IQ{IZ=PRfM$jbFRDXup%r+W1lS?L$I-I?RF1Z4Je*NoS6@Wh~zF^N!O6mw3gx zCfx6Mrq&$lFarFcbUgfaZ6{A7<6PH_;82<(%%8cG!+NgBkU2=6NX_G1Daw#=zlks9 z?T0fd-p#YUjL$X4;9JprO77`6N5I1LTVKlmO@|LiB`D%_iT)OP@PE9nbiI^_XcvpJ z25-Mf=gYaWW3A;shp+MdH0@Csz&JW6qP)X!-z)_~WaAk8)-z(mal`Iw(V5Y=wVRht 
z+fxSz6uD|&!-JHrE3!A(Z7AM{NkdlSiBFhk@N;_1aMqdkF__NL%bXQ}fxmc`$jBw5 zxxX(c<|&S~3r#qdhuvJ_Hq}bxIj*hslK)QrgVC1gOFF@ZNM}k>uW&1cD3}EE^)&^P zcmEn5rR^j-zrKDs7x`-BR1PQuh`PXk3TVflZWaA!{KRwmzUZvc24CYkJtVl~94P8j z8d8NN?cZ}n%i=!_0{TCtR^y+Cbc8Ziit=ZCPNqz9-_21>JUI`4jXwr2I0tt$O(>16?;0mwo$(ctWiV%G zB=eIG@x08@r7Gv#lFu29#X7Pi7Tn`Rc`M#n_&6tmpMuqBj3f=ylzWb{#`^Zm$KQD*#N~91hYE20ZYL@vwK4O)JxtkL6w`eF_7()f(+$G!7($<% z$>*<240Y{>;CrKp^MoQ)XubWP!gBZmjfXH3bksO-O{gJ$Lj8rD!X-5fCJ+n@(YF0| z*IF~SYPpD4VfGmn5ffqSyF#xOie_9U6mV;JorT?c`rCs;e1G*y0G}Yj!uJ^)8=J!B zDNXLJg;^({o#?6T(Jl^`qYJ_qQ?5WV6|tP`UAPnWHQ#;A_PyKnQ{NChI+bg^xq9+d zDN2+k`462D(7f&__Pb>oyUuYhbWgndyiV3F=ri8Z zHG6s}*YV<1r-viTKjt`HW$bbGtCsAT*`m1CGPTfVoq3{{oCT#GmZtOc$usxbE4bfY zrigCwDsNNROPLE#AH08G>KY}ULJ?0m7fy|bRTp!K?q5hf2`kDcZ;a?VdcRG;-NOGe zMq5)6h352C`K51PYl;w$ab3JHPu9%cI?^AuIvvpOl#a!a z@N7e;pk?Hoo7;SDj1>C#YS)JMc^eZfU1NH~BRzS!J_X0RySB}1Z^x0)bIgB~$>`fP zbtDNN;{UBNMWyu(s#7Yi7+CFPO5$6>e`*CN9$UKZY~mmF+m7F+ztxea^HAb7rFY!C ze5c4$-Gln2@|RV3%FUM&2B&xSmc8A^Pmb`7WBI%f?!|}uPL$C4#sRVJ#6;`9krvab zN6mQ)@19OIaH(yGw$~ZQDdctm{d6zM!EnFOAu3*4v~$gTTXt(qSt%_hBdKd!ziAtc zV$QixYr}HxL~XrI@$;N0Bejnt{EP2S#5 zs*`0oDpDNU!(fyW61-+&o`)C5J3-!_+0UXLl(uPqO2HXiGRJdPOFi?i7$6m1hPU05 zqi#!)bw>2L`>uJ|*Yo5{J=vC;IL#CAukyZ&oCNoj&&%@j!82K~7y|KEvSj<=>XS$R zfe5J@os>i_;Twg#!K8JkxX1T5MSi0CH3nhcWgOi1aM>WOr}_2w1v<9#@JRF?PPOiV ze^@qfu(#IH`d%877Q5Oysfu{QCwC@R zPNw#wNXMXHIB2RV`eoxhdC@ruYEc=6H9zT8*gekq;2#Y&_cMCj@tfDf@6!#}+Dm&- zO)c=pAoKLHkSzW=E#%l27XiiOtqhWdun}#@Y~Uo72Q*lKexqM(!gE5XHW65csH!h$ z_9^$hhH*Ceh`eu%+a&iC2yY{nHg8+*;#;2aQiIx-MYv%W!yvmW3z{;Z-Bicam3tRJ z=lH>xyzdtxV@ccWrSdukopl_cHMM&CRz7td@1+w{r5wSk9_FH35?v+k>HW08rZ zafsTy+t`Xc%Kr^%+T1(x?6X~_I1**qy=_j@1%z{sO4OtVkj~;w1KmY61C!f6Ik$S!Q(dR<+ z@@uVT@zC(VhLGb`gmekIl(6=4@q&P%ZN7Ct;1h0pcja7f-WkMw|2|^fDQb}rU=Tb~ z#Sf7$t>gYZhNOFl6F+f--uKk-vG>oYF09vC9)>m8fO${`gUJ-&npdr_$|WzY1WLMo zVI*~p=(ULnv|kCj1YtW~FXZi9Lw@oY*PA{3C~6R@SLBrPowW^<;fHpkyvP-(Z}L1` z=}n&UJW&;veH|Y*(EHuJ+~3 zJ+CR}r7<4WRun%h)gnC4NUI3@fY+^k9i>07(Kk5OPA9~M51Dt0eQ9~8_+yTUezq7o zZ=Er)c#!Oaak{Vep3Se^*WoEaDqO^e$B2LjDd-uW(_YqdT6=ij61%50gvV`3ox}3f z_e!Y<_xmnozuJQo=oFse#cs%xz0@pp2Rx*>e~Wk5c*M8kFW*LA?PQU}41^2^!U_F_ z_dB9mc^5O*oSI9qE5*(n3GfQ_6LO=F@d;c*_RbsF@N)`(w-pcH+zH?8yE-QABPfgVk4&8O=&v@2-nqaJk=FDEl zbLwW2L#t?ADR^V}yDvdo`jro6?a7M{`r5OH9nrqcVbWL?l@=9lrhqbNSZA@k!#8rI z*F|)i#!^gc-6DvGl>6I^|C$B#W%WQf|L~NP4!{J%`tt{)MdlX8x88sy`U@U; z8zIJQp1Lw6o#C=azU&|jax_X5-<#i7Ko6{L3aJ;mw4VrDK-MV&fds_#lYxs{s zd}oiWG04QveC=IISG53ST=G(z$s#YUH=L|HQ}@haXrdBPNVq*-{^T~gAHyNuvh!s1 z{@F*)vXb}yST6gM=d)iIXs&1WU$`6GGEDj)Be3yi;`qvZ*BsLW0)op*)yuiEXT8(p zB-!Wt3-^Zmtv#OWHZX;6PNGO#n|(>cPs#jy2w z-=BkdgNN(!fsEOVpaaI*vFQ9E&+ax}b|E$FD8-KRXXD|6iD-{ZHpYwBT3`bB!$%*l z?m8fJU-rSq#)n|_eDL~0RQt)BKlZ#LZ}GSeedRA4A?K3RzDCmE1S4Sf}(DdE!P*ds3FSJuWVlPmUD;BQ#$Fknx}CbdHV2QNHTmGpBX2hYbVX!nBAPtRY#yX z>jBQ?*j{wwQr#VPdi(CwmVEV;5WW6t^*uG2_rPLP?O+bAJ?2#g)76{R zm->@$T}pFN$MBZIQR>Qtu`grrre7EiyHZ~Q0T>t+TrUR7F7IR!i7&7QxvwAooey~({{)NF z4pQ~Zo{%eHbsNsT@16wG@_J?F^{syjSH7~&uhpeIlke|S4_jC-?+A`j?aBqu@!>J6 zp8|J9p-7MHH}+6d(LQUuAZME z^gT5fKfA``!$W_)`VJUAGv)(xfurBNKxkmL>_Un@ucz;bDF2tLD4y(UaP4R^`1ZtJohCSansShFP#38pOLk;FMVa8avwHE2@~;-)MwI9D z#9q}Wn-X$SJD0}vy9NUBkJ%R+Q@B1j6&ZZ~?AbUZ4t0~s*j$tWxRWQvleVOqB*dfD zlo~*CUpy;VaV!vQUwrA~gUlJS%>=^tVp8qXOv>C@p69PF4-&Qk#W>ctmHHHU< zOSOEPPBZ1;kFbwd=x*{BZ|wQJKzXU+nFqqd8UBDW_%dEn<3&?BUOG&$g05K4sFv>O zyWy(<(eA~E$54Q?DKh^LWqtDQx7ZDkmk(E`IjY^`-US@}q=z>j3 zf>eg!js`r^Kr;;l2%31HKf>@pGt)9)*?=ui1XwU3vp`8GN~Eg9ELN9IL`M7icl*xY z&u_n%RYlT(eO}zS=bp3o+H3vRukRKdqh$K_CS|;bhC5Bv)!z5BICWf`pbc00)wn_G zEkBF5m|u($OP;yC^>8w%6vr`^derciQSwJ%p}>@d6p6Jf2cKo+bZsC`d1x#*G!L(n 
zp_BHn2m9GNi09TOcRwR-OO+wsoBUA-t`>x2TaGU zAJ8p$_Gl>%Be0tf7}zC_0%-|{rC%VU>y)0?DNL{HD8-BFtO^GhZtbJj)|K%dg4?En z4;tUhJGk2zBc#JEU31p+gey?L-}}9kw?llrNo)njt?CsvGNPyb^3ARA`11r|%5134 zI1>$(hIg5B2>vduJ+&2XUvfS%5O>?Bc43sBX_FYA*j`JSi2T*Gy*U0fML+!OT4!@; z8BGy8Nx45eD}Oyp{_;x74)BDqy%>!17XJ*!X?Vj&0{Y;kdK8YZ;;b_k+C8hi__R43 z4$&awY|k0qTKd=7ZV{lI8H&3P9z9rH|Fq~ZMIl7Bw<31v2#u*#U^Fk0fW`vk=xPk_ z_RQzwi3Z7dvahX8$3NK-yxfzj6&-ky#puTi-Jl~LXdGT-M!J+2$0S@|3_I83HKn5P z`dUwGv(F9CR|_3cs?nd|_yK-57vXa;}T`%br_Te`Sq z9oV4;IUHV zkNWO7{c*SP_tI@ATGSHMS-q&k;AX94cQY)czJBrLv(?TAbqmUQaz?h1lRalYqrlk& z*EMc=U8Gh-eDrmAeefT?{tu9T5;`5BIDIXbN3QFIwU>EDky#H4lO&e}BtfS~Su1^B zL)MXYX^8F5T)UKN$DJA+6>#^!csk9P(X*?I=>zHq&F)rljlJDkY^1TD2KkfYB<9h} zpqZ8**T!iUv#M-`;`RhUqYoC01rRkEo7Tr21CmZP+10v3MF;DJ%HQ`ilHGRsqX*lo zU;6I1R=@cDUmC9Q;?>jD@BH>}uYUadf3SKU7e9RwfL$3Ocd z?Wk_BQ2;oIKi;juEV$8@$71VxdyPomdH2rh^RJ(W*-N7Ml)YLx`2**w~5n5Q=mhZAw&eQ$X$KSu-d^%LNb}RdZc~JDjS`wuHLfyF3>9mV@^x%Fc z;C!>ud}r=I397X3A65z6anikeUCW!gGiC`Krcs8c4H3Gno>Z}N6b_!ze1eO)59d!K zZ;Ih7d`t@T1#3l;J+~;A#d_{ZabMg71-|73I0cL8rWp5iv@ko|82adXPn)p!tV=k? zm5PIncI>)7clgA>z}!uTlN6t(llyMF>#nmxrV6OJ9+seaZ~KE}`uF=wE|4MsVE~q7 zV<7tA+f)0&EB!@+aoWM}VA?gEJ@w{#`y*WNWxto#THL|`0m-_qF@nYjdP-icK52fH zg%odWUhU}%_&xtv1l+VqfgZoPekV715G?J6PI= z^?C4=Q2pe|lUyZf27Q05cV~Mvp}@j5yEiwFctt}KmaR>KNFRmp#DrwbU#$XAOC~|i47m z-GeB~cc0J9RgcMWfw$&{ZeGM_6ZMDZcjZDC(N7!Yx&H3I_>293E1()}c~;}!w0zJOs^$49kZq%nqG-0n>51R`bD2HYvExnxj z_;Hw3nDIv5P1DfE!}`|p3`jKQ)nU?{MUyHYdvwd{`<{El?T0Uiqv(wlvo#mXchQDYZzC1Y7Jg}ub*$nGw#-NkVg1AH!C;ii_1Il zx8_%sr_l9Q%<@hl*yrKzyF+oI26r1Njrhl$=Qzey^1z>HV+F&vKBgSojlLV zz#}YksqQ7S(?EjnSg7Sb>W)B%IS>EN%VWj=wx?!%zcL(=aUfp$;C2&;Afa1OVw8*{o@G@R_n$=BP(>Y?1smAK3 zHQ>YZS@(~nJE8HurardEtUuMv6Eb{5CtDG>1ABOVPSU1_?r2p|qP4=i>&XEP_n|`% z!f9x+jTD~q_=KSNPx}Uzr^z!%#^U!lFj|=E+*=u zCECX+cys-2P3U$*0Z#7fJyuNggYQz`x6)B26ceooybl@_F1xJAQKx~uJgxSU0&C>6C)o?CXG5li%(7~3x(Z$O#{EU?opF7UVU+^J5(44@X-n2`m20ug7{>grHtGDlA zn?_UDAt#PK7CF3S*}*AkFKWm5$Iv^gj~t>a;Fp6BSq<>UVHWrEe!p(M@FM#mFJN-o zp7+|>Ia)n0mx^rNO%~m~lg`$>=!Im?SYqLf4mc@a({12YIY0E#_Z~kkr6Zl1zN(d2 zb4IhQwTmovC%10*85nDO8EtN@p5`uk)=bJv9|BTVZPMmLyo;p*ip3mPq2f4P$|1Mz zm}c!V!f>?>A2X=MdvQSxa=D=Yrkw(SMfl>X7Dge}jQi;5i`8eJeNsQH?8XR3<7bOR z*eYxgmN7hJ2)PMzxul9UZmy0NO88*t2W{pUER^j2?PLtZrdGf7-tE=@=zsjHt6%&J z|8hWCT%rK|mw)YI^^@QFKd=7p|H=Pt^@l(DGG)KQhlt>)F)t;lnQxe-Or5|34}y#N zzfqh{Pfq66&l_o{oU2$AD?fhuWKK)FYy-r{Fq#Fn37rs@a?r|RIE6*_Lxk3VX&ywt zdwY(LMT~8tJ|BODLUo|D6)2Arxksywy$IkyeoG;wk^~>UzgLxUF0ebDA>Q%72Zxk_i^2f*rJuFC?Ln2Q*B?|N z^r-zTgwc4I(Sm8e*Sqr4QoyzRc zG1%p^w?i-2O-rAI0VU|B6IGT%hOX_#t{BEHq!9m@&nAf(lTHC;P)y%5g0d$ab9>S5 z5^@tGPZ-&2N?l|TL&G>1T$x;g_7>5Zr)OvyyjMJwfb+A9k@FlzxZK-jMEP~nQdcDK z^!ma`QcN~){=Iv@`Bzq}H@N`&!bTYaTC+SkD-k*2a6fnI?!%5muGiUa)l>|D(gqtz zg?7xl1b@$1en8F(rTD}O{WY_gs~Bfoynl4;FScHcIaPld3uCO}D--cL-2bXJ6t64d zsD3GioU)p*l$U1xO`>aKUcr>9vA%XE9Brg$(Q3y9Q@me1se)cgE6gZf#=LY~Ow2^;;!+(v0>uL*d48Ye`UFpfOhCu{NarW4%0{NhD!m6A~7 zHKYr-iG6W_M%`$eq=$8o<3+LU(;N~L)9JG) zoife9^ExgE(SB3d=Aq4xKw#uL1Z0efXdi8NVYDAihX)h1iKPBEwV5-T=o)xUakQ(8 z;b*_+vgQBHZ(Nq$k0)_mEg^*VvcK)sD142jFQE?X($%f&jYLHbP@tarb`bsGlP z-s|dC0_=VYWD3W2#VHgNEq@k6DFr7yL@y&)!KQzV4s+oE(1&yQU`plW6Z9<5+0PgI zx@X*fZ+rFS&+2^$#J(wPxm0t)i(rro|0<>1_-nb>gaz#9!9jVUDbcs?yc;jbWkP-= zJC4i6P|+U{yRQ3_DPZIv6-DJ_rJ@Q=++!V8fpS_f8Wmeav(0I@ao)>dd)VCH3tqQd zJP9T_j21xhoVz}8-)|D2*r4Q^)tK&1Iyj` zetqE`-F>?h zksCDF{OMtnLly0;g}ckKgC)ZfkHR+={?a~1CkzA56S}OBj{T;XF9&P{`(zTQ2aTqwEPQhwoN?Y* zM=AHJbG2wQzq5q;aqjs^^lyf9QfHqDBY_u6S@RIB0t4>2;M>n}z>wp?aq^j(t0EZi z?CQ}M$S3(acS`9v>Dhsj^y*}?m+4@Wq>IqJzq`9sfZ zU!L!?!bAXjhW+YjFT-yX$CUN?i|9*S8mn}Jc9 
zL>6gzHTq7%a?SrS1p~=f@cm$X-^~{K)qB+x@E-G=X4?TlGiqL~@zxiw$8>#<5xji) z#p-9D{xm~?qT2?A2ws>pi@AMkT#ssg5vOO&qhg2@C5(>eZm-0#!z`^GUYZbL8d&#C zRp@{AJ0Gn6wg1__wffGV`KvJ~3enpaVYjim|M*u{|ARmC%M&X-OEEq^dRDAC-!Cr)-i#6LqhG93a)0D{T^_u24HiCnZpq-^)XbardOu@>qN{DeVcPDaweO{r zRaH6-=@(;cF|FhUeV);Ayn0d+>S>C83Y5|oACth8^WKFA_L0Y51#2UOBNWo8ssBR#vFIf<6H=3;7=yKC7`AboPNE;@ zc@=}AVLXB3F%2i=KM*c(bZ~RX-Lv6L3}Xg@JA_dR0Vdgn1jh(K9iD)rx)N^%pts}lJag|9mii!f8H2w>FcY<4?iq9^I-LUm8jo5_%^qCzYZf=L_oNTC@LNK9di0ULVlQo6m{?yjcCD zUSwy@!I1KU5PJ@e#FG35-uClwnNdyaGl zK9W&i6(Nv>EgI21@DQd!Q=3m(0CpF zeDPJrOZ&j{<{IrdI4kbmuXR;4Nb#jO5GF5b7j%-w23J54O&w$;+^hHOb-4MW!U7S# znNJD~29DNvb{X3o9@F}#&(P9%v4+V5PfB)+f{BWNFmn82#%q3_Pq5*;KEp33>cFiQ z(l*TS5m7MFQx&Ut$RISH4!2t)zBx34f3}X@yi#bJ5E(%>aBxlEq6N;L-td^c^=uM+ zcru*#nY|#`&<95a{^3Kt|NO0E1WwJMzQ_wVN!49HhC}0CN9U?_J&RO^B9x5tT7I1s z8D5>$KRMW|{(bhO3h=&rUZhhm(D6ve55)O-@1~tppWA;`#l{o8h81s(wzs{FoAx}D zoZ=O>-p!Jm6$nV0cG6ayHXM}W5Ugsy!Z9TRU@VG-_1O>Yjc(sNSiSqs58Chd%IoN4 z##q~He7`;O14eCdc#tEe_&z#VTd!BIXh7Ss5uVsLt)fIg_Tx+M-91=+tCRn}pQGm+ zMMNH6e_iK_@&KZ#^Bj0r*g*q=?w7c>Rm;=QLaqMZK=NT`mLJ#sPA5^>P$2 zIK7cUVPEu3hSBF2x-8_VvV9aR#&ST>Ab+mUh>_TTSpvI;`D@#Edcu-Sa#4=!d8bKD zE+asCK}z0Nh7kPdJ&&k;miP_Mb7{1=&}7%O`EbpBaA>@#!?P*KjTsKTj4n>&yQ0sJ zH-mLblmjL%DvE-*_Nx(^GklMKzWF_P-&+0d!kM1;@Ka(qwul?L$HT4N>+PB-+UV5a zOZB$5af2@7H6NX1?r>kv9(ggGqVLK2bfP@6E+{yu2@d0I#*Z!Hq1pcm4 zMb*!kUTA!gpLu>V#Dd@>p0?k?2H$+=Zbg<_t|g0dMm&D}QSG3jyjY&3| z0pCUk9ABqT(-tKg^9lJ`(&Jfb@mneBbMAKFaTfDY3G<(P@<{;HN+1slLPH|~&1P75 zJZ_M7!Kj5;CV?+NO<8nGv^gDwG!!bS1I@4mvW}qac|Z8@*6M%qzy4dDy!D+C;DPf` zzM8*%+PeKKtAFKx`irZ7>DPXJ^^|1hvl{8%?&sxg)t%ucv9_3NkdYG5<%RJpelj}tYI=(K%)5P}PgJSU*U0eDK z-+zDg?O)FOQk#!=9^D%?^C04W`ue9jUX0gfLA0RUnzPeWL+>Uo?uKC?&vP&3MztO$ zq4mrkq*6X9eX$5bNxWB`kaLl@No#_O`bjQm5CP_z6K&Gmc#ewA@NC!oU*c+u+M$7daqF6cW?idzIoJm4;sT&nm#!; ziDHk1-s^YDMzHRQ^BEJucd@Ac4}PfO$NlYdik`HsXpE$sdot4czJO!LjHeJ>zZaN0 zWhLdw+VG1WK0==|)*+C6=9$eTN0t^$or#&Fbm`rD`f9Bx*B2Sfef+ZV;vae_invI_ zp)dqVgf)$tCnX21_2Mx%bN^n~SUR~A{bXqLH~tZ?4sT-gd8#mM3EHBXeI3jyikTF= z=(_LTd;HD_X5sc>>M_tKMH(pl_F|GoyG_#OUTwEd5{4)7FbS_tXfY2_LcEMn#Dla7 znF`&$6ETwVMTsB7uW<<`kt0W3!Y2wW6#!`btJ6uw|2+0VfuJ2*}Z@Jzx$E$?sLcaNx(lCte%~fEH21JeL z`6b|ct~3p6g9D;o=C^cv$m19Bg zZgznD3A-1!61IEKSNC%`+_^2)EzBGRDw=HX^a-_}mw7aIQs8+bMIadV(h4r( z!DktlXBj2Eofk#U2~Ow1_MP|cuRi_k^W<2Lk6<-vsmYRUbk-i`^6Xwp=hhrlvtRMk zW!~wXXfvv9ui`aDwyrvwcTSZI@D2}=I!cjRD~!KN>ALQRnB47vkM;G|lSAj7oGc$T z-lM!$_wqj9-Y7brGwL#>PvY}M$6%j!^3iGY`blX~U!jc#d3BhK*?p&K)>3(U{zg&N z^OF~yxCM8c6(uFQBO7n}ZQb~mwV4;1TceD%DX%9nIauU$30$kat&f^@IJ@>qWAo5c zF?&I%)+Mb;LDk#WVr=GeDD~(no_#azhPpIb9wR{jVQ*Fg!3`9+<~rpoP&K zY1^QZ!xb5Eh$y$S&rMO*)6{-n#S5p%!gE}gY_s2-2J~0PBp%K;~#SL z`DSTPDqeZG<5`VGiPa6n3xDEJl4{Ih4S@6%vVp@^%Bc2!4b(n`wasCS%btz)=mK!u z>tc9DdO$pvp)e_%&5!eu4r{+$dXk|C8?4oVR&;@Jfb^Yxm%cfU)^LIRBzxeaKHYl4 zPGM$ppG?8)CapEx1y?QIzWn^FjJ49^<7M!6r{Vy*g8~80hS=d4IE?5!+yCx=_NP~? zB^RI~&R?$A`hy2|>poN8(6gLt?fdRd21RSTPTzZdSsHqgjqTuZ*)y-3!&Yltl;$?}Hnw1JqC6DOQgTx-zelkgopv7AyCFT#4&m$zkW{KcHcOVhK0?W&U?35 z|80NFj(6{Ng5^#K_$$Boc=c<4>wnR4l;7yiaAZbAzyai+`KrYJ)-SBS`{(@puU1b! 
z{X47Q_|N~-)&J%H^#54>_NN&@g?%X(S{zZ-%|q`(N-4Oo{Ph&mOnJ^|4pGg`Qmk*S zxw7Q#}UjlAmApqvolLC+YXUlc>p z4gyIV3&!bKxqVMrQ||9tIEru_Pin1kq&A)cMo|d{Ap%c_H{y zg7M=m?%^;Pem^ptP(o+8h9bJMeS z?RQEHl1@nJdz1(NR=K7z@)7PSZWwU_|78_nwPwXe#zU=I>ijqyuiszXcbNG4&E3^M z`r(fgwkg&x^5mA=JYn3XW}GGDn-~dSDe3yG`Qce!ntD?teHTvjTb|~4n!*S4C^Dh% zo}5T6&NM2ggm7XAPczQY=lvKep*#(zTji@NbcOs0NrK!fc9L@6M#oGk11cja#3L+w zu07LgY8({tpWO@%IAT47qm-jdxEO3G_`FR759Q=-yA*Swh~Oy{>h%;tIOTs(d8f25 zJe0R^aJ)UShMB|QOcys#2A2*8NXKjHhxF=ezo{VbaamV9KZTNq3tXmGb9)QE1Q#AY zQLmmqd9Eq#B4Lvc&T-Vf;4N_DxX;m`h|thwG(uMz{yxv_GCa^)`i{XnJzp)b#W9vn zGuFVDcT&GrM+nD$@H_byjS<fw+e3x-gF}g zk490OJ@7vK4R0uFaEx;702dzoFE*Y{W$I*Ogh!|03p|6HxD|!kScB&oS$GMYGY-1( z`9T>8V=M>LIru1KCC~eEfkS+2VlFL?ei0*O#~zZ zf)}1we7xAMnfu`uY6Np|X)odjYo!(*WZ-atbZI!cSv07zwjZp%$jRs!dybOz4rAbK zK%e((8zf!xsFO!U;||`h@)Z5VZaB!S|DY4t_BycQNwu9|qYHtwKT>%;=6%=lwdr}3g>{L(T&=}WZG60^M}zDU&UGz zgLj-c$IqhZqPn^|Y@p5f0Q_LYiUd!cRPZIs&Bsebl#`C9UR@Y~ABVH5Whv`m3U*}r zPKE{jO7@1AXynM1c-(37z}~=#rZaoM3!hX-;?Z|oM~b=QjE!%;`?!9~_f`k#o7w}R z=~=sh{1+eJeBMsHZetX+AhhNjceKM<*fHBg!~h4fVE$f&6|ae0%t7WplauE?=4K3w zTs(pdgS;q>1xZ;>gB*}gE1)K(C`e)OlDn7y?l}~yVFZnhC=hGQ@7{z&fT8RO&+@z} z#FA=c4v1H{XupkkTqx{Mm9Fwv|BYX-DCY4Xgi&U3jrH@l-t_02PnOSreszs@Klp2_ zfAxR)?bT;L{`;$^|M}7CS;sG$jv~Xgn4Fdm`d3mY3?eK&g-Qtw8{~NqF^qU)FqB3V zGZBg%!O(qrrBe7A0KE9SA;A5671~muyG}vfvt#pgUDa5=Q?+M)EG&70H=X3OIyhfF`XGVWqt(eRd7so^}iN5Ol zj1O>&Xefw6g1jg^Cf2x_P)oO+XPfIziU#5ZSNq2+pl6r)j!~4yMPpvT*`TW* z{kQ`(zNj9gX8-c7c~VE&vacjc{|&DQe%eCl4XPf1FtGQ2D2?`Px5mtl33W7i$S@uh zkpy!(Z9m3&(L8yiI1PlT9nlzK&D!9P`T${N44M+|g|8i|w}_EnBoNf}Xoh`}L1Kzi zEI4zC-7|JdL)uH2QHazjQ2aHGCJ--6bCGJNu7{_X(n9gXf4gCDc0%^S4Zd&0!oZWg{h5cuA(dxm6-QD<8Fownh z_IebW6rh&T9;Ey<*4sRP<7zj7&KBMuy>+wns6_kBeM#%YvlZM@exnspp^OUAN#k8d z2eXgxu~XqJ$O(~X#i>7f%erneIe_M*vhc8;R!|%!)wn;tk-(KtjSsA)%&li6)Y)eB z>fY(bnnDt8cuMFz?WX^&AzPG;0?X<5NzmlUhqYoL7f%TA}BUixmETboEs<1~;4%1!!Bf z2ih!pp?EpA)7ske6u(B`ktr! 
z3@;zvnLNz7XDB)QPR2u!_$@_KKg_Q>`rS#?2aa^E)@w6G_#$4)yU&}gEs?pH!9?r2 z2R;J>AaJf|(G+V)kBA=yCr*ULpe0B847@C=|K=F>9DZlX*fF-l121W5m%$nGZ(#=E z;3&;GpU4z^(Q_A=hwq+`xtLc-IEG4p;R9xCytUFo*Wh;siuvLjYuZEwkIkzhNa-DG z48ml32jHbRm->z@Z>KZoVw`akKlI=kpzG40BtH=-@ z;xL{i)h8EY_u68?RTLgjec@xjNO#dAI>)Y00ZF?C^oTp?0on%rK zgM&SQ2Cm`EnaFUs=rix}5nF1Hdu)6rgTH5tf*w6NB6+iMx9X*ij&9X|Ra6swfIBVe z>9Y*|p0!t7uR*6VV9Bs*6-vpX3qoK0PGvc~e*H9!s(X6&)6dgOgV(3gtU8gCPFUmd zcgoq_I~fxkVLf+r?;Ho{14Gys0Af6bC85qQd1JdH@ z!~_y*J%n%~+(1FodtNUoI;iY0Z zq<9FC$z7vZ=#;Qkm!Vs?@3(0c(xyPjrI*tQ=9mbtqp>MFTM_eS*5k`(d#jgEey^@G z6-Qp@Nv(M2{SQBmc|537MLB;J7z;`5P%mScP4I`<8iOtpo zUpbB&<;zbBSa|0s+VybuqVT!U?0An58X|qe-|We<=i!>6VmbuYr9JHV@R?$4Z6hdp zUg9yDp{z6(xOJa-xQF1F>znI_^T>m@2HI15&_my*O=u|0Fd!7WJz7}=&M3TrvF#lN zX(VfFnKU!c&||dco?AafY37!YtPkj;)i+~k9|aC`&O!^mOTjL>Fil0gy!Q!>%&@i9 zCvW~nQ{M>lE9x@l+8_3w*I(X0`13#cKR?J%+AE4q#$2cW92GS@KF%w2l-DVoqVPp_ zjU6sE-}CANj`O(kgj|JJ6V)iU`}(YdU^~EKvxwtb-pr$vnos}mq*?%QU=)@2ny^=x zh0b}Sb-;O5fje4rf{Uj9<4uc8E{r>V(=!PNin#P1jGq@s+DTA|iRM+KNQVRZcuL_D z)|P&`|1< zv*yWiSPAxt*B#a6YjSKU@V!PUX$PipeGu2Xm%x}Fn?tjrAdB+8L>t2=iZEu6S^M3( z2%npjZ$jUEx~k88-LAxMC_={3Bp!YngMo{#%yoHnm2tvzs5WEv`!cwjyZh(WI>&3< zchW*V(KTLc!=rkBdtxqeoSuh=~-trQ=Ql+E1;BcquCb5Rq0 zFCE7&Gi=q9i6S^zNXpf5!t^Xh1mz(iH*;o?Zp2sj%SERM9W~BnO12Jl*J}#%QrJ)8 z+gEjq+t5X;^o`_(9?fZQbx*rp_m-R0_i{}ALJ^Gj4;ybkzPKa$7G7N!y*rI=JC+#zQmU;p=4t0fnpRG^$uFDyFQAw4p%#^>Qf%M14%f}*<%c*33WdZt_k z^)Bd#Rn%*a(`_YfSIvRle_#YU_ z!A2k(^PQ|dUw^uK*DfaFMpS zQ{)I=n`jsu>_?4{$p2d!FecT_-4tEM4D4e>pKwmqBU`!*`jp0J3_5|F@!e$i&}a7> zVWBfLNS^c=J*m-6_$J%}1Ny-@Ipag?DRK92I*xnycKkvVgL|i_eMTq3y(nGh@Bsx= zL2dnFi=ORn^*+9RUi5swEC#7M_`+5PKE!Irpil#IlS6Z&S6F#4L0e|IrddZ*a?rqf zk?Oc-yt3nrOS!v@r=Yp*`kCSzlwWJT_dww?+O4QMr_D{q07vIW@TEvA3f>4$c>iCQ zcA3|xdFH8){^Yvrg(+&Q>Am`(;aOna*lJS`qCu}f+~cE@U?YNzBRL3&A?zf@F@(nX zT>e2i5hLrO9^nDLrVyQC*m%=uCjN;R;@^ApwCfU@}qO&z`zxTKAy=+;pLg>Hu zTmQx6G+$MqD?BLVXpC!JP;i5o)ClYl!hL|O7{q6ab{SW^dAoU=Sb;|oDVaNxGFlYq5TXf*X*FR0!`dMwJ>U0z% zJ*TctC`v z=L8V>y9p15se(X455pd>OawO8^Niqx7NeqO#jyCO-|zLCaFw&ErRRe8z3DTII)P$6tAE@NLqzAB zKi4yU(BD{x)&7HiI?!UiFNL6_i_UE7Gckd@G=0Mzb(&&wUbm*pGpQCJlOo?3Xj6wA z;Z=8(d`@oaS;FwDpAez-Q?QDVRSUvv$r(VnZzg2@#?LTzicj*<#~+mOF5CpPI%aV5uebkSGb%Dn_@N#?*EfYpsU3wbc_kOvp8d#{J=t5=8|t6exRGi z=Edqwv=a}G_nI7vB;Uju###1M<{!?g-mEkmn3sC?wW+U zZAW8_27Apxg@-#W@wy4^l&T=@M=C3E!{eR_P)oz;ZT-XM(^&22oBxFmcuF|N^PVB3 zW^Rp>$)~{{kG1cir#qvkMfo?gIQ;CT$dEO-e(Ahdw6GiK2E}svS#*QZLAw8}iEX9S zj(1$mL3{t@&WqKfYTM*JleJeFE9pstS9B8!;m~4$oeYK6U=GW%@$F}FXfa5W1@RE+ zrc$3I+g=Ds*!QhfdyS96rjXp3ACMR6J`d5@;FVqKz2l>hc5IW_dtdbJ^CBMjX| z*NmdAkb*)e$~PxYHl|~v&x6BtMyc^QM07< zEW8-6$_yV_^0Mp(d>0POVBooz^?`rx=H=EAM_R59jQB#_CceU0=OpR*&4sQo5vjf> zm(T?#bbp3J?Q@DS!+??dpiVuLjvYU8t^MM(5ov()Iz zXpDf9SI=^^HAH&ED{MaaoOF-K`$arpyZvc(83Jdkzw>whevyRNt6%*17t#SmC==n) z{cgGj9lX^202ff2e+#47LSRZ%tN~;0bKaiwh>`-p%C~sm+Zgv!RE7JrvOIQzQNZ;X zA#~6f^7RoA1!0urc1Ku6QG~w{XM-gyOuR8|IK;Jqo^KwbsL7EB*pz-kt?Mz1x8cj% z{r$ZU@2&paum1Vf_y31~y^$7DSpMJ8f8qsYz5MpvfAWPjViWa5dHQ!&zxQAN=IYZg z6Z9lnlQM5g8UcTq5rfGr1dJK}(ReDv=S+asqF_-P73Kksa)A%B0D1M-3u!)jr~IvV z9)yf)96HUTNC)fJS=w?nc?S<}={ynB83d1E9NLp)M^TD{JY07=x>!e@r%yjmx%#>s zu{`h!_sS)Ir+SE;x>G3N+?E&H`31bhj*2~5{nsgZzgw6)#j)=;D<H6bGxly}>F zk5}(^2EaR=?V$Z+rz&(mVy6KNiS$|FOA3&#Mfs11RYDNXlZH~+S|Q){2yc>3gf8(zz7aw_Huhgi*6t28iFy~ zGlG`afp?$MPhe7b)mtp7FnutuSY*E$F<qyiSuz0Iw0aO=Xx@-^1&#Kd)70^Y6>P$n(>=^rtaUHVVyui~A7ku0AX_txTNUJOColG2_fKOn`6#wBPXrq6M!li9Yh&I^cWg-Vl zx)5ehAP^9JAH>I4|N8EG0l+G=@y3K}vbOli|sd4ag zjsb2O$YYJOBP6+Dgt!J?pe^{+-wgEz?Lza8=Hs=BP>mzS$LDBbBg1>T55>c^G(^LT z!CSYJ@G%q)9wLRwnH*X@wB{n`tDhB}erLT}x%eX|0OMqt2Nkg4!%MQ;Kl+Og=lJl( 
z96e^{W@>%WoJYxfv(AaWHwHX!l-3x0;$;)%Zob-udlt{_ezyiNTy0@9-j$Rg zG)5Nm0D8@)0`uk_Vx`2=<@#Q_=8I0QqWFr62J;sw@@;c#O|fpRiT1%EQNxYeh%4v@ zCo(`w&ixFYn`nlP2TIN;T2B$CPvRf&itg3aCQ>{&5^taf?bO=*O08IP#Ea;WVEQuW z%fd?+=LKELeGK%KG}* z7e!V2K4*?oZSU2gZl{JIQm8cH*y_v^=aGQhtvXhoUmsS}R9C4Fqv#xPcRKz#RGF7F zqd5`IUlb{pE&}0!!u-ctN>Lf2X(E7XqbUDBHv-xWR{nN{lf2>wd9Eq;kMbPin>YX>djANxVQw)$#X$QLrdx>E94J-1G3b(?^Wwii1DPn$~(15JjvALaoJ8_ z?bmMXp|*B;6gH>5Cx%9W2w(a?X29r*ta?VC&(ciV*!oo$3Qfr?=clEV?y;Wduj}Ql z9v1VLW-1*@xs8PU^(q#`)u-0+Y{cvH7Ok!M9;mhU6%Yu`Jj7+ zWc^ck8v-}gd>g^etT7Tw7U78T!zThO9y&@SW-GiSB&t;x>hK>%0;k~t{PZ_(k9}G2 zs(X6CX$PEWQQPB&iEadU5e~xQr2KWvWdvX_awH?%-rK8>PCCGLM88q)cbn7v7K7&D z-u}P+Z~yQ9lhx|Y83DTgH?!vTliq><l<%qYkmeKPp4|za2?gH$Rp~(tfmhwP zcT?`@+F$D#3lpO(U$6e3b8S%Xnv|b}^;w@w-4d#vL-SheX%UZ$^Ukt3s;%O5luEhW zIxY<`o9**8bJ7;x8B5>iLpSHC z&Wn51JcWmS=x?i4ALW?xog5$zD${yY@9>M1*yL*bhU{oA5g)}tq(@=#QcqMuqT;^h zcMuMrNFS_lw$N5j%@Bao?MQY-8_^ONq7vl9B-#ZRWc{&thpq@g}hhtSppugqssh|3^Qm$)1_w&1Lk*-`NN4nMzIdl`N$40>oboXlJwS+S7;(6n+; zT$CDgyn37ht3GHWeBDf5#A{(Ha|2caz1Apct~=@ z^^E146j=w#kx|R5?|Z$D7oedz46b#(bxZ}az~d%>!)utm&;(dm9usV;d`#e3(j;n3Qt9_ zw0SmGn>S9QFTZqn*t6B$XHQlS-+R0|xceX@>v=7!pVn+6CkDERkA>wur^5=X{oD!8 zPoL(@V=?1H8B@AMy-IGrt_H-%>SwOP3woBTHieQAg)|<;2kk~b5FfIJY<;hT5JEOV zhQQ*1r8E@Q3`uyKmNsEAVsg|vTJ#WELPO}@n3xlb`Ps9t5_r6q%gb|y8MHxzB@yAA zzUl#lN{HfwKpt}579lu738HAN{4lS~pZ=w9t^U%#{?}IzKl4q?}E$aKV1Fv@BgQ(AO7&g>eE+SS!fYS8|RP(RvW|ulu&AUW(%PdMiG7d z)+?l!ame2I6d8oNo3g!M`VO!Eg9neY+U|~(xl>3M(qO`uELt#ce7SV{w=4Jzu@;5} zPH$JazWU}z4_EJfaKAYOV@v6^6%)L_e}DB?{>Hzv`mg@W|7dl5^|K0rUUv3;hDN=r z%kpjv%xcNg@A6LZ`)(a69%V3;?|BfTsNd{nhm35N*0US3SJxl9ecW0Y9G=CTpvrg= z3Lv-q6@KMO3bA86C!0N^q8r!t)LxXum#Zfoq;nAx+kcGOJ-#2}jTvDuloN)RFJp-@ zl$6R8MKvG}*}BP_d6j2o~3C?a66J60ZfU(NR8{FIkmNCj$b3# zgFo-#k3ygt*zd|zU7p|qmVBQoVUKiVf=x3#^Uu!X5kF`SAgf;J{JX@y# z$|LkL=Inc(xXU~}8F;p5`4eLBN7S$l>n9lXi_vtM!q_mwmk2y#o5NN{xw?(PVLS``z`{JbFG?5z%UW2v z00j%cES2Xlj}iq!*x3O?<8cc=naj7F(-MsLZ|Afqs(4ze0q-ZL&3?jkBSYQ%cMli? zebv9GpNGrAZHam|*$9nH5+!KJGzNs{hrkaH+rKT5G3iF#uYKlK1&bHQ!Mpaer{S3{ zWmhS{x>eyZ1B3VpMIe)^donIkCY(XAmUCh$#%|AgWM*1?K7Rk);d6QTTZir<_lge4 z$uI%u#u}N2N&U z;9>!DZ*I1SSLKM4UmNj&Kpkus-gGJrCfeV}W7SZ^vlthl$Kc0x zhI%Yva8^%uQCAAB4ZCTbYeh{qt|-T?qj9DWf4|=<*pwv_122W&WQa71pft}l6}rdn zuJqUkv!2r;JeL_s4xwBxQsV{Zj;_9LZt?4KoLOjcvaw|D`P}2c96XQtgDGm%1 zfdHqLI(TkPw!m+_2uBbsT%;Z4{rZ!K9c z=TP*W@9hKQm<$|atk1xN?C|M$X5+CJJ%iEDK<4lOmzx}W46EIohxns4Y(&4eOGSHG zS`cN@3BoX&0(zMvPs*x>5^H^TUNni~s%`mAj)u!;arx33>1;L58Y5(@sQ;v@#uGeK z%A~X)j$yg|=5k%mxfH4~&T3IPk-TV1lNkJFk%>7?G61%60Ig?42D)@Krx`b9doTSa zyKd^cbXSBoj6-#nu;Hl24|v+G(n*)|j$G3I6&>e5m6wl4OOa(X4iSPMC!CNFt}0QUezy5}gh=7>C z?mkOr%y?Fq!Wl&`!0)HqT60+V7^8hjb&`7Qx-me~uL8;FDf4$UNFFxC(4GTjX#pA# zh*V2q-0wV~2N?x-Z+AXVFli$3(4H5Jmqs(;XwO}bKdfg2{OBjor$+Zq_wAP=fAp$W z^PRb}I`|J?{|B2m{3M-60V8#SmRJl-okYC8*MeuP;3&6`A?;u`Ny_YtF(zde>bj8- z-E}og7|h+swb3V|tu?`1elOx6Tzz31bGY3=F|>xEl&+@TTM=sGQq@#n{7&^BlV|cO znEP25@292yr9b=6um0lS{8v}IcitIblf9ra6Ybf5^0iRivMt`feZBd7ZVQ@M|NZJG z|JDC-^_#!(2dm%uL#Gv`u_mp>tQMDJI2*-TH%>xYGaV7N7!i&l7YJ-yURZ^C(+b0C5}DXsXhB~h>JN+2|9;v6spx3hqcQ=ASLBeJ_2xx?b|hpc(+p<_2hQXL6hb%`9BrIy z!Ny>6npEGfe;vG}VX$IpdW-ADHLh>(f+GnOnXdRPfIsH0Tz$K1Znb;b z)_k|?=xnV?cwp_TzWb=_o(^o>^{V4ouY;i`(X0hKe4bYCgr~KBTQ@fu>Ix0V^+uZ~ z2`-NkEtfca``@+tx1F~?7x}HORTY<<`utffp`LQNmpc<34^L1$l9qwKmV-BhfN=g? 
zpW?=|rvBghx6`H^jB%PJ!mT;dscT8W1_^)cMRRSR@D(poM1ycHSbOJMAb}4myxv%Hr`)n83LDly2prXd&~fw?=$yv zsi7~x!k0*F`ETap8MGPNm_D3Zp0K1gxx)MB*&*BT<@H+P=+h~&URCWzIXOG5)5hd( zBjy{bV?^VDwG<1=qmiUHw8X_?jtk9WRVHsIEY@Nwyt+N9c@`e)v?ueMt+RJMUQXiB zOm57J@Q(+D#UAR!m*QEq*x0+bSXy_&g$sRBF7|4fR?=;GW~J8_qyldPB2a@JBtU(yCP%Wy~?fq zpt+`%+@!Ugo=V=$O$$fkdl%u@#d)pZnlt%G%K&roG7d3xK!e%N;-U!!WGED#r9lM4 zwQwp{#fWPkvnC4BETNx;cj@S$s6%x;xNH~FpRlxCAr>JkAlP|^!QqhKKC{WRpW-Xg zQ}mwJLN@w*!XJQ^rgeVYXRTvi5ej@*ax`FOpT|Sz#J%Rh3%0%fZdIs1s)I!QIhct# zowNt|&++k#EH5jM2WN?1hmY&LX~Uy~N!~P5lrJ`@zietBEc1TLQ_v$10`85LX@9gm zVR^4vSHI`3@DF{DHP8oe2(IE)#{GH4ae$xj&d^}{8H~`E95?@lCL0LvnS!Uh0^89P z+2wBu@Ag-Y;Eo1ZvB{jQcpTz1{(L&#s`w#Z(SGXBepO@EsW=YaN9`AWXl!$} z_Y?Ze0f4v4(YmSsHyprUPx=}8j5b-J!TPY1F3+-X?K_=hx(k4Fe9t~?rz4ROXW{S^ zK!I}**w6hwIizi_?{tMY?er@$W4t+i4uA2!-(_j>D&g1ok2ZUb3}zKgK}mXp17NnI;~)O-f3>QiB^N+Zg=rkz>2RQ5`sY_4 zK6-!l>A2jZ$~#)qviFbQdq0b`He}8JIQ~M<;msgXme6^0oK>?`TFS|*bSuPwV8t;^ zifa?o=EhgW6?j`@?7US?%%y=53gq_?h{;3*cnGC8l}5qNLf${DG&)lbAdKy(Q>{LZ zs|@iBEnr57HpU3T7ClU+%@a%GUfF8ncEnqIUMy@3f!*9ooR{A$nf)*R+3&6X(y#x| z19XCEUN*|Bx&2cwpS}J34Kw=rHE%v|4mVf7lOgaQum0P=_oLNs{rGbA#go^?VZ_JV ztPm0+gyst~2wAv@#Cp7d#Mh8VpVUpvslTsM{v_w$x%XbjliurOE@yYw+NjMI8o*Y= z*x+K*2%_B@0Kc1(mFuzb2bmgtlrZ>IQsg(@yR&-uE`#-tUT;^oeB!U3pQ8Wh(Z5k! znZL1marA#(ef9G121#mRr%2^e*jatI>$sD@*K;XJpY=XwtHniov73@m1g-BV|6bU{ z!WeV-zIhiToO=;zM3DipT!DFo8v`@(=>%VGmtLNf|B=95&;1cAN%;$|gwUI0=bf^M zx7WYBdXa+o=s^i}Xa6Ah*OH<*|Bc?CF!$P#%_%nR0fn2AYz|k=c_*Tje7!K17*Nc? z3b=6zn3GNpqWGUie25pkH>#q-L@``Rs}v*#$|AT7?wOcNjJdt&3E^+gO*p~*2{#N3 zLV=kJXoXMhD?@LmJgZUi23O#dIPM};Cyp1BIEmI?WX#;H7aw)>(Lg?BJI1#yuG_Am z1K*87(z~2R^PHss;BC=H(;leXGq}kh&>NPxi}|d6w{Mr-3|f8ec@&6XouB9=L&T19 zqY2^Qe>WxiuZ~OfKhkr~9``R;$e%hn;&y8<+E*=suFD&u^B&dqN#f#p2EuhG9-X~T znQ6aHQ-UR15tR1~N%Y_)zT8xri)t_H^*MTJ zuVRAWo=Va+6mQ>vM5&!0S7ef`RTC<&+@jCbM*TL~6NMV==d^uoh`$E9yQ%@;o7E5@Mk z|9mZtv$Rm@vevt_EK8`iE~o7+XBDh{Cwp{?K! z!<#_F7ENWWQx;A;8E)^cyr|%q@O6K1XJz9&xmfLX=*aPF1`p?kaB6T3Hv;LvYl^STIpnst z4BgB4%~A63tb0#xWrV)SZQn^{cOE)=J=_RK^{ky^LNk<>7Ej^s-TmZUhh_*F;CI2E zu?`TM?F$!j+;8>G+ncL*YbW-g+^F5Gmg{2L4$(M{J}BVNUL39N#2?o)SWb=)>!o~m z^+oP&+*H*wd^9!yg)-3tx!s=j2Yz>%k~hVBT@f1`IV&#;3fVh6Ybzy0PffgZJ>CU3 zob4I-Q?{}{5&wP&ZF0+yXYF>Iiq;~Hiz__p@-%HUTDpl;NSEnP48G72k!9M z{F?cwMy+**Ps?$^m&3L07jl!6wrB*F+FmuDaRz4a0&n)bH=oTj;7y-xs~nx@!_(*} zr^9YJb(b|q-74`O9-~)Ig4-Pm(_-6C&V=@%&D%QfFrm}C6?T=!m=VY|e0){%zt&Uu zO?;EiM?cxBw=~`d-_T&+hOb)sq`M?gIf75h7sBtYbDY9xAY9#Q4>&0)&Uh$Sy`zrh z&MXd^cuKql_aY-tJJAgN488;tp>f?&-v7pj1G`txU#^~ZQr^*#Lo#wpYMS3Xc5>az zb(}n%R;_b`J>2c!4x!d@d@MWK*x`uL$ddMeyq8x#iTeRl6|Vdljr#RiJ*bPy>kjm= zvBD#x5WhCkILFneBM zdH};ydg?sSW)X-O4~7lV#p#?vVgqZlw)*7~+5dc%!2jZ3|C<4yIpFnn%=zQZ|7MqZ z?~3KBKmP4M`TqLqw^RQA_Uga+yT7~o&EF|+B@bpH=haajnV=9tW01k3q0sT7kYEB? 
zr3N}XTp}y(K6qNp(+A_o*p81*GqCUOKkk_E4|1xu0ispjoel-r&Ov>WSO4qZyjcC_ z)4!^%t<~@Soj=T=iSCl!uZm5Ii~7xg=zs?_`11AX>Wk;!l!y3rJkWsu%h%-u7kyB0 z)e;2j)rh@9CV{h)Pgj5SXaCpgm%sSB;Jhr1ET?Z%(7!8B(q&;RGr>h%b8K0(g1FC1 zcKp+y{@Db{AVJC4@&oQCrd6*%UOYhPGDGX5Z2oOuz9OxlHGQn zwA{A6_A;X(>x_jt>^-#DXOz5|*S#G_5wS_oG9CtRzt7fmkc;M zv2tT_X#iHQS}xnE?~YaVg9^bYglGEQSChODgYy{GCIu5QWrx7G$}t-d67~$cB_-5H z)slc|qIUYqc?K)#w%M}v#d!v6#+Q^p6B^XyaF(~<#){B90*NAFl0QnoPe@jD?uSwc zA3WnYMb1|NvxLH=1L(Wmq%7_#9~dc<6UGqUygvk>uIi+aj+ZlmL=Y1GXqlp4^#*xR zt*tQ}E9agG&-jiqp~~Ky)q`-$qvcZXc&B^D)66hl6s*PgARs6`3`}FH52LK2#q04^ z|Ik7c%y>s#4CT2M|jfARF|-Y9?Y*E z58O3i!ZtzUUn!9&Cg>89fI!CK(XBk3a00*X9Z!zrMVXo1)f85fXYEUMcGmd0cpMq; z!j-~M&{t{yrjy}!j;2;-VC-#;c5$>!Su{Jhy{88BxzMiR8G5D=&R8@IwTu6kF*nA& z*Zc5lyccxUZ{t|Mw7VF56+*1W76_UfN{XgyXLw_{;;-=yE-;M8)hp`iJ%*ZgRWpru zb8&8<5q!NgVeH`jeRy3Cr0Bktn)+@Rpf;sV+mU=943z~lrEkVB#Ysze!rU@*Pjw1i znz^d5K|Et_3iP^`JWoyy%>=~Jn}V88Uaj`-6>jiRahB~OK5GNkaR(GY17+~elfnra z?~|`;JBP>GNh$T7Czn!IPq$l{d|yt^6fIdCHmYZKTZWK1DKipa)R5}>Iqe3Yke=1~ z^GO4AapGhp2qBk~Q6?{q*I`PdK5+;M2sf83DckUZdAGSBFaG99X=0*Bc@1~g3;8Md zo2~$78}((qer)ds`)MJkDL7s9}Pkv6%(R^iRj>4D0n77mX*Q`+^^#@IqE|A>aX>PfbzIIe!&I;d_$$>m`%>7A9 zG9BnVM_?}&&USBgEGK5L-oN)Cxzj2_Ksf@J;(R+#KP^(qABJV3KgVwGHHYtq^ZV`o z6|r*A;Q^bax>_$h=%U@V!hW8Wp#RygK3)Clmp?BQ=w*0pY#L_{=c^7cx@ff_l_Zwo zJSk+WKHY0WjZGFpZ-hm0;0^BZX}BbhtrnQq?mxO)dZ9<@te-TWuCKm+{mszbVR=5^ zl@s=;RlB#>2f93d{CgQe4`&79G|&B+lvlNZAJ)UoVJiT?`Rw!6OL{Z;moY+bz&rOI zJy|_{`u%7*!@n_j`Jxq#Z@*go&hP(z#`W7aP26f#<;_GSY-Y@V`_;46H_yLn$MgGe z+KO4Y+by)pT>s@)wUN>F%{R~Mdt=&I8W)BE(YctSLjpJdEyA+#Y0_BY;5=cqy!i5u zSD*dt&sV?c5P`1`g{Zt-y(k~q%T6>o&2=k5Ii>Y|Tdz-3W}WyT78ya`Dx`;yyW38V z&D}RG-@INO+$)}}bU(LW72lWj^Wf1=d7Wx3rZKp8Q=Gqh`SEJ|&a>5eJ8F(EUaju@ z*M$dPMm)?@Kd9kivKjQGZ8s>)T8W z)x_%np#+EtT>eFuFq!enVX6oo6B)7X;yaDkMw7?ulV=HF=Y4l4;-7;Erd$S9;}#`1 zk@S9VEGlAfOk0%gCN6laO-po_3HLq?E^dUN=O!tzuP$3IY{g*GCiMMELhU?JGhx{= z-1D_2OTtoE(7cmCM;l?hduXTej>(Vm)*KfHN#R`-XkjpmkwsuE-X0#GCh~S-WAHIb z;UuQKxyD1@cN6}^9H*2{9=XPLR0JLGf>Jb4gWc~*^--Rd34&ln6KD#(CPex$xlFxm zFe_q`pmb2Om)kddz=&c93r7gHV40uZV<0e!7bVh!GjCRw@>`{SA7!o&#`?PIscSI? 
zdLGy&!wGw+>BV?x{ERPN(_U?%CrZh(s-UjxvG%ue!?*s{@~k50Ryc5j!HG9bMl)G) zB%}~fxCwLb8h7-!Os3&TJ(Zt{9=Wfms&JiRq3wpG z>>X|8ovppK<_WYUPvWIqnj_1jvlKiy+f7m2O&Qzg$*<0{+K{iYOwf75(eA@!&N5~* z2PE^ha*&}VI>h>x*#GL&k7&fa1AptmRtu}mI}SZ~kS_45dyQF&w{WJN9DTy0?&M5U z@_zB&j|;m!YLD6JM&5s^Swj6)D+2Zkl7~*pYM^o=cCuPg1op%1-LWr$;gNDTd*13} zFw6=BUhlmo)YTeqe#ab2J4$V@8r4=yo2_2JbH9z_4xLOtGmrfZ283Q8u%~H<>br=`?^>?S5Yu4qr6G%uSVjlYw*g>bv$d{EO8;FIT8l zi;Kqa@zba6K7Ls^(KoB(LZ1#_yjy)+=+jB3r5&`dfphgJ{&C=iLkoWK%U@0C*^FKE zL&t&}DT6Mf|Lbrd^b(YF)e|54b8~&y^2)Pc{n_f9VvSyWb+!7c1o+1hgktgaPTpc0!2+ zV%39(XH;R{l+=jd60p2!6f{C!4%nR?6@nX6M}!C+EWIn1ZdL>OJg2uH z??47WF*(AqaS#rwHU0x>byxogB%i~ljk19}h6}?9Qn&K(Z07YjY+3A9$L*b7v~s7<%ILtcwB5_$R-c>X>YoYol4rxlw6zW*C@gL5 zq(s?y1SAu-cj5KxHi$#vxk$Ut06%LBJ|i?wkqLc3YRttT>))h0NH|#0IEkiD+KN94 zM@1KfnYZ8m0P)=VAN@D^%;yB&Bx!F<7y#d5Qq^Tl`rZ%vbKQ7mLwL{d4TUjS z8qk5Mc9v8yXlWe22gKK2u@Oglml< zH9TGI^}E}%D(EybUiT%wR@)Q=%G)w&%!D~G4&=jQCU>xfirO`)EXoj>qx?=270Xj;3$LB3SxIIrV`$4wRL2%GeeIzE28$QK3jCqBX$KtO zg5U1<-g@$ZIOPC_p_kJ{%tuJT#w_Z0w?t1`TUv1}*h4 z>hhYBXQ`NtS*zc@KOq*w8w)KNRbdKulFe3cI2qKt(Pmj48QBpmxa36c!_Ud(Hqs_z z&I*&*E_`D6s+NUcZWNw#x4fJnIjj8Po5CzU+-dZ~2u7eCs_dv1QKZxS^y^vByD z-uF}J_t#T+3T2R@hXG8cI+g0Glyqw;^6yhl_nNzQ`xRogb6m2L+JC$GGW><5yt%0} zxp(sZTh*27trgzA_6{)CDzMCBLFxI~Bm@y^5zLLx=E$rDv+>w$UNDzgx!cNXeY}(M z6+Y1=2g7=E`enwzaec7*yxF{!tFiE-Qmzzk!kKc`+_PW&cX3nWz^a?wvr65{ObY2< zeV7%8?lGl*xKeM9S`^Irr6{|`3i_x5`!c%+jX^*b7UZ1bh6Nsj*`W=_X zC&?fRbU5E>1N8^3%zXLm%Ypr>81%h^a(h;%G0+LL#rSdXiTMp5?Ist^@tmCxiyZjm z(ZkilLK+XE$NTAbyM^J1!-v;rU;euKKVN@#kORqV4#lJA&sN_)`+4C)KMgJ>^Y-djfAwjK|Eu2FUVZlIZwjsYW_9?o)LJR~ zPYb8ISLpdx$65=udwcp_#&vDPm*z6Y$2i-Uv>*GyCeWZvXbGToZDZyE397qe7$SVwG%;q`o9tk-cNF$5E#$O=QWhXFzHWfhBcq;MV<|8$kG z+DX|zKK*QUw|Ra$ByDZ{WOeJ+t*o^MpuySABmU?~2Ld#?3{jM)_}Q~BR(BtMWW2g= z;9TH8Zxijkr$1h8{<;M6>pa;pbqHml85By2H==_$U@;Edm}6b*KLLFe^5=2VjOTqP zT$pGoT~W$a!Y8WPK1lrShTJ}5AfY}o-fZnfD2#xVTmpGB#7SQ(ChXHw6(G%jKz|u z9*=4GpM>f96;23=2`lJbeS|a8VZE#diN0X~jL?t{A&$U_yC|Px$}u8_2yMOBzxsr^ z5Zx$`>M)r$i;ptUxSo~S0d7r(lmM}yHm8}m<~UCMt{*o1X%}S1Iu@P(<+BsIqrveq zrqzyDFG1O01lzn6{k75kEP-;`WK;Q3e0p|%l)yjlHv@64SXp}tMiFXk$H1z&v+)Y) zk8ukhJv&Y)9_KMQDZZcaG8_34nv9%L)I$WCr})Bw@$?yv30(qKa$|7Asfz%rQ7G?k zD+q23jTxKZLxRib!5e5KlsXgcXjoIR;V%$7?)z_f9wor{EC7iMVFJ3xQ+s zT>XWEG3us$$`lyg@0DN}#d+#67Ts^Gy-nd!2yO5l3W1tU`Cinw>$*qr>djHE(Xe;* zv2t)Y@vJFU^*nk>Kv9^glVZ_;r9j}!t`IP7Hi^51M<%QW44#J%Mm>&Y^x7bf)=h4D zG@%f~lQB;3Pt11Tr&r*uuNOHX3>sx~Idy5K>%jp!Y*ePsg&#bts>%YD2W;+(B?d!| zhrvzHjE@a!a5DYv+4i-O1Gvf55itzz$td`)cAjqrfimt)CbGtx(MLW>aBcz_r$Wys zr}5Ck!soMERcFyLPd|Y{ZjG|sduV(V@#ZkH^e=+L;hPFAc86;`7We zbPUJ(W9}Ikf>&q@{0IE3;9B9q8{jea3=0Q3kaeK}fYrAEMxp6(^c>#1+#gy+_wiq^ zixQb@>ZtES8D8pIps(q^s_PS@e2QtobV-&x&R>!hbvT+x~NW4mys(@s^Ijqn*tHg}&FSzt1g-_+DLVYn?lyl!*y zmH7X)=e-;3d2votMmO7kNKqamz4&Q?M^^C|7DpZ8^X|1h5Yb2Ux^qxoUZ;$L?PO7g z9-mN$0pD>hsvFf6KcvYPjMLG!AyUdt+|MjzQki0{8=Kc~>NJ_C@@_5Pd z-&f{Ujtg_te)}%)7y6-(@cxb*3om$&ycA=<&dG;XgKcRyy!FhcTh61Q#R$qhbTH1Y z-an7#mlOKX8;Nm4gZ&Nm5IObU$YL_nkE~jFL&)qnMj34*&3dg@8M_SNk+;#X@$7~0 zMiB;Qx*O+bz^vCVG5MU2+ih&!zmr2YgY2t9Rc=<6tE_P8^)gkJW3v#%TV?YxUz|o8 z)v!cSL`-+u6#SDg1+_WA11fBMso?2Z06cUMn7 z{$61@ZO(0*jO?I2`c#HME&mh%j8Hf<887p)L8L@l=sFU$M<1TERP;`WU`ab+T4pb z>{}74WDl{h?+HANs23mhz`&T#<1b43ft2v`mduh`q9RzPflm zN<8={Ggj<(qhsjr&`|Itx-)=^PH8jq+rqBCYxRJ9sf*F{5DHRQhW6(FzW(;&1^@V? 
zQi4(FopIa%eD>L|Qto+|%21GE|0c%Zu?Gl-Cnz9PT;meZk!92SGs!g623tqPQBmHu z5?Vax5Wlfe!c*D1Lg=kL6(4+(x1ldDL-eP={>|zqzxTKEUT4vT9&}s(I zB;`J8Wnhl^Jxe&591Kk`o0a&ogNqO%h-f(lm;6 zZxF)e-D(_z4_Ye$M?8~4ga*$E*;KftsXq0!BMI~E#2`lQhBhg!DD{FzDUZ)2qRxAV zMS)n)csLCno6;Djm&KJ5aFg~m05OY=SMQsw3G1bO zlbG2oNTO{M_IL%GSSR`PDE$i_FaFEF|6l#X)oO_a2)7ic<5oT9_|*UzB?Zl)zxk=! z?pwj|osg##*oQH#GvMaJQDh04(Gbog3!&W| zk9x>9`53dQWNB<@URz_>;GNp>y!k#91TSKuT-6sJJ;Ou#Al4Q^O_)xP^r_|=4u)jB zN}&j|WCR0=Ldp;%6ZC255T1KSJIm_aQijJ=Ta21mppTWSj8p1Hd8~cyif!lVKvcTR z+piv=&MHjRi~-#D-kAUOaoNvMe-SH^j|Qz`P(FLXJUU8?b{Q|cQFEX~W5a_u%kGs8 zn(*2RL{rxFaW^^ zzK(H5xv%2s_0mVZo7n7daT9I@Zt_e&@T2z`>h4S3XbkjuxxXkIjCLzp^=it2$@3X6 zWqa;>H5MvLnKAi<)B3N+(>HwDz*5SM$E-R)_4T_Iy8(S*gN7=Yo60V7s9`Y{%4$y^ zf|nwp+rn8U^ry#+&FkSETIxnc!UG%zm6Z~>Mtj;KO zNA^VF)3!c(Zmu(Dff-FJ3(m=Ilwvzs#k|ushE-!xv8ijJts2BOLw^fQaAqGG`0+G( z7l$q~y?Pw7{E485H1Z`sc#fuK-*=bea zPOJLWA!H#Xc(-HicbkMc^LVQ%K{;|+2p&MQmSrk@#fQq`UUxpt)V$Ch`is7YUeog! zbmln@4Mxv)eHq6Bc^`Za3txHo_~EPyINa$Vr54c|*OF#N1 z2lv}|uSYI1*f|#NmNNdVm8@N{?a}T*ICJcKz%_2^cj={X-@P0fLFY(HkzpU*d9`^S#Ad=PQTxmcLZWWHiP_4XBCoAhe=ycz^;q*n}L$5j% zz>1wV(1~~Nqz~TB5TNfc$X}G&_U)S&jYBC=|G^*qw|@kAE&}oY?J~G%Q~D<-KCsBd zZiy%MU5Y>xAdAxSm~**b785oKLhqRv5gD%)sb|2Eqx;8pOLu=QMx@MK6!#suXILyk z)MnfD7$MQtriz!VmtXxw?>C?=-Cd+)Thg*5(xGc}k_hGc+seIfS!9V3UKkzG%lv`S zX0m9|WnmbP49()Y{}k;=f8ZMeZ9oZi@sO14we5sc3jghU4Pco9HXFgsmSK~ht1_ka zx$^09$-j{8r##RWRE2X~ z9NRcnYv+ZWWyAT?;xjCPkkFZe^P#&2l{4Y)=L7`U_Eqz>oQBp>s7aV-8;9H#`yT3|?xlJB@SW(ioySG=UaCFGwBQ6k7&Z!T zF-|tXaxyF_cbki=lo7PGnRhPV@Hj966SLr#d#JCT(>gj>!{i#b;}El>BiTqcqn0odS7(^! z*L(pUv@vqONhgSBHM9=YgJtZ|-0)rqB}?Z0a6dGKIF-Go&Fe2%e)>A^_IVj6ju3oI4wCUHT z)`M^aZ?}uB7Y62-+bSBT+HMBa_dj{M`h(y7;p*cLo|L8X;p)NTk3*kC9Ve^LzxaIh z)4%wqtAFw*f3o`Gv-YL5F`V%fIMWA^ZFiQzetO1!p*u@_{>7YKMKK;df&7f!7z+$J zv~W-exibhRuYdG=QkdLfaGkUnd8ZKeosK15PhTTlzI^wlcUuVw|3@NS>ht#=eQ)() ziD~Y)!ttclhYy}U>6qY0t4ANFtY;X(?-+0C2k<9F_p#Nkcmqtb3>|g8$I)TVI^XOO4c2kg6oIpZYKg2_Tm9Y-J}G_v2djtS>L8totl+$t4)t~`4fJ(# zj9e04__BQpFUkt^zI3}$7z5xR{1L=O0{(O@OtXAm{x{|Wcubz`sM2D&1I)R^4hA#O zmVkI4#qk|=U)oK|qUwg&sv}%aqhcY&215AMjo^x}KI!m@^%3LGJl2to(-!3WAQ{_U>m4bY_6qVd7=Z zk&j`-1e%R)wpqBX`a|Yv-ha!|yv4AOD$gnZS3D{MFt8W?Ga2XQkmjjz9EKkyH32oA zsOldp5x3Vm_$e89gHscOVqTlGYZY`eCX`jg$qNY)!_>iIta&=U1Ai=M-Yc8ZD8#%- z@4F8Muo7PGO=H1|9){T(W1d%S92~T}zatt=#JWppSy}4i;Dhjx%F_|5LJq71@Tcex z1*t{dVL-iLzHP5G+^2M~0PVFH`)tT1nVkD?3%` zX=g)Q`G8~3aMkYo$dy;zxR`LkP4PpxJXOYxaGcmZ{UpHkRUiA#__2wKVE|^H?(xd5nW=JV6cxm3P!Yi~@^qYR3hpsUbLUtg%D`7~G@bHIqQk7PuNO zG&t9y9M5WyzJO=?RvopEwzR*rQC-nGx*RC4$5|r-cl{YU>hGf1g3)}pOfE}%%l|`H zm7B$-ZVAirPC8<+n@rRgz3P$n`sJxM>C7|Fr%&*=j0xC6`M@%YQH@vC^arow0sZ3u znAHfq0ANrLlXtJ{Q)_}$R z)K0I~Up$9T7ol%V!0mjEsB`EFZJ;kSq^$3rkv}RVgYb{l$sWyq9-qMnS7b04<|n!3 zvU&po#;oILwP}JFyjEmZLo1=ZFgW;KaPGau=n3Uh4nF%(9ktnCB|tbKF14qfq3_|h z-tI;HB{LU%fM`)3c-ufYZK<~L(g)A2Sh+lN!6Ue}+S6@YcdlzAQLd-WzY2 zdJSy<$O|g~07YkiZD4=~Ple0c8bztk;8YepQ5ME%sie7P;0bSFa(}V~K|t3wC&A#$ z_n^e@eS~Kp(7*R;u)p1Je6-1V;D*^|R@|;}02dU#c~-r~m?FQ`{6iV#^f6zG%Rk90 zy4^f-uv09wSoZwdht0{WR6l1dY|rfpzGuNd(O;+)$s+ za;%6UB1eY~Qe@UzVJ|*^34_cDG5zd~YvYRXApK;!LHo^(x4!Ich%3ejkadr^+b9%P z9SjJU>~vB{pSC*n`d$69;XimP8U%&zlx2vLzt#$`y@^tZA7(%t<{f`uDsfI38Q!4W z7yyjSM-$GZc}O|!yR1&<7acj={K{Lq(?L{owLEfpSV+-f{W~m-B_qTp z!HgD-z5Bx}0X9ZvWxO*y*@uR2bQtqJ&g$Ne zKIwSh4+=*reQ7v<-U`4;tNVu?CyZ|%Waxc#uS}P%oZZ^r`5*u1|2~7}`iuap2X{(o zdar~ydQMkZ4$y5MOL*E2E=fjg)<+wgg-$U#4;}WW{mvHH>(I1GU7!AOWzS2x+(vMt z<#)0or3YWOvURJ2+8*WQKOk@0`$7-@))B$s81H{qh9Db+FKieN{%<>m_&8Z1d`i^I zUIvGmBrq1@@p+`vfWCZN{vR^P^VG6JC+&Us;m5Na!UACsoV8&gO)Da>d9v(42;BzJ zYXUTBTB@%b3C)PY6=67m(=yVem6T_M+l 
z6QKk@a|hGa9@!)WLZ*o&@BQMrLU$8ZrR`w3p_hv!+F483nUx8B)1Jq0NYS*yc4ru4 zCeVw!d5l_aZG28o+Z=OwJI`ZvnN%UJ{UTTP1r%4h8{@B?w_MY1{L`cAI%_!1ZUsQK`B3|n8T!i0wkQ%)RZXEeOS+9p z_13vwGs)Md@HBOH+nD)(V9_7#*3LB3v)<_4+MWryyA@v)#9r$q!tS>?>^ntDJ1&1G zm9DX&2;;}O7DI9?WyVtX(pde#JFWP^7d(S?yi8uhKjA@NYc(3GY&b-_T%Qzs;Ys!% zEmySD4@hQwf{S3knba7`jD}Hqcr$C0yojP_-1QCHTS{IG3iTu-B6zDCRzHW1K@;4J zk(rQszle$oFR)Fl@1Pj|$%4gT0ogL?r{olDuKh7!2E_{w;ap62uNb26M}|>HC>ChF z2bDMOi{V(U-LeTXl*jgq0k$aACh*1q-hyY857zR)zKrRD+r{7+IQ6%eW+^(nlcRWq zx0zDo83T4gtAG+-4E^9q-CYc(G4OmDyr8dsugfu5yrVq^E1m-HLSsCs!m1b|UV!KR zb{TsL-gwrpajFiDkmFz_YdEkVr90C`jnCh4SOov%OZU8by+0tx+xXJ|{g-(FfZK=EQoCu6trY^RRbo|c4w^&c3cJ^AJxR%o?HU10RP z;1wt8Um=Wd3yA6dLGz$&L^ckt?9qFDwfgoq9Bdf~yD97?5elVAP*)}pqwTZ2zL%G!`riGZ z>94j5+Y#<_o?&7ikHZ*`FM3{`LXGWND)zIvgDh(t$;HMue{Vdr#OXgNM2F}9G<=`5 z5^zx)yBD___ddUrv?55Fm$w_w(+mN9KP&=bqpW-AV_6aIz{~EvE&f}Al#}*6j^X|^ zI*vB(6wb1nE^>bUdKBbWZOq=!>%19kml-(t9PK+N0KEz0#YdOn_P6`N>H|4A6oUSP zIW*5Tv@moYFTi1U$-ufh{Zwe2P@dGz&`-3<3qF+)5c=B~&Wg5uA!_J8(q)jsA1WSd zh%PkEU>K)fPw@9eebIn$=m?{K^`q6E7wttkY;VS=zx;Wm)&a6@vfXQiisZx%Cv>C6 z@nQQ2DF2Vz2Vj%%eoFi8a3_ZT4MX6oU#lDO1#0XESUx zeoi2E_Jz}#W7FpPWy7v{L&6w1Ck5->g%n>uVE~GX@|$<{eOD!AcLY6*~aB{hTm>&;p(>~zk#^p;5*8gG*cawMe2&I zZZ2N3QPTg~gA+Yb&74uNSJs}Lc>c;Bi{z?Q>1!$Kx0|ExKafr9?dnyTlx~$h>v3UI zHqO6E=RM5;e$&a-jExuHy@)Q`x7%vPUaJBQA*6p$c|`!?vzckpG#7a+!|89|UL-Oq3_nSWG2g;G z$-xAg*~M%U#@E)qUA^&kW+f_Ys#|@s@wA}3X5l2d~!?pg=~)znx|x8D-kMi?(7d#|ag*yZ7%FSjqj_B^*xHg3Yd(g(Kpuq(^qm zxX&l$WoVmI8Pt&mgHI+{v`Q5ZD z)MMtI8E?J3PF&;Rj8qegbQzvTQ;S0_?83EK3F=rSDBBKeeW9UT*x zv!e9pT+B}!EyyghQSv=}1Xq6@eRe~H2n^LsNF1KX6tVCvuy1) z%Q9)^F$UUN;>pY=wCP$5-{W4z>FCTrc5iSMJxtzIV^kj{Q=ZrZ2uMn?3LpocjW%$t z9n8$sGmwtt4}U`&>ekEZLhtSk{woQUrUUm_vptCR>RYgL-OO`64<@jAuR>FvrZs{1 zjV%zCwp2K+E$?(+|4_R)nJLq^Nk@auYZvWV z7km^Ss$Ls{-qkVdg)6&oU=* z9mZG5>kYh{XJET!h*Co5`b}GGd?kxVm2<*lW470%e*e}(q!9j(gQ-)Nl7BYJ8Qi0^ zs(0b)R+^u)vxMUZjPdJsg;=Fn{% zGi@opz!7Y{e8Y>zbeJTJO*~5Q3?1UN_`(t%ZJxeJ;L(G$`g^OVA3hx#x^;KK?|0?>6ukc8yRTP&{J;Oh)t~>_KkLM>b__rK zVRaWzlsjCzaPau?(`Y!FPUFQVbht%u;+HxCuR5B{nNW#7`x?-}z_UTNu4aes`|`hE zmd^0O(=5)cO)*tTx_HIH=`#y(pFZnC9G#(eVbLVS6oiVXJ30o-jo)J zb+sE$9>YPOkFr<`G$WXT>tgk7)`E?Xn^_tkW?AqW9&`-xda=#7qty)N;g1i(&x6Mw z0=d)kvL+g1mV`qo=p~L!e%tbj`E5HL$x;J(Jv-dfx$Y&)j%$Cdjl5@;&$3MR)2|kd zcvu0sAV9zW>UDE(DRZ|qvj;5!eDKjntLNX8=dmRI zNK{akfL=*cCpIW*v{TskwSofQe*1Y!X+%=f6pVX!+p$z~Y{A>s(NB-|S6@dYkM@sN z-{q#DSbX*D>&&khrtv30Y>O6zw^1dN41Xr;h%`dnDCloL!Z<1}g=_u&QAlb1&?MQ& z^cy_$vrO(bEF2UVw%tzGtK2u?woWEo^*P2g`J2?%QkE$b2u!fTZn6NkXaWPiZYZAs_O8}?y%V!xR z;cC_!;Km*@Y!P?a8|Y;86*GiwDY=G7MT(J#P&yeA%t|UeIhfs{7SRaqvCN zM|&nE3_prJ=^i}eI;^NZ&PIv^vdPXy7`Sv#aI2l@1o}-Xmr)ucf)Vo5-x`emygQQ` z7y@pNYc%c+JQvc?L}iQ_cgq+P1Xvfi2Y>wq@3c`L=5u2>Wxy1k^lc^*?^f3=8#Hd< znM+lbfq&plS+7+$qhM$SzC8?%DyYf?6J-O1MgrQHi{f+5GbSx%e+wViUi2njf^Lmp zxDi3QK->hBF*P^|m!>)sk}k{ySo`kv^>17nhZ`_eR39-tr7Ap*0et<2=f(+7R^a-l zPc=TqT`o42pyi5QPX>LTdKU=6Oz}e>`ehzpVrlwY-#lj+;~V8B-Bo>=v1HhHZ-F0{ zs;3|8)k~XrX}Jdnv#eG<$}Cr(z*3o-sqMzFHrVa$p%FueyvEm*>DS^}$j1XKWW7`2TMzJL<$caEez=noB#_Jp6bDhsn1&h$Om@ zgnm)X4aYKGkY!A(ea7V?O?MR2WUYHrOKGDQlZE=yXEz%2`8=a!Np$`H{LO>^dbPTa z1+ZMW7($hucb56U2|yGO{7JEEuq%Mh!LS&;zAs7&qi*3<%A11*EH^F5;K9EDLy-cM zGOn}a_0g--)o#0i&)O9%=i(4M`ebAs7Fo*eTrA{H>4XmMq!mZ(y9cs;?B7#MufKcgwy7*_SP~;$gXbZw|HdVuaROBHw7|<3X;;%M_klcO{xH0KY1lWoy&2b$nR-k4*q)jn`%4WS8Zf zVik?e@?r`<7q9q-(|0*JDtDf)aZy-;)JOCRINpf0l?tRQw0r;ET9x zoPFf*N%XLlt9d7RYYu0$pdE4#d!YjtJ;~f4ZU((;3y&-`dEK^x=-#-v44QEZ59X*! 
z>3#M1yv#S2OwBuF3H`yP#Xuc?gfVl(ER}oMHOm~;u<-jL54?XP_a~+(mw6Smgi=Kr zLiC^e_dDe+Ik8{NmXHw6HFKdkU#NxC4WAZF{|A5j_gfOUm5aOB=$<_EoK%SUtM^cx3i|O0@6~^5LY7FR#BXw{5f} zE=`-aatPz4?VOo%@kT_`zCP?U9=F<%Xsk~=()Vp~UhCA~zw9?g3=>NePOaOEua?w-3$71#Zx^KB;R5Z1w+sHTaYhgLV>u}| zL%4u{n`xhZV54F@)c{WTK=^+A!3RUr$LX|@OgYV8RX=`D|IJ9U;f3KvcbW}ueJ2w* zjwaqZ{masonCmsm>y3kB@98x^$~o{K{N2BEV+@3?xf;Cxk1NmuE8lC#{NX3x@5tbn zs}BP5WnPD4Ik~c)QXlJNg@*Si8%WUKtj3^UPmxB**5+D(Tmod9T_I*~grycbRA3{d z9z9!aw5#*zC8hD>I^lGbv#j!04Rl`dtfA1CrM|WL)GpgR>pa`xA%+PFF@n_wLcl$1 zZns-YdHCRA$z<;rbS^;g{pv4&_Rm*WuM;Ztf?(Ono5&DheS;IB&w6EpAkD&7NsO(9 zoBVUsq+s=nmm+>`3WU7O!`N%{eFQ-HtPOfoC^k zgfY9X!jbxUC`>3*e*?UehsU}hp|9T~tb0yaP2%bbZkP<`UVW%dmLkOu)3TsXBL4AK zj25&Ab|-SR7rp0=1)+C9ZP6TD8km@!#h;bS z*s#F@^@evvq6T|ZK zG};$`0|8(%X{<@NlTxOAb=Q|nZ!eaxi{%E%H z6do}MP8_=K-Sl-P*xI24>z@;$)-(R5k32)yaFc7jIB^Q2EP&s*)Sva_h5n(3op=wv zv^mCR^WvE{24Iy;AI{>>uIWdmjfY%> z`Yh%Joq-08M~=hmb+FN(T=x7LPIb)cQhymHD2CxOPL95XwZZ$~zPguHkA>c>!boaQ z4&5k0)8y1|%+bZd5B-_X!%Lo{X_dnXFYthx+#I^@OAqTpGHr;Zcj2fWRnV{TUgT+o zf`eRK6sOv)+VMK}m&$HOsy#w>gHRdk#TG6igZ~iX$F74i{ zR%M!Fw#1*7y#hSyP?B7gI_UMYlN5MQv`vFBvm$uG-ny7*k$P3eqOPPy2h) zJh9Q37@sY)9bN7W{qf%OWbMp747e)4-zi|^f^<|LfAqZ^y|+`UrHI^_!236)y7}za zf3^DKlz;g)KYsd$tKa+me=FsCe|FQpZG3m5IeBrVXdC6fddCSAgV=eDCT#Y5_~hvv z5-^T{I?jo+mVvew-K^)8C(o=_yveJ6oN;S|>UP?;gR5*Hz|*0uSe#aeI!3x)k0VEf z#d@@{)lTOJcfS{|n)i$SqFk@V*ZS!MFF1Jp{Q2su-~77hnGAvO`SgP}?sT&3dYN#} z-<+h+W^_b%Pk#KP;hp#Cx36Ek%vkNjv{nE{kAcHlNii)FMo4BZun5sy4>?&C49HvHJTzdAj=O!zVWe05O4N{Q7K2 zM|50&Mi`#nl;IWLTm9~je{c2b#p~H}ELYR+PJ#hq5+1Qt$cqRf_PxCKv#cJnA=hpO zLfa+mAUw-jV+jMZcy7Q(0u!woT z zaBb2V#_m+EZ9*sfN!1|5(YvhRlXmv*6tcBn%A}o;a3CNrSVGb;4jjQ3Uz~hyCmDYf z8_$pPVr^cv1kAft*w1AlJ;Dy`u!?$OtYz2C*(F9zP#55N>Oc$m;Of`AP6)Gk!EPW{ zu|U!#C!{`xqnMfEf0j3lB5z4m=#Rk~p*$0Ck|4UB$%tZLjKYh_(=QKI$gZ6}lN#Zr z;!}o#OF@<%Ab`US(X4pi;lFz~iajL}dI^2kvh0p_z~U`!FL1#PIJ7%TXSGh7{UyA> zUQJWSC@ejk8njEn@+F!PuC@fmN7E{pdI)!YNPyJ8*;rFy$~!^GI0h3O=-UlUi=wAP z)l>U=2#oO>ri{1TMYzZ1SPMq)1{>Lc74Poe7eMgXqbm&MRf2hO6muFwDf-#1Jdj*ZQOW-iPC@c!CwS;JNW2AiH?c zrbP8fpTWrVj2E&|o$?{v&{mJ$b%h9H>lfdkNe^Z;p+tdTwcIU)q9L8J6YNikv+Cf5 zWwoWE=%BhL?F+i@eFhXb^p9+JEp+Gp0+&g0Ru16MYt;je*}3b5+8IYf_t2Jh%olU@ zccJMim~y0tUBPDscxbTl@JFd8ztD_#DC9RV%t~JEtS3*=^%!0=F5xA-O&^jMR(24o z4JqhSAhSBOgD>^JZ`BWu%3goB(t>X0_5QOmZe(T!y!-h6My^bVNA%%hC=Gq~+$tH^ zs&aw7_t1n;Ra`)^*MNGoFFjC8ZL9NlDc7Z`+t1mhuHzJ!iqBkp*d~IX{@HRCy2WFd!tlD>jiyp zrraIcfm*)B8Pnjl0QGnDsKx<+TVN9ShRUD*?yE^an$^c zFUO1BcPBle`x(>fZN#RMy)sJdz zWdoaXBto68*9=K?ItFWX8Yl0La^CwsqhXi7W1vTK*9=KC51ozt@Ai~&Z*ZEP(0nmG zR+|%Agy(x`#i^|-9mo)ECF^X&dw7?^zqxuEEJxt7VhJA`85DO`@8}6Z zvY)QG-7(x-jz;d@M(KCxOZe}yO&@3Ji-+j~uSy9-&g|UjwATa9#$-h_brswrf5Plm zVQL?wJCX&br>zL(bKMZawwvRvIqdYXam=uMo6bYV?d_MIsGRz5UKZm1ZO8g%keK_v z${=`DSiP{rGdqMce(jR}dwWGP>l zS3fV4j=O*JdeoWOKm74eRxh*CUcY=3&P)gkp`FLf_ih_30xV$`kr9!$VzZF<`;F~F z^z}qh6XW^mF9of)0+IsW^4!Z`#W;ET^In|3|6ct~8Lz`q)EtIz3~={;lU6@RCokKH z(IDhGcw0P{a#99}-QTOtTl)`Izw_fCl~U+n_4?@f>i(hK!@ZdlJGj3eaoA4Ypz?l% zY(&CpJiKa0PzZ00tG=WRcS6hBP68vM!c3Gc!eW%khzBzWQ855^DZL$Gje{#kkR!0r z&4TrPgnoUFg%YTS?s_sS-i)G)_@yUUyqOrX=Xn5(3_c!#5#FTNfeF?)yhz(*RE>)X z#qQj(f_rmO=$zh0V1(`_%kd~C>=P#utID-s`)f&=|*wIjQV zd>Ms?w?z)r?UeQt%oA_!@4wCB7Z#Fdv=^62CI+gUbL;EgtQei2=Se$hj5bQ=5CdA} zsRu*+;_2lHv*O@b$hGpf!INEX7fpI6hbG><+O5#qZD~Bl^jBZJ zOF1&8(O>k_%+<0oJS~$lWsJ8SGsE5g^v(b9m#fuvEI_r42d1)~PlyQbSvaDk8P{1k z3Z>)4?vCN*;dH4FLkGZo_ur!YEkc{8s0I`qScmp1uPq9RRSjx_@uDCQUM3-Rpab=z zH*{gSS$}zwz^DIHZ=*6eo;%eAB#*l9(=vGglMoGiyq!%11G_rFKYy-iqBdtj36zmN zp7%RO(R6c}yykZGj|X?DV(OvnE=IAD3bN@t1*~yI-!YQ5N0A1r@3VYEJm`&1U!0}U9!>8r2L6LDZ_RvsS8Ot0Yi(q9RMoq}o{1kDYI;lX$;*%apCysNL2XyeEsw1m4|8#9g1 
zyBu<>POh>8_wJ+HgBx?MLwTH-q_3}1O2BC^+BsV5J&re?>q!Zhl8nX|XG^%e-(1^# zj>hp499&)sizMrMt`E^VT2BA3Op1Ja6Y%dG2Mp)tlg8mZIWFV~_t9^`bk;mQVV22O zigKcCwN;qVS-T?F*GgfsRl1qrykEK&*(P2!mhZp$Iw_8Rqpi?`C*bNbd2dDkeRbX6 zYtL9y0~vDJ_`WR#i@ExM=Ty5%r<2R}6>#0e9%dPuOk z7^IYNG)Qjag9quub|;BFKD9DHUUqzRPTgCD&)m5qI^cZu{Mnc3d2c%1>)Go0m(SZ@ z@q^XogH{h3HDT0JMmg@5EaqUgLtMM&EtPjipsZD=uUc81c|n z4(iJe5E}k#RbV5B?Pgx`y%w|_A-RgaHBH#pp9{0owUE?u+XuyR@d%7h<~Ro3nFo{=^2y!na&`J$9nb82t=xKY^j z<7B`e{_e-CyX8HVmFCMr=zsR<=c}K8`l*1Np@aVa|9?T|(>7*&{Da@g0ui77n}~Za z!=Oz;y!#`_LcR$&AXS)un*YPZhY z93eMl8-05J-h=&?ShO7>*=taUjXYO#7I`r7d^mTSa%h9q#$LnRz%%GK3vF57YB|W+ z=Y^|aUj5bXB#`f~MB)_n@xCWyFzI^Or?612*Gh>qJ#JZbz0ex>W)oJz6Ok@ge`8WO zo6SEd8rlebLvSN8ny^gbEb(Y8m?@u>fIdfZqvZDe!fJqe^9p%}fxOq-3la0sCOj%R z0&&JE2&Z2$-df9kgR90l?8J<{-+(dDj*dDNWE*iN^a4WcpA-y7r3*O8gtpoD!%n`q z-BR%0dNFk6X~X0U4<5alm`d$q>9~jCuF12u*E=Oj zzYq?SvGFol&cts5XaYgI5bquXcgQ4YcGodK;c zjeTS2e=t$3taecr;D6Mq+VM#lFzB0i`~@4K>Av(^#?)uY#iv%}G%)+4Z%x_>T4j`V z&+;jus=-QBH~K+W;ZI%NUkng^Z(@js5XOXCL|rbcbCcG_jMuv+w^6Et?*`7J5^}RC5scLbhVkA8@1kUBIk-KmehN_qQfk6wbgvBjP&yfA!;=8Jet!}K z=!bs8XN^oaL`96(gl+{FURiKrWl1sr;fH0M$-g-rtoOY`_z%XqZ$j4B*^A=kE;w>; zsR@jflKK%|derT3pArThsH1SB31!d%MNZG>d7t2fVz7+gbzjhJbgT?mbZ^oP)dod% z8FTapx7uq=^r!landdk4r9F0E9mXWy3dk`Oz#BzTQt_4Y!^-%oi>x2M(01?Y%+OnR z(Q_hc`gMbUZTswko<-3ww63z+9hi-2^)9dlV;!4)6uqE2Z+mxe75z-J(=NVF*8cbt^xayw1)ByabQldPI^ z@qzJlF}{vpc1qm46z_*AVZppM-hD^{Q!?+{U@Fte{=@cz-EK}v);H2QR)qYb9#^o| zH@P9tnjgWkU5F;9n7H?|j?m@pm~pPO4YY5I^WWcv3l6Gcl;b7gLhqB!dUVw)!Bw8n z&Jbvx?)0ZtE$$S;>%g9!i*Ke6aBg-I=C)BT>Nh+-U1xbi*Q-}Z(Qf${8GGO4NaT=u zef4Iw`{J8o&-2XZ9X0pcY<%9a!6Ro2DJ4fax{T|D>msU+;`=u{ttka+EAKJheN;mD z{k;CS@7$kpWk?y@)0EFsPP4!Dy^kSFIQvw&K3g#_;Ca^W^3?Up*VA^8H&+T7}DTxLxExtM?9!!b|2| ziD>RVe$;qHXN~`#+G{nSvD(d`Jx@0{YX#u2 zv0bGvh;qSY@7t_<{;C|ZFTM^xg+JYXn4VZp(BL71g#sFrM;W~KIgG zco^-686cM@Z8TmhEzsj1te!r}>mOf{v6Awf9DUQCiC-4_%=w*e();B8ezf~LAKh8~ zd;j(ylz!>{I9Ff4e7Aac{C4$MpFdmuqCG&Vq&3(8tL$IrdelaaAN=44F<7322ISW5 z7ps?dI%7G+!RN$NWwn_A5$F;xUTc)#AvS}Z87wzp2%Q-MvmiXmI2w{pRa0vxHenay8|QDE@pirTT}z)6sCZ^7!A& z!;x1(PEN4N<8^Rwr_Cz~$97Jiqgf5(3j}~UHhM0l(q0cGM8yOYL2G#BpJO;RWd4}zOaCG7*$dx}A!{6%0X75Dy zS=a%jzm>=MTZQeYH+mh0)5lqQOflZRW_F4ZF9X|6Ta<~baHY=|xpOF3G6bD4G!hIK zc|atmy}kdvydnqnmeP`v)(LBqZKL;jUkEiPVq6t=wVv=G?k(**DQ;FQYJ&jZ*enzx zx|taL4i2azDFO+?LUY6>+7vFiZtO$0waM9ALvpfO?Oh1nd z<+}@d~}*%$#?F>qveKgxV&n=Uw$Tc1!3I8b+U-4$*33&!f+&;nLUl zZT<^8t2-T}bCq*sues!)cxpKj&CO&1$C9i!7b!bi_wu-t|JvZ`PhsNBdH<#{%UN($ z{Q;9_GB0{zx5tG!Ooo!$Ae{}#C#b(cB>bS z<8>R+C$u-EcYF7+Jke!?D|5_dr!id>3bb8F&_zl4$^X4PZ}(c!Z{2?Nreq>_QuN-Q zv_GIWc+vMd!HdV<{+V0l06$81u!1M#^IfsxyX|vZ%ZZ0aC>x_B;?2fCOoqi7*yas1 zowKp;?^>-;o^pAolc63ycrcEkql^OCfsV>pGxFknbk^Lz*`A2)Tjr>K@3-+cWSqQy zInf7~TZi#+s|Yy}PczP%G)BxcC3ion#21aDu{Fo~Og9)=Zj3Q+um)VT1m5@))b%l} zk0H6p$?8Dvy)QCj2Flld|N3g4uU`%C@SyuM4>d0M$snSsc^)2k?CBb=ae!R&#LOW* z*UUskROe;J*E#xcZoXBR%^1qvKPu+`j!{;K_rs33b|6_;tO_yX&Z7yoP6P7xqnd z9G97jEV|RlZ|6A};SRkDPl6NuyMOCmA##<<=s#@lz~!2}p@rEr|81=Oy}$Sm|FhL< zi3P~%+*rvM`?d_3X#>31MlsOi`DkS)$RM8NcO?+q?blhXD`d6+S*wAF3Bh%*c@jE zWv3F-yxCzGfBz>Rum1gi_uuFk<}&Kn|F^HdTm9*u{k76KJC~a%d9276eJg#B>WdkOeeC^TO!1OVpXNwde^ze z1d^lVR)uz4ZKP=D`8}`f`;@ZxG4cmb+E9@}AU%}HDT8Ts(q;~b*zd?b%az||6o@^O zf##qDw3`QI4tkQX-t36B(}zLQUV|n_jC#Kir^DdA%>A$vBW}0Cu+dWAq)P-fMLCzC>8smsru2_SefX6Ip&#_=1BEQ;< zL}Q{J#B{icf^B0SkCVw5QPxUrL=*`Ko+HBkJcW0jG#EDHP8N@d%Vgm)87&M@``tQ= zNNiBu>b>EfQfwagkq2Zm{I8WWkI{xfNGFq@cGNcqA5`CYWwgK7#xeqmp(C7gSsJeo zm3f~>h}YektjTq|cdXcK_q%Haq58(Fmx!bQ8M_)99KjPSc)j}F%VuuFr5*6nq7qQo z8!c1B*VY<$2Q9Ru+7d_46A<;yi87B(R~xwF)$ zFXI)^X7!KPF9>0QlEDxd0X@o56@j@Q!uv)sna_=veiNc|)u?htji6=loc33Lz-hVC 
z_zcW_SMiO4Fl~6&58S9z5j1wA*h>Dc1R5Fx5{-eadT-JrPCJdM`%{rWeI81RCJ0;A z@H+Kx`CR!v&6mYvmgg=3=9=(?9^V`dPLj;#=jfNdCB%;~w<2bIXM%}FC*+}qhOVZj zl)><`nQ^V%=C|5j+N5Xz5_t#(v&_oV(aSxluhIJ81 z&}RFA8`(zNSYKC%4>Z#olRkJEtQjY`q1gJ%%SSHY%Z*^8NL5qdksSl+)U_CZ@aJVf zMXTXv+V0kkA<%;{cH!dY()4)T!}$`r2FLJV8eW$FM-~mV)fj)Ndz9%q1PXXEIe~LR z9IF%GPy5D?gCGMRK^wcd-*aK2jLI8cnl4p0xnx{#`uJN|6$rLDv9gc&L7NiFgO{fs zFZc{T20)GZ*F|A(h1i$Ih@9dusMsh~V1vFf7Aj(;o1#3hPMy70;n_$W3>+Z(?m3zo zK2Dykh1+v;PEXGYlN^{TV-?0G!X*Uj|KaOAmLyBE^L#V3NUbU>tE;-Ydobw105Ggz zFwpn_T=D_<2z&uzK`gN4ic4D5Rn32XMq8D^&-Xi}T21 zcg>0eiWndNIbs|>^7fQXy?Q^CaXLLrk2r=;J9y7~ID%+^IDI_(DDHCM zkl{VD`zDG}jj`RL`~g-%obbp~KT8}{8(d7$_Jo=JSDuBhfol(=ZEWgk<`s<)oy@S zHRUXdy)uL77H~bmXvKhX1(o-AcJ`_Z!qt=y9*3k5fC|zdjIZa^*8_F z-&gJ%eqq ztRZuiqw)lrlOU&ju!5k~vmj>M&6*bsMq@#1tiMU70ughJP;YU>SzBckWOBmE+YjGz zhWB-1r_M)g_+-g*_w;#U{rdeq21Ps`@mi0FHM7kQOMl%bOl;&wet`07wRZ?Wj1max z0SiinueoEcSqK^B0oez`(l6(87{ucZHAAwYR)rgecd}C8D}Ea+yG9vfTuqiB+jzMZ z{2O@rJ*S|AZYbD`vJxc&z!l|CZ;lfXZKXc=mb#PSM`DfSr!HD50$)XnzWF{3HC5S# zd5Yyvd%#pxlbjxJS69}PeeoZ_3_X{{ROlTzLS5wMO6#=GCk&>y z!aEI={5F%9<8czQQziD3@4UP4I7oFh0g2=^b!r!Z?;Z`S8slY}lX+-3W1|c3Ez6d) zkHKZEWkM@pR`n_;)iU`pQPOS%BVj_*`@Jlg&haEH#U1f)>J!EfdbFge-rTr$ea^{t zpxjB(zUdVzOXEyD^ugnty;L{?D=JZ782%;96rvgcV>~|!YBK^kQP~g68^R@QRI>TV zJ?{rm;5D9P%LzMy2Vz?Y4}Erl8MoqP5r33(0*XT5;O(&2s($p-GiW@FVDW$(F@7aE6yD8-tPC zPF?Z@7=$Ny2f8%$N5jazt(Tk)5pyCc%CmG*#w<+Ts3@06&;Dv0s!U{+fHtf{h1|{K zsgrkAVl#G#P1yC}KR=_`xXd08F)K7Gb=C{`M&;FZ<&}7r zq3l5U1o0nT^inDpdimAYA7v!0BDdSZw$CdZ%E zw2+oOMFWgqr@l4Q!{A3x=JzWVyi+cXD)5~?P?~z9Oam{yukx(*^Ivdb=$z%PcQtyN z6Q3w?;H%V1dpuNL`s2+QLU@2flTNJMlx__15GTIpX6Up9y!>0G-^^daAW9GL46OMS z2KLRc4!zs7TlyTKhw5@WKg4jL7H z>A&Zqqo)$R3x|}`GCVPu*7)E50hKCP?Hvrh}{mrViLq2%C?F}3| z_k?xWn)1tVC}oH@lpPxEt_)z6Ku^Nga9L>^IItqnW?pX-Q|&56(z~*;(fLvy=%ZwJ zQQX7ZjQ5@m!F7y)BW8HhcsftXYw}bNhoRv)X6lh8>%gPNeAmHR(1^G}8Jn}(aZBbE z4U(37B<75-35`2Q=6GYmPg*Es(9)2!kfv3AAYT7&@Sf1sPYByEO{1qwGS34`_hT%H zG11ZKIHeJP^;AnEgB#$yt=|J9Yx;SW#s|X5I%#Bbo98|rECk#po`Ep_%OYBgLV7$oUZuWN%O$R(JdXK@Y*&-*Izs_I3iNSHh=H@9n(82a2#>_@# z($czt{Nn6Gbw!BKJsF2O(BVhVet@pXiUAJJ8AiqR#XAzH@Hc6A2ZROPyMnNXOvErS zbI`l9chy(F`Bg%jr@(w?|A=`1A5}dZ*xlB+I%a&hwZS(!7BKn6FMm<}&A<7#)qnlX zO?5r)VvI{O!|WwM8b}5(E5Ba(?tfmZo4@5lpPSGyq0hkBeR`Kx7#WKh!|#;TIvB&8V#A?Biw9vuXAujlBWZHgbW! z%8q!XfNcX#Te6|5;;aDoTC_<6wj`!7lOck0I)flkxQDkL6n$s|50{&RC+x@Yym!;s znL2cif~|2mZ&0-iAI^{Mab-Ej$4A?U_54MROf zQ6L4BeN;E}JC~EQHyt<7AXbs@QD8Cxa1L$j(Ng&bD8j<=CKP~)S=GH6&E_2xS$Pk>Q^$#tETC7H@5R`rymTkM>T%YH)_9BZNPksWK{rAa*ibB!J`H8! 
zV%%OTH%z_2DMj*M3@L{#ycw@>^+6`S+~&T0_c^#-f9KCt87I-q82s(Kge^k%Q8ua0 zn9`YiM%#tI$^`FNjB7PnfwISX0~kKSk5+_Sm@yu`$F^6Vm%dp%p+tNs%7jV*@*`uV zVPPc@c(HhJecwxCvMA!hb*&iqMDH;pboj_T;7)xS&TGd-Njl)b`3H(hThIo5u+LEr zF&eY4AnmC1f*0Xh29UBPPL^@=Bi^D6h;#aGTPlhgPf;SAjh*Qn4m@Z=2fcVc6DQJv z{G^x+{JF=Qw9Bvc0bgZ$>gAaRmzRGF^WWlAnq%x-8b;~&#>PMShyUX5Rn|*^A7}?SYY53lHZP5-1ZKd24x<;B zyqEcjC+YWU{8_C)?nGfx=?5_?WMjf3c23Bdzs&|n=%{$gfw*k~57D9La6t-C$DqRB zBrH>}tn<)>oau8myI;S(uX?wn(>|me^G?z`Grv0agw;vA$a51<2s6Whou^tHsFgE& zFgP|k7%GHM3<=lV+p!!*0rSe%HW2}YEA@%R?lyJ_$3R)u6-WGj{r&>FCnFC>9dF=~ z+>D6&eAz z&|c`C&*Bt?P@!9$yXu1_!yhfsrg$RGg}8lWGhXjH`kSpMDaZp(7(%-{jK_P17Bh;} z-^IYwHg^?TN0H-nUin}c5FK%&`fD~7U*HvYrTFyplo==+8b_1FKd`pL@?dvZ$V#UvfTpz6EgGD0}3oJkyBGE#{(V^qI$ zNg(BpM6x2P^*xZ$#@dZsoa68VlB%cB9wl{n@*^b0#U*2h~p9D zn=x=Xmw*!&H`wT^SJ=2m{h$iVa0zufq3em%$k78P096zdQ%1Sm5?ORe-qYG-YKM{V zEHJ6nfT>I&qlG!0H2AR{XIpV59K2Lwv{V^TD)LR?tN_pe@fu?}Ym~Sgm#jc}mvYpj zwLe3tmdLGRqk$sFJIV9S_#BgSl}3z{*X?LPeB@^JfVH-XAvWQ@`x5jF$f!-=QWPfY z;`SD_(c^r2WAqgs3d|g|h9WA>*^VI+4L2pzZu(!{fJ2ta#EVOYOJejm@P+DN4m>%u zQ|AC24aM1zeodTM#UW$CWva2{CR6ca#syO$ph%f=*idMT`da4lrmTv!8{hQE z=Sn|mkG=vfl;z^tRnR&vm3o_+@Sg8BC>KRaEb+IS*COQUQ#{h?PQ)MM;XT>`lEG(` zb^3tlFLj4%^?W&*V2n7qQ4b(V-QLgO=zmePq4{4q4G&oN4?I#M*0(;5JC#nE>F1^tIAlqbGOKkSzMk_N29eKx!fEQ=g< z<7Jz)c#Mu`!l~d+nP#^%b;=daw$pToe@@V1$UHy{CcUg)2rw?4 zFNHTsdGCSU;D$Hws8K&<#bMqE-0j5**Y-(9;Gc@WoA#AY!qxFh^IrB}FgZro?5tfxp9s&U>;+cd&J+>uXMET-x`?RKQ6-MED{r-+Qme3sH;#(*;&h;%k zvC=@4IzoVEc&3JTC?$Bvdrwi;$ZI}F!7;nxhQ&_6eskdmeM~9EZjR zhOIJNLtru+;>AVjSJ6lq(!h zz^e^!n&e@J_-a#jx@o`e{ydb?5j5y&*vhRs-r){-YVtWIYT$P825)Q+M;p#1jG+l* z)&uN(&>~hmLPifir^>p2!c_t1aSezGN?;v0j#(X;qu9;5Z^0KUE2fzw9BQdfK|SF? z(J2qg^WfQg(!99M8AOaVr(2__DhH+ir1PbGy#L@{%5)CXiCFkqT>;(zJ^+EI(gSUz zB!5xX*>EijB01y@%Y?3V76pU02CvHN60(nh74x}&NV6U;$3zih&-p^BV5;pF9z#G66y##&31HC%H$x4!5Rrv}pxS}d<0;h+%ApkDfsM^qB+a+C(%o!KbTRo~`liH{Vv@eDM>Kb4{yb3=p+JInu+( zTn#n6Zb{kklzy|vL_N93J_q&1SFc}I-CM$rSY1;F7@F$gx&wGa$DC0UH6kBotR7s# zD}x(!FH+I&Klwp*^7vB%H^!`-kr4~qbfN)E8*a4!Z@>Jm`rE(#N%i$5t6Li+P%*rZ z@p*oSINWBXrRx9H;4c~@G$!5l46k4rNz=oHTXv0#BV|julskTxS|&g!gbH_iVwAH2 z-UUu0kQzKaqf@;hFzfx>cR<}-om+AJYc}68iv9f)*4nOEt7Gdk)UWwiD4zDDNTr-u z!E~0}y6g&3U*A9~^l<^qNRF(T)x+H1J^GX#r-uYzAqAku>#uG(*7$7#XYFvbvzu*l ze!dBn%Ls*Xfd=k~1)66x1Tdf`q-mX98pOq@;W#6}N^lzG)y~WmYr3F>6_s_p;^L=Y zaK!KCC4_gWUrC~AOnrgBI6&i;%K+C1EGxz1_Jk^xqDpMF$!KCqIZT9bp`AzCO7+4^ z4qt1mnW^Bc%=*B};Q6fm^NP}MjBk0)@{3xL8j$W|dXOLEoV=|>*{p4&tKDvVhQq+g!1Q z9(DH$XElxFkmE_*sq~4Sg|L-T3NT@(COToKl6l-LrO zq;elF18R+&gkGYV(uXzg_)T82QA=5ml2rKQ8Q;|*l=jhp_%44%bCyov3AN|ckV>H| z-{%6A;Lcf9nxGugT6dd4k5rc{)*CdXG^9}I+D`_+-|~R%fjeCLu>ic!r4}JixUQ}i zP|6qDv`EGu9?`7>?PJ-Gft`J_zcvqLIO&)1@)r!mT{SFbG5g{#l{w4lf-$MME*=J$ zK85EpR?9Fxzgs`=3ANyketKV+WF|x#DZIgRhr)Y-SKjemX&fr@Z+`tOeNg)oM&cH@ z$QN$w2X?yWEXRMeOS$xkx{=m$j>}+bjDbNk`@)|-kERY>gs1J;mskA{e8T*7h;WhUljwR+@$R(0eT@}l%x_>^&`A6Ow%tg2{$ zmPG)_o64MG1s1JenzVQR<6+?Db1&W1?%=b&sWA)mM?B<+Xg-yz!qDgZi}mkdo&A#H z2)HV5c^cYxoUY>w68o;EVV%lDufT!-?6dd@{)CNi<9_5N<*d;6_6gd; z3JOi|o*H(vVDViV2@a%@Qr37DV6C>iqIX-dYzf>JulCg05n8dkAkhsQ3_E=*Bk;SV zU2qZ4mhIVnr58Mo;J|lj9Yy5 z;Z+$UaL{YP(1b|zknJt(XPwfoh5bfY(^0E3O%6W`QhOM^Al;Gc8`;~ zMvtIwd_5O4bO2)nIvId8!$q@tU&gq=#gCa_&Oe+H8~!HA_wDoNfAHh#@Zg~EGqgO` z+etrnc24jVUr`D2Nj~cDvLjn-0N754$)|Zq3=Zs=*mYAmPO#MOHoiwAv&*VgouIkZDJHG`DFBI`vfC#sfQeB=9m> z0ww~pEVSo38<%9U!ZRRroo80vu#I%YdLX>Ugog>jdW|tK;8YsVbxe@`{^n`*+2h0N zdhl6wcEL^ygTn~Ax@R}UHMx_%{1O3!S!VkRFHJblgEl#;rZhDGF(eO{J3+F$jEPCXIBSW7P$N)v(vF8Xr^%)({E1Td}htnNBo^+yb5~y zuTm+qh))$w1)-B|CTzx4Tw1Lpmh$dPy4# zVsWT7)7*mmnhI|YVy2g@sWo-mx2h3Ig40R=qw*!n*qEgMnzjp!g$LH=G{-?FV?rpH 
zq&Jf*6bAF6Uh@uuZF&rDS)*f;vcf4YmgR(FAGMkmxctc@YW$C`h9US}lJs}(asg$P z9}5&onO;FI-5M}&B4sZbKFR`c9TQ9BrAuES<>aCPpil#%5lqq=6$HM#-1|qL{`0C@ z#R9;~!B@&^YyiKcc?!b(kM^8(s7=O>@~M&u)bzwgp#}cIxn=k+KJ2?niM^Am{3hN# z+EMHlL|Ig2LSFP9Wtl9g6gr}mbVCJ2i7C%hVQ8GTQZFyjq-9+miz}Y)#U(^P6y#pU zd}x1`HCcO8`jC_Jc(240I9~dSqLE`o?F%$!Dh&Iifs(ptOIr2V%{;Sy?m2dqnixma zH}He*i*lG&q9B+O0!K{hSsMxBURT=^heDKOD)E_p z;GZK%UsQ&qO}ZX>$)P6v4WR6?jif)q*Io%jjTe<~YhmaG#t?}HM3f0>mG&HqGLyF~ zNgL1{?PXIC{rhd5OrFXRiJCSuK|}XmrV6oMuPA$Vf-*&6F83o_DXzg#MrQX{ICTt; z{d=X2kpu70LdLE^Dle#TD^Ch893F4k7*|n7bjf+wMCzp-`T^kVg}h~H-x1D{bAb`h z>A$1VS>bmY41DCj0vq44ecq?9!lB@}z=A^Q+nRnHPkw!YvcWa)XHunooddrA{ol~F zeH6OEOM1yzd0+Q=;ZOR9)?gbll;!+xJ;J2W0{sk(OFe;({pE{lngRD2=&GFpPjSx= zFs2)!MPX6ucPTKG2>ivL{ES-^B{Zi^DtNdYN8F5jcS&CmCU#Lc67x7?XQHn0hADt3 zE41U5r~l@ucJMsI z9MJRO+gHTy8e795E$o8r5_2OB*(V49d!G?$U@?C*aR%0tijHxsXiL91US`bELzeIE z?{M+=$kc!%;?eUzV(!___;d{1UVY7ysGInxr;Sp%g?XUDh>RX%G&)4}#clU{D)!79`j&)I=EWjq8Z*+B$@oEj zzK2XKiVSp|(v&cxHoi+E$}V_V-j??@&M3p}a#3HLG^@9aFN?#|`lQd)O`AL~b;-*u za8qP!V)W=&(%Omp5*NH8zvHWyHqI;caTRS_|-Vq8r zeB5DUfXn*gP?;_7-Eq>FzwbFX!h@N1_m8V5k59|E>5p>Dx!28R>O1rhCHxvXx#ncA zakc;SAo{`ZVOG69C!Z@z44z8rj(4{GlHc=;^6XYeHmiHx!2`<18}fGUfAkasVxPXT zc>&|iQ@>EgNPRS5oNPRB&O@jK-1OH20o1K;Z`u4Yg%;7B+!-$bvs^&C(s_p^26jlY zEJfZ?cM21W)B?v@;RY~i!<5TaW{2rQEzrd%;UUR!mAXcY(cFK+gF4yLJKlBV24c12-GEMP5~RL(31MbbU^k zY525ULyLmwL?grcDK{$FRh-IXtOt>o5n>2a5I;rG%Zv#ml&R@^Jiok_b!i^TyzZFI zS{*ee{g7*wj8S7pg+a5;*chxZwvSC?N?7}yl6q&txz~DKoak!=>@P@U z>&A>~Mod-=xQRAnGl~#&rN`ZJG{+bfl@W4Q2+SgEe@C&BHp*^0AFrM>78ObUBW6ql6x8%X{J=U)P0mr_`Iy`xbTkSYCAS1qRY``EHl>p2cQZ zm8D=_HxHogXKWcJ+<*YnSX7~PAlC0uxyW>Hl!k04xaBz-i>p9BQrR%rUmDPx5-&89 zYj)B*B>I%28R73ZBCL`zWrf9#QkDh^EkcJLsTt#lddn4vq#pm4eiF<{XJ;7r*)dH= z9D|peEF7Q4x<-wB?Rd(HfUp+-?j}|_;2(Sefm60ju70Aj1YBtuo)jTY@_-9*NPQ?f zZuHu7)oi~e#$p%2T@=EEqg3#;RV{JEz??P zIQkonmp1H^m%OH8LBsr|LqcV=DEsoKayX|;(O!YzTHfXL;Ke#Es|=T3!e{h5t7QA2*f1*ueX);S z6hdQNn#^xIq&V5Yi8FX$BOphwqT(_qp~wsW$PHo?c0UESD8!7{0O*T%U!qu{6JXG^ zNb@Hzi_thMfT?K$dA*vEpL0PR#3s2Wog13W%Vzk16^ZE}?-5;Q{rgLsmn! 
zNM~ce)gz2IxxV7y1D^Hw4yq0b+aEvTXjo3Q+$YzlF|)TAV5Z&k;2{<9ZgPubR1rX5 zH^m*q;R=o3|D>vu+yHG zt0DOMi^59-?)o;KWvU?LGPZhii`~00f$4oRLHYn*2tV-Ej4fUVhew%@o9O#5oha!bTSj zZ;#|PX3BJewdKod$Z5F^Rt1zRyTpC%W5Dzt8Q-H$suApp6)-^W5;yfaeFNWa?%ZaB zWb&U9`=;>rK!Pdo8n;*>Xq_ZFM8$cJU7C@it#Ljx7a+=C;qxm>crl1RPA=j zdrQQ|ohO7nTvfmN>Sc9HI;%dZi1u~}`iGxyFlPFv?P_=ieZN+IKpm`3DZ`Yfjhkyu z@4)(S*PsdBRd||Aw)bfFE-^4wfCC1^v#WcSKJd=EjGr(Lm&)L>_0e(yZhiS3@(nP( zCpmic*zNiJV>}6ucB_v;+^biws^?FiaXj03^}l}l6PTJ*E_jZ*>=4>NVD~Qy{*?Tn zDo^b(0)QQ9i+Gz6Rs^GtK=`k}`i}tkhNU`#8wpZGQNQ@W?qIfhHQ85t*{G&cT5v+QWco=R>2oL2I zI!+$GCuL_saOU7%JpSOt<@pg_`1SF9uCg%Yr%bGH4G`;M%z6j)_N$0g1faFBTpJ|o zheo4VGo0iG?XV-O!LA++4>c8+H}&Av3HH>Wm_Q>PV5@-hO!Pj7E;Px@nsiU#K&8-~ zm`enqhJee~=Kq{C$=HvEXJ+%8(T_2l7|(3NQGr94NN@jGM&fgVGJ$ZOkH|sj%8y1N zm*WtQzVGpPDtw5;z$?lL@YmQ?VRwZjF!voB3P8(dm8H9q8P@{ufXinJeaq_=aiv1$ z1_CxizrBaxM@U2m6K|HYO2#C5Wnr+B`$YlJ!wXOYe?IBHik7g1!fD&_*aBnpJN2&v zjjhvX(xrim5x(Fy#ch`_GPoVEA9~3$nNYuNJJA)TV^xPnHe;}lRLb2|~*YUsUk)!CS80nCn+K%t;Gk@ITDKTV>+1e%@ZSrGg~J zJP8f>St(&X4a=&fI{8Bf4S~ zrv%jq6UU;FQ_{xphMl6~tcLK+@k)0#9IrW_k-a?0CL#oSS#`2sj-g!QCrkQipR&Nl z{(9dsF}{&ePV`=eUk1laqUPn}wa|jkbDuFbNsA_K1b==Rn0#%G(wG%HRwu;bg)83| z@I1FybyA-VIyT>t2KjDZRa8=l(mgG!(SAw= zDmvmZ{YYUt%b2VuFtI*<(=fP_w)CclM;K2wG=V4k9s1*~7_`=|0%lp-_q&(x7y5)x z;*kXZj)gWfz}&o{$Gq?#)xrz3UU-!Dv<$?VVNz`r`WR&JPXmWcR;yD7qoE!RSm!Bs zt(MRpV5~m|St(ySXe4NGV%dZbfhac`jPXdTfNk%Syqpz&LqI14t!r=&2-}^0$9Fu8 zM?dx^o&hyAe6De|IT~B!*B@@h_Zx- zla+xHCv7&+HMWkP<54C10~2qjPkxB=pH+)%Q~5DZumO!^qKqv90ko7*GI1n5&gf^K zRfBDIY`Tk5oVas-h_`c0_=O&I+;D-5a1GTlW=|7ca?((X6USVk*d>8`7sXzqXvD7F zY${;Bx@T@_lE1RcoZrz4yKA^n7yZIemu<#Edqgql0e3}A`W)O}kt>~yDfFkqk;6NO zdu(#ppGWgO@twqIAVaXN+M?hWt4XEOcNoh8U;xUpeJ>#e-xj zQX?lZ+MI)mBe(Dcg{eb&=5JoeYV3;tHT**6(0*zGKN^MMC+gOT8To>2TDL8{g|9Hu zD9SwI%gpz!_EzroP>xyG0e-6;&QLbU4^DaqD$WG#L*Y`qAxyVAYMIB(AHK;(Lt!It zaRec3$-8`4S(RVzQ0SFxU39}Pt1bH&RhuOHH%rS1dA)(|7({^v z;U1r|xpZ<#Rv1)I!xUWAsv4oy`SZjDZ#*>gt@E>`2}sobU0D1~Ur&%}d5}RlR=sP4&yK z@boW^swOAxO)dv)e(A?~CB9qRMyjxJcl=TH^_RaUwC_GAtCWVc5rwVe68Fc}jI?|{bt$ED&@2u~~e682Fo+S;J9Xtpm!HfvesC4R zRwg82R3M2%t3g2&Fee;$fP0WYj%nn%0wy_JVZ0{y z?VFQ)(fb$=f4AER)CEhg^=rnCX9GAAo)5FrJ_z4nHNae+1((1DA)@yqlN()k{O*tF zkzcLe7y&Q5CY+3cgriR~I`N(eIG3GmPp^|V8H8Je;i`Xp$U)Y(_T0ESWISMbhYzkyG}Nt8n6kW=zJ?s7EtD=`t2!>S90@ft zZfXaY3Tcf@`zMdsD&(KRGTaC{c_HfWS5v0c&1!-m<1=oDUWHUw4;&cO=&saXJBC~ z)?4VD`g!Mjk2({WcE2~BoJ)!r!w}%3RePqukXohQC>3--L&bOGix^+xp1%VJo+wA- z5J7Hu+D`T+(JqBhXy7^E?>u8zrHIfd62{^?vJUyHGVQ%}UwJ!l$oM6@plSsG`(pLL z!OJr1?|y;?o;!wedC&6e{yNr__dVbDIdHU;9##A6gOuZo=~Q$OB!v%SEzK4>$XLC> zD=NNzm7c6WZSeP+PD=O{7^1#Bvqk!1-!cb)Kf`?1?K?1W?d6CKP1Tc}uoG$4K0`b9 zRRgm_Zo-6vT3(vB-ytz^$4$z$$=0xr`bOV>?q|0WCzAG@?GayT|-Ex5o2tjU@bX@ zSAN;Td)njI)rHHm^Q8AN3`pMDz}PW8o_rEn12CKirx?D{a!&={x{)jLgCRXfM|*KH z)me3xg4}zsNu2fsFP=Qsape%4%rIEw1qK2uyC1Js9g~X5ke;S}IwFLM&<<0wERhGE zUN(-mn7rnMiS+5?79Q}VB0@O~57SQ_1LDO{RqshBP6yJ&dY?+62<2o{(csnp3f(B% ztOL5Df5M^AvE_AU8SlTMDP=$P3JY4{zB~Y|7?z5uxbX^KLT6b)qeO%_kaOG?R*$fs z%1jM3^N(APt1}EDddf6=E~%f>;54jxi`0<2)h4vzo&lY(p3>zD%WdR~_fVAILU)! 
z+QcdB+@riTG=7YM+Iau!n`-Cah>)5?lsJ|QAUrpG-rf!{!28v; zF?T40?gfw#vj+g=_BJ`njT~Yvl{kO`Zx$tias2L8$R@a_oJz-UFA>*goAvVe2%NG= zgcWs%0bpBiUcYDa4;D9*#o!&n3j)6*WGBiimeq`<9F-OGOt~`AU}eDYo;k#2IX&@O z#!8@~*nmpPMPZUDs4t0Ona^ZqH}#5ZLnn%7Twz4-VG2(&Gw0ZXb{(Ee+jZ(ktI1ML zixq}giWGMO$&qk;O4yEEFS#jLvdK_Dr5vwQMv-qR30PQpFZUrBzJ?K1koAr!cr6?y zfx<;;qB_324|O3FVbbr5d8Vkl&0S$w*BZi)a&e zG9%q9-+5YcQ5NBC3X{M^MN^zZ`wCSjc41RC(G4TCPq`6G&0N4Go zOxm*|i$K4^LHtVRUiQ-#LuWisXiV<&On_0iOa`URC}zB?0wNtIY=k}+xKJ_U%PNB~ z;Cn)FXy5nBDM_xr02g38GZ`~*EQ+kd*XWc6Y__zgk}02S9K{Qya%(HJ@5JIHv|jab z+{N%znJn+e(+j-#Gwl>b9c7Ak?5iNM*BTHB$zyEcKi|)D%UGBEfl`dzwhTu?HbtS-FyW+EkspX2&LVG+D zL&C=T{agCU3W+>-YjZ>D*rX|vluBrVbakLDBbf(IDVdmtQRIXFilBgL0}ay;mj zDocejfF$(nT)fF)2By}T0mJN%fo41@Ec2WA@dpHk?!k$8OGpTC)u}>}7@B0ShE9Ml zj0A0nE$%kwnMz%38EijlIn@KNrQD#G1 z7!Gyz2rTK>4!RV`sIE8;*15)&ohe3?^fns5t=^OD#BgDAnnsCazLp21*XSX9_r7Fb zXkUe%@uJJbW6X1&gdhG6?@$l7k*CU7X@X*s9=xH9m}$&FZCwis z8DV7fZIaW?pcUsO=*86w6W{b%?Mdeudv8}=`XDFNd2FzbVd;1XpPFv)>leSkYk!GO z-L0Pf-tTiFT|YYK5F@f_Xc+x>HKIW@Ob;5@vD?|W;;8Cg6m|v0#mxozX}_ty{>?Yl z&%eSbpOp}-4Z_}PbyFvesz3d+KgTF}4z2d9n{O|xi>dkYD^?U*)yZ>nsj`;<9jpFl zfBI*C*(1Kl)4Lpm9yS?5W%?fAsjuF!*LOj<< zB{z`l>_36{I7wpko{b&nECVS~71}D&j0XnQYhoYUtX#l?F?bZ7ZrDj#9@?Kw(A2N+ z)F3DmJM6)D&B5odhLCYuTKh7U#)yK}vi>?LsT75&XrIC5eP}aCPPhQKomf=1BIt#m zl{qQ#nWZp#l*xgPSxqc-zw_f{Xdb~3pCh2Ic&GW!qXJvnlNrT~XGAL)edb;ZW9E39 zU&if90zXYAZ7t7EUqOlTg7Pn{5f$l^-E%|u!k9c`PN};UEo}2b5_@6 z|D2QFY@Jb#7`g^0zd@iq>h8JhDt-{Ayg^&Qw9+GBETB`SaBv)W{_WcWK?+=J2rl;o z==Dcd&^O@c`++~Of_5`ThM8w{UIBD(UN$~e^k^2wBE}T~;{<9fsGds|50jNEgj9yy zxT^9<)!>rJi2gX~gG$=AaoZEN<+8g9Q|TrO9iIv{>-Dn#mQPHwy#y~RxlU5jfa6se zUe}>wB2E;**5@YgOa=i=s-%2rKa+!aP95c5QEJ667o9J-=X3Q=XwPP)SNml-p4m?? zpsFa~i+u(N)DVT-hpWry8tIO~ajog!&r*+*0m=-^WGXI>MWB?pd|UYKJL?HX-ch)! zKtyR)LFGMR;XmsY7uj%{cEk^~6a-nHV<_bqS#Tz-^E&VOFsnMWEl-#7rN1jE0S1(_ z;G7qD)xO!jO^8$ePd|!MCQL(xfrBs-)`fosz3&w`_@?)zA&U5LMF$#4!nj-ocdIt! 
z!Sw?^{VseXe^FMX3&#t#_yxXpl2XAlu#o;kr}l# z&#z%{p;^XcSmAn1!ob>$eU*Q_9HXmx#dEGvLoaAw7}-BzV&9}y;TawS$9|WK!)wq{ zyojr^uHJ>VfSLSkU4{#32xfftgE|XMc)!e1!XcqIyl=~{;HJk^C@r~>(-2RZCuYBv!!{LPwS;?p)VT}UY4~D zSFe4=zB|4E$OieY?6)b6Io#!+%H2O+^Tmis&<2;NK>AnExSE{3byyQ5kLq47uvctT+#QRu7 zlQ*}u%BdT8JBP&RH&MV*b{+qOeoxr1HzL`D@y^nn`vuy}2X$Qj)~BqU4qsD03FOJ} zQ!_kK#|*}Rd-VG34QM%ck#kaGJ5C$$ZpOlU!kZ@_2q}gp-LC=|cwVcn&aQA8vH=&J zMz6ob=}KN3y4YeBV-Ig=o0arOI3u@Mjc^|I;1R>B#>^d#T_d4Q*h~5d%zK1ug!UX0 zb^4C6_I(=*a)mwkvkWIEL7yC(b6>o?yL?u0f%-#~Tv%?we} zx1mH06iAQ)xG96fO!zG?K}0(?s!KUz-K7~X-}8IM&N~`Z(z7cg9{ivV>Cw6BjAnAWXY;Q7cE={$qQlLz zak@uLy!7Q5dxXeza6U@Y8nT|&sZ#9Gz0S$5zPg##eVA^Tb)&lSvcV}?Z4R=K25aEF zMr6Q-^9JnJx$qcE)2|bYtj=PNn5N&>_9s$4-a5}f+u*j=w<{p0Vo|TQa7dWrm`>afCqO-$i!BdUwL;2wj`<>kmVjJ>J7mL08^vLW#q{>)m)TW)fo15P z0pkm`)J@;mk|`l*vl;LOrsqDf==y|XovVJ(C9bGoyqIZQEmn)7?drcts`yh)*UEK>j#cD0JKuURH?MCsJoK zQxkeYnHR)*;hadEGlWKxQiF;ltfj6VF>X*<)d6oqHY4AQKa~Df4XfG>zaFiKVVIL# zj6s_6H|egL^k?R(fOu8;r>QZ5LwqNzh#ro3t%Pq8nhLHWP*nI4kO;U56IRmU;aL!7 zSqTErGKTb`XG&$OtQde(?#H7j{KYQ)ZBdVcB@-tFOk^*9HP(C~rz=r+mIlG2My$UI zVLdETMe%CVKE{H}ujv3ZAuq?nD2)KbqVyPHLUZtn&umBqLgmUIk2MXB9S397Fi{Rc9`Fuz6^5X0yWyCn8~E4qcG|x2P9}*0 z%*YRKyV95yF=-kaa79gg>tH~UoX0UO33jSJ6on=dLPwm(GZTBmzkoM024CA?PS%cMvf z8l#bk(g!f`9XWYzXcXQrbSbQ@2aJ|ex`C~DaFVy2efEm%Kpy9GF$ENFh!bUueYJm{ z9Ojdh<(o+eq-lzPUwQ!il;@={+@s&ZR{E-e1NTfRh;;^w2i5os~ZZr+0%A)PaN6L$`YE;G$crfuS*#KSovcoq;p}KkR|x7&;g@|&s;qXFG1HWyp7H~;_?teBob0QHCgnW{L~0 zq0ucuHlBR^jJXd55C^3D5#&T2W$ynObCmr3C7EG>S%=NPO%A%zsEpxCJ+Hs}3VLO8 zYtmm@zsP4L1LfC!fL$U6F3C8vxwk_(9cyhAN>&M|Pd*dp%bCVIDNbpB&iiu@P!L=MLWTnQ9^i2*Z5GKyyw(IG&_wS)ic>`muz91Tgr~`>~ zgfUlRGp=D~vQ39jbvOIE>Apev8jAnnN1t=pg?kz~(28gj(_MeKdQpAx>;E1&ZMNAQ z4~`6zZueP%kk&Au?=T3Z!#aDd%m_3c;55CbAB_3qXCLJRUAhT&Vl#&4idDwtpi6iSxc6rKz);XqLI2Jf+kL(CyfHxWc^E-gZu$xv|M zk#>Si%1(#|)swXX3Ekc@LGlyNtPBzZ$)Q5HJhp^9F+*5Iu>c0%XPT0~!VBd-1sSxA zSr~~ze3#KIS2iS4hAWd2jH=?}#HrP4NRAU~j@eV`)E|mW+lV@8Hpi_YOldKQ!i?O{ z(3L49?RCQpM2vqnC9yId7MXoI(J-ZZz@9EbBZ61YA~eB|Fc)6NVoN9M5D2}-Div-D z52EOc6-2oMKlWyhppU{T?F2_E1HzwjHYOmrd=`a_?t)*o{}KZ4XpnJF0hGS+ zRESC^x$=yvlI1zy6|C~| z6nwfGByTH#)n04^QpuI9MwF$o^d*K057Ym^LmGDM_Eq_yBITz%AsuQ2cu&u4fsxCn zDl%q`2))720G)pFs}dI8;JbW7pET$+evDIB(QvY~F5fj1m2l9Au}^*)S8D!j$5?|t zme6txKIm2YccgyWKU)!aelPr2`pwHc<41#xwxtuxw3yT#Yu*C~%8ra(Sa4H1bAw=F zWO+|o5?3mp_S3Prj9uu3{!&we=Z?i%gKzsJ)ufLq*1iM2HKa9c8P~c$mdOS18EyI^ zELFrI8EUiz%G(dVdEdJD6XyVQ&pU>3$_u({lrugz1PJk)({}WP2Jf_Gb}`?Lw^@1t z_Zp3!JSV)QBRau9>7I-%Y#;*+-zyo?fkl*^di8`^fuS@)--I!|Zc0nxn)Hnf#PP0q z!M^0I7Mr&b#v6LDX@0%@%xZ-71DoL6Phr6rQ-|ZBJ#pqG%ZYcY&n5jh4hokW^rt8r zmbXm}rb0I*tjn(0=YnfHQH%yZ37w<7*9=8TFRQq7uZi@I1pR&tdYi>lbUNU>jnc>q z$Svd5DN7qM)TuL`f0S7jz`EmrcYTkd!HM9J)&d{Kru@Z@}ZgqeX5_!Nl<*(%fGLxhbD%8S<9GWy{vpC)GTrW61>)Y{n)$XHx(ukfC z2*>y|$Z{VHKYl;tXQDx-?Tl4duU+w{ge|M|`sIj3BazxycaJvRFq0@Q=H?6rDa z8zoo+)Va%ZPLc>C0D=DH8Rsr=ys$6yKcgk#%Qgm*8Jl#l%y1O;dOP3><)87nVlYHz zc|5#Fh`WlnOFX`rcpjZRtzN(Sy4oJ&F^5T-8rq==-ylTsKIb{KK$i60*nE^R z%lixFpW!{pXD-;d%c@14el+0)BQZLJj6M0mAF|irBjWuz&5qQ$FK^z5FNQ>=+`=1~ zYteHW>~);uXmuY;4<~x`H%^yZaO!D;)U*>$vvtMel;=PC>{&MLnq|mM&u`zpsm^W) zS8K56m81Zp!F6@^=3QXkLD%h&!~q6Pc>NJ8TATFGBsq7SdC=>+34OErqd)!g>c@Zl zCnaMRdnSe$9}OovnfT`F1IS?M3yXhi=Mfz}E|ZYp2f$q?;3$?TRK)9ta1iVsE$G1v zTDx7sWa6j-bRjf8B}~Abi9vDhj(Qaa&*Uy%MkhjunHd8ef%w4fc=C52eNkii zSWYjf#Tf;Gka%>;JL$_*b(pN(=dcBeXek==Z`hC4WdsSHo(+zwW#d07L=|*cObUI8 z6e84GT#*0QbH-g(tvdjAmbdXR%&5l$6%@>JvBacjy9omHG%Lb+0uebfUPIaNQql2p z)ufbz*I;%6OlYexkCVTI4K>rj@}A0wb;gPoW|S$OtfFkYFTtGmt~wa9<4o$L7RwR^ zsCew`n|0cD5o|JJFv7T1@{P-0Ds&0gaZ_27oXTy5`-F`o!i9dBf^3eUo3L!F=e|yR 
z8@y}g9|{2X0ULhUoTCcy0Q^uWAMVU3!6ZysH(55fiNEt86aOwVT(<#7;k`UZsrIO1lN^rEU8zEpVw=s0b-&y%X%^5yFVR=>aWE z@;u}|>_B#Z#03X}8dYaJ40kx?I9~ z1@Af=!ZQ)_z2zK8omvpmkPx=5JjzNA+RRw=V;TI;t zm2xOBb8sZ77^m$-?K71bjr1tYr7L_d-r&-v99_nmK2au_Kco@fPhTwoUhG$lBK}r+ z5m%CqZQ9556Zs*2*CBL{X{`1c+7Q=_Z8cIuEO-mJBXej=FSyUebKuQAX*GHDk>5-) z^rI-oIj|5pq@rgZwQdrk6oXS*rEk1TS93`-;!*`!dUvJBPx-|%2`}Th_%D~^mA+^n z_?kMwx0wf3nw6P-6!;#B)ir4p&EK!{#?`W{B0!_V53GojKfKP$fGdnk zR%-)ugpIPqKHZ*QB`l~$hLYA6iZ&j6=OMH8Z9J-1CuDbNkmzCXVOCw=@(%JrFK3s2 z&pOPtq&HkbQ+iYTz7J1zI!BD>oVHLJ*;A$?W3$2TPSGh`fvIUq#jt}NGe;1m%2!sb zQP`JzWZ=QOYiOwS+UH<0!+4#q$LK&U^ngt^jLtdg_~fuw9qo81C7H``7-m*Wj!&L}LFXbB$=^hOk%sl=?;Jk?cFd8|KOL2py>8g_8E-bBBcT91(jy#c zh9;|mOMAg3hNI^hHNeM=&9Lr!tWidqDh@(!9p4&_@Mn0#8+vtv3qO&YA)M_$suEnvBe zF4rVHNbyLi!(;fV|h~+WuL!W$}8&auz$pS{o}ny)w}BtR7Mm8@2X3w6AsQ^LtliE z(WprYW4=k({rL2m_I&n&eqSq ze94{?M8tm(*Y5*(+ps zRXuzDIjdm1^p|6pFF#c8UVed7isdh}uDEPghsY+=cT_&8j-LRCG3Ou?{sYxv5kqh= z!^Gu52C+bC7Cbi8!ZLT)|GmsdIrVAD&M+Z`3*dzgK{GcSFqKL&G9D)79$sGsvEo#j zrBKoHH)7>siA4w>=WqlMZc@;=w2&s0o zyAZ+_!ZQjj3=DCqTs|NG5Kj>n3fne^J*Yp#^TNhf$QI^soC;C;j#q%yAI8ER|0y5< zzKrig1o?tQz_gPw%*&f*7r1xNkFg2xB(#V1@SBgyIW*CmKmeNC` z#@+zp{Wm7;c9((J1U!CV)720ofbUK~W72}wW;+N1=#4b7D)K{?ub0paV1@RK^VD;{ znj9TImqO9aS5c12E$)>v5u&mi^fARUSHcJ$y)nTxOsUsS0VVBdU{O6|6?j6CdRCKh zQ97g%aUBVPIib&=+G0XIYhqQ|l!RHiDRr*3%Ku#Z3vfw4*CbRIjbgE+Ec;qqr z={S87K|T{n(7So{r8@27Q~dlO-H9uBYp5Xzw$K@!$>x_dr`*i>l*(7Y3dV>MKR}@)p0q# zG{vXgC<<~>VAHl`R`3b^NlQ#_+2=&Np2-nUwY>nYlujLDF!%%ZdO)o^lQsC2t_}ZC zNw!SJ%Xj%WtA32nEGnK_Y-T4##GDFJ7W@KoMpv#v*TPAxmmVw2UDaxutblv+b~eLP zkp_xfRlMRz3=ziIV}Jl_e9oySh9G#fKcO$^TaTYcD7FP&VR^{%;uqSM&KYAuTyPMC zPALHiAxBC$kk1{1%2s4uXx19#!(wbGU-g!&R9aq;Iv$_fx-`uTfw_n=S;KnIfgN}C zS;N3~Y{QFh{HKiISDxd0<~^B!%P|WBokf0EM#Rg?V__;yxk1o_^XvoMlltrQKOS(~ zbcMzHG!@>;XQ@Ap&>ur|9IXm+T4FVy7o+fXdMc~T-*{8asFHYnR|>$ZGC)8GM;z~5 z)Pq7VUnv(gD%+IZL)p`ldH(H(YUlBOlp%cU>AU0ioQOUv4Xr);xrs;geW$uIh93vU zJ#t|&Kt7R%wAF(y+sJjoidyIaZIs`QjU5z4V!3f%)tDn5CLFMYp3$Pu>@dcegt5t9 zMU5?$qF$3jyzr13AHI9CQC+=5S!0zMR}@CUX0=D|{%vxun`*kn+^2CQ+-D}C0DpRw zx7g=)dwo&ep)@v_Z2ZZpBOca1G2okIbrGWX?qkF&ZGsX#vU`tC@w^h&3hf#jKYe%) z4=B$KA3;9=c87TQm5o=_e|hVQG0sq6*#HhRRs_C&`7LzXuYUFAm(>@)yaX&}Ffujs z8F)rdWp%I1@9%y3X|+%Io-*Rq>u-~x>j#9=d4jU3_20etniYy$#>F0jH{VuY{q%oS zCy!6EU*pA#*ExW#>i^Z?FB+PJ;4f5n%dQCXUK!+jb^aRj>LwK40_t5&Y0Ne+&05_i z3OMta#a#e3HgMHp!73=>Fc5@kHXJvi(Yt=>%wyb*(%tn6F(+o@x#JyYA0^@~R>&Qg z*VQm3|5u9ur#8}gj>$!Xp?GIAXJk2eLIsN&Yegic-`E@_pvv_uV~dnP9o+gY+zd@V zmC(+3a;1{Nrl%z042XV@8eTK|LBobxe0L;gfI+{qU0^0YkVQ_qj8+ zxgMSQomq@gqGdRezSEY0yJ~w{0>0~iaY0kcnYZTH?5FSWcid=}L4qup<;>%@4k}eq zjH^i*0c`CXU!V-(gLHK!Q)(xLkX9Nq0s;KouoyF^4u~QRjHA%?GFxbVyHggN(ti3# zzmc`o;6~AMRqfF~PjmB#JT*@1+}>eF)tyTdrUg(tGeI7MY>#1M_1WpH-@60SyHVzmQVYNYpb1jzerGVYB|QA`nCL^1v((jMEd%U zcy|mE_NtF+=m3CO379cf-%lBkMJc%PzQ6`#Dsh$Qy0&6PS_k!oH=uR@R9jW+vp;H7 z5KlA=$nSPd%@-Im1LtU7Xh5yP^bXQPu`q;>bS%#K zCrJ7Adx5_etzsY}1U5cWL#B0TG1`y?R`lrK;S>7dGLg#;r9wW05${SR#S#}5Xyo+W z-|X0hrnCa&Glw0x0~;LuSODOSev13_!B5|F+?3@He&tQTY5V?dlfe}umi{sm@ivuf zZCS=5zAp^Lu@%OJ&y&;(KsMkFHK!swsM6-FgW?%8-`7^C^e4?YUb~>_64&wyZpzZY zmU6!9C9OzxGPiexMaHfI0H3OnIRjIS@lh#XJj!{2Og120>!U1BuKG{Mgg*_WZl8-6 zFX^2YGUM?QWgHiB%d&ZeXZaMi@+Cc^J%?g{9iRF3ydk|PXD4V?w#9$6QyPj^Y(HtQ z?2N`*j4W~-^pE1oNZ(?L!O99X!~ZTXa2I$>yS8uKO%kiy59J?M8Aq8@*4mLBSKG5v z1HEm>8B{(H=JLJmrcc}}RwiQt_v^k(UzQbKu_1WJ5d~L*9==s(dg*b|)$KFgeLT%S zKOC3zhpkAKf&@T--W<=ZGJIMmij!>h9AWQWYKSbJQ$evDOari{NIy z==PEOq_9_SY1?14_I}qOZRi87&pF09G)ED`TZSE&{bPtnstIg}EX7O3oCfW6hzIEr zf}lsVLkgGu9bzlVRq5$dZMQAz)D3BE_IL480%wJU-0X^Bd*udEkweTW>tc)v%VUiutj!5)BZPE31nGlE25;D3*G6W1RVu%0oj 
zIprw`Ljv{pj!vp8CdwU5R!`z`zHp6GcR&+_C=^GyXZI@1WGZY$>4wqRg)0LZ+rVm_oiLWv({4r(eB zra@OKA9VuWYy7rxb9<(8+(EdHp=%T#m>09OPC%bbXpp%RaUBKYo*0;^N;dULc==@< zswRrxOqQ!jrp}s{8x>U)8C*dw%mZD z_dIDX4e*yie+K_vo%sw?RuJ;oID_SgO#;BgwY?ewe0UGhLq8gn4UEDGa8<~`}9GkUm3bon?A>fs%q>vHBV z#-P#+EQ4$A(`^-91se#AV&a-?-bI)z?4?=Vr6$>r8y#9y!0AGA+zM|Mvt|^bs5rz& za4+qaeh8Z=vCwOTJ(A6!PP=J26(Z9ssK~g>oIAdeXZFSMWJjg~o1rLtCG5ml@O6m` z3bwN{{z+fLz2HNfg->ZD6C8gVA19sqY<<~cloQ_dfCssVTfuYT75)T{=?i@jZsI7= zqFt9)h9;C-PHc(Epp27ElqA-E^JAU1Mmc3^f<0E9%{cg; zx~!YA36s=`EDheF4f`VI!@J5Ao@w;U+T7(Cv>F*=90-OG@?Zq2+ZIu&wbf@(lD+2ioB*GAWaDi2t8%oP#`by2jysy_tFIK3JS}XoSN_?<%-hJ z6A&wg$;?Z)ZQnL5bahA+NOQkGTgtjaj7cKQU)?{&@J=4uN9-EOT9C75h2%z4iapQL2 z#ZwEq5lV-^1m*U7ScZS7hv`OHs;6J-Do%hB1LiodE)Wl(Ur=M-4jyY?9nDNeum*{DmbJwx-O2QO;Vb$ zqwwtgpxPwW$-@nK=+KTgA(pl<^@js!u9+xB_BDYr{1u= zO$x$2lKivdazIL#DN4GhU3S?xsT_Jh0d&65A=Uz@o0})W#embT+UObgYq3h)+e+?}i|Q2gk7UkB&8Aowz6d*1&thk+)6qQO-TZ%sCF(>Zw`E?DgE|TP*$W1JEH)1)@N0R2NHWBK&-BSL7*$Ih~aJp22f zfBKhBFed9~9Lkn_>dJ}d4dq(PqNd=1(4wgGx4EklQ11)emk<V|wBJ;Dd5b4hp=yaFJ=q7`>Ug?7{DjrQsaz#igsGW51TKdIWoHS%mK%xNE>l`wN861!bMT`t! zvtFy05$!>O@hDde2`90VvMKki8bP8Vp%N;@^&m(21J-`ZAo2J#-Ijm4QVLniV~sI= z;w_bQ@#$uPC}O(t_;s!nry3XH%L4?Ob+%a>%LC45Y%*A~>(});jV0SLIkYPd8{~fT zPz7Wm#9t;6$8`UIcZGoe(H#PuQ|abY3Jf{*BVKY=j@TeDVz=>>)4;OTJVp;9!C_Lt zdS`0jBy71Sol0YV4iVHvI0+5C=ooh@|KM2K@lt?T-p_aw7)$;}km!|ia!h)lv}ZYS zDmSJHZoWz=O18IT>9ALiC3hc;8FqLobW{i1Gq&lnJ~TFHW=y#3<#CEj$-O zgKoqP-?{B)Jaw6H%T6F+pVb@rko$VKgpqx)4f`s5tib1xNiG$|;JQ#c<(*8t$M_W7 zt1;Q8@GlBwfk(Q?e*vp~T~PkU6em=mG*>p8n7P9 z+6Q04QclGr0n8{Sp>du`pUNKJiQ;cNyc~Wm4X1wl zlK#P`(q+Lhjc_GlJM?Io(A}!++KJ#{tN|VhD4L$43r+5FGsyo@Tbx9JtzN0^J`tG)wm3@|ww3dWez=RSDy^!@Qs&LupT3E{;r}y_xP^9{II;J)zt7#=%-lLRH-B4w z%#7tXMaWMY_eXRL*C+Da>hD&MVTM8bz>sZMn@?Irc~r@#hvENTwLI^&%T>Gjq@%z8 z{)cwSelxi+Rsb9}^|He(zW(!f;lJIs8Q~}8*eIW~mC=1A$lbs>A#~tDV>--+yea_x z;^hysJKL)6CpjAr%Qr6qxjB6&cg8Wu@lE{qrcEszI-3UG=g6@C?sV^dV}_a6CF1}1 z{hv24;>!q`6H^j@h4zsuB7sW%W(vvrfXncFu>AMmrp& zc;;cN9y{$q-fPw3;DJpQ*-`~(dGxY9j)U!@JpFfN>R2&hYu$~fl_F9NLICiy&p+Ed zt#&C}N89*s0on5(CC5j&g z&~dwM4lDZlvErkWGnW@v{ii%XLd>!lk33xTo$pQXmEJJIY01Lj$aig@Cx|esmePnF&d0jA7GtKeNQ&oDb{$wb(&+G;?P zBSAH3L3#1E;@47bEu-ZkhS_Tu)K$knUbg|jq@QJU4nQj~PV2g4z#~@AIt>gCB(hm@ zzEH#Sq8eKO3x<<-44XCwbd2+ZJ1xV8A84@)d274nu5P=`SvYEyMH1^}C4P=e7rHtw z!hhU^EUG~WW7f+yrl_6PI&|Mhk6?>W*`^6qql2piI>B6zl$9vLPfMF!v(0}Dm6*F` z=dFl0qXA`29{6X}PV)T#nDfxHvs;JoBN@9s3n{v4{B=G3QS6?>*CeQkgb$RkqPUmd zj8fX^!z%9Il3fCw3D$*Z2uBQaidzRoBNNzadqwR@NRk42C!^bI^#KL~AWj{_ zp@G6n?NW4;LXi+9tntw-vxkhI3}p9;5ck-uDAn}vdUFFV2}iqV*)C%U)sLv8Wn&Si z;X~X8rhb=Wy5O5j(A9m$GKP=U9bY$QPSVUjt96Zi9N%EOHM*w8VT~+fIex~6)3kA* z((1Q6Ztw?&MQFy1l&>C6IV`$(_;%d>-r|pZSw>bZ*58b&0S{i(XzZG3Aw}cY9CFVs zFQ7%k9QcNg!;;Y(DDlojj*Zn%pT`jDy*5@^Gx%9F(H0oIupVBe7#R{XPWXe<_21M= z)FyV?uiO`~245os`jBv$Qd@l{c~%shtU}P$iKTdj%vX8<_6ffd%j1g>HpVn~;E53n zcW|$t8Vco78+w;Kfd`;Xx=`PHKf}RC;SOKw+GxzHHICj7Zo9WW7H*?0@-h0lZ)7Uk z1e0_hbWl{A(M)kt!o!1F)~BE8KU&qie#dbNQM>W4{%+~-zO{>d!Dp8u8mQ}Mp7!|Y znEv#t-T&^{Z~u?YX5|8q*#@cKZd^`rK^gdq2a^i(oq&uKn(6Zf)^Vnr2XG02<~7`E zj&q}3a;+O?ylNM6)U~;l!OX}5E8OEQ;$zK~+w_jrMqdwY=@tpfxdECSSUfAbh<|#x z&0Fvi-!;D_=#y#7SOi;;_B(;S?z2L4luf*2efTcMudCs^S;=nxoi-LWz|^d* zA3Szegw#oATcveAe1NyoF;By9at!WD*^hm@H(E_uF00Uz&NEu1wOzhn##iyX&FSj` zkZ)gYPAZ$Cp6oan50ax#JLb1to%gCG`Xs|q5N>b(X!EAz=silI2>x|8!BuHHS4E4- zyzAuci+Jm9DIGfZM*Bu}}mv?~T0wP!xAk{;FC+zIvhW1miG5gaN0%(fo>H>pfm1Z}EV+l`tE9 zn$%78Cojmao@1-vKdFT(6kz(}#xQnL&*8D{^!F}98Y9J&0>hWt66{IcvKwbNbG-fj zO$K@RK6w5(r)V$Zy&AA%v*d7)HSNGwuQLlRXRpY3hX3B-)AU&-n{pEMA*#MtzgLW) z;6{6M#io{l)_48>@I(9&@4x|By7?p-lFe}1MvkXN+;>}j+G_Ks5`J-U&$n`rV`3wM 
z_BeHS8}D^AeYDkK2jS%?NB6S1@Uaan$CuRtZGyt@r=_7mH-rCv8xEqv=D5w1-+cY2 zg4ajs6Ty{>vS>O=ILGGapMKsk+UwAPSFe8DylU0r^ue>uoA~`Sga1C5I|SgffBK(o zUj9G-dSvo0C)UP_X ziPH?KxwV_8!YIDmDKp9q*@2|O?O_ws+0Xal0=q&iRXu9rdtJvcA3v7zlQJ@S-hYVL zIbDR}Q4s@1N7OaeCQH{tbWn6}4A-CzV*-pzVKVF(PzPPVzAk_4;;THjIxXTamoAlI z6mx%%^}ONa-3F@y^PHXRx~>IxMsH%VyG8m5&!8v;WpcR>Oc>#yh)p=;T|7CPTqGPCZD-ULV3s|g%<*~KlAJuOifr_5z-<` z@(lMA7-R9)rwKi^Cd2jQ>4y{bvaF2MLyD^UWZj3kKen4CQ!&FkV_@sYCKfF9lb}3r zIe6!^JkO4j9LHtXlCJu4U<-oU*dOOAgX6Si%5v_GGVbpjgwsxx8)X=eaNO8&_8Kgs zy~)~sYdb`*FFuV268sJUh@qoueJ9;dnq;RH0xtLTYIg1>ToR;mZP8z3K^{M0JG*$A zz!UW;$FI@OwTYV5Kcy*unxJRoPyTYaK28}Ck__Z=5Jq@0J`zG>g!Ml9jj7-CG;6BukcWlm_ab*Snzg@Ijv7Yd=V`v zPg3$4L2cm1)tB--+8Lu@#&*^mAaKpqWoU8S&Nogv!ZS8eUzxel}IVi$5(QtI><=M6B)_e0{uCBgC%5U+fvoW}WQ(J%k z(nmPBX>Y!%Ph(k^;K$d-$vFn~dgdobw1>1Y2O{*4KG&}O#@PLK=>lwIhlv48-A`Vi z{r?|-!&C7mM`+S;x^J*N4ClE8JN}ux_`a8;i|Mv=01L+5%=P9>V-@}C#@Y(!{d6f~ zA)GazIFEPC~?hJ|pw%Z&n~F>3)_GvhLRnJWrIb_VK&% zjw2FY@Z_31!P33RkEft?;OnAeTZfPO-fSHjc5U59Bd+UpJbY_cx>b|msjl68J95gn z9YUiGsTXT*fpN71b9T+-FYTE3L<=VCXT9Z8*9(vHH5%{PAD^SkCx^&C}nu*YIg$t3RbxPTtW~ z0g$11(dsfCJpqq`a!MM(7XTS(@{qQgPnYR5vgQ13(b0@)x$%4Z9Z4K)2l4N&-NTK` z2}@i&E8UmruAL4=+B^Gtj7xm+{`J>0Z|*<(eCk`?=D*+WrJ;+LY|AN4w(e%r>=ju) zY-Qjg80?b0C=jXt*EtvVB;q?!d2}-?_T|F#{aL^G?=a)*(er1)o6OHRIm^a4z0P1f zdr<-Tzl+v6C8g9}9NCN43V8w7`^m`%8G;9bcD4WZ^_!W);KOUsho8|J-Bo6Bty$bW z=5)-5#?q+!<%m$}j5vHl*Gkh?dv$asd0c}kez=X3Xx@7<@*h@eDe?^;utyZ}cY0Q) zdUwRfIeJo`&eivfZc<+DgSg)qtiXJ%6xGG)`>cW-uBwULZCCVJeDa}v1LtoFG^YO? zN^qwGB-H2JiTBsADacEf97oePr6|A4PC4il)Q8cZep)Nl;lJJK0)CCUl%?#V z94H~89IYaVwd*kKmXaR4$!Sfot9=5 zDnDrR`uh%Y=tN*a{9TBmMb*#ZVKvnv_;Au7gn**R5&0~HiOeX=vRD3*C7bgm=Z8F* zd*{{OstnKD^0aJsxB2(1TB1*$xAADaZsK(rlus`XF%n#?;TWlH)D$I5FOW_DA{os zkgIu8uHh7(4>{Lu#@O0z;x<-F<~G{C|IpZ*Ks~!A&YHmT)*buW-H^3@r~NhA24@+5 z28O8|c`HwUQg(V4jwv;r5LZ<#y(mu^+JjEtG5&(5VjJ9BzU zgexWL&OsZbuA;}lukdd?kTZ03w6}Q|U=Oxl#&DH%nD0#p!qAxFosSe@f~lIH*OzUQ zK}imh6Tqqodf5ua%2gL#U;MF7G7C49O&3MiXB-JP3Su@G)i1|lmH3gq!U0@tDJQR6#P73LEGK1XkcGkE{lM)}*CnX}YBFyRM%7kx;0F_vao zHC*Fg$6FuU2!&6hHJE;y$1z{1nAZrsF*0t(nGn)H9D5l<4a^w5omYGuS!)A6>#uPS z!TZuRIMA1i^FI2_r>-+}JTtgzzKp@wz2g|&{DpMmws)kV3_O~@2B@WsvT{at*; z_~az7O_VE*iQpaF*B*ZI^4aV5v-e=|dB%e{I_{a3^WU?5R2m3A`)}|CtFw8!4U$$i zDZTCTaB-5fo;e;j2b=*<_JAvZjPg{h~J%ym*%1V8#d@3 zi#{}fTfQ0m-}Y4>j5}kCi{~Do!GjIC(mW7jFXO#GT{j+hjW^PT`o*U!x*VLf*Ee&0 zt+a)`XsA!=nNmPF2Iygh3GWR}CXJ=OT<3N6-P@P%D(K$;YV&Rek#v)f-xrvHUWspv%=Oj^9bMh6-3+0@rgp4qrHxlJ;{N%d=qYd z+`Rby#b)P2hU-bI5@jNso@Qqx506TBJ}s^0PV=4N34Z}U?cxD^Wn(R8?590#;r;K+ z=ehTGhBgm-yJXRD>hQGOa}LRaQc@hqFh=9MY_R&b8n9b%_b}(~i;PYClLQfLmfkN_ zwV75VGO`#&UmDhNVfch7#b!5epFcC^Riv_Rs)-s@6^ z--Vg@C|KBL6Fg-@nRjR$E;;N{8(Dod8=Fd_nRB$c;5@u z**kD}zs=IETCh8$4jEIX{^lRHLhz4%|J%*4zWlO4KxM0f<1(k{eeC}_yh;7psZaJm z(d%$@viZ}Gf1Zutdpn0YgYA|(KaCSU;eRh)I0%RM{JU?yjb8<}n-8_~ z`6pj?C_(M_{ZTgWkJUmg?I+liTct(8*LHhjs}V{`!~`{HK#HeM9^;^Oc{vn@7gk|- z$sbqa=+%qu&9moU#B?@@6j5#hof0M?;7vLGc3xiuX*OoSnFJvq0ft$}m}?+%zz7)3 zdsfbWkIrj0llA?FH(!q+(&l!saRkU+CtvMnFvVDI zE%{-Rk0FU?sMh2d_dT)qAi-LG`gMkAywVCs-hOEjCJiTmyk*FWsuIGMrOiP|u~8IL zP81|Y!l;BOMjYcT;WS3hD6bwGfxZ+N!{!2FY6oLELU05rC8+ssr$jjjR~2cK{usb0fkd(WQlJ;D%s})Q8+at-4w&(vK`)xkR2X{rZfom*|J&_muzT{;NjgirN#smine{Z*>EmdMtG-`7cS@(?L6LVYk2P>YFuRW24 z`F=RAHsH(rhycO8aMu6$wvS_sH|8;1`rf>lS6?ng#~_Tq2ugVFUIwaYb8q{s9rwVE zE*kUdW*J3YZ`jku+}D_F_Vr^fc&+E>;qLA4DA)S$yA_p%pXTCp(3AS>^Lh@<3&xt% z(TvwzAN=J|-5Tn~(A{oY_r){dUN6dEjfJ%Fk{SkOlfF>if{2oKtzeS1VPV>)v>9mQ z?AHd~;G9}vS-}8(k~#P^_#3wkn&9S0ZN+;bxI5}U+FQ@LO#Xpak$!p6v(i<)-84-1 zcb~Zz9#n=9rMKA*K0Ko>{Y-T(?IlZmr~g@f!NP%e%0Z67rz~D|6g~u+BjDhvJXXr_0 
z;W*%DDfnbsFK6uk{L9ZbkKn7LjNe2Xy4ag=kDltL?>qx%t%m=RY2_!E;Fpf1Z;Ijn z&fV|ChZ!-wx1QF{RL?UeEHZbB?Sn^;GQQ(j4(d(>yLtPA7Fri}9MA9NsL&@)U1$e9 z>q^4ktPklO9*S6vpC{T)>26ss#6RcP3{KmI-fx|O^-lQpN<47t;p2`!EtsnzHz4b@ zrrde6YXr;U!nw#Q;55)zn+(rx{KclG9Z zHi8EOCb&iz^lELbML=eG#{d_yn(PXM(Zr!m)CL71p>kBn5hU?;>;x(yX)kJ4GvP#0 zKt%F%hKO(k@rT+Pybh{5na7%LqD>?G%D<+k^%n+^a>8s(17(I85%Ag!5MJE$8JCXi?YY_AKW`nQ4hmUdHbgmgK6kQ zzbDXJ1cScAcfn~EiAu{iM{w}Am^M6-V5UeDduJIXE}l!l8@v=Vt+M!szf_u!eJsfh|Hk(Y-}9tL3p=cXpKUUZ@e}kJ7M= z8#4$zKpffeY)Cv`oT(|t!+!47;X&$SeW=tZiB?&ydUSp2HVaKs$WY;oG(Q#@8iN#9Zi!&lHQFYVf=pk8t^8 zo8D%WPi3WvJ|_#Qa+wJ_t6hY1=Z@e%gEyXXso&@Sd-c1@Pvvbr(X8a91}yx?TlV6m zBP;FdA|&Z;cAhCsB8jt z+NZ7LO68L(c+&+!Jh%EtvWJW;?RZwMcfY6J1p@tfF3lT!cjF$cB$@oX zYsuKvUTJsjb`a8psAyR@8d(D7{$qljq2M>?1zET&7XA&W6Bbqi^wbV~0a*QH2H9Xb z=o!NdPOECJ!QIfMei=+7&!X`t@)}Zqvir#xYU&#PA{AU091GpZ4UH8=ClA8K})msm$@_zHi*&L5`o!7On<& zBUdZH;f`ZK1t2=e@7m!W_@0fDP~X!ki*H?aWdVN>^eeiM4chL1ja}=3mP|%)hYbQs z719c;me!X#j5pC~$F^SW}hFWAiDoJ?>!P&BFy?d|y2 z$+>ytJD#>a#6d7)X!T!AHji_s_D?8IHR(6nc`^m^UE$625(a{8A>o%&8qsFEM>|4s z@=chV@itjP?!0N2Jvexk(l`VN8Z!yJ+ee3H?}E?xUt1hy+#(sL*eaRIN|k1qqSj2<63L& z;NdXi=5>nfvla_KEL=r?yXG){89oe{SK+|8daCs@7u%6%9Fr-|Uwu{RP;Fxvee}KW zw_c!7>t>~%v`fz0bWKK%E!tl{dlmeSXbx{eq*`P0s5BRgyT5+n%K!fLfBHXfHrq1- z{@N#hX>;lD%;w{d@3ug;9gu3f!)U+$fBw%2-TCx;Kiqu!*Z$7tv!DKpW^6i&Ij1N2 zG%~vGA=2b~pZ;JBBjIHZqhqX?0eJf8ev`8Y?Ot><9=4@u+v9VBq9Y@(aq06yrbL*0 zlCu9nYZs0tbTPQ&xhwCkO?cRM3dy)fMsCiJj;4OaFN3`;?DuhY$RWz0_j906Fj zivPP1hk)oirYuSK1RKFc7^`~?ppZODX@XNM_)hRjXBv=VFU>*Bhbi7B+=VCc0P+wc z$nKj(reR{m-1Fp!vob@#!$623+HP8Ud8m|MJqZlrPKWPFoghI_y?(<4T;(Wh!H+Je}`d?JzjN;HtAW^?vxId~0xx|E8cS?3#B~ zPDs0aqkWt?OZsgkA-evH!iS+2!xsP3h)U_d~}7 zt#8i^JnQ|!v3sTO%10re-KDhMZ}|s2IPb@-^9cTFU*&kIv~MrBo|OiIw8Mft=_v*H z9}cx~l)x!-wb63`lK~cjgTwDx+hdSUIal5sz?T96FB9XhPsVc$d)_n8$q?{T&JBuT zy59}x@eiYUWP|*10Xk~}&>1r;eLPTJ)8(Ek3(b8qZsB&6l;H7fcz4^M`c;m&ymxS| zE^Q(=mZG-utQuEZ?b<2Q%G+H#98^I?RbF2f)KAe8&GS{Gp9@xeTm9A7U3Fs|SYJ-QxK+6nNec?`s7w2X7Zj#6 zzm?p7eK5cWE_B*?mX$5ca^#w9B_8ZfS+z6R#NJZa)+`p8x(q+i?r1mg^+p@vKt=FB zN^5vvSPZr}M|!QT45e<%Bdsic$)i0(z4d!*bQsKKK&52_N>&-wH#Dz(BWo&W$|$Yz zP1)caxVHE6uzOdp{9W(;C?8Nj$4FunD3|Qw$;C4zmZksxsju%L14q|x>`^wk+k0tc z+ZrOu1rHfJ5de5ioz+E_%#Jv5oPIv?ZYyJyCv*rLyvfqhk8` z9nPgC&&~%Qnw)Nn%=4FTr2H20Qs~jOPBp$-ei!*fhM|%2(c1#)dxhMrF|cvBHgGoK zZhF-V2W2IbUB*`W4IPFOZ*~Jc7--ggijojN&y#+!&=rP^%?qp{Ab%*w(qCyo@|9*M zYV7EUc6B-o_eU8@PclG6hg?so{ouyUIbC-`4a1As9-eaR0duBrE~I#0x_oPMwJl1^ z9Nrzi6+aLCoptb_tx&%n{>31+EaiDOF5GGtqVJ}>o5gsUvM(%){Ba1Jb0Dl!V=S0u z_~he{TX<|6)C|AMHgm(#ccaiG>n`Bk*z)B!g;9koZC>MJxQudfAawEk9Fx4PuXldoEN@g$1I zsjmdz)vxQ7(oRf8P&1X8!g`ACJmZOOAD58Qg3lQkQ%#n_q^Wn3R(d}yjMWerBUg;n z(B*e!j>dou`*4BFL)z#C+!1T9Kvu+(I5<#kxOk*r7&{DxwV89235MOsvb6rNpSxYJ zT%%l+stjpxtT~|B4x-V(N==uxfR*N9v>&s)Z@47+CXlZ-N&^+(1)rtz&iK|lY+XYd zF?-Dga(S{e=2>;xd+VTS4O}y^GPPqi`wu4Qnb;o|OP`V2ebWXY2o3~OkHpqbM`{Hh ztEh6oqKA{>MxfCM!gar&7Xo{k1xz1j|U@(yjh3*ez> z$WVfJJ`+s3tzTJ#ufF9kS+Fd;$upy!L|p?j=+pF!2D@iUK=-v*-E!&T*U`YWDcJ3q z<?{d5 zhVf+?ZuwK&m)@$mm4gOdgZJ7Q8t_j)OSitguItQ$^$af8yNX+Qoz#;=joW{$x-qHed9%{`i`ElI5pInvO!u`|#^|!#N^p)&yLb|L*0uevAX!Hm zcQacC^^Gs^ANQT@JwnPz%!vc4zZ-$|Yh}*@k9ZCbh!@}{rX@}5Du-r2JX^Ul^7l+X z*KYrY2g`>~!RxmXa5Tff94)gps#CvN`&cd-slOuTd4Ky}WpY)H=IQ-{q;@WOH^1lk z!U5OY+*R-H;+_(ezSS9(IR=4CcCs<3_4prMCDU?Wg`u{`t^WW<2TH(g=AR%2&O# zFEJr#3Q+^ET7w_`(0}3x@tpKqS}nk$6)mCimkd~GP+2ow)5pq3Q$y3;HL^x|`iYL+ z<@MY{bU!=|jP=~_>Iv|nq5d?m$TzqS36nlcW;+IswJ~kNU#qW6GyR~tXMCuVp+8*! 
zqja(27H{J@P}I(CeNqfghE~bW-Wkidg=I4y&=P6nyS-nzJ;{7qzMLuU!s%1+V(T5a zpHs0|4GO^L(e_>1*>P@Ld0jj_TBs>TnN6igTuruCRffjrAUU+0F80`Y2sG6 z4lm}GcaoPl9+t>hFUZlyw1(Bf=SN%X^r|i^mGPKq1>*_L|I<%D-P|rdI=!OFjE``^T?NQ~_ zsd%fr&)XyY{=MJl#dCsN+s9R}>4`ZsrfFhvM69kWt@J+iKJQ&HoZYXi8y_}h+}KX9 z3m^BhY+e-8^x*!Jtegk6q5Z6b%M=Mda|)X6f(}|h7%H+luC@~QHZo@Ah0}2O(cOikg&r`$>%=5Oxr^ zHg_e+xBtWoxcuhQNAOwcTBn;dAz=pI(k-_FVZ2<(E0}Szm$p&~iqh$?+ypZi^qU~p ziAQAghtSb+N7%3Ia*yC5ywp6MzI5CI71uyudz}=x-izztZYPjN;S3j;h`?EWS0xQn zO>>Mw+4~7*>ltD~rVcQ5sJS*$1PxLc2n^=3U&i!~Iz383;SybS z{B2zZ(5)h|I&=eQbl(kNt1g2`Fc}~QPIWJlj)}oMZR^?G*FH3pzOSK&WZ+vD_vrS% zH1fL8LwwWx+_r!4R2kYi#t}G5-rsRS&wDlWeinK35{#5UbG25DQs52dwji1nSqbB( zna_;05nc&GowGmj?wXpR!s|e58UTMr)9AX4G`y2S6P_tPP!6!-`XgK4H+1!VRUec>){suLd_qt!^a7o8XH z$U@G9>jpwN)OW}$d6d7Dt?*MigEYB%6W%I+_A%!!7Z(N&2*UFSE!ZhC-vIr29@8Xj%Q5HVEF*zu{ETUS;)7 z5dFc{oMv&L&y4r%Lh7T)ml;){3?y9X|HVx~0aEt3|qx*5?YgZSUWIU8^ zl=w-{z+Chho)|2ErK>N>`^d+Pm)S}MoWZc@Umpu6+9C09L$}5!iG~xr^S{q-nr-Td z4|+7u;pjxuGvhQV;fx853)-gDV3>BQq4#KoE;4g?XwSOjUE`DA1EY5;p9kGfTD&Wr ze9^t~mw~n35AW12`IM*cs(+^UTVRGP;(En#h(TTQx*+mgxB}NH*7)mH3K`sS4coQ| z4+4doo}9LA;F;Xw*K*ZT_PCxM9OI%TjRM^Jvju8ffDl0QPWz4Z_T4;Hoogy0Wp`F~ zt?@@6!G}(*B4fl1iIw51waAiuBMWS)#NfD9AozaDCwK76_~c2!`;YE_Gw~+yxWAX4 zrjHp@8XNaZ7xJgijf=WEOgY$khf@Y!#Ps1e{4i4uxta0wEJA3!gJK2+rBtvy9ygAm zjXvobnN;70L;8;~P`l1vY}=%8df`f^Z-uu@xuo|x8h3Vhh%Gr+OkdCmFSN=Tsoxee zWSdB|t{LIfG)*h4i!-9Ml0n9OeET|M;G6rMtW|9EDGL~`-3(7>I=G?L)fbE73ZL-u z@WI!c2L7^y&~%IK=+2*we)U<40Pbf%qML@_xOro9qv;R_g>kxYW;|*y*}Z@G zPyWu2&+ZE_5^F>+Yl}mA9bsH=QD1>z)g~b88Eha36aBpR=POY;VxGCB! zab@nxp5EF!WzY5zLyN6o9zhaq2yJ;6VC9Wc!RI$)2z`1G&=HFO=2i4BNkEyM!70S8 zV9g=Kq*LEfL2;9JkPNnN29`lWzqiVe;+0W7t879G&Jxr^3kl@iDp}~+^EHSi(@LjY zgXPdJmfbKwm zxAEqo6+^~5NE3$sYGO+-ARvk73d6~qYf>L&~gHfWC4W)0pXG>#_TE{{m{95(;S zFjUUq0UCU2(~i~61PJD2kZObu+vB>P?Vp}lp53)@zbaUG zS=H@mQaTU92c)1C82y|-(NgQ>K_J57aE}nTGzn77rnrVS4P3= z#{~YOWiZbmS^im-zC*&mWzb&$j&V?5s3T1vCOU@V=~om^GinC=Dp`HH@^)~XOn}ZJ zhCpPD#|^_5)jnxM;PAj~Bwqs!__d^TB;0CeKWl6CR|ftGQ}a%huz^Phe+%4cmwI-I zhB9VKdOxvu-H-AseHT2qof(zx%$!bN+EoaD6^F-dx(*O!&F{YPxC{PeA>XF{{tkZS zbMN)aICMdWr2i59h&YAf4SL zqR$xPm3O9vG8Y;MwOBWQmE6TEs)BVe`E)yVPrp`Sm*+Ff7|hZ;au)8BpnXdo)glVG zhX>0;Ht6e$z>oOCKLymyk@@^UJK5<%`R5C4wl@VyikaF8q0GcaNO06(6LH_;g{ z&hO|22Fe!bH3tqxrYWSS;F^7c$td*oG(Zypr3W8+;IHAJexcPom9SqhR=V!>FKyF7 zEo9k^kGuCJnF=QDHqWZDEQ^t=Ym2-3=J0dr)_$wd1;!;q(RzTbNOi*Dd~eI?@wfQj z@sMwj&rTuuwX#x=tD&O&nLdOM!s!6JWXZHwTT4G>RJAdRlHE zaxpSpT0ve9z`oJJ;nD*!SZ4_7!&7mjNV9*yVSNclrXD0 z@!7<;1#3-N&r@6(RN79#wir^oUen^DhrKp0v`CECmQkYp9IE%e`KloEd!;$3?%=N@ z=(}d9MF@QM`_DGN{q@f_&mMdkA2Q5SyodKQI$DHzA;r}*CxZ>`dbgJcdB24b=c;dv zqWY&T%*-;G4YjKIR7N->M*I_5e?HH!6T~c_yxc5+Xa=+#hpPn!^;2GYO7}P#!5Dnv ze2&}aeZPF?`m6r&dH}stulkmq7GbS4I&onbL zD``gnEyWd1U$ouM#o)Yt{pRM%b;l?3mKy+CGtzkbwtZm1Na&2xEC6T9I)cr#QbNpSe@p>V()04^&lHzQ8`T`2 zw=CaA05AJg%Lr?odcTo}z7*^{A+*d4Q4terNMv$PZXs2 zl3_Kz=iM8mye<(zvQwBnca5?kO@dYZlc$o<9PY=nRS6@grSFCS0mBT2rCV4vFmA7u zobl`_51mR=&Vd)b4UQ!lc(iK-Y0r2M7LivuLeubr6+WZPRGy;1HVTi_-7AA4p$;`I zZ`BHyV)2qGV32oxm#(&%>bv{V<)-%X8eL$trIbWBd!Om#Ff>A{PRf%@Ibf;7(Fp7U z6b<_GZU(9Nyj*b2Ahj=-LY}srgOD>lD*X@_YR-j~p>#PCjt&*^C0qy3_x`az+{)u#>i6PDF8 zhSMl?aMW+Obi-@JxL5{k&*p7?xaYzJD0mcnsIouZy9Q|0Fdjl=IK$^_5MJ;v7*;=D z4L2y(=+*8r>%+52es#gcd@6rrP2amcgs1-uzIvuu%2{nw9v&KHYWmiq3$%hIh)SPQ zIs6eWt8?;C+ZKEku%64*%Ne*vHtrO~tsa+-fd{hE?MbSrBo=LxFE0y>_ z8z>X{Jcq%AuMMB}ZdI@J;qhT{1Bgt$Bx?qby%;$pLI1T6&b=oe?4U3XjcCQegUaKO ziDsbRT{xkVre7#nSC|H@KU1e$aD5Bgz_@ZsOg3sanYnO;@1RA=^7U7Tn3~Xjo+$_3(T6*N@ztZUExvK@GTzJ>p&k_vJr`~OqMR*2UQQi@H|ct@ z`iE~aqw%JvWAOIc3e>ZCP_+@PW^XBz_9@mM^nYO-|6)4Txs+@;!&77r 
z7#(!Mn7Gh}_>?sAOX%p+$B#FU9z1A~U;*Y@lq0BQg^?77Qy9|4yvQH_;7>QVKK^7@ zjGjqxrNdbHyBA+8(}w;Qy&0p|O7Ld7>XS1A8<-T$hY#1$z`L0eG@6|}!tc^?>Lc|b z#-FiwRyl`HeEc}&@!6{<=}8$m!6s;4pLtQh`osGjXPaT;G`=_S`Wi16j*Ye2h?mZU zZ?gk$-oD>FPLX}t9+u#{*o?xtj1EY)Q^ffeDG0B+n4)~UVD?*W@pC%b1=!Gl41Sg; zn>I$*;E;Oyr0tbzqxvqj4bIILMqFuDgdxM*UMps7f7NzBk3Roh?P$@#?N2tJ{*}MF zxqbU)i!B->8n@46z)bq&$(8GuIuxNTchVK?!TX?%_wS|1zifsEB!c%pzI%6byA{Lx z&0<_=p}?&Sj~gvMu-g2f@#=mDR=kLZI1HcFX8qu5vlVb}2Mi%RP7foS=m$>m(ycs@H6UjyeNW@DLpCfl9yc?-|x(52<^io6_bd;cVoao{S>k`n;Ovw!R4IB&x z!kmF;8qo&O;I?W<+2S2%4MF9DgHnRFW&qIqZ3g8Ey9%GRK^yf0LVywQP8*%%MH%Y; zz{xk^8d--ox)}v%a2s#*7G;!!D!a??d0skwdDT;09uodjo~P^bdw64hQUexCqYO$l{6a>oc20kj!%sMi ze#+5q{YiRlUmuWY;gtaTh7~-= z)DA7CMN_XbU9#`i3+!N27uZ&v>weN|TQ_7qc)eFF#RE+kK}tQFLZt2B0G(qn_v2-= zrFg+MB|f7}R=_A;c>vLt@gSEYH+OZJ*|G&qg*L!1T#@VAjnA}d50exzZcn~T*(Hz1JtTxWcdPM_);OmE470$N4~gZr{L(>lu&q6tL;qC zVaI$D`!JLywHL7cY03R6oenwFmT)@04*d z`u4)qKk>88?kTx(=FH}Tk8(8qu+TX1;_s8;gO_07@bLrO+Dy?sS6GEu?9nBH`DLNg zwu_-e;?FYyEVPJ0OsUQjx_MB@%Huq_uL?&wj`#QDM`0>XgA-@}@X^DR*v5utqIFi| zGGpOFt7Ff#$^OM=CoZ;!>3-YAoT*)DbDQJZa;X0$=Vk^yoYGS!l)FCWu%|I{;5PgS zOMTM{U#;Qk=Li!%1$X+2mAXy@Gh<;L9DG^1j4~w(wzdRHhAL|r5Y2An=(L_r6a>ep zt*oB3vUj!++R6%>^9%Jkmu^+oYR}=8(ej(${V01IAN_Fi{lD_pH-GY@?>C!~(GfrF7bUR2 ze=!!w`FHp_rT)u~;r+UU2_9x3hMVw=&OiLokJ{?!hlNPB5r1XZ5k|-S`irkNfA-U# zY<~9_e=+idZe-2Fo0NUqTV2jzv2Mb4QMM2hI);z#w{6tzLYeM%w|_`Gm=MjaW-qat2>I)y(0I-Ph}gQ`PB@w_FJ zdp{j527b4llEBb-2-EPstSMXlin2#Yls{%u?)vUs9cc!)5kk@~Vc_J1VLptXQR3Qp zdFtRfFr{L!6E7+)A>)#kcS~4-qX;Re zEK(j{8^;>xz&QsZl$H?IseG%D@=@*mB5+I@rbKQ%%!td|Fdox7j&J49;5l6e4uIbP z(R26kvooG%+==njuQCE$d*?ghOzAq=V8Xjs5hbCpg&WKoo_G%oOf#~KY=>{K8mx8$ z;dY=Z{|M_5s^yOs6Q-bC)pu$03|8J5D3vi!OQ4ztS;EO6gb-UYS5yjm9UEVaaxu+W z^(sD%Yv2bNC2p5dJ8RiuH4W^8Zj3PSX`8m}Jc$#Y5FCzXym9l|cqiVzJ(0Jg{lMB2 zjprSnU1oT!A34t0OHGK5Dd_NA`z=Hg;>8F!O!zVQ@Q1--W`GE?(!Y6yUlOvxIpIL1 zGlkE4MOhh7cF*ty94vZKc=W%Xm+rV@1;?P7bqIk{|9RU&{lj>@mh^5w+QJ4m25Jh( z>KDWQLxqz6(Y50l$kT4;oR138Z;DI>4b9fwdKPsTuD$hoc&6{_oAOHRx$J%~g{eH@ z1u+{#2^GwdI&H7*LDvm0_WmJnr$(>}wZ|%QXDt zJtJ(qW%BkJJe6nFOHNtygU_m~f4hD-vPrv2xiezA9o~kU9UgtlyI>75F}Hp;jtFJ; zUMYB1{fjzrRxs!z`t7%M^mY(E9a9~B%1zPA1_dWz$MMgPK+(+|r# z{H5;RFL;!*;Dl3`g}l<Y1_IdAK(nVh6cf=KPrb|%>z8qSk)ODhL8Aa*e7~H3OLXYuD7sl zi86pz>SRO*_(vnx^lz~Ay2=)B_G9skV)0>Bk(FIlv&*^CduRNdGFL}b_9)L|WXl~~ zwZ~8VrtE#$YGF$t;A+I6wwpPTxD30=<)l{zigk-&t{ffn9Q|2?xQ!BAAbXP#>?oz zP{IlkIoK~mi(%sGEPg&%t)JL0%xkvcs$*Vh;qFIw?rv_~`R>dFIHFpiCtK6OppZw% z97Xt1Gb67%@$9SL{ABYyh2P?BTM6B~`~A&_-}$(*+CC{|dN1C1)3!qL97l&YeHbX( zWX9t5-MgF1x9-Lh$(Qtor!QV^o~1i8NS-uv0B6&N@c8VRXoB~pZST)!sGdK`C@fOs z?D;b-Wd3M#t`Mcum1(;nj%gVmM(fwn2cP3%GX)=H5MHhAFI#)?_H~>16mj5i2I8x7}+40P%x#G=!kLZOk(h7THEwl3lb_ztZ3Y}IZ-?x z1?{Lty)0Hsr=7W-h##*Fk|+WYH=zdIKpbU)H)aHF3_?j-o~WuN6ZF)W9pi=8TerCI z4vgcKlNLA$ow4c@)P$ns2Bza^NdbjVc$)eXf~HpqsD3b#0Hp-oHa=;c&gnt&XIuyTS*g&vM)J&++F#WvyCGwFu5RcZkY zud$tXMwzKqU|))UWmFC$ACDM-d%4=LNcbGyh`B0r4P17CIN03#`pGO_+le3#0uP3!qqFq_ z23vK2@jyQaE?!B(m9?gnQ?t7*UC5Zpbw&nlaGY4oj(Uoyds9Qu9VU4 zdyle>Fntdo$lJ9B%*v`f=nA5;m#+8t*R@)??opW1AiXw>LKg7{P34^dIv!Kjy71b1 z?-_ouC!d`g?9(^>rSj&$jov#(*zAn76t3y(JQT&c?k$p7qu7o^ zB76q;AZPfg^1(WNqnmf|JF;X!8!AFMTKNqZaJKrj-`oGmvPI9ZGU+Bm?Kj|CBwkCZ zx1KiSTjh;X7!4MqFL|-Uymr?!DhB^AJj}B#t;{kRysg>NX8bqu0!<;ZXNuMd1g(I7 zwPDeagXGLsMpClG8*3LG_)K_2^TYO$=2=}k#;v1P_fDZTBSF*g%Q6(sPk2VqSC-H_ zH0BVI*X(Q30s=!lB`;tCAL3!lTk=xhIh{OR;TZZ!Z)ybDK}HN_`=ifr;>_4U=7k5w ziMkzLKyGmgjIv!Hvv(o-F5b=qOlgBhJp+y+vbHSRP~5uj6O>aY#n+lIu#e2F%{Hc2 zhS=o6MD@%Gh1EUYTTaNNJez-@s${?@FXUCIU=TY)$^MuNPuR5j-_^CMX z!Z**S80$5g3^$Cz#ouKZ*h^L)S~y=0EM?Um`NEZUBg6X{%l^QRqay^<$YaqWjkyzw 
z6Z6>1^=$nZUF^1IHtJ-38%zq*Ry$DPXOC*a&}8JK@ZuB~FflMG-NT>3L8ebwkX?b2 zv(+@SwIiRRi%{Gvms@BWkMh_G$vHeIL@1B*eql0uwMS@*wFTnQ(Ws#(<+szgk~3y8 z@W%OeT{(ZIbpv_gdH*TrZ|zZ8ILh-vVeC08Y)80;_jczw89Z+t*IIkUVZTl(Jlhzu zm(r(^W&vh1{Td_GiOlDDe;b^XR{K&K1L+S8+5icEaa(-^|6DA+cAjoMgK&}8`c7?n zbg)aruTniH%3XA$i| z9&LEPRr>Q86K6UJ%lK$~*)N34(%EMn!e9-m!-mAZbEF;CSMKEf)<2I9??3)u{@G@; zVgcT6zW(O^=0auTKL=P2T#(p(oGp%{XH1*truQ zMB~;>WB`5j%@-Xoq%&?>A97=J;nsIH?E}@A5pO(-2d{TfL3Q2j$l{~MlUF$phkxUP z*9^1M9Zh{E1-)I+$gxY`X@=qIouT=Qc2YV#c(U2Q`@LwusgV&IU!c@izx~zbC?n@o zhUO<7D}B4YVIQ=6QQGL{LHzb42c<|AI5`;~o{C>ywZG}j^Z+4C4<0Y-oIGo#S9(2uyUvVI$=F#1nvyVbrgf+`-nb+MxB2Qn_vM1exg9h zqeBlKYW4E8)`63|@)^J>&lC{XcnZTc<&M%%kn+IIqRg!ULztS5qOABwILsjl!LagI zcK--(LTxDr6%G&Jsr~Uo@YSB`AHyO6NRjXg{AHgnUQz9pZw4FhYU9pGKoi1P#xXG9 zvfutGYhd%P0n)QkL?%tCdQT9%J0?^XUEu*<-X@gcGg5RBEC6a?A38-B%4K=_zj+%z z7z#SqdpzK2bu0sw;>MUuC@3JM@tX&aJD#~yNs6t*qqc3U*l-wsmd{Q-6O$SoPR5&c zEqQfLBp9`g;9>Ai+Y-8`5~@gj|K9Tkk_MqVt!=ujzp+4x6+es3GPDoSEm)mkx8gJ# zka{nk^2b$R)9cIn!JO|-4%E*5{dl`PLxnQ9<0V0Z=K4U=LixvjpwZOV$IR<$3qxDUDWW zzuD3YdEz)64iBssX{-u|luj}a>}!UgG_Q^BW`LDWB-0 z90oWAXJ*RrS#l0Y^AZd64ju;1Zj|S@2eou~c0GVYl8jFPlQr7(`t^%}>wFuV%p%(GLs25< z&AQsb=T*DQD9a*YIDgly?FkA|@Si++wppUKpxkL%~Wnn*dYk|B(NhxZG`NWZz9!Eoco&G2z@PR!!W zWW=7^&tqFihn;?Sd3WPC`Y~!amXJq%vnC>(8k@+)*OfGoacGEpA{+tN3UKy&1rYJnU`i=!n?5A$8E*= z^zqZ-OY}1H_bRXc%NF3C=-umN{LK$LuGdaL$%dED9<_=7?`EdMZzriWY9tdIYoqJ^ z-~N2_&F_95YQl|>n%a3LoE%r?QGz;_8-u?U&}}=)u&W=0&)3giZ+`Ricj<>2V)|N+ zc41q4MaaC%Vel&5;q_5rV@J^|gK}ma$)VbLnW6E)-Nqetbeu3{rjWC&%gwt&kB*O@ z4KJH*GONIu^FD*?W~U&$a|&Da5e1Jj4qk4S`l>!(@+ z+-$?yEA^Ml`+sRV%flx>+x+C`|0-e|yjjytf%5Q&gz5AnhsE&&T+Zfw7yxRwqhejW>DqkibZx90O6z!4@?g(-1OaDo09UPy z>9+a8#CVE&Cf|GR9ci?=kIVCEU-)pm>g*~}c7lB1q3~|gQxZnWuD*eT44(mvaIUb* zFz}g*tV*RvKX4gb`x}E*@VsJ7lr&fwH6`smI5c9sXah4Lh~~??M@cpsGG=rbqq@O= z!4H21%wQyRD{B^4^*sYJ_=0DqL{m_BC*e@r#GQ_D)1X4=0njcH+A)}{4ug`q7;4i3 zpw<8i3S2B>Z*ldB=#HmMmA!ubsC;$qXzMySkvAb>Vh~*6?69!5O1pBU(4%JukYR(a|0Y`l3;qmuob)8TBl?cLA> z0c$UsW-j|RM${mo`_Qey{Ta+oc|&&Ds&f?4=!Y(22uOw`Q|yux^A3HdFeEpScU3GU zB?47%LwQC61Nk_CJgA3{! 
z+jtDE4fsA)F^0g!7G#a_k<2>IV}mz%*YKGBIUQrrXUcfp`^DMAdpsdD@wwx5&!#k-i4K&- z8H}nnyJ_${G%{<_vlVUtrpkyWo~;%oPr|#p642kuR6H9;O(=vsWI$B~%Wfg%*7Y^BBdSp^}m@-s#GrSe=NU;>0mN$YkMh z=uUQs;Wes!OVfDN?q8${zBe0$R>SY%E#{f}(7>83G}4w+=u#fqoU-%#Yc@?PePr?2^hZt|a(>=ayn2(vXg0jC?GIq|TtE7DT!9;~3B~TQ+|AWIwvKswR-I4f<8iBgnYt>HeD=c-vuu|^dHT{>!KLO zh#+rz_N+yzbQNp5PS|+mT1IgOB3vBifj2f>$-93eWq2k9sx~zQ1}*(>8NtS@OBeMQ z{KvbW^R;p5VTOW*wU=6adgErZ0~tIwFI>x+)y_FjzK+)IOW!U!2OM1)Oo(=+1-jM( z2%FlY7&m*cU$_;ADaY5@lwF4H(;RW~FftC{t1&;>GvKaiZg}1NTb%>s^ zmm@+MZ~N`~yyKA{b)wiTvhFz}kn@r_KOtfu8r=QllMKiUn>We!XUQ#N>RvP0X9_X; z?xnk%yPviW;~3AimLfiRPpf% zBw{8foY9@E|JeI){=5J7$9t!rZ2r~1`M*YR)*pt62-z9T0(N*Trk>vkAaaesT0*V& zQFy#S8WaP0KL>H8=u^%p|Cov(7FuLy6CTr@VoBkgL9s+5lq00FDRIU*h6SJ@E38&f z$c>Q=m=vX%eZbryehs`^PM5qqI-$YK6f@z0QPihhiZjJT2ri++i&dJ6z(^B&-C$vL zw@zjXRay!B8wDDSD)P5N32W0&bLP4FtBZmt4a849@&-p=Wec7@J9u`At@0?C%A7%> zdJLkN9^ky7E}avuMz~cV#b4gtwG+6rZ*e#})q*@~7<_2cQV8I)77fz{;{c`}&-$wT zSp++Uc0b?nu*GAE#99j1A^2AQUiVyTzZZ_-IWb>43OdD~zyt$)kAeseaG~>{h$n9C zM~r-g>FPPbJo!u3bL9?72jC#{7F-P7aVtk(ZFw>C*@K>kM1KWg}0mI{KZ|1>CQr93q@K={I zN9o8@g_mEpiva!{zN>!t)uvf&j*8&xxoLQ9t1-iG79zuiQFGeYD=i<|1Ougl5_w$T zT>Uc~=%<6bA-L327lgTA*ZhFN;W^27ytzv=3YuiW0#@HH?}6CEQT+YZH;4W}uS`kh zS)ohYwBEp5yt%fi6K2BTKw&>7;`mUz4B3z z@Y~>EmmX7YFI0dV$(1({yXhk9c1qWJ;>GGq+Fpvm%X(LJmAkU8K7wHfZf0#T1j1@o|lgcNq-j5nEg$wL7$kal$xb;EwF=rP&?0H5Qmv z9mQrV)5!h_sjIAR!{5T`#KTtm8YfmOybB-y^ud1_J+@~A%*l`Mq8)<1U!i zY{23y=f%;IMGhNB1@l3+WK@W3lq(ys>t9xmFCi{MHpb&l#l5t9f@F z044nN|K;k=w)9$(!@hq23N>J!4i3)TvnVdIMNRfv*5u(8eB&qZ3wZfPH@eVc_|Cra z@;gYjWs5RJnW9KhY&N@3pVMc;JQSuX0OkKLvc4vlH&9jIw|DN$$cV_uh{(vuo%ooM zq96Os;q;49gXNT!kUj#-p^IHe&M7055Cw>i^pI&Q^9-|)lhl=J=Cyp+Q1z_B{HdDAMw ztB2oio;`dP-dkZww!dt?HD_FE9|yjQ;u$Ed0u(CNm(BfezS`XV`m4<^{^ZX#-#+-R z`B;?qS_OLi?fuQ&FTU9v6&7UgA0E5b>cOXa;`A(p>&52Z z{K3Bp;Vp}_L{HG2MDXS1HMtm`UB&Pv4?{={7r_md5mGF+L3lv-grLHJl%AKpFk`hE zMrmUqx2&%XjbnC8@6xkr<9x`o$B{2)jEk7?Ol3n;@6?|E5z)`ADwq%{aJ6q@odcG7 zUtMDztEn=h=q0u&P{IcQwXKJsqWY#ACG2@?OjN{hlN|0(%e-pUe9R*6n0q z)6Yqh*C-1E6*?f$=|wh@jQt&4cAL^eNR2{lg6SVz&mM`MhpN-bECH^3%U>gG`aFhQ zt$TNjjrvh>PX-3d?{#j3@!P_R5HcRGeUn3dif^dpXpzw?L9k-mTZ#*azX7%ek z8lJ11mQ0isaiA`hE!o?Up3iiS2PJ%MH(224mA@2as3B%99AX10%1`$%nA!9tNL8xzI%uNT-hw)*> zWGTew&S_ts>z=`ReP7Gz;Z&bb>ClJ1*Qeg|8I64MdH6&-zCj_BAkgWb!4RAdt)uBg zT=bUIz^wUj=_NHyta~_}9M*{1U6T!bZR_V5Grngp5T&Rmv*|0?#1A{9Wj8O0O;=)Y zZQ^s-4qBKfmlAh&CLx=ElkW4rb|x3LF&WQ@6Si6PWkT2_PX?ezjMvyMgdus<-NBu4 zHv68y5K7U0LQcX18Z7?BtCXa*>PaTyBY4L4uEov$?miSQS-ReLW0WW5dTEq1<@`Iv z)rw)SAJsPs%hWm)7#N1nDhuUs3NEm!!^DpYGY;G%Yr3aDTSbtc{e0t)64V>jKBHjZ zvYN|K*l)5FDg-}p>Qd(!BfB;sPY0T25=p z3*NNjrDO2ugi{Wxoz4nnAC)xrh2iF)&@ixV2(Z4oK-1TCG^DJIniZ}eC;lfj0={uPg zoq`2#2(Pj~Z7aiPj3Htl&|!=Yyb#^21WcQH7cT9?LWVa0@BdCX(g`7w4satIae!5g zsNFNPWzaf#o}sJHJ#Wl`v0^=^0}P<+?~Gq!0Z%Oc->#afYw%ouhR1>tUe;W9`m{0D ztY3eJ<14rLdpTozKjOUwPjt{OrRbOps=O=e4o`2#h5Z+tJ!@|1vz{eucb&|8wKV62 z#Ox&}#N`X?Lo>3_#z|pEmy@F(-uW#4+s*6yG|%q+g%{(Y{y%9Wrm=J}h3z2kuNd>` zZ}c)YIB2#qK+OI)6r+U@xp&E+F>33NY)Ne|cOeYG$R#&g&nIVCJm zI!y5GrRbs^x4Y^!n}g;O&Lv?o_Fl@+WIraO zR+t`Y%&JJAtw7UZgxHuH_R4`z7kE)516hiljfvwt&A<4IKj{;>()V;8n|0sbzq@%@ zJtu`6&Hm1JDW&OG;?CdY{K92d8+0#UwK4ksuQtED*CAKk`?e?xhQ&wMFK#~i=+hZD z5;nX_XW&$Dj=}MhmlMuuKj+Mg@u^TI&Wg7$p2QI6=Tzs%85rMwTi8z=axt1Zr0ZRZ zKi1vy&aq5K&8ZY-WaBryqS*s8@6f@4LGPoA;7|_UFpHbl8fu4a8ULyVb2{ zt)x82(6N_Ri0BizXzU)h2jY4<#F=ZCH!qX57jJ&Bx%cb8oQ=scHl6G@_7AR>!nM_} zaOlMA&BcFy_)mBK@8ADF3#xiP8p~nW;IlHbJF{nooHlX!#XJG)W_aUlDMy# z)7gt;8Uc**aWUS-WqqA`X(uCDxz25_-p zf_&boGkp(7;u+@MG`3g8U8IGs{E~f6s)dM|rIZKw1ou|gSSPgk8habJ&|p-o50l_L z3ul%9%oKH2ZZEx>89`rrk+cn&^Un0K8Y0ucLkh3C&Ao+hqktwq359zVF5$z>HCHg2 
zQOzeQjpVVBn5(E&8l}IpORCPS7RkPHUeWww2SL*YU{Gk_R9}Pa34@(on7a?I-mGvM)M?>ndqf z6gSUWZ0A-|THG7B58CL!g7aNAfH|bNU z7vKBzn`?N|jru?2YP_F>uVsCn@@w_F?3RVQ?KSidr`*KaK29USUo9dTjNJYEs!0%t4$e+5^*Yz^8PwJ~fQja+Jm z6Ts_~#8=H&WE(B*ur=3lWmZlP)G=eun5yq{Jm>Ut)iZLjvH;!Uab8@%=03~wuaK$S z>DO?CFIZ)fZ*WT%PJ!OXDTbi+XO`&{iDE&~-HP`<9(3Qvt@F{;j&iui59Ft0PEW6YAOvaBFWE!EtT zSYxU-$#Odh$qYM3_j*3KiH_nb@o!a(oJK>PKWRgNdn^wwaDD4Rv@H)~>*+76JCZ_% zp4Uu;D?`qbA7|6p#BH!j&exx#JWw&$$Vu}Gm#@wXj&>8rk0)pPz{*dnsLugiJK-eF z(YOf}-7m%>?M0hpyq6-?~+0 zJ)bYVbSAB6Ezw4wZvSRXg{T$8Umt>ztCKv5K)ni}ew(v;sc(I!A=`Po;z`NK$)y~f zSm79jYj`&Ffv>Rv|Jt0f6p-dG<7T^TzzQbc?FfF5qWXH2{;&6(v>V3DW#2i94# z?lazSRdeH=ZWg>-{HZS-o8R@wvxz0_eIBG=eg4bhKOUt)a*LaLv;5*QM+INbMKjCj zC&>s|7~rJF57W@_mz}xrME;NUo(zEX}o+)8;%Ki$Ba9hCmhTp9_T{v@J^i6*Vm1OSB>S@WoohX zP7FAOO#Ctn&*-EV@%GAZ`lR`K;&^SuXydQgtKDUGmmDkbfF#VancI{?$ zHAmZc^2IN|n6Ya9zf^qA@BH>pH+MelumkcgIQO#6 zm;(yx$GNP_bK&99FaQ0_t7mdk-%2+)_jmrm-}`5Y>5xL%i}?#Sr$klv{GGTM6Kvnb z@f^n((l&6}MCp)7Vd6p{ttj#ss9>lRN6IPJnRf{{0o^7a6FPCoEkRJs5<$-i8S66B zIHvBOf#^BRP%bB!6L1NE=Dk5CB(Il7zD;|ck7!Y@^44?h`T>pp5i*EQXpIrmeG~ZX zL{VSwx%6MK*c?S%d(7~0yH>mZ3s$fMYjskZC$<136BZQFp3VRUk4bS|Cd`zw2qjRP z`$<1mKD%&R-4hp4J(L$0dF*|7)Tx;oVx|SiGFtSt-u8O^KYg~H2z}RIeV;(yN?H>o zXt$U42j6bYZTM4P!ibQFX0yx_uFyn%E`k(p2^yC+C@&@&!V4Xhne-ANdeJ-p`qQ z1H-rzTZA76m4P=O^j<&Fvns2u@(cb+rEuB}jcxI{#+H7&*Pp+3K@Zxwy~_LCx50;} z+F6s1hLyo5p0CM0+V^#V;8~3CwfVKm&x96#nMk?Zj0R)KOD1ygbD8UxO&bwxcom(E zNw8@I-*5_SafeT7WwZPh4|?C4KKJFMl&SorfT;qC6aHW0Lxstx(|)b@$+)G=i~>_# z@S-c*_VGQ1YZO%@8qfd#uEnG9yo~s@++y^*S9v&`daF4&)i6eZx_ai$D2U_>qXo>p z=T*OES+CDCR$~;3-%_^i806YB$&|vX2^9|yt>VS~Cat|(hF3EJC|E8_BjhqUO1a-@ z$F7)*t*hC(xA(>QI2K!~Iba_2{%-BS4!Dd_vXQKrxYD6Z&s3iIBPIzC7z3m51e@{s zD*lFR_#XqPd+>CUQ=x%5>;3!s6CT3Cj1%-=AYCf$i9Cy!xLo5UDP!;l8X9-XQIynk zP+Zjc_~c^!ztB>J)HV#J-SEP_c%l9%I%6%6xvOVst~+|^|I}5TMiJRR^w35>lzZ@x z?Cj3gK(O?pVpy1~582zjTOWPyM}5Or_~n!brma5X1Md#s^l9F6ulKbD#sLl#3iiyq z^ABLB@A&kZyz1F+=CtZx^(@)ynG3G<&Da8$dVQZ|>FLjU#t8#@{GrV?W|F(z-Qu4k zYm}&En@rhRHeL`;``R>z8CMi0%6F~B6HcJ&xZRcxuXa!bW%zlH7ES<)lx0^3z2FxB z;$Ja@Hbn^3zuf6nYf}TGNI!JK7*9v2J*jV8>YYdNiI~Ej3<={CPfnuzlv}4?eb_G0 z^YM;Y6{kCiT{4bd<@UvQmL9}y;&V!*)I&3OCC9}=GDPflcD(&@JBgoh4i&%hCYSH? 
zVn2olQf|kI2+v*I_aQUoxhxdbKlxvy3FF%#1=7=LZ|2kBr^N3awA4_Ne1|JAgf3?k zQT9Lm__IIcA&)_PqYe;(%^fGn}pjhfP8E?>!uz z!$(hZ-A~^2=ykP?3i>a@%G0OCW~E#o*5`*09#>DNz2^SsgmrW@y5U!K%h|eHZc&$z z6(^7JI4~Y=6|Z$MzPR9+=K8FDbh{b+c;=Kv>IWHEKTGQW$wwb2r|inkQCVFM3HbE; z-!C@oN~wQJZj$c-&Ni()i zNyg+$Sva4Ndy|)=uLE|K2XQdL;3@eG zgAuIVGuSEE5W-Vr=fo(Cgh19X!(cWLB;2<`g|H%2+sEC#{?lGz>Kp;24y;!?aR~*t(PR1ape%@d>@;*VR z7ApavcEEf0?l8Qxo>&|DKY8_P(=mo{gs6V@>NBMfEbi}Stx@ksD2CXj&_t8)wH4qd zvyj|(mGmBA4B^pb{sb67rfv1|(0btW40cUd_Xuft5BkYT=6)UDwljZQ7dK&1BI4_T z3nQKkAH)mQOQDTHPA9Gv=dtx>QJA!&uRQS6T6GN``%B@c^euXY&H7S>RTpx#IsIge z#5XmQRy+MTlV^w5_(g-uVABTg4}PRP$Jz<0HQ5bpr#PD(X92yPcm_WFQq?OzXRx|X z@nXle-M=$BR2r`A8Xoe&6=7H(O;*wYSf`)Ys}cUzm*8pUKBmYA^PAVL1~dp?WyzaB z&NZOXlt<-U>w=D&etmS9wYISM*Wr z6lJHR3!YU-kHAJz-i{M5^WYAD_u^m_oqHLLvGnYchs$O_lj7oyzQ+@)o|U`2LXPLO zBH$#OSB*z}U?Ml(_5WR;@ht$i%ALNG6SFQJZjMW*1QnDX z_ZY3(7)5_3v&HM~|MiPBIAc|%iq}}G{$St6oQ;B0ooMR$DiZ|Ss-jT0W-OC~n5X#!A@MXtAFQ+4D_Y}-?`+4@dX_$U&#tU3(2`+)N z!S*`1$*qRc&=~J&uiL?RO3S{ic*WpWo(xhxc=gFO{LvkA(ri9y?lX4K5#F$XAAB`N zyqCIW!;`w|@3f2d!P6^Kr!svUS!u-{tqp>PTyR%c&z5|gW`Zm32|ws@Tv;nc4Dq3X z@z8kN%F`;EHVtI)J&ee0eM1d(b%#=TEX~=h44lq)@qiUyvM4|cN)NW=WFtRCOs?Vd z741h93hy|t`VMD(TjNsS>J9?{yv7II$=>ihuQO-ZFW-DV3Z^$39izNb%;24aj)FNd zruiGR>NhsRU~wSnEh%hh%4i*#6F-^z@rR>Yua}c}jJoD_25Vfpc@bY5D`H4tUph5O z+@GY>f_3-!VoL4h&Gk;fVt5P+@^-(-Q$I1H!EnCubuJz#`Ys};?A`yizrj^`p6yv- ziH0xgr^9R>-1}~GIYrYPBhTcEa+Z?EKmYu5%*_8Arin6M$z`jD zDbuY=T+Arb?;F?OpOw0ituIU2*Y4N9`15~1r+rPvhm5$7E__mdY@m2NbpP`6U-oW{ zQ=9U(qBkY{Z~WxP!M)X!h(zPa>hO!rvrf!9F71(+J}I~GD<%4RKV*(Fj|;@#X+y?= zoyr%beY&3U@MLqn^iM0wqcPtmiOa=8egD?q$^eLOYvDng4Zi#Gi^*}yG2p0Uxa44X z8-1MI_2lW%ymv7gJ4uw|if(tWT%b3jIm60o!Z+XCop*oxw|_Syv<(fx`Q@*_oYU&0 zP`Z+l_M&lpF+L)bPlA2Fn4hm(C3u`+120xpcK6!--5mJAC&e7agU?EDb@}#A2Mu(% zLNM(R(2%1C0>mhX2Hhx6eSY~ILnr84LNS3z=L2cHIvCoLxsVY9g^>VI_m>i$g=GTqWKMm0H8NO&zN@8gYY@EClvble~jIBY=AfA#R_bSX1%_L)Lv$$2KcET7p) z+&zMBE9htPpJ7`k$+)VF_A`kZ0c;9`J0N0I%Y==m`oUWZ29H%MKexktIt(vdi1Qyefk|z?s|%p)t`*RpP!0v3X;FhhG;# zNFdE_&)T7Iue1_c%;;$%{HsZ!j(07OaCO0B?@V}7Na)q-ua4INnp^^SXQ#jGT z{`P-9D=?lbw>0t^9p>mL?0xto8XKAn8Ztu=FUW~bN&YeHc zgAqKV;2b6V>^93ph7ii!d*uh_zf#L3uka@qy;J4I2MbW=&6u#k~?$(aP=|_c#8DCa&JdJ>X0#D_dlTDernaTh(c&?5yh5CHR)pariu(8<#Vd zP8C#$9$D3Ulq%0$gX7+t&m#cfZ^>44oBC=Os`Uvy@v?C^xEcZ)zZ6F!67NUr27E16 zK3UOc$+C@J#=u_vap|)(6;31q&xJhu`%QR4h8$f@)}d=qjQ5-uow5UGLf(#3=7(0* zyh1Be^y}~1CEQpFFO+Iyc;sQc{o+X4rl2H?rp?lP|KLF=#7yidd{D@6*l!$EbNK4_$di@R_j$_9i#(Rk3YC}@&zv6T%RlQhAl_n% zy1DDZX*iI4g677B|JsTbNxN*L%?Uh&9idMvk7s@6)CmaVYJTusytn0Nv{M(j$$prD zJ95u_Hhj=`V}?A@?_M5S4nHE8I_jN&XVItUL#2^Jcn_11vB~~{w(sMtSoh<|F+!?! 
zYV)?waNo-dFYyccMQhg@9`uZ}NqcMFH6GTua(IrD4i_&fTHO_#fc|i-Kgexa!`wCF zy&~u0E7k~Fy-JC+lBG|h;PyU)l!w$WDd3w+<;%?5drA730@W|30ODI7)@O%#;qSH?$?L%6kQy1$ z3PQ5xAh|?=ypqyu&Oti|0|?1LUu`i;9Pj(A4G5e%JLPV?l+nSreOIm=7>*8hR4-_tL`7My<6s2!dHGH@mf zMo(pg&@A`}vGq>XHYgK%@Uy@icmSq8brFD=%%mDOEx2P&19HN0wAJKc@|26qj4~*# z4VQ2ca8LhhBov&sPszzEnQ-y~Sf}rlgx(9n!POW|HSL)`_cGuHE)(l01ToSU2O2=r z@v!s;K{H_n>KKp~EUP@Dktr?x6LN<{shOLzRM{h;fo!u^RF)7E--;YHg#aD*sfCd6w(GWvVIWqy;g zlhoEO@o3rNxOkH%3iRI=dFQw)cXY5TN7A{AVc+C z#_xE~g4uV=oG-$KKA)5Gv-h|A0tTJ#Db^D%T`tNDTV?HGw_)fw)?191$%jHclm}ctez8F(0jw3G^dg^C@wF#Aq6L1l3+!!*zwx5~!{5QX!^_n79-sDmDSG;D{3$7xobod}S8EcDX54qr z*fEZD8qQ`+)kdF2(V6dkn~TAzEXBJqs*O>?dbq{4I=VZ&Z%mFc-#zvD=NhA?GCai^ zHaVRS5F|y)iBZ-q9`|nGWg!SWs=SyN!^cdPdGxdg24ibZBgzAJu$R&hogANT4jHd^ zu)smX2sexuUh6Z8tC!-t7fkaChaRFf)|raewvIMiG>;Dy-Zo65N2C6 zhIw-{7^)9EXl48w4@9q1qAs+eX376qVFgyY_b(I%(4=Tiva&}`2{nM1MrKOgNfUAW zLk34{F~*ktM-hl-&#h)BKO9y=PI_nKtsyTz}i^W7C9zQ=j-w|yJ5bWmAb|_ zZ>;?QYY$NSO)>~(r8Ss8`}EU_lhyq$gNo3KnaANap3qOJdl+}>We8mEB&?4=`e<+g z2V}hPlr`U5wJiC4@8KDzV?8dF&V|BD?tJiZVI3VJ5N`G3s2#gjIjyG1-a=`!AC0lj z+bc8?hLrU$z$PXhj|fqLZ=q1TjrGgdZx+t;aT`Q$)|bY7;XPKve(|$E*?gBGisB}e_d;Wt@Kb*cw*YvE_k(U`` z=1L%1QFxjB;@FMs{j=4l6P8RMUQ|C634CwgaS-`KkU-8VV% zntQIj+We$cP}d7N^4uW`-+cY6%~xNxV>(0NFr(qdjp*J#d0@%l|N0;Odk{|# zwocpabBr|O1Zp?uFX21~HDJif4c(%z78h)5oFD@ppUF1fqkL0jrSCW`9qkw2 z<3Bu($qx@1ON|Nd#D91}oZon4qluW$bNh0hCBU0J#cT559PT_z03M7%Agn|B32~eb zwXm`2sFSp$Ni*;%9y`Y?e%jd7$8&`Oj&T%X&Y!Vj5zd=9Y(nI5Lr(M=%JmFM@zJm- z+~B#&bVnmocFP09Q}w+Xf+r=9%)qbxhuWbOzU)2T*$MdzPTxnNsjStn6QN&0ApB#- zx$^yx9q+Ofc(L5)fwkcmG_=m4L7yXCZR(TyqD^04mLjc6!lPqn+x*)rLK7+al$IIa zg@T-K|C5QYxASzZGu(%tQvSBaE&f!eGVn2c8*Pny<6soBkfe?2Yab{M@VdRMxDL;S z;}LogjCa%>+>{~x@EPXdYLr4P`(K^tpe+i!`q6}=U@3+{N2c|U97@~@z>r_72!k_UeY;>;_Bs}yNGD;atCQf}^@{zKlclzD&nXOh~ z9;?Cl4L|iH9C4ELy$Ms!KKlfO#$3Hzta0x3s-vS0FFq?2j#HHXHw~HRrDt&DK9B{$zs6#9RN!q&s1?+A=>=AE@c9sb8q=e`R`0+ToTHQn7n)mbpLVJUt}yO4 z+;wZ%1byx3_u{F61O6$$0n9Udt;G8Dj3aT*(S>}Imgjb!d~%ZRagywpg2rd9+hf6D zz#v>9a69qEh3YDIXYXBzPj93MDl=iPz5DP{p6PmYoYESj zScJywSM@cVFi0+5C{JW*kvMboU0NA@Z&l7_+er8(T!&UQVk|KKy9n`{5^n+<0s5 zN(MM``LOVy0lYp-$!F|-@X06h?(2*K5figY(`jHFJ`RpB=Qo094DK~&Tz6;6 zysT*RDno=3@gf{EmhiSj0Zvu})8j&izWw%_=G68 zFl$DqVgKIkTfr7TXUv&5CL*PZW?qnQj|B`>j%a+>#3{aOvIGTySV~^8*|=C4lfrnF zdz}OjR-uvL8i6+6<32B@H(AGUF~kVX+Uc#3y9mR>XDQymAs{e^zco8oVqt`WiI+e? 
zbY2<*#J~BEn}dUrQmMJ8lk-!#;kqhJe4LsNQ@nmQQ6>aj#MF46D#OD#LOli~*k=P& z&sF6bLDpaIc-?o5v)4e2S7+dj0noefBBqX#NxrF%pfgArS|&s%hpbFRt4_)M&n0NR zjFC;EbNWbi!)48OufF!fvy}c3t|>6p9oHf}KvsUkQ)7kl1o-e$fx!(i)VJ>Sq@NMM zr}#i`b=6?Bw=9R|y%&yELGj^jb0jk%;eC5M7LMS4(Z}}*tr$XeuRh@qp(r7L@JxWZ zj~-)yl@E4TuG(-)ppD@+3}F%gv0<0mK%oR8#by+*5S;L0Y$>(nk40f9sZR;67@>Mn ztmErmn|3HmE<%=vBVa~&Zw;k^rCQfYL%7CI!8e9Y2nn+A6^^S9TzJnOjPvzzRyv}S zi4sna+RM@u-DH3tgH)fQpGndfbCjvD7L~2@&w26fA7J<`573)V4LQk^WqJ1MjrVG_ zx@yx|;b#*VJP2^Q+i}5fg!9#({XG7Zxc$6SyF22w+l1CcadfWb(Uv-Se&j109;!x3 zi{~JLGPCTCauA&L(F9>!!t#WUAw!SsF)$XuVUoZve$Wd)jFQ~@Ja3}};05JF_hb>N zjw%juNObZQ&mi zzvKzCuVQM{HvS2^icV;Qeox2!g`e=;dwr{ai#NgTf=6Z6rCeov4aYOS`pk$nN)!`A z?=NLNXsUaKq*#UnpAE1Sy2-{=x$}ix^>vgQO@o7@VlTOWu?g;S-uQ|Q+|o>u6%^(D zgEl^ff3oQ;*@iZiMyo16R z-qt60VkC#;Xj-1VLP@^)_Cc`8=8{~AANE78e5JA_k$YqW#f-b^ z(steKUSoj*8$QScm(|6gM}P4`zlM(~ZAGrtR&9*Oy_zF}I)yFb3)$#qHL=fwkdR`& zu&;;`Jp8gi(s=zt`{Y1$jYUQoItM25Kc2!%>h_OZo7}k3+R509&x{>#rsP*vU(I7n z&aJ=3zl*{}QGc7fkex;Rv5*DzY@K;EI_<(0_-EcBL94@fCkVm2!oezX%J)#WWx{EWOZPt=U6W-wqvVJfyE}vc9(Q z`zs+*^_y{iE2Yl<2Fl=tHseO^kwYu~yp6jllHQRaZdQ`xFR}gJpZ%7FH|{l-?Ikd8 z%8cVjxDnb~@<$u8h0^HmBuS$LexO|xyz z$Z$y^e;r;fXE4||;1HGj8C>!<%aC*DgFBN<=U;3=o=#FYm@4H%f%vVppoc`>eDc5InHCctX-$WmMkCQN2LLm5~0q0c&i59IQ z6!LN)i;vNtF2ooh`(KCqM~y>9zxmqyCgkV+8&{UIjKj4trhfY;WSKkD_;rZGI0$O; z*)#hwGGc`1>^$AvxpQZ8CnIq$deb9>G=cZ~KmM)F2X{Uheh~_Fez$`N+DCA=m4L6m z`7YjSB_z6Xd=Mh{3hQ~DvGA}Bz*h3XApGcRVL#W)^ZCI?A4Iq2?P&FN#?t@(fBmag zjGm7n_F;Ya?2`|ZPwnUEMABxn;K?!A3eSTlZFtX-a<79NcBAF3Hge-rV-RmJXeASo z`RPTgQ#&VCSHkt*`v?EZ*N{}LJi98X;g<@@qb z8^pcf#s2c7i5;8I33I}f;!GK%!xDyzS6ou62Uc7ib)SV41@;O z5(Yj8NNuWXAnu(JG<_eL;4T4Cfl38uJ?i_k3ju_3-$psDg#X&)Jy=4R61M~%T&&U+ zOK@Rkug~NXvtq7MsKb}Zn2EE-*7M`BbRU`T>WO8u$aB5$0U zUqvl(!s%cHp1SWx{rbje11F%M{nnRR z`fU)50-|+71zy$P7e)%g*kWfaBtFdUs^Ezf@|HF zw(weYz=z9w(-#G{-@Av0Mu`vuwIEo}gJVs|#t8)0_xO3LtBMgseHlTVuo=s~H}K7* zEJlHYV4lc9`k8D!M(KrI8uORZ@B5WraOO4|P>|t1PGozqrk) znDMUhE@K$K7@yu7T2bzToi|t?Mg~mj^=rxwJjQkkT9fLG8`#ZhVi&gGx`snEgtk;U;Ep?@$AFFX+Pm-ik_DD-t?yuP+zC})t~9> zs%y|B+-9m#!@|K}_!Pf3jj0$<_3C(SXJxW`_?7`+64*;w_gzMxgMMV_u#(_XuR8FA zP4_lOf)5`pS(8E01a`SN<5>xaKc!b4xnDUeA-w2&8F3dfCaP?B(P*3rDjbt@?lT6+ zG^?*Suk5Cj<$bI#6LJ#{jx(z6bv&s3T6n<<#EolK#m?rLO+jnYei=P?;H&q%dzojp z{^17>i%B8ad*u3KnL?h4iN@=~i5P`woGb_*_}!$(^FPP+TFE2ZtQ2|HHL@~TIA+1AV%~Oei+N6-czKd?c<=7ln|t@aOTj%Hp5S~~^}gx-2a_eoe4y>?U`t_C2VU9A2n*?27y;Bo!`>g#Xw+~3%I z@3SA}gy^X0#{bK3bGOxlyWia#-u|%F4OxMH^ur&_rs;e4zbXvq>sjfsS8qkiobTX@ zJITg_c!}M5# zK*s?Dl!4r!4S6`;vSs2Zp&ycBcGf3ZLAY5RAWUWoGDt(V2EA+9#=t}b0(g{m_W=_l z{`FM_lfej(o)ONJf6n1GAcOE?zK5 z)=y1=5B*rECIRokwWyb60IhrT0>lsuTRbSsP0&O2oEgDjf(heq*y9Ul@ST`jxDRGH zR5x0SnFSjKY?Nkr>c2JukFFTp)zIG&N9vx+*{3<#?u#y%Dca_z6nS`?c^J-tkQp1Vx{F-k@K6v3Px8F#zfV z&t45q-_^BxIn7P~D3}x86RhfG>=AzYFdND!71bf;T#jh?gX>wYuiiz!U}?+|7!wa2 zkHd-ZpVx;r<>v`%k`M=|E^eNukE-)(9*zXr7Q^QvIf63-h*+x$~qZb?z`Y&42*g; zi4Necd#4;XNYTgNl|XP=Vu>51x{LoHx^|GCCMnWJV~SdB=Gt zJim1$wqCNI;X%$i=fDbx@%uJh5H;PHCHO#Zk~O!%7apJlp3=K==g~Rbt}FZpH{8h% zvgjRs!^7mzZ_XL|qiKMVq2?6*bCJ75wAF*x?aOdgeeW#3RUbo^l3{L``UBQ#F3MT$ zAS+Gx6ZSZmz^`Ps@=8^|_N#09QJ=`nHMUQAVilUQPTEvcq68Vwq;tx0fO;G=!Iy)5FPOlz1Ua z7o#hukNrn`rFOh@?ZeIW_uk)JN{JS+@VeE}7yUec(~49puH(F_PnS|~4-$$8#pBDr zPT2z|BS(fA?K{~D4={>HHsp+vY41s~>rXRGofD%kq62Q`NvG5skIzf-dFEBex;}oC zH+toszxUl2@dpoWtG37X?H{L5jb0!~2)tPjUZp^X*Y5a|P#Qic3yi zYem74$TtdElEvo2l~Qa*8wxmm=_r`2ibFFaqz$Zvd7UYi{@Y|iJRFb6&NM4&jn6T> z;tht?mFi~1okR<~WKMmVQ<*XEd=DvuDcv@mn-|a3SLr=pp3U*R|7vr))r*~j!oyO& zr82fU@vLy8Uw{4UR)vo?-}~WDXCt>Xqrd+8SDQci4}a7ifV;yF_UGOH`1ay8u9BHt&e~C^Ube*_Wy2nFW;VV+JVnw^jVb~JvNz=_^91ZB)xEB_JCLk 
z71AcuYRAC|joRg9CSSUV$wmQ5=9pj_&vR`j6#{rXwnbFMI*1&MXEP-SVoYGOp|-L>v>ZuzTQX&p@e(rz zs3C8$p(3!y|;c7B)uCM2v?(4T~;M1y95W=S?xZ#D4|5+^ndyUA-x|1MWsP} zt#7hxGS{Y^M6uvdc3@Y9RieFXMzfnp9mN(Ia->>OY0QMXd2A=-A z5|U%eSEe@geckUha6m}c^r-iF%Z2A``?bQ57Cw|(SM|(m+68~#tIGgVgkS=vudOlr zIQK#B5}S@DE(b8}w3>t;OrTAv!%SPjYGRq!gL^`b`ab(aWJF~K^>4F3dJ_o}`2;B!Jjf&pH}FoClkqsf*hhd#Z70zl<8)Yh9ee!-9Y!B#j_ zGD-%8JD8;#Wkm2~Y>%oO1)=u9?>NLcY;Mn9f-+*{9g=wgeBeG`nZqX!C&SRomPg^> z^^3hc81=CXMQ`)oUTeZoCqu##`Pn>2G8Kt^&vXb2JT`T%y~Rrov;di~Tz!Jw{>5K- z98Zh~FkEVXFr>fX55i_5-{RTR;H%`ql~46+!r7wBC?DvEko8A+&&I$SZ+)M3R$rAFn$|D!r(REW zV;l|s7y`9V`1g$0cgD|Y{S@6E4^J)Ss);D$ClNmQ8$%!vcY5!jF}$DGzRGH2DFs^N zbw+neHsi^);!hjUO3o|R&zvf_b9$~b9hOJ#)QbAT{uBA@9?r; zZS$HvE#iiszdqSK>oCS=9Rn*osK;ZhTulK{FWm7AF2e)PjhD4)Sh~q`DM92&#p+hd zdJd}bD#q`|*$TCwIA%Q7!St8T?Z9@)F0^fqsB?f0x7`aj6c7feehTq3uxH;>^;vvtI*R;R!@^bQ`p4A10-vAAfme4V)H z9ejaaQ*ePBo!npV)_8CZ{p)mZO?iU`Zf13&lsDd&-3yxuX#%gYFosL~Q4=ZBb<*d7 z&UjlgbUQ}RH22lgz%V@NwK_a}*nK>Drf@@V!pT-<&p58!;_GRuazMaOwHuEt9D+)n zeK)qc2}ipfcBPCylGS58^__xzSjfkjvlmj1g$6ZG)dy!JQOL&&-`HhhQc~~VyO-g5 zt^>Y=lEjbEh*H{i8NkF(wHco9JNjKnUhCgXHn!hh=oRJ5D&fyO3#@hEF~nQ z%zmbne+QFYY7^*Q%Cn4hC(%UM(ej$t)3@y})0KJjn`J&j4{Ukor|~1mX+QTJePv=#oN-B79#ZP zuf7=P)$QBwZ+`T>&&tRo$9;xc@LEmVZS`RsoYD8?OU|4IWV|nRCwd5Rk}dC6ad*;b zn!^tI{vyLG0EW-?O^EHwGFLik`N`w^dClulczBri{86Dq!a{G~yfqy8s(n1q+9$Ao z;q5w`A){o!I?0s7Ry7_z+}YgD3tyns=xB26pS_Tbt07K>qxL`i@qhTYn?L`L|EX1x z&JV~5ajBIQ2Yo&6OalAUKL6quo3DTMMe^g_=KT-e-~9BaKi%9e^o(A|VENfEeztj% zv+gLla44HkY_`);zyH-g`7bx0)%MBTG8F~wi-+ILs_Em5jwj9UuL_BJ+UD+8r8oWR zyD|-BRG!b!c&|1-`|y)7ppMgDjtaSaeDY|<$g|2cv2A|*{lAe*U9}N&cFTvjrjnQd zlgHo$8>SD)W{8kU2+jEEYu>vAK4CWparMk*DN0OLjDS=Z3y5cC zKj?kSv2n4v!#A@LL55%z1B?sk^TyuTLjR z@Xcfsy#}xGfcnN549GWmr@%0D>dn{j7SQATh12S09H=$mr!ONA;V&FapQ?`#_Fbk3 zg9{%SbXC+pwC_{!5>)<5J?EMJLYU{jxx!mjt#|r7OX>}rRS*34QVQc}CEz-pG||ij z=M3bbwTgNZlB~Ai^-&PJZGgIlmL*$$6HFH`;59q=qYfBqf6C}e-<9Q^JW1I(N$K2g zVlmE~C^^<#eWTpQl1)J2EgZi*DJw-OzYbFVrPGwk?AEP>OZA+ycOX28_2gYT+HKNH zxjWYcgI?E4M`mo897a*7-Z6?a(EYGE3e50BcpPP$1!{XzM#Z z(5D&M$rPUi{LNsUmtU1j0SfrU3^Msh6)1jZSH^Umc~UK~(ES%7OR5$I#D ziObYcw6S6IeViCc$_8SMkYI8yebn7&-}GE`QdLcI~WMx+1Ps$ zjcvrokGT5$cmbFEoe-zW8MkC+cSk1lXXrL_NB!S9mqVoYgB#q+Nf5b!4r({)O_X zOCL|iW^f7#!87QmT|f`4!QsA@o8QcnjkBpkWv6xe*pwX3;j6}LW9vAns=g~45zN2! z&_$-!%kIM`IH$q|b5Fp6Zg9T(q3+657dbM|Yv)BC?n}Z^;_)-><)EuLP)V9ghcJ!N zUb|L<(COxl+wpmIH3p^8&4>aZ?)ldj8|hUxhmrmqYJF$F&Xl% zy7dX3=pJ+xD z)d7d-zyL8WHO)(Z^JcIoqlEzN^aB~3f0SN%^)DCVBdw%48-?|ko+e!H?A!an&C%Jj zyv;LxWgG|_dG+FPl}Q{FG6eAhn*o#CYkYYS)ewkLVRAD8Q1 zl*pC#NbIIOKX_W|X7dwT_5XPrp}+p-tIf}U`SZ;OrTx6!#%nSHCnS_NPJDinhyF$> zN3Rshvl|Uxzj+odGqNsSZU){M~O4l~$K7anW@U1WBYy*EZWUw%>F)01B{pU7TA0KY+ z-@P9{(u>=u+&+ZQ+kpKnIq6IWo7{!&36XjmzD{N}vyjP*2K+CS^=5s$dc8EN7ytR; zKP@JxQAm65AAxMw;Yg0&T&6i$m=s9+!EIv+)PkTiv*uU=NiPzMW`$MFhD-ZOdyES( zJ}SYsb%=4pPFXPvxhUgcYhK(YWCQKYY%=TUNp*}Qhg+}4_2%+EoX$2Ayj_8}1NV|F zOS`i1AlC*L|FqdmSiPhe#l^^2Gj8n!FF3uub(;a1*~yJC%`yFysSX)3C^!v}84c<& z!pEHwjvVi1q!|Tvg&9#?-pZY83)tDDEiGv@g(@H1hXWLle0+9vku(HOuzJpo$^~Wq zAxKBt>Mxj1^5IXs04d>x>b9c|$w;f!CldA4N~o?Sa~IrO$&11+-$`CD8u9QI3x}Vj zxeT3yZ>%aaXP@W2e(9rkxjE2_2I+M(hPtdPve;JY2{l$Pc+@ks)Knc5X9dROO_FOZ zsi5PyiB#B+(arrbT4^s2--T~M{1Asm!^@Q&9C``ALu?lq2d<_7*&-8zAq%HlgPSlqA1TI`tNA=?$P)TJ0yywqa|Eqr! 
zE7Se$?95$E6099iVJ{x1X>eI`UA$?D|e&Z;tjfCZPiml`Q>QrT;d+|&BRVmuOzG%}Wz}s@( z79aR~mT=Idu?g>e>WAKg1H%P;_iz4=)-#x?9nY6$1V`kClXJN9QXO03>8;w6++B{q z(PX0E3+ym1*4n7hV)z|a!%>=c#%yqdK0PFe3#h;;}aXtqUBqz^sRad|G zBjd_}C&M4ndtxoBbLbqbi^hv50=$aCLv5LzPpKa*^)$IQk6D=YurmJQ7PG$#F0|uX z>%Dz$oG7jDHoU8L|SwSt@ zU+=DdEscts(Sxzt6$r2Ux$yUL{n{8lc zn$KjL2n%F19^Y>2m;)1neYeynWCGr2A>hB!1ncIAJbwo-@oeqCH77;m(bz}MaP=F* zpbU4idsi2}6rVG24Nq+6)&5Rv#zpP*Gkh|1=u?f74{96Q)*n2KQ?~L6j|^|4ai4LB z@{3b~w);L0+`T!xTmJ!Q-lXNes~_f<@s=3nJu|K~#&Tcd&|mM*?bEVmFsYZe_jq7> zZ)C!#@stmC~AkTMn= z6(`67Ab+^q=|_%(|XJ zJk2Uy{6mPHP0KH4CQVKlf%{9lUrR~ynqJHsG<1rlGQF(L8Sy~`tbn8P zOWLLSs3nVMtfA&zOJC<3!N%OLSSr+! ziyd49C)w!K%NOmeeb%Ok|M;^%Yl-91=Epz&amVF;vbk{eR^HLP$B(|9VERX;&w2Uc z$>#a{pEZW*1g|%@Kl|b4Z+`Z>n}6~@eD)`s&58xU*N-25yZQW=KifQt2rXhquj^TX z7e?;eqEYzee)C~@T3g0=Uaa8P?b?-+&oWEC zk^1p2_xk1{^P&feEHqi?V9fBa+^4UK%Cn5%1Mfj z@{&F)XXvA^e>Iz0cCt8P&N)%jcrn(r`Q%M<{Ggb$Oa2h4JMv`5)oX$r89Y zOiLAeohmjPc}9rTEaF!42(>jVyD>LCh(Q{62CrOyOfx%QC?PAsx`{7bCD7lt?ql*N z)VSZ4fe@eohyiVj#{4sw6D(fjHuZPnND@Nh5~(eU6viT~G3fm50D~DIJsX8M*wkx> z+bAgNB+Mep`GJ@4RDH}<8|yQ4gp~epd(F#25$tDTP=_WrU2;zRX_FNEP~bE#D}Cuj6zhK z-kpiLlFR5=%CJrlT5u<@b(KSG6Dr{nOLw1PXIh|0z`c5zlS}Aozc%VKAxUAEb531z zElE5BW5phXIP8%TG6K5#w(iYk7!bPkxB6WAW0y1IXAHDllFV7vazhc^1Cu^h*YHN~ zy=frerLzfi6NWy4cZ|hw$|WP9PkDU}pvvo0N?TZ&Z8<>gxk;wR*92J|%=PRK)DlhPc~?!2vIA=H8m@R6hCNNmk8YYQ8_1#2r4~1^LP{u1u8bm(Tqvu zqU-bH4jyRT@omQzU&$3E=4p1JHIa&uc$V=6UxGs&OT1ImXP^xXCLa@S%bch5)#Jaq z#!Wc*tvka$=mpnR0AAy@zD(>@xLG~({!)bDS6RFXr@n80;Bvt+s2Utlyb~D-*YNa` zoC()8+z@P%U&q*5COX$Ku(P9n>R`aEHsR4G2#N?C7)RVT)F%@X18MNv3t+Z%dJ_Li zt#STc>4Q$54!mTAAY=yniX~XPP{I70tM`337Rd}{>q$QlYfY!ct#EMuZP@iQ31`#(fXM+O`@Z95EfU`U_ zxDUV82M?4baT?^LNz`1XJ{Wfz7=@oKg%53^4_RB6qC?NZ>Dltzx<&?MNQ8-TRNzuUa9yHY(A-l+X^W?i^$sFq&?59mnTk{T> z;?r$k;aS=&1!p`2A3TD=4pHm{Mtotp9WM=zr$x`AWACGpv4M`uP;b{QV=p}H$Il!c z==Zo-h$}6JqQvt~yxQy3#ka?u;&iQ4X*$$W?Bx{7kMG=WY%Uowv z4BzPQB)-Ga8hul2luZ=B^*8=zc|KEaQ>uGhncxoRWbTdgxxf4TruY||Dr$D~>2KX0 z9*Ao;U!|!3$$$E@nGb*PX$C`1E1QV$5#wp)K3_%%=DAoJr0d1~yiS%p?7)EM;zNu5 z`SQ!JHqWA$zAAQ`^-cIWbm0x`E z7ai2lk@t<6hf+UnigD^-hxgj0ZoEIaSF8^MEkomK>4Wav`DFP24VfL?(Bow~l74^s z$tO9(&TYQV?SJ>dz0IRykvO0)mrIvH`aFC-Xk*7WE#26Za4AFVS@<~jxBs($@XriN z*Gv-q^=-t2!54u{oOX0j)JOTx`XwI3_e>DG@^w;*5(@qt>lkpDkri%WrpJviXGKxE zl$WSE+C@;xQ$j5`2*B|O1Ox#_aCmM7!Nhq~8zXr7Y;vQ7D>D-sm`3@tx{!dc&8RvI z)Ti1dw1d3PCF-U1 zOj-klDnCVae30UnNzUCxyEUPAzjxu^v)TQ5iobbP`Q8i3y_G=r)!)9Y32l8JdRA_X zo?g`d^*mZsb|xP9?LXf05j@uK;lp$56cedi27Q#|0}tu=;1<=}JS zz6aqB{=7p;q0qwVqCb8y*5GSR%)|S>*HL&ho{YU&VO-!e&W$tJjK2MzoO#KcSG;^^ ztbbtcXN^y#7N5C=uhB~17sIdT>f_mm>x#|$^;9PYzPaCfgE#zsS{H1Oe9`yQf4wwx zTi-K~Qm96`PlECC`7FL!JdlF`c=4>S%af_>ICsMD81jSEaMArO4Dhj=5oB_i@iM%z z6ypo6^^5PBONaZfyi;w+MMQcPU%Cg67c;=55gNmdQ5@uRl1{N)hfPvwA$VDxoE(#b zxl-_K%*-i58Lec@e#(C>#N*2wY_k$Wd*RGW%~MU)HP2}sUGISZtHFs6+ zFvCJzN7(M)ayp^b%q9IrV`FnGTc*2yDc|2|7oGdO$}WQ{P#YTyFOhr7`)#asyRzE! 
z!DleVH{ptSVKm^&zmlB_t_#m!k5l{6qm?lI!0%L!jPM7d;0-)t4&a{ZW$2B&nh!S3s^hIy=(_b!5x%!iQxS(&nbC3ecaC(>$`?3uM&%?*i zH@fX!DXINH*(qF~{PyOv_dZG&C}!waKimA9-~SgS@DHB)dy>rB&%6Iw#zY5xju9pG z&#+2!tHAfiDfpCpCucd`jNG>Yjsxqv6k+^&v)CLlR#%G?;y8eun|bTszm<|49YnlH z*Z4F;=Gl{{v%}aaXAC!&xkCyjtItPPJj36&-+j0Fqd)q08A{JLAAS1q9AW&EpZsw1 zd%yd8n@iD{Q}5Sb{N)@NVP%*>c-XxABID#niuuRElVo{1TSD^OBJxyt2Fx$|ed%P~tf|!bU*1)WXu5A3PhQ zn8-%KF){aV?JRC^_ki#kr0#x^#5YNTfj2x>N z)s|PPbI-FVg%|KBNdx1!H{Lt%1a7b{rAdXSx}UqQRmoJi@2<)zrUJfvNlwOly*1Ke%Vg60%meVNIv z`g!Fh9D@O9c}D48VU)6j{hL$@kfY&w9{hdp$?5mt0n?PO&Yt;9%b2(7{P0t4Eo}9y z|AXWH+MJ;s*i*ysPrJ@tm^oKK+aaV+&U2wAo(*%!R+A z(Bl8_xKllL>6@4clUFllqW@0bxP;f6(o7s5cRXXy1y9eUSs4ocVL}0o-oNwyv~xb( zojb^*8h?TRU0!aJtFG+4&f7FN2v+uWg2rSHJ7DzLVFiQ~&X|y-x?{tLF)hE^;zymJ zRQz>iKpRWh&_^pdkvUXQ)~gHS>1TqBht&9QA#HF#rwnzy6Rz7`8Nq97@dL#YK25Uf z=V6|sQmaoL&@E)cMCLlTBX6C+?QoeK5m+gB*Yf*|^0rw*MaQbD&;1HHlY8(*D~`dc zjSBQ1-4edlA5#x+z>5pdj#1I)35Rls1X!Zw&>TO+w|KLg8k#?VopBq!(PqM|P6u+M zq4I%KaXbXi`sTCtszSpl(kan1mMZLC51c&bzyzKzIAmN=f{yCnQvNa$$gOZ^?CGP` zsQnxsD=dfdt)2`ENwm>H-L=i&o#W@p|Jt|W7ptZaq@dT%rOWN2>)w(bZ6XR+uL}%* z(drjtfgCuu```T^zd0iyIL9apmx7>;IjOUbDEyp5`VkCotx^T&Q6Wh9M}HVAPNy8> zFYd_v)(z%putGo6I1ca#C` zMjuCfo}_e|w^mqDW2T9Dts;P}dcy}8d6s9jE-x~CA(G!SH-`(C{<}Z?F}Q~8L3e#6 z7tve2=w%)_bt33jRp60fhw#s)&_#w2!?e9n=ZhIaS}YjiOEK|#H+%UjAtAVjP+^3J9D&m`{{RWCS|I7j#%#9 znXmkfZ&$yKlXx_Nf!FYu6o>ogTAk1M;~4R?*Jc6=QLfvLlRYRe20Kv{NGN|{glZFngg?tqImA?j6?lSM~Ht8 z3$8b~FXjEUtM~Tplt5{L3T1hfqDT20W4l9OI23!brHkXtN=S?AYuWP>oo44gry_(nk zLi5Pg(i8nEJ|~j}_`fPm*MoAKzV5Giz#&Rz0^u?6GQ9cjVDm{Sz#gX@!~L^%wl;90 zHo)2(n$o)8>1AhHIf=r_?GC^>xRL?%L35lkZ#KXD`5$h+{roR70#fGRe}7gv?DoD~ z>LuAQIL^NO^%u>DN0Wc^Mf7?6;BGj6H6cU$t*+!v7`b=z&gIR2llT07DS1NZ;6^y% zY*MYR(VHrdU3Z%YB;$YhsKXI*S~ws1U<;`Sg1!_vwYfd9RHU4>H6aWe6U{fB5U;YqvKCAAGM}!x>yQMx8mT zzdw>t_>Z_@t;3pF^slpbGppy8FF zAWvvbShHea*AoFY3RMN`_Xt1BiUuWb)QGtrjAGk8uxN+%J-m^CdDq|(r*@pMX9apz zv6i4>p9H9D`aqHDH3F?#dQV-3a{{3rs&|%F4H^=u|K3~5SD)(}McuoUM*tXT6Eb4J z%_Nwhte$FUz^^(I^`q4B4j5RY8daWA06q#nBjqeb-(b>*86?{_c(^Dql)DC^PE`La zDc2^P^9Zcf1tM_u(`26-GPpxQT{3wrB_MdW{f400mA`ZpU#P8YVAN!I3I{0RU41u! zq6>wIG8YO5SEIyDncnTDLGBHqS?V9;*8_hE6?teGBns;@FU5ArA0D6(Jj<)KaE(6o zc;XXRhXS_L72`Qi5VTm=d$4b{he5gcNX|dkv3B6Twp@4*jQ&IG7z?#K^lyS&Jh7fB zRMk@_I&Lpyb`2(~XM}J3Q8PnN&xVisdv=t+#c$?o-JFz6)iEntc%+)ZH1lhfSELu! 
zRlh2yesmDVu$}A{Y?ZCQ(wpGfwV**re)9H&%A-r4C?+3B122Bt-WSyfDl zVg$}gT)2GK#6nRs?NxXL-%-lijISm#%9oM6hMxYltXsVNUiTUL&)RiG@s(=G_&h$# zJCiaH1?{+g)lO^qTCEHSNtvT%JCd1w&=emp1~=nC|D&mXEzf$qQy15`>u-;jvCUWo zD(XjD4BPkF57a#udab;Ss|=noR_$~(peNCNW%1_1aY`|xy#{wT>L`*VAT5EU;Q{rd9SLgyDeaO|c&vwv zk)3mE-r-@csr61{cw|5_+(~41XES7ln&N$N+%KD39W{zS<`l5{3qQ+<#ox7QOa!}n zw=!W?0c%?iD%3NcdGk}Hx;b9J8mEADv)cnznCc&+qq{=bM&7_n|6S z8JpcxuNE7beehjB)ZKg$^D?sHq;4-bv^cBVjm^+jIrGDUc|v0PtbOgd3)FY*3?<)@ z$Nc1(3w&@WZUQ^g_q^2H}{zRp!Y&y zkIG0u&q$R}r(}3JfR?BIrPP7(Xf#0mCAC%j3uM@J2*E%?| z8vX~)D%B2ybmq{;ed=K}?3i2a#8>03t{mJwYd16=BGYf){&;iTs=!W3d`}*)JgK)X z6-INumA(Y{ryqVVJ*K0#87&zXZ#zQ!@#mj^-YJ7Oa?&(@;w27WFv9PPaAy4SzF%#Z_Kll7@#TDNuL2(X z^aq#9VR~5{-?&3O77})C^TQwfApCutVU!a*`rP~WQM;l4vN7MDiTaA>(n9T>&oR;g z6828$$CrQk7ZWms@A2h0nL{HQ96ygI$lTqlOU}k)<3Mlq;7qAAj0pzKPYS*IgQzE4Q<SUJLO!9)OQd%wvk+AcT~q+lGFJtx5Xw^k(xxatK@ z?-_iqz0*n5#7E6DDNMK&-kN>ske?re4CgE;r@mrUP%d_1A5h4CnzI~u?36D z8vMOWnTXeWr@r(q2u<{ez1yVj9eo3<$;A8PU8)Mgc1?6^(2Vlm^O+11_y$&DZR*e# zMNWI8sP!(f$%Cw%d!U*yubvfPJl_d>%HNPI_^KRohlV^)a050x)7$;vi*?T+9;LQH z^sX39g36?_Cc>#^@It$qs;h&ssb3S5SpDcy_0vXgbiX=lW5U;_UG?BOhE9D!6Yvj= zKCgGzaAdC&hfk-4v^@7om}O6mxsVDoGy^8TV5xG55gcYP1TVKqfTlXjavp%0aH4x5rrulB3{Jj!azHP&6By3Wv|M8r2?9ZCGy74C5 zFbc@H= zJKJ&|Qa;7kwt29^gy*>?@#QsHe6wg6oWbIN2>l_8C}ViMU*)m8I>`*qguj!MLIJn@ z8vgZj_TY4HjDS3FyDineNV%f~aRw}7ck%3aHp$NDN0E2+Ena7URDS*$HtzMUehv>n zoZ|BnHX297Wv{H8{hhw_&BkM^7#t@)OaI9Tw93j@;El!?t|cQ@JvO=F#T5o&j8v%i z8iv!36}veApmO3-fkgI9{=`*X_o79zoopfz@H^QjZrFymmD{kfFgQeClwA%F9&-l= zj1#24c=o7!W0+JwPbIosX&K&TJ;ttdLKTQ+48IgM#zjgCJWBj8904qOyiVtsIIe!^ zCtSikn8|pzB<{Cy5n9hiOAamvySyu1(&Wsn5W`r#QY-bpt8o&XRy^QcpUqQvm&58{ zKO?!%qkPsc^Q& z1&`JiE`(^|MKGG1R2zPx8`_L4>)FsDnM|qFj*EOB`VXK3&Vr|BTI96xp7JkpP};}j ziS`Z)v$!A(<$&Q^F5LJ+UgnD_k!WemJEr&CDb8v(1-Vth=whYhY%~U=8RTj4*$;|s zA7iHaU!<+>JGM4O{_MqYb}gqVPlQ z!)l|G^H9^6n$Sz_C%^D2xg=cA>MxHq+Hk;7q#4?;qZ?VB%@utXuU~AOY)+hJ*1Q`J zzj&1=zIL9qS(X05yZpT?A8(FQ)}Q93=SY!<^v2DPhpxh9zQ1)wO18tkA0)CvqVK8-Ky|msdoL-HqNHV7^2A!2SOd>I@(Xw^ zzk?=R%bNKeJo1aUk(OE)~^?lztaUyo?*nRAX z0H5FpJlcM3e}7}Sl41Lx^rANz*-vuF%+4}&&!tB$cNFh?x9;Q|bpl#Ae3sE|vG(J~ zUw0tR=JM?~Uln05kQGm5UMxoi2p)d^o8>s$^m-{q_p0NdgBg|%!nt+lcDQR+AqS50 zgpT3O7IL;z!h5?nK3Kl^`twqJI(e*haS!i(z3hjJD7jgj>y1s$dNV{$G}~;C;oG;` z4yZJ--S~kMXbYz|)&2c<-*(*ix63bn@z={&9bhnYPH)ual~Qy5_>cZ{`Qv}^N0U}W zjz53?X!$CG{9*ffZq%1gKK^MARO|N&HvRJRzv? 
zMG++#zkA<(zdXs2I7%=J#+ixrd+Y2!oTHuXje2U^?Y1EPN@-392j7PePVu|Ov;Ofk zPX!7=N??ijyGH@a%Oq4@mew**1sI}ao2ZnG?AB4$+Kvm!}L7iYLDB z;rcEEbLy@w0?0HhLZPVT-;cnpehY;wF=f0saDn5Oz z-!)z-(boDxQ4d_f7v2U(wdY;Jn-MogIHyR-;A6P1Q_;0qS;ia3Y@*G*__Edg+M2#@ zYal!}#S;=ou!-=V?)p)&fn%!fUE}1vQ^VemF*GVSYZ!zGwLb$~LEnRW#sX~9nQ+>R zlb0AyCf~O{&!dF+NP%fsxzM4`5#}7qD5LK#eVD!vT;USFdzMQ5!*Bn5S=N04uqXqX zK@K)=QWDykE_aKZ$`{!(}rm8U36qOuo05|*6)o3_fdxbX4n~CXeEV!ej$D~nP+t3soq=D0amz; zJ)WP8DDT0MZuL()1FQDx^6*uBVBAKYq`)*W3K_a;Yjt$t^b||A$58q4cZw;csd$2; zs8SVZuZkD~evmnGN6AMH4rTJT+0St_Q);4F7}_kU=%+{!I60IeHm{l~8Oo{6S^p63 zDaJ{QG9K{?cvcz!b};6Y42^Yv9B%LS7bx*uVD-P1Ka)FCIUIFWw{M&YDNHk?c)|sz zFu}xDxM7g^GalsNhA4KGG0M!0*#LV*XfL40>a5ZFrOk{FMvXGzXn2cI>zXkd90p_k z93>;0YK<#+sREtx8JEM*bTqW@XjkQ}kLI4`bKfg*N9G#e9`q6in0?JVrNJ z^J3;^P7UhGf!S&+=KD_WZKUumibP8sUVVQX{tsI`Z%oOYnURPutPPm%)_{ngepftL z*ESguS!gRwdlvpE`!LbJ_$hc-*+IvlVUTu3tJ-mZz*b(^(0Xr8Uk9JyY2)&~PkK=X zh!naxbfH-_vv!lx*qDc+fLSph@7)pdX5|Ner??roqo@82FZ8JgXzWzp0&8?%_s}Rmm=Q@jl%rd^~ zvp!7SCwQX+^mk_Fdl|gon%>l=-@`ZEP&bjQ26}lt^%84j@D{P&>9}fruvSq(@%rU* z*(+}yjD`Q|v6fFt*|QdjbKDtKXs~9IpQTH7-OnIbXWt=}8Ui!T| zca}>zs?TfxC_N-K??T4=wVZ&R_Sv-C&DdA<*$I`OeE7)(PahRgK1vo}Y1Ykbfq}DW z0!M$(ZcL5;+vK97e93^Dwe$Tq-Yk-TS;8G9eJu+=`J@xsCLCuM(}n{*`3 z$zeL}-uD^3?M`IC$subc1+F*a<=qy|zRpfP^4WnOoOA3 zEw+80WAUt%q;J2y+wr{Lj#G6unTRKx+_qP0?90RKH~s1T>zy9<-ut(Q-tV`_*lCD? zGATVzOWQgqI!{hGj(P9AgBsdTw3MY!KK`h2%dU!!?zU+6yRUz@yll;alj_dDcc&Sk zQqZDB&g+No{bV`ZYom$A{Z)N=W=11;wn{6ze&bf-n@mKMdKo<;z=w|>EMI)rg7oB} zgIlCjS>N#8w_hb&F6XrDFHfBG7OiY|pN={g9=03LOF2-Sw@r&>MhMi|i8@F86qZvA zAcXN^dJNZN0uliu%ac255GYwu6OTrb1>kJ2IZVWRO;L`JOqkaeS%96T-^?b?5iesM z@y`w_m1R(>SKHHI&(u#jQPq%;sO>w$FAR4LtS3T?AXcCE$GOnAfFDMz?3|9&$S}E6 z7nSX1&uhAFR%6Yc?bIvNo1$}~-Xp?15ApEuI)Xv490^l33{H^{eVbz&D>EJ_#;vdF z?BT#gxy`~xaD&ajR!+q;i(vXR1|~pwK9%TqFsP(k2;oq!b3YKb@h}ZF!nryp$FuUqJ2`q6VmC`5)BMZy?iSQ-yBHwG*?hVk{1@UM>Aqkz_S8{his zWTqPF9ewOshL2PbQ=4xpe2}prx0LYYOe&`?=?%wkQ*gb@xZUX3#~5e$=4!A+%vlG~ zQOf!agQZ9Xo_a|^a6!1HB8jo=0b@U;sLncWpp*RRfaBd*L-a9VUaME7t z4;aR{2!_E;Jj>V|#aG*M;a``UMrc<31U2v*VXi^W4JCL)Rw`iZLxX#pjM(^3l2OZ38-|$d%Bx^W<)3N%p5v*iOIavzQ>D3W^!&f-3;EeUkJJD7BVz25yxJ9nW zr|tTWa@;E*IE!%^vQlkCqMgol$^#S44LxwdC`8L5riuEl+GrdZs@j-wA`k0p#gj=* zH`ggA*$K|9t*&E2jyn3+dWs7vA+uzYw_iDq_+d0G0D(^A73*(jtW2BWhAnHuM4-u; zIV!tdGWIiqUQfP0n(f`9k~5e3Qn;A&qb3RdLOMHDMH$-UK=in3qm_*D(OGMMCw->; ze8&k(%R99NY~H_nGFv`Z%>tY$b;e4Mlz0$B1_@qOsQ7n-5gTWhWTdYF19$GJv#`YftQ`Ag~ntKgY_WXY;GQR9B!^s#=d8cNIonj$2!WrW)z+!N6kJ+4ZPnr zn~#d155Hs^?Aao-K3obnAOGH;HWN`A(f7Yx?%(~PdXt|aN#1-zvz$p5-TmfX>79qm z`!}z*5IHBI!G4h=WMTGx#=8aOH#;0c#ND=`0xCCJX8=z-jj>ZST@3ZR-+Y}DaoBdV z@osHdXlzaBxeJ%;YO^lMjLSJTAKm$&qk}(cw&!A-`~R?f_4zNBzxa!Pw_J^X-@ATu zxgM@wr(a&RL*IVsNOmB*T3XYYt7n$4KmTn!|E@9kVEO6Ke!hJ2(?7`($tenV-+cae z6RdZLfn(LJIl0>51lO)LQ(b_Av$j`RYXH8v`(3jY-$kd}_5Jp8DgA%80|;7dwS4sP zXCogr59RFj7!w4(_s2l(JwkUP*b)3h z(PAcB#3f2Xk^w^g5pIHrSX%{5_i9%uZE+9+2ft6c03l}&V^grFjN8xZ>aVvL_fvS@ z@nBVWvB}g)Y)ym|QPln@wulo0>C@TZa^HJn6b$oLxH7A_TZJlO)lYCvf4u|7m?AAQ z4iDicH(EZbyuvy&xT=Gzu`+dHw?uLEaRvxAUC@V#O!rmJH)Z8wzzhy+55D{nFy6Cf zK`%r7FgTp(i}4A{QL6A`jRt`z0zB{c!kMB|aIF)J3-F8}?%vF>R1XXQ*{6C>6~pBC zYnc%(V%)zKhhB(Y8lXx0Q8yLXyQVDFB}b1DLg7qw_+(s94F~B(j1m1x+6;W!Rg*oA ztyh363#r{T%|6D_z_%I+l#KCefF~k5edu?xe%_r(Pd8_O^%zqRPy05#o%R(zy*L!w z52ij>y7jCvRL_74U&cZ|S0iX>6a7{O4*D%oZMrS zz>t{nQA$N!3|!N{#t}{6&B@Ol#gGHln`fIxJ(`Ho>M)M;Qr%Mh&Sa<<6m3oau@0njygi+4=$}958 z>qG*R!epeyA85QuXc1;c3qZm^u(uej@*+KUmtf!-bJ1{PtuIHzPe!CMjyD+q+tqhm z8rIBe_8dOpMn1byo!>#bhD+A5tpZ@JeGCo;X&*Cwv!{1II1Up9o#*{8T0vv_fNIhA z>?+jVF|c|zG!=hu=jbRsju&SyypUc_MqAUG8rS-FHaL|NNWf!q`^WHR$Y)q=kVDl) 
zCzuVicRCqbH`gnepm6j-G{Ctqw93wUrpAulv`7AAfXR*D-gL@j!Hjf>NdKhJRNdt?d7bSAz2O^hxYp3-saDVv8t@`C!`)v?=zq$i)%J}pWhsjs#( z`DwgjuIf>*w^YHfA1a+X>OwS0Lw21-wVBAN21E7@9Yw(T7$dpG02Wuycz{87vLw^WalbdR-P+0Lr_L~+KY@eX+sCIhNu8S z@XucS)rX$%|M2}7r1#s2XVN;7KYML$>Ey57j5adU`7Pve=uB?m3pNk^8N}|-ShekG zGqu(e=mWiDi}U~t?)W&pV9~wrjOnUvHbK8|8a)%r}1X#stoKL3+q8%qn8}PjbPu(srXH2BhY;)Sx`%sh3!t?FLLh` zM9#MU+`M*W0%zz`(Ca~w{4aj@`SR`EucuAxI9|Or&gJ@dIvLrrb>-7#?@Tiwc|6;j z&zD=bis090JVZYHP`SI`e7C%RXZkXsmvdnN2n)(zGCo zpS)W~psKI8-R`dnd~uh;7P2BW-Rb@L*8ecy+>U|AXbmpR<30TxLD#R-PcH^V&sG6h zRl3(bkxRyt$fl^Ef*IT4M&8*h5KgHQDdSxANx-e?6EO#l>J^HJ$El1cie6EkR+K9* zJxhPULk@xQR8V3_0-#4Z7Jm=>Dwqj~`ZPU*zmw_S`Z~K4gb2p|8HP5Tfp3I&Z`Z*! zs-&i_>RAo$6&Hjpe1K(eUZKI4-+-(}&oP4ts7S;_4m^or;g8gw9e!#7_Px)@IvoRj zoaj)`*6|C&6@(w}k3rQ9LQYxwWHdoNUKK{=gL7g;`okbUfy;QWlnv6po#4qY*W@8r zr*>UF%?42=8k&~E__1?qlJm&M4$Rj=sD2FmbTSancC>dL_LjN z{64f=9oEV`n`p_wa^=RgQE*#1xOJ^aSB^!j&IppqAo5}%9_MqrLrO%Yqz4iDjIuea zBCy?Kcv7bDq^}H_p{YSabq@-37?ReGPruMGRZ>015}NRvyT;OSxL{YeKGmt=dkStG zkE(+kN{jQ%a1lwNyx%rO>m4*z)%7WZKVy0KRl6&t0OaR{N;`Xcy(;}pwaB-&hOnEcY{O23p`C>zDVFvPRyN*@^?WSa{eqh=1RK#{RuXE`|= zYm`))4r3ZkznavZ@WRljg!MohDJH*-$IKWXr88!yixa*J!E2O$Z3ke?>~k4!BPAO* z7r8M91`6PejtEY)R+lDVl%4Fr9Knh;g%#dlZhcokDSsqcQCes3!?y9AMcT>eiL{1? z>3jD*BMTWHy*2fc-RQx%+66>CM|&AK8AAeK=!DEYm!e;10eUl@+-{ee;fWRl!uLf{ z#b%Y!K6nMb9jya;dU5eCl7-f=#~n!hb0m7`^||zY?g!&U>AUNdl|QA{98RQm z;E&fB&F{Ah&>r1gpHEYNILbCFd7B~j;=nAxn`W5=H8M8Sp>VE#se*QUp~KdG)&6m_ zebM2ii=`3nl*+=8Z;|J<^VgT1*B3kB>PZV^pDqu-|MhazAyx0)`Z$E&J zOIZ_{9w#z7LQ7Kc=*CwCXWtBsmWtDRFLNqhzO;Wl+G+=)bS;PXg;@&`b$U0t*$EFu z&f#>II_Rott?2Cfve}lG(&x~D^$54xk?3NnJ9Pi!;CvQ8Zk#P&-RHZa$H{eY8XxIv z#^`c;>yK_>;D=Z1!9wWRi)7pCkd|hdlJFiw^kq5gbcNsY-EE8cq8(~pMc=FuGUxMT zEhkz=#MgiKtBB-PvkYzF`oZnxX78=XayK5?p?LdwGZ(MQT6rYmy>}%8xAdjx#oB^j zeevb;^o2b6y51U)D;MqPb9H%`<0W8q?fTW__ga)}=djnWtpV6r-oN$9vX_FR8=kfx z_(6JsHAfyCJiR{)ojD9+YfS|?!k_8bKMZTSm|%O>fSod(fi#6W3O9u1&J0m8>fM{D zMhv+!YtL(pC*-1ZhJgsM+8Kc!(@hTgFn;&FN%%!Ho2HE@nGiDqurioq_8<* z{P(SpY1O&KXzKQefkS~%!mEPUXK-ZTSNBBsV^+#Y3%ow%5T7bkLb!oz41mgUKHR4a zCiSTDE8LB!&|Ckd8ZwBq)!-H)CXC@>glXS7SR-~x?fx^4@Ior93r^njOk~dY>FWR# zltWM{qatrJ4j@0NM_q8~*1HTQeVM3r_f4^aXK+*dh?26kHP%WKjEWN@AUdH)C|%F` zsKOb|AB~+Ed!0Ig3ocioukqfQ`WvqpaHi)~aZ+}ss{2ma9qTu^f}8UyMMJJVc?RCm zR44tKDf|<FsS@x$rQ!N9S{59u(TIlnUVps@rs)s(yEJ1=!8P#IVMg4{sMW@Q zNuP4KCt4dVdT0Ks=d=tQ7{eNvwm9BFHVR_!U(@oW31gA&)oDen!2#z6FX3@e1pM2h zyG`d_ZWo4jM2I#vcV8^muCx_fkvou!ur%TszRt;TU<(7RNK$933>?9SMg$f(zF_1` zgtpi5 zmJH1K<*;r=Z`OpoiFS@tfSV~Dg3~yoUAxYhir&jfc=5c0T&lwq>pC+Lrlh~s$20ML zjm`7@lv-Vgr)nE*>vJ-|MdtOSvBeAca^;s(zCaEu4aetIXx)ZO9psMR=qS>dktgG( zj{XNjQh_o@hyJXfTluLdRWNTm@GZLYE*S6#zPC%%q%<~$Ha8&uZSgA|Kka;M+V~I{UUR(QhqulZI3}l**W=J zPVuB3rcjP*cRyq9W#ch2wKm{&BqyUcoN6~D{|J{c7z3|TE1npsFqbq+rom*gjcB%IlN?P#mQPu@#MVseY33*k4rDD ziD1Y{EdsDnR2ojyyKS9G_;^)L`f4=vpbMU*37$QdWU8In;jPb-*oH2M-l$;hbD?o# zGP1nS(a*I#a95Xl(VCh$%u`oCIDFly+&ZQTtny90z1LGEBCI~caraiazT(YyZMclM zMXP4#&Tgy$GERhcU88I26GMD6JPx0DR^QP%dEs;FcRx7i($8>x%->Z9{A>~gac9qvb( zoM5!}s`Yke5Jcs{Cs^miU|V)xyLxeO@V5J9(`UeCHnLlKf2ZAoHqSJpD5W(FdUn3_ z4RzWI^ti>KbcTLe(9R&Vo@Fyf!dU1Rygbd=eOyY6w4OIFpEjFNfTG2o8E36OXaVkv zhvoOT_kMl8UR_4PcYN`(U3NsA_d2!qQZjw>Wqc40FPB2{@$HXh-RJA%)#2l#<@0a8 z39hrtNAKNkcB-}`T7e_@uagrQ$D0?!!_yX*zHLV|3d9)elPyEfpHKcZ26pM$-w*G7 z;s+O5z>&6}{Xy#qY$5MpI1BLMA;h&PxNSYNH|dFMwp;z+MhkO0b0J!KQ;^!<%XHMt zFl0+{Pha{Q_irv25`42p;mXbBYC9plJ9<9rqy&HN-1%^1 z{(gq{>zt_l9IyS>3;fOB{M9(xciP(X##&`v?OfA8$_-~G$~e)*H1{(O0UzZroqzL+$z zH;v`}do3LP;k$59s$;212jR2&2nV2;m&>HCv*7kcT8fybR2@MeGc z>Bo2Ke+QNoWWW3Ey>NGB`RudbTYmoYpC>z?E#KY!a%Kj8cHSL1MZ%25*InUJ&(;Kf^2dOZ5^3gO@_pu$l3ZM~IaV;v3)qn;ft++t 
zGnJVT;D_bw?!E#L{5*O0Y}>s~PSzRr)kd4QSrhtGP~&o3(lUQlQ6fME##oHDR2$n1@Gz5K|jJxpo0g0xcr5a`CKEbJ^f#(2}9Ee z9^g&2sb@;fxTstK&AhYtc}{~-AAV6P{7uch=L4hPI9`fl@J*?zj~AB`5Kq;W@ zf4}tOysn{_&lJA%@umIfqz)76&_{z-THsOIv*3Q$1aTS+YlBR1#1m_hTSmaZtmCeT z>65z16kkRjBvW|l;Nxwy9Q;+rmNAv2R8`bfL56JL%}-?+8=SoM_ICEuE0&-r5L z(PpfK=O`igo}n;`fA8UQat<-J#Abw2yfWV0#vFyGSHg+s83E!S0{UgU)m}H@4Y_Na z3`7$XH6?T_BSW0g1y3y88M8_aB`O!M*I&D?t$u1pUyKQp{j7v^Pw+5fa{8Fkdz$BU z6t!-ULr6za(Z+I!xE~CChcA;hCCjn8M&kggzx_ycL%9R}B;-y$tknlHVi|MzYgW6W z1Mlk4uV)o(awaGGA%pry0ba2;$-7{aqxvKs5j;89<{0w|?;K?ljy$&t14jJFTRZ&USMg%9IZXfS*Q-Y#8ezyNhzxtZIM4WiYGY86ZneP^oIheQ-~M{`aT5_FPQKv<;|%_j25y3@4$=I zE(Q-;S%sw7Q08pfuz`VrWNx!-c{jz^{N z@>1i8M=5QuM`5qu-meeF7o+xhJ1%*3#{lwP&!-Jetmbhm=ocIAtOPk}P{Q|}c*2;Q zqpMGxaFz$We2)~$kG4OXAofzd*H)>aQp&IX*y&(*@1#g?HjjSHvt3<32lMTl@tXdH z%iFD7Sm|+eFJ9t(Hx5rPm98rKIk@3js}jZq`f)l?#&@>6|MP>*_rCyJy6!dRObO6DNltfACUuWpp z0X;G-98xqnEI22uYU%I0#ya^zk$f3`Ct-aztf)ogcB|f;RiDezg z{I_DkUe{-O7TWgT8f(?VX?xfP0XUZ}ra>M}GLnL0yYXY3FvjnH|NR^Y!I^fWLou!v zUv%Z5m4bGlKYen)GSRsBo9|yfYPIU46zlLR-sblEn~y&HaPwZP3iKz5`yDrbbbLP} zI7dM^LVrfjqW(NJM+Kz{Hu`-PU*W6 zGjLG9Xrs!T7j2SgY#v?}=M|HLNU&YryVe*c`^$0K>KxdNMNampJbA94w9(^PdbDwC zb;WLf`B2GD2El%td#xPqUx_Xh-tZ%0=H0vIgpA_Rm&2=rr}i>D@XI&felrm_2N`6O zzj-aG5_)TZ%&OU`zFlXpPY*y0>}p)Ar`+H!<1k0^nMP zspWVf!Uc>ICst5zT$#{?6dkl&#~ZyHQ5>mBF#4`a!kWU#^D!bUgb^T^#iS;QFLlWv zVH_yCS74sbxn{uk;wTL&2oaa;8m)_KqN*+eFpw}BCCwu)qF6;C-aWUVp<9)q6q(3Y-$x1VT}tr!Oc`kag13HynIO}!pHYZ3 zp#Gj&u~`PydHuZu);igZ{?z8Ed%)_uB}a893!0!)&gN;)hUNN?qzkY3r`BrLPGG&FFNg4-m@IGN1|@g)t)`#5<*c`B=q zn9oWX=vFu2bkS3Ne|imG@lo*i=J4#`YJID{bLF>sDV(lpqrc#~Xn_}Y45stviw@_l zhg)$l44;xETPgIH^I!^NETsIm*kQV1dYL0H~Z92#)S!t^MOC_tjW)1XQ=-1SiZCItS=OF_6h3YC%;?ubyA(A z6mL6Awtk@7f_wF2`qnt7P+Trayd!*vzWt@dUyEk9t`{tEZeUqC;mKb{RZbQj`*0%= zd%WcO6CJPTZRc=$Q)t-C9X+2_R&o<;WHI@LkDz(j+PEP*lDB!Nr7{xoy33HO9rKI1 z;N=U>CA40&ArI_kbQG<_V8|ZE3nk%J;Xl`J-Ko~?%^&lEzbwF4ur{2lz+5V8g!vNk z7?M2dN8wt+d3%`sDM^H~%;<=x)0Y_e)Ct zqQLmS`j>yP`Q5+%e{!^xeyB5W^;-gU7aEXoVWV+f-68bCA#yQ(gD^ZT`jeHEesmw) zRR==>-bZnV!+ypXjFEqC+>jM-_jA~VLpy7FGguzkcI4jZL!l zX!FIFQuxIC(e}CV6^wKdksjo~Q`vql3qaKkZ#{hc^X7}sKAQt4(8Bo9{*9Z3AXOs4 zHN5h3;Wc3@a$b%XN;{Ncb6Pmh-A}HibUQ_|`oq~yeQ=2cv0*|xrx}1J?Qj}60oH`mkf)gSEwee2|CK*R!n`V zj=k$B;kQREjcvEnOsbxM~K{(+OyirSWT1q1rSwa|oEx*>rOxhB4XAMQ1nV_oA?g@Rb# zEJLmsZ64y-ZiWKYWdFXWbf|!z_erS`noHf_W_TPg%@JD9!jLQ2K1( zi~*v3lT3dJD}RM21>C@{4ogF)uXvSGDlA2TM9h!+X903BqVCg-7 zk#bFB+A|?`Zw92-*QAdQNvl!#E|jy~ZSjpdR5gi7I~I>)FcVi$j8HLv3C&qPZ!lGQ z^I!ir|8Ch}S1iC%NWy*mK*SP|`op6X6w|)@0qfg%8fPV=aRO$v(%%sh)6P<8*QDxm z!l({vS2^&)zt1cZ6M$!tVCpACmJ+PY@;2%V+E9>A@)*ym2BaxF6GO063ByWzLTrYX z=XmXA_ZBYKgccn2bKnWzEOLEDt0e5CG9U)VSnYls5(Mg~4En_LCLEuQ1K0GYzZX30 z@5RLfu8j&sCH*|Vz~GCf=ayE`K;7r?%!8${f=^v8hUV~H?Q2T0i@#9oImnj6zY2`e z)qRuapuhj{(&9Dzvg-6~u5dhj-Iu}F`R&1*`hV&B;)CJ3UcSJs;wFw!;MLLZFZ&~U zUmq{&Fd2~jeHQr1Jl=ks_Do;5L z9;d>*%jq)xsGWryW56V1Y{KJmT5zBow<2(m!i$fktq@Mf2pl7)K9N=Z*CeDbDMI*?8QOBX(#GkB84GU3-Te zYpe3*`3jz3#*<(TqSenzt8*_nDA;&kzc2V@-@P13Wcge4JU5zTO5o)`%@8=qLkUNh zGHUkHj3`cc#pzHt3io*zEW1tSyr8qgyHHb?9hzua(J_*Co7?2X=1oVNH}y-1h_IUS zbu?FI=oriODG+Njp6?r6qTd+#=Qw=(JTGz&rPLV@e9y_%x2=l651Mh5;7iJvF-Yzl zmG9_MSuV_n@7=yp4#`g{v%mTJ4_}w~T?#C*+~jEE2>-j_yh=GIIMZV6z+5=ATTy$I z?X6amt`$xpB@D&F@lGY088h67nUz*7CY8zGga*)$`JPPE)D$(kt*?O%3e+7ZxJ> z2RVS)FN_(u&dXaDQm8Vv>c5@O>K`7$OYxdm`**yl@YQ%eIZBD^zWG4d$Jv{X@y+w> zlqWh39((qTK9To3{DF(HiB>;L(Sq0G0rnS#6Rfk^USD1}Cagd>#KO2vSs8~wgK`{= z>aen61z$R((~QlZA2`S$#hgQ`J~)x>&IhdmOlrc`5*yb~+nK8_`E4iku(pf=nFbuM zE9@k6G_NzhDZ$-umo@_s|KJ;ALS_{qw2XG;jB|$mj5p8YiCLMeO?6v6yIu}XP9w&^ 
z?Q&YWI3&#DXrUZ^4h&XstVq2kW8kI!`W4S_rPls|Cp|YG?YE+ZwvHlx)$y}Zs9mbh zv&tV$)deq~e)xX!qLrFf#U|rP9pzjm-*~?V=i%V2x}79<<3=(xc_tmv!o`Bgd}^#- z+o#91iqTQk=w|F%U3vfGkMi!PK)1I*DA{-SzAk*|Veik zk8{3REb(GY+@P#J%c1&c37KWfdLAiJnNd2YWwO(VCcxHGNz`s)OIT2dyC^dhCQLbs zTMSJxKW6U!$wlcKCB036+Uei?b)N`viiCJg3`!w8i_xXo*fSsr%b2x4BOEO2Vo-y} zx!)%?9X7$K^q8>f83l1W;b{O7TEeMVto90EFoOUSjh8QmHPOw0stptDq^Srl%rVQ% zF@mLVh6%wCK-C?b-6tTrG3~2kg_~qehFjlF@Uwxk&xGf|(;HqPh-UJy-4Ufd*Riqq z>!;89I>tob2gkK*iE=4dMqT){wtX9FAj>OlwI zGW;=yO7-`l0wzRnczeO1-sz_r0!A^<&~L%z`#Fr>V+Bq-dWmlE+~*4MqAr7_`{85i zQ>O~%oyzogt!gbp%frD-_q8;JNj37kxQ2I0-swV+|4Dt|cInBC?TXdKc!Ghcj{f#+ z`WsFr-z%_c3tnesa5X#k=C0?}G3cs46I!BO=ep?BShi z;>g&^b1KOhbE&m&bguD3holZPrD4(9nDV&3(N!P>#o%#t^Gv<>|o zIGTj>(S$N?Q{h=F2llg&Nn?Zr_l?3E^mBLuZ&-zFa=DB+(MTF2PKt3ja2)iG<#By< zOfs34PBU$ql%!y}mSRnrQm0LGZK)Vq8h@NLR->${Qi##=OekhDo*X%DPY0REgFWGr z!7q+m3_LkS;Sl0+aiEC}vG{O!AR6r$TMdXSx6Ah$>^ph7$>`%!qFCi{?UsUS#~ejsM=3q}=ho=tr(Ha4Qg>HHTCmV~UIv;&CG< z-G@%Tx_me>%)Fz^-RQCSFLI`ubHD$ zoXG!6#Wr6l&i%tYqx+?L5kD;5(Ko;Q{pP1aQ>-4nX;UXf73T50IcNIcjgbLxQ`^hC z-)A%*Fhis(S~C{V#YJXb$h|cWkdxg#AJ-#B8cQP|YCjasD*A-E#Osvt8|QJ*6ywYB zB78DvmNA|3omaSk;DPq6@%MIkNbX?#vOo>LK{qZCpTdegl8 z?i~X;2XTGc%S-<*n#_jecu)P;3ggjldvzQ(aqDKvezI|No*cff+W7puaq&3#O`+F_(kR_z+g>Nbl?AY;|aaBY@w$ezf_+AAVa5|Kkbg+U>N$Oz)95azF1U zcZdo&en$DOkJ3Zo54x|_ldT*>4v@HZ_~jV#P6vZWNoSn+B|kp7Z9cQ7;=T4nG}tyl zJ$Nx5=vI66cZ=gVdBkmUJp<<`J=~s@z3MpJyW4ocAknQq@89iv@Vz%-Q&&2!_-bXJ zwbv}h9Irsco5rs!ar_<+PmWYUp?O5yn2|`_o$uYapQ58 zrHSM;Z-(8?;|Ut!8Xs7OB(N!9glVH!$FJn2;RQFLu$(Zd7&hFExy4W}rJ_O1YeJ|F zEJi<(gU8x%f;p#tN(SXop*7(XAmb74c{gW}8dNOa-2|OUln5H7y?eDVLNIqg$giGs zZ>PxcPHsiqt>Y$3Z6$CF=2_|nPSpot)@Lt>OExL>4*_SjMTZ#~dCF;Zqm%_3h8&|L z#M9g3q$nnizVycY(zCXb?AetSk_k7fRE@)haiNVlP0&2-^fMI#L3apAFA3 zFa|LYN0F?4^4M}I&`q*yiFOSP$nQ}FOules!Vt@Bxp^tD=!4cfdEQ(j;KAVCXp+pB zcPZqA;!r8&b`(AEI;QNf@}}!CLpGN>v=p3 zY5kkA;DalKAIBHtL56j0>3i4UG8l2_OcIShH~m~F6KhP=0${*^zWBCZwV15OAoole zJXaZz@eUw{UJ-l~7NV-v{A#~Gd2bZhKFrY4)Oi`88V6h92QLmEs7u-ES8VX8>5vTt zE{f_H0q5_;&^=dK0=hz>ZmwUH3ED(h1?%u=@3|<2KGphsN7H49E-${%@HvWqB?hiw zrQFuoFJo`sYaFP2__BB0(?9R3!?+yBK<~|jUfX1ievjk78p$K!d?r6x-@(8Wa zf%0RCe1{j!_{`(EmvTH~ea0TIT`;v;hc+fd{jwS%!wka$0iCj?&g;d_3xDE{*{>|6 z)HmTlE3tXw>X_2j^OS@)c~8$StvwM}n&>B?fBoUTTzFib%rkkZtAAuwymXjH`f^@L z1_w1|%61ldLH!SlAUE8f2uW#*)}`Ve2qPIHx~GrE|JpO#R6 zJn`|GaF&Dk5wDz>RCCHqSYP-z)=Z`~Uo3E$X7U2QD+39h$CBnxd`y|Wobw7yW26Q- z89VZ|e%J#h>H7tza{NrVS-h}ojxBzE7@)F@Cde3`goD1r$(>@EgLd;{Ny;A;hfk3L zC+{z%0sI(tjCA9+XU3kekJHHyxp*2*DJU|#1!8gjB`}ZXc9i=W-%}(ZfY|w1=n( z7p$*jyHGH^cE$khMRGu``YHTJn_{Hzy#G-KVv1yn(?NeZT5iAhQQpue^?w=D4-1`; z%XW`^dH-f_7L^d6zdC+0D?Xpp_m6Jm+_Yk_cV+XaY&$P=YD$3N2v*Mhy$m+{2`Onq zuArGcC;g=qQ_Sfwlw^kX^U6;cT{IUahZiRMRCFD^!MzMaI1vHyG`?!8&ycvX`H*s4 zd8a2{FZAcfHo6L9y4uQ>l^&53l>PnYSVuXlL&A^AFIwiDr!QJX2nOw#r|fro^O7SY z{Ua6`IUJs3N0M#qWkx8udQi+bLts{?q56Jbs??EaY)7TwK3VXnB2oRct)|I?a%h-H3j4Exf~>F{XVyBvgrkA#Ckcare&) zgL#%A@VqQf^ffx@LE%@D+Bm=}3A4HT(fb)?D+b>_1HW&`%Wy9t0N%mhSKEN?n(&(p zheq|}%5@4KxqOpsiKlj34Mh9rh0{EE@Y9TwYh@G?B4>5s;e(R;KPs~q8rP1TpWC~y z^Vl;m;_+a6^|Emrt%Qfl<@vl-qBrTojCOo-=S~}(tIzY5k&XkL{7*hs?nx^SGH_j~ z&xxj1gnryS&7lKFpEf?PG(H%4m#-J9_vN1_+ja{hYg`r*l=-&#>ficD;iL`Cg%EL8 z+rz<$vY#9ghFCS@T+rsVYpq=H#LS>gfVpPCH5kQd&Sdp2qQwYHNQ=*E)57yo7CkM- z?aBV;@%Oz~BJ5Ys+H8`ZqO*3sOAR`4p(H^BAt*5z>v7`5Le?y~)viny16c2Af8d}= z88n1qw-TknVfc6T{X%gy=~ApHrG)NG$}ySt3BxD<#0p9x~8GGboq%zU2# zSs8ssBumnS`f&yomI#5tFA;i78!S38-qT_L0nUJe&oKo0KAxoBF<{ROx=R!)z|_h5 zs(%Z4y$|7CyCrTWC`>xNwyx@65CK#h9vC(-v$D&$ReK34mi`Ewa{*ls2Nx=WlW8U% zQ{9^2lrZp{pb}QU+^bE-+*;A8{U(6ng}@x;ed-Fw;b-a&W*z{Fi*kNP!Rq1i0wa)r 
z4ajp8j8Ry5FrneaU-Y)=mwL4ft%Vx{?ZPNoFt7K}r1`lHF=TvLm-dqsgOBOEUUrY+ z2o??cOo{JNUu&=aXy0ShQRbpQzFu6$D4GsVx=G1~_g}o=Q(q6@fkNP;-|mMhw`Y9t za%cldJXrOGquzl_*YJ0Lz0>PMlTpTcPo;xr?ew&-Qy>0OyQn{J zCCC);I#2bYtJMRPDBI>-o(6R* zK8J+C{RK}|7{61nyysdl`vA}=R6k`Q(+|Bjp_y=u&FN|3r+Y0c=Nl}HGNH~}`eJ#* z7k;e*tW`XHX8gRPJm-AT*5I(oVbY8?QJe{D(6*=kbgi+vXg6cN`@DGI7QW+M8Q^%4 zZ3d*YA6`j$NHC!-`O6b7?7$fG?#_fSEl<;U{VE5qiL;sf8ADrJh0$~z>csx{?ow8q z{B&a`T48z8_RTTxy=yEB=|R)0c~8kZ@N%q7d~bc>1ldj**{&>@lrd3DlQ_my9~hqP_a5&4;B| z6!u8Yb9PDNxo4k2G#38&c9fN~&9#=}g{h95%}e?&k9ZPy#ux>b(KGv~`s+w&e6Z7s zka2)cGO}2)oQ>4sOj#aC@7lci`Xp~Yx!xQ?0XAMa6X4r8*iZR%T&WcoCk#;@U#4U| zecEx?DeYITMW_1b-i)x&0#-^Wevx3_kC@{*RWaq;-V%AOD2Xp#JXS8OhDzfBAp?t5PgJ-TdaOFUHHu0C-+TlQ|J7U`a3fLBK9Nguii`_1(Q0 zf5X4{b|l>JpmEA`-cSN&|KT^E;<=v9o{(S|9iV5+nWsXVDI00Mc&+|lYsG^1`Em*z zW55cCBpY(A?A^GMg8imA>+e&{UJMR$BX`PDp&a;IyUne4+Q|7?DMz<@*GB8F|M=IN zKlIMq(~cc3%!L;f=-Yd(iX4WgDhI<{;a8jE#0$;~IZ7^z1%H(%S`Xp)YO5;dtl9S> zi(>r2@%h}Q+>*Y(D!nWTEaZybbG`EFe)9Zj26)w*}OO?%TM25x^Fdnrt`TWiTGKX?T14aJojGaAsI- z*}p6*B)$i8<73*19NxF7*_>-k?KjT%(V)`fRc{VG&1k{fcmeN|0rn9Yug3h(;hJG; z^Y0iMwMX{o)3vhQFmU~lTUXj3e^6$m>(T5exexd_F4v#GH-$jY|=nK3zv@^ z9ME_JGiRxEsn6r1TNxr(Z?p;e<4%ePbb`PkSf~6Eeh#y`_3JwD7WrhUDj8fqtZ;$piuYWEN z|MRi|eIur>PPA@;l~Lioi7P&d@TtUoZUUka4m?X)fH(*zfWT}J zQ?%bTNHhkIa6_;<9@Iv^lm1GYVv@R0#s<+KX^?oIU^h`+)aki_0v{vvd)&vdh6mP; z5jJ62@S3azgk9bx9aYuxEa*>P`iDU%|D$+R(Vq%X?VAwL6YN58X0x@%yG9?Xj7=;0 z%j+_$7r{Q&ryNmp;7?;`_0!~>#MjU$lY^K(t8#D$7SE!Izka=g2S%Ch`)szFIt}o} zWOzn9go_FJ;({+ihtStBK`=P%F8qOySA?;rO?Vz7Cpbpw1=sXp!3WR_xTfxN7`40T zGGc3lFVK`Hc;K&!rX8@&(!v-XvScl?9zM-Myi?o^wd3v^}PyaMXG%>%Jtv$Yxpfe zHC^!%kNNZkpR2R~hk3!Ul!oE2V8$QA2bEv)0S~VC$MG1Sa2zaUHqO#!bWA7gs<9>~ z!W-;!r@!z*9+E4rnbhC{9CJr+x+;e*VmqB0Ch^%($f3wgol|5@4pS$dNKrH?>ocQZ zrz3{NyoxjDb-i4Og8c|upa5yn_?geahJVHw4L-0)!+4ake|#z=2aj!yx9v)blH-tH z$zK^C3W=OFl#NUL#v6AzIZv_X6^5TWGj)Ph8F-x&r)tx6+=}S+6cO77uHSsW34O_1 zG2~vqRBY=S&-NJYw;`?g>WD84X_SF#pHy{u+fqx0cDu3n=Ch%DbmUNcf}OnI=Eeqb0Md@oA1BA*MuuABp$~{ z#(A3?n>XY;Cxf)X>vvoX?WN#hJQ>ex!+3bBFP7wMn15%O03Qnf!3V}19)Ybma1<5q zb3idNp>aR&T?JDTiyQ6+!-!|lD)i<0>13qc%Y|-TeZNhY*UA8LWn#1)H+!wDKyckq z8N+Xl)fq;^*Y-+;mf?|6it6iGn_jn0N+nr8_j+z_p)~WnI$l?XmHm`Yxc}{6{?+E& zKl~y3wjZEuBy!f8&c+lX;n)3c?d)d7D=|%jO0bV=183v5jOB|%(3EVRd!n<%A_k^j8 zbEI<8dZN=Df7}@6HFpg9yG}{g4>AQC@UlB&g0uH#p?c<1`xWesun`!Y>3%Z)q@a0u zWSaXQeDuk%7SFoX1N%R%dR|Ikzkang{BmL}d9qds_xA2&AasIX?H@K~@r;a)8y!=IBo32Fz(URhkNpy_Up*hipDcUcCgUH!|8Dd3 zxA!*pGh)CrV=~$EwrpYK6~~1YM!)9pH-+H*^jqviUaZB_yz?g}+Ta226@wEDDHr-TVHVAb^SOGK(pKA*2Pn_iT5hYy z&~Od|hM&o{cKnFVt+8;PfJ-QV9SaO%-bN^}<`-Ziltw{r zK-7WW3rCe3PpgopzEAt3^!0vi#4y3phx)ogWO^42`ak{aOV0^bgK9#|v<)}C8=D4F z6AOU56_{Op?i!C(b$~%z@H_N{fb(l6n(EQUdTUj|bJh#PD+H@>B4Y=QA%}rDeXAZA z_HJB1-;!F>Hl-6>6a_F%NLk;}o`O9H?G^lp<}+ZYrqwUrEzYN*V}C(Nv8sxJ7kc1r zaOEC2;@`RNS@5qgsraSO)jDHuV5$M-iL0f;EKLu<07u zr;cDbmop287tg}^lvIB40YilRA$!Ozv`}aakKm*@F!uGvAHQIM`>A^XZ~kiRj*+Ua z;R}X?cUnpp3tBnlDe?J~r+hR0)GG57LqYBK2OOnfTh5+w6^+P0)S5o^1`j_tP0~J7 z66{wIyEkoJr7zmoTjN0~AR#~cv)r~Wme z%UQm1PVcsy?KsfJ@PGc_{m=jDW^;ZYd(SNSS=o@Za4|9q-=9pBxzo;`g<*f1U)H?R#@bS zclh085{qWnQq&JqDy)J%c=>RY_;D~)x3TrQm|&MNXBpEWm{?c`3{ z`nimSx$tWrz=f~KcCzVVjN7s>AbA*9wWDWc%)w>v>1_ZO)92 zTgFOZTkmp8?sorqJo512+0um7w)D`4%SIb5OogYdkHGY8;*jV_F5kF zydA4~=!2_}9Ln;{_sKS4db^H#E`nu3LV|_&{#|^0IQt%w6X|W+*IE$^35=o=&n7z> zKLO-d`{U#QNBr$zDv1n^T6sEYCTVWEBBSO2|x-){cRZ-2Y_?tYEf`yEVcsdYQ3{wO;Z@v>e30Ddw4kCOkZOTm?8;+M|35mE{xtN_X!rB$vOVAUx zqxp_!(XR>{K zQ51U5(${zk4Ah>EViXQ0jCNePf2+99RnUbt?oa>wEaunG z3KLo$t|=Jj)Ps3%3=tDs?eJK`p@|Qxspq6l4IbhJa4=%@Pg`uQKJ-q1!P)QJ*S7ll zT^Vb<*>!$@V6NYjdaW}0>+()7jQ4VGtnN)4vnm#xA%6G?e*IF~r&x8v3tsb)(ZfS4 
zwpV);4%hTExb@pf2OZ4=;@?=Xr>nB=APoEesXkTSW-)A4; zlYIL7dmI8_5sol)IfoaW%aIAPe({Ls-aqdPpoe$h38Ix+_+7lgVh=mm2bl(FIf}DeiMc4 zIlNk<^njy+LVUET9hWjZ$nKFr*HY5O%IlX{-mu^1*A9p(GzL62mg&}h_`%m?k$qMB zZHip!F_VD}KAA6;L!-~3@_HdZ92qO;JVhfp%i~t)&i}{Moh<3OGzngRQ_4xHc|3!3 zzEfXybxmDWw81T~z>FJU2N!{{X=BGduwccmuYz8H8DKy)U37H~-+1Puqj^dxC1oc6 zpGW4Yex%HN-{*-Z!o%Ie!`;KfgA#`JJb#pk@%|OIbEygNN*>Fg8yQAs>K8hY$3k3q zy7p64XFnIYlk74#P^6B~4IA{=&l)3fRPSR9W0caRUK@fLxenl>+{iGpS3k@M_ddVZ z=e@jv&zl?CbeuAMAzYuP5TCZ$@_MI%nqN2-E~cOe2eO<_;l>w~^!}$jz8MeSwG?9f zI-#5OSBgX%DB)vPr{foJArhr%Ww5C{SyUM@%`tAlWf$a$dL7v7qXZEg8v<0 zWlzb=LUDu-a7w*xPUQ@f*+|Uw5e3Wa7c9a~@WW(Gssi}AgvKcgjovX_jF*FGe!Eje zZ*}@Fh4tV4^}o#{{&@HlpX}_Eb}GR^qFm>eQ*+oNK$I7N&!B9o-sw5kd>#^8q(COP*hlD$P- zbbi6fgi{=bkLN$NccJ&BRlQJ#t)EJ#3ci^$8=rXUZ3=k5Hs5v{>!UX6%914fN?!P@ z6H@Yi^VA-L5zOs{Oiz+6Na|LbL8Ve_JZZz3voe&e_ z{&E|W85m(|RseX<=TzU2;_xxa2%I^wb=t<+_Us;}sLp)e^HicH+-10a`~6pC9D3P! z*(vw^gTa^f7>8(OMTh~y$gqk2eRze_*tc-C2eYBy*uxV;GcqDxJ&Gsn_wqyDFdU>+ zek+%LW47C)C!jmSE30bo__6VYUi3oyR7U?R=l;!`pVZ&d?FQG4jMppeX_(OO+9fBP zA@y~N`QzgE^;1aF`wyL^k?xsJ9=rnjh>e<6+@GN~8 ztnCd6Hgl}7-VfnmubBEj`qSIN(njPw`!%P;P4Ze314&KV-5GzW;4!Alu#Qjms*#_iSiLVTW#Y3}F- z*=25i^y3c$*QE@T>%A8XM(_F=V=}(BF5ej%_UoViUj83m|Euj6kDiT&Znkb78ftvZ z)jy+9;9wlws&N&y3*v{Q&!c9t*#SnY#PLb3q!E`FiA5tbO8XcR2aE|=oRM1rrp=gq zJzFD$6s&yDxQ9NrPKJs&4?L)@H>?W5s4NB7S}P}ka)(v^q;e#1t&T)d=g4q%*EL#~ zHC}i97ZhhbcxIe#jE3HGNn(|uk&x`(CC5lR=9Ux#1e_U&r1g~B{e*iymkM{!nvucb zJ+oK$z#KP@_S)xV7Mi*BZ)DiovE15|-EvzWxw{)RP*@*GO69~a1k!4|`f))*rym+A zT=qGQX?mZ;T%YtOpu0x9t}cj~b;r5j)2Di((W&G7*SV(LgSmx9 zJ}-Qo!I|`=-LNtslPjHc@P<0|32b9EXd9S)KeznAq3|l3#Oq!CM_*d(1U8S=7`)Xr zrwzq7RF~2G(VEXr=`f!M}}e} zVb?6W%1k*75s))K#HSE2{m?)C?G`xP!|VF$N59Z!UGA-Cq|JKfiIzR+e+YG8h4vv= z8TAa`R!ZH=qKbxIovTmXpWT!(HjBWFf*;rhz%_FTW(+a&zVdJ+klk9h*_1cnef=3L zvW98E1AAphw&9aDk z40qQ5abZ^uKOA>>0r{t1yr~^7Z8GR0ODwpPVrjp|NLog-(TC*Wh2k>sl;xVhnl_i^ z4bR|gmvQUKUVw5AHlg?+u)0A4B9kUr9ONlmz-dE1e_c6GL_P>c>fsnwqIkTs@L8VKP1 zT!7Z{orLuKMTzW_-{2aaFIz+}ro~(u4SkkV!{3G8<@UXl=CN0xGfpz+u$tebl}f*J zx8q8$q>YN7!W&oG;b_T2{zoxT_=rZr^>`W%cEkm+Hlsh+w@e+|@fnxpyEHcAut{Tc zH;v&bygOHu@#uH;TPZk$ZipN<56GCIeg&TeZ< z?#eaBqH=v)<8a9Xv{{?3;$wrm>wG+_%ec_Em=3gZPYlwyGPP4*(Z+m7BZRB-9qkSX z5G-%0i`7%nPOrM01yJlqRvV3c|M5?oC$FA1=dqqY7I2>Rmz(vpo$#NOZsy8uyF|r* zwe)i1)+}9BU2s(yS=SEh?Naryx|cSdr5``i45jyC`RVVZvZ@c)uCQ{W?QU-Atot{I z>00s3wVU@gS9eO$VpB@7c*j|PSGR6&Zs%^_Z#hcL&y}k;M&6(S59gc4!AZPM=CiDL zN()9AdC9tyie_jVh6bmNql5BY+8l7D_^6L9eZjBrh!@3h(|-^bkG@G0|5b5Qub$+J z&N`s&UdkP98OeMrm|pRb$@P!+=`^q7w9*$ge^$>I4<3}`^q}R7EbPXy%gL-{v;OYf z?41Y~SVIf3QDZ%XYs+e2krCj!_?BpU#N8epkN3jq%VL7EBHo_7PD{Tr`ADVNIjK)C zo_sg)LKm~R){@v(0_wRqUxmk`-^SqPHKDl?Vl1rwe(N>n( zq&Z41(qrDdc$&hNc@*ux`}+5rM~|N-Bd=|4-o7z-VBH_)Rk+lqp&QjlMt%1A{ori{Xm|P$0@Tj=3mxnD2w@HIn z=!*xR7k*;twxyPXa`t}mMOhz`%WeFWvw1sN{JM>~Z%bW97nCp5$!Dzf@h-<4r!51W zCZF->%+vAo_PaUgyo%;{KLWgdr#PO*WUrqml#fji3COm~e>}{+@p`$pFri75>u0%} z5?=&7GpT*<1U3TCE`(tRjC(Ge1@lp!D8Pd3D9M>s42zIM825E4dnVtRyyC!dttE)7 z59v%6gpBkX-tWt_M9o+CxUm}eTPX^bAs9L_S&8(3q&e9Y`^AkSnSE?omlbu1ybCakkZJzX%@@bpJ{_~1IX zq_UKW`qDkdgnp|}nR!!1-mU-o>#k?$I7Ep3RHYi>uPll86?^^BQi^hgqX8;TieeVB|kk!>Syxe`f zWeodqp(uB7z{Nug2kH-ht8BJH+uS!T=H8T!IQ4Fv0 zj^`=U#7rQg5NfA37!P79Ol%iYF1Ul0=hpU_`)=YH z>Q9{s-?`&cPUJTfh-)l}&&m9lNf7)Kl-wwQJIf{D=W-O(Z%kjzXkrAR+38L+4UX*` zH=HRZ^WB1A7ao$q%^Ok{p|kNc%M8J6v&8K?x%iqWr7Tjv6EGPsVna4tDT39{tvc~U zwQ(&s>||-E?CWrOsigvLX|X4QbLVL3;O2_o+g-}LxkwwXNs*Tlb2G)#PHBPdezx|* zYr*(7KZuEd`%C+|GlLtvPKNs!P8}C(*_=Yb)qK6Ypr4c!Jz$4-D2Z>MFa&bX$N#(W zSIH5tpxo&S4b8=8V#ggJTL@4~m z4lc&m*KV~NHrzZfMa%P)o~P{!woEX~;21DE&V1Zwj(|qnz{ve6fr~y)Wj#SF7a7hm z*}L&u6ZSTZa!Qz6@6U 
zsy_0}GN$=|D}{Uec1mQ5qCotMx&ALyw{hq=Y$sLei#Sm2Er%5MG?{L`!h6j-Eq%q;C2D#QWU*UNjK&>5B7syA35rjvs{CQm>N$Vw4>KD*j8hK^|W2y(5Jcj zC8W1`%J{Xz`c?e)xXl%U)Sa+6b87PTv(F!FZnjHbV83>L{Q9?>$1jSfsr~z(e=>aZ zDuVz$7^#+_$d-u%ukV&=US-gN&93D8w+mSBU1NCQjei*dM|84wIy2JEGva6D2)@)( zlo+zt65?M;7DPun;q{xh;)lk1hVnZ*s_{^?XAI$`on((4wqO1BH=DozhkwX8FSn?e zr^fWHJNGw#{HK38OK*5Y-|XIf(J{zU{m75Y$@C_r`iJ7f-aP$&c#)H#p;nt667q5_ zWqkA953{Uy&=QYRa?{}lrw5i~lMf84*Bvwbw)m>2@v#sBG$KcSdicZUKm6U_kCAoz z_U#TYxwiR}KmC)UkBG%eFkAU;e=tn?L!ZUyl5^+ENJTuo#~U z$ybye-Howw z;lVpr7$eRO;K1YGTj7|Kk7lh+U?K$ zjr-uCQG_!57cz?HH=s4Uoh zpWLI>LGTlO~2=?6brXNG(C8A+=> zxfIVgfBXz59LD>UKX^Vcc_aj77+5?QEOlbYu4}-;fK52e1Y=dZE0$=W#$v0S@iw&5 z8E>A!+h2k26HC?I@e-aRq`#|A)oC2gIQRa#)|jmy!6(*n@#Lm>ohGOz>~p@~X;toG za`z;~CtvgsuZ3qZqR)BXX`YEm2TeN0iqDOsC7LljCVa?Rl z--OS2q}cTyIe(ZJQNnM>iNcu)e}bvAQzm9}#!(6>8rM$R%g~>v_aucDQNhGRxSe7x zx9U)^x(}{rq*Uh96vOQ%NpflT<4afzEqJA!mf_6!yj0pGhdS)k#^qbQpT!b{Q^v%N zl%pGBN$Sgx7=7_{%d=!A+FN0k=>n~fs>f+U zDO6|=1H{?Pul0E^7%B9-Jon*Kd@z`fQZ^~e#%g4mdw%1mkuBY~(qcZt5BNVBdoD?Tyl3SC029CV;%P%1`m3ew{5T)A7HSE_tK{i$@i-52jwWMjFJ`evzPW4 zn~j_#r%p##g{bZpFjR~#>InHtZv`FySaOZQr*^=+64RM z3pox|2jTcO0uj4@^hX%ZiT9M96L>}rhNt8*5*e2*S?Jj z4D!bri{=-OfRLOkIUcViX^0NfjnG=`%;ZjPP9;ag_CU2~yyxZ>ys~qtgDUcV-}y9H zuC{wUA!C>On;*X_cW<#;{nH2hNrvJv@<-p`Pu=9T6~CLeKWmjG+1EYC1|L6qI-d7f zxHM%MSn<%_K|6ZmC;FxX2M%99iJq;dHpUzo{0*Tc>UY1J6h2?x ze~@CHoQ{87@5`oR=RF>O-HHpI%TzCy?VZgZ{_+n7#@ij(p{>isT>jxd`?H=OZ?0z) z{MgvMmjmEtUjJM7KA-eJ4E&fpBCpk;ac&_*^gsFVW^l0G&hV{+>Ur17QwGfQ^2~l; z3Z`HE>VMfh&%yBI$M1u~QQ!NUhrjFGezH4%2 zb%qR=iRrM)CP=>lMEGF>W5kk-Q89}12*4Ea2s4vPQegx~yN-D8X4#Z*z?yx>EC$pl z4!uheI7&G3d|)i_O`u}TU}=t^X{uPP5EBqmkZO{%i^-BDHkm;D?j!I*8%0$eZOczU zTy0s7QfAP(v?R`+_p_>o*(dD5GZS4mm*wAJufkCT>&Ko6jc{Oi^sYWrNYA}?`Zbe@ zNkrNHsc$a0@;Tf}7;b zgT;(aQQNp{}8I_wWrK zpdbxogPTh9ERd#!!PmS4=Xe-S7rfKgfpO6!qyVSKy{r!9aFel#ui{tzGzPS%KI4@sf@D-1X!cV0yw3<*8&KBx01~AKKPV4JWXXlb z6&_$t>!bdHcD(MjIz~n>fCX%fP<8n}LU%o>U>q=odR83@DX*-t;cs9M4&Y^s&Xm2= zl>Dip0u%03pK7AQE!T&i%k2sTt5dM-GCOT0;IL!cPfs0>9v*Uh42@HMw({;%1Q;d@ z&#O-cJcD86*?89kc`v8Lq|j&rV9?UqDX^<7ct--vxQ__KKj_8a$(Yt@+2Crpw28{8!~Ab3JT={b-V zuMeH-uVnG$2O8qrgS>{0ExuCt!Cn)+5UdY*H^&%nyixA1w~HCyLAa0>$DZy}LTb}k zy?T0iPA8&Z4ZZM2yf{6e)b>-ik$!%NSmXQk)~l5JOLA z*eUEssvmu47`;nDf8Wuy2RX#9-0k$P>eB9&43L8q-tB07+>!)zuoZAOQX1%Z*Dh_aW-MI$rGFMb~6vGa+gwYo}|P)l_Qm+MbN}+WFWj)@u85q zJ{E5Brd*_Cm$`*(+Yg5kc9M?ogd;f;@fxFtB;nw@;3qsyZt*aOK2`}wDM#3@okGuy z9menEhfU+Aa4}NHP%41hn0C^XqjdNBetgQCT|WzJ+Kx}2ojxt>YRx-`FFTp3KJ3-k zPd+OQt#_?jJb&JS043GGo8j=~gZrENckYg|>ohwj#Tn~|d6_?SD8jejemgpfkSj^! zHy2a58yiQ-l_LjcjQnqMBxBgDp6Ej9ew&xuu76Lss(7;P6vg|-AHzSuB26@+ zmpQ30-uO`PzhjXPb0m@rB!-nRkF+UaNx31I~CdR+b0+Rq*GK(65M%xBPuyn{5hi zlgq0@i{#_&q^rV;$cqW#yIab!=3<6(!1dES(2CoYQjuNW&)AB8;KOel11=a`4Gn<#AdBsXkK;3TWdL9_0+`t zu*wKR34To|28y_8%7IDgBMEkP@qka#)auj-rgaglCKpy(4G&Xa%!o0MsRuD95qE9! 
z7C9!+KoY-?h{8As8iS;8zjmy|q?H}Tgm6+pmw`Z#nM5gqRXumbO)_Q($T0?#>$AR& zaZ=rrB)-z4S?{{0``sTeUT`WgiM$zUJ)}euln2FgQuy?1JXPTC-@sLS zct-X5F#o2}XaV*?8Cd%mFO1UCJ^2W|u}Dx~3Aw)OQ$WDZlpO;u0lwg0_rM8n^LOBw zX1ZGq)id}D#{}{S{orOKDG$Eph9RlXUZ3$1tixN=e)p>1SkjLB183urb#z+zEj5akUJV^#+`{ z&S_AgMTN>Q$3@TA{~34c;I$#srr*IiirI|&gm?m%BEeCKew+kiW?keTxpkP5LJ>&c z*=!vxTAiMj7Njn}KWePw@!B-OpsP?iDQ-Aw;2ZqLBK!*fIQLqTx8g{J0tnlX2;A<| zL|K>dx)l8F6n-@IKGRd3=(C&B%xJJWW{eC@!tF9D8vB*ee|a#)vp#$IF0WgY*6h=% z@9_~CD@3mvtCxaR=u2bHQJ8X0bFggjBW0vecJX&j4ty8wQwyI~LF!_kd9Opt8I2qP zdpRTEptLqz}93A-{uiUtif)joyj`o@8KV@gXC3xP%@hVm~I0yOR znuoJmhy9MCIy9=+Btj~qMSQSprqs^5($;#sk$CX}Bxf~@N9ViJLcRl~?{^xn@ zFOTB&{deDWm`}=2;U(`<)bQwqR%Y?YwVkV*`(NH48ayj^=1-+9QTIVG@3w34k|gBe zhY~E8tOr$|gJ^_vEWi)`&QN3i(sFF z@w%NXcz&277jJOx8ABYel(fr@TTV6n`AI7}RohCFV}7eQ!xYEui7{3?{0sPh zJL8h<<-~YZ>K)8sPI1Jx@V%Yn^}Ce&sk_x2;)dgp6UMkkJM#EVWp=mqO~xH7VrxFK zv)5{yRTL-PacbST)d2w2jSeye>~&xXV~tF;7YW{edi-#mAQlctOxODim#xNQ8;9cY zLIy{^*OQS`YwS%yzmknsEC1*Z|M}+g&;MkMrv01&H*ekK;b zL>E#(h5vNqRJdqZr<~5FUlUcGT1)V(sFieiV)+xOlvqq*8OEeR053&|=OyO1Jk>5HuQ0J?9KU8HJ!Vu__YNC0v#V3*As|Ug`^mY9j8da+C#} zx)g$@wd$GoyDc1JuevRfUbvJ%>>ei67j>R15!!Z5$_Gsi)0o79q4K1H5e zNCHz! zPfrNJcuIMS_+U<%zyT-RDsRwF8TIttfKqmpf7MSJEiKwZ8`{h}cxHhx5aNY_!8euQ zm;Nc^7e$GI)fbS@&*a~vB=(Px_r8HK{RC+5DT|-A9A!sQuunw2`e-v>1$r7L!khT$ zy3sXI_dNPmMh)sQDW8V`X-~N^Mxx_P{?)5}bWwizzI*VaO;xK)86738z^pE`9}M+* zK~(L_@|bs~KRxUJx)Q9*SYNn`07FB@^6*Wt8z<9;>ejY)&Xp9CoM+mc%f~LTnIzB0 zD54)XN$;c#x@I*(y=U|}H(G>^h;!B_6PP$~eR4w2Kermej4xR$p#ib>stj!0FW&UxB39Tu5%nuTx-yy(J1a{ zoC1KZ+aqTh0UnN?18G%~<#-+KgR;$ec>~+{ zN%5EZM<@ty;o&Cla;aZ#=XoaQ#{eYrtIysFvTdjPbgi4MV2}q=nt&HaGtj_kHGzCI z$>Z>mt#0kM@GG{bUeA??cS5?pIH|3NPcB54Q@z^g?=6-tgb#t~l@E%RKt~DOw*=kgv6m;?wYdCnM>4ef(H{L+NaUEx%5A zjqx_0)$grn_paksw?4hSx%Q6X=a7w*(r9vU)JbE)oKqSv9>nKEcN@CXnc^8IeLX4% zDMeH?fmODK-sV;zX_wOjJ&FNW4O;akCfm+^vU-h>}vFy?RGbIwhA10(xwhY?uudi?m&B=l!QIsr{8 zsu#tm%Zs{~T=>N=z8u_3?e!`}-MD=6`0<>aq%5X8gUee^6N;q?2A zLI)6Nrznof?3FCR=Kr48#e%`$<&Oe&&mAm!R z`@d+Mt=aT#czoG$y)T0w!LByuq==GBdD7k`Cxi=e4A_^W&%FE>8(YQ&S*zWnw+!}% zML2j?ACKZ`&-cT}-A_IpBNI*@{`e?5NJF*^$#?NoRgNM4F}<77YOK77MlW(CsFX8@ zv*IKpz;W*%gX#1%BdO-evD0wNK&N$pXY0y?&82l4(`cIfei#4KCl3;~ z+dG$Q)g)WTCT8-mWumvfSDEDa3964rbuNL%li$=c1Mc1HU;4IuZb7sH*s zyOavv9rV&lC2M39)H6C6)QrjT-c7xfpkQb~$YMfC-zsKv#pm~V>Y*4lu^5cC85Gq^ z3`NiC=b;A!SO!G{Bl^`gOC^Cx2Z#Tr$nJ-e@DGQSKJZY|AVduBbW+_t=ZQ0M@i_E& zcn>~?hC_Q(a`1HTq;meI7SiDoKJYniX z8#vYH`QB47ATE(1Od0rRXg~#O4lGRo#CrQwbYT=YiIuuZ|P!P)p8?+hPY_`B2p>c7}*&hi2T&AMh4 zqw&ER@;L9Y`N3q<;G4`TUw!rK-p?ad`%co?PI2?hiZJikH}6AspRhMJGa9}^IH8f zHbTwdFB~Wi9vmts zL~3MoURV4GUh+mTDzUZ3V&44j3#Q>X6Pn7y$_TFE=2Bi>22`6@<2|GK{bBLsDPK+q zx_K{e^`-VbL_hVMuhz|+nv%);eCJ-qQA#aEmKW6?fmInGCm*H&Ggw}>3Sj@rt5(H!t+E}zOc$!|ltH;!t!RHp8GrV)j61=2_$tqO_ZfDV z>Z?8*tM-{t${7##XUs-;os17DbCQjmJ2H=|lgIo`h8P3KW?`E~DWJlecFG#|aZBc> zx0`Q%C?9L^oa7N+*^#Wo#U=Hjfiuv&V@R?Dtu`j>LJ93ftNm9-+1{?d(Ty~ z3-7Je9Q8iAd^yAWdIy%gf16@IbLDCBvQq(HwQ@PiyVV~$g<9j&^cCSjI~lM?ja^4| z&pe!LyHSW6p5<)4o%3THKN-ebSCWGrZ%q~jbaaYPNtm50xBJ`op4>~mP2Sd&46;cG z+{|lIQ({U70ND7owN(gBhQM(~fq2{ZZ_kwgG(Ij~umNHzDkEWqPv3hVR; zusVVmRu7!b&5}ZFozad<0$CctEQOjBV{A$}p%_W$A0>f$Fy8U%vQL1+&YTQ!glex< zM+!%E5$a26?t6mV-~f|$`!EBAaz}a4Kg*AVXwydRM9`E0LeZp2s9^+kjF)Mk?iX6s z5_nnTzJo!A6Mdh342-01LUuzwoYsCDt;5z#`cqZU7%`M%B-0oD6tctWlr_ZW-}5+M z!8Gp+Z?bf)y-`xV4?X~&37FZ)tbywHZ;SwAw3pDqg2p0_k6R9ftG&}^RF%s0F z58+ayJ)QfbASz$~849ClR}Q_zYr`2@B@v?2m~N_W(#eQQ5rCVeU{s^`2B%ONE#MT~ z^(_Pr*_Oc2RweLs@DsMPVMqtm0Y-e(`}(F&Uhb_kqo8tq9z_ftqP;qNAK)qr{}YNv zndyDc;SVn_#aNYj^rMeB?-id6mNkeeboh$koKOOF&QuF_I833aZI{WG*Soi?D_Do7 
z^Jw^c4P9-kd))^Qo&xc#RDo}GbIL5mX({&bpi*Hkli8y_%%zMRakIbQJIzb8 zBD;t;@a?B$>^4!0Bi@ruhsUP&Eh<@wA|K#%?ZGG`QJ%t`loE>ajIsJ44qEKLF+O{T zx_3TDIaSG-(%`r9cX5=1Rv}Vk;g?*QxdNSoOSY9|v~UBcyo>^VDkQ;wc*p0(K+-yoP3qq>_C*4%QT zkdse88$Of0I6O;P4P#7VF z$aXm+mQ1JIQhbF2a>OoqwfNtj7b1q&`XI&GSd=-+iHJwFx19m|{g2*3~kh8bLtmBxoQ$egVdYBaQZjM?9G5X3+J{lBlzeXowS!f&j9DEa<3S);)c z?Vdd8h~UPP!vw5C&1Um(Xaz{<=JQs+tfm>;R!UR*=Sbn zuKRxQ*@G1N44MpSh8opOCNFxIvCJTcn{l#@>?jjc;e}S7IG~KPix~+Q+FT6BqeO+H zb=9f({m?`>^bwxV5nY2_NJSN$5k6G3Axxq4u z26dObBNHiznGN3SnWmF7&j1xz*DvXyCy-fm{5s#Z&$9@#G%gMt^dP# z8dFDq(s2il3eQEE1h4X|9heSBa5apq)jTB#1uvEtA(oK?iIgL_8bv8OnQ(^(qwBe0 zY599V33Pq%89YOWVdQwTf8ZNsYd}{HZWk|jV~n4^gXx@a4RotDcy98n<8#l0X?p4Y z**kssnXc+1m`2GTc=|riS3kk%XN;Ua%<75nm2sKyJnyZ+_ZVTybq_w~N@)2wki6X=B|mm`}d;@M@8R*_nJUa{aO)HxB1 zuke8eWLf+m1P`3Nhsq7V>3^TkaRV=48#aX@{HEeAvcY>UZ4CYJ+u${CWuGVG$|#`8 z*I@OI>{7;q&4|J*9ge1&wHIiRIb-W`%A!q&(t+~4F@|`nD8t7#(@YQ51UK>je8<}h zrhy$iT_%ik7#)oVum8Eu+FDH?nbll7;<>%n}lc1XTTX7 zFWZd#E+tp~@xwN#KK)3VRWfhi?OIcpF++ zq8$ST&~Nj4HyRpGN$Ggnm{t#cKnG^OXGX1+k)w>HhtUmRqa#zambE%{-C#Piy zVuVyT{fTT5t|fEXU?Ex>?`fQ6w|Q9q%bX>H(HU*w<3byTcdKg@<-Gm+YYwLTOQ+hm z`71T+ta=9j%a^T5w{r2m`F)O%?mZ!-6`e8Zyp)q@`s&S-&9}e%&BzS4jrKjRxItw(4RG5wjU)N!0m@Z=HIlkAb}2925zgNa{q^p$sQ&P*ro9FI{En_ z9K3#%L6I}!S}Pm(Zr|A)7D3}Zbafh}eVjX2(r1$W_9)m$eDhXwYFVgG3uokLSy%H= z?Y{fz_hZ=0#`ZE}B};U(og9%4xjiVnbqURrJ1w@CpERP1&9el{^5cyG>GD*?SV^{_ zb<7s~%p^M*4C)+f#U$X`X)yEr4`yRhR*$$zh$bn#9i)AAqk-C>K*;fQ!b}2=($c3s zM}dAu8S4&)TMAYT+kLT3`XIK{nEF?PQ;A=5Wxq~;2rqKoL^~UWgK<27@JCSV zXoUz9=WZje!A40U?7&g6+V8y#lg3XqgLo|gZi^KS2R3lQ8zYGD$INnIPv7)4Y=E1B zVv-ndjsZi!1%vDfXaMnyipjjx+k?4o6B=+UOlQJys&OpendmF0t{JT8SD#ECl~LzR zw8n1zG(ah)qwLo)M$25?hl}d?tAF#a|MSh}=U9Mcl%XL5M!ouX_SOuDwUobB05I-M zY+yKhXO`^xw!FURoGBRJG197T;6kJclK@{LL61ivu1^Nl2-@IH?Dl?ttAFlwXYdMd z{bNX+U+@vM^|i^NnPOIeQu4GpD*)=oLjeSg$#?{7c(HqMr8fN?o{NXT#hdC_TfDKn zjH_9Kbqvu;SqZY?ap9`^D?6Ke&*1HWCgwi+_)fuC?Pzx(8XOS%zFT3jVeH(vn|IK( z*T-d1H)b3asbVJ<3Rhr%Xy#Ak76I;M3%J`yMLy^W_T~RrfjG;VHCZtJOxh zr+Chd>M~K86r>}>vok{vek)%++dV^!kw<;pt8WCX$&}ovqRLQ;Or|zl^}BIJMxf`k zRevZ3uTp|`i^m*#H%O;UyxnL?W`vF4X$Y&biL1#N2%5k%I^H#O@L~G3_zP~KX5ccuv1xfrrM`y(@Z z{@u3^H(!1A`zEEla=phmx!i<(>+{cNLOY2*TZNB6_;QM@`n4A)j8ZGCk+a~u-#mAx zDHHIHKK3yjr@Z^@;!t?oc+!@vMc(I0eetRSq4Vx7_bzT875|J*H|qPfmeJoF<^^pG zTGHQzx83lMm*Ni_2Ir>=MrP$fp@a=hktke}Pk7WZ^{z$BzE_`=%?ushR-qR|*ZK_Z ziyt%G;TPs>2c3;y)mohzxq=7F#=_~OUTsrmNGx=f|9zZG#%~6R4Ut$!O~WxYFdk>$ zw^P{qPU&Od?3ZPS*P8s~0etxVcVo;7ui3FW(mZUOjW=^Q&vzSM#fiU3(Kspm;;6%j zE|jTiE5&E0Mzc49DaTp#Aw$t^r_3vNiu1qvmw&Oj)}d1uqnWt)C-KMc{^3`7<)3X1 z$Vj+qIc_&a&iwCyFmtGo4oaJS2sSp;B|=tw_F=+oZr*4mB#*Z<6mHZGMd@h=v|$N` z-*wqvQnK_B&w~m34nF$}*Bo^(;}fAgjz)e^O#GELbiOK-hhcE?`tjzwe~gbxMQQ`) zl@u{YB3rGzo(J}E`vB|I@?*Pd^mWB-H3deZ718?&c4F`DF_6m6Y?>rR01zdFWr1 z_VP_3BNRO{OZc5Re`I%XzyW{`U%lA;Dwwa9>4*_y6DkA19)orYHAdP;QQyV%e@sr~ zo*lksNK2W^1I-EYE~lt@jO(oC=Dr3PXtPQ%-knytlP`=qjs`0XHwu@MV1QHCd+(0l z#%JqDY#Wf_ilHcE#TdXV$2k%%?6g{h1{r8qi}5#)IqdBCO_{z*X)6?sgZfROXrc^m z++OK7pZ)ZxaIn{#uYOk+$SZ$dyKCRaiI91DKvcr~|qk-W01gOBUC;`3ZybWtg)Ii={c@Q(lN4SCl%F;1OPMIUJNj7+eH>sDFI zY$*QK?_LZJ%?lzuL|Qw2N&raOV95t|il9z+g}f&V<}R z)RDecF>hRq-lm#@bnEDLC;Yw05LoHhFbRQgj<-}VMsx&X7$(?Qeq&tp{Col&!Dy0* zNCevHv8)vdmnOUs%zd7ap$3P^iLPM7*=*DZVV1Y1V>qjyft9c(K>O{VNz1#l6jlF( zc-UZOGPOiZ2!efQh_;qzwL@U5&t!XX5E(A&L+A_{!BPK45ejZFyR2Z$dovj~7*jq7 zUz0D~HZ1#GUn7dnj35u5Iv#!+fKyNR3r!%HU88t{qrowrfZAQ2nBHzQS{9WNLWs*) z!`1B_bU?>Ze8LS+%+R%ZZ7Laj;QlGUCOs|o+FUaU_KcA-OY#GGgBsk`tDG|W3xBa) zyu&l6ya_BNrce!+Nfi8}2%pm_BMz^*(5V-6as&V-cJX|nPnGtq_iJ~ZL>PW5a>0aH zAHd+m!@+B1(H?Gj8@+2~h0t}N&V-6Igp~s0@CkbK$yIdAy24Y{f^7PvZv0Bxq3(4IxH;4*%K~sYG>|;%Z>yYBYJ4u 
zeeV!}V_X{J-5@v6j3WRIhF|(5g&k$x2vQI}D6Lj`TvJzkiQjA})h{&5-8TG+rw&?r z9)a0>I`PNVv6RzVYB20iIGWl@QD-u3Vws%s`azlIHR>H~DNW;hhHu}El?8#Wk{y-r z9)2A>_g8PKfQYMhJ*zFPioJ3`a_hThNoKmIfhpqVRz*GA9lDQm_D<)0_b`#hQ2o>kO3 zvNkcNAjJm^d@In~o&ITabi|Gh92;TZyNwT_hR2OJd%JkXc2is#5ca_s8@!uEse*Ec)q{q`R0u?A!0WQmy&{$vr8QRVG7lo zyuQz$tV51Y+FXiTXa7d9ZMPTac?#QB3Z@Vt%0Kb^#e*+5pZ)ot%;80s^J>0*`F8Wy z|K@-1P@><5VGf}7sl>y(_BS-(@P^d{;d{4k<-v@|mkJx%?fY(UU9FCZDo82YE@Khp zAFNx=TX%|GzyH~%o7<&Lyz#`w-~j9wzLSFWdNG4=Iu4ik$li+ADP@mKk7_>!Mcby! zdwJP;=dXA2>p_b1%NIYjN|~{fg7oe0zbRFu)Rpl`9@G6+NFqQAVEbMk{WN&BDj~e? zW?@GR83T1MJqYXY?7xnlyT#876CpnsCHjQ6>u1S_m^;$vEUP@nA%~_6iI*~r+DwzDMPD71_0z-eH!sj8#ogv+d_iW?Wht#zSOTr=fWb6w2!9T{N zeqHK(0oSPmXM0Y!n6XFX*1lE2!#BpYP@0tZ^h(C2RiIn9KFK})X!Zlis!y-Hd~szn zBS>x)vSouagU3qI`}mWiK9yWXa7use+G`|rMQet5U}oB!|cHV+;Y?)9M% zsLEYT7HY#O-!GnbN-(3d`@-QGRx_5&1;#o29tX2M15$}|p_t>HYJDpsAWBwKzY8O@ zftN&k_3HiXmw_kx#jOr+lcty$myK!987>jW0gvzEIj6Z#a)R)U->eW&>|c~l_3=}O zK(=akIb$@Oer&8iX@%rcjur!uY&5sLc>HK{B^+}dTxg#}*62LDkiootv3h>Ax1jS5 z9c+k|Emj1H0}&Y?-tRA3YiCblIiaodp0Bm)=S$8hVN(RPzsvqlC9U3xgK z+}dIV;T%7f8s4|&4q4Mz4ByDKb`Mvs@9L;{uqYE^4pFN%RJ9Uf-SAX?STS-Q* z*^%X*bvDv+`WnpRvaL1ftc){l?S%JJHyBdMgJGD1o2fyn0CresiSV?&J zoDWCi3m4}Xx{<>8cWR8T5hEd3yV|T5v*xQ8m4gd3K`*!9({nVy=QaZjy{fG{Q@5%Y zKX?}})~o4MxCm9=0K>WUx!O?6*%fYLc7k)t(BRa=)ueXoN9cielfzg<8DL-_k&Cs~w`v^8|!K)czwClhj@w=7O zEj}bV?>B4VJvh6VMCbKjVO~iqd97V|8!ILG&}YkqPz!fDHRB531k2*6>goPaP``&? zD<3}m8(v>ee}4CzHiH{HG5oh=7k&V{OI(^1CbYT97nmkCu{&uELyb(a3A}IS-^+bU zg9&AZ&GVnVEOFL~9B-EO+Ww=Xs?pY7&$ygtj+R|_=1m-fga`sCh? z-16Zix3#{Q1GpIBz)WlGoaRM=Csgl7d|)YthE`Ae;Sy~=#-hPhu`ZSho(h^?Yn%o1 z@JFv~Z8uxbeTMAxX!v8rYQkRQpt)QAM{S<5`SA02O-2?CH1R_+E*Q4R`sj*>@RQVM zP9E!UwHeFguO;Xn&xMChZAc3TawpQJW;wQcXh|=hwp+4*|Bq=Mg<0gHEHQhYL3sUm za&rYg=XORkTHv*Uqpziv-MRm1JE`wY4pN5@{D=SZ-)?^YkH2cSqm!gss^66tHSI9X zM#h6lr-TQCN(vSXi`NF<(EuM`>i$t0E`#|>u{)oo^^1+O1am7FFPHoTmlv~h7`=HF zP0r*4+Kkt*TTVg~mx`xmP7E0bEhF5zem5MgfbuuFoaI}*lI3yO`_GCSdUI5~O!Ded zd}Yi$E8W%OPKlb+-;&>h(%{}rj*Vv{oLtU5Jzfj+=>9=t1(F*E=!qepXbuPt#>=wvc-0 zLR#;wEE&6#Pk;B{{${gTu>km7OG$&;jHiqZxQO?}8im@?87`M=cbAJiJ;;WcSq|Xw zA_M9(=6w0`r|?NseoT z+lC&veG@-RbwoA^xW9GhMoWdAL>FT{PxH5NG2#!d$sq61^$y#;i>I8T=JYRp73;{8 zJC=2G`k_Gl;J0ghC-23(cDH}^^7pe7pFX*}Cl+s=Xy{~J2Yl>@3vt)4o;~lth#RG5 z`sMJWl$l_>e(Uw-haXE}lr<-gPh0n-6>53wZTXYG`|e@OJ{>j@O|D-4Y?db)^_y3X z%V#YOe8_s-N~j!Xt-|}QVEiO2Y5S_pb#0ESA1wH#L$|v-{8-*_)cKA2wjEzax&D)_ zDP?wix#h$cil`GrKD4GOx0tn$<0jCz%$?^esEU9W{FCwN}iPW4-NM83iLhG#+O_*o>iW=I>nL(WEo6 zOu=OeAu-{6J0>9fXF_e*WUWPm-h!Jh15t<3OJC+n`01fZjUl7#fRJDv<)`vuFvfW5 z1xm$~NvMf~Fe!MZUj#aroHaf7;ByqNa0CRPFe+R)%%lsoXayW)2*CN&jdS<5Aa8Di za|KD6al7`0HqW_-axw2uW%X*hd)DX5O?s7{S^k-cCm7|iBe=Awe#*V~Ot5{O3w(ay z%<_y2PlUQP>H3cE^??Q-JzPF6Wp7P(W7KGCct)MIStXQBm-^59$nfKC0-smVjUlS9 z(`RLa84UiS-@w;>Mj3?qr>PO#XPr>jP`u|;Pe6ll-0l4}S$Xc6SG4G}GOqO*U8|QI zt&C@9kJpVLq5@M`JgB9!a`7EL?OrTBybu?~x63uUcq5$D-gfTZxhl*3MR=i^ep-8X z3^Ug*OygT=T3!~Ut}V(peC=|<^#VRzZdS{G0-BeSiXL1iaQNZ4U2A<%*KzH^y3gF$ z(opDk+!D-O^}S=MB-l*qV>|{Q*eI;{$#Z4B=Q^h`gKm{WYtPPqqGRpBGfbWZpjXUe#sxoB3F_zL#6=rG?^jo zjh%92-n60J;Bq-dNlw4V#o9c3@~kC+A2l?JJw@$OZm7#Ci4;gmtW8+rSgxdG*a_*l z@U4uEuvHsvY#4)CyRaMnhj-7~3!qs>n3j{P(&h+)t}DiyaRyi1b$cnAM&?$#2q}^D zDMro96g$qD*U3@wDbk*Zf4Fz=_DuSpe)7rmU%kH>m)E>Nu zb%2p^df-p*b_dQIMKBi17>Qje|2_|VcsE>*=P;~~1weU7>0d^~`zKV@h>Psw@k`TY!uVuw?X?iOow zt7C+16o9YE3@~`Fzk&U+IrZhlRh6cvI)ZrfzGVnO)sp;Qxe{C<0&k)Xei}tK#M!Y) zr+C^1f`hAfr~W;z^vazZw#x3rYjQ}+TRINC2p}|pQ?Z`Jd%K%I`r_W^T79!?_#GMr zoTY(DtBBX@BbjD5IxaX49%(8+ylIn7ie%^*xUMvoEMb_tUdFdahfh1o`C+Mgiizq3 zFgn(&T)fiA2+z<@auOe(R*z*LaaQ}57pkKd8jl%UV6R<9((8_wefPS&p9Pm6_TJ;? 
zcH_r88IISoO;3AI{18LciGQ!s1=S%VjQ+k$k(B)3b+x!V{352yj%v2Bq_szGPZ|~io<;;hTt@jx+2Q2i)xdX7S zhQIxsJ6kOSebbW1qf&kyClkqle;E(IFI?xsu}wF zLT5_Cu;@~_?U)FOnD2dq==G~sOh!N_>?l6{KjbaQ(s=!H2^;}n5J+&3&<}~Lo+hOk zPz4=D9G2FVn1fKTGh>wE5J~wraz`M>C`2J8-eBS3gUlr+*Y8R+73G>Z7_j8gzs+me;TamzAGP&=FRC_-)XF1IJ;(rqp$j{ zFTLI4>aKs%=WI0y)kzSG!O$VJ0KZ8{mncU(cW|uFCeib2`chf-o6vbM&jT3=wr=+h zJghR_Tk!g@`k*|6W&&};i#ivMg0;SYMF2H{roN$bubWI)hdjss%Et}SXLzE9=U;%2 zIu8$cQU8Z0z_j=br}@H=qi{AYEgqS^q2+432U91U5pD#KNnj>JPr{A0bEwjF6WCHZ z`d0ai8F|?e^=`GV z;@KHn9iH=cR^_Twb)Hu?bk9Y(v$6#q_j%5GZSjQfR&I#O#?7Kx<32tUCtv^|F?tP6 zdk^O@xb@404jcktL8YzMR4EcLXiN+*MYB=QObaBSQTVo)I2FHL=d36pi2cKm2Ru;Qaqw!===?~*%Fbuvbg({q^N8qAR zj8a~?KF;f^fK#}?m>dBIIX(TV4lx*ZyXk z;Au3bqiX@gv%w&wek>sNw3yWoA5scZa^95!<;8Pr?e8`_zj!bK;+J{G!zbfxR(j)s zoYzBB_`nn3BKIN!?02t^c%t{vepY_^i#F)xQiW^g)SiJ82beF<%z+u-aJ%>ikH%Lu z!_iVJGrmT?swX+hz*O9yk;UD|>*%T6lCRAh(G$N})tC`6W2TmBTVOP0diqm4+hy#y zUd(_voOi`&Y+d-}c&xAF-F)(-MDy)#Tn5C&@=HFL)%>#kN8NOU$4@rj6l?MB>7!B1 zZ|~eJ&^x+lzqa1j8T=>nN8s0Hac(J$PGTb0t;p7VSB6FgiRZ^DryL)gK2D4hzr#gv z=kE0kOq)X-A#TG@!Q#DXXJw=dZz-eI@vg%vjxW57mSQt9Dr{D%-Ycb0vD?yWOw||L zR4&wHEawL$_EoV^V`POBF^lpqTg@WZT@u@e(&EWdnqmNY0Wa!oZmO>~GGY6&zIT|}^4ILTA65JnsEY9kAiQH3&(bQ@I5C5yuxma1hl415N1x{+F zoy(;{O7R!Ck7tgG;j#nVZhOE@%Aw!}CuNxvF)^J^mQfYXW6*e`QI{;xKF5IFs_#kx z`Q0~PehJjlm=v{d>-)AI<0ZlmSl-6ub2-FSr#BX`ZwLV7z&yl@_o+d-8bmQ@9X& zu0Cyk`pM@RIoDF`JK?U?sJA^M8)su;HCUPaJcEBb*aZGREd1xY@1Eo|Y8ylbj`47} zbV#z>z@IU1$26J5@L|xP%k573`t;M!qj_HbI`X$){rk<251-f8g*Hb0IICxO?_8VR z={Msu2QyqPZfm!l`d4D|OBqaZkpAX3zaB6CcB|66#q=;b(e^jL`CWa8#?j}yZ+|F; z>2IfBciQyexN63&!EfWmo!j@)k!5Gd5r33NKch{|mmT@9Tglj*wpqb|6C2onapDd{ zc%6|rbQ&4g2Ahn-Z3N&An}p*rEJfi%J0WJtdX&z@S_JHWNUPS?Z zGD2JwoPO3Poz%~B@Y!m5>96)8UCwQ z6ZXY~CK^pX(ApAqw4H#R<5dZ&U^mwCPxKC+qcHcq{_3Z&cM{e;j`f> zuhj?7Cp#Z7fp2@DjQhE8{3I-e7fD(Rf9N#zgl{oBTeG zJfpm%coeYg$XlRexpktCl7)x0dK3#zhy~n{Yh*EqR;XSyNO^hvhFr*4`2Mu8f!CWa zAAGUdk{h(u3db|g>1yF5b_h2>ul^cavV{1+zkbeeM`q8x`dK-wG1%zcZjW44mJCv# zzD+0w4C}n!c(pP-<_L_1QJ!ug~*(g(=-@Wqplz z@CYH3ubbjZnarhMdwFU5{x*Z};{NMW6bX@SN9Cl2={$P4mDyfez8QJaNgpc4-_r;rqL}b7o7kW$wF<+C` zQWZK)^xh}8b+`GmG&AwKe3WtsAJz_=wEKKH-SwGIQqp zl>P5tJnG$KVtu<(2+!5L_7uo>g^Ji1<#ezHOEm0%?_DZ2lTaW5=5!f42ggpJO`nac z6?&V@%~&|NdMC;ACPnpmIBEk;A(WryF+OP3gG1#)Z`9d|HuFJe^2=bpMSRb%(GU=V#RdTR$)Tpth|f?On0mQ;q=e{@-Jp1iFRJJ`RMC- z{9`e+b}XA;tl9}X{EL6_pSAJi*5*(C^e;ND_tTvXa-K8*ONL`0#HA&JT^V z$Av!8q3w{TvstaBcgoER&q4{W-pojdj}9}oJ`ywuT+t|wQ_rw%5cC2Io^WiyZ zqj>Zw%;fmR;h?7Y&al&r40ejpoVV0j>}+6Z>i%kc!O~(^G;bY`;b9lSS1(p zQ@u!2qIx`y^_?f@LX*3lX_WBe;zp0fBa^UQU?wy#^lVZh#3htC-eI{*bGRfwDWXeJ zm~;zf{Oab>z_1rb`44IXssS1O2d}+46UDTlFDBFa<9-eMJ(u<=13?-s&h9tE{9HmG z`a-y>VEPhcngj_t|CX>*|MJwR3*NLf{iDDjX!TB7r=FN_&fA!JhC~$X3;}RbG^&5$ zs=Cd3{jVoYq75izYC~Z{Xf1Ai!DQE8`Q7+Js`E zO?(1Zp3PO8c!AIwVEnF%nP4>CKd5E6!g~!E1iihC+wRO|a#VI+)~epS9Wy zR26+9l={vPq$n&UXh8r6Uey*!OnEiP)sO#tQh!gYbK!5*U7g<19>7!*Z|SSDb6*|R zABr%5n)<1HMfF>cO_qxazU#tLy!;*dr8J>|rUn4D(8{XAbLGILZiN^k<1r&+^!@Cq z22DDXV13%`nR~(RFQ|nmEN^yib#LkpSWlO7-94SD9XmRW<&_F-85KL0q3f^E#FcW1 zVjisSRY#=k{Xp-7@B^tCEFbqVJflm=#~TCkmYM+H6!vg>`fhXM;A5VZWR=6}#zTePLDxAMD4c!BGkwxzuCM;1{m^&dV%*@f zR{et1c;}#i>!a%5P5C+B$8}X~bh6Gw&-0Ht>=>Mr!kPd%SYm|)lMqF4%MqF>fzSPD zq47=@z<&V2;M`w~qy(O83?~zweAY3?PJ8+^ui;L)Czms) zG3CYnqdoX+i=V?~;%!LNJ}KbY$Z!Rfb=Sw*V~o3Lb%>lKhwy{~!>2!wt@u7XGD5vS z(xA5S$kd>G9~Qs(8=7igJqxF+UG4X>{~rBE#Dezl@ryg=h~CnYrHKOWsXThX|f;y&}Ip&qT}Q59_(->{KRFY>L@uevXJwlF}A`4tz6~luHL^dPFt!M2FtDM_v6pzA~Xqqa@rmS z!~4@hRwCB@`0YkJM~xGTobf*TPCP#2I7PTyy__L2q17q>r4D%6(Z$c6w}Jq7IR&m1 z_GGW7JhXCZ3ZddC053&*=9gd~hmCE9jCq~|#!N>U^j9PV6@x*Pf4`EA)%2HB-^by0 
zy7}z>7o!j5oDb%Vmd2{Eq@P~5%ep=~WZ|<8t!SW*abTREeC#OT${rq;4kucUOszd~ z;joYud`rQej0VAWDMRE^G-ZTLBK>47Sv4{|bSU@bmw!+>M`*v;Jo)Kid0n66I4V_D zVPn5|@L=F-r(7|*{$Tq{o4B%|KhLO(SC2qyLJUXfAyQq zH$Q&Y_hiM*!kgOra5t~L{vUm`cj0;MIs~EJyREcJm329umao>yW``L9zwYnB!yh_E zBJV%PWibEnn{PV6<6XFPs$Jzz?M63u#Ycn|E2DC!t$cpWsrEJl^gZJ*2ORoX{goA9 z`^Jr(JR@1I6Z0RlI8?x^Cfef52za_bPr}Y;{%wPfI~7PV?2(vKtUD*Hr$3OD$9t_KG?m;d+wuq%TsB2o%fM)-1`lt@u>7U ztmF}C7+0@E+~d75$TEU#fT0Y@8MV8YlF>htkFcUx&CD0=gpvu#S@c5XeiH1A2lp&BpTjfsGf=z- zW7DS3@mYhi7j!o|CkASW0OMWmb}*_h!5sb!UP5Oni8@*V?MA1ebrmlUTerJ!q8}bq zu4g{4Ctf`hJl+kj6%K~lPJpacns~N}Yz!PUGO_l)GTL33PDm4^T1$DFXLaPRjXM ztnS{kA~DY(pv$}G_*iATSp`$44mDetFnCHlr(hC2v%jXkP--dgmW55Iyxz)HNjRzg zzAx5ke7N1^IXD>^)OLL{UgUK&LC#9m^uazDwU8OB40)w)V_WiYm((UwLXC$%-Z)6f zzue?v;@&Q{aza&%r4;`+_CWRQLmoDqVO0lTF_zKoXFdsN3Lu`E-qb#vgjCocJ{Mwg zDIVo5TMFgX~-7*}uecsfaE=d>{uuUi4J3Tq?o<-#&}tzEpA zv+>d-fS-DQa20+QJpDkZi)*qK1j2!f|_VFG0g%0oP!^4MO%?w+(>URVL~5FkP$UQ zdqS96B3ZcU7MxX-H|s=*OMNNl-OvFQ2BFnEeMG1F<0hD%zbM3_)moti_wN0;`QtzR z(`cP6&cSu_R&Aw#?1sB}yNdS=e*WZ zrM*ab`H+|Mh?1RA9dRaBnu5M}+8B%G<3+B&WP&+@(sfysz;<&}zzeCd*FY}K5AU6V zWs_wQ1sNLVz~huvp*wGLAd+R`@(Hb&jh3g6)2jF*gw_woO=`YB@IVhZT&Ifm z{>A1GzUa^gAvE#w#qTr38)*08_|N{k|LK3;Y*s8lZ`Rkk)fKKQ1K4D0_*(d!KQ-df@I`fv!duD6 zG<@Q1^@H(TiBQiNMLioO8jwBLhT~z?wc3ud^?^V)_<3*uZ*rR#)LYTXd&!!`v8{$uGD@xvh<#`>CAqMN)e;JguC7-Kw&w`~RovPJTQ~ zmIbf>A|o;*=Xt2QHCNr?joq(DH4?O-d&Q<#tWb*u;=e#hEyNayHG5WwH4BDDNJxN0 z^X7Z+ySL`7teoc|a*E{pF^{;fIgyd^`<>&*-Q3*F+|1nEybnIBnL6%)e+-0fQ=qhItQ9*bIPHL&a)I}7EZ*PGVF2a{$!kK>_I7+icJCf|{6>-MyD+{L z<`NAx`BZ7`^xyyw3olkyOq!H@{LbjB$na`#MetiqNIHi`{CaL>$f}T#yF(hINXQuq zX7=xX_%$(E+LsZ*#IFB@`GjRbPIO)>$*#sA=Lo}?0*EFM;KE%mbon1HW+gRE7yP=j zuJxfBg1ukMA+Sr1^zmKtYGg$(=r3HNLGlb8OsGtCQ~Xsb)N(Qc^~?&uv=clgd#j5l z3I~$W0&U`nwK19UXDl1j`5TP|&-7t{>@&Rej0_n$f{uhpHGpKwgR-OC{PA}AH-FsR zyMJ#K2>71;F8y774*u%3uWAw(B-_A5>9N8*hFk3o47KwXjNTZ06;fKi$55(mGMH&d zS;ReW-xju-^F|A!G}IRxmpPHp5^t_dMe-SXK8kWO;@tt?uD*uz1KDq1m=#+1*6xcG z-j^@GYtP$*VzD2VNAL5^&wufYWX4C;C-fsnP|BS#u?cXydC~r|BlV4F}c!L`& za|3+n7iAHT+8@3QN2q1S^q>q}DZ9Md3CqbJ<#K7-wINOLUYdBJj_R; zo#)R>r2a{JRtkK#BD0-$_*Fcz_8)W)9i_LRIU&V+d3^DLkfi&?tBWYro zyN;6%h{^YPC$Ij^yya-Q+puf3E%9V?ijzUOoO}sx=E!@ymc&az|Q8&j&Ey(0x(11k>%RO z+>niEmh!=KU3^|d%_8KXlB}7f`7(rv_fxkYgs~qp66!*NDAH>Rq1;9oO{TuflB3}u zYobnp!i1PC#uM*laGkUKI&0cButvibBC8@A|M^;-n9DFXo6iPdifnK=E;z{M7b7Uk zV#mlV_LIkJoktOKbj=2``lN1?HD|@m)h4`&m41=O zpkh6%?cN_;#CSZMJ1Hfji1$9vE#f4E2s(K7Px`3APxMVrgRO)fpE;?QltY z!n;5a%{b>yxO9D+Yfa#U{qJa$Y7(q)FH;tJ z8Ct9S7!reLBVN5BL7yy9j$v(*>Bl<+)F}JNyym9Kd8M{Sd+%mrO_R_zA-s|@S3{3b z?ZTF^QHeSlvuPjv^(nZGsYzHbVO%}a@#(hzlx&>)sNEGGuB1kSCE8l0jPVo-*U=9I z)*b^6?aj(UG|ul2?PEZxlm$E$`8{I51(0vlUUywRCWwqOSqO!_NU6~2T7o@p81 zy;(kO!X;S8_yl`(PB>hAL>Wcf42Zd^( z{UFw$4!(KL2Il_0Om2*qs`&qJzWHV{kASJFlbhrr+*W-rpgpk88TVUx=_oV@^!qy8 z84H39T$2nUqh*~qwpLp-YR?E|XRA;Nqw7>O$Bw@)-WXr2Uq}Kuz>r?DsG92+I45<#K5$3k~Ys)1~@U zr}{==B=`F~ncRW_ov(4Pen{*YdZT!IHhfe62mZ!jA>Mpf(VhoQ&+r#M_u2h5cJpq} zZBp&_ywjiV=mVa{FJM52cs;ySv1b%I)vJ@o7kyVW*_d}SWErNF6^Z=!u8pl3Q_a`- zdS#Ca_sPVa28Ql=W#EtV-)Gfh3N<*;mCwnO7oJBIXdeDqrl$9`z3-G z+dc|>^Z$`jrqeHkNt3itd$JG=?Vc?0`&Kx6_$WCMPx4ZeN8{XR^^&}q<85t5u1--B z?{$d9l|0Xrsx?}9#mJ~qVbb^W+%l|W`Z*_Tqj5Uc#^oce*qln?Gq#Kbdpc*uAXx4{ z>eN<>ad?(?@>x33vo?1dd~i$2x*NP`bLNolaYnpLNhSA*rrqe{)bZ2xr#8YN-g0;Z z+OeV&BPPqjQL7>n)}L)-=~;(OBv)RQm50LnmNMMMvBJB)8=lXigGPtbN0=yQ23bN8 z#zSyv^~dJxnXlvT`;VXHOiq~&DS=n}4E*@1@#3AI`JwMpfzs`4Vwd8zFNG|&QMW@J z?2WWn;Zz4v;JYus`FiuKFTcsqIJx=h&pvE}^|{UMjM%Tf`m!u+$2OmS{`uyg{quib z_N$9|>^qPkTKoR`_03=Y^!@_o6#h2z;^h)=``}g9dJ9lnRNUA>Vwi@&Cr=NwR_E5CCW_7`M 
zIh23_$QY!9o~{icJ|{A!^aJ(V#4l+KdAJ`vy?c1Gy-pdvXrL@b?{bFj$@o<}basbJ z?8R3*JI7ycPMu2_1Rx>8+acE|p_%-M5VBgH6i-y_olG+nWdk=}wJJgYE(AuglOoDW zfQe2TO@lIC-bOOI4OGiVF;qWxI2+<%oIx6i)E&Y!I_XoMmjp^5x)O}l$y%kj&#kaH z@Di#K|MrQ>RR4H1l~{#l5D@eT=Op8_H$qd}CW08E{x4xr`w{Ajm2c2$!yx)KNQ5jU zfp8RBMBzpFZ}K+E*SnSy8{m|uL)=jIrU${!xX^3(dzbJuIb$xDkMBDEvM$WzF+ww; z$YKTmB775~mod`c!uKvgfx+I$gWdR;2&UEAe3x)HxrEj}hsnVK;%toTyFNG_%Ck`f z5o=#eddPN#-{`-3!bSHy0fsgZv%CJyq#R8UVC$-Eb+Oph3EvS=u=idsFlF4g0??XH zHz`L|G}lvfb?V^qb(;j%)haU(jKGx0Rg7I^PEq z8+scEf`1gM=*wUIiQ1<=ysuWUsYS@gfCcAn`Cb8ry0Q7UFYxQDYZUp~g~Kr1Ut`sS zx@N+7mp9Ic!N({g^A07)*v5mwG41t^y1+^Pa5~sbj0QldO?!4uHXibGb6iY90`>Ll z*>?S$_ES7h9(W+nwiJ*^NruHs4WRL>x>a9-7FjZawK6L_edgJm?)9I(7{~sN_h>#V zJF-9l3^1cz9rd$Y46CUxBZ}g3f|oRf+N!Ed6Jm7<|DU}7ev@&sH=aVH<20y2iX9xF zeU21-hkraD$3VCM2l_FhV4P6D;4*H*^L>0$1`lBnH@^EJKJJ8|`cJ8$aM*tUYD%|y z7fqTsWIywKElG#9xLIgGrQy%`HYR9MDBDnSaE@oVxLD2wa*G^#lQQ<^_5I#$A`3>F zlqaV6Kpri;f}-;AJjI}wr(e=$>X=lg@CphzC1QmSG}Ke4zGLgA)WIQKJ$&@z=G$+a zq~%-%4z0YaP5cyGiU$K3>?@Q}fAEHOyVqP6hNewC1~1w@83PJOK&y2-(iu|dR{~%7m%+c1^qb~;Vgn=ZZp1vzgs9g5j*FH`m6w>fC#idZI zlt)U*%NmUohwkh0;Da%2MeyT~KCDXt6n#)^z;3fEPwLq+;he}oc+`sfS6^S>e0Tl& zI0Go{*WSOj`RwD5$FX93%-OcYX5^&CmE&%BG4$3rdrsXoIA z!wBYIr%*jAh3sA)U#AhCZJ)_j<*YWJ>fJ|qd2#1n%AD~R>;K`0@0Y!%qo}2iY`n$9 z--~bX>8VyX#=+w)gA*R4H@$Y*iD$i+HyLf;O?iF1|9tbf@T$8R+;{KX&(nWmbFIzN zvuCHi?VTx0P~j<8&YeiveYAP{ARVCbGM-i|-bU9lnhA@+yYCv8*F6*>?L;rk7Yl~;3y^P z)r)2E3D#HXkB%tciw@BPSe~RX-^ru@&6i(Cw=xy=+ZbED+)H8SR6KGdWjn?EtFON* z6Vcx0@{wz`QFv&-6z$zY8c!5r`9UX(*$b=eci(@%`PJY2X7gj=Hw+_tq+YcD;fEh@ zY;NAXxq1AoL1<;`WPdn&;MN|{ufO^-L-tQ{+LTtcsG0r7SP0lS$3s4Y-$|iP$-8yu z`jTVeg>&{ub&xZUGHNbgxSR~?yoQ`Vj#8E``g~_}I6l~X^YJI2ZhrQ``-6`wmoCS5 z==kC0oRzb1Fv)w`Pq0;8;FI$Ac?JS}zmE2gx54;$&mTXz+nkLV>&X5-!Qn$5-+&%P zFyc9V=J@8$jb3i2y`Ah63Ph-mG;I(AekO_pi{1Q3vm6aB<}gqu>1WTfAhSGzi7|0xxh56#;IsOwn}D3j!S~*p39S2`&&1rsvczo3RRCweFistkhuHGgg6DUD08`9B}5qEG}Ryh>x{#xh*pLpk&33$Xwk(fy@ z^mztGOX3)UHaGP~NJqp#ChV4?s#oRL7WbGDG5aJiiMzco&#>nhQo9TliVQC1wSg@X zjUbcZ$7GAaw7D!prcZv=ynC#5cQ-`F)4WlxwvJm?ce&Lqlk(=25#Vu)euUl?|+x+#7O z|LflLcB=B-q@p|8T9a~yVD}wWJpJi@L^lN}kiHUpqr~?a3g^(Po)LoRih`pcrE`?1 z{u*CPpF{h`Iw7RZ!(W|a@DX6@7^U=}zOnkKw>moSFmabshT%iQTnt#WH+2m@7@c5A z+2}>%b_gR{u<0wz3|_@`S9^lqxb)jQcxwc^XDKR{kq>d{`AiPIWh@%O$}ZZef?8W+ zKV#%MuY32^w?dkuc6P~6kl+SK^R;n zA{ROWi$R)+k38r{yJM*I2wiS*qooL-p;@5_PfIp=PND>dd;OJma3(o-dw-k@eYRF( zyk!O2jVGilebwaktTP0*^Q!SyzeG3KC3*RFCbokyY`o#cwIH95m0RE7^$;GtS$v`} z7{Y%akti6A1(`)b^ghMghQ-rOQZi;xATFH0FtY4oM#1oZ#r?cm_wGI3T>tL7l(}+H^Taw5vS(!Xjz5K2mmX^ zl2>nsG0+cV>{VHPth}IcJa^a<<#6y)88X7r?`VZ#@V3oq@3vY&HV1{#300yvFv`NE z-&$KZ3-?wC%?%8WNkyGvAXIX^^JoV@)CO7shx?S0ZjQIns7+hJL}9pp?`D(ykF&uN z-Ue#KG8coZF=}4OFVbZ$ikrjBd`^IaSO#(Ba3(OtA-PT<)#UWaqZnd&{ud!DjyHNUE& zD`7K?ptloWZC<#1rC9vJe&I|jHXJ+b2~uS`qf9a|r5?tg!l%yVrM`IO(iqG_SD$5M zNKSC43^7l_o1FVrR4%0WUuu=aIRQ2jpKK*iD%Vq;J#^{{I(=Duenvv!9^{4j2G1L0 z6o4Wqfk%VC-36N?Ip!X;%CKd1zP>AevSX3!h9RhL=Ijop`tgU^#Gr>!tZ(>X?5i4<&}5tEUly(w zD9tzNsNw95aJ9Vq49YX7gotnAg>#!n>0!6NyVdGSS>WI>Jp4O_HJum7+A>X(I)wOSLdq?YBxVR-G)sdnR)-BuVf6rQ%yLJocR{g0a`tu}zPC7wc> zo@9Mx@V|UP5pkY(hcsM_85`(dzkC=H68;gJNtp71SqYNafDvpz5L*4L z>8`4u{^=KitURlmp<%KhKz1W`*69d-ls%H%M6iUO$w3;%cn>NC-b9TwI=98)gU=4!+% z3&!xm<9#qI58kK$DGeIys>J+V6Si>x?;4MZVLuq>-RS7>S~y0Z5Vv^5fPi2rbM@f> z7{Y&OrN854pGh*I81CowIx~i4m5=akJV(KqPTG6Y7=ts7_ZrtvH;=-H3^)u6IJ0-lB@Lr-Jz6aN z;_dh=*cg;!3`9f|dgET@7v7gl(l+DaP%%ZH>+0r#Y<3SHt4kk-$AW#xXuda|y*A@R zey9|^=`Wh-BKO==j}?a5lc0%y%tePZr+KmmjNUwKQ@HF@+r6RxLWCrz*h+~xe%kxR zwD)^D1y1IvwMo&BhtK_7pq` z5FxTZ-ny9wKP9&io^u(#Kj{QvJ@DQyfBoB1KVHqF8h^&WFuhYm!nqXN)3VRhpJyp? 
z`u98|{%KxsS+!1IY$IpwSh_&6XQ_$gol05O#^}`Cp zc4b5=+`IjFbMIE0UxQ2i_CZa!M&T?B`mN^avl&Y^$~rt$sE!O_LXs$FH*VgZSnoxv z#ymZu`wzr-hd;D;D&f4QvN4Vqw)6f69n6(?_;JejehP-wXW@3|k{$S1nq^5O zcz)k@VzBdr;5tq$Os7rHttyaTb+b?$#sys>y`UQ-d+IYjqSJ^lka=vkaX5zls^fk} z%fs4XyqyYfJHhs%5R(Tf($CM8|G#>iE5hL8{35e#=H6+QXB;;T47#%++*v0$;(2|6 zGqUVa>4r45a?t?bx{ECo0R-&WBkLK){!vRhoQ*6jo?0s6t8`l4W+;MIGlef!_PzC&O`jofg5Q zaZ1FSdi1uA%gw1+2hwI-JT@WJgut>8GIjMQ_-MJbvfKxP0lL&PZjbtpx@i;2I%Pos?v$#1 zTH(y-Bu;gZqTZWbW;nxa+27ZhvRi2mlIw0YOI9TK-Y zo;0oA(*oL_6f~X>?v`a~H>vl`KGT-Edwbr3AMf_-)8MNw6|KB>n$@cp9UorqPk?WI z7}sfKNk)BISG9ww3R4cI4tPLIP{AECO9MTeAw}TIC%w4X&Ehb*Xrh^(T7N+Vio5Yfq;RmloiFyIylcBegMb)$YP`58$r$Xn3Q2Cdo-< z`W@H5B;sTPefL;7^(sA@!rCm;`?WbaX~X}{j@@zMT$Z*ORLw+H=p0liCeHIPZfocCR7V!}8WXZTG5B?5Hf5%UYYT;JjK`T;x}4D=~nE926CL%}RFX2#y% z*_B+oTa72y;0Cq?OSXAyVmQg?hswekqvO|u> ziH!^=HhkF`iiRy6uvovbFi$mWL+?{BVf$>uKK&%<)8VDN}> z4prGm4Y~26& z5$V&}U&CLShZ%UODes<%x%XtpY zoiitG=K_A58@f##%~_7IwKM>)ciN$PGVS(R{o|!P&s|-b9M&%@L9o8Lv({&+T${ADirM_F0-qQ%Ym(m=IDw3kjQt()bUcUc>+ zTeb*e;r($q5E_!j@F+L_)``srU;MPCptIq-7_TRGF2BfpQ}yZ-$T z6GJ8BWlnyJ#&=#l%|&Lrv%s=f0?Dj{1@_NckmDgQ z<_h}olMg~zCTsb^lBywam605HPABWTKqx}Ygh{bIR0RD+AdG<#cr`?knBaN}Q~AtX z6d0-!X4$I^geFMJ@sNmGhX_&T)b@#lkx3$i%AzV4e;QnXm?pIh^A`4KJ+4CFm9@t{$)(C)0(AzyiPD! z*TQhej^%o2;@t zJuX7V3m4P0Qz7M*#UyLzly-x`8@+3iojxjAL6h3_zrQn%CJgoTzIt57ilKM3bvW=^ zR$y{(8;8nK4(jG8G~PQj2EfktSBSGW50p<6SVa%W6^Ch=ga z#(quEoVnGl-Gn-PQo^OaQeG*p*HK=pY(}Z#);6cDp8FdLr|h_UhMt?~8#{L?p~hVs zCSQ{Xq15SCkWRlxu{)sEh4KE6zQ<^*V8!PG8@wOFsqfrUQimMRRkn=U zWL#|@?|s)H?K8yZ7E}h`52o~SxHuYAF3yR&X<}^no2083y@W6DF+#cdD6Bgjx}xrGX>~q(K4ffc!f)bu zaKi1S44l1c=~%zaL-Qt?5kP1>p!?T*jT2^3e#x`e-;(kBhSkA!2^VUD`X^7(VDex+Al^$vCm>qkecXtb^13^IhfW z4ephJ1N2R-y0z$q6bFl+?uQ7=r%fH^B{l zX@WQGoaDsR=NXqL~QQG+Bz5TmLw^J zzxwT$(b?kUYw3>^>s4$*|44^G}RL5qEhQefYR0<(9Sl=x|Y`t_qM&C1BPXzxi zM{4iO2m!ksMG$W0u|)i=zQ1kxb0_|OlSOtiJe??k{TRg2?8{v0heqOV@G{n6&ez8~M@LsT(Y$CgfmzMLCpKYNh9QD}{p*zamD6*VtBZ9~DOcz-rRSQdM8FT<)0 z8ZkN#|OKKET29$wMy!`a)6i3^X^V_e!+C04#WKPtI>BSVt5f&RJ zoI0{CtI<~d=N)ZFU_v%Q-7wD}KFK?PpeMKv5=L0|c7(7sn-FS@1aj4|fi$3qg`lSZ zyxJ+Q$4oIwR^@t#!Co{12zbAME2o;&$h=E{6C&1rO-4rwN>#CXw-Xd5zQff7RyU!* zBZkEfC*o$mVPuxC;66l%s41d|lX2p-uvsIetbr{er3f-2o)=4SGUdU~?34s{9h`}# zm6Df73e`(cys3Xfw7omq;NP{jRA1iO-V{`bs;Tfmh*RH7&CJm~ETa+YOf!DfYn@8-nuEoe zTop5b;mX=QIN)p*S^~=Rsl!u3U4I%YxI28-qu_^AxKQ80_(pdoNn7(I%Y) zPbdRXb7+7>9OF*oE*fWJhDJ?VwK{n3)&QqIFd+?nfa3$ueT(;{j0oSA;rUrB4b5Q< zls*~J#TfLtLBvZA?cFL2fYgQhjCAp<(R@D4caIUR|Hz!-f_L z$YIiNw5gWqe=yeA@QM{zykSCgfyr95KC8zy@~ppdZz{ERgf6_V)kVSD1ts6Pzqwti z3TZ8(y!q9hor>^nERN+-I@&~zPmeaC?uUm|(V>g}Pf4hah>JR+F)sjsMf9huRj)<#_2+&9kQiHp<;JiPx zniX!;KRnwQPa=kfg6w>oayCkOUnf?wzfbM7YlFnwlu)V8q<^Cn>x;1m)WitEb@h&O zz^K{%65I^MEiVLh%@50qi&L>__@9fJiVH1ZjQbA*e_w?6}U>jm)6*+ka z)}wg>@Wmm$;s0?O)y|pI#fxNE?!79XXgK7kAPa4sz9FZ0@I@h-ff4C*zIY4p4^Wi2&&G3(lZ~dH2wt!K zt%A9?GZ?^)2m5cvO?n`)I>$~*a3lVeXdPg9&3h76ul;iF21W;ou?XIQ*^xng0+I<)i@PlCPj=kk6} z`nB3V8cxOhFaTa>fIq7*Gk4Es&Ur^P@1 z)vy011EZ7Wl7Z4caxm-XyCP+53YcSpTa~f<_DPDk)d2IvLz^wCkyGMC__5I=cf-)? 
z(PFp60NJ>()tqy?O*7)qUPNE}>5q6R5fSk;$F~{k!R=qh10rthWDlPg*AC*_#f?49 zIJ{T8@4ffo=JHQJ-kiMJ=9M-*JkLSy)UZ>n9z3{ncXRvt_02E;>aUCWx?7z7rH(~@ zZ*w}?EhUrF(th#9Pd9(}AN-@)^z`|s*CwVUWj5tM#d8!~7TL>tn_CZFY<~PUg}&Xu zuL~@DQeYjg=L~Mb%il9uCtEQPue-q^s;M#i5v^qpM#HK%hHLT~Q-S$>Q4-iD4MpcDoofw>;hBZqr6fIY(Gy#MmUxVGi(x!PUgYf1nRLYNL&ae^K*i6M{)c0*D}?+`1Z;v7T6`gtgzff^ENLA{7d}-6_mwJV(KTK;dLyth`Xd zndnjymVm7O-h-Q0?RrCmsy&gAiCX4e!e|tNCJ_czMMkij$V?cNsclw#jOih`FzP6E zeLs>=OI!>wCRxH$I1Q#_S-*tY^Cy(r@CD9lFd_0T!BZ$W=x5*m-T(0QKih0pEI>~Q z=P9E>b@m4yDXU)6=T)J)x{0_;nfm=&Jhq$KMT_?9uG=H&kZe{amKo-rDFXN)ee9MFx2=x2~nKMvv2hfy4Z6O9pY%1r=qaDb;#i`MnOw-5eK z`*5agKLq6{DB&AU@dY@o*q~zpzZ1^a8w9px(`hTX_u->@P>jFCa*u<3depsxcDqTz zvBF_E(dsHuSqSZ#@rJihggynYS8RT*_>9t0(M9jQ5S+rzW~_r_ZM0Ylk$z9$WbnK6 zohOjONhoR)%?}S9!dl;jK`BFFQpWS4|HJyd8w~nS-q;W@-ptBvaTL_YG0N~<@WaRU z@lGOYmzHts?It-ZWlfY#b|z-cpGY`RJm%^iwi5DM{?a3^6lPNw|bjE2Zz5&C!#YOZuLm&K;ht44 ze9G+yXLG5qe-sGMwTtE#Z7rNA7X+(Z^{(CgwKcaIt8rlUZphva&ZE1+Hz@Y=VhQmp z)5noJDr=~QrqDG$JZOf@-4D-)j-0KIG(DQaL3WXwXU?QF6%KUn{N>HtJLPSRF6jb2 z&6C*?swvIVnh3RttLp>#z>tjpQu3~*#4sSp@E3(+C?jRo$y22vETrVjWdY&wNm}@u zV74IvULU7Zm>ZvD#8D{Cos0%6MSDF%qhW8x%b4i)D3Zy0s~Kbq<&-0Xd@*mL_tyz0 z`81!G0!iZbopgkIDOZ$L#`H(g*!io4YUAs04Hkx>3qd@d0 zH4qsKUv|b;HIf?Ri(J{O53`aTuQ+kaT*2Eva~`@2{^y0OFftzIu^0Y$p`ds9Wf4OX zF}lh#-u)-_|Hm8GH#cwJ?cUDjYO6g@@;Kv%Bgq=a-wItjUaGUpDeeq<8|>aRuEz?6 zIa;3Mx1HA4(p|=m{J?Uw7fQ38=eohc@5j?)oEmu=POASI&vtbVIKjJ{!mVF&!9K1% z8&B{Oqe*Vq7lmZ6NR#ABV<^>5v(4~6-r|(Ax}eO_aQ6Ane=oeJ`-X$NDcAQOJl=fs zn{Ue&ab$DpOs6Ey%0|0w3(U9c8GWqX%1oTbFg@)~E2}oUT)c2GJSLbM|I>w-zABZb zo#amn!Q2WzHjUW4;JqX73R`+3XYS7C4}bsnTV*d@RI7Bv!*^eQHQ`MsTe11WKl-E1 zl}_+81~r*+y;sDEjj`xTzu?cNnGagA&_}#~rj-N6(pSIz zDt-2?Q$Vtinz*-G!WE}C%OwGraXm|HZ}P0Y7Vl4hG>8(po^MBj{BbjZlIgu~5)%h+ z;CRrIPc_JMnNg6G;XPCCO%sMCdBW>Zm>IC~ghqfq+X_z6o}=tSip0?>R6fogfkdEC zpb^$O)n*@^H(# zuj%?1zGl+wOV`BJSAqUwjJnyk2DfXJ=x&UXRsV)ws)P{rKRi;c<~;Vw^%aCO7F#Ce z`Ekz!_*s3Kaq)e6pqrk9Yt=isSz#wm;AJ$SCME~&hASp%VygcjB^(LvImIdD!te6j zt#PGDcB}e(R#g+{EPz)RYZpym*rBmX@~}>u(@SqsOoCOX!AMB;acYgu2~TyQvB zS9kxWvEa9YA!W+iov4p~SG)cKW5R~Kq_6727uBsVGeOn|yD}LNa5zRr??IdT(Eb$G z$KV+n2SPt+kb=;kgk1M%^}S;P85p%YFdyJ6nm5T?eel<$IWg&dhI7YgACG1S)_7|8 zq%!sqXhewI;BW(G*fiL+IKQIg?uWQfLL*fqVo6zQX(lkT|Z;*M? zn&FGZt?01vj913s!$+{&%jod%nlT1fzjbNwq1(E+E;l_x8&P(!O^(p;aV90kNnoe) zdQPY!JO!V!DA9W}+z|@-Wi@D42YPN|f?J-ZrR;>M3XiA0KJT?caPRKjl>ghC8$aA? 
z*xMzYS4k+0Hpq#0M_M6CnMjZhPIw{r3Y&6KuE;WuAQx#!=JTXUL_WM0eWE8a=1BE( z5}>1<{xVv5^3cNDa(mYG>O`O6uX{e5Po$c$^L*rWyz`D6t{nsq4>P|QZ}9dv$=#Ub zj7gbzm!h`YO7iw@bkx0tuii7j;eunSSE`Gg=x>FNp31F+{p?&qMijtluZwa zF$XSnH-1oL4jRK)zu>AjhDHjx-7k~|ogRJDuEy|TK2zhpJ3ls7g&jysq_*h|Mjr5qngsqEyiM-OxM zCF^n)ao#JAj|{-s4V`vQzbO#t@nl7DI`ZW`+}nCw;d)Q7IZ3 zJLNn!r*(;lc$rRspVna_Yxl2tWIz6RzLU2)o`2OTX*X}*X*HmgkqkBR@K$Mb9zA-t zIhQf^;iU_k3(?@Mbi2R!&97P|dr&rjLUU3m@8-#8+d*1`y~bvDv16B|jugg62H^euXxSkM_HqFDWjv1`rLNjC zx1=xGi?GvPr?+|HUq!30GBEDFD0HVo0&08|YpbL}os6H{q8RVn!f)s#7cO7geEi|Z zo6kS~AO}dHTH&-AGgyNn{U7Rdi3d!-@Ddo!F!Fvg;Dk=e63Z5FB(I1@VcY5 zIWtCXM6aKH@maEXQ><7={2giQ_WphwM?zF?vn<#UMWL)yEN4Q8g2;R9qyz$SH=#$U zNxd-JnIpcv424%&trQtc&SP~{2jm+xlN8_w2#RJSFk-)fm|;ygs>^d0x%!v#SIG!> zJY_vI;BONeEUHQ5+C5XSP=HAR6tnX}VFGbnODU#AM*Lv*JeagkFqqKRg@h9{Gr`sO z1m{j1%cvnR*U^2IP4I(i9ti$$;2nFeQD9)S z&ShXCZtb>ol>K$kkF2@+EuGNh^h^viiW;s>DR=8X+{`RGHm)puNmr10+er) zKfbW6KZZcYA0e#&6DFa_#*Be4EN76T|ItijHIrZ1s{08@lWt=SZg;GHpog87Kq+;w zg0KCcwZ#wktnUp;Do`HT#Z&Z42=eacaIhO@yf1!NGuJIn6ImT!`h0X~ZHyC!0* zKX2T( z?z_;!w4q$}P55ZIwc<15SUqcRLX%+g3;6?I`d=48IDPc4bZK>F?t>2wC|$EM9pH>s zDdGrzDMEz#aMUd2*&Gl(SBLND*XdEayUm41+B90dVu?qd4E?mieSE#c$dWXkQn6G0 zYn-D^yj4B=0WQ!YDR2dK{}w$A{er)-(*IGc>)^;bWt;rN{Pe3Ar|&)O?=q;=M`ZYm zhjF*dt2Pj8&2_93FzwSgb{q{JKs4>FIw5n|vdYv4Z0(Ro%kk6m3g{mt(ep99%?k~u zga|dzoO?P&_T}bMTr?ii6St$EHdj8pI|@2D$>P1{R-00J?alL2@0=^qz8sO1vF!|k zGi`=P{}ed$_|qp3H`{0OaOXkZYkwlyb~@C0c0ZctMV20BE2I6noQN53PgCCXO>Fj= zLWOo4KOSdvVSG%Jr^AW81meuw-8<#M`Tg}D3g0L^CR)K)VKu|ho{(rI-Qr3i781Pc z!>yF)@4x>(a%c{W-p{m`VM2+GNvkbzIwxism*`kZz?}@JS0$5wT1@@9HuKv5q`%;M zoKpGCS6>%u^q{dcs^;sXqa@zG|6mMH^4@7#3ti|TL1t6%cTfmmo7f$8_0?Bjw$CEtDteOV_r$4l zdHq{e=m0Hgm`-Gfeg4xgA_=_kZu5ToP8`_q@KJcK4cK?+!MnE?bClQ=|9Ert^7&Hr zpvjD#!h6X3n5upr%ADdz(hq+2#m_fC-n_nf+B<&9&i7gsI^U{-jm$#9zA5a-Upbby z!P2*ad8lGRj#)Gu#Hs(uWFK_T9AhK7z&+$TFjv`3(8 zZzflDNA${Kh;COWl?eHgH#j26vSr~19+g({k)(0#0mxs*3v6dedPfK~ZXB!Zuiw*0r+SG&~|-oaqd6}PIV5vuKhsIPtNxl1$C|L&>71;0v; z;W;I|uk&!`h06KRJIla!YxQ>(Sajr`NwOf#SYyQ*y&snKpRr#m@46Tbbp#y4Oa$AHM%SB`r^Po?!{iPo1-5kDtQVR0A)K zH@>MKa7(#fS3FgjL;e+StTg#G-f*7>+lwbS6yS?O!h1e1SH_C335@q`o>TS*x4o@= zvSJ)Ueb%Rix2e1;^pTKhtYTUy2?f)x`jyAKaE_*-iwE_2@xge@rv6x{864$MzC^XuVq7VOszcxPvXrNGBwpFIZN>^o#AMBB8Hx(WZo_wS~7=x zx+#*3y}ibEFIklXvoSqmpFxV@tBg~j&Tw_R$z`&Oa|JY;M6zr!aE@$kW$uI%CJk*bDIBqs$F>J@1JnwSc zp1W{$4y`!aoI|FZ7#gTda~M9=PjEhLh2cqIHjG}Gm%jMLA8!8mPyXTN@BY2N*DA+H zqX*kZd%KV*vSH%%({;c2{IkvH?_J8^C~WNBz3A5bl?+MwztHNDXc!q0^+8)t!vD{I z`g@ztu9T~^F=W8bzQJI#U*b$5Ju(j6F9hp!b)3lneHl;Cj~v;4Igh^+GJn#^dJN9% z*T1hXM_WxO2XFP?$p~QB>Kj>ewbhFEJ7H{x;_o6z2~pP1_g`gIKaN2O=Ky37tj(}- zYvRgeVL~9_#VwAPrU9XJjv-N*RH+g4FJnXo!^GCLX@!<-r#x$PmbgQViHSGRgoV+T z@nkzY3;Lb&h@e^baP0xCe{`Mr|Yd2}gNl)kMsHOjf&;+(|iEpIQuW zcXms8F>M_gfe(+BoE3{#ua;M*E<}t8i3HZ$tX6elV9X4+lo0kYhJvd(A*pivtyaA1 zp8h#$Y=i?aRakw?m{9l-IR;>Lf^#W$9v)1T-H-iaOkRpSebcXuuRIqN0@OZ)0wA+q z^)H3O_*Z!)Onwu4JpEj42c3@f4s7+eS9$b!_teKw7>s(yJ@tpPzRu+@?+@i`DeLiH z{hqcfJM`_{;1`E^n4M;D)mwOhupXXCp;~Zwe;EoM4}AXAsxcUEKbSQOjpNnqW9{;6 z7>2&gV`Pj*)zguKqrm$vltieSchDk*Nrgv>7Y@mdLH&vT^ht&I7!R)f8DXUFl;nZO zScGp1`H|K}VSDfq&i8DrY6aqWFdiCw6>mbTt5a^_z=Z2ND1@EjJj$hgJQ55X($^gI z-BS8~vdT61t6#?73f!4KKZzzN>Pf53NgkyHicC+Dd0^!&ikIwz0Kw2?;GT-&%$neV!QTfY|TziM+z(-8MR^UUL;5 z;%HLdHSejyXVlVtbJklSL}XZbUZ<`2ZQ-lQw>l4h$>Q-sCTp#{%<5_PP|wR&Q+CLe zytW2wl=Y!iV_j%KaOT(y&nXUtzl`G3c;dgEHmS-~WW33mARqpn`K7TroZG;^_;=xW zI#Qd)Orw7rK3CVGy;+&>omw6~K{xP3=s3->i0dTp9hCzX#v@ zGxH=l-M`_5*kCR^S99~uDE&ckP&m&xuOwrm-lqILxqo}8ihQ8B%0eU)3j_Xfp4WS2 z4ti+G&KN3)9B;AmzQK@-_dB{PI32t{uh6l(*Jbx z_kaFJDRU`UDS{k?KwkUzN+e$*5#eR8^Ps*cOVII)jdQf`_}v|uz((fE^71&jLl!W8 
z+0_85*G_auJp%I+Kj7JfIz``P4O=}`Ei>J-;Q`2v2&Hcj~1TG(ivJV#msjw1WHypFnp zR1TcH`maQQ|P%vec@60g0OXSdsynuOC5!9mw zl7k;;=tVjlvd;7y>7>J)nqn5`hxi+qjD`tIc1%DgJn#CmO=Vb*_X2k2No72o5WuWM zP2-}#nv9LlR1!Je+kz(Xcza|p%Cq^%L+X#0pqnXZpo`+!A@8}5GAa(I-S~*H)-ZCjMDT>XHg0ebW#$mPui8ZS_fQszGaoxjBp`-s5t0j`Sp8Rz8YPd9OU8 zOLdbTwBr62Y0${v5c;pUhNynxSTirq4h!X&(e`Y->7bBLdJq1>np2q%?koOWKl=De zDBPspl;9_Ia>?b<;eg?RE2EA)XJw`i#%cv`aixue0=OF`6kqt_6{nmwQM&b(PqpWR zcHD3&zk`7=&%j7g0gb~kB~`0zGG8MHd$w-bOpY8TwXjdx+$=QS!HYZYIxcWLgE85F zf53e%DRXJ9``52t(LNwxE(U*?3f z7y}P2#@)bRIHFwXm#<$jM5BtNuwY;;;z(%Etflr6WnQtZS~wWoy8{&D%y-f;%Ig72 zB;?1O)Y38gyOvg0)BR;jsNzv>af0pO93>YyOSGiYLU|{haEi!I_iBm?My|ElNN;>_ z`!-Gi99az`ArwWRp8@kwz{sLj9EQt2%0CBEwjMI?M!+!cuHcuYS*pR!q9vEp{@b1O zm%o0Vp1pmCUVy%dr@gd(k*e_a&Rc5ufALLvw0xJn3wNSKY;#K5!hnSqCxg>;&S}0( zXsl_Y#f{ugkeT}i<95w@_D-p^oEE3<;b`24Z{w&S zU}SL%4ZTYWU6e_4tYajaV)^9B6Z%nB!a<}d_=PEc5F={?gC96595UA7KrSn@jk*8! z?MqHQruq<>JC_|Osm{h^B4kD{zWeS)`uz8#R$Y>sh*2;!NLf~>GeuB}t2zg8~s#2mN!P*Z2Hi03y$qE@8iiV6^ zrNe_5+-zh1i2^~zR4@HBOzVbu%Xr+x7&F3q=9al?t@|ybmZ{5hdN2?~0&vog9bbRK z^UHI3OJv$IDHZ=v=ox5*!yOE}`by5bWcAU`&1tkQN*S5?if-T|avnlOj3`8J(nH)h zBLfPi8jPRkc_;*rjq?ZV9^BHJaPa;Ff{mKYD&sK2hMpPvR#lT>MFR*QPbe-BeaL-^ zUll2Y77NJ~pFj@JtB}QJF#F=4`M}YZFf-ODf$Dc!G8|hY9;bIwPMQUpxU)T>;5%)^ zjCxwd+rJ;sCs^OlbJQRfr{Kzug5CRm6f!DQ`6b_HTrz9!2TWX2EwQ$}<535X zcV#?21H+}kpl7>}X6Z-DXV|1jfNB0#kVRPA=+_N^G^JXr?DLwTc4^6XO<#)5+%6mL zsRX*tZpLVixa|v59O?%?LIH`kQ+VuEI$dWG#~yI6fjBWhA3whkQ{{KJ(Zj|Q-gkL zD4T#1Mg$o-D2FKID2ByQw!y2EVC0ip=7^JuO@V9(P z!B9i#Yy*=OW%?+IZupWeWQ|r=nnuC*57XV^1X`}C2x@Rmq3p}+OciS4P4+-0&kB%# z&^rWz@c=&MThe)~MQ~Y#VValD-_QT`fBIJ`yuDqh^D6$Pc6go{P0OVEyW zrfb&BkEjDmAoS_sg(~UtiAd$(B<}{Ph#HMzOFD>+o50+}^WK{CU@1~=K*tcd;Q1nM z#5PB|uhNaTxQ?+(eShmR|`X^*tsejyvW#7o$Q#0M&}KwNGNP*P*H|JMoY%EKoEF z^J92Hi}!oMMxTbW>$q=WL1=WQOKYLaAQ%kVo~K!Ak%SNe*<)1le0YS{Wa2z1GU-Ex zZ2J<7TC^cHUd{b6$GXJjvBiRb(dan zB9aOJXZ)@Y8c6)vz@Ovxs9-@coHECb=pMBw%VYqnX_a7xUDO>Hg7spxSi3y0Q1R3- zj--PjfCJhwXI6_7@L)a)3uc1AfC)Xrb(8PbkB0(~=;es^`R>2Z?*;povLMuUp zYPx>h{2^nL7<>c=uI3sZ;abRfXoMRv(18TPz3kk z!9LHE1F;Q5;5m_CvcK~-!1db4is*w2^E=JWvJRyAa}^fN@sQ0MDnwWl^Q)8s!VOF- zX?^+@3zk!`2nFQNqovC1jhe>=g@2~*xfB1pRl8M_Z{BS3l=ts+)=@iafxR~Mz42ng!}lU$6S$za|wM|GHV20ifE~`Q{ZX}rp!DnZnV>%-u<(5ml~Pl z%h%})deS;0u8GTgmi9O#0Nj~Xb4c#f-WEG>`}fk1e)KWQ=0Vygzh!(5%@7heCO>Lt ze>**U`J7TdTgU*zThq9}AF?Z%72Rax6GH35^@r)bCPy0M2{QE9Kek&AF>_O!agVZ6ATv zY*CjN#rodk$G|g{zI*+GREzn@-8|YqAP1a~9JG6J_%^+ML+w(O==HVLw90z@@gMy$ zN&f7n$Ae)wY3t>)bnu7YlN-`cfB#Sa0kEv3AN}M{SzG%k=m&&gV8-O>4T@|QG6xN% zzK%t_y1Ig=j4%jcgRANxLYr_b`1qSY{3yM7_4_o1^>%sEB}~HxDY?BaX$zd{esdR4 zjYX*w&K+sy2$@@;CeWjU*tNvpGBufAxZU|`-A~tfxNtyMXi#!cksVS>mzxqw#*chO zQn@Js{t|J0nYc*q!GPUmCrgNHJfKEssU0<4lPP5g0w65G$RRpi9x@f(+iJM5msk)o zwmxMt0ucUh-h;w6m(y{~@;MkfZTio?-MM_lB54wD43`d*W(cB-%SYy=HY<~pnMEB& zzN@{s$f9u=3Zo9jCiByBQIoY)O(220;>j@xbBu*s*}_txP#Xp&vp+SLV37G5?E=5g zMPJ;CI4k{zwg3zY9GgV>3Wh3H#iL`kpZHw=QtOBQV}9%__s_`agXx3<)G8d8ulkZ2 zRTkXhA=LoqxbYNT5)_Oy@ZFTw!6&zBtT-zAuPTTaVJPp7_AE5Y~mcc!mOs9tVjc z17D~AM8RUk0*sk?<vU#aGAc`z4Y4G085A}gpp9RWAEYbS z1nW4N3rKFP0O{0xnh4VXyUf3iQrbj7i!XQPs`St_&ruhYF=;92=2qaE4DCT3u1^!* zTL_nF;=gL}u|Wbim3c1i#FpmrXGIQo!7%49CFBT1@laex$^^s^G*(L z98sT7S`b!gMeD-yIfhNH$TjfL7S~8IiqT6q;@|bo+JjbF%%QRF5^xy;a$N45y+T2} zz`bZy0w>q!Dj6>HgpRNu1k6d+LiChGhBB!9tPK2ID=?Yx1{UI@K6O9A_t4M^*~4qOjBS;nCUEX zK-C5sD7Ka{vMnv-uu6P3K&EfL4oAoWU}50_EZ6v@V(mwKJ11_QGt}gmgc?RiyWlcl zV-C5m3q10&P&myOEP(@>0wk?dhzHuzTcO2UVAtvxn)GzNkgFI0C0yY{Xk5CH(R5h@ z7nHX#%x0wsX=lO?URSy&3wo?+nK^JrU<UHN@x~j+%wECbs^$Lied+;L!s+w|c)c`FR|-S%#*qXyoY6{Ym^$*D{nuGT<2PD%^&)-V-SMI{U3B{0W_4?>b zjsRwCPf=wXMM3o%SnT2I&5SG8yeoQkGv 
zT~DvoUUJBi>Ij7DTJXy@`9TNZ>6~0X(?@10=)Zq-l3u@lhkiJSBevW0asT!`28Z@x?Ockhz-H6i5GP9J~x35IJo zEmIiw^>6=YI=^#2-Mak|d~1n~C5P$N^Jj!frqkZ=x9RL)F%|cQ&yQVH?bj=j>Yrh}i~E8-anc zLhRFaqBX{QSr=0%686vE zaUcWm7;BedfldSnSOYG1ed!Vocd)2otR|1@ow4w=i$I$|2JXwgFW$w0zm*|;1#WK8 zSIAwOiE;WcALmawrpr%JB@>GzOJ8Kv3Qp`w+95Q;I6?`gZJ!ZUp`=~Uy-ms_os+1e z=a_9Jx}p{E8O8#*3;mh(moZgtDpc936 z?nh%%gIgLG58}kqAa%SE8p2-l6cL5Jb2WmoI;X;JBKMR9t9eOAV6F^g5{8E!BoC=D z5mZH3=|4-ZfH^iJ*6a|)Sh{xIm>a0k=YTAKq1Q9=spzAHhs(jJzy}!uk2zBp^taF2 zvc4soU0M4uLxfL(I&rST7Kygz1Vs)MG+)I#qJb{|P;nJyk#B-m?TqAM*l^(9F)^caci%%4X0v>t9~wxME$fXd33 z3J{tipS)=ZYI6Y0U>LK~1N{WQ<%VINF~0X15}$?30@cp7^lZq}4L=$%(vQCluYw^4 z0WRpfV7{Z@S?|G1z=^OLQVkCdx}rX<_;FXGimgEAux<{+2#1J?-NTNN{#)eMxRy*}SD`=0vmOQuu-b1OZ**+< zK>59-=A(Qoss=zCLJV0)J0-l8MNV#Vyeg^T7-O#8MR>!XgV!*ID$xq%Uho_6tb!#k zlvlX6#igi{Pj*oN^v1g*TMxMorwRu4fO*LC4-Fz`dqg>OB9y7cJ24!?GG*22|4L&# z{T}WUv=du36x46xj1wPurDWK$H=TLlc~5{VVnJ6?{>MD$(g+^L0uQGl z__JoP@>|E85tYa@guav?ZWhrPoH+GscG-MC04b3# zMecj0w1ncnmLA+^V+Y2)2XXA~?7|n=8H)-M|w#z(Iy_P>?R zG4L+0ZeiG=w4um1pwqkSOBfII0iK5CJ<7wtix+O;5yv10Kj$R9@8adH78X*8B>var zkeouh^5mVJowT+2mQ(D8sSdBc)#3!J(ph@dt9DtyAms_;3$LhE5eA7zW4elHPoozIpyG`Z85!qVte{ zlS68?k|)%`h?@WS=jqNTKSFF(OG<4m z!vo+Y8rf}(u)^RXz2167ipxy;=>6&716L<|>B%3Sq%9mPfA-J*E@AIl`t;*ZqU>Fi zU-QJZCF?##h^kC>woyX22zvqqD%QItENguO7t;`rW} z%%qLteWFGUq7>w?v8K>%VT7(P2xWj(Bx3+;2qYA70Jf;C1Yp?0^O*g}cx1pb0Tm;} zGYc3GAI1hC5JQF5A9Kg-7$P@+!(BdFa57MFBjXMau!{@y>DdW|8dx--GzuK^h;XG! zh`ZbD&5!VkbN}{PJ(cDJ36bMMBr^`9K<)rEv6?4~lRx=)|M{zwu5$rY7!_{zW9EY} zn486Lbl}E(WhFHWt=uOtgz{itd47CPKq5rWAL37?P(nh+hq9xP$eu#*0dy)r3Ic`^ z?ExIjkv~{niW4JD;yl`lciEkZ@V$W{@^a`jaOiL7!+|^B=C3 zzp>Lgo>EcGV{IF&Zp-ZG9Y~CpU=t zdhBYgAe^S=ILZ;+ihpU6ZUQdJIHpNV`W}B|x|iNHz+^yqEs@wj&Jx8)bdNI`)&~yn zI6Fl12jx-$z&v_vuWLG@KWg^OnWr_;Xn1c0K##PwjQgD8b#}q-J#Oorf?#+I{7~vh zB&h%^3&vMxv3t%G7gJ1Xl(UE&y;zH`H{nE%(6%{f&QX>tnKxz`0gZql*NOD+{YV+1 zA9EaC_x7j3DjkAGeptsay?7RS$p#oXVIgNoT-JLHZkh6+LwEVQR4353Iid1cqzdDO zezb0YSNuCq5pCtJj-{X#6ean9YzCtAc^uhZY#T`nG$F_$BDxRy|S1jRhFD-=8Bfjr9qi5_Bc z>yftQHXCgyIW>mQ?SA_#z1TcJ`I}8Y`+py&yLa!W@-of^9bCNFMoDU-GvF zUT)l?sWHUsk51Bu7LVZ#`&*RL!J}=#fm!nPgzXl|(@iR-?4SVlArvDD^LWiSkEqef z8X1zl<2+K)g^0r}E<<=drmTIvLRzYp5uTy zcAnJVh8Vr49LBN%E_XC$NpIQbc-k`@!c^OVWDe5K@we&CCMiM3@N+!0)g{&$Mp?i6 zBz?|=EziBroP$rse#YjY1Ja?alrX_F{mq*%(sR}sGt3dgYw7)acS%p7jxXz{F%3N| zQMCXPIp3H96@)=vOY%X%-mu`2Tthogh<+368f!b@ZGmh*tN^C#5jfAtku9UDkW;r zuHPaeKAU#g95QY$reFN=&(r_&xBiSYG$Ji9iSM|+y|>3EfO%@RuCiO#lfK5Jj&bM% zhRxa{wLoDgQ7r%*oBG!xD$u#yO;29ENy82XJh*5>f3q0#^8XH}z!l*IW%zkdCk+u8 zd02^g`agg5Kho~&UFKpwEfbNhkie!#Ogx zD(T)&KS}pm@2An>Fr7WUNUz?$rG1ccrI!zas=-L63G;-)#g$7!88ROk=>Vpu^<+Ms zt2~5|OOpY~szrp1TXv!sPUbA52u6+Yt3vGHn1#LXCQmz#`1)O7+Y14-cHG~kvmlMB%gJ<9# z@tz6WGK)}%c|H_OAs0tzso{B5QRUA9c=uEpPx(@rFg+{0mfYhm;HtdyjUQoDIq;(~ z7G6DX@}V1kyeACI8@P;6Gl+CS!v8XExZaQShRnPby1WreiT$fw37CSKH-Oo~I?NB( zttA^psR70@A>J@5*);=$kj_C`Uu-|M1=ztU%JcF9N}+^_DG#QFoUsm? zD4FhpjFchi2RsLD(smv1_k_&?)fSAZu$WFqgsJfgPAFo1PVT5Xa2YMp0%!_F27XLk zJ-j#?+T1xO&)ZtY1(a>mt2D5kmr$U&=A1e%J*{4My$k>45w%j`E1n^*v1LMk2Z48p zfuo}CIx;-$nzVke_!0j`W7^9Y9-(4rbklSN+vM?>`vM_uQ;VcOaj!GO z`86UnhoGA0gqX(KeTt2*2Y>Vk%20lR18RshB29>Bz`6d=IB>)|!t3wRrfv+8yiZvUk}=Vz8OzMb$Jg`4!Sh;#(I`|*;VYMe`O5I63?ryT@|9tAA?VFKNP~i z9R?{6nu02=$PdEl!u*+oU*-r*>&?nHH86Y3pJ}W>4<2|1)FvZ{WayQ~{M1 z3J72G%IT6hq6j&8#pie#S$WL6^XS^6AHVZkL*4aU#^5QF!ZS1n*EKB8Ftj|l!Ho{m z#096i88HxNRpgO7Wh2ebnFA5>OzCP&s+%R0!l^=kq0Bo1d9pq_7ndJK( z(!odpk9mQXT+UW$7$U=!FA=#%9#@ETXb|>Ep)u0p&eJ0O*;W_+SYaMOLaYT5lp~|? z8=Q^)goB7N1me99+|pVQO^gr>>TH9*7(F%p$S9$-g4PC|qA;THRha(?3eP&VD^Kv! 
zj))2lsEK)X0e&ftj)#Ln>m22a;^UF9BjBFQKt~$lwA-6-^4Jw~j-&BlXOHoaK4=7i zQZ|LcY$*Th?A+byoTRr$d*p>5q%-9Kxae{sB8!sTMyLfNU~&I%}2ojD)mVGktj18as4X=7|JAcRB?KsHLw^ zVt3%@{mm}5J*hp~L?%PW{C5tG zz04d9&$4c|wltr9_Cq}VcUI8%sDX;JX!CFrdMBE)gmFhk|8DP)HMJb;Efp=rMbraj zw9g>{eU9F(S#F7`)M;t7Yl^)0mEsb7pK>_t)_!~RoAlM__s}ruV@1vYGu=n7&F)^P zfgRY?Qij*7Z?#ujX^HK#o!v@$^L(4a{2kJxFeXSk@&rPS&~w&P=L$yzkSLQZY2`Yv zVt~E}?`$l$(|`NR-;rPcf(XU}yS?27$R}7et_$C>?Gy z>_zzH&;K|g67HKBks@{geX8ThPhD?Uq^0-Z0Tt-C+@7R+w{NAF9hCO}@|43CZl$08 z{HN)`qlXwjr2JvD?Mx0*3pn~lk61%RbigNn{hRds`BT#1?x$b;@=s`wgErdl*grvE zRU9xxJgV@eag{JtgLJ#H`&~F{c#oRc8Y4gBR5I4#jGr5BCNz4!tnJtieN2zTQYCQtzJLl)wY9>J_ds z9(Z#QsKO(J2#ei~S=uv5ApWNjArn-v`?pG%9v;V&jR?mPsZR9e_wa}@UKK52u#dbG znw3Z!Dwx(XHRxGHvC3`HHgMT??$74)oGL8CkYcDtHv}X0HV6VMAQVyhi+vKIEO8A5 z+64(PzyvOY_xhtpL>S{X_ruP)?ps*(?zp?!=zvBF?4Ne@5Ze~;0)G)afoSGK1q`zR zph8(ic&hwrF<66aOlg7Wnm#pP^lWR)2n`U^mdb#0sKTWHN06hu1dS<(HB9K=XH=y9 z#NGhQ(0EBD1lY()93doNL2Ph6!h?BCxRjDChf()hJT<@_;9|Bb} zDw5->aTLZJ2eE{}0QaGQF}Jay6Fi<#W~NRbksnX5j#0(V9D2j?_7J;$F(T!8I!JWN zm}V5qL27X=0*j6huScQD1WO1#r#+N(c!29KqmU8tywO$- zca?te1~taGq$`Wm+g80f6L@P-v(QhUJY3ZdVTh{ zTwplLtA@Z+fW|J^*l6l&9Ywo|m#>cUC5>id4v3vE*A2X(;f7)$zaod&y4aTS$x*)? zJ@B3d`eWXmOUB|!T88sY6Oqmw3$w+0C^H%+2zF`Gh*~zfO?_fcpw%7hF;w*tx=mAeXIPn9@KnMBR(9Fr+8Jj^psSFxsW9|GhhBAiI;?iPz5AV29 zuY;4rbhx(@DJ50u9JbIursBwfuz|Dc3MWB*ls;a+&jAFqg<@PM3R4(y>K$-H&@so- z`M}i45fHICbueZw%baM%zgm+$8ndJf!N=#Pdep&m$38e`ofr zn@|8(_EN;ww*UVBEJO(=%(9W$5ADSId?q(j|bq^|EFMhFIhqIP8kW0Dd* zI#uQuTj^7B+}oszoZ{WzIe8Nik1_js7KjSefU7`?p~dVkE}x~(pOK1m|5Hi{t)`EO z*q$D}qO9b8>=D&=^t3$4G(FgOnEvz!e?ooFXX$raPtr3C(FV$Un{&yQmli^)b}er8 zNoP554}&tLk~S6=(hPIa#z>mxjdZEWJM-=@(P0zl2NxEe@ z{qWvx_}LkIB0glFKrv2;Q}=uH>BIE+);*5YT_8dLMZ*V7`791spbAO~!6)j_%nH#5 z`RXZjYN@jldV$_~9c%8NF#W5;9)XWP`C)qR-h*ozByH%;>&Ww81P>aEn=hWG9gIbJ z@9o8C&aIl2DxY5VXn>i?eO{5T}q@X^B$!YI5V!d%4&nc?Swst$T-?E`R3 zX{pW*F(o2vHH`Kej1A zMYt2$R0TbyUFvLIVNJVv%(LmkYsxaVOoU2{Ct#_-MA;e@4v;AUTdhHhQ6$1eU<4|x z2mw55hSXIGDuiNO+}Q)qw^)r3Kg=VPVkRMbAP_KmW%@H*n_MwtZ(VrQD&PETkgOUExGqZWQS=p;5C7 z7*%E*Ul|2gK_Na2Te`Qx#jm6cLxyiE27Xrw@EByhZ*Fq4uTb!#bN=8XUVJwU2=U}I zOmZ|CtkmaFe$>6}6Nc$VXl5Ync`0 zulI(pao>3|g%Hs0a2Tw@ZP`dS`&wCe@K_-@=(`LiEy$9v3! zLfBC|m+=9y;T4Nk7(e6GD;-ZEO=8@@D}FQ{Aivz5I01ffV`_*7Oc0_l#&dcVc{cth z0DMAWeX!40&3lZ)HXBg@=4XL|=H6W&(lEJCv2DT;XWPLz?sc&R{dMsO6e z!WgMc;FSx!4c_=|Zn=iDC+pBzj8%HEaz_;n8 z=@UY3PLVS7@(p|)=DqV8VUXv$fYM=KrVv!{9@SA=^eA4K56xJt-q3}wVPRx`71A|&aCr2(`n3wa9_`xd`J(A9&^2?Dwi4%ksio< zF(>q?^TDWvpD_xBYqL+yI;)9@KO;3NYq4%@n@4lxjTWMPE=XJSp>`s_5U!vr?+1O^ z509X%4Jg!I#p~=k!p{XqA?%Id)l*hPsk4;X8Jht;RKPx&#XOh7M_baghK#~dsO3wM z$^lJ`P$a!3j$(~5-WxvPZ$B!G;?G=pqgyd{z;%s<5)ZxErfE|7<2uk0aQzp*#iMe^ z6bEr2Wl5(V&wP%69J9fcRh!3!f>^H;HUd8yGE~l{&q18-b4lK`jvcwBACH~Yb1o&w zH;po1vC&ooJt%~^G`+?M^v!r=6R}nu3-4cAzYW=StK z6y+9aH8Jhj8e8GM%YL zq{TI86kK}KUnX2G0F90C>N_j&D!Fm+z;>wn@Cdx{4i;sY*cd5#rzl2bHRvdT6wDBS z!N5n5UE$Ry(sd-P%4E{kcxM9qo~E?Srpg++L039~@3)D9Trn1N%hM5+gGjw$yEXe_ zmr=Af(ua>frM}}5>mEG`9%mZ!KI3@3w;hoRjV#w_i-{wUL_|OyiqSwdnjuk7H`yDwAnC88Q- zHj-|vQJkR7njVuhR6OKFGYpUgHb7H#N2B2k1fN|Ur?*6|I#dFhgKl*Cj4+lbc#n<# zj;K;~bofPjO5M<2|518$Yn|iYA7N}zQHf2`2Y@w2+KZ7mcF#a}(;U+9aoR&Ue}@;m ziqEo(tPyurjGPj>UyYip%?6x^ldozxb*ETE)VxY<`2}=xf)O+Q7CF&cOxC>Pzwp|nNjt^1=J#z-b>G6B_(=>+pF{wda)^3H< z_?AeOs@DHkdj5{hz!dfW!Jj-#<3l#M*Jpv5>POIQvpyZ^r~3z&7#AP0Mm;D1nZohc z4;qpOx_}N2bAEzE(!p|aZ&i)UoP`&qfCH;ED24fuY zjWP!PT0cGhY=KA?-ZPKFbMt8nK>HjV;6`9i6;cShv}75f*eS43@@1kbERb|~CnF7q zKV^h6d_!j7lSy@A;aLpa+*iFqU6@~Z?^RInW`$?iwxT`f!PAD^!ESXTo@H@{F@fOJ zJMKw0)-??_rog*?PewuK{CEil&4QGEB=+0a^whYXM8zsKZcQbDe%EkkG6a! 
z6fa=PZDdiUM@h!$t1?F;3?db>4q+t<+`J{Djd6&0)`o5Ac~_TH`1-ef$f%+%-xq4z zWs#T)kke?KIPQ3ze_nIvelTaRa68zrQB~;;F&MWu8H}fmel55rGhp zb1w`w?m1RN=Z>n%986d^isisBbD zuKRJUL9*9)JrF2Ze56l3t~r3h?+P~+EhWfpxCvAjzxYbCo~WO z5*)c^H9$-Sa*j2?TvwswcpZUgs8&VI_f6S4L9r5_HE`#~L{FKa_fT{IC^iinKJ}y^ zl`IW+jqo_|0ma1ma;}9x))S36ZiSP|l**n4o{HW8MpZvXslOuRjS#g)uBWxQc87#6 z)s=guxbV)qH=F4g&&WBs?Mo>C>cBQ^>cu5+x?WuermIB1-e%k}UyRLpcdf_9PR1Sw zd4M0Ij1D;X0l%FoDMotmW9m@Q6Mgx<{3FumpiOBzI~y36-=Q@XNmGoZOWSmng#yic zu7jva9B_kMo(p5=6i+uwpQpv#(4f5=9SWLeL2Km7d%z|NE{+rouuSK|EJCHjYuNe2 z?_p$PjOcAEfIByD)<};jotpxBMv50ALcTyhmU=XL8%M+91d>pF-^g0PKo|C3mTq|7 z4Wj1HYv7gTm5k{I9@mY0K-|ldysqcjzcasZwF4{fdhaIcnGNXd?4cWILB5gsKgfo5 zP%7|{Ujd&Ay~QHLakM2J67d^T3B-9A;`Mhdr+B{Q`#O;vqvHxZ2{(9gJq7Z>p?gY% z#W~F~!TGx$KTkRD8ff9|$0(B4^~Ae|EG8kJh`j|UZALRKssUPJy-CZS2v%p4t(!^* ztTodf4PJQQgmYLfWo6E_VI+HY<~kh>XVnlTr9+|c9tv4RKN&^TTxQ&n#s|!C@CfTt z8Zuo}x)XmIu+pAK?kY<)S}Z>ru*ye6{$*pt6Vpo2h-=3s;G8S7Jq}lMQzO5CBf*=} zhp8FUNe6d4{U$u-DAjo0<#XpHj!_;GWkkLyRvGT57oalo3u2zZBrbcSM#kBQQp6q9G2y&ImU3-pp|Z7prwMQI|9rouQc zP+-3zy|C3_?h2H{?0ief%T0{4^RzI7TzBq5)QqnUkC?(iGT{3KH0->yEK{2ZQ|)p) z?Y*?ZQ9s?gwUX{kJz%_^fLKnO)u-upll>KgInvKaOTkHcwofIV;VpnTOJBc!lU{zi zm7X-8l8;UX2IHw#R?=*XeJVt0UB}N}yiKPIOW=jQpb$Z6am=9^)P-kGCm5syj*DK# zYfdp&Oq?PS3l7deM_6b6;0~PFw#4Res+X~5j+nOz2SOBx)*X`y*WKAlGj|`RW#Dyh z>xlKgzx6Kt`Ct5>^y1aa7{fN3xX&=W-HW<;O3D?q|Kp7Z>HhsYl##`YkAtSl8do2- z%z z3Eeun5CQyFp37TV*wMDvhFH%U3Wi8kRP^Ziw_eDQ8xW+7PUfxmDLf~<3n>S4K>Wl# zVQ^t9fRq^a!(c*q>wT7RWs2dQ;NegPp?Kj9!jD#HD>|hKkO#(CpePwKbXPL)>b+37 zGcbq&)&+yIZ-Jf2xmZj*1cHG~g$Mx_!jMh~fW>&l>rI|&Rwh(bX%iUq=vY0(RE`iv zz#TZXPsUTE<}qtK3ZYA6Tmp{(2BXr;>-2>3Njs+D_;K9dhevrg(pNyfELJp$L&qw! zMux_-GHVr6nF^Bv1LdpQk;r+G~Qqkyy(1)H1Hq%Bbrox@-mWP7yxe&MrWyraq zjev{$DlNj4;bWZK4`s>rc@As>ng753$Q*s1O9~h1Tu)k&@j7Q-%gPe>Vk+|Ao`hyg zG#apq6pcmcO%O7zIfuf^xEL=4eoeDLz%$-cc-6s?3tafzylz2#zpLykgvE_y5T`$} zUQB(njp0f_EJE}5h?RbTr{i;mG2YiSV6~FMhGd9D-mS7Bn8h9WZF~I-% z2EOvzxW-AOFL00RW5W32G7JG|l2OJuTX=r-zDie)FB){M>iw6-!BZFjG?UY@cn>_* z@l0wQ!!0T6cg*Q$C}j0D(lr<-@LH`0Zpj?ylncir)v0)yvpXtm|J#56Fr}OeAa-3B zj)i;TkI6J2-nkYm+Xq>aX02bJNPF$TM)IBZ}5~pwH z2p+_mvmZf&y8w9^uR0q>X0#VIv?@;k2aJVR`HoV9(`uq89r%UEd0=z_k8@qdLwa?>Sm2?TUg<{oXe4m#{93ltHFt(m!s-MTj!orMBwZ8A zL=6xXBqU*Hpo-Adk%BwPAOj~_;-w>JI z1qUKgp2JdQD8L4rHGOx2G1TQfH(~0n6!xGG#w(wS^~{(|Pb!Zy9qT2naJ+6rp`aIK zQ9%zqv>tPVUeGgd8V8{=&0+v^)HAP;3vIoAOYN#7cmkfOh&48@DkrUc<^H4+X9on2 z;GWJ6q$51cIQSs0BE($meCTxV19oF08P3uENfIbg3b z$7Wu6A$8*??9-9I4cOb@zLx$74>W7XR4^l%8Ug2Q6rZXrk^0!ii%oHYGw^}I)8b%~ z+qV|eUt|YFnz@=Y3@>kui=hcCJ`Gz;84LgQo^RF12SY4c3 zftRosic`?=%OT&@@u=}X+U6_+_Lbn2fU~6~a63iG(_8%0VKinx#qn-0{r>W8I$FcC zyokY9TfjSipL+AmA4)Q0MeTYF^@{?hS#1zGsHM9zchl#u|G)@{jdK$-M_WUTB8aP zdqYfp^|Z?>&XHZ{>peGSFs|w4*(ezEYMY8wc-q?!SHtmf!la`Srp4QL)142D1TYqy zXgw+yxz}XRW2Udzl+G6JDo~}zQG&kSLz!dVf(dC*G zIvmPavDR2*MI4(ltqKHi<>3-6QWvVsc#KCxk8&v12paqF7&e8io)JF^8wCJwusa+} zNTvjV_-P;rqcRBaK^-wR;jghkdxik{ZYUCkc%i82Ic%K|kYRX%2p zC~Gndm$-nU7v4tR$s3llpw@uV&|+>GvkV@b(RUVPG!Lv+(b2F_G19B)J9>*Fl?4!h z5auAR1xTik(B+%C;w<@K^qy>{=PNw_2oaTu@X#`qpoh7-7PK-A#^#4EX_2|8 zGww54KwT@f{4JbF9$E!9Cj7?Z$VFQAd4-vZ_cb25>@%Ts1FP_a62Z8I$GsP^c{PMy z44E0>DaU;Mt|=4F@r|(bw@f|8$GXsCuU+bp#82E~$W|5dyKRXwm1-wbBUUe4K|{nb z@uL!EKW-G2giIYU`bGl?Aq)Wov0gF_K|`bsQUIJ&Maw0g>uGYbt)txQtF(bX#^9JO z%^B%|U`P58KZfWv)>H!NIXtk3x^Fuv% zy^FD76nZ~1VuZp|9&nHibT&c5nx?Lv28ghWlbAo|-qjHlMn@_)p%4L|IAbxguJv5g zhI7ihRw1dQXa!#b?!Gv@{>Q-lk4<`tC3dA zhfbj^xv`R@Z6*?Wr1BIW{&ivP!&^h@wx z@D@%IDrx9V9U+)&0f$JJU%MxYwOgdLobAA;dR)_SGQvCE#TXPnP54=b9N{8H*cJQ4 zTo0x@SurUp+Q1Y0o=?xgO|%-ukEt~|LdV>JQyQXuc}2#n{cC(ir5TrMzLUoQc=7Hv 
z59f9mKY>G@bN-|&*HZ9g?wfw2GRXxLc|GwcNrcQtHWpi>$(&^j|JAVX_P@jR{q(b`j^7o)F9=D^cw3N zh;1_*QRec~xv5P?X(Gkyn!njj@Q94l-xhG3s^j=?j5LwQ=& zP{31a)T)XC)<;x0E;!+%Y#*e%`qLI*`dV+>u7);Dz zY)p4EX${#xRht6)g*0x;r1;IxU9pi9C#5nI8qzSg2*nK5%SqdqEg^!X63n& zNj&22oz!_a@3=Wiw@v!qWCkS=9G;+PvNae5lqxu^H|1w@zMAfHEHK9whJ!`j!Zp!> zw^?MsBhXitT!S3Pjv?74m8w|lq=nK7=LJlq4td;7_`(D{t^#WTT%A$ZzB+XQO_}Zl z%@>GNR6jr^Vca-5FNgc-a=e=^C!|zj5SgZJ3D$m>^s&AUA8JQlVo;G5X#LnO8?6gf z#ulu(+KuIWv7mwLf(B)vXS4WgK9WJD4=6xEvIHw}g zDTM}(C?(wUkTK?L9UiwtiRl^moNM%ia;$y!ZkT^B?H;0F&#m1_k8j;copmx#sKPYG zX6CL(DqmF7G#l34V^ko@ay3^;Cr3CY@UjzFiPFjotiMb8>|K&lw6PJie*41*=y?rF zuO3h?^Ds>lVVI(h{>s8k6rlO`>C^P9U;Qfm<>$Y{+kTP$V(%+xlKndHsHIzL>Gq@d z(xcmJ(B&YlefTg{mN@@_iaN^_-uUe9Qkr{jo#Ug~)O>K9Hg^xxt;Ks(0h+=C&t6v? zRWFVXA{t=@r4dHm?(t6g>g8Vr&$zv`mVWW*XE;G8>F{bZEnp>aV19QOIBzMh_JJxiS;)tW{a>j%f2^UR?32#dJj zv~bN!RG_3Npv>p^p)UR}i=MjKw9YwJ`+ zMCDDUe(>`z(t%r^C*U@{U8}I zfd?g==TyKwVliBTo2YHNpe)WPvDu0U& zwJ>uVFh4yv>YvwR;CUIog3X=-)M#5V3+#;B^RsO|{@Dbb3CeE&ZmMy zIK`vhf+FLOlnMng@C1}R=SOd)sRcK4s+U+#S%&d?oMIiu?dI1phhxCE_~2E<+vJ@2 zBa?UjRU2t71inUyV~$7@UlpJtO{FM)1G98!%7phEPACB}PP_0LW63DYG~#n!=V6rx z$9)64LN)ek*phP_ec39MP1p7TWYND0Nkl=wPxK28Tqlk=JfMu1r4;b<2=CB_ozjl9 z5=xQyxHbkP0u3}36vrezi)U#typrM-53&_}Zpwao+uk*V&CS1=Tl*8&3UaUcz8*-u zxC&t5%*F%?9e=Y-fluR=^~XDURlLtv=m0^&moB^^&ofv77P|a?oSIBl3kv+uMxI6P zJ8s4o<3>>$6E0Pm%w9?IKkSdjDMASmLB64=MF#9L>RlkCEbOh#j|?Dg!u|sV;`38 z)L3&GY_tarlsQyOSfm3L64!<#DnDqkPhc{}%Z(BiXPFS58v`>Pz+W^1fWbSLT_(OSxY5-=Z*QPq&zkdE*^7fXdIs6TVT> zby#C~u?x_cn~d$<$WoWQeK#0;B(rPmiVeTcTNq7%El;vz=R!w;DYQlml*HmqULv;L z_XjEPye}{t;4{`Y{mbWlPWftuuZW)TTii7<5{jG#%RpIk9@vE9AY%FGsWnO;yx6&m zY1PniuJq_*oYA{&`x8zi#|w|w*;YpJj#8lL4Jz3>(;9P(1weE%(j*2eC%@J?9|7Qd zj-Bc<6O9XKV}nih7L(BE>QPnf-5DPCQ)nM$)aa>n)S_BUuf%~`9h?Z{-4~$6={1xo z4k3$8>l~HcoFUJjbRySrdx)aXIGsp_*~L{(^a z7DLPVIKv?@>{AArcKSmyF<9#*6yCS^kc;# zcT1%jaVlSW>LWr(Z-AZ(6+Fvw=}Grmg#O7lzR9sQ?{%amh2>P`yNaQe2EwwZTN$D$ zJ#98Lr~q2I!4v~7BaE>~`PFJ znNZB_O+hFhg{uk+Lh8@&{m*}q()Aeu!mpr-d*X(F0~UA~CBWh6Xapwf!eu;G4FZ=c zJz?DNL@0|aKHCUf(WcCarb2jl!#@>@!W9CaIgdW+TW_lWttxGk%T3Adz13|&DJu=er7wS6$fdZwkF5FoQR`Ww1hvn>i*q0Kj zL)8God0LnO2rXspO^roOn8u-CrM_iq%-Q0l_4?NUu0+~nKaUl7|pC)8)mo3aNC)ZE4jHI8vqBAt+ojNMt0 zi(ipDWe$-&dC-Vq!w6H@GjPQQ2fl{}$e7yX6GZq>xM{IZ!H8hN6W)REnzzIXiMucd)m$b%}iR*E21m4pbM3`(G@I>s-2e5 z;SHFT9teSPQAt}q=fbn@>kTc%?s+7y7G1bcnMZer^8jutR2Htg^K{@2K(47#I)B(J z3;>OcHUT_MlU(W=P_wM-S_1wmlE{r&hx?p)kEQ5oP|mFa#M*~adWpJ)Wp=nJU}SV9 z;LMsLOa|P9hd$Oc-9aOUePrUXF2S{A>qjOlU(m%K8ZP4uCdV}f6Sy^R%{h|RLZg&+ z*zIS?*99Q`$>L4a&)zC<%d?qD`kr%~*Pm<5p9nXA1M_oi+mCn(lh)r@GrFW(XdAwN zlP3pw2xruO@GVX0vK7bDk8|VN@sNyZG(xR;t?m+9D9$f<9dpDkM2|&P%kEKsu?ilG zSjo~v=lFy+(8jO^;_wbCaG=+-4h-e6)__X(AfwI)#xNJaW74^OtC`0^6bg5q7Z$ZD zaUrL6?aSY^umH>#X&WKMpcJ>_KMvO5S(EjFoF2hBeMletM`E|nLGxODz@cD}ewYY; zz#2e?2haAoB;EM?W&=!-cOyxK{%YW(MY4U}98t7WHS{rFG1tdaF=4T*7442i(6LVuh z0k}q!y(k&w`l+_+%mpW}K^b$78-;It@s7D5hwQLSr~tT_a8QI6Pe~v0Z|30FLlmnv zCB8~-@qLqhYst<+`Tb1Ri)Hp@`Rp0#}a@qeEiV|=`M8Kpe&dNKe#^52oJoa{Eu}* zuaHk&a(hk*E4b-Q-6C(IVdwYOHt83fNVl<(79T!LrP~jXEm)@{=$F`~@!8+}SsdlO zORZn4TCC2_r?+p(oh*~rgqLK0cZ>S_`ypdLq0H3M;`>;z%sXL>nUg{K?ZN*5MsfqO z2%Bv9C^hkXG1le9HLQrk^xca$>Fb@#bZg-U$V~VXn+z5>Xt)eLUJ-&g!K+kVT;zJ4 z?`)!=a>6v$Sffm?SLGNF)O0#P2HjeJ%u&~ySSnlY6$cLh-)f`TA}AIX=GY)$ER975 zGb?DK!>}~XyK3_?R|qwB1Zpm;<%PD25M*&H5LE>7jwcu-4e4$b(Yk6}^)>Ft;RB&u z*_=w3ZUHy~|3!S0cVyZ!3fuLkfLUx_g_B|&J`xG(-`%#pA374x-w0}%nu6bVRU_Rc zm0c1%%h+_mn)II&oMBAxZ;4b!Br$vH%=h61o{z8rV>hwgm?@wJvomSVwzUW%;S>Ll zaVC+dXw_w)n?x98%(~PZSU%Dyk{|BL}GO4ol#%G+2#(Cb}#cOC2|zFdH5Wix&De=g9Tt9H~V5xATYn z846Iq$9>nTu<2nCqpn-W7c)wu-v88ax-$a*& 
zgi5tP%q?Svv@;Dksn?&t8G})|)4l2mHqx(U&^+FCY$0&w*Q9V2{sDPZy_gG>JFZu7 z#h~|LC`lal4)h2IKM^XUtq7&5bg58^JK!~h5>pLr2#t!hU|EA#z8X0_%zFt>1>_7& z2p(wtoEm!|6-B^Zq1iQQn9q50fm#;MypD4U-AgC6#&Wn`g~xFXIW>tWeDHgDz1Mn_ zXc|hRz}7MsgEr(T&i4r1cj(u)Ef=P8pA{s=N|!3h<|;`C5n7;44JyZJ8!D%MtN>6l zgu*09uOo1!qNdXAp&^!G%3&PnN!K`v{{@Cegc|8vB%u!hBStB7A@8yOSg*7n?}Ask zf#J_An=b#DUc&kL6J3#*F?!-;vp>MDYyWSQT7a68iwsd>mB-U&Ek%BhcV-2IJ5<> zS$pz~C`-kf&gG7z3C&<7pUoPAleVEJFMwy~&V+tL4-pQbUuCy)po~kqL_S!ZsvhFX zT%`OP*!XU^#kL)rH4R(1t81*^I{hnKokxFkI%t%RokKc~JV_=eRzL6ZIFrKMK(HE_ zkafVIQbaf4z_CF~z}4da5fFi4vVYgnOMv|>k~Cf?`ZUO&{6i`Q+sS8nB&^Q z%o=$W(|8}Zd7nC&oVqon`1|zC{m4_Y82gAE(`k$@I!{H0RvvHw3$!;emeVVvXswX= zy^Nvmj@>3DX1XX{>=GrXky%T%F79XOrY(I#>II7JH%go&Mjp)+IZZPt;bUC+P4d8| z$?YOTjCFDXe;~gLd2Z-Yw{Nldiq_0AM6t%{af*^_?R4qA=v+}Z6G~582VYU*2Y0Tt zjjMPbS8$av^hiFhoML!fuB4+j`9TzfZ*ho1hq6JvLpDL6{LkyYho*Fp>>0@%oRTJ2uHM<`xE8hvYOuC z_=xgLhv{HInW=7>P|h;z8Alq*{t+^+I9?_%j>;RrvBZ(uv*aI58W_QBJh^)tycXEp zuzj39|BjrcHd|;0B$lP&4Dk(iIw|~EoOL`tJuH%&9lDZ+)|Sjn0U5a|EMOV3iodnB zJaSKKQKHUeGGrcF@Q^i30J?_weBWp#@A9Ldl~L*$G5%JI`ew2S7NX1qWXr@ws@{qa zoY093Sp}-bGsd}HI5IcYQJIs9O9*quqHqcef!~HB>@RyDd0tD91*poI6&m1SjCu-@ zUp5t=A`Du^SOKqh&G3doP^CvsrjkC8S@Mj4$ZT&s&jFi431${>*(}e*k&S#Gl6>e# zQdWT2gzw8lea^AQefm<_QV@I1d%`1#9_QK=$NU|NCl|Alit?kNV059t(yskFj|$;v zpZi>nu|!T6_hr*|8{TJnkP&BtfN`nOg2x`o{WTa|%$2kN?6&6-%L)~7s(S3ZUe^m) zQ5eu(c*u#+*_76m0>YY?-VFtj_QJynV38BWW8o!-PH?td&o4bgT#hS`5ar*##SQ(3 zQOL8Qga!_wD`}VhrE`U^3Wj(J<|7XHW?3OQdk9wIv@{swbq)BR@5je^rwy8l@#S&l zd5bv^Gtn>Y^Q`T}=}U|$j3OG74t-X|Iv6?xrGg*1D%`8_x6D@0`OQZIJa8m#P&#Cc zwypQuya`LVc;>xIoh7xh@sQsZ-zxsnspE?zTj1c7leVlvCt?*!WL?B@NbO%`& z*`EiS$SZwE7^hK~cp>_g4=Qk@1Q>WSLgQST2SjH9kHgV$2g$&!L7emF05H60DrT{+ zfyL9aR9rkss-yCcC&&ot?(vsc^A7hxpAPdt1g z#_RZ@@iE7Ux~V}AWmE4GLNwMd<2QD$hff#`18byINZxhaF$bB^XIzVa;D~4j4O;|0 zXqYibCmJRohVRm3UaS7;ZE<6%G>-_jT~GZwW4uG=Ra(~Jq7aQ)r0+au;ugF~+uYE> zAU2gz_7~6ccdS_-fwm*5mbD}ZQMb}}QAmw!V9CWAbv`_LS>9_q!52VCUjqRC8e?~D z@TzOJgLg3%@dgjUUC28^+pKG&C@N(d(X5z&(cIRlNK>py?>Z+@9l&IMS)*)a4J zw+qVj;I+TF+Jk<&Y4hFNv_HF_K4?5Z0VWBbnw;t^o;0Vo`3z-uj)M+#+Vt72+F`T6 z_StsYMENbi4@}@69^!62ojDf)s`8||a`WJuepQiQJGIPWZY<5pSf89y@@?{*n z7rSp$>zq7r_=ZG3S0$&2DiWjyrR4|rZ~#%QAoyuYyZabWI#l3g#1ByV;R*1e4x1BR zy?#k<4?B*TkHw{B@@G!d&hBnH-r8cm(0Q1*^)zWe+(vOPb72@%|6M{RtZ&Y8c3`*f{X&#b2Z%`Jf-(`xyos>m0lc zj`z}}+eVmdrnkrNFCtDxYx>CPHV3Et=qIb_dyAYj#-;>}%Y%chbbt}%CXb)o`HZL- z{(WeCX6g|+XU`OP!KW-+Rp%7F;o!Tpc{!Ih7JrcHtGCkB`IoL$ELp-AXQK-&8mt)* zs?gALrcj7=$~BR0G99$ZU<@m&1OX)0ew{G7iTpCO9@bbW8o^|;H%LYyPw^vTh!8ad zspnP#vJ;gU!zi9s;z!16-+GfY2sA`Oxux%TLIsF>QGSAV{n$nP(3y-~VIhO_E{}3g zg$bBsf{aln8J=0kDIB!y-*#-CjkvIV!?a$fiTH$fh;N?mB8JRqszYQWq@?4N-G$(C zGXVVrH}_3BjE(>Q;PbLx3>jlYI)wRpT;2;Mg!V#V4J3jWo1xiIR%Ogww+%!#*HLi5 zag6=?J%$kiS7k>gE-tJOtGC%Owa@xd>2r`iuLl;tNnE%6P_k$*cCFbDABCMpkqlV{ zJQ9k9k#>M8aHk?8?i3Kxi_Z$X^U5!tVy>A}Jw-Aue;k*Jmf>7ISt_rr0y?yxjKd5Q zGwM9w%)f0i7vflb-U8}T(#QeD7?~Nr!+b(0@w`+PhJ($)et7B_ zC+bxI&J#SF4XJD(sUC1unCy>tWDb6J+`?uzH@SB9?ce^kMy_}W;mjQRPM!1-%i%Oh z7xBrP!u=7A0uOpiE%YCTE4bBg7cU;G=rqKxPLu&XavBZc6{RI<(p4c$SG@@(8qR`w zba`KV#@wKY_*79t4Hz{rGz3hQF!^1Woo~IVeY~N1tUXvmBTEHDIHWH+=AL~*P5j+P zdGI5RHkynIIQ8CiVD$5ZLQnC&Uf>isKS!BWDP!Ihh=HSdln;_&2Y94SlD71eN3n`m z4U_OZ1N8*s+S8*nJ|s5B(Z%5Rm{Xkt;{3!%zCF;e1J4V!m_vR#Zs~2b><@O}I z08->Y5FV3kox$LVL5%sfeOUEC!|@W2zRr{pp5k+qA2bglC)Dm+Um;PKqUOMM7 zcq*YaaOrxj(4KS<>zL~$x;g|s)X-nW-bIzCdqz&;) zQ~cKR4pX*dmNf)cXiJ{rhL(D?HRIx=SKd^f z@pxwOLJ~D-aHxRFztIt8k$vdFmF_%2O=C9nt?|p?lqW3|Sov^(=9@nw3oxEB2T8*2 zM$v4cUch|v6bijHHQUh}I=iQc65Qka8x(!Y^mI;Akbj)y z7Ywn>)w0)t0@=S!Ih=an#N-6KNJlVWs4j3wnVw^k&aX%#vGhu(rObdm2Y`U1ab)=p zn<;luGLLXV7%gevG$~QBV)7 
zJAT(uZ0CojMuN^VH$96N+LX!` zsXOFyQ;p^E%*W-oP>P!rynl8+Sibu7>#~kAJ4DLQ_h#NDwF(&p|C+pgpCh-o@l?a} zEF}f4tYaiup3CVZR>I_<%#ymfey~vf)ze>;fA~9pT#neCy4OqE3Qv&fQ&u@1+4|3J z4pqi!IQI5amT7=;S?o-{1+C9>$i^Ui>4XCK?HJvwZx+M5EnDRpk}q5U#{p?LmwglE z@x6!05)6zrjv{{Xob(<}WuvC(hrjipd`I~NPxIuzvH6GPAYwKOZW8{ulq6 zI;_kaczN>h5%b$szFD9z4EQZ;p)zNCXKUr@ezWwm^Ssd@N-Ez+-<7YrTm8xWTxqd6 z;Kl1j==Q4oJ0JXhdGO#qM+!r?yRXYjQqgt}nq_`$zRYoW!+P`c^3}@!%1&tN%fbh4 z-J)C*b8MNR{j+DR6r;#QxZtll?6Mxf_&jWV32%kpg2Po5ddqNqv&W{GH$*!6rpww1 zSq8}T!NEs3NEXXXV;p&Tg7bll0NPqO`Kz+K(_Y@0dapcr#LoEt^uPSi{`BzRq^vA& zB6JOlV8OtgTjT`RV%f@Jy%H!VAY37CULkbO`k*$KVH$*?p85I=!cu1Hdgdd;{x?5S z&{$AO80VhCQ}(VmNX6HhaDJzWD0U)Qg(=U&wBqSa!wSpF)D_?fvD=?7@`-291>~>% zLx%Fa-U=;E!)-EjVCARnDClLl5il4xQ3kl@HC;m7NxL$9UdV3_%>}9|tCy-4n?_@! zegEWHyJuX&6$Z$=yyYYL#1naa%?m1k-V5`$l{_d6yu*(u$hpsR*Uv}K;kTOGc`X>Y zr>W$a^`h5C{ZR#7?aT2a446|HaUuyQ05a%!dL1C2n|Bn*(m3mo6eq!Ifi8*SxQHGrK#1PrN%FV$k=TH)n#Lc$XW@ zm+5CvQ|;>-_Bv-iTNU%*#6HrVa{=xYo_rLlDv7o$Jy)2SQ$Ai-v2bqv@e_N!x@Ut++Ozw zUIy?a4&WhbVDQVZofmg5deo`3my{|y;1inXXTs{vko3X`rJn|ZQNE!Q!S$Hjas++* z;c8i1{8icBT*ZS%$qHaIx7<%Z$6a#zRTaxhUnnK4ank{;L+Go{vmWIG1dikL zJ=ccUle)lnRui~DnE`jCCKC-{9Q0`s>@kkFG&fsZBsHg>^K;;TiIiRQnW~5ZH}fmq z=s{O$6Y$KbaKhmAz?*WSLEj0p8yFuc!|a0WADr|p#^J>Bjd`p`?V=0=uik%49u1Rf zB0al)U8{Xuvs97FjE;~V6rKh}(uapesGB%a8tlyM^>j#R*ZvjAXZ&lS1bGmGSPMTF zAM$mS7$Z|r@Zd2dT_M{wfF8#Q^_?zdF^Ut%Dk2+p(z(1WuMx;GZMgufyKpC73 zTr2J(%je}Ej+^Vs5_kVe|I%Uoh?~0JeC39Q9kYgxDKmQf>uFoy8rgTHp(W3xP1m>z zo!)Z=Y>W)ns;RTWsAI!^vlAR6sf{`66fWQ<_aJi18ndQ3o>B=T2>f}r&Hz8jeZ_Md~%eKI;Kjj}#uz&Sf7VGfJKX$wKm(T;$nsG<+euv}s(pY*d~btegNYt^Xvv*v zyud^XE-?hQIPK}gsEcWm0MWp6-N*QD58w?(yEodbPGkw3L-Na|q3Nl+Ws3bDo?zI) z4&}{uO4{Htm-ltFua>vXZ*r2?XxDALyv#Xc z+rl__v$sM!L_xaXP1gHFXWQO@UQR4pe^@3dO*H$6oyaJZI?ygIs14aM3w`vK<>e)j zR&Hasedoagq7nV&<=RsDi(mY@Y@(63Q5SW9Q_WUZI3D-SG7)8JpECFT(m0C$$k;ST z0N-Y;tg*o<(#0s9L&>Fn6wl5}irDwv0-r=1*kADwgW&-*a>_Bczxw3M^0zNvl`r2O z;Gn=!NeY(7czeQZhl;7jR2#gVI^w3K9v^c}tTCsdp`ry&0s>6-^9`<&i& zoQRV~?gcQLVk?i-ao$Gy?EMcuDj&S}QJJE`1=xcQcZr5*EJ44}-cUD5W;kZr+V?pr z?JYHLdA}dss&}6fS@0wdkMKS9QTKK9KpSVkvawFP=ma_ zvonrjqJrW zJ;T6;+`}Ju?$p|mKFgpKw1((q8ew#N_tX>>4K6~2^reEO;Ei|$CZ4q)#iQ-mZiEKR zMgd>*9re;E=xVBjn`q*3hS|uJR32;~3MK8@rWXv^SEh)tf{80{qU`dTr7>jq;=p>8 zGC^O{n;Pa%n|$S?VtuWYBbdSnZM!}kVR8wBQ@}+D4{SoN(p&{#Qiyoog87$(NhMO1 z8VTVN0c+U_6<{~Kh=WMKFj~R4EymB>Ic9~ezP`GjpenmD&~%?ia65lhP*y|5aTsQ$ z4HzNa2EYEUGI7C1O?6pErZ5!_nY$>}>P|gAVHK6&leE)}T|C;pbEV>?2h4Yr%zE{_ z?)@ky{4_t<2QoGEr?9@#%PqZ{F2Q@LZ_3y}TjpMc@5T@cY`k34y%@yxGvZR>PG3HB z{jq=hfH*QZg>Am%KCtktW6px5Z#oJsh<^pT&#MgN$Wr>v+bTUi%P(M_kY=5#;7)HA zzl#SI!_bVvP^6$J2&anRD4rQh&zzztw_~(3<-lpG3Trch+dVrPsNf%^#qM0Au3d|L z@8NYvkA_Bp5m;jc(T{>ym}6`*htix89F^55gp5HN)r-7M+DX>j8$f6nM7g64 z$0OX1wXRQS4!X|14St>;EtkeFjPqa!ruG@-Hpqiy6J*lu7>j=f1`!T;hNM{qwdEls z>3AZ=Eo=E4B^1?)vC%fLokI@;2xrU1pjLpf%7au8oOf0SaPb^`N6n(kQYF%rxWbvC z!$Eox7rxVAO}EpY%CiC^Z>Je(xy48SEVji>BdE0TT6ke)ixP}4FvN3x1 zV?1UKz%Vp;jv^?3YQaloNxx98@JyB~wtD7w+dsRAJ+O zoEZ#6nU=cByc=hbxToE^)}a?=4N86|dAH$Lv?UekAx{*N?;Z>wEu1?y;LY?eJ?AH^ zi3^I^r$6p_3~HzNcfsV*r0pk}|P}nutB@H)znG(JMbX_nd1G zz)Xn}XrJ$<$an&jZF|q?SN2Ds{4kNU$cqnl$S?38Z6iUM|rGG_35( zLkdjK5l(B^G9#hWE;iF}4icVy)=N8wSpfScrGHMEE961L19^FhjhF*{V;tf#Nj*eP zRibeJX8Q}2b4ucX%U1`BWvq)$lJHieZUX~{Wep{|gJSE0=rbCO`&%f}>qZ!f(oK+R z*vs+d5XjL2n>zO}I^i#agdiT?;wW0)@z8}1a@^q-8Jqgc6?y7zzV9Nf#Hfq&3eePA z=D1%JS&u(%pn&(|9d2E2;JN*q;O|by$6x^d-Tq#=N6DT>2iO|+s51EHIvgu96R>@`;z<4~EI#5kdzXzwNKp7p!-cDby7^Smr}E=JxP zE$ts;GcN|*7KZ1qUw&Tx8O8IriEzx^dWa&sQMQQ~bi2=mdYrolEj;NQ3@}b@Q~2!S z!Xl|GZ%HR&V>4cDi!vA$ovdGQ%#e;7CW zBQ&n($EHw5p)Y8SEGO1KYaN>ILBDZh``r}g---M(68!Yp*W}l)lsz_UtB3mB 
zZ@>Q#xrQPRU5w7(Dx1%qm*vHm7&vTjU@mXnq1+LiaEUsg2s-A(a$Q7=kP#gevM?h0 z-n;LYwWYV^*Pnh--uce^<$FK;A$=li@Y?SZu{mlZH4IWvuj*&r>|wP0oK&Tc9z7`o z-M7jxM%_tSC>I7-F*MD(xikJQ{DZo!!aPZ^{8B zrRHY)h*0UIKViPw%1>wi?Xq$BvTW@($^=o1Vcv#sf35ypT-aiFV@sextbR0g`bi7g|n1KlX2(J-wbZSwHr{gKqkBp{W?eLjwa* zILMCtYs!K@qex=iCYq3Tz0Xs0;rQiJVd}JXvmsStruEvMOdKSsAjl-Rm3LIgon3`{ zFa}fgr=Bk#5pUsu{x4khMSk<1j8Ude-~5Sp1|pGH3!w~~>-Hwx4 z?|a8as-kEOSYZwR97pcaMOv)wL^)-F#d}42{$wh%XnmqW$4|y0+^L=rymIpzHyX>r zqHq@uX0*}^bE;7kTDTskcy&I6CG*d9y?@mZsjw=L-G`#WQWfe-{WdX7TFfZ*&EkDT zsnKiZuvBWgP%_6jzR*5QP1)Qc)sOX~f^bHAR%7xgUlly-e>RQeanrt`uC(LaY1}xc z8UZT)3OB#^8~SJMdz_-4)$COOSJHsWQATCwL7G^1t~D}zM*79;fkM-dH$qR$B~0vK zcOkXnG#XXLRZ=xLMk(CfGeUuG@Oq55>y%U%c~BG&6%t_fxt!X?^_IpkeOH4ZLkT}| zzYjN(uA>v-CK@12&ZkP0V-3whE0U~sf^^VNE~xxP&uWzK@T2Q}2sGf>yq+`~<|4;= zgGo2}8SO9yY5?cPAUNzN2iHQ=7GkhAr-Da=Pj71n=?xkYDxNBmZrpSY7@8I~(`)qX z3NJ*H{_I=lgS2lX)qNMe;I)Gdmg*nUl-I4@tg^~*!{<-g_obIsKC41wdPldXJn@WJ z6Nd0taI0rpPqK5DHO@K_$IjC!ng`R3PV) zc@dBbKguikZUaYcp`l8fF_u*1;GY^+;?DGe9K+3;N(CVB4ta#8b%Nb&v=`T=t7t3= zQ!g;|QR~n-);r&ilB`UK;_uiQ_lq|@N|U>usJ9h5ZG2x^gn+XeE^QAm5qd!HGg`1`=EGG+eu zF&Pec{;x=NIc0B=d$6U+6H;@I_rVv3K3H|gN_0nSY}Te&{wS63J1|CeiQKGjlEQNY zEi<U;sjVWp{?q2#oJHmNH{pMbZ_@8dC z;qhNB8*jfU8!NBMA#Ka=ZjB9;uYdD3MID}(l~=3f2)@0w__}POfbT3Ux{)PD1B6hW zGqitVr5@_mTVqy(ZkY5Jvn>vB5S@^AV;cG@m4nVmUAtl)`!LMiA#zNmp&cCB{(G~s zTwc(RCwLjD`tgr{R3?dX8O1q7ek`o9NtU(a-kb|~)efmoPhY(*ZxhskFv)# zYKF4kth)~ygXN=14WdbxI1W3Aq8v1rl%r&4p|B*i0>ro|U5_^knJ0&7xr7<2 zF~I%0Sj2wSvk1g_*5_4HWr}SGwHwPH3gdm(yo?$Fj6jk3==H66_J#o!HYz%mDaz1g z{yBAscX|m)`THr#fg;TC=*=gG2k^`(1SJu>`4~EemoX58h7Kx#uHyo>>Z#Wg%;eIq z8?xF^YTSHmTf&$+pDILH>?}z_DGWdy(9TV6b(AIU!-7FP=WAm$iAzJG!m3{*!kT*? zg&Z*G@8%;Pbkl3_46GgwARBCP&1g#Sm(l92t-=7%7?PM5pZ2W~#Eaq|#-m`U_dM^L zO@A=)UUrFIf^Yj#a5)b*-}Ur3kt($oeg-RP|GJBBp9?>yX)&PCRoG?xz;H8ndnoERNp>e(gfV%Tk#h5@6LY z={J4SIX&mQC{aOCVbc4U^$EH)1ia>W7((V;aN%fC+HFTe$hGH@TZ|toif3%DHgLvURbxJZf1ea_rsY!n%3i zQlGXh{CZT<2fWB@D$Z^qy+rX4PK`w8#iv7)C^FqWY<7fB@_S&F7jZHbJT(=a5a@d1 zebs1A3r}SfacErS(Py1Aqb$;wjoY^o1dF$y?QD<&L2feDIdnpdQo`?qy$Mlb<(sYv zalkx;N9i!(5Q1!ymp8CL<;5zu&ZP!nq9U~AN7>^)jcLc=$Nm#-0zZ~4j3bJ^gj@Xh z5qEw!lHnz3StCXTnZ<#kilL=(1tV?(`{@ykLOKB!BP&=LQ4Ce^FUZI3!^<2a4@g`y zrk-VO1E=3jhw+q1=}mf3>8FwKIN>@ykq&S`q2Ok@uohj@nOkrgC7K(q0VoMW9T~zi ziJ&?jo^?FppqG6fzL$Mc@R!%*54gd_!hrA=2W_}Q+~;P zu3P%khzV8dK{34nP;LxRy-*pmi#K(rg))m0Cr=p|ga$~>xFWi?LCMKB z=46**0^=Y40dqn6Py2NFh{Eih%b%6Eo3G)MMBy6q_cP98a(M37#dcqtE)IBh+F{UkQ;2r2T&eM zdAq)xF&?r;JeiS+VQ&?CIe>eFG@~V#mP*eIyti$nZYtgf)~1Fo zF+UBeCTUL4!7%W(IF#kyeJaXKvU!;`aLo8UP4NKF^aAo?=zOS*OiYs+vssST@nB;- z9PcfZFQ094Ai!|Bd+T<&bN?Y}jjT;FM=n?|3rh>-+3S_kf+l|OgWoM5QqAU<=peuT zd)5(70uAmIKVb7}Pd{ZoS!#PLn;77fy2tS`Kvd+#i*L$X_6JN%Oo1EH4cT`y3j96Y zljY$(QjS=|4N;@$>{9#F|;&@+2{;8I6!mbpKIlOwMtZ@h@ybFJ?lF1(@+gC5kJ0HAX9zT3HWv-dT%;RY1SeZp3KRa75Up@N^A{|sgn&>X~ z*hlj5hwo#A9+qFe{xYd%zdichGR7%=ySpgpy_}qOu~ePac<_|MGvp(D=NJ&ks;ec8NVRJ7uc-PMIae^O(rGTCQn)Ai9L6eL@0dUP!VdgSY`}Ci%C6$Q2;$cG=L~Y7_BMwFoc*jLgKTa-9Gd6?| zg9&p3M>12N)nJf0dsHo#5PsEoax+Q;Pl(D#(o|?rCa4f|pn5V9d-MYX6FmMXH-y^- zY7Oz56K7=hZJqz*)BomwDy8NE!03IK>%ytBqf#zM)o4<{){i4o8KV6tD3zKN(4n&-n+(!^l2NS%dU-T z8Rfb$UWLdpinff$-e@<5utdwp>lLrygL?`denjC&xJzf2=(AP9M$5~f28g(GNI12bn8FpfHhZk7dK^vAN!^U?;kEy7t605`%; z^YK`L7X`lFQpY9URkDPaU-P+9*d!#!FKug_IyO^g#HSUHf*Y=#-~}<4RV6lf<{hih z;8IC@6H}DGql%E;*v+>~Wp{O(5IK04#^FuUxQ&HQ=u2Fi+9-@ogqz2m8l|{G@zj8G z{X1d3JnN=q!{zcU6=>6>H2f3YVmx9=FL{l20y}@=6oFTH8cyYjtS{O(AKy0Q57}=a z?&wFu#7);KfH>Hqup2GtXsPc7Axju0z%r z?I-HV*i}HfIQO2eP@o(aDFm)J=+p_cf{Xu>vV=jH@$sArw|&F#5jyfEhzHotQK&*l z{4bAIfp%UmRoJ;7k3Op^RohgcH>j_TC)6XP8z@nu{8S)&#PAuOHgV=U=|*8!d1{L% 
zxvT8qfx9wK6}gk+mchArsMNuzulr>+f?Ec6%{zF2wFxpXqQlE*R~S`Pg}Zua@h0<} z5n%`=&pAFz$$A~|L(5&JF2KuOgGM!aP=xn6jp>FK+fl0HaG@FN_iNC%^Nf5i^9W7J zL+Thw5rCdG020NfO^@5QEH*%dCrY{D8x%|MyN00P_=)l37hqPAuCmqNp`&0$+H{Og zYo$%`a)HA`_{D{^tO4ACg5;XWLZ)w}MVDzv%zM(+G@!tRu*bLoH~q|)Ufy0dDW`fI z{TOLBa^ZZ6TUJo|HhnHer1+#iBUxErv>%=ZeC{WZp9W{nEAQNt;SFD5uGn+$yAh8O%N8^KGZbaQ zBS1?P84VBxC-z&@AON9#Yq%}E*v#i^R z(nq9Ym(2 z5+J8i-J&e;5b1|f=ovU@gJ-5zJv4P^{B{|K-&1L{y!pRB#mkRDf`Yt;VtKN(NpAis zrBg@ByH7ZEYi_bkk93x?X-?D{<@7AuWUTAZ{{?jQD`oA~v$_WjBD_L=b(7M5%rVkkR8Tr*!?SRBf?|{O zocA96VR^57uRMGHXXT%N@-uLvC!dqN21d$w-w4%uHp|PmugdDuM!C=Nz&%up+P=cW zf3{Ut_Wqo?KPmS+@37%{3tjyHdL1Zt@6JJsr)*kYFTJC8%8!2gAD6Sg{U6Ko*I$?6 z`BqtJKEoJbUq$ajjvoFyHY9ugh><~z*zO1A9ucrNE1c&*l{nIY{*yn2?GSP~DQg^o zreL>3s49})O?in*lA$L(-eJg)D3>G9WfEHFPP&sK1C#-|`BtW-;IIvufQqm}+UI0k zDlsWX#yu5LnTGB7NqaC7_-*dlrougyLio<(%JVQbzuV@`yL`>}{LJ;*jv;Sh(L1kF zr6S{mdr>BOBUob;07G&)fk%0&g03K9AW;&HJjA$=At{7_&D-X!N_^tZ_Wke(W8gXa z=Q*!LK|%u)c7N9ssX#Zx2{3>J{$a2px+;gVY@V|r0E+VGoBZnY!jOLG(=kS<=VkG! z!K5&Np(-f&-G6D9mweCh-25&cv8DJ_y!g#N%vX(IwS!!`#<@%ne5x#H07XH%p3|!2 z0#n9>IOD#s*r(4`T3{iIXRe8};M+cUC_M?I?+f+ya~Vq>08s8R@{7f z{7C2i9{BQ(cxUXuL&v;j8W#eM_qgMw%$GtY1})F|)AywVm0N{(1TNnldvM1y(teCK z+RP|?g>Sy2z^rZ5@oP9ZPT#dJl?o$e;w&`EbzhD*j`?d4NzvPH%FGc`ZXc$n4bX=n zIl_^Vt|Jd++1&BqCh=GmT~j4$s-7@aBS0^K<2QO1lmt&+Lx>x(fd=dPuJ7nYlm=zA zho*o;In=PtGbrSIoW!uQ7`1P&KvERU>!b^m>o~@_d51$*s zyJTd*l&V35X$%Z_XCHeWdI)Rx4Um6|QtA=H8ZXu0gtpXn_#0;dlPv$s7->K0Nx(0! zmjB4#ocnBOMOpQ`V~xQJ%zigAp|Y28xE^@#5+^}YMi^^8(17V-M%Gkp?bGuk&e+g* zh+sTI=|3h7fJuz;GRlTTHxlhMSGK=ku-=@aH;n-s_n+q<5It%^zt-Cy#6SCe6r4dV zN(Q_dK_5w7wc9>VzLH%10M z@Xs}5I!+>K;XgFQM`J=|-hFu*R?>*=o5$RX;WA&k7+OBZ z;c&q^xkA}%wkQ{hN8OFAT{v9kZ=-AxNzeckuWmp;J7NPUym*Aot!>ocKX33XQO+h# zIg)|cu*vW6g?%bT?Xyv}pK*0@XxIq`+ypt|9c}Ok_7#~fvX3#{z>w%iS#FYwv)kOE z>dtD`5>p#`9-kO0w@{#Wjm~jQa35nH<$$PR=J?)&*)lfMNBPUEvfp!3mWVv8aqvwgYUjm=GfEFq{5T#JKNa6FxkKu*j(SnkfRU>?_TrY2yR?GVK1{h*75P-GKGA(h2df)uX9q1PT>i}_EKu~H(z~GUXmWP zw!$WCV0-t@gYsbh4l;o)+(RDU%YG9o3Z23)c9F62=K*l{ldt(TX}Ps_rK zt!%!&J8~O7%-)3M*YK{c^5X5MM7kE(3rCd^A~qgB82JwCj-!^3-jp@U zls4f*J?x1&A@!{nc)Hnqa7oyzb8vW=`mk04BK-!Y(na{M!CB`r;4riZX&JK0GU*l& zWR$7M3O(3{HDMgrijbRA-3(!BMMB*$l$si$;uW4-e-HC^SASK+Wfbw#6^5`07`Fv0c{&owxRcUx3ACULFC2LU@UP!OE@Q=udCRPnM8l`Q*c zVUcR=_>8|t!)N@kXr5EM;yO(e6b5#2LQ6a*Q;=~BhkU%w3-H*#FZp5YF<5v(;iRln zmedc>@R%Mdg|2jEM`8MzZy6!V25OAY*iI^k(6{i*B;yg|y&D|bt_G7HIzb3M@_U|Oqh4{6ugxfq4XmTzE$pNUp$IlBV@wh6{eAw>pkDe z5b47|!XZ2=s6NAO)<&J@+@?RrQH3Q9`JQ9sx|@~wnm#=3RHG?t1{xA)zK7{Wo6w|6 zEa_A#QYt^S(g_MS@ARWsddlGmjB(dfD>Yh!BFA^Fd`Q!L*RO3K6>edvbLgfujXyo& zSs(N*MYT|LJ85^F`pGICDm`wp6n>{6>)B7R zPs-BxFhZE}cOxTK8KDNX<05DA*T4LvY%DEiW3HZ2`(fI_JKtfv8IwfI->eys$W@+` zC!jo_*hy=b2oP2b6nsBGJx z%O-tigkni!5+??L$}(JsZq)8mIa6Wdeiv)&oD>tvi}0*~Fd8QxIJ;B=ATsxWXwZ1W zHl3S98sKdz>aNF|HHZ2e`nMK*^16B7a*Wcq5i=VQ7j5ul%o^H)w#1Tigh2iF07mDD ziC4+-ng^Y^S+fh}+PxE|6P&TJ@r75Q-FyXDKx6Yhs_&7J7 zy_2=bBLX9Sl%r{r`{4!PCyo$smY>kB&#BOu3MbFjF%gFu-`$+5M_jrwf^FWqhk$zC zh4NP#R-RsH@F_1O1^P07Tm{#2fQ%w|h?kqYr9BPFc=n|u@EiDUu7_?}1GM8Xq&V^E zc>Dtu)-ig8_oW-xi~nJo(5_KI@CQ8}9?7dv(0b*I$Pd>7xRW=#SHM!pu%T28Iz>T# z#74;u=D?L>MH|a}wo}5h6J@^zT|=Ji6QD)B+bK3;B$}}dpr{EU-~_?}(n)&2*BSGo z*WQSh^l{!sN=G;K;qk_HaH8=5Ja?E9mcDgP`CQ8!_Yy&vour!1pbA!dS*3=%Os9=# zMjr}(8~XvW=Y)MwgAJ;#k^4S7c8@lj<@4A7qAZOZl|PvOoiaW6Vfo)b`ltVWDK!`1 z!4#1Y_7@ye31^MUI4k77FYbMTzPyKgVBG?{X`kJ!J$cjd{#kkc>}BcNU&Gr!TzaTM zKS)VdR!v!r5K^Ot@NOeBTxXN$W*VwDYyxqM&m-MJ!=1kh3pi)vPe|q)bc9IKp_X!48*K6!_J3|E&wEz{)Ww0BRG+4p}R z&HHak8{QEYmEoDK4P5k%F%~|H$#4clMJxtVVUbcI>B9#_oSMTr5fZv@3VI#0O|zLpLUSjtEA|TEkRsWq)!N7(=9r98*myt`j7q 
zZZ6F$H5)t3@r4kI;S8+|dDM-Dtp+0p)*`vMc0p-Sm^7`{8xAIZ_@@L?WA8vCYw=9V zzX(LoCq%qQ-)gAsDO*Bagxb6u@0@lmxI zaTWTsZTj*Vcwpj@EKu+R7fxGfd(yj%jYYo&6F9$1b90NtQX$0$7XEByWg==^o<7!q&? zuw`k4kAY_`NhMk`GCp|==;IyffFHmt_16+H*ZARvaQn;=Wh5$3VX^R$2idc85c=>U zWSZ|hU*}LdbbW|3tbuF6OU8)~lLDx0??L_RsN06d8jKbaHy23!28nj?rpi^WYz>{*T4jVnxb zLcl&F|L=y*+Hf7asidom6U0WWmq-bs;l467JB1)M;o9|zHr#D#m9BDv53SCH9uCwm z=;D~u%+5#_*1|9p1t^R~jky9SecCzL%sRPJljaf_MyXyj_px`XXAAk2iSy{LV?8?VOt*}dbCrB9aOC=yXBe`w-78D?Aj^65K{R#3>cD5-&vp^Z1 z=0<5pksGJ|eahUlQF~^9Jep|~$U9R5xbP_#^yX!G{q*n35cMCsN!V_~TDnAA9puPb zaqBowz{4;eig9+zS|?Jkh3|;qB}4#jiN(_VTOJ_(zlFhhq;p z2;hUVKfPa;7B=EeZV?Wc<|M#&^rIz`$))E8Yq7U$IJ;X%DD?mC;6-_Juu&FBwm(B2 zti0HjEI>GLk^j@$Wz*xnp^K9;F-C}mHuhMXtGMQyEj%@?x2zdU%~|oq9jmzB@es5F z?+^;4>qT$SJ{t-~Z?OyacfLyzC&C6yO&`{f-adKmH{X0y`Zr&ts>C>WdiUPL^8SY( zmK_c_c>eV|MfBxhglgesTUa+=FRt<$d#v?H^KUZ$g&y%Z#zR0|EvtG4&ttl;cU>Q-65`ig@j zz~$%`N7?t3-~ITH%7fX*uCtpO{;{<2l5$4my^<$s zX{_PFx$^OypTKL6%E07z%EPPG@-Kh!m*w-XJ_m@d^4`NoWqj;W`J?yy%3I3o?QtaW z@!>N^_{cx*b2xmYi7n6ef7 zxO+^7>|^pf24PNDLuFxc1=Eirs4x}9hKcZTuY|cGO!AXQL6W6{;{IS@5{b3k!cbYr zB3wm5<;IUhqI&D;HDNlgYo3qWh5NNo`1KKWGBG)ZE(3l_lqa}$E-(oENE9Y@`ba13 zW(TK&1mgFa3bD*iNAcJFpkio!xi*=Qf`lC+CXlgO18)^*j+McJ z2bB|T6`~?1oZr6BvkDG>mtPAeV$xAEF4#2VHW^#9&I3Y;%q0Fl8XW2vE?szi$ug&hy@k7xM5lLe*G;=LFyj{jOgv60Y*^q>VMOlg~E zrMVcxa%n>&<~}%H+RdENkIynjUBwi=z+F`hJ?GyA-NV>r%Et(yV`$Hr&ph)jd1&Cn zaa5dDccg}&ANvVx&M&`M_Z6zOz#r7<6Bs z(WdioL$e|#VGeju4Rcf=#Fca|-uxt&3*$-wmQ&W?rwTYpj@2FI(05d}7+zepzHA!UcpCr03R>FZr#vf85x7*04Azurur$_cyz<7n6Y9qnf)76AuBN?be z*Y@87eOAbyoRZeK;XObw(yhmud_q` zs>EXo_)l+W94EYIufk?O^5i;G*Sv=7;#nRnymiwOvWS{ zmgEc_ZiUy3feSsP?IhiAwd1CThK^Zl-Q?@s>AB5d$T4Cwz<%2rIXoB^96@~0HXdF( zL_z*@4;}#a7i-_3_zrO5Ax1H{XySQZrnJuxIT*w28gB0eG0a03Aq0=>K82R_5VtW- z<;C$HId*%jJ@Rgbfn#;`MftNo`=870|M0&qw|gIyccx9V_)Te!9U!A-8Goyc@4ikh zi}|1sURk6J&+Y!n@_V=6F9Xxv<>~F!vV#I@EzNE6Z$KH}aq^OgZ%FM|Projwgbda% zH!)P}iG@0mEVr{O9Z`qX@?`>kMDFJav?)HiIkaVpBYB5_zje1ou47-ByU(d?)cdr? 
zXA^n?CG4;*<^8!icKC8w0tVC$`F6ivdX)tG@y=m*N-zB=ugIMmA+Kt(uRMD}ULL=X zxH$#fE)ae;ysNzFA?I*%W{xEL2jzqJ?^72R#g>B)2FRi8A%Cf~4*)ACs4aa){Yz?i z68A@rg1d753~w`U7ERsW(K#?Uz_dqa5gDZO(>_}(Mfk1$-ju(ldql&FFhdwNHq zXI2kUiRR&MG}$~QaY2kpKoyVOwKJzWRW=wizY@TEa^s?V~SVjno}mG8eh zf-&{1?Cx(9rg>E!07oaabMH*O1K%cJ6~59WN38VUFFiPVThP&Ib1NagO~Rd|_*1{t zJlxZ=L#ZJ{z1wVJ7`{8mQN<$wNwV4oyHh;6(A@)r?BH^-9TV?aB5CXFt8mM}kJ#a&$gK*do-i~|FN;Cm7%Kk|+{ zqW#@x&Ur@q749(6fI0X$a5A&hng~(DhW~I!yLJHtrVHWS_KtY0XHg6 z;wpJ8fuX_?qX_otm|Z}EYF>c~wP72T4xX*jHlrp4wAnL9TgGUQNgF`Qk5ffEre5bM)@8% z7`WbP8jojB<;C&cC}~w00CI%A_;XG*oNR{)bLa4KJ06L^!yoVwla7zyq0O&V^NlLR<(Rm;rd`3mgVs z@FNjPBXu6QN?R_2N zBNOHRyN^?PVvjmE^=v3|jnIB_HK4^B ziNHXjL(4UJRHM)7k;?iIO6Ja-rWl97DX(yR?#gTDSTQ$={FDFqe|cR>%?04s^kF_r z93=GFgJGeXWS&M6QW=MIulLqDxS1~=39Jx^{9f(F^5nXB{fv z;t2Rnc2?QtwHRxcCX)w(j0#?H<=9P*yx1oRL7G$y`4Ky6(cNT%rL!D`BVp+5-#O~ zGPBS&eenH)b!hbE0zT>QaUNKv20XZ?!yoC(K026#92~&&o=(({H@Abx*rnPP1JGLv zmk6(Q6Nw?F4P==*~ET0k@gJ&q!ZQJ$5uUvO5(we-n;`Pc}ghp<`6#xON+_0`;#}yjoy4ND7 zz>U>8$U&eT=M3CwxT82j);Ruo(wrxF+zxp_EsC!r$ynHUxhWq;YC7o+c=&iXjI5kpsPe?>360osJZA*6Js#ro^Wxa*GM$x-OnR9c)u{$Pv&&#`y zC(4^`c5PDPsnw3M74I{6ICs~n>$;cPrFei*jLmHAKk8-&@dhwqIK$%xQN~xdmn&~pzZ7;9vk64yBJiMEj@Q;Rskm=?rq@hChzA0<@AybL#@-DviS7p<)8hl`SM@= z;XlT({%(1PBWEv05AgVZ&3Lxh&ANvp1Nb>LtQ%Tukr;kFj$ShlzDP!YhXnCq4^ephfwc1e?C*QD5ea~!RYN(|~3ofu6%r-N&QnxaiMWE`>CVxJVAL!!IuM0!Ulccg*I z(sgmL_F?Ss5l<{!WykCUk&TI2%KEStS{OP?@$so!<%d7~LHYQ*k1<}r6@BPDJj7Am z-p~5tFa_8A9{oIj{vv#65~X?Im;(x+#WSLKog+jD=Oq3ZeQ+I#it5KR5X}iO;Z5R+-=3NLtS#&z8c!3U+V~z zHtT(!QBcA4QdS5yVEG`Ko)4N%CEn~eUK`$%c~xat0}N)(eIT>9{Hfj>*RKjYi~)r- z&nVy+xJDn78RbN{c*(U6W8fOUv=D518&obGPr~L10EM)80kE`-7J&l5JIWP2 zs2}Fg8i)$*jDe?|{rm-wQMiI@j41d>n;sF~QOS#9#y!_AjrvO3xWpsz_}zy|jTsnx z7iEX^45BDx1(m=0>tC^JY@G)+g~b(0lo*Eo#If^M6RE*0N`_H8o=P9CIjAb%RqTcL z+w~1h;#TjY$2*#KRzvmbb{gbZm$>l|#oTP4>_) zTsyo(7+hx=E6@Bp(2WP$pGWcu#--xj$#;|RO<{5^ zN~<)6vBt&?uKQiZJqoe337j$Nfg?^X?pY?!Lo}q3iiXNlJ(4+NQO59`bi{xZ-tcgC ze3Oh3y35$X1ARL+Ndv-J<-rIR{`WZ9*g*WJECvxd2p|RmDH9D0z!+ETF=V{4uhX=nL0fqj7?iJ;utq zm3}l}z&EP~J_18vtSZ|L>tmki)U%WixPi$5J;yD(N=fPAd5icrP{uBiDSar84Gb=n z6+F7@W$SXg%#J^X-cXR88<4xTwTjaLrIg*d9{oHrJdv~!kNG}AcB>P4;?rd}r)R-u z){Yj+c=t>hg%|a+eyvA+Myirg+mTVDC@7;R2jw?-cy}nJbHqA$XBtI}w4Xvbm&>b7 zJinyPvEEK7>vKWGc6alLov6rP#(zoL&_(|qM>r3$=>UU=s8a*O<;Ct7<^A!Gz$ZKG zsRi2Iby!|KdsaUA>REaDa*aBt)aYe{!u%}rhZFXbXJ23&t#Gm#JHS=vS7K$dXOCoK1g0KDv%u z8iL4|!Qp<4LUbMe9FwQtLByg{;{;>21B1oIb581zaI&SAG%Y;jFthVM(gWKld)DMg zY~VJUY2-{k-qpvu^yj9b-m8aY<_sQj&);GwEW$U;X1Rl^qS6mPWKE5546_S%pIyQmc=A^{$!>Y;S(zQYjbV6H zW{4<_f`fJ<9Q{OME{Ti{;D~8wy$oH5imX@gw;?E0i#8Jb0qJ#ol)WV8y<>?eRE6^OzSrH1S7y6qGm z6jf6IWDCiowGVz%xk~|Jn3|TL%)}i*uql)l7`%cFrYvKzbHtr2h)Gu(@)zb3MS-@w zrq{{>v5H?RXVBioaFf(S49QL~ksN?m0_;|*o@Nt3Z*>OBt!uHQ=yr-8) zC5C6h4Cx0P2X8QCnV%kzq=yKW|M(qn@`;j8Lx2iVcCnZ(RBUhJq8ees<;M}L>_$oU z1(c(DBqQyxE z;20qSuBp@lA6U$ZZ&BK8MDHwg0$vq(zMs-KEH;)W_i2|ow0*s6Q6zcCk3XWg2{X@V zlu8TUIA=_7cwoweKx51sZGu$3SK*x000!Y)XYA4fwCT7mRR$1%!Y$KuoKZ6Di(5hz zrJZlW;qQKO%?D_t8cVb#PHgo0BSWw~ugO$Y6hb3D7nkjrm|8pik*aa9kTQo_N-6$a{f-cWEaEGVgm|s)zyu zl_>}VAMZ%BnHzrA{Tmtyb$oT5GUv1##TQ=SuGJotjPJgGr`&yfj}kzXML4Ej9wjgK z5X8Uu#b2|#jIuMVkW)O#{(VZ5EGt}o{fMwg76rD z<2MyQQ;}4vr6Eg>n4WZsau6D`Ul#~tPl_PV`>Ql&6jQpArc%;Myg5etxG{18lINHY z6nO=P4iHbb=|?FU;5c6UQ>oFJV#2BQ@!fR?F-H1p+-VT`jBTilC6$x@jV5HC=-b_X!I{rOPg&=* zu0ufQfeN~G!H=w09UM{G`TfXf1$Z)I;a&C3BhhQ_H0}h?p>ar6{5ZcF2B+W% z6$>Q*TmV;$D;B4+%0C^XRdoU131bi!9jrI!l&QdIuxydEEe}FY@?M*U0}4e?=tEww z!B*!lJe%)Eom?k0QTYR7^bzjyr}6$_C*=VN6z603V!sHbx1aQ{X<}Ujsg1m69HE@ViJs+t^qr zZ=QcuCc99sfqnSSSb213gyU>?%c~7iG4?Ll%{o%1Z&|8~)D-p)G%z~NR5*uTHRzS2 
zHP4)Rq`x*!Av&?nCJcp5y3NwYF%b`=~n#0WNWm#TX zDp!;b^3=sa6!yJ~ZH$_p7!h5Z^0e1|Q9k^g_sac|cgnky)CDcOczHjk-sAh=!%}J# z&!4-`$y%IpRhBTeIM#Q3sN9_zWnaWW`R4gv`R4VD^7FPA<<1QG^3%7dz%Wt{DEfcE zF5outNm-&J{&_mw3<|6D&G!z^$`Yqyc^F1F`yBd)+so{|vGVZAecG5LXI&!@4>ysR zSL_35W1SD+^y{UleJ8wbkeaS{2kv1Yb)$UlQMzWOY;nf9^^7y?^Vc%opp2 z-O~G{ZLO^CurFXALyY5q*GXkzjj^qQqp}<24kgEK-M(G!-kxEfMwO?li;HFFoM##L z{QO)_UTnu9Q83D&CjO}-T$Ona6qtX4SrY?>RlduC5ai#VbyDJpe{e+7hIOp4^WK!j ziw0>?&^vhPfHpKD+tgcegwH*AqShIC;IYr!=X>Cv>>2P?uI^Dc6~5HssQW$im$i9P zoE~#J*)8UCn{kZbY{CdRBi$XxM&o!K1NINHb?cr{G@=olxW;Qqm!5(abVPSFNwdSb%93TtBLvJ7l)*!*FhT~#LT)yXIKhA& z&oW0Ch&i%G2jGE~At{(y%?iW|m}{5z(HxfMqipqvP%1n zpbtY9A6Rq<2Nn7r`lw2P#K7XGFL*`zJC~cN{P9tUaT7*Edx1lN$94WwX*aZ~z)r|A zi@J_OPow#?_EC=u79JVDZNWr=QEM@RkA7svDy~T_L5b^&H%nL*z*R{$0wD9H`v_k9 z@5Dfn@0$zTWylu1(G(!_j?ejBA@qOz`+xEeOR2d4+{t)R3iMo9zDMS*!l)2a82QmN zBC{25e)RS_hNLdo33sZOlsOZ2E>(p_#Q>#&I$$0z>z|x=Zom8yep&;rI=1Sa)U%F| zr7P~xPJK#Gv77r^hJoW%Vjl-P_xVVINw=1d$$fkEI&$>LeusW^ytg_=3a(w(q7 zkB%Z6&AIPFP?2@cZOaTTqQ&3#llfPxpzRK7W$7uc{b`7>V4x#u`GQ5@$2xu*Uw%{+ zWcZSnhC?vvcRmr}^lAC07 z;df#B|G~>!T)n0h`qv=f9Sb~%wlW5B1RiMDwQqRXg&7?c`ffqz_SJ)uA|4#4jJX}* zs`7imGj3S-NaMX-JYrOV_~oyDiPBh)vhhIqx_Q9=; zg+XVXXcXSyQ_rDo#QW%+pfCu7&WS4&)g3%xQI@1t+79ls{%Mjw(t7ZK5@ux7&&~Yh zaqw%}mVZojk@i(Atu;AFh`Sqw!czr%@mRJaNc1v?ees?=0L>4f!$;cYPsZm&JMZAP zi#BdbsJNb00`nfPB-h(LF&tM8z;i z3(CG6)`pu?Q!EFELoItRd@iM|8sESJcM?DS);zin%-xB#~DgTH=a&uCVOx& zC_RC$(PsG;lv3!ViRaL{Ip@91F{6`qa_Sg3ls~rU%gyu38QW5c=9hS$U9V2Rc$0s0 zgOA)-8j*V+I%Lj0(apSe@#IEldZ51wcNZJ$9H)kOxAZ|ft&5!~<);{nw&8N=Jk=mm zAqR&TepxY2)!+n%9A02N)E+-;%9`_3>6rWUDUZvzm?QabUB}{@6fgSL05~P}M1v}H z=UBkM^9Fe_hn4Phj>-F(v>25dR^bEwRHm~AH0g{MX}mO1WK1dI^_++|ND8n;D}*iS zV8{?tF*H~JEpSR38WHX{5kuY59dNLG^9i+wKIILNFnCpNXO0=HrFp7@qdu>`Ag#qo z!zITj%wB7?JiGyeP@au)tITaV2bN`G?R22@bu#}da7L6gT$1O!v740UaTK7rcOQ_1 zfo@2C{(Y3#MkjS{$&)_gNZM|cv2JqWAK$&rX;Bo-XONhuPzSl=D6I>xNPh;tA@a=S zV;9h&X(g{oxsiu9s9k((Y!DiS|MHG_9qgn85%?jV#Yooh?&~msuftcS#%9Vb6m<7O zvGmFYX&56zvt_PtGVd<1QB?dmhjU|d;GOwkKSL)_^+WHIGY`wX#;q8$Mi@4?o|nar z=hTG!0iOM7iuC^kf`XP=1D}5Wn__9BP89oXQfWSax`{H_&Zb#>zof01)Bf4BSJar? 
zBoF_zjIh6DoUD&O{{8Ql>4|A_$`8s33geUecgq*gzb5Lj41IHo-x!7m2MqL6V(5&B z#t9La{?0*8?CR!t;ps9(>S{kmQ4`N}2MX(8V>mS_mx<2dHmZrH%8%^gIq%wC4z8Bgr>=QYim1T zuNixkNQr^?IO3bytdk#*u7lo74n68V2LQ~J+aup48-)lPG^lgONao%isZ{VE<#Dn( zNFD5ezE0rXryOE1H9lKrr*6ZFz{#ex2WJ@JC-dBK*&vd{IC~$#f2l8e!CIphZu9gh zdL~h0`8RYj$k<0_IPDQ$+&@X$8`mMfGQlAO9q@#SwlSPm$E4VP!NCh0f-(6x^5$ItQhGAjCP3?X?*)r2=M0i~W)?_0IJSz;t^ch>Mj2foG z#8{=n<3a-`TB(YN3tHx*!r&dhdnH~Q?#1iFb(JJx?STM2`9Z!Xo=)i-(}?ffcg&89 zRjvoeN!KeM&%ER3y8W5PlZ{^xjT;bUeBvXuR=E$~&vS_c$mJ0>3T(PZQBcvU0*zm} z&i`Kb86lz_5LJ~ml>qL^Kr6HL3ERq?^Rter`lJxlC~MtRP*7Mk+c`fSA(VQQyrm+c zfG}@Q+%VG2Ta-xvQ=zT`T`Y2s=c7~rLsBZF13UI3&iS5SugB&D)VtD66^2Rwg7`gN zSQ0U%z)vD8&ZpOSA3&T8+T$k+%uS!Um*>G3ud!4EPw35gVgxd81-rsK0t?TU^qIM& zDI0Z69;+LJ3k68!*Z#a74-wM+<9YwlJEc2kFMWRE_WgaL-sg{qX`Ba)>~^P=R8zr`4V4E_BpINZ#?- zPttrqs>bvQVfhyAUDBQf3{*f(VNuyKB~SE(cA1>a8N%6O2kkjUt}38B0w30?1zh5Y zm#*LsZMb9g&U!kUUT&Y$5MluEzGFOY!;Hh!1H5Fyz#M`c=8I4|`Ns5w7nHnXz`n#g z>D~hQu3llsVM?3Pq4v(HGE6S|$^Hg;(v`nU51xwUdcUDd*_Kh^%A0HlBg!^WvZQ%8 z9?Qq8lFfatDf7gEvP0hDyyOUAp3<9a#Nz6N`C|Ro6e(5_YshtYfbw!>MH&>p#yE9q zf!7+R-fNNiaivj5<0rt;NAY}Xf-0YUPJ__MiulTbCCrcT=O1{H7lxnl%`Y`NE)9WR z*0}p3M4V$4c4a-A&<(ldMguoH(&?H5NXSt?red0@S*AYigXdQmr>pKU zWC;v$+;&yF8uK5R~`~0X3 z{}>~iw3zjc4cZwdl?S;-N{;nHU1IH|XKs^&zqEN=)>k=x9c6zP{&WBP-@`zdDtG7S z!)uOcW1BResh|F!92~J1VQ-mD>+R*y{9_FD&Kyc3O-=TUmI3BWIc@6ACC=g=uZ*{^lqC)Vc)Iu-*7F%hm9s&3c7Hx%$q*58057#Dmo>Tp(qdGi%r(E^H+b&ki zA)EZ&Y<)#akDB2jIAbNV4jLS&^%c${-eEoTQ#4_pN@`g%Dm z0t4|=Xs(y&g(r}CQrI^9`1YMU)U$=)Pq7S#c-YXqr@9;v(%QvZ*e8sj0AWVLbY&PS zy?&z5@!f?ilRr|KvGBMd=vLuDIAm*06- zfhTj!1})ix%nx|Hp6P)x+P>6o2-2L)%4Ah+6s9mZ@AENdOh%dtJ@ikvyarQ(lf@|D zS@ZrKiz8I<$g~xx!jSw^ZfKodvWeJF6-2ZNGf(P(Zz7;%yLzA^hFCDySrnOgz2r3O zUg-31JMu_j@dF>H2^nQ&-rQ2UM`@Ai`dpm{8Kw=95Cd!`7Fwnib8N)Q(=5Bzl$%&e?4cpmbQf*sAyZmN!(Y^U=G}=9%tUA z&6Y**A?)lmpgnQSuJf)$oc?Gq?K&Q>2iJ^34;coc(TS>9p_uVK z9(ww7j4^7YKM)3F-?+CMFi1EOEjYFY#_C%O4)e+V9G-h05Rp|kLJf=$QvnmOuMj*4eZ#$rlf1YY% zi8-%DiFG~lqH9H%;=#_ipeuDp#$|goDwGTU8vHu;7^wVAN8**X^aywHo}N>I;wtmW z+C~tvss#lGi}C^8dtip2nl=Yb(ztE5;f-3_*yAWfay?;sKC3}?1XD`%1sKGo>+vSV zOxz@GQ21z5IMcdoRreZWG_s?yE$yoq-S7ZmRH*YLo>gg4MT)tneT$&GE~IrDf(9zX zthk3p9b@3G^TMym-*+Bu-+P&FaE+P+A7L)}XR!cJ9FxvWVR78!jmT@xB%wx4K}-yOf22_L75#_QMvlUKn2&j`0`+m*$@9>A0`5(TBGN^#bChyQCcTrrk20@-my$+02{t)LXhcGG&w!04H87c*)fgbH{ zq4@8hD8WZaGrG{bMH^S#ckOf`ygXeDhMc)LMlnzc^rNDw0ojhwzJXV#igfQsk_0=* z4p+QdfO)>+dLIh;A^jdu^nR~-SQa_` z&Z=hp^f3Z1hdBcG4qoT+&M`KP$|u{)@6A3+s@%v39@vg)Amuv$yV+e{B4_UE0H35Z zjS@k6haCG$yxOL0U17XT_qDF6CzfAPQl(~gH}eEO*AJ{sZ}$g1kY=%}ZqXpvDHwNjcsn&z4^K zX(qrT)relK?T}hCRQ~9X{ylI7-dX2;{N4c96VToP5rGp<#WMOodH7~yk?EC z%A}8T%Drx^ERgTM3lAXeio-`%mllZ59FwK8g_B{8qu;5Z!?^lzER3Rfuf6?$plvDz z!Lu~7rLjZu;V-Us%i+~Br*UnUnXX%j%674#x{G}KBht~jG4|iS`iynek~dbg6$X>*48K16NY+>VC34bEG*-vUIZgylt7O3(^;2(j&!-`7gq>@X$!NeRoM1mPp(a3! 
zT!}s5U*MnRcNM%4W8NF>M4BR&2 z_27N$18a>c1iT(50vvh~Bv7kzXl$qm$pBR>RQw%-&--o^s1R=;;6BWdHh3JkZL5az zvw?~43Vq=LArN@QK}rqs!UYiq8MO>A3j(2mrk~$=IFl9wgKvIPYn^B9M=JoZ5uBB| zDgf=v{zXg`)`C?d8nRmd8Ln3_hOwqq#s*$hMffkmBH}_kFlRBmWXviX{E5QAczs{R zS>#y1-8MZEv!{o9rz`=ksibJ#D`aUcR2WczrnNmL?Ye&96M%eYHDh#YN~3Emt(k}n>6SC5?=ilu25<(i6jU(3K`6OotFVp_ z{d90#SfK$$J+n_G1C{p`WO&biv3lC)Ty-o%@B9`m*-;42c#nQ3l_mFEQD()5^!#9L z$1)Q{0*}liu4a4{clx}t9p+?`7b!Fi8y;4XjiTybOD;zFew zd;pv0#H20?=>Fpi^{v`x`M;CRxi24g9B&ZdE(wq;WHxDH<#ekbA25N90yIPJ(6;RnFtR=Rk}e4xELw&GW0RNmzH_u*GnQK-B@ml9-$jDdZM zf=L@H37!VnAroG+#W9*05Ab5zKXFVMgU1#8t=F|h9xJXiD2PXXz`lT(KTm2)k@&v~SA%fukYg?Q|(e&XU< zzhUVCWKkU2r*(s#-B}Bt1y+0DcPiu$xcFZANqUhsYkz7zwQL;+XKdt^)5joa-MaYVYlOc2aHji9^GS}E6-F-8 z-GiBlEy}twJA?2MXPj1Wr&(Mg7J)2j^3^j0<3GOnzWn>&{s(qJuFBe%o$~bAYFR_5 zTYdBufz5019IUwA4(Ugiv8=x<|Lgzxzm(%oxNd)Zg~3CCZLBAp(}NXGTW z^QYyD#g+1k-GS|o2OQ9K3O^>TCPv zIr>n(e|?NV+U6MGUzC+u%0ZuJf3}4 z`|LQF;qCAcK^vppDuK{rjad%yYJ>Yn?90fba0?@t7SJ%VOko*+${0ewLxOIbWQ6L! zD!a{tvT^@8bjeu<>XPxPB~y);0b1 zsCa|%ar*dhR)A>93}>=ryP5INleAU90!7}eVMtrx5w#l#|Lq#cQ^d?yFZsV zZ(d=nzbF&%)}{6;xMP==418^rm?zKPFqe>bSc4YHu62RpH%)5Y<)zK?U;pd>3OWBx z`Q1;i*g0A&D=WlQV5CQ`gpgU(W#%r5BjIa?6VjHb=!`tCeK2u?_A>!d88t7XaWRRt z9ju$(M+Q-?&y~^1WO6DQ$K$!p$+Sp$wR#0EnOb%dU@=*MsZ+mk8B`W3J}D4}hD=At z5ccEVj^`X*=aRn=I}>fDC;S2 z_SJUl7Nrvx_D{Y3X-Tx5`iEkG!ebi>c4npV{*b_>Ceq_Q8nX+5ZSa`B3$F_`mM?xs zz-8chDShEVps>7sFzz5ev`hr?6r2E?ZQLOY1_s{HYN0^mR=GaOH(2p}X4T2iV;5=A z#nZN^n)`mz28eMCbF3xzlO2RHkHr+&DHcH`TmM$W4ew=vQs4la)@^$(1;IW2aD4So zS791m_Gh#^VI5J`c<#7HnGz}Di+XbmCQP(itF1dPfR6{kly`hqg$Bnlp;7R2hcMt}9}_ey0{u-r(wXpA z;emVl;lqT$h!E#;aYusVCb;k&m}V^_)p;AZnLjQMX+wdK`@WK$0y-_zYD~CzgK_X9 zE!1551Z$}*IA>T$dVJbX%TQS`>Q|_Z`@RQMjDr?qX~6ar&eMm7eknX@DayDYbyn9s z`99Vj%Yb(;+m!a*y5&LMaqi}i+Y$PSH>vc=7}J&kQE@Y(9-0M*D7h>Yx(Nz*6hYdz z+9=DJLC~W;U_3L=>;uS2AHgz3`K$B7c_413wUJ<{|GYAXXWlCm*0HKQL|mF!_VBz#Oao1Kxv2U{SeqK{X%vEPuMFNDojO05H#~&vPwQ zifqGimYyOI^|6+N{gGlO9;{GYNXN5S6}2*ZY^ZbWi9A8_%z=+55z)_-qmVI->Ezqd&D>GL?8t>bv+2SLKhxRHAG;Q-*-6f z+e82(MNqnF>c+%p_<0nP{KSztP(z1-jyF(8}B z6*;}7MYHu7%ss)XeXAmb)Av2X7EM%ZJBx3Fzhpz|;;W0_c&jDXXN-UekAln%{ucw7 zQ%7YoV+g#Cx${z<;CCAY0@1%$4FN(`H=K)e3Ux?J~MHEb5%i0x4=sT zvvc$7+b?$E9M>j7)8O(hR^c2125ku|gOL8ySZLq#f|4Z&_d`=U%S+AVCXcnaYp3u^ zd%hnUX8cqb!no*1c#;qNPCpzc|D5yEopbh}PdV61I-TI?-`SPb@?>QJfqMmh zO8)#zJPaL-*$qOlL}jb12sO=bCd=yj1^M;K7{h^L=kTIA=Gq*)8ycOINjkd5OdNwx z&j4uP3%^Q?4msa9ph0m;m#%Pa**pFL;TwNx@P>Ok<>kBZL2$0C?C!0D}mI9FFwB97-$Vr|G58=Ogal?Ws;Zz&J6f? 
zc2xRH&~bY?ZlViIv`*^Lm1o29#Vpx`ZW{yne8HWt3&6x&6FUe-G3bNACnWTZxU&*#}?zkK%eCAdRL#3$c%V_|9+jfvec z_`vDjr?Nuu@K?-_YuvN@jPdt>dR5-C!Zyc;jXfv5@Cqr9RMu}fe2j4%kO{x9|*9%EKfdvf;@g*UcY?{ zFC$A7_Lj@rH)Vz0=S^I<+8kVWPmIGQF@Xq9xcg5ROaZS5atVWP(-u1liD@I-|&(!FS8BF zTpP(QP_0o}Si%^n>%oFAqfkhon#w5n?qwSW06C*?fQ5|_h`0tD#xe&}^5jQkISQi~ zM0i0kV$j-JWOygFhS~D2Qx_OTNR$A+*%zU9!n++RFE;l%x5CKW3U>jFaKpYJtM3^yq&)$i|8EoPYa)mC`b~s4}D^8+m3(KnXlpqoH!0OpSUnG?-eMyhj&-z zW&|hT)lG=*z^tpH<#;NbINrwbh))H|?0WHAA8IXFVDY*A3Z2+v|9F=#T1Iox zPia+pq3SXRv!6hyoaN6GlC5*7C70|-S~;iSvEnfbne{QZ!dI#Dzxkj3OO}D^3oyjk zqWX#6gpiA0Hgi@`6q>#0R{9RQsIJ1|xJf&f81XBMMVaPV6guZJmvi3n0F!kt^J21V zIldXMiWlp&e-x&D1xf@N1ZAye(p%h+XwXbSDjm93*!INDB-z``xA%WffLRE|fwMs* zMN_+|*=pib3%kXnU9CtkR;0Ft1t-`;M*vnS_Zo?V!eEZp+}dMOxP=L5bcRnS0FsEQ zqUy_4)3A%VWv^ixitujB>H}uVyUqX;IueHqiD~Itm}#Z!8v+`7m;!Z}Z!if>E!1KF zCrP9)5nbV^e#dE6*Br~LACDRw0MSMI#R)`YY%DFD$+fcOm|N;lI~3upGte|0WD&70 zHB0*&MkS&hUZ?^vLh#;q>p_Cm#1{Z5@Au@Jr41A9YbwU^fdQikIMNDa$`>8ejna1U zuG^yI34|a!nAay+gxv}-dxxiox@FO1@ih1^X|$Lu1`xP?CBdkXdUBhZq}m1mirFwA zK;(L0*nZka$kD|s-|Zt9aAEPAf`*K%K{T*!-*TD}M?Hb3>Rf+(c}%JxCQTD5AFX&E zDR+r&gHMf<(r+O_Uopm58q_|QNeKV!B@!0@#z=)T&3H{t0Vjh=I!ADhivil4Wf#HE znx_aLoqalSvM=KcLf^SR(YeL<<|z@Lo8=|u>2tEan|jUQu+F#+;VwqU1PIUuCi!2I zLdWbGU+o>2*Eoq^lM2RTm6Ox-5-F5p ziu=r=*$&%5oT|08wq71DJ_W5~91DC|);HGCjyv3E3xc^QGjDSg_A=t~726>DJ3p24 zwkL6EYA4Zs{IhV5XUx=y<*1kVj8EO*Cj{M$&u=o8&>g`CXCL|;#K1PjyexiZ{F`JHr_HJ&EMse2g#8ySL;i#YrDJ`NQSE1J>U#2eb?f%kTg2kL3wl zJT3}rY`2Xem770Uzio4i9&yBSk1a(P3N@gGrA4rDQ(pY~d1-CTm3iPEPMnhp>Ix}+ zi31Z(l1p}GxWZOA*&cZJl6;|KIAqV0YjyzLOkz1Vt=c7;%5CoxwEuz04t+82+_K9y z6DORtmX}Gl_oBR=Bd_TkM+~n_m2bAb#QMNG0^R)a$A4Q^dr!)XtzSWhoW54RVhiSr z^6BG0l(%dj5&FwmUzbm(Kb4uoN6c%`=vmA<5-D{|vlY;Tsi7Zc#ZhwfNHAAECEt-0Ei5vxnAZftb}a{dBgxF~Uc%wO zbcnKTYy>%Zi0|Skiw}gNbmid*N@wxc@wv>6_X8iIP?ZkB6U<17LEf1*X!ZoU zy>+i>71SvX&CjT**YZkPBfKklx|%xyT9~)}?iPl>#g9Hv{-aUujhJ$++d2T#p`y(y zMcJlO89&41m42G2xK4nWX{q0p-n!^oU8^HQeSY7q$4cBKkxajm%m-%G;bKKN-cTMvhLO0z)Z#K9W>yoDk@~1zNQl$ z-CMUZevT zWA0h-;Tpl6p8{ns{NPRL-sN97Jmer_;CRv>{Sm~WC(tz+d+A$YCNEQe#*edN<`)7(P6pzciK31A6z)EFwTZ!mOcN9!U?F;um+z)CpAevwKLv6s~BO?V8wE)%sp29Cr0C# z;o^UpS@b0u3;nJEQ1BS&Q~$>)%tte%E3w7~IMor;<98K?6+&&xqi}z|XrG6{nWnEX zopqUWf|&&IeA>1+0V{wmJy)0mx7rN!FPx(>m!i^XXiH~)Rpn>|K<`OZpe>g?OCO~M z3$5ts8*qZ7uXWjXw}TRG%3Qj|^l$2q>+_TJLBE-C_PP~%^&ip{Ej`J%q?cbNX#WSpV4tN~Fp4C0}V)--dI@oB1= zGFG=*+2|+g4%#c`PSQBJHrn?N@__5Ub55XaPyXA2cgIQt&MR-Lyid4AvE`kA`R)I_ zq%pp0#RKWoFZGH^%d#EA7%z_Dl+XKPol|H5r~6h9ALSi{S776)*(p-h%(e+)1aCvO z!>%9Fbcs4DNIn38Ey+51Z08h(hlQKi{fesL$1}GlGF^}J!A^)bzD%*URpV=rV7xyTF z2^v!-$hW#a|A^e37f}WeNP{MCpTehR9e@k z=v2lw#4Nx1^cndkH_8=9+un9L@$2-UbdJcQ`WRo5T&yK1OLx%wH4gauy8|LoSMjF} z)xiJe$`_wKFE2QrcZ~UPg`#?Hq$Jx0^T2V#r^@pU&i;L1pTf85<6Dk@X6(!kTj4g}a=^hF8NwLM+=r+^! 
z9XNFF?Q;g0t*UFbGG4uY!MtLdmgC!J7sjwi8St`+g&JR;u4!`C4iP>6lC7OKa~6kx zG^P8?i`hcyqMvHFrZ#hp>VZB-$M8FwU~6Out?U$kB#sWg?&cVI;OI88m{SW~11C84 zS^7Ib8{XPx3+i|$5yvlHze&K{I47fxO?hw?!%pg`HV00e95Oy^t+<_dMx^m6>5Voy z=;7ttO`^LW6cPmCOSKIxu>B0)nPSjC@DTITk76@EK2#n+f1_h+vNyE5!O3FMRp)=1w|6={P~Ds|o;7&X`_d zfV}TR8E8y1)T`j1C_3(E!O~0vgK;HrVQCgnIk*f$(_@4Xs#qCvstw z$u^g5N({MthacZp*pQj|JOYR-Md!6FywoSuHX?j7zbb%v*S3A%0#z8IqTEwhOQaaj zEztPQ~$W#J1@2FIWw+25k_t6iQaLRxK{|`Pg3>BtE zuoWaKx8JPI>01?=!7;>|ZHK0D{(gUvs@<`#7Dz3@ox)V(%$+JT)|TLI zzmB8xA-K*vey=noO=p~Fvl$QTYCc=fch4)0xDbAKPpIg9MU>Q$?|<{ZU_#lC2Pq*v z@X2v@+#UEjZMerHO{oziJJs2q0)foSzI1BwHG~Pxip~X0g!+eYQyTY_B?TN|bF6LK z#xJRa9%N2gmpC#?+c{-(myC@SE6{l(G{av7g$PAFlR4{87r~8ptXmk}9&$b_lxc=m zxscZ+$vc(VSLaK{JaehO=R2mtP(asAZ~7bsbOj>?M-2nXU;wXj&y{M*;v7!j=7pmj zO_sv%7MO}g^{E0+z@b7AT2Pqk1Oq5Q%Rf{CyRq^hS)?zGpiOrwzWgAzI(Puu&b(o~ zK)A|^&-26QY&TLk!X$n2$7gCq;>T|hYCCh(Izp1rhPbq@%Ig?t=LCKkeByK6HnEO; zWsKrv&sahg=~wy*U)5A^o;_)R+IW}0jv4)x-o>5Ytt)1C@dbTwH}g_KxAIo$Kt4DqCMjX2)sJv73vD8}D=?g4>UJHd{i7|$*exYbIrAvx z5lsCBhP+q(*E;i^zH5$` z)_mrg6`HbtY$w>3cnFTUr;`0%3Fkd}#54c%Q^lctj7|p6`k492^)Yx(;riVma2>^t^eCtJ`#Prw@S%JB z;Rj_JpP8ExPv1g@$5e0r$}y~`g9w&|BY|lg)3WLMR32F)F?*YqrCM8|6@C=DZ)p3L zxj^oo@A?G|hEFmE-3W<&2yN_#@c9fpvR0ArlaN*{nOJiXlpkRAoMO&jf(LzYRO0o8 z&~P*`484$MXYl%7c{cZHd4OMq{x#!+%P0;AXIOpjXuJ0sm@i3_NDgg_ODN(k=jJ?X$ z3D#~??DU|-%;L)e2_S%Dg2zwl-RsTr`WuA!6OQ#o$aK44ZV}%Yd}%xhYhs#%Cy=35 zUtVBszC}15D6{k9oYXc&WGL2g@~$2deBo((Z2}nHuCC*Ma!%G3w?PnNDV3%XZaJoy;U|z{7~de$ zGLcWzi057phroetVi1nUzm;J~N9Q!hR|=Ct2f+oDtQ01gkpVO}NQ(;+|l=wbQf z!$r2mMhK`Ns++)q7O9waP;O*xMt7gH-L2BUurNs-M0ca4Jblc`Wcd7?696O6NDlp; z5d?Jt2bd$HSm51GJ4b1?P1&DrD@O2+-~$JjCXi;jOb$RZS6`MGM4&7#=h@74;~^oRO#G;6v+-JNlAp~wQkbC)_Nsqf^T zmGHGLtb%S`*3*GY0Z&XzdA`-aN9EpE`EkGAg-;<(7$^jO>q2BZe8nQAHJlH2%C@P_ zcf6X#%l^eeD#9sdi?(3}mJDp%4LsiApZL*dq{%gDz=kl`(-fe8QA}vpJ_Wu`UFoyc z2q0I313zok6Hnf=4IQ)_*!8ig_~1ReBZ%VOYG|j~@u^tmR&Y|=QE^ZZbT=WL;=d>~ z_K{zOT<;snS^H}P;;REkp4WTACGMn$=4$%#H~-egfX#NkD^44{Dc z`OZ7G;~4r^nDd>__FW`2%7ATI@;zL%=X|4D`lnA8&j7+jy^FL72KV?(8m3E`qbZ*{ zDt#z0nGq6_jdc1YgR~NT9Q|a4T%z;G>a%2A)@|oo&H3L#r@m1T1B4fOWk%j6!^oBG)|~MJCf2~BlP=RzHG&rXOw+E&fay} zd8MsdPZ$jKNUyQRTPH=rFS6ZU@B7@g-ClP-TZfUvnJX|UcM{Bj9A$`cP|?$p4-=w{ zK0LfHvUpFJGC$G-g?FsJ`GsGAtF%SGQjYJJSnO@r#V|rTbvu6U4yec}fcN9j9=L#0 z-tWdyKjvlMX)bYPs;jMQPNM-v@0;8}9swZg+dcWAkhOgKrQd&H3Z- z$TK`%RN+)ujMSG_EpLCloQIB=G*@xp9OXIxT*UksUPBqg`;0~Ko!9w~x3cCF8h`g&T|R5YoN}AIvY` zC!Nc3dG>=JmWNANSW&7@IC51tk@OWs-tqI4B~%~gZb_8l_AbfUNuP6xpHEfRfs=&% zM#VxtybJAP(Txi|r!Mi8I;0=y523Rc7n0sVtoEE_rV`N5s}^fv{K#>!v-la_Bg-~;CTJtzW49)vjZ zF^Vf`O!tvNgsyIO625r3R$i{Ik`aYyRDvmXaaUP=-7eq00shN5j-%yZ2~M<{nCGZs zgi3wh^26UAoYyB7hKPV>+?`}bNh4(T;wv@4mVi4~?_QAt2KSfSHu4SAVvnfQOA_qQ zE_OkS?8I=cvdZf0^l&-ZxhgOIdJO>@_m9Q#;K6*D1&4DSMc8c^2Cov8%J&2oe@?T) z1;hwzJrOPmj=4ZVnp+x2>A)|E-4@+cS{QFVsco2?%JQFU@`oCEdxcWaMyVJftJfd^i9l!-@n81J;tn9b&ft{T#6rE$n?Ev?msq)FQ z56a^wk9dY6$z1sPv;U+#nSX|&MZVZu6dO*lyJSab5BDb50#6U@B2eb(haYfC=d&);S@dLpTqbS}^K=2{ z%4ZgdDp0`~nXFEdEM&gY5gbyMXZZ^hDhIMrt#ra8Bd?aD4pUJvh?SVB34@T)#3jh` zz~)=JDfnJQUaA%gRq$bf;+sq^0u->zl;hICJ==^G-u4j&_>-$z%BW2|clztZ^j9l$ zu*_d~N75R9#AaQ%6$F5WcVo@BeV&UY+la6S&fYJIJoLH!N^L9#5%9oQa7sJg7jS=K zQRarvq9`F)DcIP5cLi#0RG~sxwJzTVKjL4jtM3H{?%6kS=_1s0UPUhN(5|mm!Q38u zRhU$!>W_6*`ioLv5#CXGs~8bZe-(y+4M$Gv@DdIeb#W%mWRak3DsYfo6!VWh`z}8| zPP@>9?`7~EL!PAOyyi$ao-kzvlunf@m>#FAK$MF%ggvxmpMgingjJ!(zuEo-kJjOH zf34eR3J?l-w$so^>JaWu`5Y%p`Fbn0*RrXnmb3C&UkCHlYbypCSg)T2tZ84EL$}#b= z*`FuUS*+;P+mFRch;2W?9>DE5*$;8xeaFfF6x?fBm^&-Y0Ar(~mw7^8q;o~=V2PKt zD(eJ3Mmh$a{|z|GK@HLd&xKuP$_Exll63p7xM5M5nHq~gs4y2H5`o@4k-`>BapnaS 
zf&!qh>jEpTbY%)!-5+NwU--Qocdg9!L*8OPq(P|3PEvwjpXEP4^E@3%?^93QE1(_u zU*+YMwtZEs$^)ExT3@ZP(n7_D?WaZYnRa*%Gq8WzMWJ2Gh!d^PI;mTqG~4Wg)-iGH zr~ufE)YJmqq!My&W+wrKAQp2ZKgga7IshPrqVOF)U%NISkVg2W4e?}mts^wb!VCFi zoUTzEJYYf>QOC;hW>f-~IH4n@SG)t45vJyNm3D7YT4T&%EKv#!cXC4CUDY;Zn*&W#+9m?9~ z2FdTYIJF8PQ{x;kXra8p6{5|t!Pk8pwM%-QYuq=Eu|j)FpDAssGI5P%vQ@UuNvMB| zWC&csr-9yx%UcA?n>*45;y$s5z;$_r!hz58+4&xy!41NwinsX;yKs%^!XMnrmD|&p zJY+zswa)%wgW^0c7z=BUGJ<0SZ83%#3&i)k);2CU3a>42?d}S+363uiHZO4VxH#h^ zv-w`q*US?!S<2ThH_I1aKPSq+2ZwnqyIr_EajMrHmi$}Nwwz))-rq9W{v|FL%u}q7 zU%Xi>UtxVd-00#N@N)d_APV6wsa%dpVKazD`nfk%s z0%?UlcxtXu@Nsoaa09TKcND+&`}gO{>CG`m^m2#=icWinAP06$Mu<9p@RWSM91y_P zR2lACCMulBTlx{j6gRe3dzW!w_kwvm$l7pqC8asnI30|9zz>#IWWxCL@0E9}uQ*lf zpuBkXqI~%^N*=jZC&%ssF9AZ(%Q&us1K{nZ=LCy-Z}QfTk4z@#>&sWrA+GReq}%%V z!>48C-aS4e^Fk}PoK$y3@Wn2Izi^$RSh;&TP5Z8!+gqzS0BZ`xVtR6lou-ZCmo-Sq z`91*Op#o^FU1rJiI?b-zIJ*Y(!%O9h&)=0VfBmMse(^5!-pg*BDX3;hDYSC$0kHrm zbwt*Cl=Q99>nOWjcewcd*`NK{XIe>2YtUQ@%cf9q5n_az+kfxhTB`o1I4p2!8{`D!Kh?b{xpkx!u^j%bIKI>E3SWCQb+cIi}Azlq*kti%HoGw0{9tK{Sw~Jt{ z*$H2nVB^cKfI)#UBMWKyP>8X-3PIq9*;9Gy(E`Ib^O?FVq=Rz@7xu*#(w4#`-xNHB zRb~?_8ZUWYIDOA|vMBHw)KIbg=dWWvsQ6lVe}n|Q?~{E@7eBRAmpZ?otef}(~pg>D3S}JRfKmBoxDri*W_}G>-USkk=^!^I2 zY%-StPP!H7=MIcbx5Ybk*j~oV%DKdaAFFaw$Hw-&Y)`z4-&)c!w!9X2#zPZbo&Z1h zYveREW;^`m8SguX{T{4z=pX_N1wx(5#q!jpFI`M*q|FKiC=cc!g)fasaseXQ=WG*|E=G)MsaKCHW#7e22<7@ zg)Y*T%1sS|yTFChsUsW0)7 zQsNCDc_3}ndZj6dCHMI6Jq^|-Y8lJRYaGi6ywnka^?g0{{frqj)zFoF^O#@zE&o>W zxFBFo9;mxm<>%GFlXt-G;s#a>uWJe%~|6MZ}h&h^WPWv#YnNGaz; zxeBiq&@TA~IPK@)hY_~btwXA?5pgF^)_qMpc<4-<`ff2&-J(nwB#fewNRRTd%l5IW~aiNL#+Sq(`SJ z|0-^6%lS!>%Hw(9L+;z0IP$O0ToXM=!L|MzlBqv0)UoQ=r(i+KyVN4 zp8&@()@Q~FKWX!MUxMdLgiB8!yFm!OLon5lR8SVQ#QU*JvoH8_ZiGi;_3RHP>!{{AK`FrZUUY} za{ueSoThfxGBw#0N)c&-P?8kjRCIdii;C1ZG&$T)en*5g$42wS!8*zphGG57Z#h-V zbT^kMA;;i;@!=HK>FWE=$U>9gp@qir zj^{OQNaTtfMevr39242yMeM}#V-#HY+U_>OJOQXGCdxIJq(njx#Md9IGV^kKMa}Z&WOlnedENn<5TE^U9uZ?D$LdD9G5-^QM?{M zS_I!jmT#Hwbub4JY!LuFfD6*gwN;KCKIJsCfvg2zeYHo@e+~s8irawRevIa(209|G z(#+I6yk`()XqH{3SLA?&CJvZsM24e_!<$F}MVv_=}dA@YhSyVwaOFACOB^S-n_eba~QCYcmMp)|NOHNPTs0)hy|L-Sv6=$ zq`9h4=Oq8`Uh~Yq-gN=6krhdiZsf9ttdPP1YpT;eSi7k&)(Kpy9{*Bb3 zfD&t|b;!J7W->o3|89Y)ZO9mP4e-82WU2}X5^r-URLXGU=MR&BMc7JxCw06; zQ4)C1{!vk_n`cdqPZzsb$-%GV;Lm&fQO+x4&%5?dfGlY{O<(F?OV;NV*M6tAPJhLz z{qdP}Q=dX0_K7bF;yXjSFR`s(2sex{y`H|_F8yea6!t%e0% zpMB{`%apB#HtY*^Ic^zeXwIEF-Sy&XKt1lNIj0-GkevoQ<8Lnc6#<3{2Py;HcL7#- zqWaL2ZWC7uhQOy4dxXVhVSb*VM7M@}wf)8%TM~$y*8hDl!T`Zsl9< zN+E@_+~f8LZs<&%F0|`NIX0O~A;u0~1j>KGQ9jTMOV@kimq#5Q#%G=ggJT}r<&pj1 zmXPs{OZ+$|tdl<~scE}uqf^LwtwSrkbgOXR90x9i$sehK7Hhr2sK4tu!uka+6^>Qr z3OrR_VX-5KDyyAOB3tng`Hlgftn`+#rMlq$&Z`PJ)J z2#5%LSl>O;y9albeq27TaPvJ!K-D-ij^(l&Yii~!E5Qi(09go1b%&{VL~TLnLP#_- zOI7YXZBIXH(&%s+m&7J5U0?}y4kn1+J_EBMtvx)ibd4a#?JI(O?{LM5az%{5-7OhY zv>$*!-FIArR0{5{QBuf_czugtMTQjvAx#Q!gHUwW)k5Iq(4m1baKmxA1mZ4CO=3;$ zDQ}Nomo2QtcPKUPLLOki+}nT$uuJLg#tl0V)ANi!xfjQEd0AM%y#&Py0o|cckQ>0S zd*uOt#}JU=mFrmOH&!o6Tj-7^{`2J5ls0?G|Cu9-8Ler8XD7y{p*<|a91I|xUn4{g zb&+KU3;E#=!W_?#aY;YQT8b;##Iy>mw1I#Rzd9if;1+?`K%hy3m{c6|h+dOWp> z4>-hvgCBY?CV{uBY_DyTP@n8JxR%_IUdWV;D<8}eP|Im#xQVR3JwQlwmjasP$X(~Q z3G&atVK;MXbIoivWS~Ky9!6j_KWsNv=Z_zMR31NhP#!;?BY+k+8-;zw>VQ7q46#F_ zQo?*W*xSP*kDC-h#3J_;0o8Tn9HIDV{}xNRa~K7Zyq>z>E#I3gPd{2JPd?(v;(Ie? 
zWag$Uaah39k7p2maU~)>(hQdInR#UflmVwCv^asWW}cwkV*+Z~0ErihX`wvir5D;5 zC4cJBFgrO2&+k^rojZF{7VF@UI16;( zBVs$U>&iP$&}(RH*1&k$9>mzwO@Q?=Nm19<*s&-N*eyYlA841Qxp53iBZ<}6d)r+O zw>`|1`Jeygzx+(-9UM>K z!IdAaL;**(1Tw>V(?7oX*Pp-Zs(0&i7kUC&2wb^F8IVC+lgHf|3vljY4g;!L9i(o` zW4-r^WsTW?$-)tf822RhULDD(L%~ZSI*8|v0*1`OtP5s-k$v9MF&Q-f<=3;ey<7m3 zbIe(rlDSRuFwPVO7P}!A?esego_4*2LzraN5z<^N_>Jojb%aqUC_4!VGWT`g`2MZ3x(pH6vBG$wESv*)UT@dU({|9n= zV-df2KLUOFqObuJF0#N;1&=y{6aueRaS@M}qlM&Q6<=ADgU@Q65#PS=)%+D+%i6De zro0R#3qJSLA79|J0Q$d|Pz!}^xH$6%1+ofZ3WgOgEQZ`yVf1eVKwyplDemnjzugF6 zcHPVIzf};Roc_GFE3CHXu1DJEaRhXjQUXSywFZmAB&Z7R_NjiBmTI3OfD%xsuqf!JEBNa8d|x{KU6HL|(FNXY4bVsI$RY z(*x;?p$pzTanr@gaj<-Tc}K{F$8oD}K)^=7;$A>G-%I{g>T>)nn|aRyt93az=cdrR z2us2CIXghqBP=d97O2)#76cX^w#FIusZ}zd7$}-r(x_vBSR66+$E9P z9nTvCDt$?Z@in);9xP@QN664Pf%V6&Uo1$TF>mnyC8rQ+sq_F6^CH?|x&)j8R`JfC zShDR)KHFI;{f?@5*9TnU&kEeplK#DJ;0IDpb6Lha{=@o0d1=<#YMtUKMuEIiTebdb zsNkK9lL}#_De0>6nL5(qFEE4-+{rVg9sbzR3mQCp5c&Z&@#Q6r851EtZxoNfM?H?9 zbJej>xVvLlRH5JY^ktTR8cWh5khRr1WKr}aCfrpJa?Og80B;0(h(Ghixun4A8YjON zR;|V2LPg`0t#Y%ggtqO6@bS#KCLOA98;m+=1|h~&97}Iz0n)Wd!WO#FdT9y?>GzntKQxC<0j970T`q z_?2d_JpPw<2bdf35c#d=ZgdYQP|=&Gt+;pGZ#wQ9`nzA##+ADX?Xd_sW_3}( zEF|uyQ@3e0eyt!p`0VL9{#N>)kby$KxJ>b-Q0T z6JsMT$-06|j)&NoNv0e37Zd3J;U|yF)0GL1g}p5MxO^A`)Q2a<+zg5#?kHwL8^JYX zrh6F$V3YdWp=D{!qn6F%In1ua2>#upeGj2!eC&@9ez71QlAALp?Xe4RLK1u}_pZG? z?6O>u7`~0LeG9*snbR$eX>8(te)l)sv2k|;nr9bfl=$Y6N$?I%v-Y5fjSS-!fTEvpzRg<0PJshBL1pUpHITRuC>cD@Acfi75ca+#IVeSqLjNs}w9iFmF+{HP&12@pl z4K7r}xD84|H>Z8vM_K07yS<%b1bW=dP`Xa`a1kZPYU^kKna*_$7pIZ&7@+$4S)175 zxFpekZ4!;;7J?KMRrBp&L*dbWo-zm?2e!u+U7e6Uq|L)uJ)o<4cP@m)=3vtKpQ_m<0 z*I}6;uzZ~3s0*Q&(739(H2%W+5pyDra9T8Zo{b!fj_A& z|NFOo&W*qI-*?ZnEYSv58lt(G#QLFaknZT5!8dXmNEoOpSVS$z{S}*s{q{* zG`JT50D%0l0^!Qy40s9*;$1-8=5G4sef#E>B`dJi=B-CTE;vt#dOzdWc_*KFW(~36 zazC|+H2dR%+hH2iL>Vm>YDE=gaSx%4W9S0n^6cWM(3+kwRuL9?u7IgPojx(X`fOW~ zKQ1!1EB>T|4pu5%aG#g_KRENRb4L7W@sY9CK4m0Rk9A2${ukFN?goE8RVj7sQxW$S z9)zD8Dmjjm-z#m@jyCke%e>1y;E!;@ci~srRS*_(+hZA)Qz3iQF?8r`@B0u6k2Gj? 
zj-z90A^$2Y#Xnve3#|1yCGtG|56sqDFJX?~vu)+qpkSyZ zJt@3;G$`2MJp>}gKo_84P7VuPES~Hq0~4FgJnKggm>`v$Cl^)hQ;K?SXzLOegErQ} z7K^mmN*p6ys0&Rg}=tw z6HC5uP$;}b*oBAh?ua_Cq;Km=8}wH`EA6R#iXW9VT{w*y2^~fNWoHK3ahF*ahBkqq zC!B(Fj_~5x8U(DnoOJ3g&L!noEWnM5V_i2)hk3BKAD1F`f22$MQt{7vfza=^yu!RY z82}xHTB(gHQdZiQ&OFlIKH6S0U#oSyGmk0fxI3(LEj&rXjwp1bBj`5c1pGdTQYBt| zk2b`AUZ5SrpejzKQ09nqz#5iA0r-3c&0e|-z~?rCf15OyM_6w6NrJzDbi0GBr3Bmt1VT;04^GbO0@U);{SDq)UzCg?l3zXQ5oqSDgTxeO@F^i`bp9%DtLJ!mj3$Zq&>Yt2)`p{!Ewf+EsFZkIQ)aS zg#Lf=r@whsO7#U`AbP;_0Q{^E*-7j90Axz^ zh5N(_Ah7Rlo@0$(!2f#@MV5mMICNp_?M708&Twkh^fbrWV!>Y?VW$9}a!*^-mFt2S zfnF33m8sQAbO88zlV$dvPL zZ5=lh6aw7o79TG}@jE-ZC~I#HaYLcaVRk=JSZsHkfLQ7M!-qdDKlxw<1xY%OI4cMHh24#j?4D&pJxOHHu&#r;}+27(~Hxj15xN zU@OhHI41ex5XFF8{8I@0V<=XKo0sMP{6C(TfAfEBmEZoFQD7%VdL0{?C`;2z>{RT+ z59?0eH8f^k{k`>)l$j47tibO{4cdRiDPN=T6Eb9xbw`Eg1Q(-oTna}K;%DY&)5qie z3*shb6F+l?LUxPWnsYlRYI5*FFFQ5ITjcgXEajJv|LgMM+>gs--(wU^f{b-5>^B<` z1}3`gDS4M1B+-Af0_`&WcEa2PY^5 zoHsz`rg6q%ZEH8Q^vO>iu+ET)3Psb?$Cg$`%kqP%^2Pr{O4x5s%M=01Q|vgjFlHTY zlPQV}Lk~Y9dlCn33=jwPir9xavMFt@@0Onb;eY%WpB>@DzfZKgTRSso5{LL8?Xr!b zfUL1Yrd%&LLs)j_>;LDU%J>KAAe#^{Xcj@w+{RDujQtz8aa@q~32uaPnViB)7%FW< zK!6izDUt>1E|i5JOv?oiw<8#MwYI<+xgQ}$?nPbxYc3b4Mj*G2Y%97DS(d*M!ua52f~n~RV;0Jaw; z16*edoO{-*5C>45wyQPH_qb)tX~p)!Kq+6BhP;wzz9`I8Ry&XdsNz>#ytjb)oXdsE zdVK~6z$LEjhgHy0Z8!^%FguR6F9VKZ1^j$V9T{7zb1~zt0$T@g>PrWFnoB{lw$T|E z?h1pjR6#|-nBF-);MxUSmji{WSiFHlMawJYD3h_~eaF-tn_J3cK?NQ@Sn@qu>cU%b zU>(i4#}dVuML?ka21d(Rg~C(>TfVDo)cc%;fp8eu(-gg>f4Ue{n5q({sfp`nG$G;p#In?$L{6cEGZ9!s&?mjJpG%GB7s5`SEU}67XwQD0cxP z?9fgNKl1~uNC&*rBFK0NtLoyyQo0z4##U{cUj<|zsE{;c%k6wtXwvmW{uO!!PMM=r zWEs(Li{fE_eI;a08T%Tg0c%|7Kjoom@Sf46U~S)?1`T|-&Wcm<;rjxiqM;D3H84|% z8st3+#TjW~r=&4ica$+zCr~pU^sU3gr9tO^=+g$doL>qm&TXH{MV$8vsS5D6E3cAY zs<3;?QnD>E77C3yWeVJQ8S|yUWriPV*>{Z}5MY1Yp-?bBFmVMIM=#w*jE~c(p^M=l zLPC+cf6j<<-EOOaj zcY(`pgo-}$(gQfoD9i(criT&W5z=o^$bfGEf!UL^Jc?GORa(2lf8SH9JVDF#&CEpS zxUYnc83^4g7C8*+=!Ch~#y5#~OvBj1#bSTc)5|UpRB?fD?IG=Fi_C;(oWWKPei$b# z#Alq~bq*b0vSaYq>)T}d`34wJ=%KmuOM)k{+M3C$#qLlGOMeepiZJ``Yi&CuL-8bep^PTm?tPaL%1Z3 z4lSd+Ov7W?r9m#bIOT}hZFUYQH_PE$26m30(LO@^6|n_vc5!Z5BL-1U-2u=jGdx1< zfT0=07EBGyuq!c8<{pvV2&8%<*w$-eWLTecdqE{GXE%Gew#}6vef$X5#yi}k+uFMX}G4c$_?$!p1j_GJQau_!q1?oQB&AP#_0p1K8 zflV?W5yY@YdO~_lAsCZxHSeFeD*B-;j2;pY4t-=;j(>xCf=cEI_(9 z9h1`blHHuWUDBdnk~)-~g%>a0l&`*ghqWA6HRd-YPW>nX2;7dF%KXgK4DC&nGhzky z-Xg1V=+`yl(<1Gvt~s8aaev`{`Dg#xe_MX}$sdoF=6Rk^oto!6^>DkJO}a^tL~AxA7@}YU1`G(2 z0SkmJ8!&88FDyXRivU0R!La`TKj=k(Ukt*QC6S^;c9Y$G`_46=I(6oGp8b8l+`H;F zn@t#Ar*`eN*UB+6GBP4EBJyOnn-lsl-9mWWgtwlaA*3+J8K1Xrvy;p@2tBt6Z`vLE z{R_r+80X;qy;ID+&hRC<-erfwh|Ml-YimfeNuhN0jdlI*Z|7r6}QobNIkitz2=2PQpQi>D?v;d6v)#sIlj}(Cd>nACDYOBn$pD{ zqG-9^hp0Eo4<^zwi*!M3}H#-GWz;%J#AQ0i#)Zy%{~SlFlNs<6QxC=?+FdBi1C(S)zhg<0P7pG=A=Y)egN zd#Dp;B@4a%OKQ@Xj<(wTq~XS%_`rId8_}L8eR>!i7O253IY6$u8B&V)R5j5IkhpJzX?nxa`_s}_53cLP*X9KbkQ&mggu2Z?dlkHcs; zS|qi}ITF~GI9KV<8Jem!w)tPr8zPT0+i%}N0N^Zo7RuPHt9_-eCY|y-c<8syRKr1C z&JpKU@SF1UkGWMxfmTz7_@U!739B$p0geMpEBRqqrI&$=OqR@X%}oM8Lf%wZu`xm* zS*;-v!kpKn6E4zjg$#cAmO8{~?<~jm!eoQ@GFLT}v+D^Z(GPJ?C)_uTKI0u+Pg{LY ze)~#o3E}{}H5pOpH5!aQ8VO|y+>T)Wg`s$&O|@;Kqm-w=46Gk`(t}V|g(*iC+RB)C z7KBPDnPu069vJ^&7Gdh)j)v>s0nVO>Xmukvfd{*2TxIyTwP!~t(EMi=vlNA4l?B5y zC=o_GG1nMI^NVdcLeZ|VF)@|uMBq}O(Xq4>0ZD=67w-zdIwWKRu7C? 
z;eaX+ce{0Oio?+e<}%N3%u*g;Ti!PgKZcJ)KL=B3W%1ebh4N!Ue(Dc0Fv<=bb8&rV zrTp-RpOyLN%Vn4YzlL#;cFofdVR>^sra&rsclTFu+Gng7SMl=eUz>r6cisX^4p}>f z+1HF`kK>+CdDi1D63*3~?Dh_g_LQyVR}vqyTIvo##oCIl>#|$1*kf=J7W3= zT0%Rs!wdbIz$avg(|@~qE;uh{4y^%Zovp<2Z|%i246_~%0_$y`pr38!IXf&%OW+r{ z)eWP6!kGw*2v>`*m&zfy$G#k=9uJ{#;IJ_?{8JONWw?8|Y@EGL#3q zIM7ee&}VjYq_s~zStF9rDt+R71B8~9g)RJc3T9FdiT1TnUVL;&xYn)Es`oy)2Mz2k z&p&xp7C6oL;k(y}-bsDEcS0-M{?Tv$50B~sjUxe*TS-|Ob7h9Oxx`O(@(2c(g0Ega z@$$QQp9gP#Bi%7c&n}o4NPgP)OT0Cc*d%Aqq8VFY7Yhqu!Ew%#1=K+KS6hE0(Hiun;G zIDU4h3LCb8F`^!YBJs>Nq^;x&^GABe+D{!%+HChIt7(2@d4V-A#5>#Uv)Ui>d7thp zG%>#|i~LEOvK&hOsK1Fb&H7SPU`M-n-;!t`wVjqP>Qr1eWuxtlvVEe-S?cEA-A3Wj zyt38!fks5bStv2eEgFc zQizs8>>)UUQG#3q+&Z`I#(i#q9=QWxxZ{b5k+kjX=pX!_?Ch3m3jkp!N{Y^NV>ndy zBMh_=>7g6#Y9c^6q`-+?eEQT`Ue2!&zU>Ly%lqZ(RSwg6=V5Tcdc}ciszN5}7yrE+ z%fL3jWdg-hahWuZ1waxm!9SjvHi{8#uLW>`?ATDQIIQYRoS+=X*mhR@tTaWos!~wr zhcl-^ zMyhHTn|NK{Gm(@mlmhIJb$Vk*?S!7ea#PtF;6Bn2t=Be)(lSZ9vH9{;C^<S@D491#}j6T-yo6AYfP zC8&P~;ZCBah9g#GKQadQG%`ZHc-L}_>L^Cq!+e}bZNRV#HRwpjI>9yx`2wSUqX!rRm5?v%+JU`rN_x>r;D5IwyW7Dp z2@`}vuj^fmpS78iuJKNyL7vGgfXv3=I#J$?KDZ$$?WxcDj3W9751pUO(R|!yKuM`vxAcY>To* z(MDVJ0N0M{2tc)5VO*OrieWY@C|2~N$hT>C!i1dLvU2_<`2r_-H$k+f^D!%4j4d+ zV#x!4M>xq2kK*%*vh?bZQ`d+;iVA;wi!CM&zBnTM(_SOYXFo#Hcvok~G z%C%v(J{AZ$1?N^kXV^N@fl+!3&H69?!GH5e&Seci9U`*1 z)?5Lp#)q|O5cQ<|q5gdT?B!rIFYoKeJbt(!UOkAc`zJ~kYI@inAVAX=!HV|`)Fh#2 z5*RK>6;4)viD0tJ6#`Jidl!~=2Ey`O2*Oo!D{)j>a1l{WB{!m=T#2ZhimMs}iWDGY z;TC~c(|}c)8r2x(m`+Zia!{7dOA^?p)ay5wY6Gcvs~ot9Vw3oYG!kml+x+U-;J$l3 z5lEsevB^b!uF$UD9^se8iC0-b$!)n&Gf`e75a4f4)4Qn3Ny=46y0NS@(sl?No9Bdu z0MW2KBXd>3x$wN@;N%L(LCF-*c%FrqC-tNu>#cun6x<4@Y+BP;rIMAo-)s+5E`et* z+%7~ce1OXU@o7jUb9{=al-lG_=a=mcl8KwEY_JDHLZEc z%oqTH^u3{WwaNJtrz^~=R3Z*orz%tL#hX-GnIdi-b4z#JylNX9PY51)0{gsaq;0g@ zmWQbjUwCGH%{~c#+b5XSID2b)nZ8IV`DHuhOe^j+^ZIRDnv$cx!=O;0xR z-&>OXjwDE4VHpNT{Gors%|$+;LdaXU0#y3hCmv9!LGuU3B z?_ZFD!r0Xc8}f(0GI=BQsiAK;F&mWf8DaE-qe<1 zVsVv@avSe*$BU2S^5!zXz@R#2()t`csTPx`0i5j!HwHI1!m)MMV@ z3T>AzNuy*KLr>_7jWKP!eH{cb^tVwv62B2 zPV@k&zX}yK)~;jbXd=eWcVYOYL6jlA&xn!FH0dM%NAd@@Vg7yUQ1D;yzz&n#uk;_Fc@H!K zw>TL&{;0(h@So>ZD?@rG1mzrq$A{SB7#2iZwG&`zANKjZ2jjGZA^17W23_SMcs_>M zoorVrU;#1&Beqmv6kPA<2F&JQwg?;zMi2O~eFsCch0U9qyS0_zb#VoB}*;?gFQP;Zg7ow3Fm%V6QNG#F#F=A}kXmY?z?(z1m=5 ze=~xuQ48zl%jd6v8e@uT`zkRmuiltp>kDB7A*QT@r))ROui$wPQ$BgUpO9t>BnZaT zbL-k<8A4P1!$12NK8}F?*c<6>{>$J0$K`ka%m20vVdL}hlb-@##)8N&W0TA;XwLT5 z0;gOJl*u@qSRR4)^+G0W)gHp92S=Rk_Ym(Fr!44v2R1dvcwJibbhdp2^?~xc|ByQ&u;(~Ihz^i%e#n|@v<~Q(W z=Lp>$VukL+7U-lrpMOyvbGq39QsK2ZL+r3VAom_ly!S4ln@V|&4bDCq zZjYL7X)T7b<0v4DFCO>`BUML?eFFN<5x68@^~ zKl{)Bt4GI3PrFJ+7_?qT61+ZJwK0-}P@tdr&X2$3KU$m%XsJ+w;O*fmtr4dKWel<- zvH+Dj{#Xea#m;Fwe-rIK4ySS*j7lWb> z)vklfqU*TTd(*q1%CNbRq%X-`$5eoYsYvcd-ey55O&HZ|3M92Hw8zJ;RFv%>DbikH zVtIz1P{U`>(@*lsT>9GuiIOZJ?GpZJTS}^Z4#P!R3Di${0fl2lUS@wy>GKARtRq4u zZxmvMk$+9RbD?((Q&$=REO}qyWruxjo2SD`}fo#vV?@EjsZ+nwAm9Ri_4Yf- z_$WB3@mBlmu7HOIC|D}wi%-s78CAzp?QJ(&(=f83@u?>L(>X*6U>r>M%T>at{T}di zuJho%b zDQaxPPNJI#=WeGPLgnN{`xO{*ga+8ez(;zNuu=9B7%eDudWzCs8B zfBUA&H{bbY`PTjKl$+Dn%MfiD*V7&$_otuzX=(edzxUgZUU6)or`O!PJAvyr6Il$- zRT%(pX-0kY|MhRs%311k!YeS6EWd>h9k&nsHh*g4CPIq|(@$Y616GsP0$6xt46{Hn zSdXUj`>* z5vGH*YGIWhZF~CD_^;8Vl$kn2jP#x4+?&jLOb{l}cNXIt{xx9|A%Xt*9k_m_ze(S~ ziwXnECu;8k@ z7pCV2aP%v}l4C%dYkt!=?`u8QVcP=wWUgss-fCgCqY4Rj!S=Wd(11{dVe-}S&$v0h z!ZWpcYhAn{Uy_nr%}HRwv)WIYlUj!Dw!MzCyC=aHo=McBWlec%LeR29H^L~)?Ha5$5qI+Cb-S zOe)m=$v8n2`_aI-3z6MI>gs_j=!V(!YDXhE#L==cj!={=Y#VrArc=gEhP2Tp%dD6x zu0qQhZ)uaCW|N!Ldj&0q#F9qPlGU;ukC8NybW;C4P$XrONLJBAUS(j)|0gyAggg1~{|f1B3? 
zdmi!BJavbU@$xq?F_FrncwO_%D+(q0?4M~oP0LdK-4qOj-99Ha9m9CJGf#NOjopx6qhxm zS1{|tc}AhYa6KI|$&LqJn#TETYtd#7c*7KG4xxEPCkFFZCa=Pj^kQg#QEtyomm626 zVQB4-uoDNg>E#}i2kAyN0V6)8s&76TZrexk(H#i*{W3C%aXlw>t!*uWuQ1(cZEcF_ zyNw@;w%G7CM`-oo0~r^T3*@)Ghj`V+GEMDunN_KWlh;!GC&;vSR zx`I(Tf`L0FN8G=9$tkIv#w8=)g2MyK^Xwq%4KDu8UY#uCQy9HtUen%6oJiUQI>#`+ zw`-77%tp%+JkoRE%rRf(o$FI&8g2P^-g&?L#<$)r$1RwXbf2Kb-N3nF6PQ4wXwL@D zM+!Bg#CDyUfreq+vq|Wz8xwb7y1|o<)6z3}3{G~VoFJ|-0@(N@PEEuUg`|QHu1E09 zz#7h$5K$kLzw#@8y?pOG-zirS1YU2vDoZEJz>OU^aA*_H>VNnDd|dWkz-+Iaqvb#0 zNZ`@3y17s~#_nQ)uu(qw;u8d{{lH>&_6q&SIDTmny3<+4^_r#KyT{-VjI=T84~Z+k zf)Jwi+nuOx1k%At`bP{{weCll2|50)o+yV&&Fycpt1{MJu8iI-W4#<;foCizjK@FH zEU-A-Jue#zXwhNT9sFMBZ0lfI+FUF5i8XqaLtnKhDF(<0&PSGA^u)2Jp*Vh z2|d+|H|-u{YjlB^OV3Md{}~6yusgVhKJeZW=^f(2%+MwA5p+8A1^|ZQ{3Q_u_AnW1L(zD!&LJxOtK}9v{go?|Wn$o7`Tp1cM)~%; zUk`1*dVRLsdw2(d8M-#Wp${B4-S$hr{9BI1eW2h3D8M-sh3I=Zo*gyOIlPHLFxv{U8;@I>OvF~8`@GCX9$_k zSiC(_lZUj9Plo+1;~;}n8A;OcUdAnE9;A0GGXe~KaU6Va_Aq;V*Yvd=WJaOf?7MHw@3#z!u(yw~ z3+A_rwHoMT;{S>pJcqD29?=qp0EkaQR2bMq`p&0T=2v|l?Vznm-8{Elz|0!lrKy4= z$%UW4`IsV1Ufx?KSVew*YDSs7flKac8)Z~w`W*v>yBwJu7+3snSCa>9Wu`1h`00^n zxe7Y|R%^>k7;U#;S;)an@mS55j8?StjFGq{+%k5w$#!WoDt-qd^u5kWdTGC$KfqMn z0Ws)XZKGh;);R}sI98->hK13nU)bx&)WxDWfcCBzdsXrK6b+{H%rYqv7*Y^*457*C zz=PCwxWi#g(8o_-LgNUyO`z=Cw{NlVUrT#sVt3FW9^o}8uJn*s7+9y{BQ4>Fg{NKi zOuR~Fp3z(;kGo0qrSVug7Y30h{@FPD?NNB~y?v3nh&G>c zs{p{fMKyg6qkoLj=mgV(6JUG5+Tr?fz^?M{Hm49#FC~Sxbuwn+v_h%MER{;aHwqbk z8|gw#XdfDXJ-ORhl*CPifuT`sZNbHaLje!W=Z>k&kH!!`p#fzk$yOIj;ADlf2%&^MLrDXzj^^*>m~e-W6=rN3uJy4};!=mYM+t zzc7927^ROefrOkXg!wVZy!f7-PaA{HTnndDwX}kpA|B84=6-(ge$h#qcDbBm=vwRm`N{2h?%($Q+j}w z{}_$&F>`tgPusn9m~8}yZcJptqp=Ql!`w{}e^M>*CZ}}usM$woy}%LklF8Y^Zood~ zF3T|gPhY%3iwn;KUXD&U$6)ZW_2_~Tez~%MsS)+zTe0y}lO{Ua$@&6AxsPL~_Uhw= zNYadGe_K!EeU6Ep!@LW-e*`N5^Xk<$n)SAF|Nb080H$<3*!s8QPXiNqLcI8GPBpuK z<8PI}`b*y{otT?+@F~4Q25S*Q7{&hvt@_KKG6pLObA*;cK=Z(X-f`Cafw3|*K1Xc$ zrR+BKfM;{pu9O`D$1f9#>VSh7=Ekp-PPP8*V03muPY{Am@TkQSt33O1JN^Zuvj}o% zAQ`4}M-Z+&Tyf9HgUI%RAw@D?sa z?wJ^yhDJbdAadf^?DaAFd{|yT-C*+tTxTptuCq2S!`~UF2d-Sz$-6{8O$oz-EYefG2eOVYpk z0re=?({hfHx_7thsbPmmP^Wl%$dJdqn}tQ<6m)TgpDLah5KO2jsi=B&G7z@hveLgH=F>6rJ zAfM&|VGOLCd)p*K5GI)45^lcIZi$wQUYbi?=`RCGk+~1?mXhb4&tw2$icDiY)|v8X zmu(VE)=7amWr{ZDq!sIN<#)j)oq|Kkwy&1sLd&8dGa%e7vkE-4koVr(S6}lLSh`&+ zOdWwRue^&-gN-FU*%+e=i^?FOk)qvUu#=C5aM^AX3x9>4_$^R>F27|mQg?-u^?Mga zgY?2x81l%1NDI-YP&LQ4LncFCgDjN7i1H*{3Dv{zgsq{R6u@tpP#2QQZ2O)!w8L}c z(@NSZ%&bq75N|1-Z|e|Fz%h*%hQQ3R;->!p0#RY%CO>J@x(T*z8`UCYcEk;K;KISp`Z)L4Z9 zYQV%c*0j9L#W!hdbM2R7kU>gY>b&ADg{t|t{kBn48OI`o4dUjr^o^w25erL&U2WA( zqlVCVYfZts^xXzm#@U}z1_cW7uE~g6zm=QU`h&ncLdl3W60IG0qA8aeT84-S(*r?@pyqvEQsO<^&&Qs@Q6}adUIOjM! 
zrrwKf_S&5#X^HTX8MH1by6+rYzcc6aKu~hSKkN6AKV?8eqbR63?qEO)5Y~nYq2%DG z8K}w9X&?WKfZRJ5-;6I6=c)O4uNFH(Ej22vT9?9~IGx=Q1hjL7H1Neh|L2$}=;&6Q9Ktlhn3FW~>VNzVWZ_7)bZvZz<08$9&)$Yby<6yqI4YhckS@ z-)FT=3Zr|NogCvh+TSw_!#ss{vkQFIUjPOT=I`3vO!>xx`pk2;I`l313ZG863yCQRcS|?YMAMD=Q6_Rss;_PhBfjWN7*{f=SFk&j`$s zyGFo-Noown2h%Xaq@Q2GG1l-tjPZaG0?>vIh6aFNgcoYVk<~C{-FWu*k@sKzFW>pJ zlxhobwtE3XOI%V96~J@@0EdQ%^WuVQ)B)zzj=}Jh{w53zZS_<^O+gOOJimUy&J1%w zA;Wc^2upEq@IEV0gc==?teDV1Pf*>bxjXK0Q zrw?>ym#&9XyC9?G?i2?BfRlUBmuok#aeVX$J5C4e=pC1p%_V$Q^#eI7KlPlH$*g%l)K=>YFH)fjMIzk97Y?u4jzE%!7s=I%J zaCWrs@#1HRef#OBugZ%T%LtWE%d_)O3)6(QL5=>&KmF&A?p?>jysCYI!2I`>t3kZJ z^2FP}_-XS^C1y2?V1sy|_*+6+_TYd+5FMJy1a|9WBVO4qf=$LjVL&aZ%#|NV83Q6S z1`sGK3?ca?3ho3nHJ2{DB7qCH&qI*O?-d4C;z_ywkb>zubK?LX~I0zMg-Qj*Y?RERH#InNZZ^J6<)%`^@^IP z^DW#$cQGkU>tmR`Sw5p5A%wl_N^jH9@bhmH1_KSBm=*Fj!* zdu(p{NI!gUhKv(G;NQbwC7-b6%D9lg1+`ukM$RXN6vsxmh<}X=!axPD8#OdCR$L>k zm+i8TmhVC=GxXKu*5=&pf#G(Jcfiz(o97Zp(p$dFckMrAh$ysE2eNj&&lGy9IRzzE zBu%~5A)Wq^-nK`}%eUoNzj&_@XrIJg;a(xFmQO~znwE%9GL16c2bh*fgZlB%8X87x zN!z^Cns-ocG}AEf#yajnYushKEjQ!Ed?Eb-<_=q1oISs?5_6RyY+v7bcoSzfw&tN@ z%Q)W$Cg27^BKz~p3?fjVO92+r-%j!kw*)pFOljxfT)p+MmLsQsXmfXY#b`It9? z&fna$palX786V5YbN&;*a}Rt%I~Y4Jl^nG!69Grda*Q27g$l}$sdQ(>$Idn9t>cw^ z!hsi#g$bg)HY@kS=&P5e9SR7c>oAj+V7u&qb3jJIHXEPS;0N`CK)~3jgt)*V+npkP zGh!k_dG6{+FRkCalEQ8nQemtOx6FMv3`jyt@jGTbFnhiDaI~`+_3*7wSOr6zRmEil z0ByMSD~M@}_&__=?%Uk#PNj~Dg> z0E3xL-GEQ#Gi8uQIJtlc|MZ>Q!rl3X(Etn|ALggOvwl*9^8|>H10e=8m9Ab)Fbt>< zPVXFUq0wcxfB^hozItB%)sH?dfAG^!%4cXVSBUU((Xvy%``+#HYu~t!4K|u>7|dZ9 zx`Dwd6qCI$JUG?^<1;uD;e5cZ%@>Qyb>4Dvk^)`_T3c~b(+6$g8L9{ghr}e^fSD0j z#7hIe8|42K(~;iJ(L_K|IUw$w;uCO&ll~>&I_)|(?r5~bI2j#9u$)~WXu~K;pTvn^ zJwu`3+0!-5P69Ll99qUqWqExaXJ!HcKwrdH?d%;}+k4EeNheOTD&R6!w7uc{IlQ3F$Qd?Hjj!?h;V}C}dx?dsj3%R*({h5Qa`T z26ls>EFSSnGw1^}1>BsRa_0m4bK0!pvHHiTAVN7o`LM&FDHo2WB7P9I{Q ze~jRxv-2t2z&iMzprklwZx8s;GswEgTB(_or?!>T;W9xy(KeXe zLxn3$+S=&rgNF||Nf1uDa}Coc{2Q!p@$*o;F~|DQN%6Z+?#h3f9Uq zde)Z!&dt5e8i1$hPJw%^sC8p}e20Iz_y9@Ktt8>H*W%sP(90dg0{>0%0 zH&>R|%d6+FiPG~iyT}6smly|52y^&99PEul!!Rov9!IEo&Z%Sw)4Sp`BMctti=a7V z8$$6p!rD0w+?(tB2xcfjh{|>A?i|so#_%tA4(xi$cI$k(F*}EK)DAlw2%furL`qsg zfMJIM#ndM+zbI{-*w*&j|M+(vLAF@%u;XJOn}nx+ON2s=`2N}JFMirg<6Fs;S0#uD z=t#Ak9zjR)iix$!;QjuAJU0b!mhC8gTR5V|Ozu!6R+tt07cgGftsoiLU8s@mYx zM71(FVICU|G@Z9usF;EgDr_1=$A-9IRHmV3kjXTU|7i;0sQa_IsD-fbPTtk}HJ_2O zA+^Od>ZUB)4?!_}nD<`RBlB1nKOg&?68Vylyj>vdDSr?I^Q6C|6Bf4L3^C*2osN>` zw>$PlZKmxAbIncFF3LEW$I+0nchR&QtFOPCS{2(wcKTb#%`{=Wd1`;t9@5xW(@8ja zk$$L=QllnQ>B1&cVPho6^hQtDBu6dF?AGaG>h>bVCy(`M9;BUc77<|YA`8Hi-ZHFLcv=Tl<+txM zNiB#={FZ@?{WzKIPZ&KrZjv;n!nn3om{?}TL(A~F#2`Bh+^Z3mQE>ocR~$U344>s^ zU~N-yzrr9w1$EVNtz}xZb$MmswO%oT#ja^8VtIk9#d2Q-OUtZfBs00hMK!H<$+BwQ zE>10Ip=4fxC(OJHuRc7M?>PBw|Eh4OBO&(nm{zz&?kOkYn&X{$>YTD-DzWV@s4|h_ zTY6!6rsYq3l73JL`ON@R0n>S`b{QtpJi@}hc^a6Ek+`c_Q%sf^(1F1b7!BzY8dnxV zcMlxLZfpgsuxi^JJDE|+W-;H=aSerrn*KqYhVS3MNyxWBV0S?nMxuUjY`O;{^bN6C zus95`m|3TA-=iH1gogX#voFfCXHTGm?gpskGRVCVz zO9DL*M5BQ3g3I7QrZzLzpe>CH$!hr&270hM$W%kwJ))U6WMVvDg4z&A*wmMd| z*A;&(TTQrQlR1>P-Un^5Opoi}1Hf2l}#J4Xa*T|*q! 
z3V#_|U>TYt-GMOxk572!_aThhAsX}ja|By}tm5n(2JHk5ry6{WN(o-Px;o4)YDv)GWz4so7J0yQ2f{d!+FxG}axKQ^wpw z2Tl-cBonrI; z458sQ-yS2)p~;*s1(OMW=ohogcwffL%;0o#0uKLhk;5PNV;%Z&Q8&dFIP1k^w0-d&*{%#@B`;Pe47>XN&s z&Y1(uWd^SheaA4Mn44&-cJmsl^7fPR`tWmN)~vI$U{oIP=Yj(h*4SAfB9VBY!)`a@ z%4&`fZnU#r>O3ev`qLkk-~YFNKs&*i4x;Z6aK5Fri)cbO%EJ#HmYa7b%N$1(4`I4P z+|b$ zUDbDi`Gt}O7Y2zG$KtMWiY?CRm@_^5{At-e-zD(<9QcgTvV4v(Hi0Yvp0uGTV25XC z5vSrUgx(f?US`Yvhu1M|!)ym;UCsT@(F)GHj}vOMpS5RYb(bAqcqhW;_Hr_ve1P>| z1)l2&06+f8t1{2&YD1h@cZAu}!k6nf%5Rr;9MY$!N7zY*UeM-!b}0{tP}R$+ZtkLW z9}ppmF`pfuLP4|!!@dW9>Y$SPFF*i!{`Tdd3{ybF~CIc^^1AT%b@ifJn#_d68I~RoG zEyLq9mvBlbX$t~Jh%Fi@%Snhg2vTJPSZF~snS(bB677(%N#vW(dRe%HNv%8!S?Xxy z1zVHfno}h=mS^%XiR29-r(g9_V*~N$w~Jd{G$rsZ2j*=snl>L|1Bq}xkd89BvD}y| z*(U0F({|o#`%QM@!V}@;gxn+4+N*?mBFiRGP_KkFi1O0 zZ(|uLwbf~O>TyxCefHfl$z_AcYdf0^sNc3DrF(Bt5^-utvD^zY7e2K)=C@ti!6q%w zYJ2OqZ8J?6NRl@fVEZVuphm?$W*jJoglV%yYs1b9#$3W*%|VRd)EsEXBYn1YsT`c5 z9Zg2Se#%T+hJ6j=DRfz!B`7w{e%RhnXX_7#u6}ia9Zdv$LB7iHi5K)Y>9M7T;I)V0l2L=mLu|vKS}awQkv=->>pefX z2WI>X1IRD6p4Ca1`J0rKZy(51+X?L9G>%|0)Zz=L%7h{aaEX5kFmHWrGy_4ahE^u# z9xxJQ03N`ciaY<-|9t0ODb*H$@TA5tX=4B_{{VZ|wE|?m#0niX)~b2*!}tA@1pmm*uA)eNrBO`6cnYH=Qs+un|JydFqWP zHfbYVDS-0=oRJ(lb~G?^6PSquj+Je$NFz+eb#Q~RcCf@a-vv&YxXu{oIpxXia5ZPT z40wJy1`0(Ewh1kt8(|#U1=aOQcYFEIDFd4jWM6dC!}lp~s5m<8M6*eGx*d_}`g zKkQ3Pf_SEIU|Cjj4l+9~UVdknz;;l4%#irjluH5H`Jj;J91&J7k_rMc{Q27?_ikQYKRQ4Rc=`_v)aGM}1b!f$(8`zu`RyFcsP6du0M^L$s~ z#&ONq0d_{yk)lTD9{H_Zz=G#l$Am3uEZT9Z+W$H?HnCGaL>nm@Z4gs`k02c9o!a+f zSA7A)ibnY86z#u9`%+FPTKcYj@-mK|9(Ka6mOJS~#Io<>v?&#;v)2a8H||^`YRyu4 zy8ax&mJ>j6JZ*=WxI_bb2{YD)CbE?iwOTvSeljP9Fvl1iBFaxUzdO<5+b_&^G>MoR z!hD7P?C4nQP7g0nU~m)?jQQ`0Tkb3rj)U#&7|9xOhA)8U4P3Gda0cB~==t#756j(a zcLVD!&OO*zLvxNm(0;+#D_Ag+ zLZR>q+I?2g@q-IA7ehO3UoH0Medj(^iacAxh5p@Q9j*rAqc|1Q~{_P+9QPmm) zi$3h!bx+6V3nFu%@EqjMo= zAaq>d7h<3n%%_;2mo4n}be-lTEeA)(ZGAD%&8YL@I9#STjS| z(X-R8;>l!3*!dftSo*xWLX3X`==}1peUlKcL#4lah;X;{816&H&FDj~5D-qm^XW<2 zDZ~(>bRPEr8Z4PgH(_`CG09S?ut{{SmoGLLGms3rqfZ20GSst#P_T*6GJ)Voq$zfm zJ*n^Z{ksS@_$k2LpJKY#N66kT@TO92DT8wceB&&LjhEo&K4%>;SH@=tF)2DOFF#)` z?L@1Zy*@;=r|GgdzeqWl_i+Dgvi4PKp`a- zA-pVV5?*K`rAjDeJeZ4g$d4U!;n2REC)7m{2i;U>hN!43n+Z(5e!0~e1hf~2FFq&0P2Ak>!0 zKu44A`}&IamScY_b7p0!LqwpE>=e*%ex?7ZgECF$W!?5a_iATpS75~hnF%>58C@Wo zKV1u&414mVDBrmlxojyYk;wJQdi>vVxDQj|b1IYpm%*}p7u@CoD7*r1p4q1+?EP*{ z<&-3RZ4*FvE9}%~=5ZmTISts?cH2qc$=tewWV?ic+I9(01PSrUKEO;kHr8YLshj$2 zr|?XB$*Yhgj#!TUs2{#EIKtBQSx4=knqseLkZpl&gg90wG#OX=OC8o7Cd32)(l38o zzP8N@8DX%5L+VmsQ2Rz3WnB0~TS@!;Eils71};X(qPa4Hj#0DT9++@#+|yp_vKw-Q**aLyA*3>oz>x(?rau~57;c4vE^Kb? zukcaZEu`*P zS^5AB#96nqH6aK&pbXFG4zJ9A zcAHEGP4bx0GoH)bs-X>?r@%_9#6#yM{R8%a2k#W>w*uh&=lJdKNj#^6E+n9;(e2b zSf22<9-nzt{H%QtuWUaM$_vLK!WwB!FHC$d?pMs_oAaZhfv)Yi}+c&wTSm>s%VzCC-j%%DNuBErkr`6jT1TZ4f=f@wWMdwD^d`iBrO zV4!uPJUfEnl?KtD12pYG5a)PJ^3c&@%5nfsDA=4_;Pe|G280SiP-b_p6^5J+z^X>M zpt+8;U}@`hc||0bFTeb>oUfq?2M4~6$bNh53e2j)&>31;#&GaznPQjk)!Gs^>f7KL zI18RT$FlRsScszzNed=DYilrR2&O}5t1p?bdpHEIU_!Hw$<7{U9XMd_7I+}mCN}rm z91tesD~-^vL!m?T6mRr1x!~B?4s6?9&^`Dnf+<3WHvAsfD_-8bbG5wl;5N4E=Vd`# zZ$g|N5nFy|1v-X_PCLE@9_4#wwm+e1_cS#j`jF>1w6lxBf7C(bojDG6!#SBIMzA@9 zk=@&do*iIOLEVS=W}fYLveUz1S6?p0Y^V+Mo$yCAv7;R)ZlX1HwBdEi7+?sqRJF43pF?@{Ebt~aq*I{Bea4O!zQluLvXw5bI z#xU){mcJFjst+ggp&{(?R|(gQ=KJ2g2gKo@#?HT&aX7#s8H_=w9pp&m>G461xyF{? 
z00ZEf=3l+t_-!GCdM?EnXBez5qMYVq=u+n(`#EKmU4q5!m2&?bdqm3P<0|fEa7aT{;!jW|YJAci$F7RzISVj+Z zoo;NE6@=JMgu?;W0C$gBjmyn@cQQBk@e%Osl~F>g_Mt?Q-;h7Z*&2|LHh0|#JfV4y zM@4tD>$lH5TzI_6`g2k4-+G5O9hRT`DLXQlsaBc-X@F+7{cr#4KYP>zq6TX~V+qgO z>+Sv9ySHz@diRUp`=-8ilJ&Apc8asb4$dKqOUrnD6Lt)Q()LUyB#Rp9;Mc=+NjRIA zntRU@kEsVhFd~o4Ci6GMxh__u4nx2K03miVSg(`bZ!c{xCHzY1O3yMNIuSvxvq;zm zo2+o*0vzJa0|~M(-Z0F0!TXeL;SzWWih2F^#(L0{z+~7M2iC`A2@}S{Tsl9y@VUhu zcFjcf4gc4Mhaqe9!TT(T{C0^^>u0;ej4Bj`sJY13cF6EHnSK%6(iJrPcENGM4)f-7 z9)@AyJIF%1ptNw|$^u6EFwdr-Y!4xF!84_G(G3&Ua=3ND^-A8xxA!iV&FB7B2($re zgdGo`@y{G-Yh?=TQ(cgKZoexmk}&u7D|Jzp{f!;7FbWf76*4F?y|s@n2r(l8n${bk zkm7u<&>)cvb1BEvya0!|Xx)}Wwv-ztgCA-u(@tOT34YpE$})KsJ``3d!+rvb=H#>r z+aE@T@5-U01-VRRR zbB<+R(%&YXl-W|b0mkx7<_4S&OoXAhCetk5*yjp+X+maG#!(QPwgOe;z4j9}=UzYK}a_1Z?8?vyf}Yr97F!6buby`l9oQwqm9e*idJ zcWo~{2KSmmr7`Ih+_Gbq)?@z+)8kHw5q^3#15#ijU5<)p?kQ+$ZjeL4c-Iu_ZJ<-g zKmpDx%arktc~}^H>!z;YzCr_KN*~E29gz92bcpBtNpqbyl~w^;QS~d&0yXLr#@>6I zD=&B~AW5IUMmd_YB`wTKd^h|-yQ`f)%p`N;fHmt7^N9nLXB)(R--glBHeERDFuIMM ze>)n=c0wwix59LwWpyXcg0=H@P79MB4hHQVW?dz^$SI8HJdtJAIOX;X6P+~}wq2PQ zOgP*T`C#s0xr%v1KZ2_Im0e7A3?mAgi2&v)y>P&srhAFzwwJj0_W6)VHU}_PdqlV4 zO}RTaUf!D+B^jnNFxd(<3M;Kd_|bgj4BYANhpqr~0|$h5lT(em^KJ_V)8n%Z0LcML zD0ceUVSE3^EgYP?%RG$K5>A>j++&>1JAi2gnjj9Y0zMpG<<7oy$#Hj1IAvwjB5#bz5!BeV=*O~u09+jj`9 zDGepAJ!U2QI|x#oIdI1AgTXJ(v38ib$!Ww0Zq8wMxD=+kh*R1DgKWQs5Qc^4>nEG# zi$7i~pZte~^6W>OW&ZOu7;|_t;7HSGKXbBR?$W(iCSh!E+#bTISfQ953wB)g_g1l{ zAVLt@{(|;gMphbcotgtNXwY$j#T>>HvURvV0Ve%0(LlW$JVbj=@NxYK{s9N*^Y7f5W$nSQ1w4~> zcqSIhGD6HznVWnVMa3!e!UN|#D@!Fx=san6*xgv3f1PzOVYUrfsqtfTkc@^P^JS~oP5zfwk-Y0ugsNFZ2>3~4T2cnB=|CQ-bgMOmJc&SEOFYWV`7UXU+R)^_+L zV`v+MS@U`NYZ@3Y()irV{wZ)s_=IoOGzrDVLPnZLm%qycxyi;APLxcL6_W+X6>(}m ztAwK@QS;G9fRGwU?;m4ies2U6+=!grTTb4JMwC@K8K^L79yuk;(V*Hk zX$-7adIw}q4$KvxlXNVPQM;y}p>lX)A^{}kClgZY>I!We?!4u|n+? 
zup`+Ue9QRqNUp4#zB!Igwn#DQ2XN$_Dq*?doHPTslPyG^e{~XBwoI{H9hCwLaYx+K zqYmzn{*GX5nl@Mu92Y4>5Yk|v%pUv^Dr;5RdZmrP)r@nLqv_qF-$UiSzIA}Q5E}+y zn^=@=N_U|rbo^YcI#ILs;W_7C{yr+jwWS62x$!P$Rh{B^P`4L`q1dXZ#WRUo6EA`< z!p1SAJo^aN09w+}R%0s$6KOli@`)tL_y`Ayg;$*elmPs)&B(L#HFJQLN?7<%_1pTn zw-5Dy2v{^Jq=9>jWa(G{(`Y7yt@-J%9D>wO313y0Ha~^~lJFXP)Htdwq^B`_Sk1!u zSA5HOQKvX8F6zi(8&uhM!TpI>2I?11Zj~uPWP!*$0{(=h>1Fj7o`$H==0_(uB{}DS z^Boi+uk)rl4yY0rRw1HnKgg}yj!3^4=GM?{z&6i?vTznI^$Km^v2U7bB4J9D8HY&z z7RY=^Hty|{Z84L1^gtIE18;s;O^)RQQ=S@ES)R@9fMe#6akZ6F_u)WPL7oun^Y9S+ z2RM^8jvnpmW928=h$XU*C+jNC zE=nm|l>g=OYr?A#TE$$K9s-0d2IYSD#=B(-wdLyJCgo#f51eDm!wF)o7i`yPsK_vP z_Tf>Aw1Ql-91Nzv>+!aUPYPc?t`Y{rx?S``2$6=@qBk)dw zIi6wj(t<-qS08f|GTFlxh3ib=w5LPnyWHCm2RiH!hjInA@av7&*c}nb9Xhu|%t}?n zx9{94zwy0aF2DA*_ldgDPP*4+OYH(%i;K^tIeJ+$j;O3XT6k~}2LZ-Oe3sbtbX+Cm z4~Lpyd(+u_5?t`uVR}$Tx~5CV8ML~Un4%;1%MjZRyM)Zy-d$nt7)#VJY0NjA2nO&( z9bgM$5V-FtB_eIEue~mx{dAErCA1CwK7&STXW5PvYbYvdyJ5C4yJVtadxR6oIw!Pw z)N}{qGBk6B@%|O^6YPG6BXD1xBf(Xt0qrCY@ECVX@4NT^M!DW|2k&E!Xm0~9cyJqT z%{h`&1TW#_u?h1y4NVHQsYLMn#HDR zyqALnUKdaZ*bT$5^`W5whVTK$YzKKr#X0CVcWb0vnVN;$-@~?NGh>8D!P-PCO!-IsB4% z)6&ghv`j8`YF4^%I2f87E>A!GvdrT+ptODO!Cbld;6`a1#3P;9)OX&UrvIzav}<2K z#9WvF6s#i&IA(~90X=!IPMthkxoKL(lC6Mll|M!WXb~Rt<%<8StY=>-m3T} zy><%zR)v*7?7p*18gEVsd7kh)>HJOFsLy=>45Gg0);1=M^>ZVPXb{P*qn718glP-$%=+`*JM)I?HF;VD>|nHPH)RMm7bMdGwbWBp`u2%Bg^_i6kwF_9ucV>= zz%ebY?=rIV$t)UfIu2ekWf|K#sHr;(BWZ1q$sF6jnDoLnOsOz7KaFrKOsKs}lW>Os z+dvb!fC+aOSn{P2KIhXYC2()TGB#f30q$uhgZ`d&T zsJO%(7b2l30~qF=HoF*DmXo^^%fv7;;F9HJtbvj5Wb&N%ssue9s|(e=Aal#D+VIFw zLC|5WWc*!}dRUP3pd7?-ePD2ie853Mk}Uz+M!CH(u+~{UnJZpdZ?^k@r;MQsrINY* zcU*;=cqM}pG^VdWg*18@(@)xAU%5-WDUNxI;3yK4$B#KQEuEg2dPnP#w7m!dswH1Ne}?*Lks#z4|EeOVo?$$Q#|XVKf^&h4 zu(1W3b1fZl{?S!f)4&kmWuCGxN_v@WnR(zzVKVP&H+^+J2}?mmfy@(O&Mgn4iYBel zb8cj7M&dvj!8*Rh$}r1(bI0=vHy3wR?=t^x)vAy&SH)}Ff;;gjTUjta_Cu|Sb6?M1835xo$JU323a@N^kyaJ| zAd8@5)JuFrao$BQw440e`$&x3wrXw%Sf}|FDDA5-vqfRB{0;OFJt1P7lAJZH~3^LA8lhXt>0F-~!cp2ejghQ>o55Zr0|E zP0YgdjZuGBn9@_cGG*pl_`F_REQ@P+?Gk*x6NX8Z=P6#vTX^9F;-DbH)hvVePq3R?CRl#FyJ1RtIWl;ca@g#Er$kupVVeM09Ussp z);{U%5nB-2W^@yqa)g5%dP27_4o7fVhXHbXkAu9}R>Nx(rr6Lk=P*piI`Gi%eN>_s zES{ByLq}-i2M^yVfAd>kD{}ci$`N5&Ao}1weZk(Pl`$~9 zO&7v!58fCqC!^CEb6+nXeDJmM;O1mm-QalI#g9=iV4p)jG^vRCoH&!NpIz9Wj9fv0 z;;8e?W9DC9|E)4NaF_YRRuA;En6m;c&TkeFoUGeFF2`$vWY@a(&p} zCpbO!V{ZH0ynBVMf=k-gUT)lHeiAid6Poz^i>=aqK8q6pM;wncr`dj3V+)nx1?Ogn z^Lno=t*qiOfx`=YTlL+MG|qM2}7prU&)?z4xZdRhW2f zvDkURfrXjCy7vGt=bjF1ypA>#Q&Z*51-3qIl-=6JdWD0^jT>`iq zh4SLb>o_sJ`~G{xNX1?ZTCuRSN*kfSOq}gKlmMIH{}zWqaLC4k4{jkiWB0Z~AHfe{ z-}Za|^q)Pd)JF>B$4dnL#TWSY3$JDg%>+OH8;uTx=+F~0Sm5^|R_@H%;Kj|V;?Qo_JxPwKl{!D3h>5T)7WPxEn}c2 zmVzuN>@RuauvVC%l)Kd70ies|`rpOc<8RK-4tCLB>O`X7^(qFSV30R)&a(Mdq`O3XWlP z=$9}L4%lYOmyzkfK-c_gNKLO6RIRn=yodQm)7q0TJ}gl2utaA^o3%p{e!?VLTI!F` zMA_nFwDiEreQ054h>tL! 
z$X|sjZZbyvmjSF_u>&Bj<0`sPMFbwro}Yd?E>2iQM$VO4nJm`5~02YCNBV+kq* zbrM(9Sb7_HN*UpZpbuRhcvF@zkP8oReJ{e9`w$96L%fsio=(V zJ-_XHJ^(0VL0aJ(GzVkf*x~^?=V*ne!1U7ut6Jz9qf-sP%8+PCnIjgYeUC@`s^L0B zTj+1KX`Z}hp#K~MKwE`x7^w-`3clQnZxr5uDCx}uEShy#2(?&RXkX?A?;RVro=^0U1%BeD zwA!l$=1tAs5e&A>vbzH3GNj0F0<|FY+Q`t4GxEdUH5HZ#L8a)jO)83Dg8F>GhYPdddg z(6-hxJqxZfuDhHhwuQ0!)+T}qxK~e`!stdXVNL?msNM9oOKmf1Z4AZ@^F!DqPVI8c zqz}?^*EPq_kUr-&7UnS8es+Io zEr$Qo^tZRnc3%NEcCZ!OgMRhl$V4026`i`)0|P(F5w#zd`!fh@=L==qdCsQ|0IFC) z1Y+!WGj2LET)n1^544Z*sJ6r-9D(c3A$CPLM6BR7Yp9!SB7*>1hb!gr^NsSwi)Yxu zv6Fy(xrelLaSX2bvWn1kxPyWVp1>n|`=I9?tZ{qL^Sd|il!wG`?nIa|X6+>cNjKv- zjm_qG?=bTU58tV=auqwyrM*S$hPulzjP>knU-{qeRT@Y@MGeCy7SnDZKz>@RGxtc?1HcgMkz9Or8|9l@tkZ0VrY z!a@7IAsiWo5nk@xpF)Urgf=hS_8-0{>rFZ5apv@a7V#1G^BL_6yVlh+vCC`!EsTk z%#XjD%&%#}1elMytj{v)B4k+&W*~J?XQO;8X)*_JW0fG%ggypvRD(Qufc`{DP z<`~xBCdm87LM&`+w-ownyRFXgi@+gVEnOIpN0>I@Xy0S5V!ycJz>p=nGOU7$Zc#;D zTb$6av~<= zQ*lEE9kQ)Z0Apatf#4B^hEd{Kw5TqEP*(%z*Z7BQQSw_86ralwyREB+P_KNM7x7I2 zL4q#6Da2T|^Ugk07|EC_1n|uSiSakT9mg8B$EI?RAYeLs=WqKc%w^WqqI*QEFsLI{ z>ru~h5F!PY8pRhz9x<`C(7%P<6fh#iIrJcTDQOBmFv6*xG~!D-+nj${YUXWR?(gg`@i zqJu&3ivRLQKV;;6DC{c^aPNDc38$3lJH`Sa)^sWq8gl^{1vh=?U^rU5bN)ymq+1zh zX{_UII+Cd2KY{5_fNS7pD2{#m&7ye5E1bJh4d-)*PDk_=H53@gK+ z*4r^k8U{^Ts=zAqq(Ie$hFE4sX9{t;&Xo$wI?m)Fos7OXE#5h2=~~tn+vFT_eF#$c z8@#Mdbe@T;!m+li!aD#8&I%^}D1%Wt|*;e3WTaV?4Aqg^sn$R4=l<(rnx<_#QMeJ~oP*p33SgzxwY0TL! zrYB1XyqoR{SSRKJFfVE&ap@?hE!YOZRCi*c(+9(>=CTXeO#$sQrdmidW`vu z-nOm*cxLLbiphzBfB;n(8z9V!!p|X$^4``t9?4hAyK{F+XKPQ{V?JEsnOr(FTxXt; zevea_E{F%I!o-kXqMf@um#sKK?BJEV{}j_HjPqNFb*aMz!PwnB85~hNOSSOqY+}$K z46%$YN?Y`l)$C|u8q+3X%x-PK!4gYUx*K{0N2|~|Gz+7O`AdKID07|@t56;+zuLl! z^&Cc!x|Y#!vn!&9axabsDmHeZJtvsdjLsTPji>`JcFX4&438mAHz!f^|c=W*p&&+X3 z+;xPq!}4_DHD)IWWhjYs`WWgND}$)zm-go|!e3#G@u0>uXk3qc${85;;1uwyop3fC zi@*(SGaji5jTg{IccOYRqf!WS@jKB&_R{xw4CCN1)IUXp1nL=3(X+#eTT3y!8N~#~ zli)fSOR$Kb{2w~_rb~xuk;}c+5I{}=yr!^kI+ZQTW4jP)9G3V&NzOV^$i?7 z!W+BocON}^BoX`S1+4ymUq4&YpD%{V7)B1Q3?p)k9azkLASfb>+dl@1J^;XK5JLz7 zPTH3Ri-o4~$B>4&ral_nT->r~vXHopNf=$IT(FwVp+rB-EvkLB{TAW^AmgVAkqD%X znSw@$M7ls(d|Q{L`CN^!$n3*}%JCs>^MQY1B6$`Ky@ZP^S|b*4)0jMrKQ~Huw#lAq z&+3Aa%hjWCxmZ}c?ej8?by$$!+06ifjiYJ#VZ^=rnKH;r`jpgYQx-Ici5j9TD!?H9 zq8#C3mrUcIb^1)$y7<^;?@g0xy$=lD*6Vnw`KTUtUs*IEzA_3m+^QO z)|6Yzs<5-qYVO4=!DF)m5!=d-)B$dX5r=V)=Fhrh7&7+4kl(3?8^$!J(o9@xXC%CsTM^cj{%F!;DY}ulZ>H1?=oE?Un(rLZNhku?K&}@Ol)u{bD>B z%LubD1i~h}V!*ej?d5HGF(vYu_(K}&vfC=TD2{uWhsZ&^GRw4fIb%lHbsd&|U|m(hGBCnc|W2*fGyeOgp_hO=oB?k6`K# zU?N&DPE{y?k)jNhQHBkI?WbN#b55YqrO+)-GTJ53{UIS)4)(NhhOvgB&PiYR1Ds%L zqs{Oo0n?B1Mr|ub}sGb{k>Loy)?z2T$B{naHv09Jx9i*z5wYJ_6%kV2iKt(nZ?ig9bt< zZD0q}9V|}}6w;ay#;ma))13jlVxbEccnI%oo|JVM^T`>!ZV`N>1qa)PK|&FNMAFBJ zX%~d*+24V|g}GNq(5CzR1T!_-r_dulj$vNd-#jD6BXpK_G!gDk$@ln6g47dhQaU8v z+9k5UKFUyqfMH@gx`W$`mVIUclLNrK#1nLV9glH5qXkqi+KtnexMy?&6+n6fs~}>0Fu@uijM5F_7@u4)U(fbX zP&=;g>7yDnVO%_saz4F$3(LuCD3fe40oAnyU7Ie~&)iDIb5hS-vE=ze<)j-n?py0b zd00k3q2KP_N~6@KpKyZL1}C9?!O^^zXyH#*I7|X_xAE!gbn-Z6phtVIsXZKBJXc$*v@V-6!%F7|CzuexFl=mL=SCy%5?ZZZV7kH63FMmXBm!|2&u*c672lo zqnQ#+DfLL?qix{161)>sV(r#*7$efkOj%TA$X&oH;RoKdP7-cgOz$GfVnE)iT|#uB z-e@SP-IV!F9qEPr1b{yGpK$6hby@VOG<~pb^uczySk!*e5Bls?8DqYMBlUUNUN3(q zuQhUGds#)Dk6{R1tbAVkDp1W;zb!tC6y?_U^>tcH1-8-mnh1D#FVXQbeYE3t(0*HI zU0JaE(=?AiftT%eEO_t7r}dLm_96tIufW~1 ztf!`TjA#Rpuzp`DSju2z!AA^qOFjY(^(v&Q%`sSh9P7Y2+wfJ{?Q&&2T#z*LWmdE2{90l!&Cy-Fl+p-LJUQT^W+E&$gJQS zT(!T{C&OzfB4H_0WIx5tFv8qttY{0VQMU9EaxqdAlPrw+M7|I^NAl z#xe-532`d1E)Iym9eDbTRDw3RT=9`-~^xQr_itg`*{K2d#I+ z?Uuhx%e{B@gZviY+?F=^EMp+tN$;+>7*oxE_=e^jB1C#J*d9I>b*+GIFe&!o%zHt{P+IiY=oUkdgm{9DV61PQLZO4*; 
z_$ZTM$_XSC+RWOo=6s5AA)vi!R21l_x{I@D3#R&z9R!`no0BLlILx36UO`$R+$6z- zEM@!*Q-o?Bhu^*n%qNB?a+=n|$ApA|AzWHrgFnE$0gn!0w8=gj?en=$&46bomtEr0GY>o|H+m9e&xEN`e=0p+D>8qy09_u?q7@H{q z{L7dgFm6i^p94?m#I$z*!-VpgK`^_8LSYls4SlSez;6Y7Wq0}P~)H5>fVm%>9+ zx_esqm;uuv6d>&6ctY9F|M|Zv>tAdlB(k$HCOw5egZZ5$2!5}&|AeJ_^5rrbeggGF zOPZ6=Y-s5^A&52!x1;i+$&R*t(?5$Msh7FNT>jR(cfpH`@{ApgeT2@dcPGoOdspxs zKJIku2MBPBH?Cq*HBmnK%+&g+Zi?%Rvy8}GgXEHDc~7?_;pxa+H9oG5p#+`WC1cI}p1H*frEVL+Pnt1hed z^EX|8+xwbVA}ZrqHNsnj4s!80r}NuvTONUEXKZsj@m+9g3@Q+j$!i3fED{g~DUw9G z64**a>*5ZR0)RqfAhHs7exyz(w`szJlGAUW$!w@`l0eAVn)hKW-c|UE(7+@!rgjKoA~xwmKfUN!kV@xm)Vux zYx`VyqJ1TGZ25W4e=f_gyOk-(6Z^nJo0N+m6;3s8P0!X4?N%5HY)DfV$C_8O0n3&# zOuO4f*PmpvEo+PB(w-z%+#$?=Cg0biDFzwLm1PFcV; z@Zxu6lzvaK73sQe_iz_UeU%>%_ zYklq>Ri?Z$?6vLm5?F=-YmbGcSx*i0uf|Qr)INbPGCRDl!W=1WC%?VKJNp^Nn>Jgn z%y;|-$mfol?d>pbE}CAp$>;GqAfL{f8Al3^=@;b)Urh~CCS${c*rvKj3Lk|JnLEpI zUO>P3?II8S_%^+!OwL0WJ{}6#;F05OLSdG1<6fp)lcbCndHkl}Fr_uJ4D($aA+6eL zaZTn=q1bs?|Cj}#o$+>&6_3oT*^d2D;FJhQ*aWshBicjCkcNB(SD1hE>d1G2fTjbh zFjm0OL3(cC8qUDbA++5x^g6vc*HRXRh_4hG*d#IwjKB2@?_rqMDYQIuoO&~g#%P*d zhAEDjoF}5to$K?whpCXUrEMscXhVe+oetAu|0`~YF}$ZwQ5R65*lPdP!gpt0+6INh zHcmu%CJ*Dn|2hVg=Z;@?YGlMIgHa4Vh+ERB;0EuUv$<$o?#SzR#j%Qu#sLk!fdf@h z1E!ld_|g$lb@=tTQ*6>HKpaqe7E?6(t~0Q7fMJf{+$NW z1>RS|_3QK%Ty<1ZZiP?9RUs@DsqmyouKHWt6qo$%TsFPqAS^v$NQY|UW*W)ogt0v* z;C~C48$f--HIh@SG`Vs;>;f*QFbx-YJs#tLt#ZP-uercJ$K(cYGk?V~7%5^RZegC* zBedeeGf0q3IR5n#THBPw?s10(o!_2Kx*{P(Ekqdgpu zv4fw%B*$Heh1omhjm4MCoteeN34XY`gj4bwy9PL`(hh^PkBv4NLj)BBQiZG$7<_j@ z1}1fMc1QI%k<)r%fOqkoV9F3b7y%i034{TdXcap>?1*S1-ozYdu!qyCm_iS}cmk4O zXO6ABbjR^|hR+T^KgjUjE+@mr#wo-nC7NI&&%kY?=x0SuPV{Bt0H!cZS( zFqa%9E&j`+Y(3w?p8-=BVt_th)A<-*6U@6jZL0@ia1ZYP7zGu||FZO81LhmkN8)%M zvh%5lm2J}8rU_F&HaN-d!yscn!kCHZQY|k!EIw)Gmo-M^2adNy>)G4e4ugNe@yG)R za2|cFDN7d$mJon^=m{sVi(Ahw8VpLl*%*u zXW>V3h~w^g9|9qc@&nL-J)~E-1s0=?`$G>D1VG7*#gt9LBC4JgucSk`oYb+G} z3=^VPUwgM)=U|K(6iyS1n4K}NcYx&z@lzi?UFA@XX9##G190g6{L4oO`8^!?a0IT? z_V>9mF2CYW9k%qXJ$N-yeNH2QlK0UhI4QaO{Wo2mGEK=c7CX7UBN~K<_@+`U4ksrN zIk)Q|r7(;zAKpnExes&7J-9;=E`(}3kuX{5)G(>x^O;o01vF^Ky#(C$hFPFonTil$ z%CgZexT%LS5@wF%PFSC%t>)*`DKB-oFuBsSQw6tKFf2P-S5jMF?3G;{D8hEzE{SSg zRH~p6W)aF^Z3+v~&YF()#Y~AZYs4ekOSx4$RNA_}fmez~B+onKR-PoJPm--L}P6pXatK zJ~O7VP*+siTfra`FE)AE2X=@oGv&!xwzc)V6F;oWtIi{C+5=!yWy~BWnJ>RB$8kuN zYWTWZqFuw8xX{5T^#R3jB_(_*L=4zGmJAx5b6p2K%n? zq4T$knP=P!Q+pv^S-yP~mNG{AIw+({e^q`|=DZ3%&W}X<0YHvnc0WTCIp0huXZ`X0!HZe@N@wk4R(G`Xib)A2Ndqgq@eD!F~{eSBbGSbcBJZc54E zdm__1W)PRyqR}y|A7;r8 z>)iYzAwT(sftf&axlUx4eGK6hu20m^i$H?nZy*Fdlb3r{lm?T}oz)NF8 zpYRFjCHOs}06qY%@?>cX%_?CeF*g&wn$n%ajE!;N#vrB^Eqn~RaL9Dbcd-FK*~TPB zAsFqa@bNx41|Fmin0|%w9wPcki^p-4y*G1{aX~=3#I%B(Sn|MF?v*dsJ}>J#FlY3q z2Ok8@pBgw|_M?HezIimp`YRatq=n=4@HpCb%x_dMFa(L*ba4z0F{m=>7ihbsVcNfI z&S4a!$ty;+g3)JpL35W=wDG6x@{AB|tQP@6-;6DG5>9B;F%p71UVVceDl>#!av8>!c-pMf3QJd`~!R!&YV-{!^~H4nK^=^>;XG5>nlKrGZ*kPhau%uD>Ulb z|DVIG9$*gC-#^7mPgVp^5RQa3Qv=0~_@U6k?gAk&dpXd+x!iCr13j5nX?1pqfMr!! 
z;BP_LPi^6Qhq}tud-G-X7GaXF4DvykX^ul@25A=p8KG1SPVw$rzgAx7gtcBys*^s+ z|4oe#mqkn}CkfX)h)>o$p+9G5MkV{)6DbV8jzYvKcW&kH&Typt`A=Yuf4N#7KHfq5 z&k1wAqh)2;$Xe57ae6j5JB}|z69Hr#nV=h7T*V&$*%5~fObnMzj?aGfc#T~*H2ut_ z76QaE@Nv_@HSG}CO;OJL%#HHVr=LXO5@COP06z$v@$sXWotnn+cMay9D029eI3J|p zhwPpVv6FLwz_+$WOnsE(jV40J%ye+I39kR}hd(Oc`R0!>3A;^5TzzwP%OCy8AC&*~ zfBE0b&wl!p(5-6-5zvE8#vj~qNA7|hzvt}Uc#_;1p?xC&NLnfxWRyZ?9In6Vsw9i) znPc;mbgTBG=K0%lzbKhUsE|{%^lgO0@-G zGMk6>+L9=bo7Yh^E2Vs{nGrRvRiYs}6o$qTK z9B1xnddj9;f9E}U6a*#EGM<*3Hl^*BM!7EPq_b}dgSOM>fjciUhSn-v%+kKX9Pr-q zs-tkbfu=Jb>4H0ahhgAbhAlSlZTKqK#U~@UWm~A3w%bd31WwyWwHY#Q+_yU^4r-%s ze{H|>LPkW*XNOTVU#Gn?LMfY4e3%82ZZZo}SC`UBeZk)j3@Fl~+uh3erEl&0eM0RotWHJcE8|;-Ygdr8imhZesuIc?OGU@n#7- zcX=PJn7i639K`>p?#Da zCQ>>DoXi_x*|AN(Yxxo0ERahK7yqr+?+DW5w-qvq+W#M-$uu(cn^M zZvzb`%+n=D{0?Ap&`UVaJ$3~Ka8hg$Uhv7zG7PzbA_52pcAa55(xBcW+sJOz=C((! z>ePsE4z3HI29BU3XvG(s*NBicNGM3mYWlaZ33pwm&oIG8C$bEKZ|t#))7RP+zoR6TbF642Y6>?pKa1>Ws>=>*(-HwLy2vZbc_wL*8mhZlF7rzFaM)kWP z@=A*s^1b*5xD)Z56a5ab!!``*5l7!9f)YH3g1-W%q{DKYA%jh!3|;{n&mLI9S@o1% zmJv*PjtQB%%`wCq`%A!>eqkClH8fp_cf-yB@H2!dVT4r>Y;WV3eYk~J13MJsFt#wH z;)#7QaKJGP{w0x;;u8gK87?xWjNs4o!We!DzUwdBIldU@dZ#yn)$GCqH_Tbq3rXJ3LD6bGEttS_goqv#De?&y>>wzdcVWJw>50Bj zNB9{?7(DWMP=64{%G0vCP%5FE2fr|pfhlic8a7QtpkbKQL_cI*J0qm(5us2A*_G%W zfKg|T=#Swp$kNgp&dO-MbrvSSCYm&`4D_;N(759q`A=c4iNG^;t;|o%mr?wxju~Tjymru_8}@Y*Gq@d2Vw84G;at3Uqe*zos$_J0 z-R1G5SWTvS&l+WAi2x7?8ml`m*5Cfq`C6%y!k5On~5-lc7B2r@kXHOM#g*m zcndQi1T%0F-F-P&N3)N4UF)ESNL91t2~m62I6l6gyiFX~*&t#}M$6~F#PJ+KtFIeV zRm|Zpw|dLY=?X%@Zh41uAWk&pV}~vb5_6aH5E)y>8imp@J|ISHVn48a?+4#0KltAF z%isClj}uDvi0D#{p_}l#?0BIm*x7zm9z3F4{8)PMCFnwc*2MT+pB8rS+%@fU+l#q$ z4*vW#=U^wAS3p&7zy4RMC1OChydFMzj@Ar2Fp#Np)g`<0>sNhSwjax2k}(+$Y4`zW zX$a4crJmfOh7X6)P9-1F3eUBRQXu1i#QMLfKSL$n9;PoC7opKQ7ETXQOfU7c!ECv&DV zZXMk4+kE1d%@yvoZS;dTj)C8GE(y|(v!@HKFiJZ~>rRrF<<*^*Iv!y(DM#3L zwAXc4t%HO1jW%1W?V&6`5lqOJey3n;cwv<66R%T~Oj&OaQDGvaIUfX(aPq&nVxuXe ztsoG@Itgpb8go19(*z@qZF!Dn3oYM1i@tr=bV_rj9>#cpI;IFwH#5pnr5r^$&Gz^- zCO=IU%Terk`?0ZBpb)mg_0qPo_}C~JedkgbTpFP;6TuxA2HP^>fG%SesL_WW7Ok&L zffI z-Bn>A&Z{Nf$3#|pU}kk-tdpg?Vf8GM7EF`Es-|(`8jL?7Jw2caTN}m}rWWn~-~X-O zDgW^A|6Y0L-piPQ;G{S=UvAIcDYHY1v>jeshJk-Htvp35`smpfW-15?_yY7}=f1OD zg+iK(V2jN)_#-2Kun&XCL0gm4_!Yn?^%41R560vR9AH<-Gc`*1N#dWMoTHtG;nbr7+5nq&#<1 zj4EXGB3=P=k3BvuSH`YlYme^`Au>jC<6A@3sGRMVJ39I zm}%D32W%#0o7kOK+*GNenc6WCZMJdRmM$3rvd&ZB<2=GFY1a7tFy;f81G$E^F!}1n z+4x7_{yru*<3ts^g46H;5q~zBH@(=b!;DYJI2uWc0}b&RAm(a$OeC7epA&Z<{ID$z zyu;_GF`ePH=Q{M`3_i#qUGh27?BRsjm9aNuG|V$|U==f>XHPNHhOg;0Jm}B_f`$&( z9M(4H9A^H27_B3Y!rfpS0l$#R+0mFp^|4!`0J(`KeHVxE8w>B0>yvX4j4tpeK?DT9 zM)5IN!7OMWp%i~AZ0}c=*D=T0FT1P9oUO86o+HR~VSdwu-ZnW0;gy+}5gIXLYHfsu zIET;K_2>mRJvRIh=3PaI2k0H3v2P(%wh)#*0JIyNZW79K9Ig8J^a#5J9z%_Q#xTy# zUE$moqC5@bw}I39GKb@-v(vfx2$QnoO$>jam!m{?8z9dK<~rkjj0f;u<1B|2Os{qk zuoa#RD>XlR6~ST~a~$UrV`!u+5S_SiW%}9>el$ z$0z`L@V9yI*MGD8-5=rzeDwx%>wfvC|MNd7|HuFR(=s^!P39CPUkE;@?0_9zu$6;@ zdkaC1W->`k zguq5CLzx|h-^Bs+402}?cG2J?!$Ef-m}=hY0+sW_dFVutB1Bq;aFaAds4fUHJUk$c zjJbk|&%=~S5onzYny}K5%bSjNx})*wFs_y9C!yte!Ew5H{ie(+i9BmzV_GOezYBpW>6rvam)H8&cexl2pP>X)GS+u<0y>%Z(3pJ zcv(f5?>0;wR})nspteBQn#hG##7+4flkHJT#rp}<;??dQIO zyIDkJHkkN1AA`PV`eXf;*|}1tk`CqCjt(5sb{=>96lAtChV5_Xm%nAa!d!Lm)q>k? 
zu`FBb`)K>yV=2r`!_V60zy}=QfiTX+yK1{%p9x3XE#u4+<}3*^J{8WDk*rK0FBpT0 zqcK0S9_z3+{%5{Zo?HKkFk^|dNBe5Gb#)L9W5T^A3FDK5+M~Q9%m8%qo|H2RD%TxPV<22CP)K%^2oNY=l^tyZWB-;a`hL zLTZOk(9riI;7)L$(*foN|Kk2fWu1eB-oCL|ZqHvqFzqkvFw_T_wp^TI-;D za8HREWM0?FQ|ufrVP1k$%n?nnj`=>@$59>|?n}Zpj*i!{J3;H%+UqZ`U%OYn{nASq z-{A0z$;@E)6gJZM!@ztye#g)@O=@H$kBKa!LhL1MxZk`w1G+aT17;G`+GjUp0Q}!v zJ|-k1=6NUs$En;uNxep>$=`nt}4>V7(Y2pZI zB@?A=w<*^iGdYk7dfD>vAeI5PGRCJkfP!9LzcpW8y?qm@;ec4KtAub{1fZ0GeNqdg z?FDd6v^}SX{blp%4g=SNhca7ZNQ`|P3q8zM!^$$w8q3c^#6^%@Y>vh`(BjseTS#sj zsa9RtS>*W&c8t*&j9tMs=k zqD^p>k)ueQKb|>zTphF%FozoC30nP#WLnt*YXz$iT=e z8~}yHNR||J{CwbBdr3Nw!BwoTij-z^_y{-4O@BzO=b7!yDofv`hEmgTrnFQp({knF z1e6rj?{?QnL3-iuzjhOm#x1TXKYR3EY88 zu!z?kwQ@L&z|}EyY`I7$?8rzTxu>%=e{Helr#+^UYamIu4I9YE(U<#5UhA~HcKgG{ z2|NlAuk3TRKd39T9H79g-{hr!^C#veufE<-$6ol<@$jB@rfxlFS$SwfJZWtwsy`bM zPeIbz6S=jTSJk%j2K5lc*I?uJv6e0TXb->BTJQPDm8!B$SRH=nK9Ub}C9U8u?Y6(% zfFt$3vrS+-z2w~e$O+lz*u&T^-r4q!1mRo>&hk9-fYymaUgB_v3$rdI0>{^XzN2HL zf=gsPt<`Z6XDm;3vL3bO3oh`jT@fhEBmP(o-{uoPgt?roaI;MR+4e3tv|dzQy^P;5 zgnWn+3j#KK>X(NCOt7lD&&nXs@jBn7J9-rM4|}?i_-b>RUrGe3@F_PoJoGg* zfj-FnX6uKvJ`;cZ*49kylQ?#%y7X6Y+IG`sIEI{7M|*wl_;M5Y3z?4e=ezWex`mxI z=^SoBDMjZ1+pelojz2g^A1p`Qifxd6$XNPZ+@q$973EqUpbLJb9pIPaLb}datIikp zhW-)Hq(6RwPmEdm%cN5!Dfccc(tkO6-%Gb0GsiFE$aBVA?#^@`=+dJ&2D}3=a6#3x zW9(K{<{ZzoH}V;|Yku=42mKIw!p^a8*F(~;?lTutcc5I;+xI|Sn2YNbw0^*(y`u>A91d|ZD1(Z_Iwd*$xp6>JiY%AbFDA0uhvFyaWa z?+I2Ogh6}Ne7Q_YC6JW~Qv|aitXMQWZrhl@N`@x~FcSPeCY~n;w=(D0>S9|!!~cbm z>o9F1N29!V?|bFDcfVim&AwhnyCyj#fcW6hDB*gFG~mf(vlzc$M@73uyViEGhobzx z6QpIr<)GijFnA`)s@xH-ah+$iP+vOm#`e0!Pg20Tqs8GB+CRJ%0$$I9X@ zLXwTcH^HwNvpD7oNIS@F61aO6V1_;5V!F(6(JK|)wW8zMZ5MoAqGv}1_ z%2UqjtN?dWVS8F%FU)Sivvg=+7N>#HgzY)Nj>rQ5db5>s!B)UTjNS3i$NLwD4&b?u zbHEDT%)8iOd79P{p5>R|%;*ro_`#Fo(=XvO4>)|{HFEWrrN>XOjm9=cH9UtRTpqhM z)L(91f2Z8$#IZ5L-e46^OwKQ{nL8xh+BL!-jUr8>J_jBT@QQr~P7jSYJZA=%=AYEq&C0(2vNn5WhPJ<^L(fh#{&?2@@2&;39HeAWDfr@|W$>Vai zg_Z|fEVq@9*%lc_&}qz~m1wf{1pj)7+W+32SIWZ76>QPg%a!@7rHAqO?2|9agTqFY zINCopkhsq{QSw>2K)|?t=PERdtuNYnd@zU|6md&u7s`L~5C8M>ci(-NBc#79fBcg_ zEI;|ve^y$YOlMdo>Gmpf_T6{iVy#g!2hVX{*<9b|H(Tb0kRx1|_DlnP*vc`6#)OSM zJ2biq09hG9h+Gg4#AJz}2)VrFe&uEP`3GC&?k&8AIDa_CxDs#vIRX6l;_BSLDDTC) z&NH8M^6^cU7DPp&CgaQs0m5Mr$^J?_7@3%!lH$TH!3k~Vk4$M6eyd~>*Tvp;Sv-pk z1d3T$(*qnALnmctF=;1Dxe`r@zKm=o{+2IkcR}VIH`;2qs|$IxsY^FSs1v4zp1gXVsv>r7~7gxsy0d zDc^=2vGfQZG7r`Y++zR@{Bk))DL;+nk$JdJBQ1+j5D$E97U6HdA*POhg+(1-vv{ep z^&}|MRpG6^v0k558xSk4V`S__`v&yd>3u~zSUe*rQBQ4GWxQ(R{HtI@Tk>f$Y|Ibs z3gXSpw{z$eMl-m|Z^u*kO7x7$mw!BiE9M#POB<-p8tHpNqY%eUMoh*@hSAG@boy?~ zGnRY{Cw>L@Dx5m^NtLk?jN(O@eu2tZ_{{oaFC_RgKIY*97UFFcq!jK+K>Hmjo21~` z@w{gZ3k(h0<#UA$iDB($ZA%qe+`g*#AhWBwRHjVZDRI-j(e2Ece&Xvji77XZCS|sP zT>~~hS1>YVX~Z@`)wXb=5B9~$$#ns9M#F&<#F;IE23rCi4j?|dZFbJ$-VN6fnA#n( z*}tg!&1ai=W;M>H^-bjj&bNGzr7qA5{g2(j4x_7J75x1+4|7u zOd{wF-<9dFG}iXU;GgH&CIR=P&C=OAUi1U_r3`SxIJ*`er!4vx;g2$etquVyE7B(G zmuAb%1rO@{td3Q}#qqW%aF22$@Q}wjpKMvA;fRT#b@3t0>{qBHI9c&MZRiUHKeh7rDDh645raAL7uqg$y^5?!?g@!jO+-tSWlbSkhq0>IQHl7LDBm`G3a%+b#<3`gDASJ1B`!s_g-Vt1gE0b`>utA{Do0hg+5 z(D1rCfn1gcXg(jVZNqq>d4#n)x`e%h@i}Ae83JW#ZIdy^riqDXfc+D~96j24S{|)G zEsr->*?OBQuUxs#@x&wLuO2?(L?=QLF?Z{nku;$|#1Y`FH}7Ddj3+5f;#rwt`=FQ8 zt?t8EuEB$&#l}oxm3DJ3Zdt$%>eqkxn>bu>I@an#+6A5gqX>y~r2z*f?noEnSOi15 zyS0r1lrS^E&UH~5?6wD!BjYl=uz=bfMi=2^ha<(a{mNKIQK0d|1nx0q&Lww6I{EdR#KHC%BXv0s@oG(9RD`6R-hA=*c62gU-af4~xlDQ>x zlZ-vyz(a%>(ne6{4#!fvp#BB|0HJYuF~kSuiA(z&=6x|Zb%|EGA7P=te;mPp)8l%_ zxu0cRCYd_!ENGa1%!zOq(eLl0smHd8`O@8(<}|MFv7I_!HVz-)bo>Ot384@e8y@SJ zZI2%Cb!u{uyujyjro8vgcknI;f3Ppxc6w~xIEolj=j_4 z&sWPQAAO1K&nUt8N7(9u{xaXECa=JRQ$9krTP&xXYPZeq#<+2zJ=JS*3jE$JpMCm_ 
za@d);HA5&Qw%o9tdcs_Jju1Lpn(S0yql7SXg#!+9d_1D)&AT{=uoW|a4Wm;fBB~fAWt~>qB?F!PlplO2&==XbhmLtqutEU^~X=M)igRus3w$E`^0^X z&7?a(2!7gsO-`d^;Xsfo0HA&KVqURVNZc-r^bF{`LCoK01UJ2Zf2X|t))WD)iS?C5 zI?4INY~i9GU%No6N+#RSYMQSl;MN>|+G!#DPUItup-zyt_Cb=?Ardqi5d$7aD+SP? zq4}6CIYD%p9`8j;pJt)suCs_ryd}O8s4TpsIb-rxVSyYW=Da7rdHjUYVRVv@vNTFG zz0WOAk1Vx*wJsJpm_HYh&f@FHrgx;5E_%5L#8ZzaReNq5Dl=!|PFcxGS})rhW}M8G z`S+puV03JY7vIrd%Hi`>!*?|Us<4mTbNub78Wd=Sp-R8|7~;Cz}kxn z7wb(M>3^2b)a!!Fd;ZCq1$L>IG%T{dvChCCu%K}iDHG^o;^Ji=U8-%XIFJPX<&jLT zmnDUPB@a)*2PTd8_Laq|QrMk0GqO7_v4x-=YCC1#QXVC%MGNz0n`wIlQt~)Pm-IQ~KpWEs`G=Hy@wk^Og+Ww(_j;7S07q6@)b`gWr+3(SgG^WAAJiuu)CxmKA`>2nb z3Jd!I{Ok{jS;Ps3%~rA_Y&YG-L0!u0>d|OhrDG%weC%xP1Gohss^++kwM1X|Ua+XT zyAUCadZRcYriU;PR;HskXdTQZZ4sZHL&8L+BmyM&Bn3CMX>fOm=l*kkAsA%WgaiiM zmZsPy-)jIJ!PYU7nYQmzbcg|?&@0DG%%v<~KZ~xS?6+%0V zSd9CFL(W~`OS_~z3W5=y_$AInIY1GigA^z>RoL<#rBLkLco0FKbi&4QsW?c3()LOl zq@B(mM>F$^@~HMjpG|JxD5rBdZ!>?|b4!>xE|wp~59RsWk2_>1%s1N_Izu^*f(`=O z2hkMLxeJe-EVi;fSIhW(?|A}!?;LNzi0rZ8bD9%|$m3{wQK<91%*u_A`!dE!gUu zvfHEW*D2a%QO`J-8brVK+BeD%@qj!Ao)0*+YZY5unUpS0IXcC_+y2+K!{i}kxve6; zxbxsiODAYHUDx^vjU`}=|0yF`2pgpB>ES+jg`JU1pbV>P&FNVWCo`Rt$7`$QFFyFF z{N>L+!tk2Yy6DmIxeUIVckn}oma*~w`60?bgT|vaf4qUI#U`gjacZBqCS5KW-FPd{6aMG7e*HJt%^OCGdQvtB zcz<-f&0NG@r)Q+ReDh^IPe&OC=r=r(%>E%x0qfZ0>~ON0hTRuvshv;HSBRxK)Q@m+ zwY+-kCK`I|lL-8;`N%0uW!8SiSophxGBJR*Pcof5sTdu;I_y~sP zuz*R95g#9$U`}3^=a_{YV)k){6XiGzc5}MBj1WiEn4&$-3s|y5>7RK@5T`$6g5e4?JACu;|dp1vq znn}Em$EVPmBMf+qv;xC1jw}j-h)k>>^pm#sVE@(C#c5zYSdH{@JbLpAO!gGCbDB2K z5YCBMoiN5@6O-lI+^fKva6re6^4aHWFy?q9k6`=Ndx&t)_`@VW`gDcocobuZ@1X^! z#LZn>$F!)M5K+Cmd}!;DLLNH=J?YC8kpocrmkBf~$BUgF@4s$~G}STN;GJT3zEn3*z>; z<8$gse%OMkAQ+y3QzuUAq2EEmd#IMX{o*8JwJq2Oq{&;C0AO^dw;ng zW$~gsnZHwrfP_-u@Sm$LaFs!>FtQTcVp}^`w8uQRcV04G9ip6e0W&qmX#weKUVEX2 z62^mPYL&^9dilv4F3YqbzNep-YXZU8fk&rbX_JhG|41J#jQMJ%c~;F7EUz+Y9ab-926#$k^gp;l|5A>n(us-> zj1jmYU=w1j;TycnCrZwA99>jW{Iu<#%eV-Z92DBckLl_@7pbKhAhFD0aSeTn=$VTui8-aNrhM-4- z0>i5C?zneTS1$phd+|WLKm(ITQfB&Vn`xbj0%0P451>hMPTDT3J~vOidF$9l)5vp~ z%*-qDg}LTi?o)Vc4&)AiwC$1*DpCu_gG%Wa_joI2M};?4D3(_6nl_8WwkfzN?(sAD z#`FC9Dhb==)ao(RlFC^2?;bgiH$T&_bE)7 zT6(p@186EehV>Z6n=^R>8@01<-F!7`;$PnXy!`B=Ut&6P7;XFj>n*H13?kusVDfVW zGdrB(mEmJ9VTffOHSJQn-A%nidX~bhVT6)#5=NrTF6(&Lg{B|~0rb;(>6+QX8}exD zIUcuj6>K^_rd%To@IP;mR7ifIZW_-;y^AvD2)-0!o>t)5O>&pJd20eT^KcF%QB2VaBcLB zhMzcqu^VX2m2cg91*PXyd9bp?a|9k#@huoZ@dHqDF&{Bc+TG&Ng*A53=`78gs`$T222h6#d^G+YdU?x)aFU$Dy)?`zF~piS@hJy}fd}5;yX*vXBa|6D z{`I?8%QfuYH*q2uKJjVmX~f^ zf$;+t?m#h)?p*9(=F(y(gBS@BaGpKo^j8GXa{}XUZ?ABo)&snO*HG%R>%+K>j!bh} z-1Tzv#;e%<-^Qux4i41_t}bd!7EOJIdasv(DLt9xuP}?j`DG0+^{rK)4{sR9MhN32 z<~zsu9P|yNu)wz6etp4_z@I-_E^old-$BS5M!4BLUWKvdNMH`4c>Hh`x-yKmeWq;e zu5q&55_FVZTa-QS2u#cxI;Xolc<^a?_-Tv4{(A^7O-zH9$}gEG0|-A8Qw`?f5rX#) z>kYIX-oTix?zmjNgR%#F`uNYEqL5`b1=ycmp&*2 z3~?0o2*)7LVdB&%!{yoM81Q#-YT3jLF=IE&hfn{kY_EW~?1X&wi~EF0+5m?&5z{nT z>I4m99<1$bm)m#W%;{q-=EUZcN9B@mOe3su4Nk0E*<7JMVuu2^m0xU@kNyRRU3?l3 z;eY)87Lv+`pRsMh;@5|Su(x~8))9^voM`dxx2`gwUHmSQ@rPOxsy_>Y`lXl6Sn<_E{ux0ZFaYUvpbPy_11KKV5izW^2QglijPcsL7 z+|vf@plXLGu&U`&Z2nkF+HTwUVLv6bfk*0X_nieOg=6L6vtbL?f3#YG1y z&9c9MBicxT;nOe+0HA%D-%HQk2%q3(WoYU+SV5BT?TA}X1uS>sfN-I>T1+P|?}Pz) zTr|QUrV^gF=ZE<$xm%MZ>qz@ZBoksS6zJk@UAA1}q*hMmvhGMRf4~84j2+hwlW#kG zt^iQmM3LTr7cveV^B>2_R{Nbez_e2a-T9h2^WIz9;9Rvl+T3B7EHWi{h^P{uw?J!SQwDK(^zCXO``MfxCa#CL((#7)x~ zP(KSq?Ja*D0~u=&3xigYBwd(p>bt-emfC}(j=v}yMxI3=un}&Iq5cOd2Ry<$a0Op% zvvp?tXrD}%bguTL(mi2ag*0I*4z*?&a9{H_W#1rEj*i;;`tzSOCk;m{_l$ z#3qLJMkuB6)*l$;8NY$IKs9Ypg`Z+*XuGfc)h^5VT5-g=!(|z?#k^kr%^l?l`}!38 zO)lQ?yhG!KL1>NTi<{OjEWB?&3of^xX_6Kwkj{0cDgiVVfu#nYlZ$WRRlR{NQ{kzu 
z561|C)3=r01FqaL(_Iat`7zg=ZYgezFw#QprG_X9b~q>I>-rWU66S979t9F%Pq>Zp z;T)C{iVoK@+z>%*uDhBnVLu>*HXMLYxn^1cOjS0F0^@+df1zLwZII_BESAh89`V1+ zSHQciF@M>9*W0!olWoQWvG2CVg7XT7pSii0?r)q2*O~jG6-{1z7S8przv|67J6P|! z5=ML`U^yOPy8s<0r1NJruG_vvi+n1>7be1+q1kXnppxuv8U3LE9}0p zzc4P}eft_o%zJ5*V8j~vFWYF_YY^Lh1Q_@x^7SD^7Ha8~z6zAwJw#FU;^9lu6%jBv zkG6LbcasCsmvD{^WmuRiqs7#MVS@dXw)eCoKxa4)Y-cKv2IUZK;vfm|6$O$x*>#i6$^(6`v(Vxu~fh?-~wTO;}VSM0J#48*gNUnyWb>z&KvCNzWOv=8A{7lAXi#xR&&1f4N7i56YxP^NW*facm=DjHp% zVEiK_rpJ|vYEJ_&>mUE(EWLz5F5@>o$a;nQz84KLiRhb|KPH-zSVfYQ8e zn7oGhAEC**hIl?4i}}Qli56~>is{qGrkp~7#aS0F_bX`OD|;|u2%MBuzOWJ-=NX`a zsm28ffAK2r7#-HLJ1fjnAOPlW9{AY6y7Qd8j*lOImTp|Xna`LmV^}TvVcO5MW+12) zy3lt71hvejjT>Z5r%0lIoj~352B{B+fg9p3M1JB_OB?I&e|&J3wt?3&TK~1z=F$N3 zRq*@D7r)AV@K*$IUjXkrTf_#;66b-CGDY=K1>&9kU6TD11dJ<}!oI=pHLB971Srto z0D^$Q!TZEUEH6#-+*F#tlGF~o!6|&LS3odH}VVt+#GqlMeIg5GeNB3Et$ zm-&Jf@Dzv{Z}3sL!x+=FzrTeb%J~~72pc|gm>36R8V>Q?C@!M_r^;_X!T9GV#q{#S z0 zX@%hBf$#rM|HJpc_jkUY9z2*!>o=+5u`AK9$$)r3&nSO5 zF}0Mw{hQxNckU7tGJ|wUvg1Jri2}o)y?2=Y{SWuZ?MB800$tvIeV#)Mme4mxwDs`(v$T=OU9~ zkXpDep?xVLi5kE>3ep+HpUy)O%sA~Ik`>|*a~Y2S_-JHsS8~&2%=I(Y$FPWyV3(S= zwNNNHscGnPzygl=&T%m)|3IJsNF=@zMKADh5MUM|2&(s8y*LyCV^{?MAm=Nx%)YE> zXn+XI;;Eff6C6a6&r13tvLW`uF)3Ml$inr2K5f&s)Wk&48hu8u?1czYjiO@~&4-%- z&ngtk>|{3=_CvuB#N}Nf2+%{!l6l)&SPOg}W{dx`Ai#X=$N9Psj=?yAute}hXr^t2 zwG}92BwVHV4Ti;WD_GELte1#L_J=!sjQ6kx#7Ml&v7w3Q-Iu+`%$O%bVh?3vxFV0l zS_YHCSAMh!uPcc7xJ;>g9DOrB8K}5l!eGztV~jwC<8pkQA6XHd|!uFvPjylB&T@)j?)CiX!~EVhW4S1@P!>-<0SMNF_6TVw*4Ymj@$*Wylhj;}A* z&jpFz=`UQj_!4DB^dAerJYyYgUy8xY^>9pi?cFoSl-JvL$L}v!;<|&fzs!8XNTap! z<$05!D_jNK@QiSo*V8fNHf&#ETG%cE(=xIn-~{KZKXw*&b?L)Oi6*lG;)vO8zPT=A46K2B(M8E&u$lt-Fam>$iLm4_ z5(Q4jXIqX(Wrp)qBbwcG^13_kSVsoIs<1vV0>HL;N>3qlW~>Rt1<%W1&{*t!X1e2v z7{BMzR|u5snSzI76u$hW7ysjLo`DGFi?e~vHJ3icqYUVVS!+Cg<#KSz@p!C zuhz;e6nbvBgZr~%JhHKM?26Xz!)I&gByAQ=t z2bTtjoC+p|x(1A_aRdwa;w~&MBA7wLaN+6oQE(R5h3zeTXwjUP@r}>I7WfnYY;nvq ze*R+!^~2DWGu%jy5dgZKEZi3uzvoZo6gviu8il^}rDFmO4=q^>p4ZsrzW1SQT3Wdl z)s&8Lo!NM@4UHgx7cw=!gewJAsm{<~p2L)Nk$zF&rP2gKBwF6F;xrQmJ_gNdgKNny zj67}6Em24Trf0_BbeQNm@N1%VGti8e4I1RT;yRWiaukl0qp-nyPd}n)!Tan>6?lU- z4Z~Qo&N4N+^w!a$M+!RJTV2~8Zcbwe57^7n*#T#j@k;v&YC7Qs#-aK8v(pvsxdZ9t zhV71|_4P7>0nEkXy>zd93yo@rn7#@@js59uGNi08Q4tIcvC4)X&zIPbhtGaSA*xrd zXfSl0D$)q!DiFBsezeJ*if%m(P*Kdhm6nb^AE+TPa~=^Sh6&0Yf;R8(ns1!*i9fbl z?8T+)>7BRUiK=bOWI8fP_^gg)2X~hSdB=;RFrYBUjbmJLhVWisueu282i3#yzpo+u z%`M(Y|M~C#A^gc~6rwoC<)UjoR`yGwRo=ohh?d8lCAX!O->jPEuP!OBPlNp(BCApf zMsgV6^z~b}BR{F$1Y3uDjLDRLxD8WuK^nJ7HP$LA8|4?y@PMe}Ql{H+6QzX$NLV-7 zBkwf>&>@~0qtgny!&q&az&yD>5saI}Zx%_`-=*yF)a*JLYQC5rys^%jJOJhgLt)Y6 z+)N_0=pHFfI@u>~0Vdo$r z5QGPSHRfV)t@!nrLJ8ty@neG^Y}X`QLc#CS20bax$Tq|ff{>l#JyRwz&N9EolB%H7 zt;s&L02=c;zief%K8k&8rBgEYSoCB9|No^`+v_>T7RDMuk!Q zpe9(LQv!1isirYooQ+gsx)DtvQTq}>8z)dl^Z%tcmeTrlkQ|3jKw6kW)5g2~u(v_H z_ep*{$wBV|WM}kgR*WWP{6=VR5pznfc|4GXixv?55lk@G>*2fFO{_gn-*<_ZahSBv-~l0Hj760)s&^rsQjY zyvI@7jI(hvqaG54f^3DjNJrhnAacvA3jsXg-a{I(?Hz)1i16-1do+$PU@9jQ#><$h zE%b2dOWdLwNsD$f(XegG(1QpH6t8??EB0Z6mqbF=nr?n;npppNxX&x*YReip8h!O z5^39%srMzj0+=u+4mD#@!>R?ygA|Mj_guNp$acPl^*krmz1mc+3GzU!c z-sejuSuL6`2{%&+WT>Xv`#qelhY7)BZFoW}PXTxcCX?pkzG#;JVomZoiljtf&QX8n z%v8#l$*5#4vFpKN$0!|xYXC6CIkNrOLl74W3gX9{P5p711uH_i4(@%IJu|pV2Y|={ z3w(Gsv^t5T)L_vN>&ks0SR1=Szl_5lS6aqi-%JCodHQl4S&VpjkL$roUbf@df`Rfg z`{*mi%UETv99M=vzTh5z+^e|8vo_~{aXoI~FYa?J-e*5DY6=40e)V$BGJp2v+2Qt> zr_7^FGm8?pUiG6nRx3e_fqfIS`dky*_jwU?L03BkD-d7A7{pv@CdL3<=<>j}_{!%D z<{XW^tebw{rXvgs9r$7dU(VSDDix7-4IUkaLZ+1m=?n4_d#B5faACF<8CTaYDivvR zw^ha{ZqWkfeUA0Qs5qFq2yA9NB6Qc-Gr=v4L~SpnXn z#v}oxd9=!+OjbP`&hwG?1*vTp4!}c(8uX 
z&vN;70#)B)udynEPezD!F`ebJy+@phYC5J0*c>q|>I*!zgD~Jn`^V6IX#aGBwie(6 zhTt{GAw_nagCjCQ5=g5{k11a*%R5W9M^V)wZci=X;^EN-pov0+Y(-idpFBOn3err! z^Tw~IrTSbtXi{7NhO|aT$}s~oIPXwB1g}GqFI;*iFrEf6K3Y|1W?j>JLoGbDW(Y zP{k3e6xeIs1}S(?K!b)6lvQfu43BjhHz$+@uReR8UQr%=ghB}C@QD*x zd5zobkUsb95ST`Q{`t>0fg2R%^P^alQriFEoXT;;kP**t#v15-GJ^0sMgjp-l(vX@ z7{Kz^Ax5QLuB3%8;~w-T-U9)MSlxt3$K^LMyve`Oe@M3A#w)9Ete^Q(A|b$(55Wn$xv_2z?No8aK^7GPMM; z{>?Yf|6NMi7XTy#l3+qc{R@o-M-AccJ502EKZwy_zGEb6hQsHqi84Em(bJJ{8Y1N4 zh4p}&+$b6B25KA$ae=n=@v@X}#d3Er&kc~{7{O-Nhc6PSBpn;VXUsk%%%%>N;7h=~ z*U0TD{2o&}z@&sUiB#MmA9Ia0HR#C(Ea@wh-aKg zNCH^}V-ruvpu&{+H)D6Lf|21_+tGRy!U205?kC*mx-fW{Unid}VLSxWf{u~?uNW$) zZs6ho1k^og!@%~1up$fuf2VB0I9)ILwOPNAv9di7ujj!M&=LvT^X0gNhtIS4@O;2Q zJR5fa(`s&LoQFa?A5S`V`sS6#cnLp_IW97Q7(?6=>*w>{>nmn``8m6lc*fi>_t$pg zj<{bU6x-l`JmdUbefQ2M6x#lJueiX=dHJhGJ?uw$lOxrj&&BtA7x2LSA%Fk{_8%|z zo*z4QO^pd@lTO7lwh=(ey$Z=@FBwBSKea%qS~7zSVxBGFGu=Vs#;!mNz+1Qm6hYp$ zF*UUiZb15yI!EWM^1<=s3}#pwJZHeO>564-0-hZgL`yfGQe?-toY3WP4II|x!1awd z3dZk&5=NYdcgL-<{_GJKJma1%M9f&fVcJGw0){0DFc3cD{Q^^7ia6_DbG_pgjb@n1 zmWRuJ{F3>`ulG1Ee*)g~UJLtxWC(uV9%q*R?a$WflE)Zhz(xLE1xb%{t-F};>{z2m z#v|m3=U8aQkEWO(l~h--Ag*!Vo>5`CjoDrNZ8`M;Qd3&mbsp{B;1sSgWD?oy9g-ID zm!#a|KCDnE8I(l(?PQf}gY_|Zcw_2Z$TG{xoxYuXusK7x-dkJ^V2WIl1N5v6`| zj6RbDy z_P+A~OUf`MX30Ww3KOZH{PUw{{7sfR+#yC#sP+0z(jIjGWDn*L+G0R*KmP3{@~2OX zFTtd#fFk;%u#M}BSO8%OzCg1?(LCw6b$AZPvCJ=(=OP|s$gD0zB7W-&-wk)JCtFXr zUP+VSq+dS#MS8gTjD5>No+DIF8aERVaM+_}!mE!TqxeJ9j=&g77RD_NI6)!!r~l&r zN-MfGVHvDj-3k`Ta>x4Ps}kb4gD&)&uai^-<*AB71jfL(#CUX6Y%t!tH>ZPPucM7! zh6&%S9pYB9ocbqmF#}d`N~X^O*{Z1NL*E9q_fRN$UKI+%CmzM(c|!%CSztI595m@} z3!yIDFd>Aa2tehz`viwhAOz}ygM7@r=S?c1vgW!-H&C=nr>dmV^}G&`!}NoP2axEt>(ZYg~7`da4RpRW7^t?w z37XcjSOUit4^aviE1U!3RKPJk&RG%HJFR9TxXBz-$yOdtzCa;Mx44_Hm5FIUF^rZS zr*O_3v789;1iOH%@i;;F*2^Jag#S&odP-pb1@2(e+z^u9w{=iJy>qj^taJ9TPot}RDBE5P@`Q`PA^z@gHaLq$OzrPcB z{+x4mw0Xdux6{~-nM5^>G_=xAOE=68MQT`t+=rj-rox~9n@@?LenliZ*DJosJ*06DoP^zzRNznzatMR5007jYy2%oo z9-6iA3Fkm{fP5JvwYLfg9_$`IgH@zA1pw0^PUB;y7SFf?pVOr%By~1HA5j@o50~^% zBpi$ufFa_Rc1fh+N ztcLHf#9cR|9+53{0Wd)5KeK47#okW)RS~E%Fqj1yXydB_Uj5=tuXQ zBCZFS@MjI82PQMnjE8m2#7--TghqD=+@50n)b7I+#r&Be-^pA1J~28DLY|BE%|4Q~ z@DNB4omywJDrsVK-5LP3{h78)>xu+g0_nZy3RBF(bqV&&>=pdT49VQ-i>g*%cPIq_ z$C4Qd*Av0vSK1PR2n%W(J(y0;zI1O8ff@hM$7e-Qx)dvN3w`;4UwzVz0Vx1%`dW+Z zifBxn5l;tm7+PpQ`8?OIT$%h7xp&=X+VT5|#_@azE+CLtmtY39N@OO_eR3&%xu%XM#>Vx?gHAhI z7BtVg7E1qpGFGgcYjLHuwmT*e%Vcw|J6(Ax+Kc<+n(In?eBpgzC5VS*ws|1d%=4sB zEn%f!E_$y%k74EDS%W(5KgQ2@3b;HJ`_9MQw*Bz8PYX9{yn!Z&El82U*|ueT{We^6 zSW7^UW*8%F3lWaWHf%fA6Bv+jlmT@;J#zu4j7PsxVe0~IRuPSm!kY}yDSns(obLjR z-!vs`CBO>!5_H>W0pG%sF*^m~al(a%GffY1hNF-b;~7+2hgDJQo-k{AV>M9$XTl6{ zC80nK!1&!CU>W?8&(h`6x*yyT%M4!9^z1!>L)T|A7WM5;P0aHFn+kA{y ztUv3V7fB(2`)r(FnQs9g3&Hfs`r1aohHx&dv3|V8x$s?!Ujc&V%Ex*$BAVdkOkiDq-V3PWE^@4WFf%=v}%vO(%YGdY_qrIpkQwZ^u?V*+gt;t zVVhE{DscJ<bxu%(!K{<@!{~ICiQ0OR7s{$0fvD;m2RHPuz+)( ztOJm%U6QLG*VWD$DL8TI=yYJvTEx!aHZ+Gt{?_7py17tJ?|=AydU3XshUal-K+rQ~ zt3OSGraQP_oRFEO26J_RP(sQHis+GH2-lJQZJ0JJ{by)TwR#K<<2^u1PZB;xipws{ z`~dC{($+Fek2JSaA3}3Yu&9UBD^kS&T|xE&*RN(1rV(bZja4KpcmTa;O7gJ2K0;9O7@icT z#1njgb%8iV6od`dPK|pDKl=^bfaVwHLO6?~LR)5@>47WsYljmG3z_&`Sm?2z;$%QF zo$Q{bDcs8}JU0QHMVVwY&+%}0S$#@#G~ zTB?kp7+@__UUVsD*55-gAjV>>X5qqd&IZ<7RGpjJN*#`%4vl55lnjZ5f$Pv<5)tRU#B*OOpSXF=Tw<*7I&U{ zi6DFW5^0M2Mo6naj62o4zxH+NjeNj)hsKT@(=(X15R6*AO1ge~9_17kkF&k>?31%J zH%vh|)~Cd{`q8s~usTf+S){`nP}CvS5k@SbrnCpp5kBmPyXqrOqf6viiRETnHCFo z7;17i$?aRSD2Uje!_q@qRC?DmlNmiV&)L@2<-j-k}f$9R&03f`Vx6-w1AR~x&0mfX!AimaAg(w>u^dd;WdF5bQ z`cR{RAY+c}Vd{;y2zFA#p){(LFGAMkNUEC6=<+U*d~upT{82<05hp6Bgu4owapx;R 
zcI^7fwdhCIMgkuMoa%(<>S9W-fKNV9ucJec$tlPyYwoCPq=H5uYy#A43uRWOdg21HANBT`ZF z6t{9shK#w=r&IHJ2d%%Nd*V6?JI+1(uG0~(d@bMW=kNvPnXp8$?$LP6mwV!Sm{-?P5HB%bUv_j;e#xxk$D1C^2S z_7*U&avWD>O+e-LfX1WT`2*f4R@@Di8oTekypM7T=amy z5}KA-@;A>-uY~45?i8t_Jyr0uFOvc&1SptgSb#7yPK6eg8!mo7nmq4S=(Mi86@~L2 zv-hlp0md>e!i7vbH@NfC5Y7U#WN#g3oO4DS*{N7hCK!908FDRm;7=FBGr;|RFYhhf z1cT3Y$KlH{+Dj}obBQy09r9o4Ae@EZ3jGsMYa^re9!FEwZ=?o;c1x6FPLb*TF?7 zpM|3n7-d~AVZ5fMIKR>X7@_@5tL-XKqlt`WSTw0jCj?2L`*<7|z>Y-#~ z-E`$FP@t$mY~M)08EX$#RN2-)O^~fH)|-X1W}d*VnQN?mItrfu`1#ZHla0sF#ab%k z4rO}XF0Lb|$132UNzll(g;lZ)9ir8LNlI_3zcRK-Qa6elWYDB5GNUe*FGlq*nx#Zg0I}uJpwj97n@EPN2Uy;*jhsXT&Yepq(E@ zi1MCMn8qy7k~l=+LL~}o<=^A8IndJ6W37h^>J}NJ0r)X~_5eK4AdkJrOr@!ADHX7PVPS;VM-C-^^vhz z=1IUc%&fde2MM-^RiMBgr1bsXKV(jM1K}l)1a#e1$UPjuU4w9XzfU?3)u5N21V0Q$c4^aZINVFZR?Bm|D9dm8|Uni7ytbvP~GMq-%8 zd`8aRCT1u`0>VTCjcUK!2R957agkkF1Gj2uWLIub1@9q#EflO0A)8xNL}f9B=Uba; zs)e5?lBiYm@&>Swd+l_clsx_TW^I#C_S)JiNPeC|4_mC)9%H=`b1vYAR>zD`L!*7p zA+scK0mi2V^C^;<9-mDInz*)!FfTL`{`agafj$HzsfDfLQp0V`*(zW7VYB9@|556h z0(qEvsE0WY5r{($;=_b;f%MIhprQUW$J(iVTD^IVwSOL3Y?c7uB1%W2Wf&WqLn5I9 zBGU8A&7HJ>w3qN>9bu34bzHqMg|-UKIa-Yu7y`A^wG@RH1&*4kUC2mAnXXTPta+jlx~ufXnKau508VQRtTqtjZeS* zH@=a|x7H#u_XKstzxd0o^soNad+FJe$6)v_A`W5>PWNpvc$Knf#Npu}2PS7yMMU$c zyu;wMk;t`6T?PTZ5hC8kr->*gb;xsk6S#UY| z5z`>xqY~5wfbi7^29<#cl_)o=hEa6kjIwcXg8*ANi*eX>WVghyB@_iL(}_ex8wW(H zd4PkbeltNC!JI9`sn4*-l$TCWC5@9NiCXpjrVIi2IGF|?Xp8h|IcZG+XGAj#405+q z(>QUF^VE`xNnmHahsYHtg@m8x8Wf3*jsw+ML z7QDF1xFd1CoD<_!E#e&f6{+;0X4UD~ac6Ef9?WaXJ;Day23K*JV3__gSDNQ;DkbNh z_rShrpC9|qznKG!Pqr0p=814_Z(-T7hzNZmBtOoc>{P{Pr1;nCTIRWO#9~eR5dBwYaVp3 zbWnCE!jET0>C!~uo+YJfpS3?I0$eR;RILMA7UwDq`{Q26p1LM{cA7dS2Uo!df4rFY z(0eirxRn9J^kF+{C4&0~=Q^V3k`6DWRqoMoroF;kQT&TUkOczoHHET-+c28Qr48o35Y5S+_GJHh~qNEoxL9*pkNfj zd`fiP7{nBh7BoX}rqI1+Ycw?uNE?E$jjMJ>^6uk9&IgRa6sbt8u+@Qc4sM-i^d&K} zuK;&!5SeLRHePL0&Xkllm;h_sujBQSspo(N{TIwJD9u@bGo67G6)w#y-9|H46%zbW{vbHstFbmY2qe*aZ!mg!v`zbb1AYIUS>FcRH`XDr2?pX`zNnfi2 zm=OcV`j85gV7f%wn%LcQV_xV1=Gu(>jd1(=`vh0qO&@Q(h{{CLEg3GQ5(6{zsX8ag zytI1+MsQ_(lJha0enuJCrC7FPDpdD-@FN2^uBw@#wuiO>Qp^Y13*DYV(y;8S(a&`?q^QnO3lPPLTCMPe?)2`(jLi99Ak8H3$5%Nnp(AN(##fgzC@;$GEVGg zXgkGcCC;@~7mrA#WPcn9_L99GXHS7iBh=djZnWA+!WX!4^bs67KqgH1D1Dh;KB344 z+p%brH<9sGs$dKqhf}XmK7xVfg&ZH@iFV5LBNd~7zRVJpB;*7G^tEi zEum$=<)ptKKe9B(eHd)m^FClhezMi102L+?c>?{+)CdpoohHS8+LET!&^`@ecI>g% zYQ(OSs%qoYN9p17%`~w{vVStn7zlxWls(lwMAsM7(5cZg=PsI7@LC_hbBr%uc@eV> zIHH7>wo02*Lqvze{I{8Z;W~T(fjbXBro=MtKVo64@DBXJAO8LC4>>swpQ;)wN%_)g z_6GITzxD3x5JZxhqUL|LS;rsf0)b&B-FWj3$%6qZ!qI>Bi;eVu|Fi!uJ%02t5+9(6 z?@;7PB*F4H-y7j#`=D)rBr|BLTAEDAJWi)D=SJ4dfvm6JevJr-we*c&|E+XDa@X;x zd6LQAN_X$xNgISvA3meZaog}|7J^?1oXn^MUgGAjDtzsrB0wSI@PH~L-p9rS=fI+g z*n^>ADI7X+Tr}5_kzyNUofiP%rYV8`~ntVTeEo3E~i`BxLGbH~Urn zXnsV^9F~;?SFqsNsyGO~^kI_mvmaIF0-(0=&wT;_2scSa!v19(v}c*}M-{N&g>8_( z!?BV1dl+P<9S`R-#vBu(Z%6FQKb@m@DgXrF^S^{UtJCSvcDxVgg1;m8$C~v*f|h&q8&J z48G%V9x;1h#=|7j6I1ZJ%lTcdj#G*W1>?lL-6t1KWEMUd99qG^s|pc(WH0hsb%Yt` z=?%U~4C8P5b3OSkGko4eH^7JAvmJTG>;uF@r0yP7}mzj{w@)hGG!JLk*$)tbb;Hto3R!}Fwo;+RzZx(2Zy0wXW4BZoAR!-vu!Wxpgw8AhmZ0@f)!4p^7gU25 z`A)w!pA9CNwp9uk@oz=|nM4Vb&bmevs?Ex#x;%#1Bl|^J;9h9^mVr^=7Y+iqaDHTh z9jgqSFW1r{1qLWMPX!0>)rY{b`!pjzT#1CGt7pJ&%0xFd5(wTvwg=HodB-@>G+H9J*(Wue~imL%479C zpCP`Fd-$Gq^7-ORU_Z=tF+RI;PB3=hL*YO{&^E=(rZ{WB#5d3siYqloI=niXwhs7V zh`lesB$@Nk^)+$#5UF=et8#A82119aLIx?9IWb3+cprH^iQF|2_XP>|>I8u_yC>`; z_zKg-$u4y8kbR_14uIxj|i9$ZdoO?=2xf;c5OjRPB z>;*2wv;e20;%H@gDNVzG4HI}`nj%%yr{Ky7Xu~)T(J~JQ(APF+z(BbhcV}>n7Ul>{ zIAI-B$nhpaG*>DI9XZ2XAmi+pR!tfuHO<86JLzlde*jZ4o6f+e4RUT*E3YW3u!Wj> z8%9wdTofQg@fz{js 
z&mdaK3k3+zhW>59VVkJgM>`wr1A+!Ws;rmFtLgQ-cTwyOV{M?y7+TZgvlnUa8F-Ym zFZ#RqV}XZpB9>c33xD+VDQSgJa~p8S7>}rSRI8)4x7_Yb*_2$KYXyC@ z!RXVrVP-uvqcB+(SUHDbYM$x{&i+vSO>@&A>0V}5mJqY~=EtlhrWbUN`-xyxDJgy_@OKOTp@z@b(cu|9 z8|wpOia$FgggFbqiIY0rJtPvC^(M>9D>S~xN3X)|Ky&BV7{Y*N1kRnxw_5cTeW=KQ zNhRWXW^x`S?GP>m2%KGlHZk9*uoz^FtII27f4POi2W|6OXaE*dLB*l(CNMlpd10$;CH5#-#hoF)L z5cee%iuBsWywf3)Shs|$eDm@k3g`NO8Ym{tPo@jQ6f-FQ2SU`9rp}pD=_F2Y&I(Cp zeV8QAYS1r~yzb{Pf~i5PB}_a+tk>pezf9-&D9MctfyXBCjcU{O*f7=we8E)CtX{vF z-uS|6C|q+j#3{^=H`nJv`Cu7vgJ;Lf%URze!spNq+O~x9Lx&orBJyU0YjApMCmS+W4z~ zhtzFeHzXntB8Mgj0nDb(z!7!EfzlZ(2nxU=122N{m8;(L}TDLDw!V!(g9App7GC4h`x=H_W|r z09`r8qC_o&Y_NjJf0Q&z6NK6i<0I9?=juwJDWaLugl5VK5A+$&>Ej_Wa4x|}!?XnR z1iH!$rA#dh30Pg>!nP!e-0qC%KQm*|2!Aqle$|K|VmoQ>aSgO>vXlTY$Ru8Q6@9t^ zlHw)+p0QpM=(Ug!AsAS$*uoGxMsq-5(zO-{Yf8W}LH6&S zis1GCk)e&TbIo~p*kpEs8FO6REHfq(7s43>l$j3At4o3q=H5t%MZC^JjVNCQV;L|( z8=*C|4-YM{A>$OI;5{RJwN4NK)>SRL2gbBWmqVsinXN2r1Y;IdTgH{K($qk#DeVRV z6T!G4u_?j$=!>D|PlnJ}qa5D*>m?!*3H&4W2XS& z_#{HP?_f|BG=fQWzOG1qxhC<;$5;oR^E|ot!pg6{yf2T>eUMmXA&ILz<9fg-ICnnV zw_V24a%UD2VwBNFXqJ=-IB*U8 zPT?yDG}u=b{2faC#+z`G=jQVn&ODd4#d!Hjp_(s(q4PPfXQ5SrLZt)aVPO94?$}z! zdCZUtuBpPO#MX6?;m#IFuCuRE#qYA+&-a)1>?i;7Hx1=L3gO(F^Bk`3k;jxlVVFE) zr`$&y?r-$#x#hZ6M!)mL75>i**_BZ6dApoFnu_ne-E--k_XC6V$s#G&GKn=s5m1=67+JLnP2|;OM=7yn)7RADV^$fi=2P!-_yUHx)D& zM7Lg0m1qAsiM_W@ULYcQQx~rFJ9pQ??S^ct=+tz{`hbGN&3qDFXFU4m45KvUm{>(&7-|g;KA?( ztps+oPFYCEs++Ep8BT=X@E|}y2|Nlc2rq4{8%7A@z!2UK*Vb;PH}Bjel@c_MF z!51oMWyWRB{T;yVL*KG-7`tH#UMw%(LMeL#oM2`Egfsk%rrf5)rV9)e5(rlg)j6+cvOh5y{G$wd%VFXgl;r?(m@nU|agj`5GHf1QBe(Ib8lMGk!}zP9sSt5|iuFL;yRb}3F_`4WnP&^j#+1S+w4XIMdRccW1IWzX zsUY8kZ&A@8LDF9>lpE}?Jmdg^Y7=W$7i3u^{gFWjZE}(d?_&xxthqQ+8Z}Ol2H|zq zwS~W(CSU$2{(!^9iM0Ka%sW^P+gSM^qu@01_TOb+`PgFZM`%xETxE?<9D1OWKN9ReZhC}2-9-&~`Z!E-HX9Cb@R?{hXMhbGg1^3K~Ry-%nrX2nAA1w8J- zDb}qfS{BV7NwG2w!p-5iurVIekgt6OLx*cUtdpKEaE@; zXAjda5CqO^R452d_Bn}XiH^GYHdPT8ahI7(Yu8}r%a{w%!kAyL4l+B20sp~!Pt(8t ze}9_Jwx}|)P)dLIkAFYCBz4aG0*l0=NzB_sq7GqNdi?Ma6-hqiK%-e7oMCd9uJE@Q zQgp|3e{*S;$}XfFz*j9aJD7ttS%&0}G-`=DxYV?~wimzTRj7s`^IM8YViH!9&|0(C_KV%rM@5K!e5RHVlF%)3;0Po@1EN$e zKxtXgObM3Ydw@}INaiO2Hg%E2Q6v^jA@>_yteFmY%EVrbE5(x`=1&E33WwqwiD&GV zb7vCS@^-Zr%$~UgkT7ZI?W&q1H8fQ)KRz4Z^Sl}-h3*c&S|Z{Bo{4?pKABaUu*uBS z+g|P~LmPO6jmz42x72a6`i_lXpVoTo*$_GUV}Ck4$M*Ydtd0Nqto^WX+$SOl28*+n z1$p+}P2=kjMguNg>)fv{74Ey^Q}BrWW;_93!XBH?GP9oGJ(?G148X*x$+QG@9b%@| z+{nTPR^6KU_0Fk0iCVaD0V7TWWB5Cd5IHN(9ULjA!y18VR17j7;+XCm1)QvbyW@SL zVOVGA^Qh(XbmSs+Lb?(+JTbnsQEkBn*G; z?)`u<^LOg}Z$86?MWS9|&g;0awhqAU(4M}foG#7?-vcU?&92ByFm_yA+R$2Q6uX13 zH?A?9Pc@Vb5ts&F!0ec_RM%HilpK++r-iFSN!-sF>?3b&2Y3&`k(D;sPV{nN@Ii|p z3*A?6(C-mxJcZxz3QUL0_C8FBmR;{}R$wGZR&I62c^Jd3GhA*Q7pJ!mW#QiQN>tq{ z5al~dwY1R~_(gh-Yr`>C{ZaCT53S%b3vS!|^oZwj;Tn|%(t?H>O*H>9NCi^c6yZlY zNBz)OgsU!=b2yrzN$3gw?Je9?idrW)$5R9^4&zoLrV;CvCa}=M6Pt6jK$%zD7jB2u z1~axE?iZd!g@f(QL*Rx;&}r5H_b6RI+5|obr$sD*)H1+#d=`K7&9qbB54Wyilyl-z z^OH(vINE_n%+mT^zI*T++b_1#iQ!nmg<-da) zQnfLO^AXk#=^FDAzaH?N)#-+4M=i5BLVQ%wT?==U;Td=^0x^bgb(#Z@n zv-HpZ{LjM;FwzVG)fHgVV3WDo#dK|jK$Yev?4c=@`qSbq(q5t6KSL-yoo^9SFqWQv zgf)+-<{~)Wie|co;rb7L&|3vXCvbGmnKu7#3FX2VR*jLKG{ay>p$8b`JefAA?+8H@ zGfbq}gtj!v7BJx6Avm~qtsZFwcvT1jxquc8uaHg&AsP$@{l>Zi-z-82B`;^ZiIDvG zF@l=qe-SFz?hz!5@KnM2*CyKdDQP!NL)M_L7WKdxGo*BClQH1%;3(by%6xkE z9x(~v#C-(5jjh4-qaTw-Yym|+xlaw2(H;F3QN;t_`R%{;eN9XvERl@T)dk7nLvwL^ z1tbL%jN8i$(&ZQ5S%JBqPy1*J{_+3$N&3N`{TZf)Gc*=dOko3L%4X(nfgQ`~*4=CA zdw*j#{rde;@+pBx2@;vWPi+Q?eHEXs2X8E=iKTVwnJ=d=f8#BULb#Zmg(k!V_WMK^ z%Zx}YpFV!fp7he&?|d!2e7TJkVk#IoO{=pcM>UFUm1-TY-?^4i?w(LVJRFnPhuD_x3I>|1u;q3!@Yp@F~Zf) 
zj$LLqOm@!AXWe&SzVjT*MjNaT0*rGqXDEw#xf(oAA6j0*m!H2w!@)x`Iw5StJLYx{ zh}MGqE)nCfVg{EnJmbAZd_yz*2+;(xHZoQ+G?sCe0B8vbtv=V3pw+O*yu$*-dWw)_ zaGbC2!SJ|Yj70&&^$;1mi873SC5+2NgnJ0jxGp~HD?iTy*dFuJhdmfXkv!M*K{m~h z?f8hlA}JXZ_trl1cpyh{ldXyxV_A{gyqFm*8RTHV7%M9pu z+p@hZIAl1CHtA1>*{DP{a&e94Skd^o97c2@?Anx!Vyp{r8P^=M>%bHK#!cXYtFiNS zykc&bAL4J;)J=3Bj==#(#4w#96tI;rN%&Q=S60W5~Rz3D-rmHYg+ES zCxl_v$)7ymJW0Rv73&+|*Gd3V{fK?iYNT>95SjWh5bN6OQ2kI##jm}A_lYh_+1@5+{+m>D=h#Y@Ih z!gZV>pW|=weXKXY>$nwEqF9V)i=S$}63P;_unh=u1udou6Yp*J_?sWkQ4`moDvVBx zJeO*cJM zt)^wUdoa|4Fa``67(2vD%l8mlVI*AifCJZb3g+=L8fIzi_mJW*4D@Xw^EY`AiQ4ryjrVi!j@S;lzbG=60pngn4{`Zo z`#Dp(<&d)xSD&SoH`6t=t-pBuqx8`WlI&AS%$%wB@7?Eb;4qj#;F7gmzeUbS(1h^W zBf!FF^}qe?zn#i#(#{!h2+pCa(9n+}kd{b4GeB&Hsh{?a_n?Es%3(Pv(Z1U3Vb(%7 zohMH=(uW^>lwLmCAW!BK1g%{PTFeK%*(b(CSD+m<$RB+8OB9RjcXKD&3K{~FIfB+) zC85eq)A(q4pPU*;m>W$~Wn9!?&T9w^k(V8zOgJ#sVgUZ6K+45$-g^gSWH~*rYyff6 z3v;ZGrmx@oGEvH;V!}J4;he#7gM8LPr-G}7*1b`)Vkv4&Vov999bcZptOTd`?8Q^y znzF&*YtM$~SNzvQh-%}K<~0=oreuT91?E$FwYiyo^!|_2($W&^e~-M*^S~X^;H>== z`Pz%D+bB7BkH}}gP5I+)AKH6_#X-)@G4Uy$O_Bj{R&=F;!6qN4RRv{)%?O}^!RHm6 ztV*X)5Q5w3#rP6}EN+Q&Q?oozY!PcWF)^R+uiQ<;tnoQGu8sm@ zfD8l*KqKIrfp>rHd*2u6!3=@WG$9Z7V>%y&vF5rpuve}bOp+z`4<1$1|MHK2oPP9g z|2@?Y=olte#0q|nA*#_?wIqDTZOVGo_GDX|TopgAN znFs(bAtWeanh;iPx7G)V@EdRZumAo32>fJUfO`af?BiQEo&NIuhe*fW^quegT4-hT zRnzBg7IXi~(n5N0=QS$1jq@Qb4;;n1oi)7uxyr$ zo(wk34Z?(n%R_aw*)q*JVAr??7oLNRc~*oQ#Mp?dpoK6XnE7OoY}23)nvZ8aKyeuO zMgc-1XjJ<+!8}mG(1@$?QY-Gh80l}4W*K-9MlSXM0SksMTTo=sWn^?c5$R`QO^3W> z2qmszVwZ3`!2Ay-9-|9x*tLPdWsoveu98Se)4U9OFyIi@U`}j5o@Z@12v;smkJg39glG(Lrc>{hK~*N2`c0#~{qfR5)hGmJxe?man4? z#~FLz1zs_3-u)EUS@V1k!o5^Z#*S}Yj##0~wURK>V(d}==Bl57ju@NptiYF<2;hdX zUX3Z5VGJ_ye(wv=3^2_`xX0WztMC+ zqZb*Ye~ZKN9Ag}|!as$a++aM!pT4_K&NE+g(6;TTxD=OjKaM%~r$Cj3L~i5dfN4iu z1I)(Ra?hEFd#kU!t|hu|n!rC`k8`FVa75Ie8qy)?hkMh)kRISoG=w&oOdbdju7D}Lx+f$Z-zMc9l`=czab8(mPG7ur2Tdbp ze{iy}-E8?cKIm3{Nf6s)viDxg9(`QZf7pl z4`7fEDLG2p7vNZN?m5+->J;NShZB<4=nfRC4!Y^!(xj5hbVveDD1fl8w7^$I3MkCX zpp0R?>bL`oCkAj&TAU@(J7YPg64lVe5!zmO8Z6aa_G18=K1-R}*B^WV2J?imnuG$k zF46@pyoQyGZmJb3Jsp@sdn$rf&j>IzAEz#;1+3;%Gpq0z^Hg1zwy0vmt3$9Up->)t%A0k{`f!p z{q)V-H_}IsJ|aKs9&6n~Nm)+cxc3$EZj)~khIBM;rZ}X4hP{1$LVuLH#Yaip*hLVU0VjU- z{##Uio##AQT@6JlMH}Rabm>0AvNg?jhex|;x`8qFpX18j;Kl>*7S=m1SYSX`Ar-Nn zi@B+paBHel6}BHmrzL&|fHCQbXRn5(r%6EkFg&K!!}KB;V6TnwcmzZI{x3g3qraB! 
z-@X^ZK$Y}YqvW71|G~qZEgk4FCnxnlOFS%U;1jqU7BB7&kVBxX{kA* z+<6C!*(9DXddPI?aIiNC+tVW6iPU5W*~=^Q5%;4ayj-41&mW!OR)?^Rt6;ZUPcL4+ zN*(g44kNHt*|Ub0h1Vba=J)k6_g8asr^aFrjHI{Tm``thX@LVt00G496vFkV|N2?_ z-~YFNn?C)kzW@j}IrwIcL6X5&ZE;}^a}3o?#sC;hCq4Zo2|}7-@!eW!fh5Y8pCnde zAf$Pa*9FL?b75jr0w2(@AMEeOeIp#;CeqQhHOwj?OAq`{-g_V9QA&UEC;u&)o>F?W zc}h;xskDT(Vr~v|C1dFzLTVDnBM?aotv!dsAXIbF$zaMfN{~EIgJ@>d%C}(tmzEas zX*U2!KTnP;vOp+DLD))WrmvDv85BbUd8KU+K2wf-k*O!Rv>q2a<~)$?4j`h zVZHi)F&HKv=05)p=E1q}ynrl0H_MCHWk3b$PK=*}%2QzmbqztF znuo9Y5Qdzq&p75QCe80Pf4b%(QJLO2C?KvJ91NjiT{VeC`9OfZ{1t)+eW@vpx$<3pciu2x5JH(I$9m-k zLd$WFP05t_lh>PfdXQE1pRN#EN`!7pJ?oFyiLoSc zTNf3YGNuJnwT2)jA3bGD_}mE+3qk_1G8J8%P3v&9ZZNn<$n6yYao>*|6ay@;9L!F36&M*}1=1c$4|1y`( zDesxB@i(v(gpH3GFZ_xy6!(g=*e!rtzwc6yDwaDC4yj%$Gux?gp0gw)HxR z#$~S528*Z7#$nZ@0oCR(ic@GoJp~OkuLEd`zy08=XcEyRqYXVeeTi@eQ=o|+X7OOR zo(^}k>RzP#_wT0P`tlb&HfakYK8PSOT;@!W#U>Te;*-he3^$_Xxs`P1+ReZpR#zOG z>7?>1nm3+#`O0cy{b-oc$UVtd7l|cFpF9SZ(ddnH{xHHyCD(IOw9-{)vC=JbBY$LdG406Wn zy0KiWr2BW?O}DPy;+$QiN6$VBCUbxQ%%yuc|DecI7^tH3>ovwgk>?pa#@)kGt(MhY zy+u08@)FEAN#}{xP$4K&Wf6g5*#KzhIrr+u#Ozp47^HyjY*{yRkqPhBcAa-c+S4+tt;!x_ftadUj`bmb+ZhNF#{gk~btkQ4pXT zLXdbRvH{&lUqBZE0RrR&B}kA&i2xyrT%(;GKRxZcd{tI@7nvCu>7AbEJ()dAMR!(4 z{>`!PIp_C!qi^+Ko??U)waQ_zg@n|MbKS_>I!Pw6Pr6kt1q@?(|JHYrc9$?+`6NAF zon)_;3+YEBRj4DO7U=$mzxN-d-~IIm z=^8fZ=YX`YUx1gGhY$2gd6NCbp{=yK=NTCG79VMz2S-AAKSB2zj}_v5@`@Df|w$d+mLy=45lvDbGW)2G$Uw(ilI_ZjOo))~vGiFSE-YAoHJ zeGU8iQu=@Y?9bAx$Jp|(5Dgo>ay`A*+<1HMA8|mvuP3+xt&hgNNpdqZQ!%E=9^mFiH zQw6ivj@isP8B|QyHUQ(XhonYrgF%}A)xZ3|@C(30ABn(!`uG11b(hE~y%>4f_sK%B zNoE+q^{EbBNc`&9n`zJ1!OkWpEr!vaOEs5HAF6shkTl0}BbmKEDxrTUU&p~h!UMaKLw>xngo>K z#tyN`Y!Y4I3^j8Hz`#S&P@wzxO`LMQg@s}ii~ygsUCo0INWs8h$QIF4s7Z-W2CYbs z=^wCzchAP^?339mr*hB;t9LmR+Qhhg8r4N4G zfc!YGjvu=)b15)mt|>Qg1uHC@B=FnAE0C_6Eidu6?en8L+|}rqt93Yl6Sic*m?j-Q z8*^ZPs4xJe2Lau3ZsQ9bwvMR6xBZ*G*k>EV(GeRNUFE~X(zob)JVQYokOF2m1&1pD zC=Ul9wN5X~c~F)Tdx(X{w0~FS4lo4Fnux;h27>mz=(8{K3F)Sc23?gI^5bmTJT)|V zP-vq!7{A;{$1>A^<$F*C6Jy`(i0#co81HF%;zzK^%>az{C!}FKo58;wXj{m*V|jJsV@iOK~sCx_pW>LTq@Ny~@0I@O@8f)V zFlm}05D!Ki$%@0KM99)D)6W$scf9p8vMeS41?;9}a6)oUyWIm^CxYsjZ{GE{>%;mS zH-!&ojOjLV2|&Rr0;oHA^i^_@adCW@dRk!v=%4e z`|RI-e8=n24(Wi-$qYmv+z5EkvUrfbBROWQ6iPJM%k?)<*x|lFWqj9!RZXQ(4nT2& z!zt43Lqb+6v6Qyd258=*U}6TTZH-i_C)$GpuzEU&(IPYmWx2=!`6Tve!#CRpwY-N4 zBiNU3&fZMFcKO+&*v4Z*8G;4?Y#H18iGlZ0U;BObO6-IYoJ8N*nrQ{Vhy)dc zFf8F}15W1aF(ln%qVbyR34 z+|Fgn>l~T3^n0m8<^tlq20(%YH zM${eboK4d!{kk`t(B~p+y&a$(bnQX|`~zFqNO01`6v7QyKLdZuRJmvLmNCUaQ?N@| z#RlOl`-k36zc%wOCRhjQ=U;zCyTtn7gC&?uYl&U;FO20CB77kH7l2>3{h*KLf;~$%0W)E9HiBi0M_aUFUY3-I2cQ zKVdw4vz{Gs#`XY?dNHF`bFoIaCKHe-*;bN&6^ZB+e}N;)DuE!?rkLm<_H5Q!8OP}s zXnA9is8|S6_rO67q{*5hUBHa=08|~LvkuZTV`Z?iu!F{63&6a_oHOqTX!mf!SZ%Dp z%+_HHcJZ~!XTE+WMYLY}uo)@H(D0oAZ*hoA**#X1U(Ynnd)2O%F=z2y+((Ph1D@L4 ztb<*^G+0fDA0qk_^tO&)ObZ%3_ZjDRA8G$C5rPzA)CgH`ghBzkOj9do|M>VsFi-BQ zVm*VztgK*awFkX3jvWZ3JHVQr7i)m!_2s=Zat+HDfOUPN$=-@k2mw$BnE)01_JVH> zY9cl=gQRn%U{oI_O_)r#{neMB5{-w5Q-E=X5Fs>@eKdH-rYz<;GtNSIftU5SG&-iq zwDth(WyVd++J*UB56u6_wXSsY?g(M=`_ed?inHE>bnVV?Y9EwD*@qcZ9ojV?>R^8B z;AE43bQDk1lh2-{rRN)9UVIZ4{ z>Bn%JPjTu%^}$9hFm~g)HUWop=LVANz4U+oXo-Xq^J#_L01IqzOKVGH$=D>7@K*Zt zvnDoMY)?7>;UJ}LP?1e27y^C|q8{r;eba!0?ZvUE$>z9&&D`tvX3{_Vr~e$z2gv}F z{{9E=5)-(Z{-=NOzv0-`i^ul}+$>T(JhFF5MYjTHSVtw)PV}KBP8EB2uL}qyGNsMh zA{HUQ*cb)^AQML=c^_QUlot02OfA0yx#XQUpj;- zs1v^R0BMsFa-V6T(H_2fn9E!#4TVMnj3^zAbCs6G2kE36v4=_k*U$>Y+Kj$Q=z>Hy z4)41uxQH#^Lq#AKfQI|ritCnpbzeZn1Nm7tQx{A=s04W#2(?GptayyF9h+QNW(cB{ zQIBRwmKV}(o^vd-dQ#>LdqXrss`}J~*hZN=O)PASpuz*rdG$jnQK5&spIoNnmcfw# 
zohNw3dGZ@$$i1+kSJKYh#b)Xq+AAQke0K=}k%MN@mLCA#K5^fT#xbBHlqi78?`Kwm)&7@l`b~4GG`{Ft#H;QNH!tFaF?j)h52gHLnY}+@u0v+n)SrJa50f?t9Es zFo3qqnReX#aO?5|KwumLDqPhU_b=HebGc)#jijCF0QrS2t2e82X4> zL0R7SW1r-H@+@TLIn(DQ{V;BHDH<1T7nE|rL&hpEi;)oHdpw$lnla4n6tK&PbOG`O z*hBc-bg==`^H?Lc(U59NXLTWQKU0dRS&vd3ZBw&vfR1{A9u6@NaKU}{^O31>edKYZ zt6a}f#zM131++Sz4Z5fE0)I`Cc$2aXIFL1Ha#2AK)a_wz+ngU6Ail;p*y{k-{OSFm z1*#6PPT2vrc}I|KS&mF*`dlCO%d5;t4Bj^*81)1#NP_})o(sC7tY6Ux>qa`fY6_)O zRBl_e$2-oWhoAJ#v#}1OJ;z5%U_7WCD(6wlb}hOp*lsz%-79_OYK|3nxE_Lj0Co`? zn!hO!=RhKs=*S0u7`WcC#2&W$=u0b%yk^ayO-Gpv7vESSgVyJ-W{7`N?n4);T) z%+%Yd#E?)X+3A*b=>nq3-@5;P`rg~`Lc68(g(ejnE6gRr7<}Vu*tyTnU<(a>HIZzt zApmRcc5-fFZsOj-0X=&M(*uND3oihF0QxuIyOAcaY3_#E+$HMG+|y;EcWlx|?9!## zKAab4$Sg@H({=3g2l1(k{TwFe=%~Q?gcKgh=#^$$KgC|6iQFCgZ8}uKky3|3oFFv^ zVw^Np>L58(&)`1cw{uh*)24d7;NIAPL4d(= zz7Emy?_*+fh@YBLdL#a|ld){^`Fi?{pC?bBq{Vrn60NOrCY+|BDI(d-mhi)a3E4$h z$JiSZwVzbC?bUs6N?)TFeQtKc6^v9%volh-jp|&umXvJA~bOby3wWly}bsV<`U^2_;lP5n(fBwry zfq^Px2pk5E($nT=>5KZ8>FbAEgr21j9VGo0TOdYLn4_jskUDUYgz^m}IA5EQ#u*S@hEK5_H&>K6vYU{QO#4UY|!m{4#A4Vs(21 zB7terX5L6lojPP6wL)9Fp29<{O)BOmq;ak%HG$_eD-!u zq;AZC{`60Ol79KqXKC&QiPgZ{+n6YAV%F1x8Pw>=NE#d;Ae?O_P2a%>;r1x&?+gtI zXOJOQOXNj(xsLw~>I^hw9Wdo52=!Oz>fyU_d}g2=QiOAEtuHh&?_-~1JQQ@GL`-F( zeYQatZU5{4{!c!dn(a$9oEU82()xb-qo2*E|M~y;IQ_dn`g6GYkKrI*GKqv(+k@rNVC?8k%Ulb?SEGqIGW zW~L+QkB9ES5V2p8u$i=DZ2AUVE_Rl?gcXArn@w`4a{xNIf(-40-B=}NZQV68WbLp> zF)JFo&Y%%$t8bW%U5PxKHTP^+MsC?yDJmIk%z;G4#Zt7O6^?Z@MT0ykh2NLb^v3tO?+ya%b=s~Sqk9aKFY@fJidw3$4)_Bs=ujZS_ zc`w@KtGwsOvG6A|5?p7DV_dywfA|R-NGr9%Xg~~4%(=i#&=Hb(>kkl(6p9UA{|?91 zvcnXMKb2O-m{T5?X%BR&X-GjZttuVu0)VhhsXgynL;zLBAvf8BR$xduOpd=tkizt`w!92%$ z;qR+9*d6;2bZVWCaz_p{bi%^`<=(&IaDe@t~LE764ONrU6l3mW!e3oLa zu!jZ!C&<-x1Xq}>C`pxpFfYQy-M)190bzRkuHo+h5JBR&N$5qBJdI3ab3Sp#*emVj zEae>Bt7FeiIMgQLSC5!G$<*_$4UBMv*;EsA2E(wPu;*(!tLE7KKpJvysiN(1PM3C8 zVr)*)y6tH%4gf2VTHEtSny7YF9`A7L0C#ccoKo;ytvLBVm-gpnaamg z>E86+)HUe&h>r%iK@8=(Jcem4bRcZxV`5dW{lS0m2k8(0-~%M=KG*B(;t0{h}AlI3=!^UlfTqQFd!RR#__h1QOl7xOWxL2$p;Uu2+v2osZo z)D3X#!>2*_Mg3CBXdXI=K68xLKs$7`Lax0Fm<6TgEohBS8MAgwclt>Pc7Z>E(q@B7 z7zI%QO5hL9P4{$7t4wR{xSz0A#D{b=a8T?O_6H{3J#h#Bg@mbBg**n=XE8%p9S z7$1TGv~Sl!n>RH&5h=8nF&RIje_aTRf8+h%;Tg3xA}W*xCHcrZW*tT#20B{U#gjCmIV+X_r-=lL*ZVl#yHtfWsq{w&OiN-#K0 z_OLI0{4zcIY&kvqdJVzBW?Ee!3KxA*d)$t&0ps>`?}M9Yv1x06N18-i&{APPRqg`j zZsJS3y1bFD-yUH37~zsVW1I66NAVWS`%Ym}n)senaSS(-SKB}O=%bICO!DW?cG8ak z{{PRPew=>v!=I$*U;Yu&25r4om^`xcG7tj*ebpN}KyAW#$|1e}<{NCUr)dr~O#>T2 zfuIB`hZlg6$-N*u;2Ferh~3vQOvE~2juu{$0Adf~U~r45k9ypscoU|T)XyY!!HMRR zFJGpQfA%q|+p}$pLG>rpnp_xK<<2H&RC(bMt`=NN0@Y#;nt}wXqN`kh*j-Hbv|{yVy8e z31_FnbL%BYP`aXBsW}36u#Mbh(F-t4k`us_&BQ}fNsH0|N(c_mTnMVt#eZQk;x`s! 
zNGoYy9Kasv{P<3m zZPr6#3<$SQL4c8lY||CM(RPy;{6EImZyx*^pyo1tmTKdf00aU984fSTdyFshC;-dl zDD4!?Sr=D)%;yD!^f$m0eXvCT^|wXkHZmUc%?mltg8j?VQ^zp0L1=6&FBq6;2V*TQ z`5qqui=U`gkm27J$$e{#zAzS!hf)tUK>|;G9~^TEU<`Nxpm8O2^`!u5QAg~WQA3OY z_pI5uvHwBWlo9hsKhLN~=kd_Yc-4BTGa?|-$P7BE_Oktsho1n~e1+Yj@O&-KAKs5= z_(d^37*cV^I{GGE*cN{KsI(sd+UGdTTo=xb2SB@KaR36x%mdm!K%#m6FMqfGw}Z1l zx@-%;I7@ZGFvJ|#0M;*{Q6^A3)G)u|a%9Tpx=eAP)4KmuUj{v~5QLyA8DApdEu*@ z8jVw;5-6TJ<5}y?-}9l67vU9`VY@9VTE#!s_s!z7Y;Jmz<$7s}?dO*Sw2x|ai|{KB zE9)B%MbIcV0J7yHWUA6GLI)=8LbMT31ZKR*vo;V3sZRA)HLKV`15q#$F^rL^y{pFS zs*WOL2(}uko6o6t9(&hCY)=ioT?3iiy?Zwd;MbNw+=$@74TQ@>#74HcbwtYh4Hh7E zlJOox-uu?wG;#w^L=1cN zRzC+xHSw0*-T}pks1UF2V_*-ZFnDZ8px_eWKs21*#0FQ?(M0}ya)SL1YKjW>a5Gb7 zxPw#F^so#(=YmlB;OCeFDds?F7yGgsxL%MQ99->)J2^r)tVBPEJ#UQQ+1M4KX0G4t zM*@d&F>~dC-c6Vl4sN@IQ=nKZz$Ln2{z!W~QiZr-|){?=dlEjF2@^f{iVbwb*-;DG5` zI{*M3F;;RBj5Yd#$Q!4YHf*c=s-vg|x+0v_oA;-gcN{P~FmHfh43j7+ z>gg&s>Boa6oWPLyvO*~m=_!s7V`JAzLV;5OTzw>lV!o7+@knqPN{tS;=_nk8V)#Rx zP>kH7{m>lT(Pv-%BE5KtG5pAM8piv2acz!+R$5l&YYf-Jw6Y6$1<7yVRoxEfUIBr- zhqU(U(SI7A!leos|Ck%I=hg76b~XON}9sflXayEOa~{E9o@WnA1Bo=fMS{S zyHCipmN34rAT8(*&*Wq1(%8QT#1%ecj?Oype)RKCSIExwBt3kDsQuN~u(vPKjS<%o1TLagzuFHsV91>v(CI2Mi`2C$ z(z-S^#RPPbubN6=BN1%iiC)J&U-e zHGTQTb3zWl3NfMEY>uK?Hw`yU?SwqMjyMps2Wo9A5)<`Z5+z&_IbEjZs;-DbXr)<>;s`M zE8!I3A!OQ8S5_B?kZOT-7cdj&h9=VY;EOOk9C{wrmg#Sa+K?IHmCD$+BhG3K@2^T2 zLZ9=TNK@hFuh50)8bnDuGsln@vKxzb6!09(m&XKexL(;n<6Rb(2-kN+yqq1*dbG>7 zD%K4fEC~+IM&zp+M!;fyibh2`(uXvr7p9=akJb4MXB_t=jQml2=LX{g(FQLwPSHBX z*LU1JL>HDB_B}M3rqVxuGyd^-PI9gii#A_Zsyu1c6DUZ?Smv zLr6}s*6Vw=#qp6goO^T0`+yu?l*?h{yvbGY0Nvm{zT2+=K+JO(jzb(&!^g_hO81TQ{kd!u6Rc}Qh2>-_Qzz%4d#)`!Nw zofxxrsdV8I)Hul*+r4hM6fs}hgjGyCuIi9)E7GXJl^0>rT>T@nK zNajtx`=8H7wO8MRrefOTwJ3%CS7M2GqU@{pX)`15zm_YP8fEftth>t+qzn5aU0X)b z{1pxOH_u7;u`(foXuT84$LcJTp^7?u*Y)E=jz`cW1!UT@9Ozl`uk^@TR~)X>7eaIB z07F>B(}Gsj66>1;W@l08@Nt5XdkIipVh`;ho6;_ss?Km8&`wi_I90s!^H0-@SC3dn zd(a5`DhxuCb7`M2K!==VTL8*!7>)zHE{BH3(zk#8{q)xK2*&$33p{8>0zCtzq@Mm9P6b*C7 ze;l15e%^18s+rI&I~zFGk-pftq>13}dOpUv34Psu=WQ~s-3mLF-F+MmNI%?xTIC3J zg*4d3`7LeIBJS}oaU#9$mgkzYL!yDF>{@;H1$+phq=Pr+&JC>$1?y`TPI@k0-l*w7*14| zoqNNn+NOCyD|TorI94`ESq;#~!&Q%M*SJ!Whz|;TUnh)IXE$eRWiD-yCi(G;7ceh) zb22Z*9%5)?t9+ryFSMmogXgnAjwO*3CxOirPqQs@=DP-ANLW^@(8YEg19}#o;TKt! zNJ=n&z%F}gb}FP5hwjO&t9~+^YO`#-$_*HR2HvE7NJ%~0&%uH>X7Jv|@nwhjxQB#j z++{y(;_M)Eax88G=!Zuu;j&zVA!&jZn4&ZRolT&I5KpXbEC&{FXJ8l*XR~jE$t+W& z{4%Nl*ZUrz`WHX@gmsJq0&85A;FDL6(?De?y>s&&*0!m%i)pz&Psk$dz`;@nnhL-i zb>n%>k-=UKlSe?_ZnE8Juh@!5KR}c5-b=&Ran`{=Uz2J7mI9fxR4enI-vXw!ySCzldEP zc0u3%&ENm1zWRa=lMavUBl`q@HAtxo*+XCg4rKsCIEi7O#n1p?8kb4$o!|fczm#s? 
zz8MSQITDe*&^yWDb2uaB6TG?^DuHn^-Wcyi&x5oGSL%~6azxZkfYT+s8ZOY&bUQoK`vb5_T+OM7Bd0XRD`8yMuN2AH`q3lpMFKZ7jmkWdDQ!=~tYd3Buy3RgQY zjO3#kn@r^=!s`8W|F^xWFC3>B17_GD#~RA@VDJ_pfmnEFcyV+9Y>F^Z_DLM6 zWI*n|2T6oPsFdKCjjFd=LIqNR^CPb~b?L>%?!{a=&cu=8pfX^y9yu*;jNB{N2;SVo zMC0V3SCg^h)cFcL14w$@ix?OS4|5OyIHcpd1Eg(524-<^3ud_NCYyDB9L&DJMV|}6 zMj5_IiP3jO^&VITYWJgv*jIekXTork3vLF^uh(Q21Oe=H(LUQ|BPHYj)>PnqL5hWC zAi_LQrekmWY?qQ9s4A|zA%(cu*>X~%CYzpVJ-3=!jk+v0m+Nnzj>~+BQYoK?L|dd4 z4=m@-gGT1Z`m?=O2E2MxyB@;s$Y=Sn{W3M-Fu*%eFYkMxDM^*-@DtZ<%VnGCL|n6u zXge*E8SoQeEc%z*>pF@twABgrjl~0!*~3N@#LBKn{fzc z#X00hI}Y33>EYukV_EdiXB|6%|0##841#JcO5~o8S%<&f_*gfV?|$J;o{=f+0^Apf zZPt&mr2($lACab@ljiRVAfuaPBm->D#sD0wjfYgF)>kH;9srOO-UeFlCbLkf)P;bc zFS7Qjme9-8gH%vt>T{+i*3U6}-zn>KA0fdJhub#NQ!^K-A~3s+@obyyTf4?LB=N=S z@){nXD`^$xVH>Zl6Tp)F&NS>;&=o$e4@Mn&7c?>N928;13(ZTnzT=>T3d(WiXRVU04?N3vTj0%?nBc zct7l6kYpAC)I|g~#shUfFhF(}(&B2R!afT?=HN3hc!TUkX4g41a}VlQNplC?`|=zC z^#R|pI}m?E`yK;o*A1tELA0sQ^<>r2pa9K%`<(}AdZ>mZ_ayxpA@WYxXG3_SZU(Q) z$ImeEUx0q09d-BpqX?O>$=SqC=KurcuRdQS)}ug?FnYv|R6?k_X9G3%ITAye{UdB= z&s&L|2s5%kT<#-`?}so7cAaiRb9yNrAq{j^J;<-$nn-WXe1N@2M_OLdMq(wvFv3%`~Yb7vS7|ZRGW|@O&wK`sETfD$qM~ zX1F;+uIP#EdI~3p4glCG0>Fk^8yyz_gJci=@LS&@Ugs!4b{U6{=d_*hP9WGYQ^07( z_@$^V3u+uBQM za_B+oEB2+Og_mg=vjqX+0kJGMceZgpBw5mUKM9NGuv^*zEEDqxtfJ)i0*3JD1jG0N z!o7HQpY^8N;Y=Dt05Ok7Vnxr~`f(aY8*+jM=7{s!7|}ZO>DXtiR_R=4#g;M}sY8Oz zZ>*>Jm&d7(>|iZ~yz0c>Mqz`&>h*d*M4}wgC&WH7Uhb`^Babv20HB+-ssqWmb~A_A z`lxn{^#k=NOxZogv-#D02(vtMJO z8Vl%_dKGT!CG6gydeo&Y{*aM(EZj1r*Nyr{s^g$}5q5nTE7JPj3VjEE zHi=v5K7lSN?77yLV35IXeXMI^IlCvVY?9>+#^m>Y?JuX_e&@SrbV}**>T}jJ_<{h>#Kia~H{_BjrV`-*TZ9v&eSZhX2@mbGi=}GO( z0h+_!GE5{whsCEj-@v4HA=%$O*o3*;BXL$a4EsGB!~;ca+qKtnuM3Z3_NY?-0N8c_ z8bC9(vd)=k+)#}8(VBTKX=k+t!=|HWmHk0GXqoDL*5!p77G#@-Upj+vK0^!k#s{}J zpGcarvqxXa@ZgAT9UR(sMYBRiy3>CQ1-GHZg%A+$ftx2D7>z>tfE7r4K*+Fqi{vYIa}_<`#|T zrKDepr5g;KA4DJl2zX5yTtPY)2CZzkZE#|8q6OUl;pYh>q=bW<;TUhV!MYYHx3V?( zkN!NRYzqKD?qMMuOJq!v2g~N>I&RXa+~5$eO-=#^`h#GfKU-uH&IkaG+7i&dikHhd zgHb_k*8)NJVMkOQ7(=o?PZ)qrCLhB|COQbxvJ-8E8K~C!1N56kLaF)=nva3OYiWEE zDG)X~TU*3^bAfOegcO_(wZJhd(NPj87aTFk1cVSp1mb4{l_(siBHkYwH>+wAB>T~e z#Y4(NO-@mxYaS5&*lX%y zwAYg{7#bV|Adz|#wY1?U1de8^tH3yv`H|KPYh>Nhpa+`X!)nC_`bnS?c7NR*8VV&c z=x`nk-X56%i4EL?HnM`TYMFgUt%RvKuYkT%Rp0Y~?PNU~f<&6IJm=Aa$2uKv*I&>M zW8ugJ!1r6sN1p6Vi!r)!Jsx$Ac$Xpd;h4v(d9z&0^lwHym*vF7qg>;@dt%tVaXnUq z^~E!J9r@ikb4;=j!TS91S1#YOgU*81tS@Q^a`RdL&du{1f4krCpFlG^VcC94aQ}2I zA=rp^U(&ea=~)%)I|NQ=X^J?U{bi*7grE7vS3kz&U?FHE9H|rwa*iN(4q&69^_juK zk*eDQnKYA`yn3~cGQ=e(I9-4+C%FI*uq%4 zr3a=FFub`*cpqqb@ESrGJX*H_5e*!>mT|f}B_(mtG|?;uU@D5x$^l?NDexJl3`NeP zT^-uGjyb;sRCqiixgHoo;{Zs~?o4C;6C4m6y%w0`ckg{bD&K2qjyQ-Kn4g~#Rtfh% z8P#^q+VQb2OfYJg6dl4$VB-o1)n?8(qmKUs=jU&~{eGHu}rcQRpcE-=Tn)*;%DP!4MY3o~@g*xjM@m)`pjgMP*yrfOmFDfPoZz@P|l8_U=W zYr=w;vxyDV2n0@GKRsjY+~>4dkh!?vd~6}k_>CJMr2pi%|KoIPe3EfNf{IqDy>*!N zIGiRbQ-pchNk9JL=fMQtp1Fy643b<9?=fON_7Y-9rbKO&i67eVEI%RL?Jn=Mm+>CO zDe&U7JMFBNQV}V01HitpMA*V5()F_5M;W{Z@i~t~)GW}@R-?_*{;;#Bnr_cbV{mV3 z>tp6~H@*ClFj5$FS258+ITPkS=Xkje;gx#p&K<^dGaWHy|KMvmEFUN78VXiN_8703Vv53MKKXm?Z{cTPL^(!^_h z5k6{U7>5JO4rT_e%o0vX%dgm5I0w~?YZiyG7-tjVfnd}P!??Kymi>qS@W1{@_0$i3 z`el0ZauH&OaB7oyD2=jU2JyJ!fJ0iX+Wm;dT>w~4v2l2azkD&D28RdO{0Ml9y^#_I zN7qJZpa{ly~g;7CM9N%5xBfaPo< zaT7=%;1%}b>GQO_yvo6fB!z?d#;seZS_je(|NQ5eGd)VT`Fm)z8md`n6JjKIFMw)v zY?uQcd#c41xGhw?b%OUoxNdMBw7J>FV>=|~)Ho^OCPPxVjpx$_rZ~Mw9CcAQ%ZpNj zk?|SQDUZWR!nu>;;v`Iqq#V1iYD>!|LF}(2-Ch^SnWEHeMlM=`fF?r>Qve{_;yWE2 zSO$LHhihA2TI0|Pt(3P}l6WnzFJ%H65K{B0v`*oTP1Yz4-lTdy|= zt{)Dx05~cZG>s4pIxm16)FcJ2lL zZs1mzrSiF6+wY-jI|ZbCjW#Ov*X!N!(=JxfEJLW5_bHpH^;ov!qZhPeWZ6n5JS5bT 
zL{qpQCQ#IG3}MX)`mi$=^c%X%_;1pf5-$6eWB0ZI{Az5TQyk=I%~%WCUmFm)A7>xK zeJKQx88CZ`8a-3Eme39iBGEI0OF!Vg%w{}*0H9XGqmezWihX7;^{PpjxiN|D<^;js zS%Z{kJDk!oCILgF#T+4=!x;O<_{2DNuVa)!RvdswfKS$#ZIu@N1a0x#zs->6-##b9 zA>CRoGbWJ+U0xAEFtE%`*5Ww!j1}YW)A4Wo?KKZL#ydvcyZL^8GSh=E26&G*ZIi0| zpa$yXx6C*9zIl`K0+93D=cJL4y81-a%k!~!c-S)`GwU3l9hhKLP{Av0bv&CV zTXfRZh@iw2nn>BrisZN#i60^W*h3Jd7wRIa_2v#5Se!LVoL$o?NuZ2;yJ3@3KsA|Z;LpO3Fr}p;4P@Mo+uHVrT1>V2>`3c zx!oX?4X}!S0-%_SHat1cIxu7EAEwWY0YEY|k?#9Cr!j^VxRi1~COHIyukSv?%U6vT zaRZ5I?Kpdm^;VIh18%$fG;3fE0EUN{OR2@zT;uGVdsqz>+q={1i)A!oD{1sbG2Op+ zJo-?xQdPp^=0R+L4YM>vwGI z7I)jy^VOyF<%=(Pknl5Y%nLsiX!6PYJkd<>TY#a1>%u#jn4PRsgV(nJFgF@ok*?N^ zUizJ^EH6WYdw@fjA(+bZeNx%NSfbNFGe~OSS7>Npyt=>$Vu>}txp7N1{e-oRy;{GN z$lB^&Q6xSk0pWK5(hHBlRJ<I&&Mv630pPKYT!JaHZDGaGxv>f%xc+l;U0YLPyg zh^8F?+{K9%51l9XsY|@oazdz_^0n9prM1mXQgxFN?+CN9UVLVby64ivl`qn#Up)rnO{WiT zJ)p^G3wGCGWSgl7L%~9TX&}@imL=zCt^xZ`qATpchR;8HP=B(__apm@CE(ro4>B#xUncSL{EWQ3!0sxK;cII`FyZ zWSuO(!s{G{Rv)VI>2hf6>T85%!oJ_=72**YdaWj$dxg6C7`c~*`JwHn&c-S}OckOT zTqj&-6Q2Rh9@mzZ!+fZPL%IW#H>~Oz2begvg(jwoWwb8U(XLb*>Q67em`iPy$+tiH z>`PqS*#v77W9htlJ#6w_B&VpJI*5;^>atY9d$y{Jd^IM=PziVLPQeUZ3*Uey9En;1 zCt7Y#oBVz@$zjqDSD1(@+;bm4ql7)^fg^&4iH*Gj7xMz(A*4JI=iG)iW^jHwZm~(| zQhxy9u(~k=!q$;Db>OHVNtw7lo<95Ri}dqf{4%}$_FHLYW(p3=_@4|8-o5MV*o2nM zd0vi9(*U^343Hb>DNYi4{i+ENjI`l+b9zGJfT^i82-r3r)*eB1S8=x%)VMGT*aTj? zh9eD<_z@0#LyTyzx5K7z<IUav3{2rIp%ED&fqa-jEKc=a@K6g+zwnA~sDVi4aF8 z-q9YGS5o!LA`!bD$~GpL0KN&}0_02euHg81aIzP$2>hITz;mcd zq%v1Z?6Wxm8$#UkIl)8*3w~3|uBnID{P?aP=Sh{9nt^as;%_(A6Bbo~cbEm+<8u*i z#r8RdFdYI}85#jYfELQj<^=*21OPHtJY(D31TDwk0{;U{xpF^k3vi3M^<tn}PMY z=&#<*poV_aHNQvy*l4b{K)>*GoISxz@ZIqf$Z0YGmWzAVAD-RJm%dQ>uyUMy9`NtM z4xKn2+{`8La%di`&w6N|fc{su*q7gV&UX9pU%%TIr!9lF{M)Iu3-Kg`pG`&hiw9%Q zc*o$xKIC}%@sJgaMR|UYtGPUxKsPFF^v#ZEzZvS7C+$)rLk{ENwdlW<(|rYy#s`40^PqE`$$-1o zbQaThz)&k{DGaUBjl@dLKreBH1^*Sa1N}%}M#qQ4fodF}KRQO>=4(S~diHu;A4Wo^ z@S=l>Q`%qcCr#z(0Q(`%bgZ>Fz}P1QmGp$RYPSw&&-cujBjGtE4{@?QI7iyv%oW4;kmfaGnq!D4Ic2v ziRBY?&EGN_{ulH>`+VLXuelz4F0UQwGtWcR;X9V)|4}EuK=8x4NoOa^p&f8uWKsPaGl4=H6^ zfHX>M)jm?6)-aD*TUkuc7oG+PIz|(5&>%26>+${@ufr^uPSxlgt_k-Xfm7c5rM3`e zNlH9$g5>=e6Qspu!bfdq@BVJqurV5y&Tj+!%YB1L)DO}#v_o4wgJB6i7M1jD{wvll z;SCWWxYnOP`vOLQ*t9TIY9}Kx0#*g*I6$B6Bh15O}HD4We~LIKD% zNC(Xe;98+U7uFpLrH(KaGdz&KcxH<`!KY)JSn?ClsQ6$Xr*<)h=Y&E}&wR~`T*p3N zVNQ*~4(w+BX)}o`+KM``;+SmqF#JTi&P=Ryke26{n13AjjnctBa(01+2SI6dRN)gE zm#ru1Pd@$u`(JnZ;PwL;n_60|FJbDYLvA(o4Gr-wbjQBp-m75I&oS#G7)r;VGv^vAhVIC@N6(oX2>fb}Vsc=)^c-K!-`hu?w9Tb`pQvwH?L) ztc4%l)IAuj55DueAB|q;z?;Nj3Hv6!hoPgecT?Kx;aBW}-6zlWARNsADfA{M2h#`d z-6O5yY}m%Sz&7DVn>aRTl5@;PGlnE&`Z|Nj0qTP9LjpHwrV&7Pk;7h__aYfIriT~= zxF|DDY~viViTTab)s3{YLQZkOsuK{P3_|MW8S&r7+BOn(;;kXQc=_@bD$@Z#I`O1X z_p3$~44X7Sm1Qr4rNq6?MrTk~57V{PdFn?!iE71zRshk8)JIiUg~$On?%d$O&(i-* zIQU=$*d%t5*eDeoMEWN)RK^sk!l7dn2O~cap%;M-8ik1wX$?>X`14C_N;s?lLL!t5 z8N2vP~}1a$t3M{A-WN`FkSc(lrH}(*^67i&wH}djp7E)*q$|w9!5V*r84K!+Pw4{k*Eg_Jk=2WrrCO z?Q+q1U4WT^Nv?~@j5b6WxmIhAG9448UU3-FSDy*#DBl81(N>?aP0?QF+OMi~ZIyrI z{`d!DMO`tL(o;NeSq^svie8e|{4e@N-(*a@?$~lcTU;=|S`7WT(W5KgLXFD94 zD2?yY#<(DGW0Qj(W8S=$YmRj!W0f6o8D~LrNX=2aLUT&Hlt9TMWkBfM(Z0+2+;`L- zIHnQyhj*n-r63ZN^QAP&)2WJi#@NUtiA`{N?;^oTi4a@S=FGU#8yn9!yI))~PaJSw z)}B_|e9iV^9_WFN+x|FB&awVT-ItmJ%^KApDEa9HTp3%^%thMt3iM<^RIj(v$Jm^y zIDW_cIX==jjgeNkXWLvC3<&?~%&&?an^X1%$HRfoJBEa-(Yr7uelhM0ESCMEfX zYuZ6->xj-=o5hA32Rv-e4^Pnou&-*aa0U%U=p{1MDE)0AOQl&PP3@Z#51360#sbH} z1L9J40@iz=Eml20nuh9>iBu5@X$=#b8s?e3#cG;B;I%_0ts^A*yN5WHFTM;dk7J-T z{}(@5NKYQ_r00+DA6WwAAptdlkUnian6$P6Ojb6RVGveRD~EFLz!|^*(+E<)Vlc=- zjXEDXb1s~!Q!<1#F-0A{KAOg=lbELnpTM)h-;5!g8KbZ&Dhi9!4;R3!2&R3Hl{u`M5oB(bw(BKQ^ 
zNYkZ~B4Sw{HBZva>pf}s2H@qij&u_}Nu6=V=~$a~CHXP~7t|%asp;)L?*ViHj2PfL zrnW(I0NehFGUJ2a0q4{SAm{U^Utr>e`3w8w0sa7IU=3-v1~YcVS>bil^u|)4txP;W zSNj_K7R%pHm@gYUl>cOoJo}2AC1uXZm-7pljH$5);Nh3h1~@<0|Acnd%I#_4)pD2~ zRxl%6nxCUgfLf0dbez!-Y3m`T#|ZF9N(ZP@u;spjeKx>dTkdmAmimXw-~%iuP|=tL zN@l-=u+@Fvtf^+hDi3B&Rppp-edUDEh>Sm08eN0zyPOk8fMTr~dSEusn4cqnW+a_p z{`Qb+U$Bp8p;5qm#0Xj8hs2qwPe2FY@dC}*9`tYsW*ZzDNwc#zGF}2OZyB%!GZ8H* z&K*qV=@4Mtjlg3FjYt6p=8#IWPl`JX5~In=0Y0Pf;?N)?2(9WYzq6GcwOOf*p$`0F zjhy!T`+w!HV#YI$8Q*m<U1ofA&cAyS0gB2qsP?vdj$(42D0}E(}Dmat&+- zBaINKHVQ2h^+#>p@ku?sT6s>Q2ryc8Fg;(I15hhC;=E$B>Ogv5!0+QiW(;AnYw8Rs zJCDTPl*6`R@88**UVr_5ScVvu5n^S{7_t$NPNbeD_^Lu;s{_XG3&LyaXJnYS4zxqk zw7wq=GW_i!(B9vMVIYE3KaRqkFc5960|{%d^!TfX#14K9ELn?j(h0nSHR||?Pr8cu zv=IcOfw_{}3VqmKhY4`^({I4v zAtm>&YD7K;x43`-7DMXq*7i65vw!fBPN}L91k;MQ3iJ z9@kENg{&Y3y@l2Y%LRAAg4dq2#^E@K#6)u$&$Q9OGQb)MAH=6rxBy|v80Y|@#LfY1 zW20u*CBHFcuvy4Z%;8kkk2*z3yLrqzFhx2?8qTP(Yv- zAUJ=Xl|gA^e3fo{5E^UMgRlT0;1v>^*C~r(4=&t(1C7|2knGSb%`Un*L{3PXytF`g zDeME4YHN0+J(`k6fmH{anZVg&#S>Mo+r!taJYgg1p-kO0Ef#Q?`Ox^>)_jm_!XvYk zKOEp!&myhxF!o?^@n`16=e%MY^F10L-@TTj&^)f$UjY_v^EZ{txr2=_Ke^v5^4EwC6 zB*uk}R^a82w8Cq7J}vFC{ZX`S;YTT}*C{o|(`(L;hq;@T^J4q17(MUP+>52&GFJt2EJ%(kj=QLJR?a_})-3O${0eKN{P? z1tW*qDaSbX$=2ky3BU!WMkn)L?j-kYsf&g7Y6Gr@CTNB0|DN>U3VF^px^{gd=72es zL2-ljmUlE)a7Lr!{4FCC^>M`y!x{4(^F+Cpv!J##Cu!| z`sbog2HR|z%)et2^lZcUgx?f*9G}oySVpusmn};Zorv+st#|yLTmSn1_uVqDl4JNi zhK6nnC>hFVzvah=Yf)G9!Cvtn<+>*9mwo9#YB^XNPBT+8Xoz}=I=7ZyE&~P*776D6 z90_hB>WS%P8Be&J%$OTk96=H^;sGzStOnS#==^}al08dEPU{YXjTiE~TCj zr~5>IS3eG*g!tT{zRiaKa~x4QzlJb18vsN;dPuaX7YLyU?RO1xwPPatED^27{kTG~ z@(%XqJ^ctaaGWJ%N4tT|Nh3NiqLi6TPet}Pd+7&`S zdhV_>Mk~CJw3+B2UFMM;h-#CIo`?lMN zI0`WrS6|^HK!PAmg`tNkW_^Qb3;SU*jFz~$POx|wHyzF!I~(+u!1*j$%{oqY$%?j% zLn{d#cD2z*6Tohdun;M%8bdS$eIp1BtJDPmH{9$AbkL^u5C#I`k3DLSb8Z;PwvyoG zM`pl7Lj|LvS!FMo(>QV*IG7~>olF7dBj+T~Xdoc0Z;??GZMqVEBmMNDiBA+xexAXGEQBXxVyKWtcMxplcgzV84C*-EL4fmL|-HktqWcaE3L|G&T7t zkBDDMf`Cw<6VocSYD+IxpzGx<{D!8xWZFz*fJMj&QJmU-=db_Gk67^`-RflbRg!(q z!9%%e9Ot44It_j94A9 zIa`ODzkoVGM*QulIznzyb?_#s?d1*z!qQsbL^EgD+f*v+o zuG@}yHhv3kyk;GODWz&r3I9?j<1R^&Tza!CV*r z`gXkToVY2up?KX6%JgY!5++pAf(I|g8u+dM?wqm*RWk`4KP1@TB;8S=2&AMKxnuCb(pv{6*e$i&e z(0Qfsd`+5GYUmuisHp(%m}g8_^|h?YkmEKf;S&?s%Nq0UuYm)7Z%Yrt}y6X!B_ z-78LCK7@AIZ?u)yzTCFt8l)*clo5NBjpkC!rQhS0%o*SL+e@xbTae$3S~J~vKd#yq z+81SJdiJ_Pkf?{d9^AIyAIIH3_`H8#F#@pAJ&*L1!yb+Q(b-m- z$CiJi$zh89yL3_H5L9~p3d!#FrrC^g|529GX7>ufCP1ywmNQXl7TS&dtxRA*pX7v2Snf2Fwm}egc9JB<&FKDt30p@tmX1VO_xH4wwaOve;V<2RsZz!1|@9%HROGt7rf+ zlj3@9AT5@GFB4iDDGTSIOaPh>OfK{RaZM=f3jZ+Z%e_JElwjArsI!P54?FD~tp}kb zAaNIPzm8eS-X2*}cZq}p*3eYb>`!V{WE$Orj_^_3hR#A$gM?o*pw3ntg^3O%aBCt> z&-AM%246mMIgAX9AUqw6ke59$fHFfnoYx9H{TR_tQzU__(UbayF!RCpV(}GB502_i zf;kylFfZ%Iy1_8E$3$z|1^-m=fztfz7$!!?RLzM;#z!!D)KQdIVGh)&%jiK+tT!OwydL}I4mj@+5ZokE)+?M?bw0+(obfjD5_>Qj8Y8_Ku_hufF+SZ` zEL;U!F)xKaU)`8wcd(X@jBZ8!_S8MJ2i&9|#|4bu($0K(v4l?wPVxI-9aFaNZX@}@ zq1$V%l;ab{(UKHF4jE&S-Vv)D^;>$vRnuO|XV!GAM8F6t^z)Ys*%o-$*2YZ2nx?T1hllHXz&R_l8A8CVCC5#^DC4D8j z2U6w{V)|>(-^GU41s7RGswarDB17xN!7tJBQ)jV2`-~rplxO-0t)|`M7|cW;_KPL&9wAoaNZt=%RN;C^u-zM2dr6mo)1k{KYlBGIE zF~BwpE=o>VF5ih7!2U}qz2W^-v3tnN5%0h~yID;T>$!phM;#Sokwv|_{3;GrrW)|g z^MHys-+r4-qKiHm)0=_kfL2|nhCyz*Lc$3r!M4UG1EG1{C_F9LopvEH*k!_0GporE zAgf|*!_ivxf20>>A!<4y_a^gVKJGC8V22af3i$+y7dT%DgihhyB&ZVML#=ZgiNrpt zsW#?DPx=B2%fukhpTCUQw%RXm6GiD<<_^t-i}92M99?j8+57{dCklZMLjo8>B^sqf za}PJn25g52N^w#HrN^- zp^Y4ZydPR18W2s5k>RG*055HUW!p#R$wR_9h_(forwJ4o%#hFVmXzbOg2ez$-nZra z&SRb#oXB#;yzv-6)_FB2T;tFDZXSU=hqm0Oayj|i<-gvdAOVfQ#pe|)u$)*YY_2rY zjaFOH0CzlVYcs7=9)AnyWiZt8|6x 
z$78+ZcJXi2BiN6*w5T9HdPKQCs%@^LVY_oXEI-)NsLMJX0so^@QBGXqZ>40J_HuoU zsEy8jp^z)up<1u{M*hY7UL1jt@X0MNN3XePID6giPVpG zHIeLQuhf>_J=2t+yPPRUddLAz)dJL!n6;eKcNXR`0u{|viai{_fW2;nPtvWXG)hoi zUoy=D{bTF`03!PIyKlUY@Z}=?WZ{QteU}u#*g`meJ*e?VFy~Nu-VU?z+R$t1&eXd^ zvLlcMlHo(LSqxfD{(Zg$#EQaHWX})>G-mTO2eE6>2<9q`gD%IwHFbSX# zNvC%BuG0buly(Krl-z^XC4`sI&FC18{Ug1hHC9-pmco5JGzBtI(7NNHc6_3L277kw z+cEVq4n4)9QP~QK?lIAOP64;aXh$?@X#xm~&<`>@=!J-eYb!8_hWbS7NX~*DOiZf9 z(O_V9ke(Z*&s+=_?tugn5ijjv*??4#IoF@9J3V_z%y;@^8)b;hf5f)O*+hTAV#TuB zlD4$|3KK2xulq$i*rA01>B1~Y89^(wt3N>}Oorea0;X)VC^%AUvV8zO3g-1ya!t9f zc?KE1sSOEwHt(ejn2;j#acy`cOygEav}Ne>J$zMmFl*UGa(+zu=q?;gWzLGwlIyfq z!`eXqNthY?42y)7hE^p2eoWpD@fYYMsvBE1;|MsVkFI;Bf%d8mp}lC26i#jefKLcF zTh#FwQ>_Mm3DSxN6}yt?y%B4~gN> zRBhpGt3d2nXKaW~GpcpwZ+Lu^_t1F2mFiO=jqXB&nwolUD|{3y9`U0lTMGlnP!p;P z{72gmenbdiXnh}g*;_kJk6$i=Gf5~|!ygDn$#V-s4)y`?F9HO8&UzcC*4af%S&oJlv-uOV8w7}hds)ilkMA#VFyfA{ZwRL04pTRVNIya(Xe#U@yS z3+Mu{>nPL@h*$OFViUkdR0}p>0g#)qjDz-PJ#co0i5ei&VYQZ>fQ?nE$xSbV(gn9} z5-ksmu*v1Shl`uI5=FT!IrUQ}7O21`0f>=9b0NdcVE%+e!$kxq1G6wnR>Cb*MvF^z zIGa8upqXynx)o|RBl|e{0=`|;UDN{MGI~T!D9CYBzLAa=vFp>GOzFPYIy=j0a&{JN zKvs8|vR8?XpsE2^E@8fQ_f9w=tIpJHMeyN-2}nF_+VB8ZD$&B?s=^_fkdCSr_VrB7icU_kcbJ~FVG5T2BdV@encio=G1Y&I&}TbQ1Sy{ zU?Dh+w$=Ou*8R=l&i4S|JnUkpYdm8&EQ{}gL&|V5W$+;g;W-DxIxH*hS#JJ`O^drx z0(77%o=2mt0ut%Z2nNn`fC0fH4_l`|(PsiY^INcS)fOM}y7l{?wCp`;#?2~#sg1WX z3*-~?9H7s?>=<+7n21pL&R5=JD}+XT)$yfx?(q*a9gLE1ax*{@|N4B?;3Hi3(fkpF z^EcPDHCHO(E!%8a(qqgY*L;W3il1*<%SFe-{s~ik*>C6cYDZge=0nDY zmS&viXs7++9f#Eu(mA$sn&hc9aJ@^5_BX(5j3)izv6y#DzpUR`$e-LE{>}AVUbTGj zw$EoUo%`!;gEK&#A!T$g^5grex`VJB8?O0Y9-~|@t%$Z?^*!jD2bF?`e~)v@3-;0X z{rI=cp^T?>BBeP1O8(V z4B#C>aPwLH^E9{p9N|em2OP6508)R&;26v$YooqkB)q#MH~RH7Mr`sTegqF+z5tl+ zq(009l(ZiavFN;&{5P1h&DZM)8(7l$1P^$c&> zPJ|@Qcy`ko8i=zqwBs;{O3ejHdte>l6MY4Cab~`nd&$}A80^6$ViG(sO>hG8M*u7j z0kOupKZId$PlLEK?NEpLg6f!H9FPy`STkh~>0Ktz;2MPx9O8t_yeR2)`zaIorVZ@1 zSI2P{v#|n_{{zfp>g01Up_o1eo#aI!V@kSz=iPLD=o&WR3)D|6e)bq8?uH)L!B_@B zpFrmej2>g6^Qrr74|et9BC|G6UB3o^MiSj5OgwcP`g9+y3*C-qOI5%<(o2_{GKPk|tc*|x{oYsspfi7cM8MIcZ=bmk zh#McjlQC3ll{L$lzR?zgI<#U^6&hobew{+sAg0(C&Ea&!x)#i5i~=J;EdG6x%F+eG z&4QWPBj6zsxMK3xHT98@d;SR8Htug@_ou~Q74@pu+6d&5X`iLA^ zNY1;E@+)8#L#vVTyJ7F$4)^K*F^p$B?fl@u`|0l3X!_;X4@1apR3^>b^fB_?189DS z@Wy*Mq}+Y)dYoT78wwG`S9!g@`Oa-L5OuIJVHBCU_Jk%kH~Gzsd!lbrHcAgG*h zb`{L3-Z#2N6SmAvU8h?=a+2jzOPazlL-^qT#v|6GNU2vJgKzYo0-Hp^1BNm)FcOfn z!7@Y)XO!hcbO8#czW(-q|1hO&3qVpJB>_ggIXNRt)m|&{j7i7?;dHV~zd3`C0E@W9 zhWyD_b2zPFLr+`cz^6}JNd8^ShC9@0#zAwF+J(dKAuL%N%+2`tH8|G}9E-={n%8NQ z42n>O!;Xb17pA0Ezllyb_U49h_)pS37=dskQAEku$<>B5kp-mm(TEg=qbm1}!XY!8 z=XlHjTy`5TV=N1BuF+12#>4oS)z%5yxQm%ifD6hLfV9yzGmJ8Xl+VV(?~gA2F{8q2 zEWjQPG7!c-JPve#f!OY1Djq_0oM45*YN#JXET$@rbjNgR7ZrkBiMB}ULJj&D-n0;a zJ38Y91+t^l5!bDp1Y?j_HWPWO7=+*FJnzbehl`cs4#ZckL0wRmE6(RSL{SuAcp-Ob zKUe8-I}nX{&!v^czupzmU!oUwQ*R4~!Uh5=qj`)ugTXi7^Z^I$fSsivMf*Wor{~KB zwowwVZ3-^oa-0IuaLss_e(Xnx^}#c83l6?SEE06IB@D+x;tNWTJ~1&4maTB-rwxIo zxx&)pdo(S|GW3z9#B+SjefB{nHZId{XPIY0WJ90So91#EOVEpm+j|m0ew6-4-!lhi z7#tDV6}Mx{0$uT26i8t)dRKGdVvab9l;Q5hs7KfAj_Mm`mwy~%ezQQ`4Q-(a%zA7? 
z^w*0#C)cHziNfN?&b1Fiizu4MM!(OpIgm!COYPu=WM3W%XK)-e&-h- zXa`gfed90R7Nz-jetkMSM;x4)=%bRp4R@xwLLEf-@T;%Wlc!&@ zf1pArG(l#Hl^HXTWh;HtDbTo(3LKm(jpA=!zkw=#Fm3X_U||bx$V4q?r$pnx!&N85 z03b}oksQ!rXWAqkvrY#}+FGQI+AVCqCILSN!Cz%Obg*E28GG0F0X8~Eh`9&HwTnbD z0qX7{RN~+e=zkeNagJ>xKrPl2a}t~`2^%`-r&&~07xwidT_j9UCuqEeBy@8F$p`>z z9Lb2o({eG8{_s2BOWo{Ao2y@9+qHl_4xAh|VUhHK{ZPq|>KG-K5rKg*I3^6$KHNvC zzn%6#|1HE$?i(Pvo?>u#XZGtI`lMZ&@3#@jqzGqu&io$2O_G~(zfa8Sz?KqtUs zU>zO^i=`<^8D|u?qgPK~;CX&d2Cg@u1#Ig!7KlR&N_NkI4$XkqrCl6*M(maB_1aRo zK1>#M1jqdUG4*Cal4jRo-mQJj+V`ri-n*xJW_ng&W(W+1qzC~Z1i~N*D-e@^MoCK@PizoaQH#t2nQ4A^L1nRB@~NJ?c7?uA$S*vQzWyg)N|%Ou(@(zl zp9rQQQvqvgx;*ob!f7LEn)BH6k?Okm8a?QEoF*;|;xmX#%3&`Olo=DqzVq;*$0_Fp zbCd>rtb6)mAX=KQQhy$JEh2n4#3iiy`at(t z_W0b58lnerDsp7iig7HW5_XQOe|ED?kiBzCA;wvyn7RspB~|g0-zTaO_e?5xbhc5G8U#nVv<)S}}8v9Bn4vwG6bu z1_!7nOj3OmW(ZXuw8sG_!Ij}-Lb3ibI&wc;38p9PNvuooIb3rbs`q5jGNTZ5Ay!qj zmauXDt^sJqf6m9ooH?JwIqu;?^c`wMnHm2vmfR-CH1l`vjxVm!PNX(sAmUT_oWOKz zx{}$d4m;Wr?;M}`233zJr1+A7@a6Ni8LB8o=&!E1Sn0U=i81<(8+mzeFnz9tS8c#G za~yQ+@4kXD7ylt1Zf3{fOnu21`scjuqXl(GtiSDWm+KbmtuRJ=wy4k(P1BK3biASd zWeooAIBI>zcb@}pF<;gx{B?jepoo40$}w~q)auBp>+S-_wdfn%i?;H-q7UwK+TP=M z^DkWR#Sdx=Tmw~3$1t)wy_<2M9e=u3)KNnNqhZQZJ*3%9A3m8m&1y1cJxKZnxhR;L z4oj7e0+Kd*Rf~Hv58$;yO!lpMNmH3ZLTDQpuRa%6&54v#&BiU>8Zj*umF$@M{t>W835w? z=mfn71F10Hq`jJZGrsYGm#ZHPxStu1f0~noufO?D z65N{c$ln6KF&^i~xEPZ|2#0RQZI7&Oj6sLkdg!S8YKh#GWg;aN6!io0PG6>9*(R5z z+M14bO0nV_xiY_r>I{`BxME)AxYNoyw54iM1}7>8NwYZT6san3GSeIL>F%S4=`Oh{ zD@63R;P;~fM7p9$Qd67H?~*RYYDfzo@1-YCO6ly#_4KJ5Br^vxyYxAWuf~&wC*T%o zbSXxGP>T}1Z_%i^5 z%DU=qe1bSTXO?jE=WIjH#n%WmKRzNwQta&GPmu#(P&FbvD@1!%x!(qK{4c>&89Y#> zwbG>n1c=e$0?c(@=anTYDB;X(N!vCU(`L*lp&o|$tXNuj@iZ;oWjy$@c4JPEQ4;4G zQ0coaIzY>ywV_saU0Mg~)0G?Dks8LdF7~POePewu2z-m^-V?ut+mMzjY8`RlqO1 zZ>%f5{_^#7X>u698*-Rp&S-7H-|J*Ot*Gk7MZ#qAvB%h7n)<~f>4c12FXhxLm^y^N zNArrp_-+5x`V)&DtUXmm>rC&LH?<+S#BWfV4y3>JDJ4 za3%xYiJM7RVE|qRCwAJoe6}APx7yhPd<}GhXAHj7H+&QSoEG>Wvq>D0tF;Azsu^Lm zLf_5!lk+%t(+AA=+{g3y1`=FAW!o+)z%9+Kv!`~6#2v%+q=0fMYrj21q^-S!RA4=7 z3kF;zQxQ`|Jv>tj*>4OmDsNN*FZbcQb`gYy?+L>7&^ZcXbPa=x2v=x@t?JfD^stH? zQ)Mp0@L#%gE%lM|X=!~mXwNBpSef~BS;U6Qe|l0zEd+)VRm}7y^c-j- zD?%v8z_IX`heDQ#WNt>$arR1onweTaNF={CX@ef$nP>gcZlefMThs7|U;jsM2jj2A z4AB;Gl$4baB)}2f<(;|Knn4bIeE2V}sVa{H z0X(eI=IF0>4V0x17KeUi(hfXqVIZ36Ea!j|SO5>nu@}cUBae5GbhZJ)?VArGSJN2w z5}l$ps8wBdV`Gckg2^8zr>A~41(^2BSI04<48r ziH+k8e-<{R4E*3AdCph|pK<;k-1>Q(0^d4*ck>@%dzNW(+-{8;BQGArfmU5ErWnX3 z^Y6IS)EXS3gQ$$KOo#W|j)#X)TjpA%$$*~>k_?9>R0X>1s|3`3qGuLA)`Rc-2W-?D zWr0FCMWifau)$!q_>_H#_|CmP<8O{l?MI#y8dNpC3TkX910BNWjB!N2v@d@8@_T3| z!SU=M9T#J5{awOiziJ>Hk8S$yb?5KA)u`*_m>DP6%!4sAJ%Gn={Fc{|kNy|HD#_{;O7x=U6s+!S&wYGg29`{c>l&PCa zKsvCp1nful2uwUv=Wy*H0{}#iy{g||7dG*$S1)r7zXb4Hh-sRWaoFP^)0a+P5Cth~ z+1Gps6^<;w?cQ~N%D_IwZN58S*Ukf{27l)kpz&Ve8PBmM0L^=xM@=x`j_O=z^6BAM1}=s$?06if3vGM0>foYj{>L-+ z6aPXu=DW{uzwhzq%|6NRG9#=Q*Ll=FTtfv^|H=4qUCQXaPhYmf9Hl`4Cw_2_;(&PW zJnScbv+vrId=?Hq?|f@Gs?a1(+NXFPef$61P7Rjs@fhQO@q3J(M*>F7MIQ)Gj9={= z+h%PGXp0{{yoaWDGiOOaF|bLhnKd-nha7^gYbzRI9ED)ca_|;V<~$=vi#Bxr-oq;3 zA+2+`49v{J776}$ptF`a6<*Fsficq>NDOqDy-Q?bqT16uIUb8;gTKf(ievQ-^k|v0 zXL^a0K2792lu6<|Jv?5A{@{;+CcPfbZD_HXg9;+GqHN?e{~)C&_Ycyg;pc!|GYlVj zuTRlFpOU&{3sdqM>FX$Tu!&F4*7gQb(l86$*HKVFCILQY;SsP>L6K=EFl0Y=Vl5a& zt2{X}nAXrP8gc3RxmG5d?-)RHiWb$<#dUR@8C)xFOJfhBLT7yoX9eqU3NzM()BE&? 
zz-YnLLWM{)UylGTUSSJ>K66&FZ^akW0v?i6`^pUx-qZH>`oI0kKYK5w>`MFBY6@a1^-M1F$y5vfqx)v;@T1xF7VHkLB;x|v{_k8JLF{j z`3JYs?AC0Yn>3)ui81N0-YcQq?dOWDm=XpCi0-piS8i)`J%LeVFtb0#CN>r@?!l?Ge9z;^uHJKBvOl1fq{e-ZLZL`A@^heEau#$KLFJVKNU2D_8; z)O-7z>Hf`G=mgsLMSP(Vv|8b}*4J`C1EL@Mp9*g(YEk~Ju3~}WC}~G=9KMAh!{$89 zKPeXL(f)T4Ioo%ppZvC4X?kuEFM}K{tYxkVYnJ<>>2#11&sS;ZD6Qk)CZBvtBzzCn z?L$%?ZBtO70!+8TG5xaw=a^>~{#@NX6jC_kI{R@K#aCqq!4M^YK@UZg1FLIW_>%FeK?Z`^s?o*>=6Iun`BD4aVWauY3+dO6#G;@_wFDs0b%} z+Ca_#__!3nc$6s_R|Y}M;p9Pnf_72upa))sB)OFmHJhV&or!5K@>zw$B;S85kYcZB(feV=~v z)1L+*VIo9$j3K08vZP%#k{*bbC)M1h1EHowpOs0^NUk$oV_KQxw~*GUYaZ4Fr8y2C z;MAxCDDx7YB3m4Q9_IWLhOR=G-{=Dnk}gOgf;DhsfhZMBRBA~(Aw->+iMo3!VTUHQ z3k2&%%VU}4exj#EROAB=AO$uxO-R^$mT3-+1;kqB)&)|KsVC~{V`PnyyODoW{Biu%`(-4|yU5hyTHr*?7ly)j`pdZI@8S$I=eqZcJARkpltBpw zglje7m4~^;cbN+A5n&zE(?2j>MkEVsY8KHLGtNx#6_RM%_UMoE$;V?1xlQ}cZL8gm zd-WQ0{ij?&70Aq?KuudkS_$hZCf0Ss&-tDF>+1V@ju4HsVUs~ zTAO<`?eEoB?U`WeY!6Hk?}e$)Ij0yb{(W6U2+A0#Gm4XuxOl!Z0N?Bsz90|yr0Ey?iUtey%DGR$7qCXhI{pM;3J=dfW$T6!GgIj ze)R&@{uXP3!UMVy_zrEW(N7)sqq<_eD|CybJR@AIujq^4#k**qKKK;#j5*r@f6hDO z0bgS?$FEKgdWyEzeZZ~Jo@*I@T%#WuY%P+tm$;MntZmyKe_Rv3wZ0r@>@BZWe|KKq zp92Kw!wv&>d7nA2r?`Z6y#_Dg8*Phw&aaAtx`Q>~&Le?}yvS+*aRFPw#D4^%@3^%5 zE-m_`ECDCKMrl18WE_OOAA5i}f>LR+eJ zC`J^5sA%WXXn`I`our&GD+QWy82_177zNIj7BqLeShv^F^wrm4We#T5?Loj6?ksg^ zw8O=Ykw}jecB2Rf`9B}u-A}7?2FXpr0O=G==88mwutEzJv~U3qC5)E063l@No@eC1 zP+#m3t8ZGF5pcv}2-gN86s?lO6kK}w@GipIcwA>f=D|R)bN%dWfD&Po1Aqp z?LB>*xxf{l4pLS^hnTZMG6M!|&1vZlin!hS^!)Yb)8&h2IlnDAtkO)HXVwjJJBq(B z4qd}_Y2>0&&#Xn;#q{d6>uG6W5yoeSG)SW`b|rF4&QivAI^DYcnDWh}3!cEM_Uu8J*+`p%;;evTq6>Ot+8d6^V9-hkn)_(7^>k>)DxsG^dv9OBWEW*+ zMWv%-p@5HjL)WB(L!yIOzoL<_Fge3xR@p)+VkI;Liw3S0a9>$n+)O)j2ya*eDj?#} z_z?7nY#(GxsWffCW59px5T%O--gJpZAUn@>Qa4)VEwWE=9;jSx$637rMpi9(BE91Q z=@b_U4pBIQqM_Vv#NuF@wF-FMj4Mfhxbv(nt4 zE{avj3z~LJ-phH(m-bE&_00ITRF7cX650Sajr$xeSd z-40|7=I3V8?Tx`}aahHU)`uxN!ag8yXWK zgv`TO21G=6F(eg};rZeM%AZ90$ z`v3tE!=w@Dj*#+^5s-t<3d z^=14VhfJLF&*@>jjc4gc+zKY1hShkx{^C$Da#95DQG=@$L&iwh0;Iqr*9cT+sMqn;}S-+#h=6Uu%L09kNaxO5f=l;eWy3Z>x0$DqY=NM zO@z4Ekc^RQ6gXVP?R*F0XAI(n%Oig0y%3LiOI(k=!f5=@zJ!|_nFj(6&fAx%H)NC@ zyTS!`dW$2>@ZcVM3fB$#5`98Thj1Yi9OS|A*&b~O!x$G0hC72emASZB6E%1;L3#Ev zPOK@v(?smq%z)W~^NA71{M^q94tZX#6$BJqa%Cgta>#@QY)A% z>vOJ62(ML3POK+zQM>DBfA_PgwiL{qQ#=E#tjH6^Df05fy2&WI2Ew*RD*=A2SpZ7- ziB$rTPgeX9>%GVE7~Sup_}P>#`r~>-UIU-Yf3fOSVIuHTSnwA@GB=;Nm-5m?x6K`>!z1xTom&lF(U;?OjcqnSBOY+4_-I>vemah7zYH&5iy3A-$m7ej z0Mw1&^PKVwWzc+ydyXe!9OxtVmOb%VT(!x(w>c=;hZeMH#pnLWC}5NsDq<=Jq-^mJ zChXQUDh9bJC9G}Y;z{ZgORslO4bwAZ7|Uq~86!Mc-9_l~gh0&A_ztU%iF+#u0A)0I z4Y(Mz!-zEy&l18V`vj603}yoYhy^az(Db&Ujn_ru(UT&Po#Uhi8Ka#hEZMs-@n|I} zIa>!K-PBAx3{kiZBt6%?NsFW)w>-UP<<4?T`rut63M++lcJdr^g9)nF9Rh9gDvGCV ze1f^QrGQc<-3^v8A!IRyp%MH7fa?kcekz=EEF*2Jc#`%J#7AyyUG%gB%uSO!6^%1H zit`3*3F(shDQl|hnGvA7Fib21FzU$Vg2?+Yu3rw?bGyhk8QrKOqg$4pt5$aeIca4icfe zwLk?h`s!AQgZ|A;ucqEJb{>?vmn{{CL zko=&*5ItHBCY0s&7p3-iK{PzvLe0UwnQ&)^_28~5C+!jm$1M;MiDf2zE2_7D{ zn6J61eDMe`pO=B)Fk_J{cNb}T*uc9?TEIaAC>Kf~9WFH%C=dwc(q?+}a0a67<~2*`C@ltquX!0SHgZUvO_wfThFBt5 zLy%`rilF93H6&kY z**s-EFkAhE%XecA?j{h}GS0o2>3X?7G*CzvFP>o|nh<%Jxs)MXnW&Nrrfu#s*t}C|b`UmsQCiQR> zj&byI-IuxkoTK=m2AD7I@959?6$LtRF`n}o|Koe~8S4VU@Oj#|k<5U3_{l}ER%oJR zDi{TA$OL<@y?HN<`8nE@!SZ0{cm8VbOERzGzGDpQRE(b|X+M~L$6IaJK4TBCJ~SC^ z2@@ISU@Re)_8i^Ey%`oUf7y{i?~xYnbO3^YkId z90T&Yqw ziv%EeOg^r>oKp4$h<0Smr2%TKWWJ+v7xOfJAn!jO=Ve@tzNoIT2Y4Hr0rAN636>!> zskMSf&b{X0ovmG3f;7`BPZsXP>FgDz^omTOQLjqNBJbtugk*gy(J+$EwCj zD(63+ch8y^)m0R4ezkV-tLPWF?>vN|^K#GoygsWIO^H3ke0w-O9H`bM} 
z5(mT$4UNJ-Fsg!{k5IgJ$P`~WqEZ!T(1>-rt{ymF9H&NnAN;fjnDqoccuv7a=@FH8 zmKiU621Df08eVyXe_Sl=1wtq>lntHjPh;a$h$CLTvSt_MgalULKic_bYN3qnDtQol z2aE7fZ~%6xm0j7cg8Y<*MtBcn+2Xu}N?=`DvUtQ%8oSmBZg6NMp5LL~i$r?#V@<9} zfA#NvkZ%0)Rx07O*ry#2L2E^6t?f|Brh?P~yyY0abA%ELUtQ+VIY?JJB3HDQkg19a zcnpp5!W%#1pcqYZacF;NNL1qWM5s&8mR<|4$#T*NJS=`t0Qj#%phj2AC| z=Cw3}XLoXDBHfh7E^p#F$sRXDHLY4?fr|QOWFn$VnyR}v3HVGJ8yP0|WQlb>NwXX> zwv2*t_4?<)bNC*K;8lbNrAicj=Ce)=R2|B62O)M#>vP}-2DBQAhaf*5pxDco5AaqU zvG?VBkerYY?mc`&*x5?D{=(Jt+AGhY6cP@`+86o<$KIAWDgbzbqn2R>Jgo}4F%9u3 zQa)4-S>ouS2r^FDwaia~r5o4yX06+e#5;~q<6nX9-~WeSrl0)e=V9G5E?EIfwj=$F zkPFE##705W%@4VcSFK1P4zR#pvWO7c*w6|i@(JU7{a%JZW4uE&U|hHF+#yD4F$B?` zu1*$~v4J?2t2DD*rmZL4ufDMJRdWotx#5IKgI`R zm$2oN5)Gwc9`q|rPCMeh{MI+ZID}~krvgQ`zRE~pk_x8~MEiIAKBaXl8gO7-!o_f) zsGt%J%cS%CB1sVVh-Ahk%Oraso+9ZY*0r$2Grm|`+Ey^q8t$01{z~YzObIuMw+wGv z!4oCaG34@?LXoioFT3|rG19`CVRR~}@uNzm%ALZaxTi(LWU_262M&zoQ-O`?8meQh zUh&U1b5c4?5`v-7j-I1F3&Z0uH7pS-9iFi4ey|i82o&&90GH8ejdcvhXzC($O)LTE z9If6Wyo6O2xQUM?;Z#dbdHogI6f9*zTAuU@C|D?T%u%~Jgp5<6+$uccgY~Et`aD3T zxJp8r>&i3s6Qwh0)9P9kw53PJI{FGkp7yMpQWW%gWBu(1U|=buZaA=U&4ri!Dg^mn zg6U7>jtWW!gTRr%YFiZLEd(t{~^S(9kC9<=)Ur zE=N=NRp>HgCtq2p7k-A&Xx$LT;WMmc2Vn01XRVQ{0sMdqXD(J|Pc&+s;$&sdykm3cklk~n9m z4~1slKKUnVp%PsPA0vp(k|zWbfpB~ko1lsg+(>)`$%#|qnL{S5sngBnfiG%&_~#Xi;qL%U z;;PgId;`F?;e4Wf8VI)$i|983M`_;C2;>-xV~X$nOdGb#C7}P~>*?la!Qy-Fi)Xn- zQ*ypF&(WTYB@1nh`;5=DQo#~M*k>1K(vyjYobI$7#OITHA8|my0unQv9VD_2;h9SV z>DqH6Y>q89+FGoWeQqLgx{wkvwGfd*DZdXB^SQAB-XpyusYj9x^9kTd2#}a`cUiI} zYGhI;vx36*W*b+)bS-le+AncH8Lpg;M}T)-J8niqhZ7{LJ*-ih;%hje%%bAE6x3f| zrO1BK_@KkIM4Z#Zr;pODiTU(!j(Wc^wquI0TRU@ZzL-|14Ow5HXuU)p33ZKlGYPFF zuoy^cAY-|mQZaRnSj3Q=_BW}~jLX4n=`X+X>uIpFC#@aMa4U6cG0Pj*fVCWRD#8Sr z!o9>jJpbyo)ZKNSQYnMrqXZ2N2xi^T44yMjVOWI}+(q1ml6`T(xQlF6@PLjDh=MMg zYJ#@wF@+ydM#uyjlYP`0B)rBMg^L$$AlxX2>=EnZ5>UOqnq#iSQE}I`sb-FCST)$G z^hJB@r}nNEq$XT*YTeAwjc?4NM7ymGmo7+Hi@K-ey^fn_mxClKs=)=~J(_SI4-VF+ z$%VTRgdL8aT}>ao_b5$2J*|cj1_`tnHGv}QiCVG?5&hO5{874o?Mj+>^pF#!Thh7b zE{8T*V2%=LT}N%i@OBR%#iKGTOs%4QJOyuxXcLoBl1)u;j{^=4(9-reVpe8WU_JvQ zeZU{965B@i^dc2Z+6EG3X|(dKE<-5?jbj?(wiR%7QtDS#ZN*yC@>A(0dzma;|VVdCn$M4(%Zi}=BGFcQbhFRiZmhiDGal(oxMTA+B#i(ImY6JSOJIPQu zKSag^f!;T7KTLo8$A3cL+Db@Q5>BOFW`m@arFZ>ta$E^HAPf^3H!&6#no9{QOu7)$ z%+<4tz%mB%SJ;=$5py6R)?^3sL#14pUmytXF0mdI$oHr~?)TWVX4GCw6Ii~$nr3n= znNeJPXCJ|c1VnBfB&?4TLFdQ%Ddc-MeekP~0Fr(<z9kIW ztALV;6qZ7eXnM4raasPRIva?^=WJ?ojG0LLYUY*Fi(_?czVV-uq0+yL>ZvL+>w1Wg zf`-D%21urvJ0xxApv3HCxWp~tg+jol(CiMpn43QFR*qF%MdAii`AW%|b>dm!rsV8= z#T5fs)i(SrEW;FuU?jZlKWcC?{y5zO;vWJI_mZd=aMIjloBDn=hlI4}m|QQ$Vm5TR zcIaDS%phCq+Ik%C5mKlo>^e4|S&*)w8V`>%Ct$4Ir zPS?wMDUi9B6j}`Ww#3gau{%esl`%XDbDbS+m^KKYB{4%+Sk^w(ng|qU&fe=-owJDp zYR@zm>(@;w3~|cr_H6n_d%$G4#yVNv#<(xrP)gMuVL$FAYgTH450Q=L#0j;yIDA8# zXHvRhWb!ddC(7l0ku1R2#0kf%wxy8hzSd>oob$1@z?-!eKG}?`w!p56@e5CXS4)>R ziRbPMI->u$hyMIs{CDp-=P;|%29x$1F23Y)eZ5=*=>%`UojhpUvYql(zBdBZqkr(5 zb;x^_JynJ|@X;0Iz7Ph^#b*`9c}G8Xh;NtA(pPJqs!@u^)<-tcM-!iM9fv%pC8our zn`vL#6nM__;gaHh$1Q%lUyZNuR59bV)+mt_CA@?&d)7S6DgttVN*25vc2w7<)SLDAp^= zj<6=NcJ$px)?pWQajVF_9>bSEey)`mA(`b<)N<)Ds`OOG3gSiVAzol`VuDg%x(cBYW&o z`;yaDF^!`L5%;l+3DC%(`;M+Q2L$U{u*?hW0^a8(gotS+9|VB*VK}lnCIppCTVDRHKq8fP&Q_! 
zr_+O9Jwf1uZyXT7jtkmklWmUgJ=uh&B5Z1@k%l&sdHUjW*EsgMl>+-yX?9{2I1~SJ zwk|bw5JNK$+^OBU&A|XW$0#4bO#UlFRBqZqz@48aCW}qeH&BNFTny!`sg|1TSe_cX z;qNWFvapmP>~@gt`|KX(<%M-hKT@2Ywa}f^L>zcy2UM?GYnFQvB?3kFgnSznua4Hi z)L7l0+8VplrE_QC?X_uU`4+;kS?J;p>Id+B2^0kNQdw=$eH2;uzG}E1#%e60kCjI_@{#$8qygfao&ZY?z z^6X~n*)QH)NYfuKrkQ(_BsR=3eq!(jd(-gwacXaN!h=1L3%ZM}c)ZSjWxp%0Y!a{2 z$C19)Qk4yvm}NDTf!@A27@>Grr2QIV+?FB}9S1Q68*m`_1pFOW)iD-xWteJWeGJtY z9XvRuIh!6mm?54X5f5kzuhwW+K5z>>Ohsnwtqq!hKMSRiK@XZs73kyARVp`=|eV zdieN$2t*Yii5kDcqx9Mhs!tEl!a-!jS%6buj(}G_OV(v{GE`rIO9)uLsel>BGkm96 z(Rcs_gfNvO$jD?B@NeI~3vr~p2?Az2u@O=QL_LTM8;3>o8O^DfR6=kFCSsrb32a}x z2nPGY%fwYQG^AgC_#uo04VCs~U6_R2)(9bKK6~ zKFwscQ!`s_%{lfB=98|he-`6CotN!;DYyzR{c1u)Kc6sG*gnQuVu=mMuh436y;jkP z4*^cXBw)p>aVQFG&AVx2ugK4(Kdvj9<5%6`b6z zElkmIt6-Y_ta2nw6zYYE)?kU0er*e#I~Hq!+J>=t))k%O^fRqPx&?G!D7YK%Vp$B` zWeS-a2$BZY$G+fqg)R4|_@OmT!9k(PG77>*;Yu77e!|~<;Qg+R%DW1RS$z^qevW&c zqkY>)z#Z6$&mNc{e7#FpfjRuk4;r&2{vmRO5Urw?E!+XRxL+kI;^hzy!zGY^Ivrca zTbkwae|fD#h!ZLOmakpE5Ptd2R|Dg-#TQZzeg`cQF3e5ltAAQNb8hbPP`r2*r^OAs zl2IEMCvK{!=%x}j?qk~#e!|iLDER9NidZdRzNsSU^Nd5hE+1H-&~LlCkNjSEY5nnP zjD!k>XV#4|M)d>Uy9hMHmG^;fg|aYE>GE=K*;atb;k%x!u`jNna22Pt#Kt|g;g8OO zwX>U8KR*A>s|?FnbNZ0v+KYWTf1mN78YgplfmeE7Fa@#kR^D&G@I_;oyFCLU~4iRIO@1g?J$g8ycY9Va}>{-V@sLo z{*wQ}qsRzsZiO!*+=TapcC{D}63;h6nWz!mu6L)O;QObBE-?YeWGEshq%D*H4MO6f z3c9#c-8?`d&=7OO^Kdd3^!tGLn-h`%+8V{g(VKk9P38SR1l} zn3w{JgNjlm@johL?llhv+2Rn9ttI&b^heJM_{C2_hdgvfw_7!Ux0Kqpu|;jh=229g zbq}{Vuk3)pmyz)i${Y>i)~HAiZaxeirL1w-K;Svvk@HuE2omQ|l#Si=*ZG3o)Jpe~28`4MTeasl-6MNejYEWXf5d_mFl|;CL*1})9hx*c&-+D9kbhV{--+4F9 za~Q-1_)rau#-GDz1kc>rrap01cjmi=MLw^w25u1GKrBgPT`PvSz0^s_LnntiY;F*z z1AbXM(>SUl3}7vt@cRU`L6XA-)7IBbJQ!mME97<4VK1#h~8jR#?oNm{e}4_$#|_zN*Si6o`!T`~slPeD{y06I zc|Z_93LXXj;c{skPuR>YT>I3S?eFiTG*3n9fya)5yAw~Jh3u7$C{M39XttP0LNHjZ1q6V*)^{MV6ae%Ul+t%OKV1|dy+V?G#n6WxQ92d@B zYU#x5Miy~bYkyiLX}|+F!t=_yNar?5j$FhW(o#JLJShu@LUn?-NO4j*w+i~Bu}(Pk z_B5vAHna+*<#?au2b%FO#PF4*YKp~L;Z&C_u!}z)W;j?e+R#Wg1 zVX}Y<_2DMnaRNZ*C1HkFu%Re~j-W7sHD!8g>do(d@kvUju>g-AJ|qEPh9Ig5j(z(e z{ojA`C+W}s{JXd;s4EH4b7C^Rti5L9?dM>CAhv>xGN$g1D(*Kp5lYX7&YNCA*=M~^ zO1jpW_~dSuLZGtbmEEto&n{?*;R4-b}{%`v)kv-^nUqWed3ivjm}>wH87J{U~%O zmPDYf4At@QS5`h0=AByzd)#Ub&YaPg#~+N9{uDF~Ko+)96B76bqmTA!n7$)CfPpJ? 
zDA;Gd)Am3-QUhhg)rJ<>=BcM;M<5xu7@;wYiwojlos_IE}?K{ zjGRi7LWJWpo~4;gAOr4osVR&b_-_o|AsbO>o003hV&Bl0?YWNSwN;@iVVzN-l4-i8 zKB$u69#Du?xL3FlZ^T`*gZPs`<85%Pr-HBUQW>mxWzeW7 z;dskR1cQ$Ghd?9FvbS6hKdVf93K<{da(!(p6FA_&4c;3H6=O9PtVSBazg?b}Uddp+ z(LE65w0I^I0Gf1Pg^&o~Y*$!2S9>d4j1_>hGLgsNo{4+o9@ii%3uQW@S0wPwQC%R<*88*B`+ddOsSH38>?R^>6 zxqtidVSktT0aAYVNxyUN=f3^Mv6R=&*Vyy%UEJ;+Fh$@5yj6Ielk4F3X0|Ra&Zp_w zX_)^ef^jhoAVg>on_mXc+sXbNC1Vo-qgiE)z0wL(??hXNV4P#CN63zX;xgq*wy3RWh*(|^;&VAoXb54yh1Wey<&{{I4Ih$5?h!&H?)|;r z{6>21(s-)F!rejb=xWNK?BX@pDl&E;gXGP#B%|=5wYevKnP<;cRR9q=KTyhU*>EJA1ebT|$PbA@QP6PdnNS7~3&l9&w7>HnkL~z671mxTbO^ zjd0F@pVB^Ame?gZ!SZt|gwj4~$v%k;?xSAnEowQgWV~~a<`D2Oqx0|q;V})PRq0#* z;7x+E6Cpy4}O;Z`o}*Hh2zn!2b|)?iDOODv+UJX;gpB35{_m#$~G46BfJ3OwzRbjjYH(5mK zB={bWfHYMP+4Qr8v|8Fp6LS+N1JkKAhf>1C+Q=+6B*-X;O~S37e`B1IJ{ITSqwH22p4(Q|w;Bay6OUVw zlfK#pd%#0Zx}2Ry8JACJ!Ea3+JsoL*eW5{JI!+ok3U4m?xAd6ue9$8C)#wI!g#2*q z@@2B9&!(vd%izj1xKvMafjmSnd#2^bC{v&`UI9XfRu)nPAwDPjjTjc%2?Lx=zj)`T z>EXmZYQmB|-r|8qgoKe~aDMzkx^U@YDs&BuynTYOAfc8(>h_j-smTQcVz83SeKZ^` zToOkGb&0(~&&K+4nwli0>7yIz0m{GzRu>Va1`RJ5A^KZ(kU{uu@El%aM9vW;(?b%UVQ4ZhnQ2&0X)A;}US+1i z7HLcClblfID&y9gE3*tGstgN|=WjAXhRMA$YcO(-J#Haa>S!oQ`96yVEcLuBYGWR>)l$LySJ zkbML`>dv;XKNK+L=RM9(eDu;f=(vT8VIc~#o*JeCBTT|#$hZ`q6%1=R2*VH(OCjk_ zUs)qlkhO7~3Lhp^DCjpLIIeio4eRVYS|})Q*01itHDa69rVJmFQ>j?Hr(1AK>ai(N zf7HDzK4kBmmTtXgWPY*E?q|aeBC#SEtOf#vf7XSE0$-flF^t&#A&%<(aZXUhJYQk& z8an`2h&H{-g0nQm?}V*$3n3pEIiExDz-N5Y*9yFNpi!*7cp|j?7l>^~ z>T}Gy2%*+mKCUJN<-|lL6IehL_EQ0Cwe#C3Ldz(uF5XFF*qe< zFje52d&Q&TTROT^@9-5Mz7@g;m+XMV}pD3IHxPZAe$Jduo#16o3c#gQAqXPD+vpJH6hp`sJk2X+tAk66uwG=(x z*Avc!Sr8N2=p&z@jWRsS5DU+iF!n(U!N1OHP3a%~_rH_cI2!t$cW#n3eS(66cNaM6 zmAi0$9QojBnkGDG7i+$yV_v@gVtW0HSJNx6T_#q(BW>+*MD^NS;Nxm>G5zF+KTmf* zoTc6*-VW-K51s2n(9B6raR35iklq_cqZi!8^6Q{0#a9-Dj@CT6&;(R7snSSey4y5*C&S<)P?h z4P|lKJIBa`{%ksX;S%H4n2TqDJupAHK`7K(`rv0b$%wv{){zbHVL-P~a4S)g;d?Dz z(#q^rAMC!EK6mz|REJE&4ATOJ;ZGiXL_+);xJs7xiY0fd$tC$pl(Jjecu3TmnXY0m z-mV=&p}=uvqo!W-P}p0npSW@Hr2&4R{15!GaM%{1K)?Li2Y58LfzKc~$DROg9=pxb zq|K+ut0?x(%=gXTc!l-dNK;Q2kqtfe_z5yDv>Be-NsaAsEa+>+c?53wc&*h?C_xCm zNDH*WZ=x(OOam8Cu?w$4M|T_h6a2(L&?z8T?73iF}LX_Q{+2v401J91G+ej0WVxWMmy76s1u4m z-ipx{JS@vWcN#J3H8=JK+>}6@2!lF?$PHgSpL#EjrO&?k`Sivc*HfWq;LO{07)&EN zm%(NNCc!Lc3W}IuL0GP0<0>fO(~}XpixhUkRjjp0W*p1MBQkG@j+-UNcbkNQ)`Hc_ zjT!==1QMq2qSnB`LMT;%bQ}OY07Eyy#26#ZgiKW`!SA$+*He2$sP0sY`--izSb zb?-AWXE!?E!(_7Hl8L0S^-{PKK6cf2Z@`4 zy6x#=5n8Jy9fZZ*r&M0EKq!AAEKE!Z+_DaTG%bAq9pa<#{7`(tjQcatd8k z0ps$q&*lb*-=X9{6FmUHH8l}Y&x1;!amoi+1KY7AntdPTo}{Prp|a}p{*^86b{upP z9E_u2&Pe z@A~?g)@;|j9|gr2ptB@CJopBuhG{^Gz0H2?;zXg5(NW-~RUYFHp22fxhSCDbf;WG4 zGwJPKM#)~~a2U&OY-}RLZBNrT;tp^|x{dG$z0%cK@`S+_$*midJ7J`(4`V_B2s*_a zG(^-QL^M(cMq1^l#c^Lxs1c*Ed`u~pT0$`@_}mJ^7GC1I`*@%7C2ZlSY|z*aO$H8! 
zM*7Mw^NdL<2zxdYsDdkuIyaFCm zJTP(=VT^d~xfv|8gkjBYP_l(EMJ&%pCR+{}Pj`n@!VlC*^}NrUML>)(sje|2Y(axTP(1LyU{ zjVQZTfpy%F)Q2}7ry@t_Rzg!16zfs?2FED*qG1L4831Rs4K>+XCs~ykb9t6Ds90J{ zmp|J<$)lF^!B3u~?OE^u?~!y*9@OLsTW87bAZ&Z?j&$8qNmT;ji3p!NC}SG68bX#q zv4T(RkifgMO-@@=Cpiv7slR79ogcfL&h!qaE);|Pla;i=v9y%jOmnx{7vRPcR`C@Q z(;G3KZee9VM5&QS6`((bIt~gVmR&mS5yP{~OX=3*$KWLi=qS`TXQsIiVYrogwS*-d zA2p}XzWO_ z3~cDM&vC`;B=M@;9&7zq za~JDkU09)@P%u*)_BRnuH@S?%=Yv4=yn>;Zf`N=#f~r+a?N)-QKxn8#7RY!$HnJoh z)P;C^uiC-uM|_}WQF(Ndnf&`|NNh)fBmojoTAEGX=;WF5^SU<6ox2CgWF|@ zV(i=06VqyE*o=Z_2t@Q71921c&i7%Zf*}SY73aoqvm#_)U0w2^YsmXME=$ z>rOwy(ebi`jx5?ygevbR@xXNx$9+DO4LkIT@!03-Tf2_$RJPrV3Kq`GAIjt6okE;V z#Q5lq*O((?k$%v<4aIlS9E@8y#${VP>sWlId~YQd;PReVv2x6Y)6D#I6j;f zgom^%hV1tN7Xg>yQiegEWPpZy`7L0fGAzLSb3DR}eHntf_xjn_O!wo1zd4T6vBb09 zblQqV1^g&{y@bNChKOjB^=B=D4vBaCRvt=LB-u;ACun4}feL|eDSgd*HshFiUcy+o z_i(CJ1p>z=LW3cTJMcJjGoUR!tI}zKfNJ8)TYCFZBKq;T^--X`lO%0J zE}G~?x(Q4OT@o(xMqyA>=wxj&BZfdRCTXs;+Vv*Xgu>pt)ckYp8yhH84$`~NoRe|> z$K)nhBCNR;yk#sUgu(R{_AsFi$HpMD7dwV8$1(AS!meWQ>P65qHhH&_a7IEcgjp}V zt4W+?9uHQRh@n=9StYTQvVg?k*JI%y9mlfE>0Da~S?}MW82MxJhA26GVr^3fzl#B4 z4+Hxe$()zZpC`nFPze-@%{2?{Zz41n($JL`2+w#ePW9X-F;@C=KwP?sp;iy7YvVn_ zKdZ@nsAkheE*k5x1Km2nN@}iyJli9f4b!Q@P4B|_oDUpzz>=^%l(fpsRKm*17U`tdAu4i?h<{UTlhlFy+5TAmwNb1mr>#IQ7< z)oMHl;278y5=pL28_d`jY*kTSO@D5=(!5dbiaSD&P%G5Rrr^&*_8Fb=OMY{0Z z7&S}XH+V;}?kh{IFO<^k#~d3?#UttHH1gf!yA#}#2RkJE;H1?33P3V;MLp984{qGyp6IE(t8VCpl zkeBri#u@Ffv1M>>;INoUr};eycL=2_8}v*uR0TXOkm0fwm=Gs7ihqtlhNT5rg6a9x zZgGWNnSrNO*|4$a5_1`&3n4$(J?a(Z;dtJ}OfQv`eTaM&L}g|c;x_Xv6anLN75o?` zh^*F(*MaXWT7M@XH6!6$6tYTRASZS9p+BBh8yb!k}*dg@r zqhUb`N{SoK*Jrm;;_5h?+Zv~QiRRmc8*~jT*HWyb;}Qlrlr1Yd;iUqn6yH=@jPq3i z5R8^&Tt``qI=8@4#w8_k(fHj2iG5NrSKFXdM zv)={5QE->wn>boW=nkkHYaxE^a%`b^ppa-^9#3dkhgE+ZdpCuTwbs{w+spHd96d?Q zAxeeHf%omRcYStlVjh?DTJlKnw)W1nkzh(NpLN2wOrksFL@EU#`?Md!m0TmOotwmn zwjclrWoxktQ{n3sN;PweSby<{f8vpN<)t+j3V4cFwwZr(kH#M1XYoQo*k@FHOyu|T zHYeC9{DcBaAFjQYQ}bZl+pbY40*slr8UL8;39y&W7*b{2Z?qZZlG0vDc_xY+|tiW#@LMZMLe(o;55T}DS_}l5a#&iDW z6Z|fWTm#!Gf0j-Nhy1*7⁡C#J25K#)IL;FZ*!KZ8%=+-XFLp zzV|NH%CE{-jEQfAlVKJW9Q$j<87duzC#%jx-x zSJM(nlYjZ{cPP4FLU?LPogDjWTcP-%IP6j+{p|3$RDm^ijj*6K;^j9tEw^}tP(zF{ zaqCA2bCmjz5SbH;8X!3B5U$h+E_nPYvT52aB6MJNJZj4`h`r%->Ej5&$t#RTvAxI} z!a7)KLNR!5l79;p$WVxxlNQk9LwFj3>oy@I=Acw!&HiNJS?cfTO?|{TH*G-2=O!?U zL8}PCs5WGwFo4ieB%YmEe#(VbQypfXApEu(;^pD5>r@!2JRwn)0}R^RuBNkH&!?wr zw}Tg`JPn_}oc`hGpbJQ_cH|HpXXr}N$gA8H9@*es&dl-SP*b8)B(`>Lsxm}a~X zb?2MYedbamWC&2PKRieP!y-Z{xs=coXbEuJV{d4MRc_dCWhF^eoi*HveRG1q|JECC zMAf>-kDdhmUIiYWG*(AHG8YqZrA=`w#Ko|D(_aO(PL)b+A;j)kg895g!WtTETE)S zp-|Pg5*lVTAm%PUElf_PshgjmEO3M|{IvIsA=HHCn0J9vc7RfNp?d&cM6xN-5Kx%l z&4$SsqPaA~dSQVtfU^p^mEhemM^dknhgYg?r+jQ1;W1nAR)kP;EB4tdQ=dG79=hK0 z$h~y#bNlIqS4LCyRT6OVu&qpxYk}pwguG$yCfW@62an(&i~=EaJ3PF_aoAgxNV|L& zC+%THO7;k!R=oSKfF%?Nm)8 z8!Shd?IC4Kw<%+~wX~I%$Z2Tm$T?br5Q`%!Z+XDh((V!mCjbYkY;}^qFf&2u9@U4^ zHhc*MOqp|Q9r>`nBDGU0Rj-gVethhF>Z@sr(z#R9)8Nc<8an@C6ym5v_B0gn_Q!XK zc<3YlgwUT_Drm6}TI&1Z{}=|+c$&UDN`m!T>Ki{onHW!2=PDNOrIRL|a;y-OlJ zv`Ja3i3D}`Rh78_Rcs`n5(LmvMxZ7?8qcUe3Sp7SL1@*gjL(zlX!VWqiD<-q zi3~S0Vgoxvp`a&z;{R-Np7*_r8$v8&QMhC_(Uz~q<0@1uWavh-e5M@5;{ANg7r)27 z?AR5w*?;%FAE!57e~I(H+fyxZm4k)}Aw+u8#^lpEdaDZU%EUeyfb(%vOT04kia`Zz z9Gh?{j}_4?#-=c@;1~1Yy~IMo?u=vr<;%4a*1W`|?aR-Wpt3KSufGM8wQXSzQ4*H^ zR))Fl`M%8T%Swm~ABsp2ELIumw2HYJUoP=h&|qZjMPu+GbbNNbL|EOcCh%1_7I2^d za}b~SAFx1}!a5QOqY$uaHit(TI#I`c%nA#C=?pb^rX7m;>`{qRhsxWsxI z_9bCIh7bnqz&}4L+>7rr$RpgI43Pbjaou*bW~uCnR7+(2*Q1%ML>UHRn8O<1nGj@& zP6ftLaM>HXM^y8H*_jJaNiK?gs=cdZF5t0hCEmZD3Pdrcu1;!0&jUYUOfnzgLVH|m1cN@;1*#k@mWu*A%ifK 
zr6qoc4$-TI0G|sy4_x6rk18zML|F(lF-`c#Sp0#0wK8%;D2<>d^EqwF253R20c~%OLj8&R_OuHvA56dKH_~iX2ZO}6WM#T7`5&?(2e$t%$*)L`LIn0M42tFp~ ze$}I(vJ~IA*L?;-Lz?A%mf{1_alc@^tAq%HfX!)FdJ!TI%Eoz&>GD6v?P8u@ zJR^+dGC>dRfqND5>{lh$HvL1*4qS16RWSe+Zv{3LIQwdC%NF`wg$XR{14F|oTjvNV zY~l#hPH0#*ba<5LK6`M5Sp8*~wd-?if9yS{Z&}G*k`#%!XKz)dF+wv$z(ExAN0YO} z)kBjQzFv(AaMf_i3KUfLzci`>EBO}6c?$x&|J=Lw;VS@O{`?>K9_+!$35&4ItlVM; z8nT0;&^1W?@7{s9SIcA0iy~x!xc?2FYom0eFxbQtzDr}Z#1n55reNZ)R{8=0x3wA9|0; z0#8Lx`i(DsJq;jiyz}9Y(@%c-Bls&qC!?w+PI?zy|NQ5!vnGW!F^d--@~=?7!5r2a zcT#sBb@VT_q`OpVS=-bDrq`H-b=^>yRG|0p6f~3wt2+nH7B}z?6i`@ddkC#dgXy>b z);CdTPSTIQ|3jdIB7;)APRPbFf>;atum*)kgPgL2RgvUJdl)<#2y-2NzAJtH&9f+P zbLrh5-%j^$kn4Cr9u6T)ZZmObl@qgiNUGl2StcBWa4BE`VqwAVgr0H^!VwAEteIZS z-Dl(_;2mkjl3mq`5>-6qbHHnl`)Vl$-`56$s`YaeD;aG1R5K_6&*Kd@ju3>;R|t5YG9BMG@oHYMOlbh(y^b;DpxzrE;5! zRPNIPiSuD>IIK%|ezF+(hUc#Jq*va$p86Wc|9__5E7;QOO3d3g$8&QI9d0)|B6TAX z1PPJ=L68(FXhe-h8f#>`%6_mO?59z=>?#ea>?*(b)o=2P$}X47E|*7~;mn95b4U&W z5+K+Bx`9TAn{&?L-01IH`P?bGL+|KI-(Yp=cb$~#@a`(k^I)dTu-F14zMclITtY~z<~#s{X}8m*+s10urjDVMmOKYyhl6&=2BZMqfKVad3Me?O zjXf5Z=?VlZ58ztm;H^zuo(yNHIK*24#m^Z68EB7n=?F_eA+inK`}mXeTMrFlPG)Cj z)5OFSe*0K9;8lM3&O2$5dAa||7f=OGYO8KYqxZ%TDzKVdT*ifKn6$RDsippW+MLEc zYw0lc-PlPNIZ1AwAm*9*afD3LufB%xIMdCEX9)hxi3_Ld{K(~Z&58mF!zAU!B7@}S zCX-c!`43GNi0`RG!PGNZ-j5@GB}yzZ3d2|vy)6|k5%gf**kIIVJHcQF#o_^wlG-I~ zT$cFB&gl*j`v-jypgtqPa&vX#2nLx|SPPJ4eq?Grf>hza{$wqANw|P8cs&?3`w9`! zT}4SK`a*kZyCtfuR>}$i5Sj}zBnmzexXH6J!)_Mg5(Pmrw&C&Q0vkg$zH?~s2r4rN zb7v|Wy{ zaiai=T$jNt!`jc-gaZ+QYqYBw9Asq-3?c5@7sqJdP64Vg;!;?1_~1LgqAoT!3wwo6 zg&IF$^||lZXoDTtzVOAV%65fGBj7vr=+Hx%XrbzTl zJfU>7uZ2vDmgCU$7q~_@g-2v`;nSS{wxP95{8e+Kpx|cW+vl}%)ZiZ}1FL%hELOlv zJlAEZ8@C;UZp%#2k*0sZ>Irc&eOg~+#AOgS(E#fg?#p-);o`DWh}|R(!oEhDK$#jw z?wn{1afmHwJ6wy)xHXx%!g>wN--3W>EeO*jef2~&@mzYKKoX2RNE=gy21`F!`=VCd zfN|%*w|M6}GtLM-&JA4%R}cG3Co z%0xDvca258v=uO9UVMc>7vC~w2#c^}QGW=dQE3$5ckUnmm-AYGpA&bqJc*CujB8DEH&6xd z2@Z#u3jqV1GR3YgYIZMtN_9_>Ch%w9f?)(+`VU76Sgtw0kzSc{!bzbntT+&bP&iOm zBq5-!fG7R1o@j*ce&+w34*s|%Sj3OK&R9#dBQVSJ5d~O>BB@VH2UIY{eQN`$RD5tD z-kGks5BL)*BR%F;&=u!{8@6qI($45n2?t8Oq6{UNu#6s*fP)gR!kWlEX}<~#kN@6+ zA@uqI(Y4{)2191n4BaJ`al6=tA%yV2ERbFlCdSQtWf!X*2OQ}}VgL7GvUYWIfuTMk zE07AzAROO-HC&{x#WokLf`Jqb)eDKLX;(K=fHHpaV6 zKJj^ctf4EcdvUjbhVc+)?bFFo7$_K8vI#x=?Ca_3(=Vjq?v^w@{W;l)RuG1WXMm|} zB53qR`v9&52kFt+Y;?IE*!<(~e-j2~IXxU7hhb~xkfi}M*D%kYe2^YbkK?bs#(cp` zl&tlngF~+pMofxL}=mRPg~m+vDCUUQGH0o>VKmVgYPQwT`@BQ-UFtS_er5jfeRw~o<>Jqqsh(dXJ zhHYXX)XHDKjs=J3-FNVRNw2?6W+h@1-v8Mr9bg6$N83v#=}c+<;5c$TNNT{hy?%2UtO%!?)iW;s{zY{XqXKI83gN z`MZiM%sx!y=*Khk$+gB_#MpJ;uBVQn^Bh+@aw+wZt*8>g12LF#{Exp#9qY}w$rYIY ztk6Qq(bgDw=3SdAsy47{>=T4+)*KIgJ448KkscC&?D5t$b6u_ zdL`YuaT5i|DuOS96{&m=$a-}`K)CIP+n8y0c^%;vC#^8m`RW7G+E2ibZ6PER|6zt8 z$GeAeWgDx*bFaOb-u~uSQYFb_e)Ik>)8finI(O|UT;RY%&Xbs(xt|sv%wna3A89Ac z51Apug$nD!2`Pl6&$?O}E5r6A83MQJ)8z{<Ll**Xzqzpr9$R za9B9j(?{=rfD(E#&Fd3|%iliFZp;>4uRFz_ zY$%zrrxddkv>zbWK@k0+&v4v_h`UjiUv7T+<;Ip7L#hnqiaPkuN%xgEu)H}{K)^#N z0J-!(m!Z#_h$SVg5Mds4uM6$6n}?eU&zXN-Y1jK6!K!xDm%kMRB-{%GOKq-jzW5+o z7l@dIb)df&Er_|)N%x1@$9jEI4Yb+}3fgki`H?YnESAlIg0u!RnO%HfOt>y%6zwuz`vPIHo6wL@`T}wIdW}fCL}X18Q!)Nu%c65cO2FttnHi3f*MfoWo8Uws7aH*FJ0fV z;KQYWi;S5-R?{plnd4rV*!KuPqGAP9VIJ-z^i14w{Tw>36Qx&>Cd>e1z`tu;89?Z=K2)7Ubf$T<}cs~zQEkNmdkYpT;ckx z*LC1$Fy8zsuem(G89#+LJ&Ur|VgJi7zlX5Eg?!yQSqpU3viUjJFxZGNf2!7ts_efo z=b;#qU&Y_RTk5oHLC08-O;~TFb=K@NW^eL`LW20{_$}Xv78SIKerRbkm7(yDcn;`_ z4ZGHTPQg@RS9+pvuw`@?^Cdmb_yWv$F5ndXw68Sed^w_k1y`I4`tLJQf_9{f@te;l z@G8|IY^~F^TD}J~qJIkIek}{R!bJf>&AUyj5D@$UQLe{DZ7)gh57VUUH#p+R$vIo!)tGeF2(4j{F1Qc>dyZp;QouCLce7sjEmA@lXHk3$pg%f>V#5vohm2qoL~g8 
zmR!5_LK=aoygPoEdPqb-|Bp!P_|b1+o>4Xw573S?H_~_eBoBchEA5@^pcy5VD#58L zMT*1}X#LvfbAR^~?Rs-$>e<@eARQ@;B9?xAwu8~~FfiUIcD=I%WE(&bjHnUz>i zKlgfROTESK082qen2GhXdJq~qiYiUcDpSl1O4?~DT=fb1Zxg>>=k><~eW z6J_PEu0G#JdPhb}ZujjK*CEyl1hRTkj_OLZyOu4<_0T_0t8Ho_K?CzHEv`TqQYrMw z&u~XxDB`2ul+HbiJ2uv9H~1!;>xX)={&e-E`=5Q9wimJ9f`G>;=(Ngv8g>7La|mY$ z90-02frzdMk?c*7wx))*bU+NmIPN3I%z@TJ6MqbGKx!Sr-w~N>oFiSv^ocLv3ism6 zU&C+y9Bw~ozp+LY`U!}o{)?B`f6#Jo>~ltiYmjp=YO07+!pd;Q*j>XltQliL+Pj<6 zl`GGr{&T%?O<}sJ3)eUTFD~D_oL+qGCVuU6>30wAq>m>4V|ui3m)!r4)6&`k^|B6( zjUf)FK%l!Ru@gdw3NiPI;(Rf!J>CvJ&i%o3sM2iRi(N*yzV?muqj%m!IG9d<{Z~Iu zckhiN{Il1xCOZiDJw7FA2TFpSo!q%-(Lj}pvO|4Q>`5c9{xVU zIxsMDo{U+2>6NEnWbVnsVN}*O#C< zb7^UMHQnRTx#6bLinkXQ(MWISRgF#|2Emry~;5QNr_!AOurSc$GoCTs@C%?(~| z5K5H1w*IE_OJ9J{T2o%at47n8E*ZL8SO+LrVkoVnv{7;~HfcVCi1S})k=$TFKpA&$ zgocVb+~{J1ViEhC#5p!po_Cv4%8fp|!`S!OjCh94tr9@#-z@J)g`rsvW{`VoZX%th z^*aXIb$tK-g?mZq$%MhJ!e*ISJKi?CYy7QbwsYXCT zH_hg8YH$-EeE00na;`5m4>s)E9V444@-=G~n=^ni2O=jJ*BM|q0W)E4rsj4%<{s5^ z+viH1=V_{7Ah}L!Q*v-(xWYYmZQ5r=m7jHR0$VOGb2-#S87nGtN7kXbSV!>iWpcip zTmTXEIq|%mtV33jA?zO2EA`f#;l#=*>SyB1$&@clSU$Kl;l=epEbr02eOJni6TK)WLd%T? zB`(3%J_G^q?i%GRddQc z`Vyb&P#x>zpsLsd5R7_mL8W&JjNK{aW`ZMW!PkgrBj;MeVCtGW>e0mEm~_4u%pq-B zH@N2F^Xl`O_T-qenZPxbJD3TA(?Ve+Tk8zpK$;H8#tvvVFX5VP z%k7G>)b3pcSO2GZ#{hEh5+2bdK3s7YcevJL9R`<>JR(d0HeJJYU*@0m0i^*W-K|Ec z!AQ!Q8vM2>CTxU*r@x8sni1r(guek&UUOWkJSE7Q&ol)EUX^fx0=Yxf?>eS5z5+BH zFytcr(KaubT+gvqxE7zxmxHm2xX)( zw@Hj|RPY=p>n$!otbvDXYmH!lUCeAdmvZSA9Hx(H)UImys?+^X z7t$>pwHwG$x=fm$3CtXuaqJXs7cu*Njkp^JnA?m zwUd0IhtRNUXuN(na&7yXU&z^6dyH=#W2q!)1VMQGOuo~HPF%XMD|vqdOl(N;^pMp!}JC8^cbmZ1rFE48)kvICw`r>Pchlyrx(&V zc-MgXesXFGPHH>dc;ynlA?@kI_dg?r&qi8&jHZPE0DbigGTJBjprTcb3W@{$TuJ?y zH(E$3xUsrUa6lKh)tIQws-3f8yi zXkSw~(u&+3$h|;B_UU#<>bca(IFX1*F|)@>Psaq9>~b*3(HXfbfrIP48ENf=-*u<8 z9Mr>s0C4X09OR%>D(BtSPb9n1zW8q~&B8U-ZsO~Ly1$pB>4A3v+-*FnO`p$>h3}%$ zrR%H-$yi?Hl)2g2$9MJA!z5g=97xdjRJ!%dD`|Z04)c6~?--JxQ;_yJbYnisLTyVzm0xlF##3W6Gxp!V@W z+MmWe)X&<)TvOXbl>OC?)K)n_@W`XUZ70}0sGc>pPn7(t-*_o?*6yaC|HXV-I@`n4 zFn}aqm&QNEtU-`M3+a&bu^#9jO3%LVEZVlgG=LPLW}*R!fhf- znrTXdSHK%gMfx=!>`Z~1M8fxyFVs!VgxETVx=Hy8Mu|n`CR$c^Y0EV2L_TE$^-2&- zayNl2Mh?r-*P|AQXhEL!IqMT~bZX^Nyh6?EqLq{7pQ%jb=&RVMW209U%|Ioe`mGpL z;08m5;VU7NERlCZG|yt#rRj^l5*E|{$97c*2J=b7+5^kuuphqN{A06sb%Kp$W+j3N z>}hO_sCa23n3d81WIjt@=`WF8*mg$8rqh*?bK*HUvT#krvDZA6k=BTs4;A?!(iVt) zl54T0!8q8m--E%RD=uvNA1YJFRqC&56|cK7no=hGP53VJDI*&jrn_T|Rrv8BFEL!X@!2 zB*qv=o<}Z5p8+P%+U-2vS{N*Nr5L~X?6~5<0>?&WI@)SuG~vI@s$=vUne4a76K zA3THlNSqZ8n?%tW^`|OMXI1;Z2T`#Lst-b-qtBi`rMaRF{{Z2^hHF`sVs&k}ZK0mwLN-(pSB7rs21L)-Vc5SF7kaU|Co?OU7zj=_Si^Oy^(=eIas zUSrY-eR3=-+(S5{vhsTJzR$*5Wc)(Qw?LNOIX-@Yv(!ud)*~#!1W_)>Z-tO3YrlC$ ze;!RoriyY62xIF}0MswUltIE@!KIDl(XH(*Vg50)Sf^8IK_hcTi?o*+a}R%TT-kpS z6KLtX?MbVJN|+s7=ZsOBkjE9kbC6M=g1f?bJi#@CErfwD*O}ja-tk(_dIfDfO`BFD z;~8@i&-n}`ptJsTPMrnUM6LuuRaBEU_^g-X&)Kpb9`ZRCuAltHdMx$9QWo->xZ$|N z?}jz351`jwGG`SsftE&y<^NqGJI(QEDj?UQ45H(E{O~ac3OtSzffT@H*o}7FCjaIt zw)nY4%mhT*HInmBW_E3j6Rj9qf#XKIQH{3X=z9jUDCcAguQ_ zwWX@YHq_J2&_kjW@yUqH8_c(-Se}xRLOsbbX=Vbj^i?&a?2N!SVR&tWiU87rEt$I$ zjZq*+Y^ICj2n@{IZ@ift^o71vd3#jv< zk9*7$x+s`1U?WXuV5@Zw2G}BX&&=o~T_zgz z90yaJD_~-PnO}mo;~PSNf9eHa4oO{8aYn`fPQH5h+xujBKot$Y)JF;)@maH@s+42W zH*IpS6;*oQr3M5;l?JT$YENu)_x|8JD0{Zj-~9A1V@(|p8DEVs{h$4-|B@5Y+S26M z-Lym+Cw+X~cQq$8vR1b?c44-m99+|s-_X#Bd565*dzcBR9~29G{K5BskiPx$^F)Ks zrvKyr`VX8ihY#@CHojkN92qN9=U#N(&eYGr|U00%Sm~a1fgKI0bbI$@Lgvg(Tw@p8{fyL?>1(}M&|t-O#VT- z`{~EH1CVdDvkPIE^jy0IqL%x?p|v#e7{LX4P&Ih|%DYDWifA&oW~9A?0Vak-&6=7`GSYyMrn zFHD#NV59~(!!wEp)2v1??(@9bhVuP9pVrD8P(1#V-cpx-Dt=lvG{3a3q^95c4e#$c!%$F5dM=&k9 
zG6$cp`wZFILR=}#`($tEL6c?;A3tq`Ve@D3M6Am3C1!oz(Hm^ z#y}fdD%?mEdc&WIe#y9NnNV;QH|(nQIVa%@W#e?uu__qnT)7Gq6l5GxKi|X8gFb2P z3C56lkoeWH$W5i7&ztyZd#|P_w=i{$K*U@BH}ILXZP4*w#w@v#)M+ zGBidY+bY3TQkXyV?uuuB1MH=T~Vx(kHJ7Z{HcOg`I8Z^&}jh2yOH= z&k5_4`@+F~gvO6w<>#E*FNNLQqQa1W3h0i}`t6rYh?tz$QSOI0;A{i9ct-g9@;P6Q zBVXZvi^VCX|D6jFqaobF5+*~#GVSXLH#;j zS5)JK4ucjZZbs#tv|g+wVwL{l*o{IGi-GT)XnfZEiR9`$g$cYuN)eBZ-rPn&StK$P zzn!LfPPb&N%S1YE;uo}c#F4NqC+WiQbLl&;yq;P)?BK(xzf03c)2wwE)-Sdq-)2o^ z3nymYPB#XxgytD`8X=%MUHBjVm%mLZ=LM+8QTo-9r(xpv@z>C20fvdSr;ETIhXII( zmCS>uFjWx|sz7jtRZZoIG)}&wz6O7|BLsq4Ql2q}O|;mq8V_edX-P!$Li&7>eH8(u z0S&7DEv~aBw8=7}r!beHyyHXvQ|qtdIoFrIU?#}9 z*A~%{;6*b*eQ$pID$dyx=|BAB*J=JCuqE%L^dwG9WDTFVPjR|e?|B+wZSFH>qmDgx z*VDtk_KmmFfA!9vq(KCdE&MW?$k}-J(>v*lj~)c0s31Oc4WB0>e-E(!ERNwuK}Xu~ zg{f{p%U{H|6wA!i!>eVwS-T6~0D1KDc;pB;Qu_VLeYC52GW1%o8nm%vvA zmioH3^he+R$LS&(<-h#%|C{Jla<`%^l3wht&D_aOp7e>-WD*hhAb%;|}c$QmOz7O8*8FTTxK4mtLhr2M-Go*P75bA}kc ztN4n6A4PJouPyFzogliwZfGz&1?xnbs;9vbg4_{;q{pwvak zYsT+r060{0Xu}nR3-O_?br35GiZ`qtai9;8xWyg%25tx61HDw)^1|)gX^?i`yYut( zyHCei@8s`Z!{QF}T}#CMQ&+B_;Au-2ubhkAq*~^G^0$ALes}jy`o(*{NMGE4=;pv& z?+`dJnu=Ifp1JuEneyTv9f9AErk$Q)zr^A`8yU{rH{F z@o~d?L}K5O8_%Yfo_{UPjZY^}uv6jJMbJvs;JHih$_O^;z~!`q@CE>QM6m7%9)0Lw z4fB^-h)oU7Hkd1jw9d?t6deMfBXRiQu)rktk$%UGmW@+^p^oRWKTX~kfPQR7wjpDn zpdfPt#ikx`pKF%)n#_rtlhKxLYOV~mG!~b^j*Y~POpbxyE;=_>$Yc;hZc)%m`Nt3I zR48%c;u&k?c5GxWtPoK0J==3b4rYTsSgUn~2Gfnu_PJ=3(c{qx9OPOsL1H44nWP_m zeO(ZDGJBO695owiP_qVCXLL4Q83xLhwZ74BH_y`etUHgJJB~3nRy!ht!!P>bD>hl` zc_LiVZ-=cm)-lMC+KxhqMA>U)!OmS(*hnBl5aKfi#)u{C)}1XcdD9k#!4Uo)Y~8qx z?9}|K0A^RMMrK=_k;%!qC>?{NED1h*m!WO|ZjQzNDJiRia?<>xGu+cn$mrB8oY*Jd zmhn|)7Oh?8O?Y}=*vTvho>0_#zGUjG*W9+wL%@pr5?5UbY%kZ(IH)HxcGMY9WkyAF zrE4JIYn=>On7T_cQ6gAAjnuS<_PIE~CQDpEnhe!7kd2r+}hXs>V+GB>4pQ3Dsj zEuY`Pyzknog&yceK*=^WtF3V~?vMws(P%bKweh z0g6+CO~}!>C8EF1gICMAAGf(rt??J;abPa)h(~JLb6I;v|7gRveGpdSf%xFd`SEQZ zEgj|Jdi>&dbS>6KtOM@((m~a_ZO3PP<@%W`aNTRVr7@@&gL5fvM1EWuLe_LGYs5tV zR1UXjE_1qytomsJnxP@U{0gU~9X}~3` zf{4^w0s_QIvmu<)SCiF?4<6>CTU`xqRnmDCnwv{RN@C4$AM3y!CYVQr5sw}={f@L% zS2|r|PO1ChqlsWTml3WES`&SwBX+C;Mt%4iu0ObinP*n5{1Mve3dVF1XYkvXE~k}8 z57PhhAOAdkHu)JAhL!Zu$DdN)7JOAcy{qSn3|9l`!N8MQlEv#;!Lr}x_}tA|u94J! z9Y)xr*}E^drR`%5C@L<88ySNKx7~;6J5kL2m%+yZyb&G-;(R67ddMSrhRf3b`fvXo z>8O@*8-kwWVG)Pp02_k~4`JYs5RN*}wWaP6))9hLaRryu&$x#s-?ghZ$1^t&@OZWr zw<6OZR**`^lO$Dk<*W@d;_M$BXIsnlVb)$0;Y)oiPv6F6oX+nipTbnMN+PrN95p-*Xb&tLf_SNSefIwlPooFX+j3 z5tuBDgWu4I2F81W^@+W}LpNO4)iCGM&ndreiK}7N5e)_|A{ur0BMN)*VLuW`C)(tJ}KX>t6 z7m?9qk#+=xR|~ks<{x3)EHXD$HQ0w}yxoY)8=mH5H(r_a(9%O>^=Z=_?KL%_Ml>iD z-?0Q^#Pbs3*!AK$`T$W;Yo|7p+G!_mM3(pECT;&B z_ZtiQ@#rWfWds5IJ6lM`+r*+iH`E`walcJg^cn%JJ%8A z$20!P;0MtO%4kFO7XijT)27i}!c-vQxr>aySZIXm{e zxoBz=9+?qf1hJuLewSJGoue~O7Xq|ql z6?KL|D_&lcLCxdzcUe#kw;SM+3unf%j7!4L?`rydKKGZ;QpJKkYhWFhIAHzGlR}6` z6z0@a&RRqub1x3F02g$}386uY3NOVQ*JG?t;AaG~u+T>^BAS_V$FhL`Gesfx;wJ7t z2>)s|V@!-sd@`z3pYk0nS-X5{sc|hgz^ob@ps=725WsZ|0?)NX#UTjrX`jn;VS|i1 zw-Mhm4tmL}`e{pW89l!SY^6csXsib>&~tIfVfZPt=yw@~5QqwDMZ%6d2YwIpJyb5vL>JrT}wb8oMr-j&`_#I`F-Bpf{4rpH;1; z(S-*b-})H>3`_)826LQlFQF-4L`efWH?z(+@mW_nu)WFYO;{INJ7B`k8evAD4MZh6 zj|#bSq&P7V`cP{}TArLn)4NVQ1WtOE=U&$!=4gj_3hz}AOs55+3HPN+1f5;3n|(qYF

    ;{VW6c7Gke(d2HYaR!{po zB`VeI72As_Re)U=u5_;96WxO(8N4kNvRz@}vb96s8B-yvGr zWeDq8M>VIlVF4rZx5=z6;LF0|4EZ!?0%syl3fj=z)ec@mbD5{bsaZP4K|aLFN!uTf zK1>tf^Vd1a>$`7#1wrJK^nfVkGkC|U8t{*Nwfmb{nJZ2HAOENSHog7kGih?+uhX47 z)9I;;FQ-?(`ZcU3>*>M6&xuzU;uOI#$0;g`Df`X zxCovsaDw2QZ+sW}H%)BFJZp9(RkgPFy<0=ji@-1tv5HMz(@mr%k$i)sx@>qrfW$QX zR@J-#I5i|{4T2G4+C2G0O}$Kr!HRCSBAn8>1X@BPkqRwvCi_vJOpN~BR3Ab})(FYa zutr#jzN{-4eERYwYa#8H__OF)96ldb3m>qrp;gV=ZtHT>&rD!x8G5zt3Jys}PRuNNO|~ zZ7Cq>McQuwQu&oQt3? z%qjigj(H%R3zZl4RZUDjJj8xV513W@rv}WiO6;8%wT)rPqlDL8*K|9^!=?P4HiTiU zDdtDNcAp20X%F`_>hWBAx<_#0Miw-vq)1(+bC8KJX3Fu112TwILw_Om^vU^j-5Bem z@3RUVgL-3AWeLZW2utBB%mXhGpn-vKbOM|^;Uq3vF5IicMRDC>_{nwGkl(Gx|D?Ch zr{$ba;Yv^W&JV6zzx7GzY{R*ZvOp*flqta|n0cP{v0S+~zyR?hKJ9~WrLI66>JD10 zP)OZWTvl?}p7;r759rne4>wcac{5(duS|C zCN;sVczW1=&0$*HR?A1*?2nbese(POn$#@q?QH16m%k-k8@6CfD{&$};Z!p79-bbW z*@J^7oX;AprNB25>7&tH-9%$roag!u`3?7BVsS<5KnSRZ=D<`p0z6j8+G<2-<=x$kHlSgA-#B_~(*i-dIw! zkMe`do^;hS$O&68#-3ogjmFn~{{#z#lGdgH4c?Ry0a$3PlF zm})~{{G3GXOK9#5c5dq$rXL7UxZZgB-!7-BxmG(kg13@6-bN!Y4Y5!4-JHHg&QaZ1 zwn;Ukbsp~|gaP=Y-Rw1B?$Ge}XKx(3)Ilo3gLLoC40y?jZe7GBfFsV83dDqECe|l- zA_Hv3m%}ylN2>KFc>?h%o@AzboYtx%1hY!ocHy?3Yf8N%DmW|B+H^53j#3|PKE^V7 zNXctU+&259#TRf*fiiOjdiKAl4F~(Ru|WgNyNV%fzcicq(*)-eC&_t6L2kbaPR{$ zGwY|olsyp3$M6*_4Xs$+2;6tiE07a5)q^Cv&^_iFKA!c>7_qQaBY^Z=KxxGx66Tq$ z<7nd(T#>5S69;h3W2n$g1rbj8>O(jtK)VJj_xk7M1#!U>zB7(MQroQ#nm$7QLrcXZlh}fpp zbels#e*C}v4QqpawSV~HyK30g{CirD%(nrxBAL>*gyx7$1HhOG%t5P?F~JPN*A3-e}Oi@w_*z&k+Y> zVTX9iO~>jaHZgRlNBF06*Uec?eUzcNiUc2%i4dSZf1*F3jdLTrudrELMnFR3U1Uy% zWnIismzxpeb8`%rf~Z$wmv>(4bNuFWo53u)jpn2W^RFi59;Ccl?7e^i#6Wt5af={K z#wD{O&k}x(Jnu_}-LA@DdMN_0>|b=+Zo3J`gIX<`~0vEW!mM zWce7Ib^Av_H()BEP1=Xp+OX|uF|sLo%3toU8v3RCw(?GTYD$qV2OYUOdnjIkC^;l-)~{%+hE=pKlj9QUm+|Y?34qS<6;S> zo;B_qO0>jV1xNACF==J+q&0gg<7xeBmHn-ofHWao-hhQ`WDP&#J(UC0Lx-ao=iNH9 zWeWIkJ^B^5T`L*pWxNf>IPg;3%GJc2@tpPgwjaXE?+Uffhw$)II5T7Pkh*y$AWR$a z;&;{_2by!Kbe(Uu?>#zX)d6SwDKlM;(a5w9mXYz;xtM?3p?=q9tT!Jn)f??wANON? z4#>Cb#{TjG(lJN{2V;gJ?wZ39~s14zm&2b=fPn` zT@-Z&96LaaYr@AzsWJZYTvN(2JRA2ZApTirezP(AJ(}b{UNJ|`eY9npwl17_mcK|u zz@WV5zvwadTn`HS3S|1IOKbeJe?AxG`M-dVxSC;ugUtb{W%dB~hE;s@o6!{463`p- zgN3ZJdYk->xT26-(RjHH0t9!^s!H3;r76>~iy!37?QRf1AoHjq>G+VKWHQIppv68$xj>F_;*4Ny=xZ?6Yez6|`0}&22Ama; z$qZCeSs)lz!JhjY2!m@4xJpn4t*dbijSUCs($$94JJ^)k2iTjtG+%JI0MVLz9FVYc zxLShE5f%{wo0+%fg7ZYu{xy>s&;_grd-QdOx5+Q6n2*|;ZbD?Spu;>L;@7VCfXwUi z9PT^_pK2@T#>Nnab`hY+pahR_>DqO!6Wn>iVF>)ScEPX_%y9qFSQ;nE`3|lT5zEK? zNqaqpRVLTJ17a$QBxctyy6>VHZVb>yupk(B4{dzVMqs573)27(Lat%r9+unTIw6u*&wn^fb9phtCg(8;s*W zV18%F{ABXS6a09(`v$S3*3u7f4b6N7YcFUM!LEBSmQ}Sd{{%KOTM_JC-_A)@y;b<- z7xAO_xM5DuB8mOPgC)rs$A*g&D0om3RO8y>=~N3dxSh54rps4H5GY(i2vM|iO3<`; zZyr%s#M(GaC~zCVa^bwyu~+n8g!yM)58>So5JXfaoDu}9m7{U6E)86UH^8-KbM7$B zk0Q8WQD{f|yi9QSc^KdUe9gBHIrM_VHO_F;={#SXni==}(hA4Yj?pS78#ec(3zsej z59N_q(xvFXd=2y3fNR?+nT?7Bb)RBwsIBiNqt$R~t3n&jno;RAJUA3|Yy)2Dc%QLp zHOKPPgcakgNPzkVmJ)>PI`Z>&bl0N=C!Z+vQJhtww6~6{+7vEa2!ihOyCl6Z=A;F| z*VvUE4+p?C%p9uSLj&nQ{pWwek-LrQ(?`Ecjri6#!$jNf12pR!2(lfV=Bek0C-=<| zh&wPika`g&iq*?$6-B@*4EhFrI>3F%gO5DOMoPQC#R;4oyj`9`?}Mp#{H&z+mVV{Vke`s=5W2JmAK9xG>m}{`f~fOgFDz zOAFKE>9Yr)BWQ7Y*z`OnqV1%qNe=5kAgmg`@YK7R*#$XRgfb8M!|4y4={DHOmr8;? 
zv*c~?l3FwTwP8YZIS7Ws1$(I-zH+}^G zSB4*IL?XKd6Koqa6`O-MqwjvY%Yl^cN`AEt+Yi&W#ks`~u5pv{5^LL)_%H*sPMdKH zW|7}9CY}jlgnC`D5@9zZtBSxm>!m#{0Z-CiG6osqya7YRK+Gk4vkahWhPZo#US#_~ z1G}`k7O8XI8WeYp_neEvrNo-mMH)5_r{?w^Fh zar;K(YT+mIW4%1aZx>g%$DqlSX>o(J_40ko#wJCb3VrrYwp1Z1 z+@pDT_G}XrPQWFbCb!4LEEJ|Wj>;ADhx)E`L>(*f(;(5MPIDVHv5#@x9EQ`-) z+%Cb{K2hFhtjGSz$lDM9{GHndex>o+j@Me*Ol8W|?(3T=4hXdv1?{*A)`O#!2=lgE z791!*U8a#kD@C0KA;w(tv>NF~U9(u*yOCvK1C!x=FvI$ti)-S)<1a!Fbca)j_k!JX zo!LKM!Y9Tm^3tz7%zW4W3*S7Z(%kTHw)&N85Ju(aghUzUep;n4XD;0A`KGnUZ@HZe zQ-1Qh1oyyCF;HC2`23`7T+Q`7p$8?pP^yp*>tr6lr93C4IWOUdRm645L{FlVaWUK@ z?5}uleU97pWLARC`zf`Mx0)0P#|jJ-0DONL)JrRH$+ zA`K)Pp#ol6$ZaM4U@*}PQCe+Tt+e80SQNOX01)#byaGP1DaXo~d1nDlJk7L098&1b z&t!Nz|H8oa5HCR*uRO=XAaH^IRpv<3cp_jT%@b~+kry|FjqP()T=7)~sZ71k#y!_J z9afl$-~7*GrSayuFOS`n$fULW`(H#?z8a`)iP}LZsPj#49!8&*2N)%(|VJ zSs~pgbQAyZU6|*kg>}kQq`u)k{ImP<=dZ*Sql0WhxP!39Wuy$`RNG1IVL8$2<-7=` z2G*<#V4GB!YYWg3c>Lb>5wz6Zse}1%ByD84xWeSgJd#QWtDSBa&~06B@Ch!G^6?Nx zT0rVzB0P)xSOQ=aVeNf{ctB|*ctFw%GgCEn6J!f>YJzn4gUywF0!Po%vrk7@@Oes4j$ zX#({o++_?9RsPf()6j5|+Iz^l1Vg(;Hl+RCtkjVquBBh@{|2nq5R6;j?7j{zph+hw z5&{|V7chNlhWiG3BEY_qxjR7{JxdbzL()Kc(v(5mCe^?B(lb~^J7MI}K;xn$Z7m{r zMV6o5q4d<%>j-L%90f~SR5ZY6)KyC`uvVuvtTy6MB?me*^>O5KYueSp;rv;91 zU7alwKu%U5gbszrI)txdgeqMus}Kk;-x@)Cem(WJ4KP)}^oz&o{=J99b@VV+0D{nZ zf}jDH!i-fC1WRldnQ{8gbrWL%jUW-c3ehtZX}eokeAW?0n8PFRP8v|lAqD+UffTr; zEso(nv{H-Ky(`v~b9esYWh^7azpPH9=-EZn+(-~|L)t`O-CaFMqZ4BYp$eNR^|cRV zA9ongY7`spJ11dL13AfZ)j8A(9I=`qjI|Z|(v2(6le~iChlwv~B`9{dw?92c{DCgH z8nAOKKSTY@F&m^=wKC_$sDKP>7LZgVLMI4tjy1? zg15LWu@1V2aiN0Y?qUyJBgSTF9xDc?yVVe^e~imrq?-laH7Isl&tp~SZclyHH_}JH z_#%Dwa6HvDlkyfOoykq7J1fuwjuIx_>-~u(PAMbd0P#ajtos6SGR<`)3*Zo{eH9Hb z>WeVyRjjun8vRPF6wPFwTH9C*C2A8^5RXa@-hkMn?eztY#HRnmADtbLgqoP3_kQ`) z^oST6Ew4*hCA5015F9Qqszs@4!1SOHXPNmR_6$Tfj9{MmF2PqaIXV8*AO8B~l(H{C zHd#qByeH+5oCo1{-g(Brha-rzFa09x*vKUE5_L0?sAbbF$hYMs@)GWl{*i>w)L=0& zYRB}sR6C$9N(Rq%L}WM0*pT8n&(n|G28-W@>6padv!i2^Vm!gM#6A9_JqfCWQZ10a zgudJiB9AmgB?KafxsTeG^;hHLQr^Hcd09tbl{+cr^t+9Af{B)4cA@$_f(&)d&n|}) zcK*U3n>vyqh;?p|@0dCFAp(aWt8ma=CU!R(w|!qZ_&JY1xF8#3w8AdF`AK0JQ1{>1 zpt%!&aRI^&Onl4S2`IZ8w*;DPv516CY@S?mJdU>#H;mX+{T|QK3jO52KuGb(UU)AH zLE?{{$ZZId$jM)xx7>W563dle+%FF}3pDl~7}Jh*M_+7%tIoMy@f|SmlRgCtL7kS5 z>-3pB%oQeCHfB>oDqK-K_t9@OtMHozG4+x>!}9?{=Gb~<(!>E{LIj-cTFgF$R+=%E z8Ci!Dtx{GP+u3N3x^jPkEn|&3xNSSp7N2FjwvO`4don&gqgGHxD2N+<3SV!*X0K(m zXiGnFra5q!!t{kr4u--bXC(w744_r=o^#iT=}~_|nSA@L_Fp$8@mn8t;UjDm{_V2@ ziN77M5BQ9)SSQY*eGyL7pU0DN({aem1n8n|%Lb&l&KmPM>#+p?V!iu}571BN!8-jE z-~4X>Egw%yD7<4Q`ILF}Gb-bMCM)L9dVQ5)ZMFGYem15!>hNiCJqyUTDp1-H)QC8HqyRXp`0M+Ngr#O{~-4ENBYIQW%nEDgh~a2P!%?G8{k^w=ELWu89wcE;y=IaZ>GDn@9PvU*XNeV$L+FF6WRgu zr)%r#$|9P%B`mpXQBJp6(+6*(1(X3byUXDT?m5tsHuA&^%_8n7r)aIs7^9#r44jX8 zxZfHW*_oNyvw9jXCC1C zA``TRrnR9CziN_S+ubLe3#rjG0r&@aeAOL^g3vIGC^g2pZjt#$2U_e9JZ|ARVjt6aoQw@A0Tkc}*j3 zCE&OCb3~bg72HP<25#KC$y}Nm7G{gJX7(Ft-=47u`;3V`*0zz4o^unHNXcnhPS07e zCwfYllisxG9#_0FKL(!2)Do!592d}1i@e)p`8lFKj3fA*v)^J#(uJ+56WWY$)7p+8 zFT+NE>W>K=#?s)4Tt_g!BiD!1H^1{@>P8^B_u>8Y-rwFy6Ave#?P%W3K8P^Y(%lIE zg-<_0t9fzQCT_^MsF+EoqMEu?6hS!WCuh^b7ch>DO;^9*9RN^MFIkwb;JJaT%jyjH zxP=9=n$yS-Vq3A4bm|^chn0aNr*&DBH)!$rWCX&M<(-IUQ7Kc2n^FsIKJHJu#52HQ z#NJUP_p4cm>WQ7HLn!Se;Xyx|>NQ+Z*jds(@H&8=9WxKh8>{K&=SY>hZ@_ zSSK!A9U#sEH(AJR6F9YBfi;eJo7vf^w8Hw9N0T2o#7%4;H$NQmLLneO@aW+z0t7-{ z2bqF6pWx=@;nWCiUL#}CJZFXMu}#Iw6l_(Kk3dOk-6mYR3e6n%y}ruzTwZ}^ZFz&EpNG;v zedmW{VEQVSf(3*Oa{ZI_$FnMQ<5htz&Z$`KE_A2ifqr5Px)63re1I^fYkPsy@)}79 zV#;vHC~x4@e{SSm2dwm@v|psKKJK9rg4noUs)-R$GS-Cx5;+JFlMS(Rk$PVu+=wq3 z>oHthq^w>G_X>!Zk?d}$2Hok-6HE+DKet%*EE1_E-!;M405A!keyV<|05otoOkq+f 
zUNRYO2H}>$=Av*B8xD&ws-$h23&tsvHW?0{p%*obOH-|8qi~nu0kwKU zQ%)w7ZH((|JpA_MW+aliSw%m+O#g%Np)X#uFTwD*;qlwiZ*aEvEJrZ5l83jQ^QmR% zItfDYK$2?4tRSH96TZguMQw|WZD!6h!)?9puC`^pR2!duC$=mEs2cj}hVCXW0}>20 z16b0#z6pDo5g9L^lR4mu(`bL}7rz~kKMh{bj9i|7 z=g&>vo90xHY&qbh&nSReim!p!5@_p=C*zSkhXIRdj*Axd!%)P((6j)#p&7VrJb`_Xfwa6#)H>ROEwT}KhJ(0X1-|O8 zq8nG>wqub1#*=*Tvd^|Fegwd{=lC77^TfaSE`5QuV7b|QS6RN7t$k2z5GB$*x_+q2 zd3LjQZP-t*#cC9mv7U38fH@_ckGx*3oqqA38+mlA)Q7x2xo^k9yx_e1wl8^oIp6t} z>&dM;7VmqH>pURy>ezhJ07l_twLWE^f&rzx^e^vm>*X4TL&&v7f*+NRp$Ch(#bhQ%yF!dKTJ8Eaq83mtpXmO`y`CE~V# zq~&wbT%KM>{{@C-_dG)x?v@Csa4)QSUSMqtZ($KXOY74QX`ffQ75Dj>%hF)3i`uv; zj4bJWzuBMgoAw%XIpP(f9_tcEaX(-lLXe`oLPo}Y#>Rc;Bd;$y5jFC`dV=)`Mn$b( z6C0!O-YssYEpp|rtxcqH7^juR6=E~?)9%Rz?Lg}tFm!AiR~L4uUXdt)9OjAomcqE{;W?uvsO~2XJQwPJ@K)_)&8d$7P#yt>A@YQeJ zgn_8Um!Gpn;1hJEg`dW1v6Uvin8G#5!_A;AWE=bT+b;q8!}RgQ0|HFfpc&9uve-y# zJO-HIIl~CGC?OFrJV?w`s9IQJFW~c-dm8Iy#0=E+2qwo;-Jy30RncVNhDKry&yv95=wIB#$ zaX32LqU~*tU)8WS!$XGn$v6F;^^o8Iu06}N~4b^(c3u{O^(A3$G`mV6%u@oI{5!r+)c6JDWU5N>BXMyGF1u zApotC@o05p0dmRg;o`Htw1sOLb3|-~yvhOe$rPqD4_8=x6=~oK2XhTy;qbN5G`aEx zSdWE*!zfM6asr+5AhqHC_S8U@rU#Gik%?ywn2{Vr8erBVEFdFSFCa9*F#qjGX=-XU zwe=ClM#e7ZwQ9d5UFf?+wyt)Bm6izbE^>y1LGfh%#9gM2cn#8W!k2UqL|cKY9DxMs zU|n}Q1Yfn{XzFVXVn#34!{K>fJbuW2c!=ew7r_CSIs$L6Ub}(o8^`fJ+U4vGtZ~4p zgFx;`l0jMB3Og>=qzfF#rKRG*hr~Ew$uJ2)J-FQkqyED6E9upzhnTZ%&ZYP~-5uog_jJo`ut9BC%S1Dlx zqH;BwjFT$CWXGsWlML4ui+GbXVES%05pC}V)Xl`3v;6?`qRZ5U!Ybh>n3j{p~a4M7S4$1zZy!iHwuz&n`< zI6Mh~8^75w?7L>pzya`3hQo2^vHINaX)5Ws_z+pC6gaG#8p_YuR~aZ7FiXNve9J}nnqlL!Q7>&fX5lZMSukJEu*i_g|BIWxti!S9@?SnD90FP~ zUwOQd6^Cck%FFy8f-h>vY{dE;mt#~@W>QhElA7|6@JcUJ$K9=i^^~HD;614^e7%UoNv|b^SUm>heFQa?X z_k7pqz!%!{6=SoE*LXYM!CZ0AtUJy<`%JWvtIx0xW{L8?0{^+`^B`ZoR;oXK&_AEG zH`Ek3=yh6+D^Z5Nx_O4ihqC24i+lX%lv-Z8WL?q&Yx6W+=SVsl7os0NWnEb~)78+I zxGI%%Jvv6`Kp|biZmfV=f@J>fhXRHtVCpWUw%_rZDo-Ve7RMM3<5l1g?txQ0BV7*0 z8QifRGmZJ%wP61(>q=2eEiD)RdH(We3_^GXsCdGG^Kw6MotK{nT2UV#`FVR}-7#6h zi<#$Hd+L-#8v-ccQ<^K=;m@*?8);p%Zx8*-J(V=#qJO@4B>EI%Vq94O5`T01HX&UO zK@1r09c_hM=Ef$0@JG|k*FNbfIsFywK4k)S|`*a2vN4w8pq)DbSr9$n3*lr!}I~j|pHY!c}iBAYGxEmhg5EP2K%)a~_p@Je9Pp3R z4n9rCocOo4wvvVh2GjT6`d0eN?Hj3<1+s!!a&3Jlz4FaB(x3k8|2jSMd|!J1H$P2( z{*(U~Gs7wi>}vY>qq~s@bcY4+(auUXUF4<5DIEVR(wprOjEfm!34h<|@zL~?zx?;< zH}AiP|1UmCyd@eE@v5G_;diCl{*Y^r=nKfI)I;^0lq>Ioa1V&n+Fgty+=POLb2D3m z;#LEIcQ5hzP%FY#mQ`+Y98yTBww!IOJy$k6j*CJH=xTJK$j!Q!y5KU0hl)V%)ZKw+ zr1;hwFMde*l`!PCrMgbKyHxDVQEM(5JCNH!=~+1@&FE4M`{e%RE84U_VlzF-yC(gi z{_-o%pQkPBEbp|A&0RWFx9rIJUR2xJ(k|wG{|uPehax|IG{$alM1DF>#GqUoJANlo z#5#EIpmx$MBXxFA!Fka?iH+T4c$u4}pJnbS_s#M7&s{?}(-Nn#@j2&MJfQ=DBT5)@5~*Id4)wbA~KRIR?O)MmgDJl3zxarl^AqEfHJcD@7prTz$WC8lPQX>Y?# zoWiNthK}k!r_-S?@p(@NOZ`9-k@WM(zjA+8^7p3(=_mS-L|4#*|1Mh1NHFLqj*IV`yPFBPGdrHA5>=x zJLgrim2h`m$o+YgrZ!1iZdOx@T&Zk7QKt}@+qxEY^aae2H-T54$2%(xF|N{lf%4gr~eQhzov#GEq7fP%oWVF&Fi;8UVA zUh4$(tLGJaCIL_N?;&6sDTF=--I%*eh5-7OU1iB2oUy84lyT zRfFRh9t@akNvS|aKq*0dv|pw_vHz;*g>G+0ce?e$^C5*V&oARovkC(t_lnc}@;nj+ z^|9W>gJTX1(G0Lm-pxJQ3fzftNYhz!;KA|nUd~lWoBJF`dw%c&CLSDtmzUV+7l13A zKd=?v4Fs0hm#Qi>PW-8s7k}l%>zm21*h9a8vr<9bQ485uYEnC=TGbu#Cg`9SKZ51e z`7|~$8d0NJ!Xnguivrs z!@~~_aFW)k`w%A5&xJ8^lFlPVZKCR2KjWk}{ENu<=~2YHWZBrm8GMtyu@6S7nP7rV zRJ~_7%IhetYIc9yphS5P^ZdfQ;Mc-w2dpPjs*vhwxN9&?j(wUQ&W@)wQUV$Hq2$-n z1M`Ozps&#CG*Z+I%p=b8t=-HU8pJ~wR{g|Qmr2l$N4hFoC52;52S%F5eCR)70FTl+ z@HOS4`B&@l-D>MJAO~idLk9Lh8`pLfK2LRRXi~{z`Ps*h(*4h7Sm(8*FX}|P0FN2h z+{An^*F~hCP0UHUY@EAHsvS;zt3txikEBTIoBsaiz&U-K#Zi1+n2+lN=hM9p-b0PN z8f*WRSHF^mIF@-H<}VTL-glt@p5ym3x1T0IC&C-)>xgx(R$=J;0D%Gc?~w9wkKCx+ zIIEu#c+oS|8|!pwg@X+6ZDX+@73GWtt(a+)x(civm)$H(b$54f`oVYJLQ-5wcOO1X 
z6$Gc4s%m!+e>=HC{n?@>8_A>ApVWfbG zDiJWE>quJe)D7d$s8hy;OO?KS{PNdW?|bR-7n4Z! ziS{s4us-xVvtL*R3B)0|fV!3ta5ll)$;k=m05d}Y=~F#{25qhFsj7GI{JYslCaZ`- zZ4c#?;MmhM#CuausE&cG#q}bHA#st&Lt8@9Gq0U?(XtJft}v+*D7n1T zRj4}Ri+)C7hM+x}C<@S1H!sG>pUjU;q5bgz^GcbLf}t~9;UW`~?eSG?8tzPGrrCu@ zCwR)f=k{cVsW#vgU%-4BA^j`n=T@-I?ndN~DL^E$5;LRyR##1(0MUT)vR|r*Tv(1z zCcyr=q4_GSmEFK4fWB(!o6gCOAvSHh2Fx9bn~~#oQ;Zy#FdAh+MIRhDQ=f5e^&2SX z7o;jKI9?z3hcEkL8(yPk-vK+v?AA;FXjHgmvr;w__}e*lY~`2zkG^stn-%id%)=)` zLPIoPYQs@5=RPf7`>cKOG#J&qjMW7WMD)$qkY)}A!gr0*x|c#>Npfe z6%b<+<=HY$`a9#M?TI@!U{C0i{p4A}6k3=)G09Myj!gi`vW09?yvrHxxZhnQCYsRO2)m?~&Z zQaY*&U70N{oC36UwL0 z=Wz=MzYBlYalU8Wv8JPu3_G89&Md>-(pz}Mh0^bG9pyHx-|rF~KkBCW7wf~Zao?^uYK5=Bi|DLSvwn+by2S0kwU~cyP}*vFj(MYP$7#OSCN@9K z08XL8sOn(xMVvNDI6sdAH^PC675mbHAm>2>&Vx*y%w5#&b9^^foa1nQT{q?XdERVa z_~rgrq4Wv*6*V{qj04>IeL6#D1#rN~0#9HQ&+uQ&9rgKe0QyV1UA|<|z*q0bTv?BE zV1r!a9c==^OioMn@tkF(O|Dl)AL?&8| z;W&(~%y$h(uMYHF2%n*Q_wE5V7^QmD;y7~e;e>n2#@gJ%0UD%}+1;j2Hvc$+o5X`; z^x&`x={`&-d!DNDI-I84+ImP0*OB^31g@Yq#b)e*JjfyRr!>Djr~iWUV4l}Koa=ob zvCd8TQ44dGw--I>H2=7FlAeeZHHY|E=*!c>roI6)u}Pla?ai&cze(Gje+2vL8lpi7 zi175vN{(7}FV%Mu1tr>GoblW60cFqGm`8Jn+)ejjRepJTUw~IIRn3 z^d=5DsE1+QGuJ2c)QC`_=GWZm=DQ}z6eGg7i-^vh1xz*w8BH8j>w4TH0`?43%n1Tl zOFPc-nER^maj7{WG8m?^n)wWPBR~W_1T@X;{RTSBJX(RNL+Hb7(!{zpr4(v!%5!?% zFnk&jw8C&7m>rWl*8197O&=w{#h(om%f)AgQhRp?xJ_Q&r3D03 z1YvOH6f=|xz~&BwEO1xF=RSU<%k%5$jc^Jxxv zYD_DKvjj6#rIGWO(sQ@32QPMIq#q%Cj+4)(SfBXo;BQoefOt~7fieY}i^l&9MMBRI zif|8A7~M;&Q{W$`^_{RDcK6Af%Tdx=P;Ni_A}6vvL-qlVDMx$X-rkKhU^jj8$?w7+ za0y?XMSQUK*n6e>uYc6ggl4T>+Z$oUzIO~MUDi%rX;jbj6`5dCFOjg3|&-3>Hqq3>ZSaI9Q& zOf*Yz#ZrfnHy#&e2r-1i=O;l#95N#ZCeLXZv41`r&qEB{w}ex!$DIc_QN~v=2(+#9 zrTNTG8N27Y&rQy=IhaF~6bw4$wKN5Dr)__S#V4B(46pzOCZV;aExOP3(s(KBc9^*i zzSD8$5$5CWXj}&C_x?6 z$)}o<@XfIglr3-Mfiup7?X%GBf56g-5l#S1s5ATVoUPeb{G?3y4#^N^&ty! z#V=7Ozh&gSmKi#Q>Ri8Y$j@5(vnCEA0rwrp@$`yE!_0GFUkktEUm4=Bz)YsUj+lG z#@n}bO9yIVK66xf^pA2XCA?>^r5iCX;C~hZvLLFJf-8(edPu$hKU44ZWodF8_+{B! z?w0pe-PPVrPmk&G7y|($015;Rc*xy7;EH@$9`0F?T8Y}DSIvw513l?Uk2*(`IU*^p zmRR5r1VD%w0@E{Xr(JcKF7MsFW%c_--fw2Und*D*_vM!v85tQF5g8GQaOOREbykpS za0@o`JjTddjw|K;6Nk~Sm?yzBEWrDWDe(C`e6tklYwuWvRypCaPUOLW!EqB4h4O8^ zt|_o-{_GP4DYFmXcTPL5rf3T#06fJ#E9bIq>0O?!A*qTFkWqk$?eTd+K$>)jJv!4i zZOA)XFryj8sehokiibPTrc38uU``yRXVXtomG!H&_Or2*m4Q?le;`iE|iTh!-96bH>< zfYMTHu4T)Vn1A9(W6~yx<@GaKMG@G+|0Z_Kz_|yG7|9!cw)DRNr|%<_0h?vS16Q#= zGp~9_5N5HALuL35VWr(`$Dz0>z52!nsT0fS36A$8L#G)G(v6U=+D=`T{XE(jXD4Z@qmZ z-F)+A`o?>&AzaQOOwN*;2e%K3YgjVP(b#w|0uX)lWHl5baj}g>v`XYArhI4yBPJ2e z9v4jah^oW^9$~(M#V!Vp#!;oPZj8L@Dx60!yHx3^L@NAHkMG-Di~JTtr<9lFDSXJs_rxp#nFn8+Scm< zPx>OE*dpY)Be2J=(Fxxj1`U3-97J>`2t}ljr!4Av?$mmZlfr2P(4cj-ZUTwGqjVUsUYA z!-qf=<;b(XwUm}=V~CuWb@Y7^1x8+-7*qYij_Y(#pk9u_PAIc;oHX{! 
zpUtE#=IfPfSJT*#QTOHpEwP(p4qG??uqhAq_l)4E5AD|P;p_Hd1b^JScZ3{fF~5HI=vhx#ze; zbP|`_)84RM7XXh5-Qh$E7dzpW=~4|s$&I`7?ggU+cGFWe^pt zm_R&K2~nVN!4zEK$<|tHv67585h=qGb>c6>4Z-^kzg_6~mY;h!@D4oU4V)I2?^=hj zE93+mf>!%4>gIjx_wqBoi!?5Ugn&#K`~$V(Sz*vVgup^sY0N4Lg#rf>3b=!Y3wYvve$;xGKLy110nR7%MMNod^bwe% zz$uM+KXyM@oE0Ed++?tYbF@_l9#FoVx6YB68_=?N_nq8^$l!(_HF&%q;}z}K=!@^s zry7E+!{6eefK_wEG>Y~`?vuw;opE6d?_h?I6vwnZ)X9b>A#6!sDc8s02cIvh8$~3*lU%rIXlqwi%EF)rRBT^UfX(YXF zd0v+DJztfl2P}N56nN9;;z4+dd1!f26MYf(_?_Fx<@3F)JlA02BjYpU*Ey?lNXZ&a z)yh$E2>r~Ftl-fQ;~a(|4H)uJ6$>iUN7_%zQ%!ugeTBzI~guVRgI>^%~=)ZT=3n$#!2ZS?IX zE(>7+$I374<^^=;DR@64AisAD2gD=G<<2`sfjTeV*o~C)4d)ciCyc z;>(Ug1Bu;tag||{B>Fo_bmQ-T=exM-45h~M>9mdz_ju+Gc`Mg}*Z>5SQS^=yLXra2 z5j!c21c-tjuqeAzZ^X1wwL5$GG_OBNbNp2r5Yp%H7f0YU$igw(t}dt1Gn~pvN};_i z*IQhQ3?kU3C4_QMic*GPLjvIpI>UnAf)F-8scRKheS$p>kR3evtlTn!u-SuQdN-}% ziqX@4I{n*!{m1FO_ufsXhC5k@m#_evkGT|Zn_^0z`8VIWneKmjpGfPyFcR(}8;zbF zWSpc+g9=R4ZzL}kT3j`Bt*&mtR|#I2dV;GDW86K2@ejZJGWa)mWS0PmE}~z}3)%$S zyPQVlDQ(SIaMwv4Gjf?ZkHuHMV08LH1>+5V`TOix%s*U8+XO(wk;(LD8^8Q2N87d& zu|3k)N96l7ZXzl&oEF)K3s^7iKqy8_Prwm0=+3a?qrz}Np3tQ^qMxz&tI#vznVBQwv4jz5_3ftwk_P*b$>sq)~B~#f1CJ$<@67~ z{5$kp3=8Z~&9P&owcYpS)n;?3Lve5LR6la7?yj>V)CN}I$y=V5$F9-vS%iM(*X$#5 z&~pAxGsjEoaZ)BX{Ga`sKTTK8pHDyg`~R4J_zyovslga9$8Mh~jqo@b!j;e5yt`}0 zGH~=a^WeY#@V`sfI0WKv{^l>!Q}PWTJBM9JoHHoGJ$R3-ae~^Jmj>Bcc#cb*E}+e6 zV~&#paa-$W_tD*n19)KX_yFUObty2s8-oK{L*4Wr}dK9q@SDGC2z!o+upRF%oQ zfGA}8>2rzBNfnWazz`D&EkuSJQKx)vBTSTtY%{l=_3mRyQE*U@k@0#j1Y{VeR%wO$ z9O*_Gh~bD}76nf`DT9_M6gEtq5sBaxB3V&YF7&r_fUMs-ea{8YXR%1YEL_YK{TTxB7}h0))(X}xg)dMH9L0PCiu z+U;Ol!L-@_}y@~~49*r9kVk-+v1GwxZfwYTXh3%pZws%#`VrPXq#f^)< z+xymI|NPLG*Gr9_)uFbq=TGq%{bVBL@(Oub@$-rO2-mCVjU#3;>gT>ngv{5Qw&Onv zHfqzn7z$Jtpd7a-z`MXip==T46~7CdSdRPoSKxtnEFZY6efPIl)W(=9^tf;YJ;IQ! zBfFKU0EuTx@=Bo?6!5sJE09>Xe-=^*{QSA^@qbv|YwhtN!2IK)h2=w`AOP_n9@Y97 zKPY3ppn2unQ zx|th}gDwGTWbz4$2VTUDJ@L;g`pBnX<(-%Sur?N!pbQEeJ>{GIupI$MZH;Y z21Es4%7!pcpNbakt5+_c;qkW%vh?p|nOuzez67(+90!H0xS}5gJzJ;u^Y5r5o^#(h z2!4WoC}Ufu-tnI0oy!G{XMvkKz@6{OtL(S$I_^#?dBFfjSyBM2CC;5x(}^`%z_m0g zKx=(vKr&46$zwX>*07PqL7%m51U~~3;kR${YVj9*QV_WxMgZ`W^EYQa<#C_@a5?T; zvK+r+e8oXL3x4F-NoU^l3!gDA`6mT!#xCUdt~H z06214$ATjKqw7uhnnQI6A=>9AVAVg^uvy5oTd<*xOsm;FonE z>Sc~GPln0oX}X@St^v}8^bkylr!)k2=?$&Njp>m9MUm+fo z7{Vs)fJgZ`fCg?H|KQ22E#{c)CO4$DZIQ=tmHdGp}nw2)} z&>%Yur%w%mljbz}bSnCwHCueHnkXK=6=k{M2}1JA(&ATAugTv`8KH#Zy zgdnq7{7e z7q>ae7hw;50XHXvK&|`W&?xf~V^kjSgfCJbffw09WlI4{c?CYtUlTIHG0Y2ZnBZrQ z*VYmODNA!F3f~|J+J|t*=x!&7bon_x?|pe#|~=T$sxjPI%43AKxKsJ>#6REkG?i8IYo41(Be)XXdH9$ducSL7Eswce2hSofq6Dr?;x^Sp z&QgpH=ZH_#9cB-IdTG#DAa}r=Q~hVzNgzO1h4$*`mGpbx{C4`_owu+sBh2E4Bk%MS zL+7n-Sm8ASpKD~NxdqplO`_S|(Q1O;8sk(x(lw!|YBkq@q#L(m=6qIee2DO`|Gs%t z-T5i88h!sFW*s)IDb%9Y!5)SJzI^41{Yz3ci z0|Mq)+$n3dIqeN!d~ zegHqcLKD>@1cL?7AwUQ-uzEGncU>)9*z3R_2##HVj6eN5b7kUJ;^funqp6@`0ghi< zuwsa!j6VO~0OJ9Vu!j4Bw8SNSgzzkWxnf)>9Xm(7tHnrxNBo@tbEJ&1wx~9bw>cPn z&n?*yK6z#z{R>46`imVQkwzb~5H0+UQ0&-};6a<8IJZp3>rmex`G z%a!HdwjJfDSNbh{%EglmFsH?`-VA|?b72v+ZJR8FYU>hjwr3h4KZD+CGaR|y_#HKT@lW8$-hBzi_!=Vvj~-?5ch1tv6FI?osvqnxa-Z(%7m(fJ92+@=tZrB=a$@Jr@SYftMY3+En12oP7>Ga3nOA_|1*Q^zhM4nw*+p?Zw9) zIGRX)ZyV-yJT9TE50G>no-{W{{>=F`yd+dIpgYE_4@F7KzvFaZ00=bQz{{Nj@Jig5 z4hZNiVc}n7hhv7=i!lOz%RL$h8CzU^T8Rh#|NK2XN)t#+;-|ek37Yi zZE*6wb&Lt@uBZOJ;JUCYAWt3-+BLoNY+1t;Mz0T87^e_x}@II z%nyX|rDp_IZ$fuGzi?_KjhyKZ`dnl;;F#Sg`Ba&}+47LPRVX2}v9-9DUcPcRojui@ zR_cF}=J01f+-XK(ljm^200#xQo95p>u1&hQ8B=N2pnAqazH)TnIld47;yBr#N9GB3*4v9Rr8Owhw^z7WPY_bb*3zsbLKXIuijb9l^Uw!{K zQQ+G-;DA8+84f>vKqYTD=6YWuj) z9QmE!{=L-4ZpYvK=O3jxG6v};T~!{&cfX~HT@3o!%#J{ZZlImLX=!FA)pz69e`+T^ 
zdGMSgu5~A)|ME%xQ?~U4NnMdTJ4xcvew?noI>b&0>8iwO1BRoMHQWTZ(;9)<14Afy z@Sj$c(QZzy+hWJ1Lf%nbgBnhszVM+8S>GZxOqqpN9|J2D(iNa$OFFC?2+ateun`8$ z^X@r417RZanL&`gr=?r1Q~@IgUqQJrYniqaBDMi#-Y{6VHX_pn^|^TEEJT!|sFT&x5lxo+be>ha-H$rRhh>hlDW*eCm7;Hz#MTJ)rTUIV=n6JsT` zOun$_^4;f75QS<9hdc5oF7U{IPE_B~k{dYWNhn!7Yf_2eo#=xv(zg)qXi%%8@KQn4 z>34Cb0O4hE8t0x<#4$rajpF>GjAi4wu=1a-Rx}Wd#_{&bf?%#!IHP@V8*UBY3`C(Z zGd8>zTV(W03UgZ-i8Sa<#WcpvArlV9A`4wA1OE1bPx8fZ6u{H9+?V`sem{g0@E?Vs z#i*0_6@UW=*6Gv0haJunHcE2OhrAX7fcOs_@>5|hS||XCBEaZ7@gzQ?UcZF`!1#;T z$kj)?p`=i`3X%aH$q{M;J7cYy+RT2p11&}fUVZAXfw({g*BXo08IVz9~$62u*aXT z`swpfjCkgFc`p=FMnAtIEYc8jOE_sa6yjPhD9$^IPR?hkGhcPWOYsaM@OwMP*l&4U zqa^LxzGGUzAMnvu%#|7(A&BP2;8*Ud<}ry|Rs`@d{{sHpS88A(A|EJqZ{i|ohA*x2 zZCL4q!0~Hg7a(>HXwU8Yyo&%V03L=4?rXUXK1Unb*g%c&v*3Zi9`qADI^#VnBhraI z65q}b+*G8UnAhUNeuIfmSY@z z(aR62la}P+MO;!=f4906Kq<$x3}Z02|yBYWfS8uwF_Ae1|7hPQ)VU*Buq< z62U~}kroj=a4*5?yUTiZKmep}Iftw%L1)Z|sb>!{(tL_wI2)F8$EOpeV1VOpr|`qw zM6mG0HpeBD5`-JqH4RP8C=qStHr!Z7)1~X@(pF^&%PKo?z}JR5fq|u3P|YW8;N$4P zNc!OBD;!?1lBTd$?;@1vc_p5m>&GYy%jwkEP#WqRAZCH!P!t7s1Iq0^tmk;LqXa~z zIs~UO*7$=%+@m-&U}*ee`iuYe`&fUwNJMd)b%AVuBy(?W!7YevL7uv`jIgpag}@g< zsH~~D7VRJmmpDo6`U?|S&=;|)?gkwW4vnOL|0jQlaMzgDvCQu7ZxLiWjQ}gMHlplK|aWG}jd#eE84O5s@;A-Le_)i8`P_6V8^bsgY#V5BAW4D;8GAc}yq zf!Bd2jrENZmq@~Nt?tK%C_OXGp;+&5b>W1npfv`*r=J5-7-xA}KW-@JN6v%{asSa1 zf{$^LAkf>L7zmdX6$j#qzIZyN6( zNI&`Lmn6h@%^(HNCSFLAQqi4v*LslDh9z4jv<8RTW2qtFd0Gin&-DW+) zs@DGXRg_W`Hszkidfau$K&A1jTxK_nr21Q1v$*cSPEc@L827GGaDdgl5#e>6gEu^0 zJ9YzA>Nuj=U6;MpeFBeN2MEAF*NGxsk4w>X`s@Go2OQ!1|3S+wktrzFM0QX%me-jB z^cVaZv$I5UfC_Wmn2Lr50*`T5`t0eg^pBr@1U+!_US*kMe95Y`Zy2AgABVX$) z zL5bd527a~@O3h_<$(G?^N9<_fYR!r56NwxOYa=+uWz3`{V1Xxj-R z4>n7Tg0`vYDliq9v`oyUQ&#E%;X-R!|82xdul3)piZ&J|ksZm7ncQ9q##&NhVT?O6 zY5t>}b-J~oP_KJ|9rLuCB1y6tUu53?8QJbyp(3DQlNT=#!TnfgC=+l|Z?QNUEagMr z_a9;RORk4cR}v<<3x)!^&!a6}9xPj1P=cZJ-C(ZR#-{=WFzXS6A$V99cmsU_n~Sl7 z74Z$+)a8oq(eK=z3|$2ZaCwb;;-Bx^Xz}+ylR3>ZiO?FUH!Q9&x7eMa4L^M^h=KML zB*k4`h$W8P59_lJT7F|eY7-mZ6aFI|&0HzU!bw`TV_rBa|{}C*T4_dpOxnNtS4|; zgV*~MjlSq(ThL^IbKmz0IE0rra=luQGZU8<3OM}q3ZabNxXa;l@#i=x-1$tQiOxiS z^LW^X^(*M)akEW@M}tn6msg_gP}XWRshgjQv^^3guc9yFNaij0(qGKi=nHrgpMY5d zmESoLfWKw(wxO~bG$lQ8*;c+X4)U{FU9g(K3l+Q5rd8cbdKVj(&6iuF&|Ba_dUJf4 zj{%50LK?~?H9&Y3?dIFG4NkaKGMdP1hY`*AMreVPgywmrX z+rX#wPFSQ*X)Dtxiz8#_9ATCKMs7pbm9f#WNH8z`XatbYxe!RaG5@7S-YqQfu}ew4 z)~CCdFu^HpFzUBm72iCD@&|F9=e+l#ApcsoaPU94w40QRe{(&V=7c{ZLM2c^J1aio z%6>{gdg;X|jh#j8t}28nEy3P5sb!v5I+mrFhrSwTCj`JT2qeYM%0CEQYglldNm5 zQHNVBZ1S6?R8QJVnoB0qSOhk(AQ0bpWpeH|2lo;;^ZfZ!`+Spg!TI4rnIq8C5~Vzz5VO&r=AhU zp?(oO0VQ~<;@(l=Sm@nFc1ED1CcFiVODOksqipmLTtdHM>8v_lTX@lGlQ)&f}n~b@2CmW)Y>dKE3+pH&C#kQG#|GT0LkAVSDLu z`uOQj(^kt8fwkq-j9b|jC$4QQkq&hc;haoCgA>M85TCFMZ)WT~GCFvb2_b>s)TEu| zVQ_KG&J0#+&J0*sTf>!xbh;RJ!+#xDITdAh6xI>q3yt9#YK6^NYA~}@q+`R|wFLBPpU>fc}%lP%Lsnqke|r~MK^EXHlQ`#3f}h=6a* z%^Euk%Q1J%kkpO>3Ac;+q_ITemhUtYQ~rP1F4=5qyl{kQC}ZVy`jvyh`E7ZpWa3JDIl3zcq3(M_5#=e3;M zMMG^_ZtueA_L8nDE;PR5GZ!Ye^a{Ug2hRmaA;H)I*ohzogFm6YY(eD)eF4Amekd9I z4nY)D#{vz(iK84a3OEh{Fva3StpE^pD>YcZgWJ1`JNX`ddhSA zCqcOQ#L@}xl7U6gsc-{t&cvdSo-+yx!75(OCbdP(Mb1c1)g&-l}75>DrpZ<=q01nU*c>KHq3W#&Zt9W1L z;Jk1__uWDmmqzT1!ja6hjXB~ovjc=e0j%P{z7=f-oQ$9E6;~(>)L}YGEu9sFAR+cW zuRPv0+T(Z39fefzVHxL~G!pGN$9dQG%@*cd3BPpi%A|9dT-I`dFZ$tUzH*y>&+R)G zP;z)&%!#6G#ztTy`d8qqcvt!=aOFKq+8244c=1lqG~*RNEfmd1NejpOJ`-OVo?4-p z0~trYlH1ArQnX1)xyAg<=fWMdZ_~6LIP)9yAFzuz$2c0y?vPpV&4PG#1M(0Ute?1{ zh=1WVo10l|Tr8~D1=VM^Da^WFYLtm|q44nn7TW|7vG9uz+mbhAf!IDP_}5^wQ#>u` zCdQsS*5MdOj7XI6J;9=G`kQf{>DiqEe+#p9$A5lWwe7|i+w%#qQ&{D#XfujY#@I1= z!{_tFv2kL1`9YbY?=fcH6P$Qvp@1SjrDgrEb#2lZvyau^6B|8}S<9(&*|t0Y$bNQ_ 
zDaCYxM_BdtJW80kV(LhPE#H0XRZ=|nq}%r&l9!)APf~hrtr_e`P${k!k|RvPOkW#!f{((|MHsiA|!7VY2-_nT&vbFFw4g#T?^bqsK=vTjxp zW>us+N%>btYRr97ZU!#s-w_83X))g<7ysrVLL7qQHa_$U%BGGr-R};EOdV5CJ^t8j z9meu-yeK~Lzx~bk@O|D)|M^EhOm~0f&IT?YocEKONj-?~i8$UOz;TVhM>817Yu(K_ zf8|WNdSfD;A~1IctM)vW&~Ds{-hbm#ny)@c_g9}FRC7ug{_;~Z1T5~Z#n^(}G=L@l zJeg2VkBtKxE;A+0Bxvcu6{;JN1XpArs$!k4RNM_A7!@~(dV*pV5EXhoE_RtrK4yzK zMq!AZPOQv*9J|{=mLdZ)H*ss(nk69#N?oio5*GjOA;{O^#?!^v)#Drg`0jL?oWu&u z&V|C2N2F`W+~t(DR)S{R@u#kAll?{^j_flb&)^z!*g)(AnlA(nRI!E}} z*TIYDIsajMjRV2D4fuu-87EI*bj>dJV#vb+U3aLDoJK?932x6&#jOB2d{>?aX_;+K4ltT{1W6R@02jl7!y@&9*ZB zvm6~qb$nJu#4Ud?{w_X=g>7ccX0O6ieWD%;zzunldPO>hU615-NU&eLjt@r;$ zdiU*D(=5ASQ%jGCkw8&u@%UU^f^fBZ^kA0spR`F}dq)?t0Ri;$Gn`ohOyxmhKyZQD zT!c>{1~t@kco+)tMit9CICs965ys`wy#yXVo_oqXs-$mzkprc2H5F^k5=UtG>_u0M0^w}*GV%%UZUh7L^7s;?j!xO!y)9cq?VaKcw z`33ocF*zUzczJr3xrU1yV^dea}>ypjtO)YZ(TI4|nKV`0ErPY$r0C@{7WM4@(B_;p9{6hLZ0a}ptwMvAr& z@OW3ItzZ?FA%#|c`@VkTT6iUB-(k83QZ9K-V%f^gP2WX0bfJ>zd%3ef)gcV%-l2b~ zgaJ#SxB`z0s7%4(%OaFS7H!dpI1br{x?JS*5^l|QD33+)Sy+^P>b*F|mv8hwirZKJ zY|G9D&f{6G%i9brWyLGMYK&Rjiyvvge!I9S7)d|UQx@7#Y#bYL#Gk^8gdbMWnjqAS zf@hloAU!Bxh(GBgloxh86bKy~@4Gb_g1InrMS0%_HorTzTG+$79wnd`>4@!AKdmGB zZd=r0UHP3{U+onu446F}C*GiLU(R!=)}FX?jsc&7hg8*sf1pJp0~Bu_!YmYEgw6jN z@FNyK!9auyc#EsRlb_ZpMAlne_De-$o1kD15z0!BPh*s#m=RR!-Gx!kZ=tJR(aon=_;6o9IE^EF-KCb6JwOb-z|rwW;R*3cTBJy|0yVlyYrl19?(C-|v1GhdnzmX8tUOpn-s_rOpW zuEd^#OTcI=?n)*U>Fw^NJMJK~q#GBn;WtlcF}pMkWH8byuhmk4`2Zy+)fmG8ljJPkz?GeM*GjCeFB0PX4a`2Gdxycw5HUC|NG+9Dhh!4@)>u?zGRwA;4}uH z5C8J|R+NxFVk>Y3D($k?0ZAvvFWawfTo7Np{%U&jwRhN^noIwD|L^HTHFXSPT)al$Q>Dm22Ue|V?D2MNSeIy%H^~0WE56* z(cB05wQszK!bG~v&Ymm2~B`Q>l(VK6^Hwp4@&49>>Ft+xXH((x2Y> zzMX>K@WE$E+p;WO@rs02?6#vUZnHR(n1W%^D zt-WP-{+<)t!MPG^t;})Gdw`JIaG64Mq`Z7hV}NI$py*OZ^Pv3mm=vt5shxw-cGf4; zUGV3T-g4I_cF2xVRHryRuH4a*#?SRr7i-SQ$i#;dK6M6in-Pp;%nI^qnyyfF03B;K zge=@OQ6R6G+z=QCw~1{PHMK4@PzTCg1+CU65Uj$tfoeu3I~g57>k8HnA{5fRTvVDM zR4aF}ic?H!F$+JG23D<}x+^#_6_9joh@fNYQH*uba*?pcT&p#MAbuES&h)ht%0|@7 zBRlU$ja$SH0(JSB1Kud>qQ+kclPr26hyh~`DrOL<&xu*I=r)=06LM+K8b5J_Q!r6)zc%ECA#;^S|!csE8>fMr;HFpwM7Ha(|s%SY|C;gE#fx%DJ=XFx6+8h zk=CRbBWTPOQ>CftfBMe{9*K*<~eR%ReP>X>2C!-MA z$I)O7K#&z%kNk{)8Ia(U*g~K}tI`anWkO_2=rUR(RvkzgnT$kw5y48wO6I;O5=DN& zNCH98D<-#3sV^)xZlMB`TL&45iaQxTf$^))?VKx>+feyAoo3R+Rl6m^B@eHtf;K|4 z;Qd^;#Kc4E^MlXOV4Xw~7WH!z9QhE75&jmrkJ%F7%^(hO;~!;ny-_^Z#jDm~RSul> z_$huO&;YZjX=Pk(7m#pP*H@4>mT)v*#_Y#&!1&Ra{NE#ASJ$5-u`I!HNa(%oX?IAurPENN4XbOjO{1vL0GU?bd&Na_B zX3{p=MSUAmJgbvTUM;2tbA1yg)VX<*tTWl#Jihqs#Wc`2!1e;$7QnZ~X;8=bBt;*2 zF9+diYIPAL$26&in(=$-BnaRb?Zq6NM(N6HPw0DVD~kpMbBJ$_4)%9{bvu3h(IX@W zXb-8~lh5ifiE5hOSS5uFejOFe=;oq4AunYn4g9FckLbd35$EM?%$Ma>{ZHD6wj|OL zk#-N0?$HiO;PJ2MAPta?&k-yLy&bW&U@pWJ{66|P^=Wf+DoqkWysBTfpROb%T>h>B81(*uI-*Xqvr$74RKTL1E@fzlGd|-B$ zSUf#N(fNed)JA*Ya35bBeVv=I%5>q>zJing9@3#!0tFK-tV+SHo*G6Bih(}*Q8uz= z;XK}($80=@e-#p)(aWtAYHjOG?alZI&BKg67=W!JtyUbcJe{jkH~lux7r~h3SoS`t<2Eh~LoE>@%X1 z%V>R!??y{Vm+eF4$j8~x+@H>mUQE+ZKTALS!QY|*aGWv$8pq8XV~o63;^2%Hc96`4 zx=DmA(30+oLk(!Vw;!>U?WaHyc_9JAg59OL)VQdZ};)7)b=m5$p9h}lTn z3)}eex!soVl_4U(1^<|-O>*5nNb}2c_yIB(!2K>k7S}Fa3#-Z}AK&JC-Rv z2M?+cQLY2N3H&`dbDtoReGYtxfJ*rlY(`)45?dq_H~R3|NojFrBmL@!&@;!;4U7$A z{XR)6MD%W;f$kvLhoHdR*Pl}hZ^0kcfJ113Up1H5Mr_}Z(<2wqt|r6smk0=H zq)*DNx=j?pf%4MsQ=Bemh5@z@%4ip#-ui^Vfajq-YjJ{OqS50s#$1_sx)O7u2mCdh zzj*aS8MsVR(~r`D5ow-b9LMX@OOY+|>2oX~Ht52DRU#G}%_>UInsLo<7m^uFs=`F@ zY;+@NWgx+rvLeYdeA`u;BeL9Qu;K(y0E(l zX-bG~0KcOR6bn;GvM}~w2*4fz0JTLk-g6NKmXJh{)`UU?Wl6%i1;-(aWiJ+N@8t57 z1|AubuLV=5Z3PN1$HsnIwiUJ71xC$)8;lP#w^paJii;!N{-Qt7fctY{=A*o@#$_RY z*Xs4X;+gFzxVT_DP9en5KYgY`SfLJK6lVdyb=Io2jyxf1uuzvxSwwo|XSAb0Q(S>R 
zyRbJ=t0r?6ts2Z!4Kt%N6I=7l3F5vsz{Q6H#Bt+Wh##i@Nl8-plXN}0CXviTD_VA|kn;UJ6BC;cKl zWiAm*_7mkguQYR%1-ok(I-m~el7w|`vQj>_*>;*Hnm^{6T)s~E)}9H1d_LqYX*Iyl zyckmy#a9+k)~v%eH0YgpR`w98qlZ&3Ilgva5PJ!XnwK$O7$)xFo3H(P8HE8}+?&Ho z5r(&ev_2tH`)IlCGi_zGbM>U6Kl_tED8Kj3*UO`4 z@0Gv&_=gy7D;#V8O8jj?(?lplV3DzsA{Pk=f8x{_8qqmjtK3Ytf78;;vuDsw%Bjess#%UQ46U?LSfyAe7i?{+l9 z_!Yy*B8YUOMLZw`4j!n_us2#=^vGMn1g(Hi8020UTkRD4+WN5RDIC8_{8X5;6+9ac zVOV7fNx;2yQh5TIH+$%_H*esKa0pYf3||3aCoU3N<|3ZOFo4#F*Xkt9_3X@iyn+w5 z@t!Fm^-xGrJA{o=lT{NsY|N@`f@i}qU(Uiz}str4x9^i5e%}oP4&J+D3#InRO zV2cn^Tbw56u9Hl12f~*$E<@bPIO~I7bYc0%mp+9Iw^6?T)9;sC)A#X~UnsY)UoUrV z+{qXlL(q5kq?P79q7ORpl-_0+iV>Xx$LxmPo}MYk&YtJpu`)oLMhU+)d+loZ!B2ky{a_kKc`j3AVV8=deLYEzKUvM- zpF9f>mJkRHM}LbaCH6Fg9pG5YKBQM;J#C?Su>9aP6GKvF z(l-FG6d_|GQMy=osZFz7XA$9W@pS=JGwDJn@|sMa?UCS}VHnGSBa0q+TVg`uoTE1t z5*7rWIHRPq4Gsv;o;Sd>@cF6bFi8d0EN`hVFqwkqG89h4sgL@-!g%s5`FLh}iNlFd zn15H}8K$j~)~QjU#lR<@`KgI-zP$L37Tg^Ct8JE6YCB9;19DR? znJOJuSk0^e)-2butJzeea7`)F&)kDcY0DK)CRSJ@tX4=XT(nnOZ_=GGig(Mk#4y(Z zxsFRsuQJx{EAVKW9dG7KJ0YQFeqJR_ldi;9E)v&q%e`P)3U%66uSTkrCsXMI-+;<| z*Y~v_tXrD!P2zxB0Oh2A7%REB7vF);KI3D5iFf~`4Mt!JmyvRecm-}BH{dc!vn@@f zgYN>jbwGa#yS086F&ARM*E`ekWIRVrRXj$tmv7$-SNe$hgv&G<^vkfT5p3c@VOcoE zxr~N!Mtjxt!!YZeXb^lADI!ZjSAj=`jhFpy{T2Vx7^Pa6?ec@NB(%?5>EdLY64EI6 zrU9g_aMD(WjRaT~pZ-4pDJ4+);#h9a|xSD*dXKeO%Wbr`~-o} zVQxRZe7(GX?Mk_Ohht>HXFuEd2ZVxYL3kJ(gDE*eC?M3eJM(D#S)jWyX~EnU<_mjW zG}>xHHCs3ZgYbvn{8Cxm{8jnx>vzHHfp_fzL4@iIJx*}}Opw}NTX z%o+?dCzPE(U(TFAUM_w1TzT~?XUmhH8ZRf#kCv_>*HH}OvAb4S$gaFL0*-0g%({=_ zm_7l^Ti9<)$4y#v-rqzSCq2nk&2<<`-c=*U%2a zxD%FWb*UAbXO&=@`XYR=v$RaeoK*tR>ltjQq-}&B`$cB}d4n!!aR^3HGYAA#JH)cWv&$6h%f`$BKjvq)nQc z%z?K-;zRb0$Ru&pFW$YKodi+8{}5(!i4&Qie-w?|n>Z|Pt(1$LGB)|-6J-%$<;01n z%D?@u{>#!gL9EXf%sSvXvQsvUT|JuV#p!j-y4?MN7vYqvLo~`Q?067Qr#$-%r!!$H zvVcAP26p6q2w?+ybgEb(#F367?Tl|v5Nlz+(Yx94W*boa60Z~5vi&^MlhnA6n#QT2 zZzBZEkx}A`4h)rZPdOB z0j=Y-LcYK(-0pm^uDA;z4ccc~a_p?~9BsD((A4-*Okmi3xi=egr!Ri?Gv&z8Fn$kn zp^rA~*rkHH9=U@89@u3wrs^ny*ec<7*62gj^6{$Ij{g8tH$9x8jB<4HD0H}1-ulT8 z%Wtl}Tjt?2X)J3|XHEdzrXS}Y&J)%PkLT9)cxoR#fuPN9)cD!KGI4RRjGsp#GUSoM zn90n3RMyqHgL4lWI3WDaHbM0#PQOr|fAK|Z>Iw2cHi-kzR2gMg@Xp)cCq8XQIq~8Y zj!5)3v};^d57+7%>_GEAlrd7b`r%UuAGS}wtq?K^ISg6>Z z(0~}~*Gv>24LVgJEb}$}X8*K92lA3z%LF?kRGB9SdJuxnV;;NR&HA9=p@2>CD3ve^ z!@IkaF+VWgQNH)xACy1&qyMx_ojOsjT>CfzA-e&m-f%Hrg_t~guTM8^7;%LFiNnrv z3)&;lwv9graqAJR$BZE=WJF}9>d}`-9hwfYceR7G3yq|M1iFU7Z~<~_s@flBf~N!B zqESN5?gCq95?G{b*CmkSg6E=ZdgF_^xG8Z)P~e@Dk@dUKcxHDbR*}YJ64ruo>_j7z zp{*?iW{^nSE<`@}0u(L<2)Ep|UxdkW^>!g*MKZnkFnt&@o_q0_`?_d8HX(3=B%LWN z*Fl+b8triL)J9Dv)64dm#zokA{Fuc&mf<4r@6-=YLKrOKYEFY2Q?c+`Mg$ki7w8b?=RRu+EEQzoBrl7l z`D98=Ynu$KL2Jq5n_8|+e_iVHh5}P4u)ti9$E#VcZLEh){1y%wh?J@cK?9GCLLRrT z3K}(!i)GpnMj7g9v`KiRCSh-WgxSmfPM-9E^l2Z7kJ^4)l5(2x@*)$RnaPujw74DmJq?J|yEAJg6mTMb?!!|YZ`5oN)o~-%JPrsW?t+F;Ds6jx3p~2TCwYu zHpPcHs~2s64y3uHYqr;xk;zoexkCKaer!DVo_T`=sfWUW8GLcCFkwBuOCK;tTb3|8 zUt42njXc&}>uL5mu$Th4gwtaDcCK<>$h^TfEEf!>I{?ruT7Aw6g@JLaT)0?y(Rz2Z zZh-4YFkkKE0h;#o5#w(cE!^%7CKszPp6|HK0V=E@U z2x7IP@;y8}hBM;?0*jGH+RNZLj337Z?}DQ>LKw*yXWYA{0du zUM#22jFul?`2h^+1I)2{(YVtNQV$Rpat)K9HN1xB5i&Z!Z4V57Z`W}7)))V%Ts(cE z+?>6Pwwm36q2p!t&J6tvQw>wRzXMb2ZU?(3I=Z$(FSb+P#q9WqCwxqJ7~Hx;qc4sM zfdd?YX&U`GP@eh3b7i<6GcS&xe*YJj%Y$n(nQzoeclR^i@vc@N6ifQRbfY{lbk8;n z_}CD~-lEN4+g^odtC3oq%y!_}dX4@9gT7(hRD1>$2=~z}uK~xP=GV?E+Nv`L8D8rU zkLdBEgdUq1iz4*w`EzCJ#Bur`KLnJ%3N=|jK(cG%j$Z1Tte|(ee|NfcU@~`{Q}+l~ zQ_i0`hiMpbHTMy^*vWvM48FBGajI#@97*0_mm_z3aDdj&LY{M*S>?JWM=Lcxa$?TI_vp#L%t?zf$2;Jvy>CDM2di5T z2^V&s0nFhM6BL6}@(#lOK5*`1PSrbcw!HfMt2p^G<{4``+jbuAmcRYq|5xG&KPV@j 
zJ5t6jwUjYV;M>N-eFtLO+}gkwb}Yj1K75q|k^S69Y+#*#jXCRF$P|UlNZXw6qu~ErNvL!b6v$xv*Ze@N}$;SQdMc#Dwj(rnZ<)7A|5oR0c`-1fZAl zWy~zgZwINrO&Ov@wo&A=|d2fHY*f%fhsAVc7 zRlHY*y0+iEwoM!~uP|$_T$E+4VIC~Afv+Zxn=n=^`dT_LkMQLG%0NR4&9W+>P=sX* z5Eqc}yTY7m%uiW)MjEwI!u-2^EmZ$Q`KIaMDU801^S-`NN*(Ush(1~{7KG>UXwR2svi%DVt;ZKG-sZ3URjXhO0*~kqU6U#cyzr4QWjSz`|Qp+FWElnH7DP> zCi_vCxhh@dXWrF5uI~9#-?vpe^tFD>d{Dq)r@cd9{p~gO zHC8bjAx@}~R}8b$g~?er=0e*j)*kJ{;645rzwCc2*cRI;JJ3x-5q)kA!Bjj0XqzvF>mrHaS*G@`~d0C8Jj(C3V;wxo*lvCdj*dVYllAW0NyPGqI!}7`u+FhEw zhXdq3j;@RM=W(FDPdwCdw7#2VfdKw0DYl@OJvdnX6ZhFA8RWR!9>!iLHvT%69wGdb z_bclU%6nIS$nMEOdE)#N8CT1T4|3$IF&bAF5KV#Y5sd327%!vW9I(xy($q2Jy!Z4| zua?Q9$73%2NQY}3yIli#zq5&^A4YO!`WB~qJuJgrQxQrGfWLubxQe@>92X5O(q!lP zeN4H!5FB#sH}>6wZAU1R6R#f3!?dF}$2`PwxQTFJ7{_74B6+~W9)Lzkp;jIya|mHz zcxWWbS<`#i#2%(Qo}4v+EtvBNoCv!WqvgX7J_-}wh0x)+)GtIokwe;?wI6zLhrWlp zk0balJzOd8{`@lWcW;!n9R?O9b#-92#O^@5&eaH@hd9%!tuBwC9oVD};^KB8_&8^7 za>&L(PR&9{V17GfE*-%Eeq)(~0uTlaJmbk^-S`{0Zb}Kn*kQzPTqdWeBj$QI7* zTR4m_-QPe1zf{hQUnozWJB5I{iE@Bgp9bNFiDq)53C5Q}^p|jF$ z*C&vZ_^R4g+bJ%ijRWq-!T@P4M*?)w7Y6g@8+@2Q5CcGPOZmp-6o0MYsi`d$UK9Eq z*r}`e%n(_XDW`0cktPBiHJK*uf#InPFLkCL8u>{RUkJ*!jlp4~pF~Q`V0bPry@b}* zG$XGxpHnT*D!f&DQQ@>rUZx9$fak`e;3JLaz4YV`lb0}yGnurU7{fCsc7{d0s7zei zWslouLH?s~Vez>&S}$G0gZexIW&gZ1|A8mX$fi z@ldl>yzhO~$Bh43*N8gNRJM)J@%!MP?3HglGYxA_bxuX&M-o6&kS z*Klq#oi^jnnVtwXfOc|F1|+72Xpt~K85$WxAm~r9ZKL-L;83g$w9LHot_;2DHFX() z`PVldgEo?#Www#_`K) zEgplvI{tj$3L|GvNxP1Z+D-)lFvrJyIg%Wl);^{W#A%RP`!n;8IB(SFT#|7N{kpFB zw}tkQDli`ld6s$EamR&t8ye*nFsDW%P*KW_2+t^_%EYWo= z_Gtd6-1X(;qK9ZMiH7s!#0j*A#1P&1i20D%*e%%TkI@>;L-ujl>x1dg)_aZ9w61)3 zwLH8#$8!V;RC_x(I>N0$NHEWH+n6^HcM=nfRkYYU*b~3}+Ht}f4wn-bhRQcy{%pB^ z<)iW+fAdzEy}euRU0W;X23{%Oc;!2od6e?jkKZYq4{>7cI8nxi$IBci4Q=D>IDZG5 ze#~!1v|C3w*~C0&d(~5gI5L(^es^^5PcI-$Xoj&FhD3#3CqlytCcSGb%dF2ZMZl%T zeT5U7_FQL=2Q~fAc>nfBNlzUY>pF*_b`` z;y5kC(~0A=J6WzfPH@hJc9OR7D`+FvIOcSRSeCGXFnR1)5C+i@j@vL?8{n}6jxZ-y z+3&2i2WZiCaCE(KP3{AR*Ixfz!ZLac>{CyDrd&AxQkh-5$M_=12#n+SBs(J$yU6>R-S%8_G7VeVmWyD^pMB`;3Ft?bgY5w)d{C_zKSN*x;+ zE60w*$gWP8#q|aH6jP1;RYGRofOjE0WRAxLHW+yHbTN}T*pivYREh{to6x}l^q`=2 z{?yau^2Z-S8)}=GO_>8cGWf*Q)9j$2*~M{ojlO6h`2VG+UWn%Q!QB1ePK7CGz;s5w zin)#936fFl$}hoxz-TH2i=!LB4mH{d}J8dfTvyw+wECU{f|Wxeh#>zE69M6v>) zv1vUxVx1HI))ws~gw^(u^7>1kDNpoIm0$koUEsk-<^bV=aleb8d&usFAww^{^m=$9 z=dA6eYedAli?7D1(v3M>CnpH*hya+ z#IK&t)6jr%pgFpF&%nKd7nIDdipT;?O z;^Z4y^wJqH@o0^l7_}|ct9uL^CxcP%GI$pjINFWE51@FRK@<`sW)~hCtY+R&ntLEU zwtS=uBj{{E2N_06#>x)OR=2AKzkR8h&cxAi%Bto(+RE)>gJ-^a`NPhJ?Q-EU8^q(5 zn=rW0)r%MWQ|lTgfdXV+=?NZI=7cBQa!=g~3}LqT?E+}A3JO{5-Kzr`&4?Ike#2zI zB+JAa1wjVRv<@H_G8Zt*BV~3@gh>W0(33!>APcP(<(nE6bq}X2DIy z#qwmV5=c2kFgVcMQ%UgR?nSN7c8g&B8<^bO#StgmJ3xd;6!@P)OPDBL<{i(POrPl+ z$%4Y)W#nsHEXNcU1zeP64%=wgy70^Rd6Tk8YkIW>;!c>dXmBq=I#mUDQKY!`l99=7 z2uy{S`7P4RZcRU1F8y3#wVlF3|1#NyIj5{>;)RWRMOWycE>)D&XanE2u}M$OvaQQW zwla_4l>)-N2ZjKVGJG!sDn7#i*J48(O?+9F)uwlNUvXdC?^Bx{`b@%#g8?;w(jD*8*OfW9)X+);Hp`UusXevDnV& zdE&7tSXhSjx{H@SccHZ&GAKA&XT9o}H?8Ts%$xotZ)m6SEP3c_8B%G+30>N#^8n)$ zJegNPTV^~=vFT|`m~m)V&A&UG?))eWxb^N_*@tS$ezQFDt1TKG#by=T*g@<(om0Dc z*9)^T3}e>Af^XY}?+9U-6GjxK)M!8=My=zP|7$ z?`)%K$s-PVRG$Y&wPu<8>`FMNkwE%#zI;rt-1|%>g9g(sl@JI9*6)87%H{m%d?DS1 zdZ^EF5FGQ~vRy#6tyK$JcbV+~)*UssXuh|w$(Q-sK@-{0(a&0S6eggv+`K+ru3Wi{ zt^Yk3t9k0@7+4%aZ$ErkesdWcT-FxtXjh0B)PewT@d+45%zt)S*f&-V%FS!@WpP;z zGs-WfqYiczu;{@-7PD85+TDd74_QM5yJEdbU^>Fds7F%;utjFf=tl zY@trP{Yvlne!2Jpr@I~PDnEMj?eahUFF!4JuWyx8$1as_G`XKT{#=YC75^PuB!ny1ZMYp|MkE9R4LUK026}&G{plrvU*U@7N$t{n7c|) zDRc+T!gO|X0-0t9;(GYVP#j|W#@KCuaWhn=5lPk-cgx)E_444xYPkiz?p{NH0**dx z@6S(Mgidvlp|NP&S 
zXR-JH&HbwghcfHT`3#9sq8rI?Jp7n;8P$ja~8jJX4~wuW=Cd=7T@MuN(TaVknUI{A>@A(beO zvldLz6du;;kHSvPLugRm$T^$+KKj);q?2erlCKMQ>w<+M7(viFs7Y*qR=A!<#h z^ofDQ&zIS)PT6!W?WB5W%sCJ(IEn$srcC1={kV^iD9#U9|2?_ukh$j&g+vz&s;8r= zDQ3Zs6S)x~NAOEuHU#`KE84_<~0uO(P2!oILu;kZn+#A zo5=j;VN)*1y(9E1$JgfcG{$8&4W;feG5WdC#FSkn6X}2 z$4B6x;S14Ay}T_aPhKov`|9sAwX~K8cRoh&Smrol1TY+JcNY%ceJzIzcx+S&U@wlZB8##^AE0%O^ug-@n_#D|MFk|SvmXsnet%s z7Sj*w3bZAEzk{IcVI7v?&QnYG4mhBH=!`xh8+9JC^S}|;n$(Yt43#I(oGLf2vHOCF zoAeh`KAo7MOQSPwvIEvnbS9%(wc%ScdE#Pe8#{XZ4brC3YP`qTuF>Yt_@&yc4p5;3 zaT#rk4RQh3?8k}R1t!c7172oR0YckUUr}Tl>OG0_Ta%;M%RzCSq?d`$B1KxA<+P7= z;nMa?SX?$-6k-$0%;LQoaG3x#qBS%35LirvNb6!3jkT%$&Bym-%;IW!wlB;NfLd18 z9h39dWS9Xo3zmzSn&#MEQm6SN0I<-BSQqkUe!nRqSFN-DHhn09_gUn?q2N`fsWM(> z%fjeF>Y~XN#)82mbD4a!#dopEtk__3V(YagPkGiG29QP6wl~df!rWP?Rg=LZp3`C` zyP5~w@u!KGbr{hg^_z#fVv^CQ(+bUF`O%Q4FDzF)yT}WWj9tC_W_Xd?3Aq9*JpK+2 zgn{2-K#~PWNaiy?DJT7CUhX0k)b^R&a)d?6J(C~!wJm}pPLn0&@U&4EZ}OLPz#0Y# zm~y2qGgBYGE!(Ry+_u5T!dw}C%doPTl$Z`07Vk|Os7$Z@T$#ue2aauHEt?tx>-I(7lURnWiK{037E;qT`i(TUGjQ=c`wq}d z7V~EQ)MlwQ505EBMx2CPDbKVdi9jT+_}3i6CJIgSOIp!C@RU-B@%pQI{wckO{MR!3HOy|E($NfWy)-f6|tq}iIQ7Hdh)*}|{(&*N3QyIC!<%~sQ`2L0ac2iS>kaYW}1Ox#BK z@S}U>>c=ej^F3(w&@2xUjRdE>nFlzVqD4M=e1h`yFB`%E6^-RRw8z)5$-lK%R_9bs zcxs#GTJ$px$S%6#(%=^?B{2Os6=H_4$thd*|0ZS%hJbZ_(ClFt?dB-4yZ70V=ovXE zC(p9;gE`DHLc`4~_~UhqpxJ(sBW2rhV4f;({q(K!AAbCE%#&b@(b}%Dv%XG9(z+%# z=2e*G4YZ-dILl%JjLkj_=N5v_rt1SVD5IAiw9nY(#{|c4v9{sVsWY6g^;~)K(o`A3 zR)23FyJ&dUT@HI%hpAbH;nJQxGze~cu(#a%x z$y+dG#*cTfD)_kTPj^>Ga>fQ7m0hboLKbfGt`{b!8{w)SJUAYf@Jl#Epi;>tt|RCJ zziYx%7oUdzSfPJ9%N6XH6^efUYu_v*{bS`Jc-bM(#?DNo7x3Q&gEKd`irEGs&=G<} zgYXzF#W?n>^!qjkK8;Ort`WNoZmKh5voqwe&k9Ql#F{Es>jl?J&0*c`P?)EMj3IW| z*kLiGXxo5oRXGYxuz}pq5w2?S^{3djPMCkqtSJ<2^dMT`C(vqdz>EX?<}w3H1{c2} zplI2~A$5>YsO${35F5W|XcC4Hh7t$(J%kCYKqDOO;LozU(#1}KeZ~>9n0P59M3D(t zGK~sh&Mn<)-obG5Dk5KXIl!eDE;sj;{{w+5o04Oo=?!TH#K? zWrAa|Uw-N3I4wKQMiCqq5g^oJ%d_<%Olv~62V=j>9AsphWwh|_j_C{FIM5k*u4FN?cInWvctaC3s* z=_|}(Q(gkkjt{YjZ6@LMgF^24h=aOg0o3pf?@45`J0H$4u2>@&=R9PGWv=}6t)G;4e)cYN#(Eh)!_m;3g|T<2hRFw(QotZ}Y@ z<~c4j9;s-}F4RihUTU~fDV^aJ;7Jpv9zlVESO8=eEK(-FGJ#I)O)c9F+5*eep}20b zyXa~IE%I1WT#y>VLuFK`hXJbQ&_y`5h$+EEgO+kBAh_82sZV&wyDVaqZ9W%)Ff2(S zV}yo^`eZ09M#fIUl_7C4vu!eW%{nsESQ!bL&)~yzscEz9T#o^$42!px>x2u~A#U3h z#=xExUrvm+Ta1w*j4b_Tz6zsoSw1gaJh_bh>_Y8j%wgfOou)I7xVNw{Z?rL{M10#` z8Bog>wxlFo{!%%x2_K)NNg2S#z3tYFtKu~^JpNtVPg@{TwJPQ{Pv8qYG6viW77tA? 
zY@(n(p?vy4ysK?#;@)(@2W8aq6b5SFn9ug4zi3n7N@>lsv=5lWoLDCBWPrW=?PTJ& zpD-@M<8PkFNtv|5>LqIGH$O<^7^w`o0*3Y35Z?KZzO-RtsG2~K!Nrs-bkNwHBUADS zqvbd8m4y7~>)^<;y&+A;huOGle}Hf3Uwqfbh(~+X_x@L5GfT8?f!i0n;vv{gIoxq0 z#M~#XxTzIY2Jv_BOTnQH>S>O-Dp*t)n)N4lfUveomo~$Nzgcz#vH^PPtvIVVk>2Y3 zV_WORH-J~_<0sE$*sQBL*Vzv7Y~IirIEYe%RHk=sa^Y|GpYx5s%^!@=jEo!7b)&Th zaf~rv@r`yzCfN!TR+o3$Y07|g!>y?-q1KcoPV5$GMF%cTCNfWD>;Vh!UDTy#O@{{Y zEinAv2yxl_QBjYudoYNSCO!n|6>!)$T>%$*HT;yu6B!n1k8uZ#;=sQ5 zKIr2&Pf5l&A`iuYUfSX}&+Q*Is|%RDY!E4A?x6v&-OX-ATRoXwm<_i1%kE&AxsL{S z7841SP;Drh3<;>I#4eikIpT%iK?`~H#%x)@rhIcndcynzW_W8AvxM0mLQKL`z)NVeJnk5H z6Bif<7`mQ8Y>#))q+);khEylzr zUVf$ApSfG6A54b^HCK7!{5cq&w(LY)yMB%KOG}Jdmbzn{+pq(-JokVTRe_D>hc2bz zPR6}QMPI&rHz73pV7B{k1a83&d>O}ag*Sye>0g>tLJ((~0I4O1mkJFk5YRo@Q5b;f z@5Ich1z(YqCr{xBKF$un7EH2HyS8x>B~lalbsD{Y^FGmxX7H<6D=Qptys|Qb-^T;Y zL~z_D?Zp$%mfyVhE>WbWF~=e*6TDgG?lum0>lx-V$~}q##`K{l{UUfgf)^9k4kv8Z zdX)o)Y#01TjF}Du*GFir-IereVNCTC8EKtpRvYYmc@Bi@kO2-lF{4{sc!;SOj5=!~ z1%iXaa`Wa*#>J!Z`fIP1v!~9K_@=ncomF*HHv_qLg^`<<&VAR}V}y;pW-Z)0ax#QB%ilchiWm?_r5YPPCWvFC1lO0SA6e z{gx2kG=((4!2t9(K*-o}PQ7)*W+?6*D?G>jOJ5Fj}5|;v!~X%jNX*FQb%ZTrqFoz4smwkFe1rs@Envah`9W zsnZto$qv3XJB#dOVRE#BU}q#BI!24Y#+(NJq0RBBVRi^Gk5WU=OL?X9vMKGf{(#lf+mtO$%E5jXA>)qmZDFmf6W%UY^5I8DAIj znqQ(A=*M?~v95r=N(4|3uX7&RCQNSI(8RGfJoefc{VsByG*)KX1q*t>Zs1G8uJ&uZ)JM0ACV zui9*;r99x`d1|&CZ)s#4k|{fVN_{5scl~9%!Uh0Owe_`~;-uohbk;|jfLF^EM&Xkg zkApdF@l8w#T=;XB41$re+$OEtUovVkE3}gdIPDf+;L`pT8u8<08U;B~;d^2FEz`p@ za1g-(e955r-u&66;akQeno`Rmt(dZHwan zM)Onxl|EU}8p;Hormn~R&Amx;iK}F+b?|@6&wS8mv+1nI2{WC{b1(Eq_OdUz<6CD_ z<`#QJ(;XKQA6q;*-#GklNG9;@2CU{U6*uy>z~Y3{0^*;7EhG-ZfSbv?hB{3RJ zft#Z<~edVxt-P7w@d zl~)>w8V@eoP&na|Rm+0!arVnkVm}4%SHASw*YMEpDYx$vV4dLnJ1n)QP8|mqs@rS& zzCP5`oCvkc^Ud@UG) zud^*cg#3g$>BJ7_*-xBBolBg{>E$v_$e(TW?tR2YJaq<#4jezWP~Wd_w3nx+UMQ!= zrw9W70Q-?Sjyqm0yBJNo9j$F-1`9Y7Z=TrcqAj88K`xw2*ftF4J&4CMk#69KQ44w! z5wO@P(tsM6ZfsucKWSza&7NUY_8;}*joDxBJ>WnA7THgK@_9~HL&6-w9AE%@U5wVB zIP+Bb>`Tv=x8M0*`N#KuMEE88(rpOZuBLAt_4*EBVqD0rO#<*QoHO;;MH_Avj(CtU z;xW(rz`X}o)r~DlFE&l$X;Uv-?Q1k!p?yvSZrwe3?hJcpZ3eOw{T#@vYNnm+tfx|Aa zzXNYQsLcD3R#s8ntFm6>R6I}K(=zR;$-#04+olnu)_aSy;2-WEd$qw0IPF%V&j6b- zSk>Y>Nz3#(KR#lcV+N02J$^^9wb7n#fH<)ikT8y&9F0Uhj{TQT2_9_axVOEG3gG4F zWV9#GJa>t$r@k`1@Sq$YJx&NP4qV_Ml|fDb>mS9@3P!k(_PVtt9UmDvQ@-<^Kf{^i zR{80#e^Fle?B{VdnIJwdlH$@G?4)ipZxD|YC!HOnt!<>WO<*6vhW@qJU&F~~u1s?* zISpY9v26>!GKWg4E#=5r>^>Q*J2<&KLX!~calT^?+r&2M!N&{Ck&IpF&iS$s zc1-6R!%tb33s$zXse~y9`>bg^;XN@D`!@R2JljMmaN*U4((N^mobJUFx|c9cNI)kzW(3lSoj+?OYOt#WLiYNsQ_LGo~bPN>xr3Or}ofmJ!u+6Wh? z9RJJUaFP*#sP$-r9w>IQ?$_DtrJL%?UDG+(75$xn9CSH-VsJ2vnFP3wF`j!jsxdUk z9pIgdkMXcrK0-uUh%$_`pRWXCxMk`;#cOy%#n=Jx3!Oy2qX1? 
zS*3uqnF8&<+H;m#VY2KB1pnJcal{iD9P`$#ms&=hzyod;hFUkN0ji0^{027h;$@w- zT^Q1zHZ0G?k_j3(OBp^PedAeez4=MYO{8q#GEeL`NG@~cy$LFPrcz68l+WuF)a;uo zJ*YOQFw2eGN7hZ6#$R<@@VO9GNYWh?R`CdKrRn-AxNFoHc#~1oG}|krjtg*6;cU{A zbS`cxE!qckLV&I`S|P9zqBM1aOUtOVE8JY%3tw<2?TeB|UA`9EDUaVy{h4nXS!ERi zlReL~Y;ir#Ye5!Y$D;w``I)lnxa9`GA^|xTouFxh&7jlG8mB2SY1_@LLB33=D*iqe zpDNi9OAaGlRwNVFi5qZKdnHaN9_V@(JJ$6cu1-8U4Cru0d6a1oXeHdhLdWRUC-8#e zsXY$GD%p82lTHO(G6XU92WfSFsO<$>a{I?cAys>09&nwe&<9CldL@1}Ftsl&Pn-#_ z^Nbx5nzAmQ3zg%+JWXvu1Q_4~N|-X3S@0ktCHYWh@LJ2a+zeCmiy|I2X#U|pk1V{+;h?mA!$LN95-Lu(WQ#lLf#DpZZYk6?GyLp;R|qaq}3&jj9r!^9qKKkO!a$T(iDC(0+Ed!>BlrB9bj z=gyV)fBEzBw?Ft^S;S<(^+~&61)6Sbjk;hIWjvM^@y?`4kDvin%);mRbX#BQaIfSL z!ArUoKdZ!d?1DB135lYbvYT*An~WE|J5{$0V4pmK?S-+&XJCk>!^2kWvt)v3!Q{D< zWq>1PuYCM->>H*zSqt`$vU`STBXxNk^Efsr3La;PRESjuJy#L{F`Ia1TLj4q9XDN_sSY>ZxNV%KEHA?Spb-eUX#i*giFE&phew z5p#ZK5ij7CI)_Rj)X&U8JG488;pNbc)mh@}a-tsRKOlTC!tv_9_Ujwv=1185-C97X zUoJ}=_AqozJ3pi+3Q-6#{NyTQ$@xT&<~s{F%YBUUZ{56!aKknWbIlr>?G?i7tU$LT z*j~Lr{N8ssUe^#h;;M6ev^@9BD>)K-2`}M$x34m`*t$bFce~K-vZFBi&ph*Vx$@D+ z96OHP8YeU!aB#$lQ>XCMHNGk|Tcex5xyi|XXW4#v8d}?=4`!Gbpb@qv zjvYJ6m{U8y3|;7$fddf-9PBJ(Uxb#wZDefXjXpG3E=p?Ho^)KJ-3%*IUyk{?uxq=WS3L@pEW8zl*y!2^H84)na_P?i(KS*7FD$6k-pLL zq^UxIWucvMg(JQ7CC(n5C+>V_q1qKR;qhB0t4e+XT+0w1^Jn3NTx-ynJp56WavlE1qhu;D^`1f@E(0QjScVnl1C! zdxQY~!Z=7%?7@c7BcqIrFxghq8|sCmc_!nNGOR1lLQVXpEb&4f%dKi>)7IyYnMl$& zC`lF?5yOw^ncwC!zl8@kJd+V@w%hXUFEu1GEH1`2-*R%1-|VLI)3h}TbH=^p_*_^c z-~>_TB$G3j@66xm&n8_+&pvlD@V7Nuj{WX$KiXBKUfIjGn+)+(k1!q7il1U zOL6IUpOeS3#B;5u-lq&;mWh+DdJga4zz^m5m z!kE{5dzrt&U;DW}tM5WvrU4&5NxwhN2d-rv8+BWM@&?D`F@57%FcF-l2a?W)$y}Qz zu=6eMgbvhIJaBIsjW0dzOZpx~Go;oragnLpVx1!#!_t#TC9%H!UAZIs!Ziy|DP1?I6_>7vqJ<_g;GTyFnoi&x2m_rho5idh?J zyO<+jShWd?rqnUT)uuJDxMdM`H)R2hm5lcD8J>r@luRmaY+Y z3Q%Y+FKn=Q7Z?w01@P`&V-J6D;1L?-u`-E$)iDCJUqT3Y;o?|%=6SS+6VMJv?k?Qr zpd7q@_c1E>=uhK!sub&C0k+?;&Sf6ZF;1;pZ#jPAY^?C!wW zUndCvJPZUTKk)e2Ilu_rzCH(|gnb6~KdzxPIE;`u^>|R%XYsg$GEI5}tr_lWqP4Ny zB)kdBVi}!s2gtd!h11D60(P~|yYWiJeg@~z*r!;Q9_{N0Pxo%!WIbJG=VY>+Jav|M zt1z_~N1ubaIR;LC`HMHpe|+l)Fr23^oG73B^5;B0KIU0EN1~zV^+nq+&*kuQ10RdzXd#UaLHP6hqZ3qT1vhDl5 z<%zQw7)w3bwJ~0&b|sS-c|ZN^#WHs6NcqM$zlM--Sibv<|Et`;vyd3Cp1`Cxq_Idb zi3^<^KRZ@F^TP9Gf>@bb#9N&>dK4yTvHa}qpO$yue+xUU8T#Wc%s#lqi`&_Q_o^2GK*Gvg%CT>lxHtJ zL;fX<)H#>|!A!b0G}s=V>4xc9ZfxfhxX(E^ak@t-QCP5od^XUM$q1dbEWeF9c)}-g)be5SeP^4>>$7q zHXl0|;srBb^>hs)v?4^{{LwdnfXjQ=I4BIE1OXev{|?sfUA5~B$zGf&R*3B?52t;j zN)LAtHVAX%E`lf684gIr)|Wr``EvC5i88alQaW)Wa7S$!`|`O5>+IryQ_9$&?RwL% z%;K4i;Ma?7oXQjh0hwm;?M~eWC)#O)zJIt~`Wd4ve(W?gFP25tw36NLiHQao_D4(9@7j_{uFE7!Ww0ec@#S=l@}O7v}%RKY0h(na2_S=H_O~ zCqDfJoHPc@`|tk}+rev@^lZn<>}GlOg$w1c{`^k}y#KRugZQp{#9?jcbgnk+>fEh# z+%hfF=L|MQR{F&GuLCdU1Lg~la~?lFQV2|dLkt2Ma~X+B2ew-DUgmxRY7ub^dekHb z0RVpGJOAyka3pEk0x;%XgI(VT5hfq$X!M&K92oLRyr93r!Cnu_{rmSR&vmD_T)glq z?YajK&#sI8%%LOu(ES#D*f!2?z$gJ>m)K&GS=As)<0BU$OK=B3#{mTZClCoj`ye&p z(JE7k4Ro6)Ha!qWkK1WHxAQc#RfzCJGGjN%IC+_;8qqSz3n|ZK(w$gyGMI};)*uEq zi-p@YGR5_f0R$3+lNO{KVQ4NwE-ZC>ye?|inFYs&q~R_uR^}x$Cgbb&Zyh{7&0@&6 z`Et@&^I0Qb%Hy5+9~Z@)gCYwg4Hw=fd7=B^q z6Na0`Sp0yA^p}~W0dt4prQ!-oqumx7d^P%?dvRpd78|^i!7atKiQkR4Nh2)vd8){7 z>r<236duI)<94Mw@hcrU2|EEbXaX25%Xh+S*^UF5?Mi#L1ISF%Xcs?2TlQu0@EkfNw{&Tp$t;}&0me_Xp1a!dIbh+BBAv^rbXUWVa{*MWu8bw#Mf`zx^K)d>uGQ zWGrE?F#nXDeB8SOmk0~OWSxz?wiN*U*LYTy2|lpj1Wkcf#yIrWzFH2umk|iI*bBg1 z1`V!@uXd--^BZh&$Cw-~^}@m;+SoOYK|NW%@q1szKzOmtt=}$FXHj~w>+;YegYlND zrU6~V``jouFHd7uvx=sdus|?Hy=e6YF*P`G^dwsHVbbiE*$12SlV)7(`k-As+}CCn zrij>=y?Ao(q3IqS8!cb?+-v1KU;9$|%q!2r?6Z5_J5)aT>CcwuScpHkai!e2vX~gL z-NfZovE^~6cdjlZw1;%=ns%sJ40PgsVtqny(v*hv%NX;^xIOdqrNl@DO(X=*2nIuw 
z#{4t{%XS$0ZWwdINurbkr{G=>VHxFajKJ+BvcNQXcmTrGA}3ZsD;}-ehxtGk#_r$! z$&bq0zxlAt5q3p|R}by<*xnaD`CK6kI!w(r&I}90TE*)Y+^@vU2*!>!IG-A2Q zn6Sp|T*o0~a(vB5OTt5G~uP1ER+V(8C z&|8zWo$;!N?g61(&YyZ2lOp120&6RL)863ci95mN=Gr5R;qn zr=BXi@Mg33?v(j^^VvC(+4WdptpoPKv-E4;jaBIb_y~=r*_&&>X~H)h^m6zH{9f-~ zgv>rd2Ia+A>wM$uUn|G^ zhsuw>_k;5GyYGWHg(wFe)+Tu7pS$=}Idk$1ra+tJ=AE}-&}rK+J7Cl+6lGTaHk|NDuVRpQnGTv zOMq7zFv#}LHh{UDlwrHPbeQnB^HFm*pc=m0E>}m}DNL0eGmmXIpBje*<8=~fw6Dn= zHHxqG(;k~-s}in+_SOZOHt<_*ukWa_!LV8jZI_UJ4_yRyekZ@Fg-!gC#O?nu{^k$H zgq?opPF62C1AyS7u42hg?xe31rMRpNPn`@%0$j<{fL9o8pWC2mi+JR|;?*{SBXCji zRi$#<)Gl(#;UXBi6mIhRv7f3?q1Ft?6(>$h7bkovXy||vd_2<6@_sw+y22gns!pohQ>+ri{3_ z6CchCh9TOw?TvZBT?F%q7oDRt;ZOmi`GC6yh9VLM-|Kr2^Eq&Co6|Sck@=y~Zrc|c zwLPQ@etFhx2jFEa1|KPd_VHeO{Yrb>LQ@r|mSKCyZ#}k+JSuwpcDzXc;zrXzX;It- zuE0Urq%mLp&&n+eW&q6}9l!%-2WWj0Pr@BEfBR@nK`$ID@fq7Of$+%H0@J_07RGCH zf0@&K*h+`7Y9H7xqvM{`g?R&MzVVgMm2bZGIkdVkd^g@COb-h#4Dj_U#C_zCn#tEo zjL^e!|HeLy`Bu4i7cXN>F$|=SxpevH!+S92m<&9G>B7D{0oZ{@P5Kb0Ew#7w(+Ak$ zdpH4~4a{!TLf*lX_Lm>LOYGN^<*)wbAC%WWd9JkfZI)kN|4l-t^bfL?Pye}#qM6AY z0n-=mY{2XqQpIrxbj$&;Q(-b?n7{G$Z=BP9T_m>Y(sC4J2b`ki zvAWgP*APmCDk1LS7D3QkV74HCm>PGhV7BpB=p}4T55nbci$@2e5ym8^b$F$$@8da( z(E0#I_5raw*H}Yk!d?gn1iCC$`E{J1(o866;mM(S^hB4!jJm?d=hB;RYQxH6(2{WT0thcK3 zwI&cb!DIO-3P@2LQj0fG^K z^6YbHjER7P_qu|x@k*ItSZ`RxvaYa}fjjv;Fad)MZ+3`D6XR39N~ZQJzxUE|z$ zxy^UW0j6*Z^rf0&&tX=OV=6?!*b{zieF=MV)}jFd_{+a6BfuJ-$k0&Eu@|3wh9ihQ zajdtz{`oJIPk!RHa$i}cA?p*8xyXCqLI%hct$rJ9NnO;XQbcV^=&2x7v z+So-|;3TqT{AHs2qCGu?`)Vib%{FxYtv~n7xpwy<>D;+wE~3A67(y%dGQ2Os?$2H&-r zxp}O&eDLEdoTPRweH$UmIgs`nD|MfFpbOaa&C&nm09dm&F{XEC2p9D3Y}ug7K6W<7 zIQV0LP+6V*tOvBQ9aF3zg|b%gWN+tWuno+XHlXvbeEo~%bD#TMxifty%5Y5|Gw-tV zu*6A-Z@>BT@=LtSHLZ94RgfGWK0-*a7bv$6zZwn^K`0&U8)nVIpJ{0H$QwP_$T>;` z&`y-mmXUO^@K`w)3l{;k_r8-US5uWvW1&z|v9cW`6DQQV;97<7NM>R=I9*69Tp7FX~2rX^t<#YY%# z>u__B-#xprt~=BpsjBY}(*oJUHRfR?7fp zab&96zdQg~UR8o+AUMD)EHs2Nq^F$t#G)r=GpN$QN}FMJZ8%pL7RsuOp!lWDJh#qN z%5NF>Fu0aMc?xwBLxsHdqi@3K`yAk!@Hn9fYh|Wt{bsbNF!ADoD`UU{DL>Qzu2No} zK_@)7pB1j932~W};LmQg9rjnf>@V&~m-1M>gvTqiMBP<*WIQ%tO>0OJ*i#N=1aQis zCgBZjir7|*D+S6B|xkg^UGA4N^ z?Dl14MonE|vW@uz4sp_KlR%5h)R*!sqmFTJeFq@4k6-5HmY-=$lnUHtfk-myh9;uD z7UvDR6>llWtm4u;>#ayi-I+IdUhz}mvnn2eD9s)fe8f>@teqR(nUp?c*sM!oK+Ubt z_G8=av5tmh(Ii0awsYwt@_6Wqb-RV%OL{MNJ6&vKHr0l?yJ?-Bgv*eYn)u21qAnLN z+gafi2R^I)YlyJy00k!ck8{H#k|tRxV0f{^hCyIeq2Cxi%rB*L8BH02dq%k9kFiA1 z@ZCblouyJ9a=Oy`)KkaM~257dmeE8mK`Q_X5bb%hWxoA^=mVWy;FrW?s*>exw` zc}}~6hC2~}mRBDVG6qJN%pKVL%V-sxkh?Giad%*O6+sG9tGSu0<--r(WT%9&3=^m& zLkHg;G_bsY-~qg9;ME`-(#2gN$Nbs?qk*Y-fOwUjUUu!~723;*T4O_u7-)ig&L8?< zo_XR7G2N#*EM%(u7k~3N@J{(#%(L_hNC?5v=u50_4G--a+L&z(4 zG>nydXy_Eyu0D<(e(K_r<=mM`oNb57&wu_-`TKwWx1?Dq1JJ&GwS<|shal`BXcYS9 z0Q0Ks8ZtjPuNzF?F4mt#vo}q>dNB+s*ZBqy4HqO2LLxX<6uHPS&Axzl+-GBe@ zfQ|UcC}rT2ArtuuCnf|}6e}K^JUns)Q?$PF^S6Fhe)FsMfr)vE-5l|B;AwM=z09MG z`SrzV!hIbp7clXnIzk`Qp5Ubl-Nw5ckN8%5E|Bn#h z+SnQ0LU_9e9qfco+D14qpjQ(hCS5}Y83RsfZI|s17ldPMFUZ7pAa$50I|pja28Yk%eTEuGqccBe;9BG41UO`5MH&rl%dn^ zP4U5U%~hSpSiBnxjEkh5Ri)-gdz6bcs=1`uPR%@02VyWiGN8!ZdKl17H2 z-N~6-7e!&s3-3u=89dT6v4n{M58}_?4iFNgZW%O%91D_B^0Gb`7|ReG--Q9?J8+Ob zOoVWehsRtpgf8|LU6~&`0sPgp&_1Auqa=2IPC9d%zk$8HcHx&Ud{Kkzy9oUD3REdd z=FA_qt+un7N*HV}ltp{hYAeLn^yX>$t)!m;S|KrvICR@A%Q8$`aZAbMx1ZhRvCkrS z(MB2Mq@`^NJJO?91}Jq)^gb7!qyw+btI|LCqHHh!Pm`>R`n|+?Uh~5Dm6n_Y>%C*g z+M4}dOR*f&_@?4IaM8Z(Lh5_1ImRdLo+7ITbeQ!TG){>;w(MU_F{IY*dPY<(j&^uMJKws19jML0FET}eC zg@WV2XI|!W?ixVzrN-XKK|1HTkXpabjN3QBqSlM)q$WNJ0}B4yfXlQyzZzs*0m9vZ zC^^8VHv1LE3SUnAYP3J6bdkpKpT&kYiu1;(XAP85r+q$mEK>(Jd1g8XcfNU=-6&Ey z4pJU@)J{9U*$4U!h^l0x{yGk88_6#A@;>7s@NrLd$|=)E$xPb> 
z&)z?PA=@G#IYUz?a?&@YGrKhVpF|B%)%w?m1pMe#BnjlqPozci9U&G z%p4NK{K9g%`q6ZG=O@!;toL|%4fC8Q&pyfL0&Qt8i=2$<@uk~XKWr{?^zVHfCubQa zTfJp`Y>M59g8#u15U0zt=54bkEyHYU1FV)>XTEKik#*vVYN8|#U7&B=n1G?|pSidleJ@#N(+E`)!?}O1qBZ-scGR%fibnMGL?E4i^2Z#XF z$}UU~3`z$~_X?&TI;bifxKl+&`h^1x@Esf)Jy%*WGcmFVyeixO3aRdVpx}cU>m|+P zsiUP0jphQ*m@?Ia#3}spm%aeZM~Jt08=-B6_1lx%`pe1F;FP%J4qKUEwY)QTIMIj` zrTU2@x{aV=>{5?b-B_ic@ukR`0Woi}gW!(Fi^MPO!1UtG=`$Q0z==%Lx5~Xc(>Q&u zmC*^b)niroo4L7!Ej;6h-HWB|Masr(X>px?Bd+X;BiQMKn?<7e;Iz6819jw>J^`hi zJ8=ej+bcJ3-9i{*ePQeb*Jr#6^yg&_SHdJ(*~JZcw#?aawjITCw$Wi~?N#PM;WGigh(@I_GBi?A;P z&MmA{;f3f0tyK1COB;B<^y1Uy!c$W)>}=CPILK*<)HR3$#S72BQ2yut;eQ0Ljw=L6 zXiQ;I$MQ~0X;ePTiw$6+^9b7B$COYe{SZZq81kew6*=;@&b9JK!Z3)S>Jj0ueEy4^ zD)f~gZIx)pA;Kg$F+7;$t=H@Zykps-K z_@;;b$mh|OrH7-3yAXUI;osx+sZW10%BBzB|5cg7f9DCtoxW2>AsgeqZD?%rjdmsg zqX7+oG($q^fV7Ftzl+}}4msLsX_C{;B4soN?KGX(>v`ZV7TVl_Mh3ICchRadOYDLa zBPFw;RDA6KGi z{8F^L0(B?Aj?_;<_^qVLbk&e@EX=^Jld-Fdj-D>au8}%1IY^zQD>N0pES$6*u+oN5 zy{&L1OetomOn3rhliSXTW{XDAn!sTDTu8%YawE>vd}ZEk;6a5DheT#N?wbMzH|e|7YoX#%e3C9W2X1Z4n>##BT&A~yGcW>|Ou2Mm zK4}5Wi2&}zGle$#AWX0@SeJ<_NJ*1CJhG1Zvi|R#aPlu#Xfup&%@N#L2Uks73-)>F zl3RmAr!MI^Fg0*N4i}o{Rr`i_lrJ7?ohh}J-UNcX2w>L8v*3#Qf;qCM-safhy$oNJ z81@%gQcv1x&B9=TJhzY2PcZFjaezbH>?-(>h=ino`IoV-f~ZWNbCvUm&Rm@^M{4pl z>2YDvA0SLHI8aD6Dv)L&zLSYiOYd9|^B?9-g}U^a6)_)&F8N6zEL_6MxMK{r=`AXb zC^w`@JH@RGflQx_e9#l_jlu#xWs~3bN;~=EmA}PFt*?Rm2oJX1zO`;^udrE9#*}pj zw)$UkT22GcK6Nfi8IDEkwhZB9UWEPPj-?gHwfW_TIQT8VP&03HX^KE&(5|n;*esO0 zS69lf-@Soi2oqH_qU&fFbqL%rMm4xN#L@89Rm?pQ4sH{m{k#AE7v-HFPnWLU;qt_} z^Q|__CZ5}x`fTJr+kAdsaO76j!Z4hKzHGiZGmaoT(8(o66KgV@ib37^I6hi5Hly8BYmd9)3i2Tg)>%3L#|5vQI#ytS-LBJI|+ zh~6RENf(UJC?*|!gA+tpLLc}k5hLIBR!&hs18xkos~ z9GOiU4F9-9pTF|TOK737sh_!x-7}{+!Huush-;|V@slvFH2ls-&;}>L^>)#}-HsJQ z{K}tb@=R2plOtt#VyJAPX`VWE7N!k>3?}KBGiR`eUoAI@aI=dLg=!kCfM?7*oYOU# z-rq#X?C^V*}0GJx8m%TGBrOsFxCT6LM(Hs+D3 z$%{CZKaXF)SRCGO-?_?rm_({yTV?ADfkbWl0%1|@pZWPY{58H`E?@hwd_28WK7vWS zeTz_*1W#E)IIS_tD-WajkHgoUZUtza(uaueu4xP={P01Kzyj0fA~XA2b_Zj;WMdEq4dBpi^jVflP%|D`PC>d==%l6q|VI) z1aRo*DC}-c>Mkn&zvehfA=kXX^3J(grgRQ~S&`MWZ8ez;tCb_#{ba{2h9>zo)iar_O>BW^Ou>S75|fN&k{WY{A_BqLKFL}vsIojYVkj5-hpW|(@@TgMJt zaQ3VWtOHlZRjK@dG?n3CQe-j@qelZ`cbienvP6efCd$JU*!rQpc~5ypQIg5aqDR?Y zbun{7W>I87iw_AyM&B#m>Uly+p1J7O7d1ZsC9h0g7#HgEx0lXZf|W_0zOpaE8~}mO zgv5bonGT!?2^8ysD=$vHvdu+3VOAQ#oeWzCaEZ1iBQN}x@1Mep>B*SB_qp#av!>6h#^c%^ zb63Y=;I*x^S-NSq)%?x=vHl1~WQrg|T~&);p?Lg1^(k0ID+|2Y%@Mb@r_rw3wq{!^ z^POkZudr-7|EmIzH@00Z8~s8b3YC2wTv4u<cHzzE2;SkK*}CdAy>|`rd~!xoKx(T$##;!fctL z9e&$1$3ez&bLFL&Yqi}kS1;&Mx{z(7Uc~fB3>}AYR zm>Pez^D->PY3=|{L*ogTbEb}Uw!=P=;Wwn6^VP`EP<8?AKR+r?tXp#f>r1_Avlzbl z4RnGo%;GA2l6%JhI|c|D_J9blLP{N@=9T`W14eej(z;ZU5Mhp$orF zU`Ozs`Az&Y2SnH)fBKrYwnI9pNv)rULIrG(>CnQHP$jb15jwKVD@wQSGTcACmIfR_uE9x z*@4lYoxwcl!5wV<7s!Y96Xu`~=hPvhvvk8KZ^L}4p=@E>UOJw^S#kr%IvtB!=tJii z=MQ(mUwHXa`HMgK7H0-@5zGEMOvXA3FX6RW%!i1wbK)!^IFF8^wLc{Sfah`;D8o-O z9k5^%-37*f9?hO1NwZE;=OK)G3n2r$kH9XXmF=J4c<3`n%Mgrf7fc!{IMNhn+35lU zjfvANAyFS5mOHn$%H6v=l!s#>Corwgt8s>@_Y48&6qqF$SNjHITNo968gOWb=K5*- z2Bto@a6-L*ZJjdN=>Xgzg3!Km!=0Nf=Jz42Klcki)1* zE|sj+YL&cRSu6Ya$UetskB@hDfAC-7U%dWF$MN`3Yp((&Qj|n-NNNH&GYA8qk-CA- zId}W>`PTblW*^|~_j#YE!mV4ks_wmY?=AYFo3*fAt$NP@3^)D^x0i6Z+`%yzA>tN- z4Dro7C~p`uo6{G@*@XemcW28A&ZEyuh6Ut{NBKqel#6rV;^>B4E;@$0E+4<9a-Is75o2(j8T;Miv@Fi$eY zQ!HbjeqLs0WJ^<{Q5BEuJU}DvitHMCl=q}0y=%V?!fn{mpTM4boO1@|$_KaJW7uHo zjB<9z-2*6chZDy1TYwM~LD9b0cdmhPRU5vF!gbwU8uEoVaNcH&xtj;6#rH&0l^0+5 zIE>;Y=z!fKaK1)(Q=q_knm$3ejH0V;k7Ib@5#a;p3CY=p8Priu>l!-RkAQ|R2w`wD z2UCjk#FLMoE5G`UFPB03W*G)|nH?WbF{8xttH1IMj_jT;KfU@shf+N(M^Q9?{YzgW 
zx=ts%G>hf6pL?}@^IKoz{0B}{TV*$mK0fi-$#U-L^YH=r;g5dEjuvMn;OoN11UpB= z<-h&oKQ2GLe7RhH_gzeY*pXp;_TV^w3@aAfysPg6QMB4Q55!~Mom-rTyP%5))7AfD z$oW@a9qQ}Azo#3Yn-=B-@fQbWgxI%!@oV*?6GV@T2#ik6t^3KH|Oas`I_!6ov zixvY5Dc=QWje$TFkvJ0}2&B8{xsWzmS2cm`J^7aKmYV<(@5#YUcI zGRj>Lfs1c1lf8?h$In3r*J4 ztR2m4(wB2_#55fs$IEZXAPf1$qkU6OuJO^KeqcSP=y-DLR z&W-dFvnCcy>oQ&FB*j&@gipE?M_$(7yeQFD_%!tkLhP}9D!ej3wgEWAgXt~D@nWu^ z!gndBHlc}Mt4Wyc{dOckG1m3Z3&dd917Und#MHN`d0h_DTb$6F%>=i$J2Z z%LWJ=g$t`~e}sDrH_1AhjFx>Teay|f#4jaAR(jL>pSSYMqaB9{W=0`LZrEY8Z8A0 z--iZJ)<}zVCNMbS0o-=;$&L*5i3jH{d(|s=;aQ$D4vg+1o_NDE1Y721ao{tb+HU*D zaVzem8)?dPb=+De^`{;YS3e)Er^4WK@M0HG@FThw2FkPY&fs3rCA~9xZ(xCyD9;lgoNloxc&*VW4_&EY*C< zSo`q5!BrXu=HB{;AC|xP-Ve*%9kkmzH8P2;68qnBk53Tx5zTH7W-Ci8bJ(q8_k*^6 z_-Hrr-(k9q1cG_N*kc1_;?h_dI6*kbOQYpyzV%f3@~=H!KK|-tx%A4hGI(-;U3zv& zX0bez1D|UssdkK5g5bG?2@MfFHP_jpod$`ZyV4)Vw|6}4!l;XHwaof7 z$lQD2lL@ysmk}z6_;YK$tUYKUk`kI%w62G&{~oHgw~I4xJNUt#9o=dKsO?pJ1Ym@W z60?(!S)(?%A;g;y3`bC%h;}r z2L&aW=S{S`9u{Tz#zgvoPK=6Vl$tho1b|Dy(13m^0%#prv4uV=?w+j1w`#U2PACcpPOPv@29vD#I)cM*dS<*v? z(TeC_$DFk8yyM#EjzH#Z<}ZDc5}O`jZjit)^TfBG$Hv^ltJ=`qt7Y%@42)soN<8?E=<^5hFol`nqvO9&>5!RHulzIfq6c2fs1 z@f$rlUVi8Ie^1%1{P}mjL;lV(ar{J%nLb81*`xTOtgs`pg0rwnKB=vfb1j@VrG6uT z>9{+`4$AEPIn1`82j*|XJh$0rI{UNBr)d<^5j$^d<;q*{Qs#OYe;fyHqA__!L=WD= z4{t3a%z*>OnSC{cFGV*;8#}-J-f#RNCO9w9|Aa6mnpRz7KrCZzX@>Y(1O-qbF2b1L1-8uJg6FKwWt!Sbf*24%&ie$Mj13VBn&Ui zwb~KwAk|`m*=U08To;lMDJ?*T;QZ{dNV_sXF8oSNE)E+TL=WJ%&$Z7AujK?qy?U}| zJ>ZXNEyt~H?|X^e>kfqt5s8TwL^(1)E|l(6$oz{eg%2011AjXp_~)Xg)>>E{Y#xtn zooXR0zXt;BcH4$@PA&qX6x|^lz$ZRrOm&1&icgs`bi(Z-N}6a4$!Ug|4#+t0LT#3p zFj<$3xs1AY)h_7Gg;#*G`0}i2-|jmyk+w2S(I``?1R^{R?8?YU0F-6<3^L0~9ek(G zJPR0{N22$A7%lsO%i&_3F2eRZ5KtHRYvgG%aTfVVqsKd+lP&2}Q1g%F^WJ*A z{BFX;)0&~a3e({uugaWo13rYMCS~}7i~KMVSFP7FExWcIBvSx=Ca$f&fsbSW7V#Eb z2kr`w1xTwYhlGz{B#l72Xo`R7$IE)fVcJC!=_Jnq)GR)4GC4`Z^QQfL`jy1_CM}8s z+K46~byawzF?YXGci?KYEemHO4e65)xNK)&r|k-ZwJ2e!Fh=_;fTl~S+@{a`Wq;TD zE378;smW`4@4YN95wMcpd4l)$PsTd%NcSn%M;{6knTTydF6p!(Ao5#!_OdR^7q|9v zh5f_lmM5KmG+pvF@_X-`P#3&9G1`jQ(FY->u>b3~e;bVqtp2d>+L_=V7^M>x7@is# zx*|;#AY}Zt1&6U`Eb4GLh>gE7(T9kcZp0Y1#?o=Np?Tklt%jzC&M{%8VJdP^49^q{ zqV%B5*#FZ`FNHPG61c=u`a?VkI^TduJ{FPS+`c!Lc=X$O)i8PD#5vp6*MH6QJ_{_5 z^4Bt&<)=OpDTJk;8?acCl##wOFaNyq-RO^6M}<#3`p%W2j#GY-4z*1V)KM!HO&Cwq zIBwz>U=$g8G@%9`K7?UFbG*Z1+sdIwIttoG`#`3C17r44BD@T8vgiR}M7LmSM8+YE ziS7jZFiM##VUEnFowB}e+p93I%bX=Znf-mEII&KaW5-XG<0r>CX_ym5VHOTpBXwpR z%Y$5C$;w7~{Nv3r`;_y9>9<|M4e(QvSpDe_W=Fcmm_^I_UbY zmRcV~mqSfSqBX}s*PYa3XNFM9F%AerdT;I?W>bU|rN27J+m7w|SHJjq&R)2NX~We- z#aUm1fkYrQB&^Jw=0|Pd?*9A0ue~*lSqF^Z2JPF0etKZ!92U}`f7|gv7)E&L8h{2l z)z|r3g-;K>!Qcq!a*=c$iZ=PQz-Tb)40-1`12a(F5AFfhA8H zf|riTS|n)7Wx5WSf6dN%ab|UVx)UP5aDv01w9vS6^)dshj#pvXB5fOvx@`y`Gk2B< zcj{b5B<+!M_TmKu zAB2P1Wo8QeHBO!?7oI$Wz5i1&i+RA7m*(ouMLJIZ)nEM;<}>n_@ND3D_TF^4b>|+! z%>ZMQ-~lL%kh5ajXWq%2liHW`!+ZeVrf?GIL`6Vj_Z?#)a}g6 zebA96Z%=;qQn~oCQ}{f1AQz*aGz+wKcb;7()`)&I`|}9G?a)sz(cM1r{HM#WfBBp6 zfve@s8$V`k#A=0k(3xH9jyZNv4hc-4Z$j`)$Mq4#%t%OD&bU@cQ@e+|-}r;y{b!{# z&Iq8_QAD618ZlazYhpKJlqg}qp;NG$P-kP;7Iz1v*=juk4+|~p*W$rq{iFCu977;F z%9?ic`03X@NzK7H2y$g~SBdv46KLax@Gw>jv(2O-L2WU)WlNo!oLF6S9V~9O$Sl(L z(PX)R$Uv)=aWH5mLopDj`55$UrDS2?heR4Jq%VDHE4VD%Lv)(Ug&=PoWf2X`UaY6~U|mEmzwtqZ$a5Et{hP_f{XpMEQ^AO0Jp2>xv&@+-m`Lo4DT z5pc4e?FA>dIO< zDpEVNz0z9l(_mNt08whlYRxlW=K_(oBkC-%-ME^slaS|9A4HS<{7tz{PD~hmmNc}% zIszZxc8s5F4fwQiZx++}F6EeqkN1A+`#efIpQYR+tLem-su(HB8oA+r#i<;Zm-)io zCqYeR@mBCi8lqlDoyMU}c*sb7RXRv=vih#Jt{tiBhl6D-P@%A*F2kuX1_~{F2URb<~On2ZV%gA7kf{_S*&?`iJ&d zrUNKkw^MUU7jE-cI&>8=U4_TGx%XKkwcjilZRO`(TX+wDIcQ_yNM8g25)}$GY)4i

    wNzFe?wp@USI1gyv(kt3mHdq|YLT^MpQo0_yEGTBIymJOrPT2P zLY@2kBn^KYGdxHdVe~ne;16)|l|y7$f%n@8J`c_A5_V>8d8SO=y+v%sY39)_!qZ@T z0H?Q)YITlSf%op-XOH_X-1{IB7&eX+RtrpW$S3a*6=z$g3L za(!=qo#-3e*rO2tl8{4sy7uki>3e4l1NW73_bMK#Gn}+Ey;F{lo+{@bKaBzYC{7O3 z1e1SI?i24&9`t!mLfYEKz6iUKH4OI&yNct%3a2)$mfj;hG1%^eCKDDAj(7_xDce@s zEnu7Bw#Nuu{3H=2L!x_3)^iUNa&q?`l_ANv^oiogHaTaGtb)sJRA4q z7Qp3$xkbiQTbaOFpq|Er_qE2`z+#x9vmbl5eDcK?%X@FXUGChv8@uMQ@v-v6g{R@{ zaJYcu)uR@fkJzKM1-1PK(&UMYZ2!O&t)S+mA{Y(EHd2DzvkXl;uQ589BSkj}MJ z>cX(<2=-apW9+cyvN+Gd71*oA{+ZzU8fH5WE)Z*T4`&AF7K6g~A4Ow{r|XW61lS|F zc5Z-Iolr;ujhYV54}5CN?8#Nnf9z%2yj8B=ybK4g7SpofPqdxtLWNGoF6MwO`q=FU z*XlNK*FB6$19OVw%#YsKj!&b%s2z3O@9GUez1qDx6x$g;dxm|Y&$}_0@7V8*otNBV zd$%^wXf&9QJm7(|FUkbAX`4#IY_%ENQ~K9-I+|E8x(R=@z?khpnm&YQnwq+2W#!Uy z=TeT&s82utEC%pLu}gZkeC-#$7C3I-xIy@%emr5XC-&;IFMJ|Q=rB_1^vtdDmw)*e zQOSFl)ls>G-y3o}FdxWWIr9mKKoua6D1lySmy4?FC-+u$Q}DfhNSENBG(%$EhKC zHjxgOXIH_J{edk!heOOx&$_6UK8|}ficA|}cn)xu8A7UX4jLJrEWh(B{~~tm_wL=r z-i+{+XazQ1k62&&=<_x_lfUxWFO>nL=o>RP%ls-fh#33FHWYw6kyLi^eqWozSqR*R z7=U5??4ZLdZGHN2Vy|L5_2A)5*33>EnEKgHYz2O4ZFP=Vzc}?AqJ3~ejaH9NJ=|3r zg%ra+mb9@C-KeRP*4K9*VAnMm2Zy#{JcQjh>>^%OAA?0iuHv~9B7#`rAxww55E|Fc z0il7>?%!7uXGS_-nRK~pS!GRZ`Q6yK5Qaf6AdOKFeeiI{5>*OVqA)Uk` z;ozgebsw2_ShowJu~J?1w8m+ zfv|nn6&%o}z!m(-V$g2VCaAE3pE~`|@(P|L7D{mi1#a={%SbzX*9DtL;+EfG5ScJi zmy5Wm8MR>^89aIp1$I+Yrh8L*bxUAgReEAEr9rmC=k*nR20bWl+ZSFkUlCr&TJxhe zg%(UN9-Zu*kgQJ}o8FgUhM*1KC5?T=CA12+w%?@HYtt;1IjKy%=x_j#fJ>&^?V)(^ zkq^XHT-b0?#_!-L&xIQtk~c=GOv2*Ka;Y(CEz`b9I3wwU6bTq_pMg)yilCPAY8@`5 zX$_OHWm&IRg^(N4T2JVL{K776IYCoc;0c3enY=T5W$rAb_OUSg+%kj5q~Q-wWE`6+ z^`?5>{PYTMsoVPbFjoZzpEU)5B(oIDPddMAe^|QGewbyxEv>l~CSJYF7o6~}j*$vG zh0yP^DbxvVR}8SY&}RP$w`nSzHBV^Nx6*ow7f$OWKi^3ooN?P&_{4{oWtzm_wSS}` z`T`nMMIPbAu}RtXwPT{zZ=Tu)3kZV*KDFzy#nQ2kF~Y4KrOE)+`F5jy=-klFf?+8J z26t~>wUqrJ!krtfC!v2({Wxix$MPKCu{i>!sxoz<5+}5m{IwrUTj{R$vBg#4(RSM> zUOZxous*7#4lyo1KD=ymeeW~pXRnM6(m9UeV32ZZ8O~*nNJ6O)NL$jw=rSU zSRf62J%q-J&?Vb&y2wP@d@Ham%n`&KMkaJ){j{Egk`Mw=$+8$$Jk<7DhuZ_TVGCPV zL)$G)&k@$H)TIHq^xI5re0j5Y}?1do9V5IYs-v{mc| z*4J0cvxKlYd+uaeB^*mH_7pE(e2x>IIEr-Y7OH#FEq9{E+bLJC8dq`;Lw;fff=~EJ zPLf(JZ+z!wdHefw<<`5K*e)2a5CiRD44B#S-dS2MS8l&gJsk0h{e^}w-~8oYK^4A{ z!0gs9T{@@jVTQE5%2qz!!u<#VAQHTCS`|n4Iu@L72MEoh`qx7PXabBL!ROK5Eu*q* z1^2FlM!eX?S)ylf0(I&Lc5VF#x~R`k4|fn_^0D)H^|tPmE7<(#A^*(9k0GE9mk(}E zp>o|XW0OZ<_*={McdwV(8A1)x--&|@Q=$WSCpKU`Y=QJ*_}o9j0T~Fvb7*wDC! 
z*sS&4*z;iHvx*A*si$5>NPDdO;0J$OZrzxQUCUD+`&fDW;@R?**S?5rDMDix$M#|v z&8fPCbD^z_@u#18vW%hL*86!CM!bU{CYttEK~=6M<14TP#) znDHaT#?;0O5tFp2+))Bb(p_v1bfUFf;V9gddCJ8Q-10OC9~ehVihbG!48C5h#%eXz zrp_zk-}O+nwqs6J?GqQD2e%#N?%ZWITS!M7PS+Ielv@-sf!$cHZM0_(PvlnmW(zu2 z2pNEhw%?6qISBLYe6tL_$OmM-ai}^kuvOv+U^XdZz2oWML7Z&xZ$NL}4qagfHVzuG zHF8dg%{Vl%cZiJ^D)(E2+*-ZO*3T@)@d#JWw|ztX=_h%`S3dP}*2yJ8Hm&e}3GeSN z4yrhPVUlBP&m+W7m7l!xkAxDs#^DOLQTcC`sasQo`C2J2z4U51``BaUyMOU#({SAkJP=n%1LC?I5prkPh3=hWlLk2Z`#OQAdjL=yD@mReKI_o z$M=E+lay{~eGTDY9a`T(=w&}LTZmiO!X2<(uueFs1M+Dy(?2#CJNF~l9KFCH5+{$H zE^{0A%B`uXvbO>@nOIzFdT}x+*bgr7uC;%IP@CiB{^B%I3=kMm=y>Tg;5OwvUcuUl z$q&0t&)2Hl#^ zW1Lk!jN)yt36DpetNnJ)J#+d3_I8u(SQ2k_^w{Lw4iL4?)T}@lZ+8mlB$SfwLBWauSYnO$r1pI zwQS${?c^pb#%;4PX(LI)&`^J><-6hpT87T05ijeqkEkVWa~z}(@|H(|p|;WT9`zsf zSzd%j%i*2BYrm!_eg_=-v8JmvlGrx#k&Yhq0r!=81J~qh;KmtV71M zxDsxkrJUO83PYowCOw7jfZxkk^QH+}fDL>}hZQ?{+<=#Rk~Q068GdSc z0W0NMO&y=LUds(*&+}%xYrVqco&9Dae~2gPfV4j4J_-lk1%5RjJa;|{6Hh|zfm&hM zGlqHV!l6;Wi>vd3+WcM^dku>d`o|K$VGb&R2)?)`wndys$5!Ajrg@})(ZOBq>ENc~ zBZGpR6a`I^hxS>W__D0b@s=GSrsCXUGl%<}dMfoa(=(n@g0E`*)|`bRWs=AG1+j;4 z9|N2Ogywi14_}X--QWP4&CL}SFqx(f7z3WMm^sh&60)Wr#m*sG&pDWMZRZR@b3j~8 zR2AgmB%{0Z>8_#Pplue)R^lFFE(7xm)8@W;F2<0H4@tF?a7^{lr>+McCNOhv9%JS^ z*b{V@VZx=1b86H;#~7Mz?T28xP$*4L5nB@;d;q5ocLFrD|L_0!cgt`8#y4Q3Iq3>U zbPO%x67jYF^w0i7`P(1g#a;r2e4o%mJ6(i5f+>D~F$WoFusuYK%zl_mneUDH*5SW- z_CYBPTL4^2z$i*9oY%V1PQLoetF%@x#*T9A1e#fxvzI^pd0Ix?K~L>$#g>KB>SUUB zVK^5aVk?Dh$_}BGlv-r;U5{t5^H{@H=MY2nmE}cjc@VTsZ z-P*6PPISRA|I8;pS`SoIN?gNn(0|q5-CT4{j{yM5PBDF*`);!-q>t<-ycK zxqlPk42F9EqwBF#L)d>1+6g-<>+)35Eo>^{^-3$8_hi}~=ZqvAM9^MxLRmjvmmZ;Miryegy5d3E5jS-0w6L_-5fER~T=2e2scd?t&!LEWk7g{jLEVA!VPMpQq z9)85#Qpd4MT7C9XU?5r2kitWxp~^!Ty{B(0~?zp?2&d_iS4^IU8V`f zubuY>0{A)(6vEQW?#0;gqvgpbpD5Sfd%xVieFxgwNU-}k>;pUK=P!Kz3kVQb*=4vL z;Ym6fn;0YD{y6g?{3cGVDxI_gN^}q4PE-?sl+Ie|yY`;C?&>c9+a?9!I{8u)fLsG<5t(IdAkN`dSWcR@=C1oPMq z9rSLOyV%F+gzK(MbIsFZe}G-W#hWNKI0jk=iDm4xdhjGizRooH(zBn8CjaugZ$tN( z*dWCBv*Xh_Sb6Y0#`Pvq0QMAMoQvV#4PE4#yMaxR0zoHy{J;7y{uMhYV_9E+{!_21jJEg+Oeq^80+$~O z8sr5Q5~R(x&C5EH=Oe$H&wWP$kJ9=-Yr7;|%XASiO-g`1#I2Wk zQ)i>>JoFE=l3fp(4l*TNkT*%^WQW8I9#gi2M6zi270?2i?QQf6u-2Uq=wF727uK6y z9bilS=1H3>zI~Ro;>i-z*xaQJ&Gt3%V41a?+9u16c~~+kr0|b>+aw+=PHaSQ;JHjo z1dxm?-&$wQlOOyMX0McE;~L{B!bV=1K1pae>1yJXt*{t63iDX$$uV4+$q!*~vDO1~S|KEfTJ^D`n0?Te76lR*zirG{=4a;!B0dC@&dwIv zQu)9=`gMbK5`@FR@xg>>`u5#&_42y}_`g@K5vOt;;h_V=@*M=PWf4=00F@OX~k^A{0HAQXhi1&$Ejrhbp?PN4$v4Qq9EIm}mIce$$@l_8o8gE7T3H<}hCJqj}60FXv0g#?i4e%q?Bz_ViEbzr6^*ItMVRLkpTCxdVaVI))%f*q3$aXCB(oM78sv z2SFMSbsQ_U;jxS*tWrboX6HHiB?sOv=4><_jW=@(G}OTu@vsed4)!q7Ggj-tI=fcv zA`KEIsI5!ErLIASs0* z@$wj3q*-TYsu$XCXCq1P`#oa&?qOc#8uZGiU&h2{8ApR#<*hg0AdC(SJMEVFH*o&P zUwpaz&G)`ru3!BiV>%NEt&tDZDW{$OIKobV>;1?G$1pP&8ZzlKKl@^N{xeUNtwX)+ zXW4ne!28pMne(_piu2YQN{1DkGG2Z4vm7pR zBe9G1u$Py7>hzQ4oj2bp@4fpz3W7oAD&~0BiLo;rYz5QrQPgWQ%#)ZCv4TWMS4pYB zsnTN`Qy~Q}pZ&(K{c8CnJCyjPm+$}eUzZ!V2oZE-?AYtu+PWbfkKoR2U{XzYfRIcc zV>UrSY+FDnz#Pdo3>=t?cGrag-e^ zqM8n93&=J-NDZu{GEjFNBy71LQ9E%R(qX~|O{BwUO43U>GRkW3!sw7dS9h-v4GX?k zvkepsES2Se?!$c46M#rWTP2?QR=jX00|OD7PeK7BRpPZ!o>+#@ed~6maGO6{-~#D8 z@>;)3{(;|oWHf^iQrA8bht2dd7ZvaIQ5D{7zZ%76c@?A9PEo?=JMJsvRT)^#%)&^T zsL=+$tut@~hXPjm!!&>(PNM}*mRdfLQ->`uFVFa|&>$?$s~P6Umkuub)c3VtYafVR zD@i^ZXOpE7X$X8tS71Aj{T;?dSVb4VL-&+fg(@@Gd`;SMf@@yXoPW}|S7m}^zGOay zi-)#@8$U8|5rnvxo~56Z>+ir~yKCMiJw3|nEFQ?M$Z~5txpT?N!{Cs2js^c&N6lYx zQ%z5Vx?VvoKTK<(!jOs51~n3ix7scqLA)xbyoAr3mhYh<+CN&JZ8U~v6-<0@x_UL+ z@}Kl6`K_wikLmmRB$-l=34p`F0F-%dS=u#Ls^``|7U$I6Y!MiEM9;wo^zCHdtiRG3 zbjMh*JmV2(SCqxmT>zC5;?&(u8A$O-!Ymq8l!;o}mMPUPU6Uf~_qSsyd8A>X=D9Sb 
za>0H`{{m_Ad78r?d%`+Rw$au>nWjpI-8r$ANTU$`gx4>K-!Z06n zty8X{Th_`vcIR$kJDas0V#5xjw1c{RadD=cf2Nb*;lwCBxnJ(j&z1Z4h>^EXTtGtB z=)@*YR`K?gnKO{PLBYq-9&fUvpk~#1%8)H>2mziZ)X|C%hQR7>gNmPioZ#$A895d` zG?JK&5KO-ZVQ&eufOVM5HFhfQW3PYj>Rg#3V7NBfHgIAEiwcNnM zl(Pl%yu?XU4>+FILx*zMj$@;%51apSOfq^f`*YCvN1#B)c(q`4(*DuiUyEJN-3I zcw1gvO^7HN#fOBQIfEzbiPIzH=IVQ~I@m&3Tw9UuVBE(EnttJIdGDt;*r8cM=s{SA zCz;1P^x++7(Yb>81Mx3JZQMucoaw8v&SSEJOT-3;1q@=A(+i{A(hUJXTSxkjBNTLD zu7r78#sp0C779vmqwb$-EnAh>^ zE1AemO_vZnEAI!>y~~CU?E|+Yk`I zrAJ18@cwnQ<_{TDXxfhqqs`|z2s?A`eEHcge;MI!mEAMEnJF_0v&^0JjmUOB3eugc z+R7;e1{KyTYfG4m-Ol&;xnXvoMxY58adu+Ddy*A|B%Wpm^=&RV_CcR!Tht>nts*w& zk?e|n;wxWZw+M47U|HY11&&tA3ok#9xBs&oJ$?-wD=Ft_nkW?IGy#)z5ytbTcQu^UmAE14aO6tn?F$?EJ}d<^TC#IdXZGof-U4 zVv@?-J;JfUt`GDg^aXyGtFnTYGd4BX&+JPGG3W|hXyX6vZ~xyM8t~_MRe!I%`@w@U zb(Pq*y@SKA%Ooo;xh1i>&V)e6xp0V_exyPJCu=KbpkD3pod-m;A$VM@a%^x2jb}0+ zSt$5px_kgsW|n)2Ckz0um1LbvBv3mqcBD25hC4k>hA;*$GNe}%WZ9lvX34?_&cF5vVMn8)`eJ8^#1FL3HLU$2& zqOhG2t^hPlDGm0YaJiV~3e$mU0|c$3N|QPFt&6yDH<=6Z;dEr*d-*SZT)aQ3VZg4*}=)EP1SHW=754Q5H zCPvt#nJ~<@ojRMQU`Z=D=|4*0+mItxlHC>PE$yRBFYPi@7JaCI$CIS>opsrcD)`mM zw&C9nA(fYE3sC!$JY@CW1v>ah+14pe>a#em(#OIW!I$)!XxUctq~A@SHVFaGpaTpt3D6jEYO=3FXwN3OVX@t&6z0!!kO+|h5mkWn@j8I7Z zYWHln-#X=*T5x2*W#&CjHCi{d{iKt*?IlK{=0T1J!-$yH!14Mr^{`_Q{{Zr)57Rb< zN_Itov#E@*Dj97*=2zeBfnbW}*2&m56Xt1|X8r{1!%TcWaLC2I}YkRSry)=k+4MP#M z*@zFkU&Mr?CntDmD{t9hieXrtmzM}^zO$thB|t!n`T>a2F8a~qnfs4$)F_OwCw{r^ zxj^f1Y4CSL%Rs`}+1bEkX?1ZMp#TO0##{UE12or`<*}b*>=LYD|2%V_Ao;*?fQFb2 z1pHmq{xU{j(q+_T$Xu{D(6TN+Kzoe8LL1t@3+DzoHOqCVqfFnNExS1Nwqn2iET@i* z9z6jZgHg;Gt>cTI`2oDBqr zmc4Fv8BmB}gFnWRwj0}P*%^BVF_%Y#jk7GNmh=Lbrx<>-@qjf-cNv=1k{j>#e+( zeo@YK3X8i4vIp$OY~k!Xb^U`l3V;5yFO?UbJ6o>axQ;U`Fw@6nrxRSUW-E9Pkso0x za|BFlTi;$8J>FNo^r@d=$L1)f``s<~2v@a=Gkz~i*(L%P@+@udF2C_xzlYubD}+J1 zl7l!}h_O0C=%5~UG`cyAqP-O@H_WXL>H0$$FSDBvOQUGkZ{4|4KDcqC93MN*0Tj<< z{_JFDs~x`>W8*69I5#?wbs)!}x4qg~FKH;3V7dZjCwgz?~v!==*>AH;jpWLWB*n3x^*G zyBI(DE++=w_3#ITPW&;t@YAp>2MH6E2l!L1-J@?^hk;YYu;sV1<9xV-zY_`q zouVDf9u!7`aunsj(eYEvJAK2i@1aecJUN+xqDOr)Q;@XN34^OgG1Nn&aRlMhVnunv zq{>8=h1{CGSRmqTN}4bne7kVVNQTv-Og-iW(q}U5GJlp~9WE9w(rUKS8fRA)t=QSI zxJElpGC$^rvGFh_=KTubdBW-i>RvjbFsF|>a$8T zwarcz5`e;ygCM~9h)=r8dn@LF-}&$&3?JVL7D=0TR3h?iOa9u|^|BoiM0hW*YuoK# zYpi$@zQEJSCe!9+SxF_{Xp~!Jv?x#OAK?gy!Ev+AzN?HZ_~9Sb(p2jF2;M_~0AYXl z+&aT#Sq~F~`7AFC7isLn&>;1B54+U_A$3`X{U$e-{^qrs%i0FpP3g45j}wb;EZ>jm zeO~)XYOVsxN8gDJ8EyMa2CX?Bv^|%ut*G{WrFlV-_FIL=_5)94;>3rvZhr`~?XOVQ zJ`BdwHv7tYK1#2)!)IZYDn%$fM;mn7>TEJ^}Q2+UYpc z{Kq*!ji{B@T>{MxH37nZ0Js&f+)=B$UG^z-!tW;CD~L4V5~t#@*$3H)13vrHcVzqU zqR-uB2`$=;2EL=|hgQ=+NndfUu|Hus3mNe(~$cHKt8hNk@DlsPO<0FgG9jFw_Qiuq=bAuPm}F07D$@ zGv+t*M8{d2dr-zsbe2;mhlt16g?R>1bvO#u;1mytS9o-EjCQbQP(f!`56|m~Sfd;P z8YfEDFc}M#K0VkeYv#H`dgp6U89QS8TibZstY4grhW68l>@XA11mioJRqE(v2YDC) zV1b?VCA4lHYIBH*#XcrBYS&xfe)eg%!_YNH6h_m?jatTS7#(fLm#{ZBi1Oa$iWr0=<5{B9e3D2f zMp(h26BCHR{vkBn7>7+(a|# z8qXRsPt=x)Ndm1O>B7HasN6xoSyZUwjN|cBJ>~Q>*jS^L|KP@5@W2|4pmG$=|GnGO zi9*x1EyL3Z)4l?(uy?;ebQ2tzH})4o`}zWSOsn=SCo!XX?Nb~}yFXpto%>PU*@2$c zbl%)^K6Ix9B^&MSMnkP&(LG>nbPn<0c;TLok#chUaiTTN!nhKP6~9~IOv9slCU;8RfwWrG4Z(X5(?-Rcg1qwSn9=OnsSxv?XJYm*BaHapvojZ4EH}plF z_YmIRd+)vS;Lg2hx!2k0o5Tr!kln3$>M`1vLi~dV58~TU`5?w2<82#rI3pVkPN3vQ za2Y#|&kV}%El+P_oDLm1S-$n_e?XL^k(dqbFjwf1tkZ5creb~gIZR$Sj(H$X^Ni`$ zwJF-q?wE5YbA!sBuYdU)<>i-NW{!ZSFyA|U{(PA{eyUu1PXi!li!!HAd>s0!>;cEWTLagv&+HkBe_LgURA)I~(nd zSqycH3mKCz00P71!jT1)TAC;SsCoV`i+);KIW9!a4 zzba0IJ5WJmO0XHo>N#!8qq-}6mLX#m#+66nSv=NO*?`)J%9vP3Wult>L4u^nn#rV_ z-@wsCfOKB(WkM@sW{D9Hn(&HtFMczxrXA)H2Jvlr=2IRyaaLNTJ2J^zW9^rUI~R~- 
zG@oGPU!zPHA0Aqc_0v)-0580+)hDllt@ZmnB{&xO7Xg+03JgwLd7Cf)BypbkCp0At z+*4HTXTN#k>4LWFqv@JVsPJYiC0_$EBo*JeEUeK77GBG*^GDtbpL0#4d?7Pceww~E zs*$wfLcy%=3|d-ETd@zesWW?5eRpM1y#x$$0_)r1G;o%9LK#>6RH zG8^tRR^b4~QQY&#C_y?4JGZ5m9fPFvL?(p+9c~o{oczVLsViC|&byoX722SnTHHX_q?VDhue+%YqE3+8v0E%dwP z7Lh_`?yY1C+)##^n4 z0>jLLy~lg&-h^pC=w2@uo*FG*efGsN4r8pIV|wXknOoy1W7^t)K0qt)aZMbH>oLT8>qf`ItcJ1-bnn{4<__zu%rI@D{fk6w z8iL7gMXNqC-V1ZgfeIW`TZlf?4&&WTWFV?SD>+d<_Tuw6V|U|saHXu?=fpCq(%cJa z1(Ta`!ZVJQ9)hLon7fara|<7Y9hkUfn9gHplz-;=XR!TVD7WwjFg#?;sc@>Dy-!ee zw8c5S4~BEUWdn!hnKFH60jKOfoP>Mv(ddDhYA<)*$DSQwZs3UghVv$T3L&Ok$F%X5 z=GCZnV;ZxIaO$A|+LB`x!#n0jBJ9Xx>}_xlT{-r**%fjpK1b0)JKH#HJMN4|q*Wpzr&cmO=Z6K-3qqo?|On2j`911$<$F)+8Sgx>!p`IQC1J< z;ux(jf#FnTZcm+@q#ZihBgj!#<{ZW%R(RzQ!Q1&yo=2aW``g%xqo~~iXVR*II+-v> zL!yA`Y}V@b_@4mE&TGNM39ByIj5UeoSJ< z3FCVH?D=x#z3af%f+@}O2si8&xC4S&T?hTVNz^MDW#>{SOWw#B-UUnStY0xBX0Ehfl zb_cq;2Fic;U;hu~=fC;0<)trtsyy}SkC&t8&X$(pfieT$#?BoLj#a98j>QnWHAXRW zt~Y3ZA0}`gyZ98LTK5^FE9K7YOvdq*H{UMr{^%#vL)6Km%oEHJu1(TVE90egmmmQ* zRuUD+*;0kU=H}o2yKnyMQmQQgg6A@`D7dmO^flXO{B=P8t|i@=@U=M}S<6=Ep?A!K z4xoG6oGIDfL&)1i0FqbGXUI0h4jIU9lyXTQTv4J6iG8E?B}|xQ<$Ys- z^24Rr#aQM|={k%Dzr|#vQS0VoL6#flmP#}o5LcBMXw(Ue!ek!?S5{7L7l8CXFbl7{ z9`;Kn2@0?6s0*J}Rh&yV(nA%D!aOvXnrdBMtp>6u|K#^KhZ}`#@*jCp`e6*sIWmMRPKF+ z+%(PlWiBm8;A#u$N0Qq=^*3PfFaJ~Chh^B_(4c>~EK{Z6^4vU@uOAN31y13&RO@$6$xM*?8g(|x;a8(h@Cf4;ILITG#ASxCZJ>N% zkmmeOzWUhPXa{SaT8RvNcTR6%0%lB${{fgTnLn9FeVlZm1@HgmB*!Z;2e%}Ld;LTPyuPn#b*$@1vBu`4O=6LWs^lAs? zVyxKj&S`u{*f%eD5f^?cebs)e&c5QPzT+!bXn@8EyXDwNb@!mI1N;pPKKDv5hzvR* z>C6k==og@t(R+mfnUoe7ibWR5c}@sg#=gI>3$TIXsaePoicrEKAQNQFszsZ zz)%_aMeRsM8|8u{7hiqG{5Usnq19Y?u#|O6vs#jrvlqv32u54|00(EZatb2u8hOO1 zrV1*CjE%4J;&abGS6=$$OXZp8p2S385XQQWJ!7cXLvC2GS=1G_9fOk`!+RR@g?kUK zmh0E9lQ&xkN2JuS__O|YI0Ge)~TA{V=Kg^cAnd^a$q z>A^O92*=gg`FmxaU5Vv;M6Y2t;KXUdmEun!tth-{yRB)@A*b6pwP^4!d8^DLabF>%qU+EATd&Oa*~IOHP4&~>>OrPO8r7%YTy17NV%F!NTB zahGuyzmEe17T4}><~GO8nWs+@35$MV2W(q`K;Z{^_WUF^jAF{Rvw{%Jt`vhLrYPcK zk=>A)`}35GhMF;`1M<=uI~0t29q(oCZ{56Ee)QuvvU7Lh5a>YVg+TDb7!7lX91czKFr1q zCv$O{o@@CCVf?|(S zH|%K}W|KW5&=mdQN5Rv1rVXKN^^UxzYeiiz9D{69A*d743>uc_j)IWMTnFVfb{yOK zhelr?CMLdNsTK*V;Qy=L?>uXcVtdC5D>zQGq=};97HgfGR@P$PChP(E)otD zuP{N*qjZ|%#I(_dxfn6IMN97p@}=nj&S%abE>4!E5D{jtQ9@p;`!gCkwv4ALY^<%Hy2XR2V9KM<>h6N| zTBh&9(2^%i1nq6s7Z4hKCa^UUlKol{HQn2+bxmfTw#McLxQe|+b+ z8HEfSRwhOvN8IK`@_opNGs&hOt&9A$F9(1L1DDUFEz<~)zil5+!)WV_?t*OD=`-Qx zy<;HE7&x&jA~-c*(9F#Asjt!22upN=cyp|gfX=DqI{Dka3J~~(#st+t9pTM7^DG^Dj(u@EV234AW(wz&yS{aJBp9?J>y%8l)C%n7?=x7KGW)75VavdhGM zXp5R_AIPA(ZC&RYfw2pO+dg#>l~ydrezm84Yh9s37(q=T)$9p(H|9Pvfjt}oW0bSZ z7v^&UmXQSqM@DdHLop%FJPf86#!q}^>z${jQ9)4qLi{%;ZAm~p^PagV?WSx~Jc2)^ z@+TK%h-d!M^pqEYUU=+_w8Om4$9#BBqFOKChCKAQ?N#Wrff-`rny1pU=*`$kR`T*7 zePO*Rvle05K%Tn9WpHab)Lq-s*!^Iun~JNzA`IY6O|miQ6>eN-S}_e;hw(f_bv2Ln zcpKA)YDbT91qO2NA-1=KRitFrt5WVwZ=q?#AAp7bu?qyZfbn6vhdBjy#^pBtUTZK) zY9b$EDl`j&J^=IQdZs3N59Y_+fb@ItAU16<^|6L!-2qk)e-l5;EbQ|*HmY3}ed4+u z#_sIdBjxzSM46eH&yIkMugWd`%`RNLK-~0K$^ysvibEOoH8k?Vtv0^}6N&*sL5>hn zW?*!*oIZO7p@kh1Opw+wKUr9YktJm5%Far82V3(+cI=;i;Vjy6Y~L{-IUu|z&~SbL zZC5Ldc5(DrFJ~O!?=Uim=~{oekERh;la>)J564IumR&Ubw$F0}2C$7bTFX8=5wi$4 zckeCW?_jhWoiq&%iV(IiS8m@WTqEiC5tMA>5N8GqutTw7*hLs(1uRWzJs?a1>VPrP z>_$!X#S71sQ^z@a6^3Ypu!ucFL}bB6dUR-v@_-*<@1WbYnlO`S^4B&Wlq+xFD;sE^ zU8nkqLq9w;R4!k=TfX?!{~TaD%Z>N`9^V1V8em>xeLuuhq6>4LoA++Oc+Hg4M^1-G zmp5@J#j$m1Mm~lPc(-x7M zCQgrnGX#I=cK|=5O@ywwsfXppyYH8`-gtwvD865Q@VEa!^d*ioW-S;eEb965=gKRu zyj;#cd5JKo=SnB_$^5%>Vq~#?b~Z)_jv;&!dVhV6`DnMi_s(T#0yEM+BUOQXrXA;S zg|Gumy4WL1bfWqB1%%o4^7s=^;tR77p}8F;lDL#{@AMo3e17yT=)rT)b>4F})=-GF z@31p!X*J4 
zc1-40yUA@NI;MXL@FSCfH0)&OUjwnT9)QGE^a$aW=%$*$(V$xr7QaIrKKi4i2p{SS}}hsO~NGcN*ij^ zrXQLq9yLn_NZc?v(tDbjkcHyV1~ZVTb48oQtFn;WCGrYQ99O@_ioIw|SLb z5iT)lpVqzP%$7|lwf7X#Vwf(+pKBL6IVmV;|{ic|V$t*SEll=T|jwzC)O=&9t zS*vX{k$u=K-$^EUC4yw)Z$*0QM@YWu|}3<6`2 zjQq9@zyVF#f7T&m8?7Q~g(2nJj>aXU7onJXd?y@!vU|rf`>@$=0BOJ`y-8c*(K@-K z%(Q`W)s)%x*zg03ha;-h?P7~ueaedbCZl4@FrXX*cW$!^EPZ_52<5I<+N^SLo{n!XgbxfoA((^Q`R@v zV$-}0!=d0Y{{Y_s?EhQ0=~EnPWs)3yYFLMdd$D&v_Amc%{M)5eTL2C!JD!L+d)OY= znCdWoL=x4cX70gixqS=fWY;wgO`uUr?#+eaR1dd=c6)o4ov8g`>f{^-oKagb9q1y) z_~S1Qa{vMTa`zxHI&{9s~%zNZWN4a@*3PJ8+9Of0IrPV>0qyOoT|24eCTKThYe>V)% zF|@WGUb~0mD>gR>bc`V)&#a(9T|ofCpBa-L1gF(a4s>!1>Ew%0({sH1-aq~A^1*vI z%AfvU|2w#Y*_lMZgPCg~-v9WJC#}N#Lq9KGc)T3#J6T40kFg_y11yZR;Yl|z9T{VX zrIgnX|4kRtJ@iP&JNfRE892&&9Le0orT z^@IgtBH)|(miU_|v5A|Ven5=PJtRo%OlXWDgp|098aIDuwLApg zL$+|XmoR*HOATUg#okRP6enIMC>OZq#A$3$L+c#CL9VdvwhMD4S8Kdb2UTm=5Yo_* z^3|_=m2IM1<%e(oHK$`iz8!L<3~mO|;zJ2ayDJ7aCjxrP8eC$cB{-RCW+DS(7@-!p zWV#gTgL+^x8QBbk{&MC0DeSlAk=WL93^Gb~U~xo4M{fDiN23_@cMl(wcdz^;?YVaS zT2$xa^45)847A0vJinanh(W?Ajh-EakZ|x=g=s^o4bG|CBi&ulH<>(EIFw`@&+;~H z)R43W*b?&Kfx|;49T$Bjg1dL-*&aBKB(+(7%AmjU_K(YtzW*KiNex3+nVdLHNTZA8 z)ai@F?mY)h>3Pq=5!fcq%-jbLZRG|wIJ*q!0Ztw4#>U(k*CFOaF9W9b;K8*8Jm4qF zzx%iUw)|JW|1ZjS{=dJ76xfChnhU2BzlR-oh=hHFLq3xuw8=uXy~Jem(1|P47Zb2{ ztjEeTFMhI|I&-ef+@0cGl?YrA_LvacdXNZ?;ZY+YO3X0;cP6LP#Jqs^;AOSKEaT~2 zuH;}Pv$LyP-D7~j80m(ea8^s<5LlB_;*tN;(7K{P&KlAYJm3FANy3LS#sF6+a*55yx^= zDbOM)SbTUjIcm$ayeh5LI>b-XEd)s5HEKZmoJvX22ZM4DX(w7`|lL%5T?^Tnh915vYOo~Zf^IKcKg^iD# zwQyOqA5kJMla7*_m+$;3cb%8M;I};-4w;9Q<2IrFT_sM(gwHEp%pl#G)|?fmVo@$Q zb@prwT{2%y7v($~(VF-oawl0X#Yhc-*)W zf!hQgu43T-dOWb|Y`8J%ww;#a+>$L-3Rm@QnN2$xOK##{wYd_3ZOv9Gb%hpfEBC@) zVFfMXL#~=j+zX)Zn*Cb8d1ku2ukAEnEhD+o=i(=GIoxdIqI8)p7P8vMDMP$bhvPsR z@Di@tHx<7rUs%;c(k~U}S|=%klSBKOw#Y0v=d=;>CG!ct-BPhn+!7eZFuji>OgB)= z=@on9gPV!V*Tn*8I3HnA{delI$56FxpgLQn9L6*@Du;03J*iYVp~T7+pQtC$G{o}N_-on+;V>A zrAzUGeeA#BjY`$!1bWIzjTsDyVp;YlXUFfVA$fdd8#W=*eV zw`;Twa^MV&^y2Y)w46QvSUE$Rx8dTJIEBa%Mb1;*7>mpK4FG2+;fx$72dJRg3@@YI z6EL3L<>uv?GCzgw0wGMEe&KYvaEW-H=dgV_#@K>!U46)Bo-ib|ubX&?U4$#@1mCX1 zdN0r8mAi$P@#9aPg|20cWQ4^B-+OrZawve#7PzW%Y8WShp;d@wb|$#SdZ(z&zc z*w6_i#f|d8?T2`9v*iS)^!y$^YP?_Q2S!X`z^$Hb)at7+;d}5q0|)@^@IP(62j%pc zljZpMBwJAnFrzq6a8+1ADJ$K!0B;IP7+u(E)M zD73MMR1+<#8iTB991YEBYVMkCV87*2z|yl070x9KOS1?7514y6QW=lf;gO-z!uu7Z zyF=*3WyW5&tZk2ueZXoKjH_wwStcZrB8eyKK7+5u)p;=YU%$8{uCoGXCYGl+H0nVdY9d8Y?0L`&~3+hIzdu5Z!^+ne;C z4zD}R4~w&FyA}=w4`5 zO+qvvS{y0aRTVPA1B-=&&$_JFwY-Cc#P-B?1h^GcDwE6pDmZ1W1fx|r5-;Wta|4WF z6zDM9oqd0?PkP%O<`qH~R`b|DmZYlF#g=6BGI$kFd`DHueVAt2nMJ?xodIQi`VHNE$(Z- zRybsK1ke7IdCs_@4B5LB1sw>#bv0?!_6wg^1k(o0AJ$vjBD~_=l)_?uX`;ed^QVne zRL3dL%me=Fjd}Ukz@u+e8Jk8mv+b3+56re9c&@Nj{P8dgSM6(QS=w*HZY|Q9 z#)(<(qqB$(;_-U%*|WsW0JoH>gN*ZMC-xr3C^eWqyc|ps{efl>1Y!7HwW}&mhFF-d zUYjc4|GOWuj$&^^i^avov**jpuRNPIWD#MejfqE`0Z^Sk0B>QI;OTf%F2d9^`IdkF zYyVk!mXoaR%uHeH00RRvzu)S1xpu00PqJMwd5$nUI3W<6-Fn3J4osQhm{y3dsI_HhyBHj7;>! 
zIl{HhQJlzD`O{X^`tZZDSmQy6W-Xbul??D7!av@n&<*?UXe<0R0*ICD)M zL@2~}0LO#P7|{2QJXT(L?&b1VfBx+Dk!gw4=T4JLDh@ zPM^~=nrQPd`tl2oOX!_PyQI;{pdf52aJzMtXb4PzP65tEv6+M*ccFO|ygkrYFX4UY zBn}VIu5G{k;xGPkdG#}|l)r!TFU#%w*P%sc8P))KBPMVKYCqZLrDC^^V#m$C`x^Ao zgEPi?97--eezAPv%bzM2pFB^fD7Kjp?Cvs-Hx@iDoX|ed$uJJ8{r#hi$5+ZPeEI9; zKDI}*3p0ToG%Kq#4Sqfj}YAD8MisIaC&;Gyz!&IDc9fmfUPR*{l*9JY(9d5Lq!B!x>sYK zb+_mcfuRclWT$h1QwG0}L(F^S@=xx9kHIoJIZ>W?@nZSIKl)aA>cT`h1)bksK-k6% z}%R9rhi((jG~%JD-3Ae(IjD1f6Qu7*`=)q>t z=fh|?d-@FjBeO2!FSCmsrM5yma4R@%r>xK;+wJy)JBR+3Hp3KKckP|PZnzK`Wcydz ztoU=BSBB5$R%pw;ufKU@J+|2~N0I)njC!yUJT&nrj?5>H6h3QbS*AF%?Y>VTW)2Me z3jFn*uvmA6v)N8-bp{jG;F#Y{dZom=9*BenoE0y?5idmqpyaCpg*&opQQHVGzCpwa z9dit0H*w-P+T+o3?SreF1cZkpd6=p(3`cQ8D3J|peRPPj51ltdv%*nZ5`_z?F%RIF zPXxQuXbBGtlqlpBw9n$MLtHx=Wi|X7^==UO-G~w{)+0wofs-8o)=k6Hh?jO~LT|rS z?Tiqn=(db==bjG6$`CP04M=W4e2>CaKr-&1p2LU4aTI3?7p+Je7tWn8pW_6m`#0Y# zKlsOYVUk#LSid!TSA(HE>vXh(22*>Ykr7VPB1Ym4M!{P!VDo6Z*J0q*FuI$dK(r5z z^d3GkNq82Hb$#N=bLHspd3L<}AbIS5x4UqhV26tx>@(*lnM5+t(=J2#VDt|@kBoQ2 zP~0!?zKiJvabli(_7Wjch?_VuLM+G=XeTGLd$aQYv-M^%njP7lUu5OJZ@F0abaKp~V z?*6uN=23*YM~;?>6Nk$fyPUglcj=!PC_~2v5pG6M0N4S}T-eUWg#J;4aB+cwaIcMU zPn=ZM$;O7&Rd$tfI+$T^j+_}R-T3kR%F~}jN!cpj`s;s(;K?dHIN3lr?`Nm!0ffW0 z9t6P=R*lA~4>y)2PU3p~;uVg0U1ViXXZ7k1lfyIIBtfX00|?E#I9=<4GBGwrCn~J7 z;zHTZ*GtgXJruSbj-PeDzj>87w)CshUX9^}E72IPNC&~M&f*$v>=idd>cSzN>V!YY z3XrQ)t0;}GLbMt>1}w(7%4&qh3FRAi0!ypioK9zC126gSJ^acRP)2nw_flrkD5-3x zW4@jsd*I~`VT)dW>&^1&>n{hs@Ck--Dya z%NU0VOijIB{_dy$P%d13yZqw9MaCE#E!h?8G0Mt8t|WE!v#ABS>r>BvoN{-{wW~K# zx(NP{7n^?E8fn6#K$^!G+gJNI_+uE4p~M~9D$#}sv)$s%)!Px=rv}D)k;{mm3(m4Z zi4^hxd8V5-Hka{Z_l(13Huk*t(yJI2C~ zgh>)LOo&qEuOJHT(r5((nh*p?c$HA2W%EwPD0xchmK(vs#9YodPF@m@$a7wbkYFa# z*NLP~P6|B=I==DKPHYN33fBO~uiegjCV?a*v6F9|tW3gyqHO=vXuTZd4j2i+iP6c! zNv`gAf#_hk8x7}pCxm_iL`(4cis_oiLnl}###kjuqqR_^ zScj93mu+?x!ikodh))Cu{N?il9M?(7tcgp?r{xP&Go5|lCA<`qyb`VHYL)(%K9Vi(#cj<)pP3IBOrzi= ze6djT#6C&JyfsZN|Cc{Ms3T89RS}rLi!l0E+vebIw39j_bO2$6TX>)&=+pufjNEbM zRYN_9Bks))ZOTX^JkcKts^UIYcgi!Ju0pn_S+C^?y*Q81%BRYRbtkM0FIAqJIIV3q zhj|ceERTBX4&pox9<8hCX41?n4S7{dqiq+3VyTuVfVSPd;ySOABUR+P>HTXec%ctq z7TzXqqvTsSZMF>l^S;(?$yFJ$Zukb1tn_F)>kjgMsc!315LWQ6v?m=@`Vof;tQFQK z&!}`_?M)g93=KFKyj0;^>u>ZA_ZlKPfkmOHjvA04Otj5<^M&-@cQI~dNz6mQT-%t_ z!6lPAu)x}wcaB()9P1CQ_PpYYJWf zSt<%DZ(3p#S5n*}Jd93qZ~;Q)G8XvxyGvyifl|1-akZJ7CK?BG$G+BnV78AGCcgc> zZ-d|`u_JVrBa&yY6ZS=yon21vVpG62mga;BA@3#|1@;i4hHyJM{m412 zj$_Q>;27mZd>?=0Sb6G`r^|NRNIe& zyD|@+0Ia1G;0Q>$vCBq2T_kYaH!f>GN2H#5WU?IMIM+UQqe42s%4=3+I(l_;a^-~; z4gBjjW<5m;JhiTr!>7CPvF|Jw-gu>4dgC&+0{Kt*@TB$?<@}=d#XJ5=to)cAT*Me zV0N7^V5#<$upv%^o4rf?(aY0;#Z$jJ4h(@CcCq58esFvgEyN>V@$2tk+a~V~OfR31 zmKIpia*XNv)``niR(7%8ce#5Pw6wFjs1h$dMD~Jy?o7{=y9k$XNG!s5G(hj}2=>@p z1{?w|^FF*lBjlM!AL9txPnNgdb{DF14#pUi1UE?Tpj1#ZyT%9NtM9NrfN+`^wFIRf zJ3Jg-?O0Kki6D=%DgSYmPo-d1%kwPmDr_)WWykg^p_Vp?gDcOM&#&WJ(vH%c@xWZE z5~!kTn_UTd>-wAJovBOZ4vNw~2Ycu`sLNAZYd;5Auv;Hmx7{9h{oJ$9mCyg$C*Y+W ztbW{P)6FZm6(Reu8`)#(_l$+h{swvY0o-78g*niw&`)iIrRpQpkNYLsTHKStz72K| z&$H`&{w^yHD4WBR$Zquc@)CtI&uRqJ$S^o@7GdmXAi;zCs?1`AYHJhy&nBlehdk(Va;7FOhe5c%= znkgfPaSg()DTiCpzjA2#-Z~1cyg(UpaUM53mg8DF;Y01qWs#lPtDJ^6%<;$LC&y6e zPn4tg9b$6}d=lka{@mGrI6H`6{{GL9NtXcw{-jI^v(&ih3Q=S)XinERVuVh-pg^aP zDdE5&AvBgdu$tKbtz6#g0;G^nDv}^%N{uoZfcMAFh%g5Okl=l$^&r9`$)X%c;QZv5 z4~1$c3I%mb^G_mla41+*%es>@FL-NNPBcxcbCd=;&XY0#AA*$;f<9+2(q}b-^xnHt zAX2@w_R4%MN6Wsxyk;|Rs*-GJql6t_-Nr;{h#{F&1b;LrHQxw$lnv?fFj2nb3ik8@~k~+0+9}60|UR***x9h99(mEySNo`e<+ z{3=c2&~OI4NS$%_051x3!eIS=hL%VtQC3>AJleoB+hbqXdhEaY&S#a5tP?sTuX$1? 
z@ta01y#S8B2%OyGP2B6efl3O?3LGjp^vG|lFK{&;RyVmob7*dAI?l2A?(=Ecgh)85 zv;_0+T04F>gzv7Q<2Cz9w_5pb7>a50E2#+NI?$KGN^fsZoEGGoT9KCLg$q1nO0AeBq^0F1@_5i=ZZ z!}*o|h^2&>nANrsH!r*`OC^z%ffYOlf(%rqUvSv9=0^Guy2?Abt~qe7(uYiqDI{F- zF5sWSn(NPX?^o~>EA^Uav6m>QV5Vnl9p@bn-Q@pNl7(~b@x)g$kl5Q={uL-2v|w=E z%sktgLr_r>=)j%dY$yeU%6;JaSmsNmsTl^W@Z_mtDoV8|LUt#-QxO!O_$&j@b?3KQ zWB!?AjtvW~d&lq65$~aEjx_DSdP$q!$DcUhph{>E_6 zGw=1;q;tSKv&|Tt>ta6wQh02@UOX3n%Wf(Hu)q8Ae&Oc5(jDoauL4^B&+p#5(Vq87 zyvt1(HE^3NI6EJ4O`KF2E%B8(mzPQ#ITne-_OAuM2t66T!u`&5BX3Ct@484I#8ubImkN@g(UNW( z;FB|8YpM{g-ag0d;`QCtV_UW0^gboY=_o|GQpCJxjVX^`X zrkTfD`%`(M&s;BH>zI=M@|zwkVQu3Y#Bv>>C)uCt0KDu!m1SV*T22$2@4+YA_YQJY zY)3qN|DnwA97P!Zz2E)4v~26Olq9>i6bu(9!I?VtS5sAN-PISI;}zY#E%D&<+o&zb zDSed_aR$r&@-kkBT5=0G0(Z-n0ScB+$DjSnZ^g}%H^@1_(n+04t-->joy_GqLmPx8 z>$yNbBe%wh{$`Hw?##tespfPp7)ty+p&L&>`4m~95qoy*M&MhdSUnlE*9j}ZvU=^p zIci#dicoqj?%unLqO*YFPaV!3Eo8u=7}n4)o6PJM_*Rx0v0Fw_uFo z^F10W%-&wl#8dz#JY#H(V(f%2{heR@JMs6w{?+*YTW_W^V40^1La?O8GDvB@_-Q#L z4P6>NDpxrf>lVWOW%4D+MD13%Z7&8Nac`P1Aj3v1pnu^5>WL0}iYas)cuIR+^H%8N z-OE>E)PoAhS~iZ`I?EY)2D&<4&?*J_MTF=T@O2rwP}###z6kE0o;Z_=%L9iF#Exy7 z^TaRZF~eWR5YDB)p44YmjC;c)>0z2@oumsMK+uO5%n9l>S;R`*f}3MxWE{)!E^48+ zk`a3q_)G%N_B;@w2fk}~?x$zY#>c15B(L-o&?)*-z}H|$hbZ(Y(S&4W?M>#|#(Idi zD}<%BbX(QK2oID7aOV!5hUqHm<#MRP9m*`3QCmYh(!7PK=Gede$!Kld93T7$#o!hm z!#Vi(D)ZuRX`JKe8>{2RUwtTs3FW)-AvrPE=i=lKPem*Hp`Fu1Zx4@ATa#m~DgE`) zxodIhy(@8py0=a6nwOq=GInDr--4oZ`tY6{rlrD{^BpdX$%>7%9hBUHw-#uCibXnui?NFQ*E38cKHHctdR-kj~u{$S7ua^ z)N|U?*%MDa{XFHHZpWpYL-eQe06${xLpQI*sT1Vrv|*8q@bYE_qZF>NVljS|mKs7#WJ1ZY}PX>uKLm{n%B`|-+>pj9xYn5}7p~}SbSuZ)&9C|Ja z$fVdOZ2XN+z(ywHb%o<%DYPkJwGJ1a&%SsD5ajbVDiWu_Nh8ihqFJo^++(M_Y){K@ zDh*DGmjbH7fNctQtunq8aGaaMeX+Wh(2or(CmomdU+>rN8_d}xI&mW{)j9vdI1Rhe z!mm|cYqU%@g>vvYg+`v+P;eZbW3kPRKSOpTFv7?%7JkCzj%9{5sa442nHCb|e0SXq zyU``QTG7|=KktuNKO+D^xE;$mXVSBWJT1mhp$vghh(-}`Y+6%Y7s2G-(xRgU-vrTG zV5{)#7*%e3m85d;kIymzpHbi_LYo3_-j!>h1v%LXFt&9tCGpyM!Lkto3e0Ue6?X1Z zfU|9J%Ih?dc;=e>5=JVh83qDgy1i5c>cHs+m|oTlR$xVb9Ne*kaWCTuKrk~7lDR0P z)?qBnUC%MJDJ;^S{W=D(d!+M*!0$SWKLSf!-MBBqL4c2Yq8>)X;C_l{ zMHmzBxCXsi13(J>dLHaUymS4k5OPg+6JANdpaIUcjHJA0-}y}G+53_#!d-;w)ngMR%KAWTUfBR#Ew_jrGSt52123MZRM0h@Nw*VgGx z8MpjOLAxl;JR`m)Jb9D8@H`OBd;HDMsRGcJeIy(hNAV_Tv?wqU+T|L3GGB9DI>_~L z?~YB5$K^}s5-uuqBWg9d{&yfO- z{U$GlX1K26zQ@mkpwNZ7*tVCD8S+3@>E}~Qf&A5XUyVu1&gk7PuyMv^zV5pk;`^7X z=Q$AxLA_-rjy~TLHEknt?!x&v`4i%Smpi~GJeKUY-j43L^wHV)$t%<*d?8EA@pJbfvXuM z;mTFQSrE|XXDtzgK*BmrP|{}_fqaazL8Bw1gr3~vR4$T~xo;7{cM&&66;TQmoFKMu z|GxP0S6+%cgup<0LVCx4d`wxAH~IkY2Qc;tz=a^q$U zbHIRtSQQ63c;aMdUwaDadIVP1I7JBoY=YCO^qhHk$TNBu$u!FX@5&nDlXOyHt(3|Fe^F!_>pElP=2~1lIYLr1%19s42+U`LHq>vT z3{rn5NXv1wBy?AjchN+MhKh0%3fVNp=c@_+uY`WhRRTMN=4FD(H}sCZv7)0srU@gn2B;+vHT+0J<%OH6^O*)40`e`C;Tkq0-0O*%LqI+J z*`qPdzj5X^OHIxhJr0x&(&KB`tO}<*dTe}@HnEcTY$D_f;~2bf8c)V7vG=Wg9CSdK zR?;3w5ugUn2;?&>F|g^r*tujpV7SFgqp3eOCob!{j{4?pu# zJo)s`#M4iGKHmMCw>YYo1YyRuhQL1yzRn@gds5fX^;?V`+U>^VPufy)WtG|3($WK9 zc<0s#aL~Kn9Ib>Tn|tEkcTXF_WSg{*)e0Vzxr zWQ38>tTZqQWHL`4bR#Ih7N)^Q;$Mm7VKBWze}fN4yw5qhMfko8F%)h!4X~i@RPO$0s}8I754*W~2Sd z>{8g|hT;M8Du4V>{BoR*Ed@CSqw7i2!&ksoNOApAkz@QSkuqy*pDA3c zz^LHnn7ohQ?a$NSta!u^IWL89fD8UOf7$^i-Y0P?Ry;jkBn{H8^+)SqoGL^LE{Yr0 zC$)-46K+=F<$(wmht3=o)=33L<<~vtbDLm5b->fS2w~_v(t-$XrvOK*`8@?Qt?B^5 zNdo})XESm13I>K!xW+PsgdGbZq_~kb`9DlZ<;Qu754i@yn0thy0+u)=T(hc|bLK(! 
zzHKVdxi_GI1n2R+)T~<$D zNw^CEzheP2VOV_29ko8@#4|u8Y*XX$i}?V-UVBp?Kn$ElN;t_!Dpu_knoE=5DlS zj{j+b^-Iw&x`=kcG#6calu4`H(8QlVFuD~fh zPp=na5`UapDvdc)X;x`-1?5JsfUu~+8Y!G?S0%!^B~9>Fo_B6CecMWUz`K2Up(w1- zll6OrlMr_v?nymt3Ygxvfg8d}K;|>+&-ppwmbk)PoiFgw$RU5wh@+q*{53FG)c)j0 zA7=csg&;hrK;eH4E33rUy=fSk?tJH>w@lBV)oBcs(26=wK9QlM3jBH`RZtB{v%af} z@(O&w@UVFl04v=%mV=KRp&0vEyz%|Njv<6uuRC@X1$mM1bw4F8080y~k5>-I6EE(I z=e~F(W)^P~LW4E_rUs36=7Vrh&%UG_%a309VO;*n5MeE)s+xOlnel1~R|u2~k$+Yt?-7OO=6)2VsZTV zF}}}6_h4OYA%w=`bSD^JLt}URgMajoQ=$3uSN?(uM>D`$1sp^GM{owPpNgVp0=H|K ze?T|a0{oN5>!E8US$CBqZf|n(m5Hkz+oWgEe(3>pu$lueS_Y9@unv!tL~DK0hN^}b zKp-_lsE0~b+q(KNjvrty;yCFYv13OcxH*Z(sgJ!@OJeoMarW#v6vXMcN*L3_UwSaM z9O>qyv)L42s~BHD2Xk!Uev@a%!Sk%zMCG6c5?|X{+e&JD)*>rSSTj~-6uyI>G{d)) z@04#Q#;0+oH)Duu!^=(`Qeb`i>a|RIUgCtgV?W;;&wTM1dxk`BPCaX`?jzY8Z%_wH zbKO+TuG|BD@{cCMq47AO06zHmG4N=FP_VlwOL)1V!MDMOP3AqYUfZ^Ak1F=ScmD8? zW8aZov18u=_sqrkO>#*%RymEsoP|7xawQ#-H+!-el!-rRI!2u~ybFy8{E!y*KKIa` z-Y$>awupxGN@d{g-8<}=cc}y=-52km;p}T=Wg`{5w1m=o%YI?$b}BVx~6k zUL|+o?o9HJIYY-*XJYr>?cn-nQG)LfDmW33J$fj<{NzKhw5j;u!Yfg=@4me+uYgPn z8Cq%ahCt5{k{FB=kr+xdMQ#NR=w%{d7PscH)P50gv~Fik00MXl>QZv98~);R5@?CI zZRrwqGs#~I^W-9k>t;{uv3>HJZo(825FWI$r{&Pu)5?a0TH)W&Ab(EvZk;vLzcP7h#Z6St@M6=_c@o4a%7e-)}icCCZ1szL;1lUANK5Ok) zNvd{3@+wv&g-HcbLRvP#6gU^%Vz0F%YpB81kQr}mqSkd}6*eblmbXBt*OdzysROLT zva)Ex6a-e!NHsBD9+}}uRVp5NiW1|f@YE~_QgS4|H!vQB5Zyu^)oGu`CR+8P4%k*g zAk;)X39Ev3p>rUzLZH?T`lX<+ebjK)x^R;SO57Z${hK@`<8(}p%{i#7D0GPfj?MWi zP~>{M{>;Ze$0bg=oc^AIh`+IZCa>C0t`B$0%w3OMOxE5$750Ts3Y`j#2=2z;yN(hx z6;wYL!E9Z~2Cqar1&_ov`g83Pzrg|fNy{4J^jU=f1$hPTvc^R@kO*qE5KbxyS~bi+ zNN*Zz<-EjE*Uh;plO_Q_~ zDr7ENpA}%TFW%Gp!*~_83tScAz(wX%k`onrRa}@G^VVV|>@peEHG?6ka-P(-D{r2X2m5&lxwdSK%vR8xqiZ5*ch*y zbOphOM^nitc@YjBytM_MDQ;CQDwJm>o^QTPnhQ3&T$sp6BdF}TH`)QddApuF8;{NE;e52G4u;C&fTq`}Ry7m_xD*_*Y%<*PFz$1lE z4HT@Q@K@PQCPE7e;EpX7RX%%NoOd3%5As@qio0#v{a$FcZt6KsJaR_z3K^Pf(fmmhxb-MDvt3c91`P1-`88U*oq=!?gY z3O8wP9fCwL+3BW%?nqk^a8De4lXEtv$sCI z{89Yyy>}7hN@-3LP%H2hc$(KdG4+O0bwEoOu>4ci9a_*BEzoeo7Twbx46=ReAcmKw zc;!!DNpFf?GL1YJ&R>Ie0S9n?bAMm_@>jnWBj?F2;H0Cy%GTU}hfas{soe3(p!7e4pZ7^HH}Yp=Z?qj)D2m|ZVnT?rr8 zTcF`iL#GQNI*Q>yEAdi%%A5mpTiBm<(6_3#<@oa7f0Vq9X^!~4n}}C0&O%c)x;1ZI z7e*QOqrZl&_i?p?d$f#VOwf%}gVM~+~OoN#Suh}?{u@Nn?G zcN4|s?;>cA#}}S?iVzsqhu2N4<4-;Rcszly+{-<^gw@nHcgG+6(VxZ-e)t-R%al5$ zJ$d^$&x}&ZYWsoS*mZ;oR|umsgmBfaz)zu1;@1@331PLyMl=*u4?=ajw2$Y9r_;h- z-)_Xj8spv))5Ms`!zB8jQtfJ%QmxyV%S&H=DMnc94)9XNd9)_Ir)>nUwh26ilE z|F*YwK!c#+$ln@(=I;`ghN4xEGNw#n7@TsorBYW=NZNb5P)th!oDFYreS>_2!#Jj> z$aU`;d%ZIHdj@0v-a>3)t)F=IbMdvm_mwzu_#q0NbQ2yn7-tE|8|Ki11;WkRTJeBY zLdTe2)!|1Dyp8*_S9lnoDzCctU)rDJS~iO z1w)kp!x_}vlCS~*E$M_NN-AC11v*(4sJxjKZ-%c(j^{r2I%fC=n2ApOax@ zluV*oAu_=XYOwNK)3qGKS$?ZVxUK|AD(mQ8 z6GE}~aX!6mRH!HbUt=tp5&}r^kVqdDMihSZ6tKLhXykf8!P1?|dMJRTWl;D-$dm(s zAh;p^6l;)P5(O96N8!SkYg?@9DisPxz>Svztk|W{+tt;RLXgiXIE$NF7BgfG*oc1$ zdz!D~%z7P31mwyt&t0f8gRcYhkorm(prY_4!)Dg?wq%=KoxB z%__1Vg#-7FO1me3i8v_)@?6II121cg>kdu$(1)uj^$;hE^;I~q?kUukb+5q`o9@H1y1%olOc@q3eZil6+q{wnyY)mK~*r^I7Ru;x8HV?X&Y|99TP(nYdO z340b3ux;8jFNOt9_-aAkpl_}vjc1?YnM#89n!_P{^cF(TmqGh^ z!())`uix)=$Ex5zg)rehQpt9omQbkm^>IxZi}%~9?|w`eWzPkFSE&1KwKm6B=q_XR zIqw1AvWlAImw&?Ad&HxSxLB{JV!Xa@SO5&qZ9cG#49I{s=@<+m2~PZvEC%1ivD^-L68#4 zz*?+wR=_NZ%jk8IFPEBQbN2v3=M&LS4gVkh;0<63ufm%9=#hg|aT%dX5XTW)g^j(w ziLi?K+oN%X+=CgayjWeTfy7*FdBB^Pck9h#jM0O)ghl+`?RzK!ml+El2gW5%dV13r zp8jGCc6Y_iD?@RbDo2Jy&2r9w=Pj5Av&KB+@g}`mMaI&nR-DN>$qT_j@g;Nam|x~c zA|pKWw+}_z<~dG@yBE{L^>Ojdi5R}bNo6F8a-ain$EZOSq0ZgV$d=l;HFOt`iS_0w zHi0Kf7>-f#XPMlO&ph{4=y6j_%ptgg16j?83Ns{1z=tq4Fjq#whNFxmXDe9MXj!?3 
z(542~($cpbocD0=$_>JVZpZckPLZPwsEYo91AFm6v=duSMJ4hb9zAx1WZY`JU?<~8 z@4lC%T_1hyK~74WiJMf-8AZXGCE3@UiS{l|MAJY3e&3!Y@d4v#BVlBx-#;G{97lcs zBYR`>u3jy(F}}d*U$<|d%n`~$61KEn9_EQ*^+b6hMI~>WB_zr-tHcwl=qxM~HU$q@ zMR}N;X^0*DM`G(XjwZf*HTEAoNXbpY=2||$pce7gU!LVSV2;Zk#yf%lU(X3!EqxeM zv3AdlED+K&iSjkgK6@aQcF((z7aQ_)`QoKK%6SH_m7z%s(7u!JeU#;AE2v~ug8;6Q zGDeP!#=B({xLMBPu)MemwZ@)l6gQP=zyTpL7Z4gs$k8yNS@V?Yti=$VyxcWH(C_Ny zNNWBW3g^p{6lYN=7fmdOUb56~t5gt7_^nrFGxDSkkHQqnWY3;hnIi10tvPySY$&i02k-_b;j3!Iy3xEwyd`ZmQd)|6 zwcyS*@1tHEQ3)X?9C0VFSr5JnWR*x8O2eW7IR2A)@m_0x-0dbQha&NTo7AE+T!zh_ z3IR;jm%{T3e|n&Nsla)^Zc;re9`4fuQxMK7IVyYpu7ytR+2mbum{p`#+F%?qWv_`W zj!R-x1P#}Swc{^^@b%9s1+JZco3Mt=!O&D5B~&Ih8NOojax)5m`5tQd5(&$xiO0p0 zP`n6P_nvi-P`Jta`uf004-e8}X8A8jISe{IT?(BdBd^?t%pfhf&cpeL`+V}0*pE4!ArEEqFbsNfp3h~Wcb=9+6Un>bj;bX z55+&%#_@svd@^Gg8d3%4l+t(J+mTEwj&&vG|1*0`2qalopsIw68WqV zLa#uErP!8mQyAwl;q7y-&oX-rGUKR#A$Xg^p>-eN-V{Y0FLP8Wb4`lZAb-xWJB1}+ zZh1XnC|vS+?zJz4>QoNs$1oxEtQ7X_PxvI#0Rw4Bt`%@ru@R-S!9`JrI&_5#rf4Gaj=B?xb@_0`Q<-BV(_H%h&kQQ2Ln_jmO z;hL68?pJ`6;klpvvme(w`2+VSPhdgQ+gaa|df@RyGIY*uK?p^N(n!t0;? zkZrq`@UB!uox3E2=Q1%kC+G~zJXKWaM)XSKQ^Ly4B3>|V_fG>vu&ccNe;v$~!c?x>Nj-uM}lh*S%ie3KlT2n}!pXG^z04OKHM7yj2iYgw-M_G(zVeA(Kl7!sfplo&Mm_{`~Z=^?r{bS+j9L}>yVZnAv|`&@3AiJqg?@lXHvug0+_JL2m2 zyM#5cujg8bl_wwSbD^8vt5U{gzd|1Ep_6DO9il#2f1uX$n`84Nwq^Sn|I ze?)1L=i}bSIn9z8P+#Vx5h%kty8Z6i-LnC;tP{wMB_t$eS|65!kzd)lM27(2~HZx^> zJu(V(qYhc4hosmk4psY)-}+DS-sw{@jUlIzyn)?E2V?vF+oO&(?(W}2+_E^$a2 zN{zAqZEYPS3HQX6D;MLPAO9pSB4oE=y{=_XDNC#><3Z!UOMZ$WZTCpVR#Ccwhpe7_ zh9&wn*?)=T;Jy0~ATMCpBGjb{1Jd3jJ7e!-dl1$s4T}c>RHocFieCqYjFdGoj#wKGxOFRvOcyx`?Q?O0YC&Vypr;cJ z2;z&hQ4w42YXBa2guuDUTg2AGM~**pUp(^Q5vm$>Q3>pB@@nOTKHxD)2-&SmL+lUm z6$o4CBE4?VSdG3t+dJ52qu>*=7bI7#FVtDe5+4szfq%h#NM{Q?#J;JzA z8yZ{n_^yyEBE4$q_Ut3f0Q9fSPyyX-ZtaQR{Ad3hnW2$NL3kor-;7*A?%I2BGyIt2 zjMAFfVLmC;OBoc-%-B}zm4N%b8sV}UfOigFZcM%wd=R=G;oUfK?^iIj zZJ&YuGe0iSr{A^WFEXAg5YaeW6&&ZNFusgi$9PDsz!@UMb261x`fw~VcbVoIt{(+T z=UBrg()ucb*HAOIvJQ+o5)HX97kgAc;r@D3+v$2m5n+otHSl9HH zKs-#6(PN}L-}Ny*UWG*=X5&yUUF;ch#@-VC1gT|7{kbUpch z3UNHZI2a4W)$yxD*p};K{GtM$E>f*_tysuZz{CrQfn#dKdTuPB3B@X%5>4Azd9W=# z5sp(KBt21#&Gk?5h5aM^a({Wnap#80|1m;lPiLucF;2(6QJq9f!#pvWhCR_7YXY+b z&PkMjk>l&d{kV*fug9w$1;+83W0MJrFwZsI-CBh8c9;~^)7_IwmInZ7Q8A%XE2RWf zA=SO%`1Jnh;VENv+*+`;Ji120zrZ?Yk(^YiV7k zJ(U4hF(D=0@EdJq8D3!NT5nW|Nhw)g$~`hO^C*0x2(lUxq<^e5G*IA635A(2X_5py zU)lbKa9{AmR-K19XrgHGT$cM&k+2QN0_^8tzK&7cG{jFAb|Y5ev;a9j#_f|nr_h$6 zfhdSggd7;U)liNT`D5YP15>ddkMaNTi_gV2;>6Ehy%npC9r5RX{4Jiv zLeHMMcI9%cV1>4wX-Y^^VLXn$G#Gyi>;Lm#=MbrCYMYOIin*|z+U8Yp^&A$!aYGvQ zpfyAX*2YHQGCFbvK^QA039e^8ya5e`mL~1eFoW`d)p?eXCULfbeygEX8#G9ItL3`^ zAyChp>u;6J+Maq&6z%d8(r&Q>-am?@_@B+iC>{(*tvomfD#(Cn-V1EGsm6hsI#wOAxZH z_Z^I$o&B+S-|pz$Ms%-XBEda2$wTDydVgg~qUg^Wiw5Ho|p;20Pj;Tg*~2s=C; zn85zSo|rKc!7zYkCJckYfP|!OT575Ht*X2B%F2D;Z|-^LdCpfUb6csby!X4`_b%r> z=XZ9rhvuH@=seXJpZojI5*v&O8Z-a)aswU1_Atj2$3=;y-L)zj4UoJ*T*x4$jo7aY zyJ%>kNzEzTIjCII1bv%{+CaOg8U)Ab^XCCUWZ~YLj?P*LX}=lk5y&5)vR`c}N5Pe@ z7CN=xk$uvko&m{}AXJ4YIGlmvy3TG(yMN z+KJm!XrHsJdtcsnk(jbt5dPVx`s0yLJQxpt@V@96=^&YdHQMJnSUeR%WjbSN86;-{ zzyRPd_L>uQY~TUR-*sfm|1mL1G{V5O(KbZlN5@!PdH-_Ue3clUnMQ~mR}=!alf>u7 z9t0V}*qXC_Bi%84wuj~n%kkB(k$7`=62LW@2`SkeN?|xUklNG=TB4S-CK6cMdq|Mz zA~qPSdG83>&@_lJhA2-tNYqY&TCE$YF>B+YZLE5_g`ut;G-aRa1 zp2ZXkqB00I16ffwkq0StTG~MRJ(?CZ0-AVxhT0eu+=>l8 z)I`oSMH&WE$u&g5E>fCy5Ccj8vw)KY^F6bKMD!veIZAT}W^%?PI=989FHLT?jw{d* zf*>UyXpw6Sx>r)F$@m=Wr|Cm7z%_&7&3?tKe+a_0PL;^qAT70oRMnV6Gb)}^@5qd9 zP~aYiv4M3ck>%TfW{_?X&~gLlI0H|4W|l+(Q5*Z?ytBq0EYR4XP5r$bOy!W8hdv>Z zs~SR@q$umqbBt0qFf7MhTh|QnAR!@mI|ZpxERiLJKA$ls2K8I`*D`GtI=57pIBd8Q 
zT5$s$;=62atBQf)_FOL}g8Emewz^uXI7rnHh4DbPq1&VMfc{YtcuH*MlJP<8tv<&h z@c{IXnyS_)5?6XckE{m>n2eq}1e#hw4d#vN7QWK}s%&OW4ndk~Jv_I1pSkhfOcYT7 zLl>&hN=FY~YGK?rO@b+j3+bKGnspOFI0io#9VF`GI#G;ht&_TXtsvJO4ia~P?h2Y! z3a_op3^mz#@QQmWA;oJ+?|H4X3_YoN`20i{42V~v7$$w>ju4Tlou)~QeKh-2mBmQtP=sWd4w z-aHFj<3@<$iz%>2n)q}*z}%`a=UDieMRQ~FzPbgNv^$h^CE}A-i#6lAyr28)y`?Kx zhCOBv3zW`#e80G!*?Mok!_rA#yK1k}4PW%f^PR~sJg4|K*A=(q`htG^(>3=4@1bUK z^nK|d?*kpXwq-zu?P(22R`XFalRnML&Q{ip&FkI!)Kref&%Ww1>O**^TaEE#r3LOG z%^ubLbiTS?6Q!pEL%Qc0Kj5XQ>E1v2?M#X_vL(FZFUBq1!mIh7SNP4N`K4=yH$?01 zb81hlSR#9`hQ>U*qV?3Sd00W6-l@1(F+R7{bEd}5iu+Zsenv*SYfC*EvWl?t^o3cMGe4YvNd>W8UREO9Vc@f-wHZ zxBik0R5ErcFmJq(2~El$lYW}}(NwYU1TuApSdD68V-9(~bs9SXARU;W%@#a4I!gRp zC-k=#trW+9^7Ig`T9@P6@?QMKS6_(t-lO{v?Od%sFg7-p$pbZ%{dnMqPsY#v)~DlB zpF06)y-or2yTnTnTe063cdh}@CaGuG(#2dY*;5@4Ka53*Ow+NsyYVXK{I_4cfw^;u z0_eK*igk6K@0gWFp;yYgoDu1rToR*nTicCh@1lyo^HcC`cW(nA#tHMGxu z@`nOW7jM6OCAO%SS&w`bkSdc4w+J`^fOp}s!Du^13~bd|GmHerMQ0g z{rH=2y^#9@L!qosXYSGHv19Bp8qUNx=4HyJY~quEJevV#0vdtZF70%+6bh_t&APQJ z2t`AzKLo>QPftWKiYT%JppQhQe`7>4yH&a3Mrt~UVu3Na+BfZ41!{!exjq%=P}FKm`_XlN2@4#&@~}T9 z-ebKm`!mDnJ<$}$AEz$vXt{J^F1Fx{v?x=V)Ni8vpK}|J#^&dmbf~{~?bv zK@eI@cNf^NiMZYMsXe;I(W;vxkvNX~=$5$e!g2QOSiJc9ODPM0o8T}kCEzx3aLo_? z-N(PYPj*Q)V7QTmuGXfG;4o`Kvuz^>xT&23jG4L`K;J-%KM8Du1Awe&&@y#Qdc?o< z3=>dxz~MpzSAGhJ;F&N+|K?B_EH6No3Db>4&>@(Y=?bg_g936*&~?NGwREHY114FL zWa`H1$ixMTN#1Od0!OXMz z8J4<09BZLiu(k%<_#BvTYp;E*Td7G!90sN!=T|u-VSsUO60m2MvwqyeLu~GthU!7? zEo81$)ftS&xbR7~c3@^LK@eCUI*21uIjL)=BL@t+V*5pIs_$BNM(=U7S(G{f>?T^W zz7#XIG|j7k=$0tqv-2J*I}(+9DnNt9DMJ%%tG$|8%yCTyYb==Y8!h;5HD&JL7n32X z5LRVYH2cjuNG}B#2QreB5XB-N1kvXN17dY^pd>;Syy(6F@5b?eVr1_nuNZw0@gs(7aw=X|e zu~E>dl%xrkzbywK2s4w*nw2H?(D7%P8w7;s#(SwKcm~WiHVD-0K!b$^(ROF?j5!BQ z@-m8M?pqY}c8v;Us4%Tczr}bI=t+k;e$Gv9o@>u{_|G{d0A=S?Km>a2ST5Q!%i2B0 z9F5hZl`K;3g+Sou>*J za}?rnQ&yUt%lGAJCZRyv{Ow-n%gY!~u|5h>lZ5^Yc=sAEmpJbMqT=k|`*y+C_{>Bo zuPEFs_dqlEO)x3|fjX*e9!N>2P!GPe758F`)Mrj%&=}puVR>t zll}4KTIId?PC@@Zr+EI+nU;TfPoBazm?t;M`7(waw4d-^Dsj2Y1@BY1=&m%;$o8$% zGgEQr&NYAxZeGO6AqEsINu1Rpb6=j>)Io9(e)|#$SJu)Dj?(by2MD0<k;p zy>x*l^& zHyI}u#F}Q}VruE;MG{m^6R{GyT>$Fr44J$Dnp3Av#I4)cG63DZ6cE|@ z%DSE=#0>Uy0sQ7N%X*Pw>&lVL*5telo}upKDUt)=V|yr23ggj{V`+ferrTNF8pew{ zN*`eyYefMYJ^vuh7|zG=$rH@kM64C+n9dN4e}k;*TQPromiUR;c;L*r`2LT7kRbT6 zn7BfN1FY?4P}}uuVVsO_x?6SDbw_WpLH)zJ=7KRt9N7SJ>qnz zQ@Juj5?~C`9jiyCf(wv1C3ur(b#J|z&uJ5Uw@kSp>U$D*R7G5FQ$KvL3rjY}ptI*5 zgH~>$L|lc3T9E=!4$m+~OqqA0^Av6pXprD=NGCSYfaHb#r+lrd7QkW!Ch=s4_#AQ3 zHlBhN_Iru(9J0=>r%v+k-I$)5j0F-C$^_KgY(}}I0qCcH>L;>u=pK8!df+)A)0CK$ z^)v`0pW*wRJv6v@>dAQMp$Cc8S|E02CF5yq@~}eeObZ63RvI|eP^M~U6Zs1uT?6Bo z8Nbh%je#-K8>a;a%UDqil^9m&iKL4{9w#XLKCxdj0w;V;I~?at*ehPf`%9|f{hY1;^+lX zj_;uWxDP79Wooxtp3;BIFU3;Id*hc)7}fABP94De^4TBx;dtZQFUQrFZ?Sh(u{gGZ zk+;CK5I@+CGg-CCScxGPBQvnYahe?AZnw$NKm6tYDn9wC566G_zkZK772ztn@%-7g z1}kJ(^Alfs_RHNp9cjHY8!-hZ12eE&f!#ZZkgg}l4sE}w6N?m<-YtB5_M%n;u@#`P zI~km=I}Wg`t+@t8LCd~1pEB-6K*=P%PGM)(nt-%m_z=0ELM^Wop-BusVE!11D;nio#GULp9p3;NKubt@W#$_ zd%)DRz9|^o1U(rg{JHgHfAA&d&gMZ4dNxd0sB5=$0?bdr#WOR80mi0#vDN@Y7`f|Z zq4`!k7%hE%2cjkOrwR*(T0c*P(6xNR=x`m?;Xdr_<{&4)u}ccphIL9Bad3Ol8U?b; ztJeF%DqP!{f>uF#0C4AttPh(BlQf<{5Un!R)LV{YC&-*Ck=e8Xvm}ehaz!c!ErVoE zwc@MBQhz{!S<0ocYbpn5v&_8_H>Ap!N>FWj0t)M$yhLS3OK=vqg(-L_wNffWjZ;)Z z-AV?-$guEOE2~D212IE^B}NaD9w!T~rTk|tjsUF8kUx{T3B1xu!u=c3d=r4M9$C^u z#>O~&pukj6EO^o)t?N;1pjK?Vxs=KN5p-Id*h3yWE$5Al?A-Y$UHE zK$uoTJ_o0<*NpXN|HW^7E24-6;H*FetbgH07oev_5}L|8b5B1jKo8&L`^Ns5_+W;s z3`lzQ-|n$xA*5}S3S0|8wLfOS33QCXQPDA2(hM(wnL)e7copKFU1rHWT%YV(!aIdW zz;)JwEF)#Nd5~jw-`suILjW(ZZ(+Ukd?{e0Qw5C7X=s4VE(gd=i 
zK2uou9M{@XPJF~m4}x((o-@{nYto|c^SRdYK{C*+9suA-?+t)98yxT)_d<~3z6#J9 znu!g<3t*3cTWW~Oyu(>7~z+Wst-aD~Q2sQE6mu{p%zH>P zg=f=;L!a!I`=fH~v!$~-j2sx*2wd33%UZhC&; zc}~@}kp~{VoL`=Se9Lht#Mof|c76$xc{ddqKjr{2)7Y@M;->GjC4J~bHGnlT(1zcn~;3R{kfb3>dvdp7SqdHY86*6wC|(wDD{ z#Wa>mCIwT4SC1UT5B$A{<7fWShvVeKSmfglu}L>Gmd7)xF!Tr{j1_tb|@(NSt5<|YLh(LkvXF-yX`_ z9Cbw(RyI?it-|nDMa$AN4?K)=gyj|Mb|>Y4Y6*gG9w3eg`C67g4xlX2MQdMvgymT8 z{4v0hF-3O(pKH)`xr$cV{O5UEC)j@%`WZRb89gIyblqBu=N@^Q`jLIi3m^$ETUn*8 zGKmK#o_r>TMjwC=S=E5DF*jZ%G2jZFzQ#$wSfhynNf6_-A)W=y5pMH^FZ?K_qFzbB zEY27@(;lbpCu`S;2R!!HMHa-abAK_!(!xT#^vbtl@InV!nHw=bhSizY%Iou0aqATV z%Hek>&s`uRn8Ns2jSm39^0+LkSRoU5f>4%K40PPHtCuoUy^IsN+&%(b5$J#Y&8zGY zat->hj_t(cBuy0N;^9XgO{M87pF<>Ztnmc2F-vDLc?0Jv&y42=WdT`POPJHaA*F4~ zGsLiIImYD5{#HS^r|!QHr=EP2Ch-u-OKM$_w-oCL>of2gVJ2JOPCb0VV97Wg$c670meNj zAR5YyYZeo2(FJUiP_At(_GajI6LKZ~gZdI@iTB*oJpgcJ8)f2PEzUhgSQG7r?_SxD zn>S~(w&?k%hvN9f#+aJ9jRH9z3zKB}ZdAv}z(tbQD8WP=@%vYjx(nLME=qR-rK~0>0Vir_LD#US>{_CfADX9 zKXy=NTky7*;S2kO^NcQ70$T5>8T4Dc6^OTZ)G zuv|}d6XhIuhCoXNrbYx=1A{|2SyX>(74T-PC(IB2Oa_smKir8g?b7LDkrY6e(8 zgvJc>MtRvG-fVM+tg;$BAA~MdNFm=NGwYDGgKGfH5oTEB7{(;9G7hl5gE%;ZYI&__ZMlUNsf4nl09S!hTGJ3fd=o_z=W#l zNCQq`&jb`qooD!djpH$X(`*{nI02Aj;C>!7uhV;x7CW^^!6!sdV@p#}msm%wcL6CD8aQhkpA@fZsFGA#rhVoXS=oDa93IHtV zX3EhpJ^>5orO@PAIl%|_5P-S1Ew}@Vnw~|rXBF>N2#$B?dV~^8aX06n&=v4|eJ2WU zx?E}PsmG=j^|c6tiNZrSsAyi$^%19hGyXS(7;fF^mXDv1-45|OSpr36ExPN zlnAh8WS$RQ;D#>L%ftjsfB@GKqTnn5GGLPIEJO5fy#!jsQ4aXqXBKP2xyGvMzGtEj z^AsG`u^x3W)TB!;&TdP9K1%|bAmGcfcpvBNv9w%;tKr@llIIxNxCFpex$j)(h#0R5 znS#adCb956$0kTsb;vR1{REF*$31-(KR&Af^BOf+i+l#-=BtGl4c@>y`{SHbk;w0| z=UYYnNCk%cOPbtAKx%o`{cb3RgHkCIoN?D;%#MLI65M+hYOr#;|E`sV!Pgk8rC#*R zIY0Ng4(6}YZWu?NHQvi{bFF8<@ur8&Iq|n(KP~5m!rYsuzjM;LZ_Jfv3KqS>+uU0b z|8Z|Ec^1$259jR~m?15xnlz(j*{yJ_8KuPc1oKQNzoX~OdAU!{?MSiCdGk~ij8v|8 zc5zL?+nk@t-?A#8Cr7*_U4su6y>0Nxuhuq6lnx!4ig_04S`&fCMZk3_a|xr@#=jalqR; z=c$vHwjF!*dKIPCq*mAN$#->4n03csuIP8a1_LW_LQr2B^ zajm5dz5060JiiC5?ME)2hvuOnET)?PAHnwye5;Jo6*MYn$E$wgf%{_rF8ruWlb1?6 z^13b_I{i#E<82qrRVrAhSI1-O>Rsd??Q~Jds^HN(o2@Wt2{yi?hQIro0&&ze_FEPZN)|5x1{QXQ)pD?(+Gm z&FnBbO-tC?()Bob{(OSy2>|vnV(35k+=t>aAwG9*-Jw|ia#X*AH@$i-E+7`$PHOZ- zj`!S-bDyDh?F-jX6iP9&cLLzp5f|^fNRnNBeEl!J260#DbT&ZkPim*mf}!}khLEkQ zL5w7Z=|cZ9EY}(|kbhT#j$)MUSt+-~YE2h)aF@`(TI0mS55%#DE>d)VI&QyynRxg< z<}n>#{l;I%3bk)P@r94aCx76{cz5!z;=lQ4Kg9Fji5I?hjkxngs$7)gn}73sTsV7< zr@$wWgYxus_N&&6a1P?it#Z6_osvEnl}|o%GFnDkIh%w)q0m)f>9@1p5}DF<7+8%I z*ajOl5XsR1J~9Gfy9RFs=Vf(??q_5rkDhFdi90A3cocvhl+_~pxEc#Oc+N1Z&HX0U zXRR1998j0|L5t>ic*9K;ZlV;ZQ~ni zA>2hFWl{t6VbPEPt&Gbs8`uVbqZoe^o(c=V%Jkg~%O7PSWF-wNV6Cr1!jqb@NO(Uj zkd}22;IGcfn6rA6om%vm1U7g%DzzMH=gQo&QyK^B047wk!~8g4 zEj?In2d#?-p>W*RNF3?{uv>0vXNiO$v{;2_JLMY$8=ET-3h|7{24dp4DeE8>GSp%N zcd;ctwqf*Z+w=538Dv=E2jW3!7Z7q z0uXk}ybHmuMbPZZ8a``8TM;O!`Bv(e@>;<_qX|q{6pjC1TqL;$z}t$<{zsAbaM>JJ>H9Lx7&y}4$OIeMsV zpUairEMGOyZVgP^@26E5fNXt7l~+r$)RO37cIYO6+AMhYV*@33op|JliOJ}}nrCOI zS(wBY_ed)+sx#!%s0uK?W8$MHy%T}JkNBP?0ndSs=B)<3ZnP!ag@DzM$1?E#I ziTBxGXh9~-1w4oI%e`Q|d%4!C#W}JzmQj^Zx8Lmyb(TKusLXXt|eaFJ5$zNTY%vtl6 z2x`M1z5Vwd!S@biVlF8E_IZ}c$xa3c9V7JpR#QBR-lvZfiAunR_BLx@q@1N;t~*eUP!b_jvL;nnyT)Bq8#%va%4nbz~BM z-^Hl;7-t`_VwbEfXkib~pZO=b-+G77Lf*x-l9Rl@*g|WB`Fx2R?qL-fka{MS_(o`8w zcXh_oYmKH1S$BVoj1CfdLqaZKU%;cWOm$PEPYK>pi}x1+8Kt42G!$t?wTxV)3z~)2 z^s>4y6^v6`jCX7A7DFUl<6sHJjWeHf)EbWL$C`Vv#d%|Iynn6oYPTvTucL^NT&t(4 z#C?$}gzKOL^inf+d^zRK>#x6>Ueizfz(?c8`YY4`osYwHjDFkj4c1yEV0hH#D%-5% z1c|eQ@gOxg517y5#B_9#)9^3<<*&!xiShWYfBV}Myq}48zKLfCob=#F>8dzFa|Obx z&}ibR&zz07zj2G4j>+f*2aJ$cvI6LS`k80Q`bN1~nrDx?VtRHO+Q9k?nBH0dl}lbAtFfqr*Pnq0 
z>@-vBbTvw|v>@hw&5U<*cAbU*bfm*5YOPpE97}yu44r)<$oPQn%W)ZWa$Cxen{f7 z(0C77*@}XoG1dK9$3t@R#0l_HQ+(~uUI9m!;>3mXaR)En+#)$Dh9iwn$GH!jN6AA$ z+F3{pvAn2~60S>oFoKaAiRFQ26kz5B@H{F~J!@%+@sJFC?#Mp)#r2ixb zw3TrZMwpga1hSgh0`APU1r`&bsW7*fv%;tm!fNV9fDr>P01}Asg#0fBBMeApuUk(+ z!Q1Cfrz!r?kuOgThn4xfqApt9P%=IAy_2r48VARcg1+u(`U}asZ%ILFns%&pFBwd zRX6dL?2Q7z5F$ffS~y_RhGP@38ir%6rM1EI5GkyWfS#bP_`YD4E?)vn=Hr_xFaqx* z@a=aLu$jcpb@>@H5@`Ar?OZZPpX+{#wggF=)LfH(vIG|Qu*RC-6^u+xVLikOQO1S8 zK%)2(R+}Z5?d<$K&tfeQni*ojS#WIk$dJO<7F*UjQH)z~Xjdd(dQb#tTAK|w=_anT z2?a?&d5mr>Yt)s~%e6*f>~54l+o1ZgE~v`?Cj7v%9<9)eC`)!PL)2BBfXy&K!w6yi zhJr{3GFLxyZPUuic{&QmO4m5|N$b69sA9poRU;f~2?NrKn*G^%i0fx4pZChpGA3Mv z78UEoqULv9PnGlZ7O<9{pzI98I^{mG8z#5;Z(puA2RV6)iq0;;%D;2G?iK4Lh~X`s zU0B-lyDqr*<^w^q{K(JsbU7c#S%gzLKfn)07+*)v_46H=c*bi(qb(GuH&`B=M;gxsPnl!blxreQdcIv}*XIzLuQKlmUJ6oY zg`iOG>)yHB{^pBpq!#NUVY{FL-e>N)uFzT0NwPLN;1^m4o%h((81)kWEM9x#RV?|l zBv-!Ap5p~$3>qqUj4PV^;gfgy(uqeu5G}(e()uo7-X|QRw0;9C`c9nt(3u!{ibTK4 z7~Ozo*c-o37on{h;?)lb(;**?b7WV|gI$2;hDnYZ@}LPC)eH=^$BB~wSz@U(Ngot6 zGe_M(y9y7n@p(sv}GSWf5T4jACzBuU&YsVC0~>jWM%nZP9HRb`O>lm;$mN8Zk9x8x$&GHPe zC(325m#%Le1tnm$08x|{c$&$Wlop8J_?7>WwylHl_3u6(S8v^l_ue0m_usq9zQDh* z)>gA;T45M&hS{K%#o)OUaq9eOJg6Au;AtvwmSU^d^Nh#43cy%T;%yzded{Z`apV2# z0Jg1|A?ADf>KcYTjA5Q53{ZNFHCk;G-m$e#6ade%3eX{;TP+wa`v-eDdl;h#b6Lmx zy*Q7-4(u~DRKePKJ<}6_5%;lw+Vr*KSpu?C(`h> zLJ%kC56{A{;k$9fv1;t5YPnf4XfIeod zZ7)Vw!aF$%btp7-7=#DNd8vh$?=~()`$IF#h53^+p$COkiP<;d=W8eugi{S48;x&$ z{l$3wrI+CiJf0ghQxKyS8uI+^Vc=RI#7nFOQvqWUGIzE&`i@cTfGCa~%=(s(+QTT_ zN(rv!K8%TdgvTIn%H%<86IoHB+~vwV3NZIRPE&+sLbLRcoxgAjp47q~Lh}%xwRs!Z zpXI%|Jn~z6%iNy|M1-Onv5{r37-Tl=Av9@e5<}eL3BC`#wC=={PmUrB3_Io;a&`9V zXsm#DLb{gkX6^Z88>}Dv)MybST2v@%DUwo#-eMciAkWmGUfD+uqF7pccMk)+3CJ6i z=la~2{%%}6{|Gq&Zz0raKCs*z*WbH=f_XK5;lKVoM(ax4S$#VeduRzvuE+e1-I%=% ze+A<;Fc;Jc4EW?45aKoaI+dwNqOJqSHuX^GqpvWErgffi*FvLMhBz$zL^Hjh>)PW& zC%v^fjJoeyalLf+qhZxe0Hk!ClBf!^Ekk5&!z@0Q?R_lQ3YA*Gqu^3-(@4dbEkNDk zBm~9#y9`FxxYk_6Tpsos+@$q{tdy6r>{>O5gJsS@3F0rq>^T!GD%M#9N(ZrNC9L-z zu*OynWk0}zh0(gRN006uG_EYv3lZ4fR_mSwDN*RwO+3%lYWdh1d7orDn2dQ3b?xQo z8lVfu`bu;GGC21cHdKX`rVh)ohoOOxD7GC20lePEHH^nVw||C(2nKu(lwg$@6QQeF z2P-KZ0>1Z9qC8v*B)e|e`+JKuX)E=lHEFACG6_V;CJE*%9E99PtVvd%m|tAvJ^&3u zYN}ZyVdNHFJvP?o@lqgcuw&sO7%wq^84zDqAQDsTX5+nZ?j7C02C4}K z%9-H__2xTN@m$udiFjA7!<&>5tFB@p80#U5Mupzr^$0?m9rE-w#?3RZ&bHN`jK!_R z?Be1&Y2h|Z=&*)f@myH}xNEJdA*uP0LuF!>y~vHL%Wg=F3lk7~K-PT=V00dk$MfrP zO}hy23JxjC#ELQITny7NuHHiZC2~L9d&7zR&LL}O2Oo9=<^s6k5X!)l<{+;v0J2tX z!H0rc3+6UNrZw6_Eo5$}rVu!v=^{U5td1eU3VWJpfiWC`BtzM%5`+L~patLerH~WQ zz(--6Bm!|WftS}RT*MqD)>L4v#VYTsg2VbAl0%_4jFZCmviGNE@Ue_D^}Cx`Y)+m&otC3kuF=|?p%EzaDp5AE5J+Zd2Io~ToqRxVmuoM$u>PM) zonbHE1AH<~!4ga`*NgRY?c@_sFF&3m=Fc^HLsT!g1Ai0JMVOgjQ*VMmOWN~yTDMre z1pB#V3bVrN;4@SR{muVz6<^@Dh22eB^d(KC3!P_j6(bOE1Lj#%!KH?VqIp3<7jCBa zGi*iSmKJM%`+n{xbMZc2qt#y;@yEaY@p=9hy!yVhuA*r-HN&I)caDPPaNQLd&Q)Mz zB@*|MuXBlGaDJQ>?&&&PZ`h7@CZ9>;e1b4C1cqmEUrAc}aDCJuRV2K(FVB#r`JA)& za}xeL!=|`KFS7K_rl#C;-{e6IBY$`VFTw|r3tIjP?}2Neml(RwXL2p;t_RA*=A-p+ z9aM(pdFGr5+Sw%b$UP$t7QW>E>h1OA_&l?YkKN{`>><}WY;EWnP6DkWae2dYsHeo6xYs-`U{u&B7%MSfjQ&0C;eDMokq@MHR@jJisFSJo` z4lu~z{mf8S&RiW@kjle8O5h%bpr!5U=p~<^`vhUHc*#!ykj@NVK;F#8)aK=wTD`&8 zka4x`u|(+QL=SVWVeEHr!~2P`SE<#Cu9r&1Y3BMwOx`+-ff4A3B;hhK z*yqkY5g)$z42FZ5xHbK1Y*1jHp;Im8RMOiWc<$}1BvS&~vB$86?Zlyon4Zy}55^tCX{3Yq7&m!&!(HP)211aP~a>@qwE zZ$4{O1D~#jZ`4zj=76~V!#(6Ph9G&3#;!vOmF#07ow#-@PLeNh^4Qs!TAHPmauc5L zt{5H}A`$ckh9~xos^^9%mX|5lIWihgKKEo)Sl4WIjHSIFtN7B=bXJtXiNb+TAnsee=ac?SwQj`}7=USbI zmME^z88c+Ggq+<$b}x@_VE|zd!7(?!bsM9abc)<1BxM6YZp&%G?~5p5hD`Mi^`nT* z#mqPrcP3Vl&39tx!a($&qB#LS)05M5TZ3l+HkYnpRAZ&Dl*Z$OUphw!^+8LPg 
zAHN?>hg8@h6l!RAnEVXd0pFcY#m}1FR&HCSov%3s8$@7iEpw8tuqz!q(S5Old`yc| z5aGc(7;Pc%s;MEjqtXJOA*To#yM`yrcI5L@oOI^dIYI-0nSPoO%ol6O-fv+QZ$3q` z?d|!v{l<7)zdjaI6O+hY=n-R)_`~y>71>zJD)_f~JSwQ3+XJ4nF;Dw!to>bt_ZSt@ z)<+oEatS4P4mzzwJD8@HXYB8Q9Z7^A_@7M84q4ny7PpMYL<>PC z>v<%(ryx*EhjG?bSR4wgBZtgqBKY(K$b4JSCJ!kQwhuGd*`H7MRv)bxd+Ez=8AYa@ z@n<`G#JgcJn_IB`AGr|ZOBgO=SXDjQ>BIu1<=0r++9s?QR1>fV`#xjSa#>X&tc3rt z@3KCGHCf4r@79#oP-xgEv;uOcV7h`Sm{v8FR^bCLZCWtnG z4tu8uCEsDc4Oc4?QQ3bk6u=w-C;?GjGB}h`z=;LjkyiP6Vf7STu?K)UEt0uT+{5Zc zddZ|$E>0^C2P(ro3=PrsF0Iwp^hOsA2v)GnFCYMSh^>|0wg7mx%rzXY9zkb9sNM|M z%$A;dIMz|Vv`(jWm3s*i%?}vHHQGcNR3o&i2h8d+Sv3f2iY~);_S7?JH53VG69tju)ZZbr)W&c6Waypf~Qq9vQNk{7h_`vl4!1#V=}kH&T_7!eA0CieoO1=t;oN4jv!jT z$r(+L830@*MgZhmxc2U=kLMOSZbN{wpaahnNaxU)tHug>zu>`KHKR{Nl#WL^;tZ0{ zYJ_5JyPWwZXeSjHK69~^X)++!$N5NiuCt0vDsTWBJxp5W4ifqC`8|ceHU7kZ{~J$7 z6tMt$T9N=5TNMXG@98oYycS~KTe}FE;0&7cwg=ts@xEQ0JO>RuB$YmXJRbeXX_63| z;FV1=fF*YC;1+E@>DmOKz!HFb0W^_AwnUi94s#qlb_|Pq6X6e3bh&$oS56DBNy;5j zPbtuSD%@;hsWv<`D$OxIwH1;z7#KA{ge2A|MCyw>eX@f#6;x* z-XMn!a6kdQf!1?XC zJ2xB6y@WU&u2bM+k4{XF#mS-5@W94+@wNXgeIS1K7fF-3ZyH54TbDtfJ8S2mvJUl9PgbocA zr6U%<+go~K<={OUYQa&!4y|}8r)I9i{L&Ps2QY3a>G~EdtAOiwk)4LzQF*1cvx#Oq zcslS9Kk?)f&@a66L;D$mIW;juw<#>Ya~nnZQGk6l^rLdBSEYm(eNQmUJXGNK8T;V< zXQG~D@LO-7urS7+;l8-<{7F2hz483FzC$88a*$gfA!8Al)j|2pZD@COl0;qOSdfV~ zUc4PQ-nt!a#|h6oNwWv6xO^pg9;{%cUL@p{vxLm(zkqp;u(0>Ox*pTt!Gna%eDlqB zGIX~C-fx(6J&M;hXVfz4?!zje+}uUIzNM<37<*v?CV}yHpdorr-~}Q?I`elhD%Moy zFj{F`fZoA&``d)5k|$;;_3X_}FhfL8}eJ3{}je4Brtre6n*0RA|HS-5Jv&yqp&PourE4L2PNV!`9A2X=>6Mqqi6p`BiY&8| z;XnGhU*N2jp! zi&)BQ2xqEAL$oo-Hh?iuUW{EA#8U zVr!kl34Xu!EmwO*8qrXsr1vr`lj0sa92WE zI>0-Uz=X9>u-2eCmaN$?pyEC|fCpF34mjo%DA*fO29R3Z59aGQQOraa#-rB(+=h?$ zLo}>k>l(_`D#8Wjh%L-s2{5}IavoTTWC*&rCWvX|4%`bjIY~;oN>?eZ<(>zT2_fIH$i1Y-3h zsGD%+y))bg;4fGZYe@?g4&>2GTGyrR{9IoFMS==0I09k{Q7wTDScUwz9-kv+fl2c} zcF6IvHu++YQi0Wv#^8O6$h@D@-SuGjbvSok}~P zTxKuLoA0o+w7@tZ1AbHpj4w3&$b|;OhCT6wd`W|< z3XUfY7Tiy3zI5z+Dl#e%CdqPl))QXs^{Fthm~6Op>@&B*s|}$7tQ#t21rq6~gtFI) z@|)|(IP~_)w^g#7x97*cAo)Cb9rKkgUrPFYrD8>195P8Sx&Ys`fe~$MInJC#2^j5()!jF! z0`vxQ51w7qN#O{<%gSs5js}t|KOj-*uo7gvC5QAWF363>I8CoFfN_= zF#QTnQGJI38Z5vTe0FJNF5Y?j-FWl$H)H(j&6M*40N07>D;SHa;}?JF=P8Fd6w~8h ziSK*-T%Og-?_8w{N=N+ovp*b{`p)9XdfI_-SvLu%9y-gKJ zK6;4_l!RD>nP5L1uh%mFIG?=l>8u z3Que$6zURHj4nNZQ83RxG>VcfZ!!d0`j>w6qC*hSRdH5SRPB3U_)2{I6U-B=B<*=` zexIh2PV#-uw*14N?k@Hkx}KWA0u0D8gmh!YFfI~bR~NA0FUdHFoFGZHo4WcJhN2aW zWUm_AzSnkc##2At7q504#O<$6LN?Iu?d23<{{Dy496N6ZWpKnv2 z|9Fg?JRVzbl?i8?qXg+@H12jp`*HFDscINlj$IRZd>#1ZKqa7h17j)Gi|{5K80N@$ zCvIUZ+^D6|8f%4tx1P_}4>zOzOda+1`zWW{5^ugV6a5&LZH3%~0e6RFnZwX z$k#Y?XHK0$nHY^W;@oG+Yq)Xcl{|;815yS>ic+^kC7gSZMs9;N^8(|j=-s}4gUE<7 zt)!{aM98$Vwxvn0*CB11mt*Mulkv>cPsgJVmI^r4T&Bcq@Qx1Myc2yYIu_C@l#A>=43X-P+YPGNU1!Ccqk02SWu& z!T626Hr`CDz9BHWBt6U}Cb)=|l6P^9s2!nkHW8z?)XoOM0MJ4!cn~T~Ql@N)P+jcC z<(JShwJx;~b7tnZp(|;1;dA52&Cm~&lomq_sDuOhPb-8jnDPo>cb)kt4DQ9?TUn?L z>x*GEu=)h&f=?9|JqnZ%;C-y7j7f}Qay0BCf15a^5UdAqGFyH;oPcz`pEc(hyCp9h znwMAuJrh`I>qzD22%2%&w_RXi!!*YP3ra%HWNg5n4AGKRRs|Bgr1cphb*^j`2UnN_ zEazZY`DmYfi!8lLgBh?IpHy);uymC8QAyBh#WP@KxTF@b5@tTDN@48$;{3RW_~u$2NFm-JGhG@8ntQg)l1 z6bG#V){qs;3bqm(y|W6j^QJATRkDOo(wG3;@wbo~zdVn|jYmbR-`M2c7(GrGKVXq%SsK(QcK3C2 z=Be!rQD|Z9=^b!tFg+7}u_$1@X~KHaf`_26U;~EO6Ju`$M|vVk_7h-Fl^u>R6Fi+S z&vd^F<;FK$Qwd5aS^VwVNK3L38I;1-mGejCL9nT^&8Dz^E+8x84{MR2AQ#QI=6kNQYgi}? 
ztd(Q*me8m7HK(8*_j)mZ?vjQL$5SX{6>^p&a6@x)m|D}(5;b@@0_opp8kXkEv1KKq z9IK$7ec?*aqoCh$3m|lDp$++$*W{u@ z<0Tk~Ym)gc+|$pzzly)t?-?S+c+AW4ro37m*SUT-KKKTlQ z*}sYZ^yVMNG;#eZmwm$+004tKBPg6GXa{(dH59Eaz^8ZR=~$!}@Pe(frt*5mv^usP zKlho>lM^@_SMIzNizqSzSDRT( ziid_e̵^0i7~(S)&}8RS06n30DG!^Z*7f(~@cx0Iu)JhSO4N78etlfydA-qN;fj<6&T{IlU<9-8=cBL_{ zY+R33JVg8JjF)_or#|=um2}oo^0vr57>R2)uBK9O>f+hBa~YI|Jf50fU=4>DJF=as zg0(%YrM;rSXbd8Gf*aX=qK+H@?u!9Y71D*{A^~CXJDDW@BVn;nTNrhhLslBVqxJNXC=n3Q2vt|`n@Po{lmvq2h)qU1-_}OD&t9Nj!Va%iK+J|<9#aRlt`2v zLc!@D1z-b=Z@+anUisr|$V+f5;X`51qHEOF1!&>&ct3RgV%t#@z5MJiK0f?|C6&TGsMFr^=fvo9ZL?0~ij zpW0oeBq?6P4ZIqHjH-GK`-B!wPclD@7KGKc<3ZTKn^@a~CRs~&IyG{Y0h@vC@P$}s zCS4oRh81Fc4R@q21+=eP+mgp$|@6s&A&NJ>m>GeP_A?pS5Onbtzo*41-ylDl3Ku-b>^)d zi^ma746P85CitmBnb<+2(sk8@R&SQSM03cqN|aqxBdoy!A`q&9*o==2@*nDm->ao! zj{-!&VI>*?T*gZ&0rKEUfb2SGpz2<)fX@E5)HscAG%o;{h4lx=58*^LEO=uR3}%g_ znJt?5$e1{=x~2~(KU!O7B?7DwhCLDX(f||R3%mItz^ycd3yrf8J-9%->MpWrfN0|2 z6kGBDv*Wk05E!=NebxTps1WieGtBGdzn*a7+|C2vv%JBmq|HzLcfCQ+LMg;}MHL7Kf9(=9g0=2B< z!hEFwC-0nH0JbG{9p9Ce}o(=ugY1AZi1}ah0(eXSj)FO0Zu~UdbxeIoyvHtbdkiRH162Y}x4Xk+ejZ46R`6 z!TM(&MRzYXya}(H)9tUvK|lrV*<4Hjt~W))fF6mAuST)3WSe0@KFBq+?5^j_SJDB$ zO?*zU%Nu^rd=5X9wz6uDi{KtSq_M6_>c|bZCqwf-0LMC|DE^bZ$J0B zqSH(+xVM3t`xp&BT;broFF$@y0`qX`3gPjI*8N=aF_FS-XcvKV#^>B6uf=-8LGe8` z5N^yh{2%bb7#tIAbiEwFq{?UQRmqJIaQj1d)2ev}2*wkH2@Et`g={Ct_592qqBkvrl)0gf^fvWZr&gzN6;?pTtGIE-apM&6dKCnO47U-J zZSV1R&|)hlXA{YYuLUF-%XYB6!k*H|06=63m@0&eiEA>%eT2A{C|y*#2)5N$6Sc7);9Y|Ts59)*;DQMthSj&u&-t`{b&xS?K00%86U>w3G*5ksXAiTqs7 z*&Pqve>Prw?d4d!N=;id%*GKcZMg1hb&J6$ny}vLp4kGBXm!{G5x@CDIW9f|iXCFP zak1-K->KfD?kGu$>zLh{Z}Z8Xc;eZIGaTTsLi2%**|m{KVTU-?5qKEFn(r>p zF5=2A#pgcvQ&>uO<6E!1!gCe4SX3KU*-1^}?89D}1dBTvvcl+~XR1;#R?o$s{pp|Z z8*s)MUs|Y(iK#W#WS?Y;zIgN#r{l4Y-WPAYI2kW~FVkO^p+U&a*W+xiN~He4Q;l>1xg)R-&?U@8NV%(BIOP1vsBYzSzCzR zK2Sp>F;Jde!D4N8GP{_TJa@i*cre}euYTt$oy-=H^i^^9wQ1Jjay)qQ<55pM)?fV5 z*W+jZ!B6s>axCs%k-eqHu?o81nxaeE&B^4&Cq^&O!LyCAl3*k#MZPr>!m_o#4Sf@T zL}QFCXr$&Be(TXMw-SfbKt0Sh0tHJHmaicTRSS)+{fMTAn9&08<8X`A05nD5UV^Am zUt!$!Y>;NOG9?RUXJClbc)}tH10Y_(8k-_4R>n?Z&D|^zs+7H#Yfw3>2_jE(9V+M{ z6J90wQS?`3mnjfp3DajwhyD@;9733&5D$_)oNF7AeL!IvphaC{d1Z-+qw*{Co_%v;Pz!rh-U#% zt==K%%TCvp44kZJ2P+_fE#w>`T zp@dJ}{oJdURPs5{EkTzYcb zqf$WXwlOGly}U@Py_qN*JeqsNmoP&gR6xqnofo4t*3>s+zL81Hj z?^CKkS3Yl(L$pDrPL#=}L->~0I5hZ_MfYY;N*@d_k*8UxG6LW5x9cIj*h$9576Kt_ z{Rv!jFXO}?7ntaCZWl0z3t7gbyNjl{@N8l{ihOg!N-TS_K9N15WCNm?(V((?C2*mow@hXJ|Dd= zFwSgT?&16;N;BDWGkHQLF|kI{tn<$A@eIvq`MvyBu@-{(j17VwG=-~8c-{>B=bL#; z>DT`>WpXp-s(k?7$@S@C;5?}zLqbVIN$*_e8BDb85T#*$XHrZOpKIb;74vnh3^@1N zvzM`LT%YvIZM>E1l^S7w&-L_GSYe8Va2;6_#|N#rhpv0^5}l@5QEpb-^IMW zKac1A9nQld?PsT^@8sB@d+HKdtZ&8d{ox@goVaz|}?PU>E7?|vtK|Mz}3{_n5+ zVSMB3U(1-uwVhSWv=i~QuYVO2A$vg}x0bR-&blsA>js;lDt}Ri8FXDJBa|B<;N0aO zCnmyjH8Hg;zUI-rzZJ#kdT zlkF}*2b8R;ZL!h{Appn^`EwHiyefOnZE0pL6aK2fNg6(CF`no$?j#BF^draPkVNHC}(KAqgln7FeU^E1?q zh2I>fPU-m%jbN4skAa&?#1D6M_Hi9&9pDfA#eBY#UHG(OwPB`jK?xV*b#^w=8IMv) zJbnWuU>A&{+*qHbv=mwXVwHO1g`lbotw2GQXDN#fK@nGOvvyB^;KOnMx%2Vu7oSI0 z^%~?VG_}LI=^H(Z3~!GY|LQ;Eet3`C&sdXt!53ZfC5IDbQN5jszr>NNPL43iGlH-9)J!vk|iY;63iSHmjh(Ccr@l%aiU4deO@&>@NwdZy*ha0C;ht_HN zlzIU(%0r`>Fsik1Y9615(GB5)K+A>%`sAYk> zu(@PQLu9LHQwjphWz^vku*AVGaY~jus3mK~3@mYnLVKOA4-3@eEU(%8pa7NP-Cca7 z7>t+$IG4cCvH%w*+*&)t0a=RM-`yPQsw}!mB~)y(0Xbu4WssZltE|BP<`$!GQt;7k1%Sc zJ#-_!#yT`(>$d4%E`YWG1wqS=R2ZtMqou#x`TJ0h$Dxnm@ zeVbs+v~oJ&f}Bv19m3j}ch8gc3iSjRNKna_7&V=Uf87`LX1HF-L+j!D#VsXdi#QV>PJJ z*oz}zn2G>vP$XEWTtZRYBS9S#Q@M0a{2N@A3n2K->|y|iia~-cnWd}761ooVuWQEJ 
z`6|Ge?+Y@Evo63pTGzB1`#gSBQvB^JKlk50Cu5@$GUvIMW(17xWpOJG$-Qz<_=-Mpd7jEbW_|L$_mUo*kMqkj!;^jXdGosWvO7aC)dFMb)H^P-T z7PCd_w_o{2eDmusFxP|Z+|}3L!@k1%j19m`Vd*5}b!_I*pnH$smgbtp4d@z}pkaKJ z$#2cA)Om)#C=V9qD5lSUR*10ai8Y=lv5rpNri>spq??K1Y}8}GuGHdX>8Txzcdxxm zTiN#s1CT!vppU$qnw-e-G&R%h3?qZ`sT1CH^7KhcqfH|-EWI*>eCa^m^Sz#K3eyu8 zG;x)n^FK$CzyL)u-FM%9HOA-1Nl+t6z$8KB*v=-HW2-x6Z%`r!IT9o&>?5l$-ai~e zC%SOTuaE`0M6F30I#9Z(3m2#|0a9QO7&qt1dgRi--AWf`He>3}0>-pex_5TO0CC5J z4g;J^#8tLNZ$GuO;TJcrT_r{XfR82CvsI5`R|o!)j(o}AD#*|#fTuX8op`S%*&bVY`LL!_Pog$4%+DU6X)CwmLk5Tt(Qz{%F!63YFthUqoDXoMWlduY$^iO^)PChnF0|w%~MjGRxCoV?U zIl9Vb;5r7T-VuznTXE;DarR^@9{<2IC}ONBmh(+q@!R0Af#DHA@gP8$_#})@M9;*} zeDoLN!E+bm8{hpJHA{)X0o01Cthbx;4xCZJ;JimX`T-{M@zxBr@9W|hfA$N}2OZr4 zSJ^hYidy#z7_G;ki`DhT=tB`Y^VkrIR>U;QUQG>d@QJT=Y(l<8EC6ez@f(`3(T2=q zZF2=9S}XFFWS*`8I<659k8)y(tmSFy&`e{9U8I95iL2Yhz#U+S-NP8H;@L^KLW(}d zZ!2NjIq$+hf9aQh1>nCJ&p-bn7~5=fhy`BQfQ&u&&;xk&2II|_ze`NqblgNvU;4gb zxIMTP{Jpab{SaH&OmW$@Ilc(rDx=(bhU+jMA5y7DV|6Xfnmn5=jJ>^~J8FLE%V)n# z0xhl*7$w??j8ulH{ZAKBFLEZok$ z)V>e432KH5AD}VI3@iZd=rY~{O->iKLCUSPEVM(AtuJe#U^|Jm1LS(?rh_0d%!|W>7R1f(-NqfSUQSxOhrb28(gTCd{gvMG z;`~1cEuaxx@O|dxJ4deskWO92f!_*Ol_O?TT#~mf1W0C&^1fjLCStG=30!18x{?J9 zW&;-%`=U3wS!)*kueU51z(QJS!a#It-?O!_iiL&R zOMK5z0QbL@?;aIt7fcx!q%v%rp(lGi$}EH{(4E$&@h;RRm9 zGkLDBf`*Ilnr-F<`5ZpZy>&lf(8cj|0DR8oI!3=MjrgwbcwPm)3Fa_37*^w0@=;tQ zKQMq_*R6`03VkiW%Dv|6c^|Fpf@G)0|ETeYF-3kyVpkpB%F@76l)9qErui;va9Rhpz#`_57J-3EH30Aa}=N|E{F=c{p*E#`wuC;5xIx|m`kaBOi zkMlE;K*e_#u*yeC2g-G@)h3qr+AhFQwF*Bg+g8Y8|jhPsI)jP zU+J-s7Wg}r8P6ck^xy6SE0(`uU0%(8InI1oE~4||A!&6>1&XyXJj{_~U0A-AiZ}Ok zEZ&CMd$XLE^C+Pp_rjP0!9q}UJ!^kvurF@jd@FwQU;Xd#pZ=VX zp}h;pu4D1UrAK14tt-C${0msGvEVXqo2a<{8A`@u1Tu}p@Et!s7>k@u4Kapu8CtWv zuz^+pZxq6cfY`gM5z{mwVN(wpt=*YADbchkA&p71~CI5hWE9m=Tmp(TBcF|{I`7cl;qO~dhO ziIq=oz%w?;C#?H1_6`g0{;EO98$NLic+pI!g`c5e+ck3X^em+ojr0kJ#y+?YVSelUm}QZTP&ie1@aVQvEJpL z(%SXZai}gGKG}lY0KeWvU_+=Wat((X&;3q5fm0k z0KXgzZjLVO1VPH58%TZ22#@ut*{UGL3#Y@ z=YEwa*@1NR_9*gJ4}}`;Az$mWRFr!d1qrKv|LOF^eIHJreCj7DSG}Bm^x8KmTwt>Q z#wbjqR~l&n84g*1B2orhHi?-gEsu(nsUtzZo&Mt={~>J>9D!$U6OR_npcG(eMt-~R z{s+=aKl(u!V9m9E{1Kov7{+0nS^n`g=8d)AgMQYRL6ETi6^zjoRFx<~W(NSY4&BgI z0!GXOUOh`>Nsc7JCGb6);O-Q{jVd&8wTHQ>XByE`0%M8JyuO6R0-3xKXpfAvi11yY z+JO}h%A|j3#sU$R7p&e}z+6mvTHIjano*U3hU_vadrZJSMFw0vt9f)HfNX-VVaNms zAbc{Q9Boz@Rg7y>clRCwd;w;tW!aYkVFS<9#nDU^iH_PMSUCvsUM?^Ov~I%YRZR3+ zqzZuW5L!L?Ha%o($P?y4yJfOWj}&6!1#M=+saA5B z8lQ)79ETHL5*7Ce#+;F_wn~gRYUpjhHFW3|?yp_e$r1(v0?0kfK>f9E7W~vkk59 zQpIb4RAj&Ov%j2%Mh>U%od2^lFw&MTzcHHr<^TJ|pq*X%)ku1uBPUMcJ!neb{_m?S zJB?3WP5(&l+epW-HSh*Ot=ppSdq8bm@6Le0(72pa711MInw-yR|i2 zRG%dIJ>HckPCXoz%6j`c(redWCr*I1K(e(^vlTRfPT?y!7lZ_QdGSc@$!zq@shD56dV{KFRDJ_)Qb}IM9Q4qE(l*G?(EaTQ zlZ{~8m&qHt4iu-Biemj~o`M4(`@~1WQ)0TkEnvEC?_KtFa|bkuMc-RKuAoBy^R65E{+g`vnZC7PqN6Og>-)+a|mx^BYva!|>=4p~1oV+7iMA?71~h z5sc%_c;}u^uU&f!g9zspv}sDly~~&yh=5 zw1hwjpJXAX<42DI+iFbH?bB&tpbG;u@d)hs`3n=Nrjw{(XnYemcr6Cl9Kozj=q%MU zbC3|h6?+Kp>l0lHz@6)sQsB^X zv`vU|CDFtVv<|O9VC4}ggjnbWD1zsA05#Ni-XI@gK#cgip1>iaeru@;R#CZ*P!Em^ zBHt&0^7bZ+#eGWcq0u6&*Ai6K-+qowpCi}T1owD^!aSX1w4t!_S1qLO)m zuudMdWhTd*i}j>b*v7S3!$vX5t9i(pQ699+?@+8jjARjOe31=Pv5%k#_T4H*dKfl| ze=xI68ST*5I@qr*m{bYD(x}@igO8xd9j;T`t>SPEAs>sh%vH_M!UN0mXu*|jX{T|6 z&CH>Ua$&3kJd1hrJ!3*xvNZC<=yFZQb-Aw_jnC`N0b^HBJ)i5_8SQCl3mY2j-3%T{=gzhrSIF3#e-j5Zb3G14$32Cw! 
zt(>hC*k0GudswJA)J(;+va%A)qr^N3ud$YUcqX`J9k-{Jb8|9kMO6@0%+QjjWm`c< z3&w$9>LIR$!&g{QWq#b`dt+-9K8?Fj0Fk+em?d}fdY|palBsZQz-%b_Vh*^>Dz7K0 z3dOoTtg%`cme-o70(Lk}Pf-j4$WN9=1?!?768gBoHNzvg-8t-!961`0DHS8D=!yGp z;y|_?l`GTL=pj_dH=Tvd(;SkvEquynD*$H7fPHt2j)FlmuCVrVjlzPT_*;ynLbZp2 z%sb*DywNdd!;ZOAdEsf<^SIABQ29_ejc2odMrk`QV$wt4RH&qXP(XAMN6Jlp3)Gd|ru+YHZTmEt?sC2;mJ$~c>8g+9@rupHB0yGAqF^L&8qGiNGB zQh;sv3Za!#J(NQF?6c!O+mH_V!)Jzo=2N-GzUgN68T+x9jmv`mpe-SS8aY`$XqAlOg7n7;em^Xc-NH`9d|?~qiS{R`COA!Fa4H*i5LV)JW&*m0v?ZXt)^xpoqNwAJbHWS^5x6qksU^W7Igp}v4?Y@6OdQox5fj}R~ALc zG@Jq`4@NSKAtDee@Q!4VE$sMsFsiYrBig0$w}7;6aNeC%34*Q}PYuspMaaDT&I(w0 z4Fnd0!mzzde z^TdNv;gtD0*Kr64>KL#eieg}FY60$c^VSuV&dE?JGg`t!0fb76J|jeaf~MU6tkVu2 zna<(E&~^g~O7s zJXb}zV1IbFs>tnr>huVCSjR9hVkklGF$Gj(J247#(+KHpKyom=(yvE8a$mZ0WiCyi z4MZwS#WmzE4Q=vN&#)o}xW;B7Zhh(2*lkkvQIeSPSrJgkxkA42OZ&5AjKMHa&w4m* zVKC$V$PdriV&S0MtE13EReIpax%A`%kEZ9obv}*$a3W;9eT<&9$V9bzy@;KuH`bOu zRNuHr9daz3IR;Zt&P|nolCBnn(K-%~m~c9hho~jca`OTkz*H|r&LglwWZT|-8}eK3ZgnC|Ar#iHQ<9XlBi(FB2xJ*-T7^Cpn2f=l1@C9k)$;e*lzfC%3+$ua zgB%SNH@wW-qg;Wk40L6e=dKX&5W$Er+Z>frHVM*MA-`DUA?2R^up+ZyV9{F|+SBpA zGamCXD%~a4ql@^Vn zTN~i&aOI)H5Y&XgRnPbl!UDe!GimR@au4hWuNrRQb*wokMKG&&EadrIZye&41X}Il zeNfm3^cEUl4VXv?%~V2*a|;oJq42l3Knw=%FCkTGr~v`Ma--s4;BbPF_7Kp^XuqW` zm=5t5YRL-w6{K&90~kUL%Pi|tLWzj{b$ClWtafRCgGw(&tV<@MmRa<)khhM)0ynWl ztuNt8DYbDeK~ONp5(KYx%2y*w<1TG0Y^vQ>t?6OOg1US9hf*aQW}X9O9(VZM!YnjL zNkJHf-W=mKEatF5Wsy4871JnfgR4YIGC8yYLj=*yA|zo!kS31}9|^&c#B*4^Nz9LB zd<~1f%&HmJrNW^BgnN4gf}y-=VX+cd6Ljm#gyc#c#uEs&F@gVq5aWmxk}IG`VKnwp zs6q&fFg==xxtAgM3ZcsP+~~N>c!8{yec*@gT7EdfLcsG+At?$_O#}}qj5}ApRCYbM ztw40@_=z}_&3xg!YPo}uLujvu>9+w3aG!O6x1F4vAm)NYka1dlqzgEMFUOX}N?wqnlp3@fdTy{E@XgSm#(}$kEwnhw3OjfTi;g zC>^W!@s0D934-y4j}Z0{P@H$40VAMqwnzJMNA|xg7?ykQ_RmWrXjBc$E4@D+;5WSZ z!)N#u=ZBucqu@bY<2b!1tk}E_MTSqbW^-KgjgO-rEFfDj`sOUCJo%E`6~-0xxS8wu zAlyjNlW04`rF$V`;hrc#4NWs8w80z~i9y1B@8Mu&L+mgJ6rbrjqxFu|4>hxqUm zflW2{FJ4)_TYGpl?S%AI&S5V$MvAzks@j%RvuCf8Ys> zDXylUzI@M9?KZT|yusMRq8Mw+UX*6J`QUlN^ZeWOi`WP5cAu!YK~;RNu;3hI>md)b z6^%Fc-<-lKkzQW~ghE9SbVRh{XJ(_RKzMNfM_@X4ODS1QgsbXwLq9%4L4);^e&lZo z`L;oixQSAV8etPlzi|J4Q z=-*P-bsR6Uf<6!B`6~S?T*46*$P_pYx-Y^rLs_CZlNQWld42NJpH83t%x5re71E_E z-%Qu8UPy1gbR}JV?GEeHiQv_j`l$rARsx$px0S|A<1vN;mOOiX4dp{Y$pPocR8oxv zy{o^M>_p()F=S|E-NU9a z!?1AgQ&@@VtLtx!US~%qaG6^KN-hT^97z2PNE9}o9kQwm2%bBpHT);lsBeriFBiSZrc@7$sA7~g=zKX0_ zDC?ppgki0an7#_w^L#BOg9%9HQ@qwI7-5JXT(3?2BkgfU$g8E3T?FA(yfN)<#Gv5O zC}7l5Sj4HvI?=8_80Mld0sCbA5@!I4#xC(A{f7}!-COMWW5*s&BOUeW%~!5OWib`n z?mn^bO;N>5EPfpcM~dZZ7>9wBRQ0B|hWnv>XcEKg($aFY?c51PD_~r8lbiEf1>wJZ z_gm>0P_Fkq{22Irs-GeS2&0{wqwGD!orJM<_jHpKUoR+o3!{<-O!Jo;zvup5ri~?V z^NT>Hj2AVI(fO5sY!k;~)*v=B<3mPSg*V{c&#|CYC}0{JmzNfaG2P;f+Xfax+CZR? 
z7W3#hOnzGSriP~u;t7S5MTuo(LE~H|>nE5w*Gw!ud4DN_us#D1Xx>TPN0DEEq_yKo zTAjNYO8n62!?c;5ISY)fjNG(?;q=^@dx@7^OUvZy?LXOpY|@<0Kle8Kg0bV-(!-J& z2y+|Q+WMBAG)Ofp7hCw% z3=#E9xK#Ji=vSAUQV)SSB^X~L77~MKtk6_PhJejFlrgGEtdKpbOug_NheZWzR4!t5 zf*8>_APfpL;8h@$8*58{WMwc3zase_!C4-TRcx$nu#&EU7VTQ`!~SEursOj-if|7P z#cE(T4Op8M8rBxM4o^=Nib8dRaUp1-#9s)rDiNKo7skzO(087VwN;wsIn%fpucf}m zQ!pefBUr?CaQ%vn7NfSiYmIFfK?^D3IJK35#JOXPrZZUev6h&axU_+_9swrJ?4+@= z+mZ56I;jJK)4<^~kAky;rW-LQ3`FK{&dU;ly=A5ISX*Qw3LjeYs~Gd>)Hs{1GIh3g z@N9^l_p!I2M+2Oz2;@<4SAbCSbLr}7Y)ZQR5NTm>02JF>(a z8?0S|HgyFm^yuPr1N)4C2C?rBDv$Y@;zE7PW!`p8N!Hcb2|P^Y7Ft4Z(aP7vq0)*cL{G^Y!EOcy`dY@SbNu89WfXXW zFcB?*!hEtH4;u-%h6M{oM8D&}3L=VWjIk<2`R_oG&aCG?i;*%W*TXR~6WoO8%-n~N z8DsMc$H*?_!2gj{_g(f3&&u0je7}UmMa1a&RvG@ z!U~DD?^yJZshFzFxE`61s-nYu(kK6Rq#pLsCf9}sg3pE39K5poJT~bAZFu-afVrKH zTlt3fj**82?^2Ogh&jNvdpL?`H#R{sb^NMRwyVLzdG-EXg7PY`M5;SgH7z3)sX&|E z2@e*7FN1Ne=72GCnHcz(9Qw0^G*PPp53j3+nCcLgoHx6qQmHXS;l->+b~|P|#&6f{ zOSVP<=V479tDkG=kfDXN&78mC}Z5PM>fo*h-uo zdgxRfvKPFIy%)YAZnu5$-d62_Q>3#4XVS)*CnDMY7{PmI!Abwx|M07n9KDJ63;~q3 zf}R+Vf!bR0Dk>Vu7DRG)%zp-%?yhq#v4;)fUdPLqN7(-TfA)(&`hc)Ze?NWu^*7Vy zmv5x6|K*PnoDkyrhf&Z;V@O}>@Nn-DlxxPI3cUP>8wY@lamGzfBXp~H7(bTrAB`8o z2)`QyIC{7_{_s>|-7mAxjiu`w>`lW*deii6ya)u`Rzho5eJX-AUL<>qXK0zZdicHf zvp((#&NpbS1EKlmr5oViZ<9@>6(N>+w<-Htm{|ovY}LaS@a>F~z2bSPq2}1pE(!@z1mZo1F-);8 z zIX8+3aUIAq2z*$j$3|HP&iv*EEX6#_s$K1!?e7Y}>+s+;QlzdS2(OShzeph-bG8>T zy588Il*xxct8iF_GAfKONB>PABClO1Xm^$3N!Q*YQ_U{t8r7=0j?_^S`gr>2=}#lS z)TNO_m(mCj6br)WlMKU=0yu+{?-J z5FszaeTULV&pkvRrqe&1|6cm&`##Cq5K}fCQMnOFT?G(o zgQ&N7CWVBc!_M9|6hB~81Z;a=DGOE;#~?aGXpJ1ll*Z!uE2hxd4lY!)bpgd%2UtI@GB_~w3k;k9oO96N%72V4evmWP^>RJgE)&z8E@ zv^oJKd-4YN5rDo*)+XSz@?B*Z&oR&10zv1>8!vqS#lY@211Z};J{~yNmQEB=Fy281 zLqRMNoV!jEfl8H0;P;>XrO%=yt);Ji+S=KWpN@BC_AO89K)8xwcnHOSP zkVDo~i zI=(>CUM(c+8*{iKr&Av}svkS_2^Ja)K54Kz+qwy8AmDa&DJ^3K(N$!ChujR!TUZC`w!*biCS8`ny1{|i6eaL zX{Kzw1vE>7JQ!Cq@eWl$HEOVm<;)rcM9L}{&3FlxmggC(?lB-7C^q#349%ByO%b@v zLtN_yQhW)wF^{kkl@)|`gny`nDb)EN4BFru$b@xZ-4JTmS#$BEX06yIz*ud)lA;ME ze$;mLB9+UFX?EjnzQgiJ9D~7%R*%uTw*w>FCa&UW(@~To;0$<2_K0)XBhXRfsHyTG z(zKl?fD7E{8UZ@YKKmrIf(t*|0GDU^XkE?<+*>F-GK#HW(wP-q@08ZOb`GOP(#CGG z0Sds&u8!U0{_WI(d(B`SnMoc7XFc(MH+tsRQp&IZ209yOuuL_aYM7vkK)t3UHsBiV z*FygmvZ!XBJJ~bU9L)R*<{-23{#`6_CH9+wsG^4!ksJc&KE&q1SOxQmfYq=_XD}ie zl5GX1kl+4&pbTU&20ZBQD+EyltW43!#A;zeGHBhTdGpRoNW8l9UExmN%xwaTf}tSb zbYQWwAcG1^opgi{V4(2P6Gubv3n7xR*luJ9g7L|K!h#RY7)oEiq1St-~+ zA*f5pR`_xpzU)mUxH@j(Jr)?A51)eA4xXg|o?Y*?USnFcxEtZg1D|jC#xD`P)zbOX zgE5;Q?{dyq1>Q!!WS$b530iLXE>FN^jE^jm0;vt8FUmW)^GP zf;%eAWlsuWD6aC_7!OZmZL(oA2hNk%hQdSNpab2~ieW0Yrqy%rMUXE0+OdQug+BOO ztG@ztkcwVa?su;3moDL;6M7p0F;7)MN0jFP=8z*aUW>cfXO)yz zgcoOGpBOCjTG!!0)5@3!Dyaagfk5Ti*Fjqj)bZJ;=&P1GuJLbU8XQ+tdW4T2COI7KOsdA?1MFPMqU3#lh;_ z1y5m7xZI{)gUn;ab@29#CRh{rg$8qZA|fK!qJSZQ`3(Cd70Sb0UcCt_D5jJ&+nS}> z_jq54BGmxjICHW&jgQ|Z|2f%L*oW3^K+B2#nrxz$wUpP@;OX6=*h~TM=;4!tX}W%j zeC|uB@67S^&;R8=OAkGABHfyPHNEiqoit8$q3?g`#dPEKae81?&xX{6uxYYCOcbyq5m>OMgUdfY;dDS?rxfIwJKo)=dovy#qjE$k(n1l!3zQJp(GR zKKGKMvN|o!V^PQ8rO=~MVRCYw*fYwI!uu>${lL@rvvfcgP(ZA2aO}jf^wM`%}Qt!Pad?34wdCliB{P0MO zV>?tmnt^tU>DHxXV5dXr!|(lt^vD14|3g`TzwXSCiEk5q1_KvuRRei02y^biD2BpA zVmeYDw-Z;_o}T}^@1@C?@1&WI*>v^;C(@ZG2N9r1{=q)lAs}`g;Y%;io-kj|xA68s z56VfNt&PMl_5)|FswNGiIpN{?>WM;lo;~xi^vRr9b%cA8^JX(6J`f`3b2d z&ZjcMAdh3L3aRc11`Pu;rUoj;D6_#{hIh%hVy{b+$OFzFXKNk^SW8D0)?DFT$SC|1 z{;E+A3<6f^TCC|HFMwMU@Qw-!MomvoVR)10aPA;a zc*;g`9-e77ssakum8-8pBZpbH98evUnlJ{jvp6q$$dXh?pSFnaxOVAUY9LEZI{R3f zm|jlf(`)IkzWSB)#XtI+w2p!J@^_KNU@v+4F1+;KCx=t#af6;2H}wRLoES>;RI{6> zx|kl+*#+S>z;d=pO^y6u{e@c7ij%!DyerJ&&TMdk;6L&(y=U42O!vLCIEQx&d5tWF 
z8Zb0K=%FfMHQyo~yoMtKimO%fILlj`+d#V@-)K2O20C%%a46Pi9(1eFQ|vZ4Noc zB9Dd_BZ#o?m~&`H4@z^>X6irQMq8C>8fELs)oYX*Kb#(U$&6T?=&9_eB0WFRt zu-KC`1gnC>g^U7Tw@=3u_6)j2R)wipr<}kV1ZdrwRzTYUuHwNI z@e0IKKq)X0*Sir2i>|45m_<`Ca)D=B*`1RE0Zf5IYd|n;=*xpWARG{wOeiebLaDHP z>%A$8g~u{uDywavT}9HUZP549x-crBGtL1@kf*Z(l?OzLLk`;@LD)OE8avV;nKkY? zJe2xzBNhDY@I{{j^A`y&0@b zTAEZM43zcR219SFJn+x5&Rb~U!aMd2u;X?Y*nlE43>m=eGYwc2isKwR*9ss8Eh{bY zAD`KL$?6iQrK@M&@T)6W&M2!W2)CZ%XI;(pr088h(6H2%+1N3P*|e> zVY%gV$K#|pB(CH-6j!eC9>2?iJ=wqf9T(l5i-Y;FXtg? zv;Ig=yI7(8OvAu7JX9X%{9t?|^(zMtZgewTK=|xgj3#&;HOg3!s)To7o1nug_Z!!n zp4a)Qf`3%*0S`g|_N9WS5T4CTe8kwC5!cYMJ8jwegmOj4L!jgf;V$+^Z;SATh@Wu) zyu43qqG#=l_{Ac%N^ke&e$MD#PPE1(RSd-XS(1!HoJZ6{q zf9`R8;t=Ie9^lNn-Z3KATN+0=j?cMH*pBoWwCZoJR0nfUDZ|N2?06eF8W(_SeB&Em zPZ!^Qhm0;%uA6mNoqgb#P>XWBK^ky@;|19 z)!Fpy&wV2O-v9aAcTg4?;r9}M|oen!YGi7$C4aoSyph zsnmU{oEA4mp#`J_(`W*@*+q8PV{cg6wGoJPXIB#zdxAs<+sHsjkR|bC8cV84)Um(W zkQS~L(!^`)1dy(#_DzC}NoTo_;pd&v%Lt($1rT1{W5!(IcnV%%@g|K!=6RPFM2TuP zr=9}^RW0#(N(eW#(+5h}b`Ox=nUu5z;6$hH>r9765D+6%BZ*P+N5H957>a}}Go!54 zg6YVi2l8FkV62Mjodh{+>2p7ZoIv~XLKPC0dIa{9x6{~uB<8Kr7b zf-n5w0=xveLQW|IPcAN1q_vs7Ghy_zX?k4 z!qaFi^4vb~t7_yob5AJiDaUA}g`|QqTe2+oK{+6-STH#fp*=zV)?}rG1 z(mEb~VlQo59;`R3q;Xm=1oJd=b3Bb+emivnJD!_ZPE*(C(=68a4xo2cVMynBJhqks zWx}+kN&Q>GTQ+oP2o{49$vheay$doV&o$dt!KEj^`_KMY`n;XK@aKPqoVSX>7Wh(X zO6?6@={mx@XNG)N&%_fybvm^k0)2o-X>N)bz6X(;fjWifpEK_EYbcJi*#U&Dm0AML zebo9O1+V8wJ!{y(daUAUnwbRBMdrh5*YVU71D`)~=mhvW^5yQ32?r*WL(6r7D+un* zQdtsBuCN91T6kawfqT2?XQaQ}B)iO3H-R?|5D>!Z5_q=NMu>)pZmb>m-G3r=4U(y* z{7zaaPlB_aL69Sp0MFWC12i;}qtwHO0%f(e;_4Kb^-LnbLF|!tzlKz32!?9}BP|dx zMJySgl7 z?m+B|1aqw}t2{BjJc}L&rI07@RRBfFfa#hi+DP|28zD~{2Hz6HoVJ#TCm1UdHI9If zOH(DLrPZKU8GWXBc-ZY=p%>@rv9!;|F=JC4s4(n^NY@3dnUj|Xga@^0(kvTYEt6(5 zD7-3YT7SU}BZeKiWrKOTF$ZjV1w4gHpRoa6a0~6(G03RhV;jabU^OYxp9(g;kRy&> zVhj*^n3p{QiG@||z+}vxa(3uk8txfLJseCPE^DUYL|9l}MuCKxHxAwm?;xJ)9W-ugm51Hk`{es{|`Z$w){)MD?9~3TzBvBO72%kWMn*t=P6=wL&gOC zjz7GJx$piqQ#6n@=It4twh_>Pj-uutJ?~fQ)dD|`?R|Qr{rj`@Zj8kS6<0Eh9Ajq@6Inn}h zAdt`8+AWo)7A2>)DIv*ug`lM}$iay{v8f(DsB%S56o&1O0dVehpP8+bZ~T+rbW<9T zwqkyuWy^F&Um1r=c0dQbFy=ueI8%-ShULdGe|*NAIA=a5=!zy|Z-jCa7sjBR8t(~M zO3W)3c-!K7c{g7&W!XAppJwaCC2_yJ&iQvu^g6@sxc{z#8e>+Gk=MG$4xAgEOh-lk z?7uwCcH$pjHvmz*JwB1HUA{za^gC%{^bXGiU(a1yQa2E|j2$TBW$+v<@b@x_9Pa(# zgmvvwu2?Izb<$ifhoJ5pIp0>V{n^ibDs}cZ0pTFkB_7R3Df*@5@~u~1 z3xQWSgGzur&qN$r&^OjFZ1Fs+%jy+x2m4+k(MQit@`R6Y1Tl$a(LD%s10y#i8sCF#fhf$qAdY)7@SELh99ZuaNEhN4G?%Tsz znj=BQCWSc3b8k?q@s0A(u5PgS{f&5RdVo%1?Ix}1_HsT=UfW6&S69eP1C*pZk``{1 z(@WnPPv^gOGrj%2J89}|>OG(cy!ib$(mQWoPG|2q0X+aS;+)R|)2c@?Qt7pJj{&@r zk?SO%Z6mFD-w=W|@aL#AFi3!STRDWdMv|qN#ZCDnk@e`@{SR_xV-YVjqeS)Lf$oh| z<}33i8OhFz*fXr5u`&kx%8!J22M^(_TqiNdI`iTgXa*nS-&AZ}pMk!q(++`}2S zf|1Uchhl2WwSiQaORxR#`^a9Got1Hwc zFh&52cz0(QQ!)>z{7oK7pYb47ki15(zHls`S-6Xf7Ye3q=)QD6cxK2)gx`X@3x27h z?0F)@tzr+u9(z$<-;c$=uIB&xot zm>!Yl0puxYRMbVJ|3n_igQVGD&NVJ~ws&HvWWMDCT5(YhipAyh@IxO=KlSmSNE1`g z)C(^#Cyb8-EAL^wCvQzeET=|G20%L0mDF>#J#`FMroI?-r*20suZS7E6>HlOl8aKe)5Nj@$?50qPLSrp7q;ial8*@$g;& zus0HjJ}m))!zWSkDsHER(zUc*v&6GekC1}qW)=c$z6CIGO(~BYn9ca*DlqkK0qBPJ z(+!ik4$yhD`nw*8wq2z5`68iE=a4lUnFkK{WI40G<_L-N8 z3UMKD%O*5E=H9Z%mhY8gNVS@TlmLBpjane5N*3)3s2r2TF4QU(EKYysA&1|Zp~VU4 ziD18-lUnkQ8AQN=LSF>MD~;JlOyvXuD7mk%@;5G^s4T?Xs3kB}SvdUNYmIvHR2G9U zI|t`hVJ;VfCW+pr;>LYv?@fcfeU$4XPym*i;p|x(G=m)d*Nhj^2Na|vt)`;? 
zTk%g&<{l?pm#-b!?emHSZ>(FEqTuV)81N_9ylDkIpf~q~(3M!6OvQ|bI z3);-l_M%lykUrFAl;+~ZR6At8m2_wV&P(3WkWlE35)|v8IRY|`_kxbSU#UKm7&B>4 z%|r=WZgyaDb?~;~Tyr=7DPi&;Uaq5NN_=w$M-sFT{4M?|aAMp`OZ@+CKb7(oANf z*s{IJ=8Z?iXFfMfO?;bSB)H0r#9TRNwwKWuZ99*A9tBivhaa!;S-vt?j?$=FIBhvU zckhYjy`JkF$H8yusG1Yib#%kISDo*0H+esF;wkCcDV@oiw$V1%h>G0C)KX0X@Op3q zH3U4_ULLbx9-6FW|27c;+JJ=8(hWeDbTO4!QR+#u-rNPjgf$QO^cYA7&xET76Yuu-titHy7 zySvu>k8AC>ea!+_!X)kz$m47`o|cJs(RZ$umuTX>e&YrevMyp)Bx3=*ZHALPXK)$g ztYEzMiNg?S0_@A;e5xDhPQUV-zfP2XRk}U%y>w&j%{1}GT>6uL^=)`RCpi_FeC`I) z^o7@7BU=R0taD8V8<6H|;g=Qc+brj2Ieq3=J{37Lzxp@dNvlZT?|WeQCIcd({-;R#HO z0e(l1L8~phY3drO@2I*|!rk3~l(Yd1p-744A{v1y;KbuMiXOv5Mxb+mu$Y%wcuHC2@=Zh{OHeshPkBrT$y5uchby^TqV? zHzw0tKSVlxV=;}sxyI*9>CRQMoRsKWz6tDZCo;~_)Qr2JjjCZKa7as(kp0#_{t&b6 zGX1J#?ZDr1t@%dPfPz0UNY_oExfl=&8dbAr7J(CJj?ZJ2bDRd`ZIaWwz3oIAK7Nu7 z!MU@DY(hZcDj2BcB_foW<85e4J`i-udjoQGM|+Nh#>%-Yo@xz^&;8I24%DEw-ho~s zc6-QQN`VF<@}za=MK`hQwv81R-$#1;xfjxR{?m(T_WE=hJaIg5VSD?y3}|P2&MIJN zJlr|IFpn&Q6_4%O(Atm8g4FAz|@ujyo|L6~!Mz>KWe@LsiEN>GS3Cu*S@gC-J zDeNQx8Ww9cSGC8((*cyVm_v3yei=um;(ygnVFdgpqOr_vHar##G}D z;6407*Ki$tum4C7npi8c2?YY!p9a#@*R-W0ef{a##~y~xpm8*%M#?1DVZz@iQ9y$H zv#a2&HpE)7W_7q+_Q11OP}bSV`dwrLc2wl8ZACiYM5|pQK(w|B>*hmWLhe&?I%mGduQDQF;t*dy?UopkxFcS3H>_zW|GthxkT;oN(Nanq5u z5SWAG*Uc4tlkaD)6A(fMnNNJ=gXt(~w8o~#*iGzd++Bs`y);Hn+A{p(Gr#aNSeb95 zeNY|k-~9V8eBQi93#&8f&g?Ci5&1K>U|?IA81{%FAnbXc&k$u$YZW3e2MG{oKxMwK zCUR6en&tc~jmKODLof11EI~J$0!4}g6wp23dH)rO5Lw4By+)pG8Td9&8_VS*Kh#lz2E_WuBCs^vWq;DLx)Yn=F}{`us#>z_-;f^ z>K>{Kb8{$zB>5%fjZ8z_Q9p;vhu-^ay7$CAX>tLz9FV@<;P^uz!+#1VSI}3*xAKh# zjS7)zdt`u`7Zo2hg9sXy5h?Trh!l4KGe=u1%p-vujKS!uHpWq8uGTU8i<5D~MF0kv zAWi!U3T{3zLz*gAFtfBYwQ-#$HL(ORN0^VZB}Sv5`GrGPY=QN{S8=H*VCpWge5nGn z5vgI?3+b z!Tl`;(&7T)c4S}s4Chi@SeyyGr&kU<<|~vk#wb0@d;BgAEChLa9&_Zi&Z&pBuXnFy zK2$7SzXKPlXg>p9Ylh4aANuUirQiI$-vS&lonF8GP1dI({o{XnJ$>nq&Qqe6#N}t- z121n0Ua>g402bq&pcngcps?2>TuOU>cKp=QbfkZn9NMIXgO7?w9UH$!f8N4PEg0Rt zy0_u!2%BOARr9h^qwcqr2EJPYAIqds>35x z2b-QxfBXkuz+gC+e)6Y2n(lkzWGc67X89_wla^Q0%5DYwJS}Hk(LGeHoAM800J?a^(2^o z^kes>vrnB$N8U%uq+al|f6|WJfIDKjCS7}FDZTOSiS*;Y zxsyg;r4;TIf+P6joN4z0>kBOsBxvc?0*U=^U!SDh@NAm6J%>=ZMBmv9;7yvk2+$bm zNSz~qz==R!C6Dwv0{gVrUd1(ZV;TdO)f!0wgwR<_#*PIlm)(4MJney_6-GezpmgTm z$I`jePm_fH9%KtrFoM&b9-B_{6pWB}o<4gv=xceER3WT-U<;LRTz&{o-lcha4GdK? 
z2CF#JQ4)uTM<_hehvL!|!Cff!Io~tKeSc$qlqu&-{vit|q7x`&fGUdvC!zGM;Tv zQ59#uk;b008(_;UAY~8|+$oe&bC~xX>Ethrhd@C*lZ__w540uk6~+Kp z1Ip2mc9E$H%&}<^t*Y8P&>rW!I5zvI>Bn z*eG+clRowHA5X_m97!)QA3LN-j58W~(3RhcLYJo>mEfN8W}Sgw#AyZ-*+uTzApK7b z{8sped2BTl+jb=e;|fLNJCaJHS)&WRP*K<;`o8Kqsb)>7lbj>34tYbLrfJ!)dzu2B|x1 z2-28MH!hIQ3s>SM(aBFe`7r6_2sAC13G=SKlnT|arTzh;9|2=+gWE8&rF z2B!xN5q~B9kQW=}Vl8xm+8#by`+`_tJiGYmJZQJ^SQJol$cLU*nhdX3lZ3%ll42&e zfOd~10^gDmKbspI#^lwr#NM6?0gTW{SS?0-0BKEER3vIOa*{D-t>WTV_O`*8pzJ6N zwCC_LkGg5x{`; zgy};*mR8)vT2hI(LZDtH!a)s8WW7WJW(8Xm8($uZ`&bi}m+)<2$*ZUqXoeC;^4x7A zhKex4%ECgr1JHA4l|x>29pW{Sz7b3^ixL7fEG-fu@8Ahi7RvLT3YYUN{vnUjQetlO zplH?dzG`qJ3fMl!1JlF3fE7Kgy{y}oNfiU)}Go< zo=O!cSW8&A5Szi3y@&F(eFS9SOJ`H*H&eK-G!Gl=?V+sDY@ZE|C_^hDNV84~PI2%v z8ij5J;Y>J@RmwaV3!iBv;8Nxh3mETk>Z@Wg_R|kwm3Jj9VmG!CI~}TX){u&lL~bE%#$9I4blxo;R-5z)p3~0 zq%4|YG#&gm=qbEPa4Riv7OY@U&Ok7o5Ly_6tMAwxnb$iu4+DSsK7?T|F0;SIfLYF0 z`crw4*%|#->s5!@pknP^$%F=z?_O{)t>3%np>*-l{A9%ECuv>(nR&%ZnCbTfOdpB>Pc z_c-TzfqeO1rNQ~(t<#y!3z|*LA^XsM;(95>T0KYWU>O?pdbSF@#2JJS!P_c9{MP}n zZzig4i^Fgk^@B~t@QAx4PhHL_e zy+y5hEs51&2O1jSbsYeh@zXa}_t76{g$~rPw+@3R-Rau=|GYRnM`y{}~sT9#~7!1VaVy6%7ctpSFgVF92g&Y-gR zQu-RElp@J@J zM35rH^=zY9IHyIPR|(&Z{HgTCfRPu(8DK&0>FQ1`jYrZ;ul$hnnEaen>l8R%La@_| zyiA}$vq1tVp!M*EZG@XOEc|C(Wtdm_xZC zQm`msa(|90NjDqPvG?>KxGsYYy+aTh`xYcXcQ0jZQDXNv*TvcHVVvB6e}Cd9Kbn5# zXFi;sd+Y1z^7z{nD^+>J`_37llHHDgszTgCvie+Ya~i!km)<##aD=6_0ZVL6Ig5^8 zG?*CbD6OMC1z@d{i@;3{ff{EI-m)~$sED;ZB6OyNhMZfzEPsKQ!wVY0IF^w! zR>zUy=$ppQu`A1X;M>EXC)ERa3a1nVk+#Q3Kx7!H&bJZ3rH@ z-Io5lpZpkfvXx$W^>vgb6baU7A5Vk4%?fNq(nM+`S-w7#x{sZKMc^HQ$2#sDRphFX z2MWkEQ&ZOiCl7=!6qOykSdTvT323u2T|%j!V{ION>~R7x8q;6>@t?#Qp$yg1(Go#G zx!^(29ob*X+bA~GvnX6BJRmH#$T6R5CBTMpVA~4$LIvKeG=!jJc(dNhG<>Eht$)np5WRXg~IF^$VmbxIk#DC1~{CPb@x()DhNnTdf1%QBXEd*oonD z>A?pcpdS$E8i{r{@kY$Sd}nJ=n)=gyLqC;rg=0k7v~%c;aG0;6Ax_ekX&F2~6*6>9 z0kJyViDd}aXFZXw5Wbt)4dy{?l`>szukwBY<6rO zW=|e4Tuo4Kz<(r6g+dh$1wIeHR*2J7OnWJ`HVjRLcn$!i9*H`Dxa%CC zx*h|LL~~lk6{w7i^Ds3pot9O)5d92`4i<|n?FGVLL{>rz-0M&LpZ(xJg|k?5{_h;n zxomv&)p?SKsFXTJ*F~XBi*pQ{zN-|}vd`{5IGCIVPN+6KE)#rVyzqc`MLycGO+94M zqAePTl?f?&q1%oqhK1d#`s~{3tZK7=lQ%fDE3?_qJ=B zdOK_+=7ct7)^Q8B#Y@4B&oWEA%v>RX`?=S3k$FdYD=&5&_TQc>c}bg0rUIXj^K0iK z>m5(vnoQ95vluwfa)!CqcA}5Y6$`=xY&V2pp64?iuYFbMbl>qojE&29He*H^*yCU{ zAM$=B-c<;tjTAqwU;(~zuSvi+vA`WYdYm%1M~VC! zB*5hc7B_h4Vgs;6gfXr$LA_P@wt-DTmXwPpdGB3!jPtF*B@6&GDD1%5U{xu02>0ZzDpas;f8NmZB+C4g&K6go)XxGq~)o_%-Cr90=qM~9+)x4+Vb z%N@Ljw~jkUM5-R*n>Vge)MN&$9YSYImLjOYez)ZN&;Q2f(*OO3zmIUaklwuZ6$G*A zbn&f?bpHJ9G&V}rNB(sjI-Kr%;6VhI4WdGa(kDOsQM{y0=>~xZdKRTsdT;Zo0gtJ9 z4lM^eHO&|&ZUILaPq%JgPZQ&}G1Ad4meqkHgX!_dpJW`QoP(Dtz?GnnCIZt8Y%u@z z9_#OLs}OKpBUiG31k;4T_Tm5j!x)%Q7D)4FWW11`ul@Dckr_zx4{w>pTXATxJAL5s zhuBl=>Bhok<_3dXA0B()W{rIq%Q(YMJ#{?gx+>D_JaC}4LHMNJsYM_%T1x?(cVMxk z>ftuI6qnW(0x}b42#^u;KgW8*6AK8G4Ma!(>?hxs+EHM?ci}mL7lL@`J9~n?R1XqC zkI4O}h;+@jr+@s)OKI)~WmGZD3=#d?bC~K@oi!Bp*+jT+NT=>SL%wQ+>FQ;ES3@)7 zcpoM(e&V&|o`+7KNrw;hlYXw0#_^W4b#z5e&}Oo5GzzX~Z|YLwF>A(iB8z=_jzG7upljOulhE+XakkBTpx*8)HSywnJXsasC-JweC&|y-GVIV4y2Z?!J8YM83bJ(B~ z`(R-wd6kgDy83-QCR5~xHMoSmj6AasByQ;J(e$3D9}J#*=kiz-05aXuGLf$45-mX}*IzXjma`xu&A2O82KO7p$P4-u5Qn!faRUm+S=z9LQW!^?9+ zC2o~$BwNDa*xyb47$=camA@KMn0tU(jiVgQ0}C_Yud}n8y~cXgcBhB#e;m4>OV6MG z0Xb?<(!Ztjqi=nObc z#KIbBBk|}u(n{(9DPA5$I)tn>ge|LytS%u?FJKL$a!8pz?vW2uCSm`~Yi6`eX&bA! 
z?(^w|mniRi_*N)Xp`fyN416wNoSgd@1!rO(rGr5DWsIcmC&sOk(a}z>>!GeRbfZ7= z!QQ%g3qDqx28Mc(@d)_ecnWd^MV|_!Yvi%j5G;=q=e|0H@eI8&glNu=^z4j|8=KfM zKrnqPNC;%g8j_``6lt`4{9rasV@z0@2a3ZSt}pJU9V%>XEN=rfv;=i7Euna5e2td` z9j`9SFmq##59>x_WsQYph6q;eK39ZyWdK0{Pd^HJ6@EQ)-*eb zVneWc4v&ufM;dK_y=fIspd|+A8>DRM-veaW;Y$~QpLK%_Xi1ZI$m=BwYy`<|x z`88q6`ZTaI+I4MqP>$#EI+zeZy4}VYq;-eLsP1P`8Y+#mPiNByw?0Z0fBxTHf4zo! z@W)uB3kX~07}G7`f|y8shk!1HV}lecIShs4Nzy+7ufeq<0j;VNUyi_O9x}8ZGyOC* zkqn*!0mWmW9>fT2rFq;h1d|}3HX?X7whqw2ML-f6`Xu9^a$g-8w~_E!N?f*?#@2M;R*A>qGN5YhS-`3MZvI= z4c17Ik3{rEy2Y(vTU?<=h+wtnn0Pv=PLjl7OAto zXURM&M&Uupw+{0;8J8fi3Z%_!q+x{BvGbSH@Wm0_JXDlIlT#5I92h~+$F)bWR2i87 z8o-u!QW>SOlOR5=>MXLE8xFviEljVZCDN%C&?FU*{1NjiBxq>?S3X*QSh-2?`_tehNz7~6}d6`fvjx?;|F%E?-)2|Vr)-aBw=k_FO7-)>39A+{VaQCw#EVPvm3S>R#yJt^~wBY zZ+QoAohUHy1GbAo@ukwD`xo@_8s7ybs~fx!@Vz@A$J_g76O$cAsvqR8mqMY%7(Z zTH1l@5tw&exykngsclle`$1d!)sN=W8{eN#4{niptAt=#Y(N?&P=P~cI*|@~`YIXJ| z$Ld3J&=!+MJbLs9?>WVK<_Wxo))o*n7AUT;utY+AvrM?)>;w6T(7;}*KvqKQCueWc zFEPsm7(*{Try=;$Z)sDur+gHO0sT9I-yi2N!~q9-{OECdbpLIN15Bg|g!7%^21W}Y z1l1$t7$xUt*_Tpby*q6$G^gS!Ib3%d(==;^+{OA}t;GOzV3DECyRk>a?Q3vrBGXGA zOZwa_@o@{o5ZtgLP+cHq7X-Z2IP+bLI z8)rUpz@&P{$O`nu&!p?0eLg*WI+2bL#&-~wi^u}&b87^fBLiTpLL&uQ1P$d(7-agR zpZpNTVx7MmBB0g84P^+|wHD*qCE5qqC?3RK#t5jlH)2}&-qTr4>}EaB)!KYiO%|^<;7;`E?YG`b(^K>5`4?`$ z$F|9us32OCKK0^f(%|qgG8XXd)%CRe6v&N^|5F0Vk;-^D^jC=5Hd!P)2Z9N1omh=6b@C+<-hO@&C5p79ww zSpPj2A`}{%f!HEHZ8Ja0dwnPXQoftF@20PQ?Nwp|2M9u6pb!NyFUW+IAX-lFq=@)9 zhCiQREUiT8GHzfWl#A64_b@2#V;ng`(KJVX9g0H(NlkXhYrjuFg^0&>`ZQmyzp-5QM$tW1L{ zQlVlW>+~{Ow}i2Hz$PY@m^jc#XpSITjc{Dg1~y%0Een<-os!yV2M5;(?G4~MCEC|; zSj+WgSdVp2o7zf^P>is|@WtV8r0)DVaL+&r*{lsro|$Kkfb`%5;ZR!r3D^mTxPYKG z@fhuS9?Ln_au2-L5C!gfGy0HR-zfJgJOkCW;J?W2+cS6!MvqN__I!MZg_Jy`Q$yaYnm02tx_s zf{6QItY&q^?jaqbk#yzS#q^i|*?*oc4_-(^#M~IS^l$&_-=??U zzL##@nMyM=D=;d~B#hf!){*cZLDs@8o)P!o9s>ChbjWm~dpn3)coEExZUCu2mfL># z@M~ZGH!r1>VFAW**+Y||>9*ldk#l?oK@fAz?g$}U;nT4@uda&~BZU`f08(Idt%VDi zQc(JN%yC*3vLxp={Gw6;6NNxnn)}&vA7>W<)7`xd>~wh@fssSfM5Y-P zsI&cQ-uIw4JZF7nGjij%5-C{?<1UZ}O~Z_kqKNfE2x+3ql5^bJJOYh6On2|S$@!Bf zkRlO&F1*qHL%iV+JcSLYa>8mtYgfUP6I3}=N^iXJ<8 zwXVn!zr{0iq>eo|I+h-QFnI9`&!*=;e?48gaSqvp{fGc0-!2@3-`2~DM_ZWK)0qiq z2r+%gH3qjH0|$bU1l(DrFFecOy>>QE((23>nRviZGhdatQ>BnL7%Z}m-T~&2sW`%j zrZ7JD=qX+WV&;JAR6*O(UDzZ07~ZB)+!n(--wp)h!b@W)9@UiPMnI2DF2ZUN&Vby? zbC(yur(f#8^Uw1e5!~7j(#7XT!XUK0g7+RdWS{=$u)-TFP+LQZUb0HbBj#t9@J#Ha zA>s@Qz(Cd(v4$hdS+e*D#K9rLYY2&?T&+S@Q!YDqeK-|CTU>czG|lXOl(r5R=nt8L zmPxfbN4Z<@{JSfh8F;PU1o^M=aPCiOKs}ii_uyRyZyO+c{>6cS>=uz5XYg>mOAv24 zhTJmb8kL#JNh+VAtPBs2rt{B$>CXchpCW#Nxea3ydrAe&Gb8OY_MyC<*aCR#HnQhB zTIXQbFb2L>pvfqnp`7DoM^a!78kSh!1AKVa8y@9X(V9{bjpM> zyFkM=M0%};hFuD7$m*S^fCRCx5749rdIt%c;CrB1UHw!DnUr0VUu#)Gwa@BP=U@2t#-zPB=IO+UlY_U}G1GWB_8)*U4hDzMc!K{URG; z;^7`Sdl85UMorXGFOfgDra?Fm0xZ$f1}itR_?=j1FI=uq1H)jyJJAe#Q3^V78zP)- zpedUXroIM2oO51+fw?fIYt*`Ar6=>ntGX7csY#wdv z6y(TabnfD%botunQcv%t)ZRXffK2g`hEA@lp)wm4&$g%)0Q^Vn{Ti8X3}~HsGJ{ov zde)n|JPBl`#P9Or(=<<+=FPoHl%gq8Xp&|R?@9&n7Y8T;Cly$Sa;vNxmIdHHDn-T! zD9mSpkXQ_0KF2_7s-aGez+DdU_~<1#BD;}gbyaZ8`jNwxS6|&gfs$@4IL#bxya5Q| z&1A&LdmnS4r9v=iY$d>7+6{mqjYt^3E;d`>o+ z5c**_3ilzzF|qc=aj9fEr?wgW<39J03YP_MY(dXLJm_>jLI`DVSllP>qkl11ytV!8 zZ{E0%`IohBcl4-AReAZL-#Y-uCpqCmTZ|X&8?Rv>L&2l(|KAr^WOK$fUS~so7Ju_z z!P&OMo$t(e4P&rZzU+@5o!%PPo_*){&OtWT?0S3WT z*J%o9Pd(Y#YOC^CQx6c_P2@m|L(YK-2##T{TH&Lf1w&Lw7V2Tn^flb~O~CxhSR3O4 zRQ7B5c{)84Fdc=#fDF1HVDb^*Ok2hgi0Rk*?{ic#T<(Aiajh9T4A2Z0JkQ&Q3HzO? 
zNh2LJb3;#J9L}dP0~!fTHLaIV&w}%*MGL9X%wwFdIwiKTbeLFn!pV*VP*xBnnLe)2vZP+*4y z3zx}XWzY;R0+#J;#cq1$QdR03JW3PG@1|ShALIQUNY7jb=2LkoUA^#IsqCN~>mut< zwmQ$P&v8roBV^cPpBX=VzMqpa6}Z#w2XCY&j}Rn51+)?n8v)|L8Xi%oXz9^Tnnkdh zoxPvlyZtd9_g#W3hsi88l-~Q{Bdqa2!0_~&{Gu5zL{I%?%FsTO+S__5HB3NiiMR^9 z@1C2d)5IA9S6NvhPJ+0Jz2X|OMm<*68ocB1`M!ZLx_I97kQKI=cX)e+JRRPsLbgkd zmn{^w11$9lrk=?<_;>-s+&CG8R>;>bKQQ}>kSV@q&k--dd{n`|j^Lq(Br<7fYd}r` zHGyF1YI&{csFgif+Bh!-_Vdo(YHIARNf$mpn7YQmrh~pXM5)^_O{9Uk7$)~HO1Aem zAmp=u@aj}l?WE2TXuolPjN{sKR|$M>M#u!RL`wAnvdhXeWpnXBZ!ICLUT#nQV?Fep z0#!>JD1v4|#8^r^g-}~9=5@#dRTvU0Ko1OFqiEF0Oxi^Fmk#c1FpkYive^LfdaCj) zU(L?EC_c@_4L)>{|c~n(t~4MC_^667=8k_-^i}Wf~XN3IwKE) zT*ThfxS}9$ER!`XYKiygA7Kv+b_0{cAV5~D)&UZPbaOA0yRm|txypGBT7g`Gk$Ptp zg#x-^T!J#4hpwN%_2$kdV9#VV0s_Yk>>pwRhz(F#thaa>JY{zYuL%1x2Q8J>m{PQJ z3{Mkl@QZ(QD`cFCKYM-TbrMp7`9m9H(LEp~Og@A!5_AFaHDCl6!Z87}2WY+$#sUX> z5AEIJ84|gM2H-4ANEBqMCfya9<{^cb_JM=wvQWY)Xh|W~K(~#7CNvzMKRfhQYL9c! zSMR3VQ$Gf;r$Pa~kn8SGQPy~+p43ik_GMBet&oeIpoz_OET^RYvS^w4>x(Qz2@Usv z&(t0b9I7Jtwu(i`Hypw!*i1}R11X{MC@J+UuuL*iQXN5nv3$89atL1vu?BEOwgC3O z`GG$vf`-snS*EZ$2oCcqu@NW^T8tn}mNZUB2*3!-Mvhzez6i6?UAc^FclzmUdNMVg zo;;yQ#yYOkMRE+UKjZ)`!(0|vKo0KqL;hZ3ULZ~-HX87vsvNFJg#H{*Dq$`;EDdE~ z+|+cnKAxy8WAb<76)8?tKp8nA*msRU;}RuvEtjj6-Z_z=eYKB`3bQ zmaFlWNH)!{ZO&q~X*fwdtBCiepmiSwL{Oahu`gbr(jWvt>zfJ!wyRTNh)vIF+BN0^ z>ywPwR9Tg*t3t5?ws0QX=?41q&2Ro*dhO*`2o9zSBpTfZMD73W5C1m(>}Nj%qiU5$ zv}^Wx9t%eQIo#+uJX4%0VMj-3C>CEcqlnMZ6`8}cQH9`B1x%`dE7QZI$Yby!JB=G< z>I6Z79ld;Aw|rT;`TosFA1h%S9%i4MBY>>8hdorA#_v7kkgul&>Upn_f4aA~FQ5=Y z80uJ8_ngA3Rw0Fk#zxYNai2oWxg#USIziYH9B0$j$q{pBkZ zk9ZTDTiGD4fwadw#Py8p?E2Fu#l0ca@>dA?(%R?+ZMv?f?|h6iSZt&!bl)BgBQ9WWln-*(pi`I(AB2)TaFmHzE_-Xl!Ro3Q=IXBiC0 zYqEQSzS$qHcfL97Tqh(=j?MiFy(Dd z5xsJZzKhSYPqVi9kb8akE5^qkGOifU*>`aPV?q$%8{fsleM0u0US&6B?XWweuiT%l zLw23;m$=}p4f+>-=DKVSvo&%Ke2VkpZ%5%dgs?*686Sxi6lTR7KbZlx0{yKc-+Lo; zR*2#{nOH0?`6u7`PtrGk`w!CG+M~2{I1c04OF#JOU#FkE{qyv6b`e^FB@y@e(~lQn zRzOczVPpiu)#BMTC9>1zxMYA%o#W3`I=!LQ2ywuDFg}khta?DcrDFvM)M)yM0!yKIIlNZ#vVT^SJDiX^|N(Xc@XM%z;Ck> zS$)&_spYv^B2`+XcZdQpdv26UGPv1ip)y%K!!}zFqXN9PE#DjGNW+JD$c-;5W7CC9 z!yH(|%3@s;v-g;g0>T}YXQ4rdT)Ry1qkZT;;yHcTN^`)5YRJM@RYuwo7mt@pS|Y9U z)#nh_O9^3cDw)(&vxt*I71S4ki!3;?fn8|wlLgo2@FtveR=jut|QfVVkE<$ zSxrGai%1qpRIxa-mLA>3qXDm}GgBTOs(oVJ{9)+qNrXxaoGl1%7YT5#!egL3kpmj6 z(B~X#^|F{IB#%CjsKh{ZAj^Z#wxpp^41yS&iqu0=PB6vw1_>nkM>)j`53V6r*B5^PX|A)w6o?X38dWAIFbzlP^s$QDh(0z@u24 zI1|Sh_f#zPWS1dBj9eE+OnrknlDqKl2>#bRjMsyBiC_Hfhe3xfk&r_#hD8A&fz%$Dg;m81;Wxg1nipe9009ptHC41q<{4G1NufFI_Gf< zOST?8E$-5FgU8G9J_*4-FfTvnI1C%IYOLz}C{MzcG-jwAAES7gj$A`P4X_}CZH2T8 z2~*BBEwD8KR)|BHL0+pz!R)!vlj{0v(&t|K402#!nsyw>l?)!g0|C^lOW{@0Yp4gs zGiWTIbU~Pp%E0KzFm+Wr(C*fCEr<}=1FyGhR^n{cD!+oM|JKaUw7iB4w^~HQE|dTA8yv;bs$ZWAG{7D6TbYi;MwR2!|^O0+Sjz zZ3UTlw3aY&@em;_9_41~aWO1x!L?Nosv0!A>6Krldrv3?K}Mgxt`WXZ>BI34zyTsC z6P;b6en1%tgTbI$pRL%~0Jh1(A-eYz;416Hsd3oQpv$qu?~|_VkQ8R9B8)+eS^=ZX~;1b1`= z_d>+XPT+xeBiq+nycunMRaiGzc-&#md!DqC$5j5cV3pt`pXEy|yH<7?<>pdlnz{>y zlqmFy6QDW-BG*!_Qi#^l>=r>+i{S-ub4w`Qqvnm3z8hLqR`yVN3B+gwAC;_w0FH2`Rn=>15oM;~kPK$H{tFS9N{ zxFvcDV8#IiCR1w)($~NKwe;#M zuiDQ4ShMh*lul~|jE|3}ciwsn_w>8z?#BeDlaDu~PWHA0QM}z}KlgbuQ}n>>-Jb+T z<03pju#jzPIh65f@iKr&=~H-!%2GflfLA$3GDM4U7+Brhp2tH}o*q4X1i=Sqh=ryP z>wgVIsf9|360gUP$i)m{ZDu_?JG#=d*Pcb0Gz}9>uG?b>mKl%kZ}IhfAi8$p!*< zXP_|w+#6a?w2?{wnYk(JkcTdwN1x8j{(fTrGU+>ej+TKN) zXz!19=v+J|g;~v_`lnZCZiFUR4J{HOA?7RMHKlbzNUaxhLV_|HApKNYjM0>~e z(bj1n_{6?@w+BdnwO6^3%p(fh0TbGaB*AqMq8jj+?R{8E1|*`D1SIXJk>2a+yMO;5 zQmXu;^kn5H1O#A)o}5Knrm7+qoh_nkw>CG^;xxG>$01~MMDCGauO1V4VL97WsN)cO z)_^$|Goa>Lf`}u8U>*lIXvz_A2UZId?_qSIk;1EFH`u@uu>)qv->W3|URov*Jrd3q 
z!ArZC{k+~3U1vB$T>+Z+&(>PAal-#sW>HY6NN{Od9Rt z%3!ZaDAah>VeG#N?^%RcgCgx|{n#LFp7{*70L?cy5XFcUNi)2zg=`Haf>^GwrU@pV znYoW^mSpVhrH?Yxh$- z?u9Qr`{mSE-J0&M{!O~I@izeeds5k6AN#^RqRQvI!<=}pi$m?!L+-fTr|bv`suIIt(&(Y)ElICStN@33u&N(9ILFCQu#iRfT{Zn zq;)}>GLn-(0>|t#4K`7eQj-yTxeMTaagn^68}pH-&`MF2Sl1d_u__W&*EP_bUi#|A zbosNLfc1O9+ky{b%wkWqJSxqq5o@A%ecYRetV??r`9-NLRo{Sgie{nCU{H#wS;Vdy zENAJ~3epk@nv7D(`C=Np3JwU3LIrXgNo{*kOH*y?9d3q^t97}LNCAkZ-Q9_l4tT!= z?&c85R`-;Gn9Qk@@0QVEOrSq<1xzXoX?cc}iD-IV;86~QD*&{YUTot|F^x}M2N^;L z#Pp17P2C1-J@df|!6|DnSp+7ePjNK{x(Nw6V*RVpMAu;SGHf*kwQSYt9V~Y3qolY4 zpua+dt_*;2;%vbHtzFX6Iz6LJl984 zy0l$ahA*KlEK6wTVNnes3-m*+!MGS-n+(X{$~0(1iw^61hUvO7B@{%fMAum z*a2QqiJ&aK_k3@orHIXulFpH7Mr@&4i9tod)VXGIYBH@*1VoLh8eCW-YoWxywE+DB z117&RYlvD+31LV|Ed91Wu3^9#n!uxPuCSiFscir^AoQ#msEfHt&ELL9(98i9W#uJm zJmoxJ0&Xte%9fLK;rV`WW}Le({_)uB<>gIC(@Y{K<}4iffQ_q312PsjR_9nSHl*fQ z^Hm)n0hwb{da+L~FGV(0y}&G>qS;xoABvGy-7-y1W21*a$yYUw0h3~AWp|3GA^d(E zj?O-ks8_V6J5S#t`e=c}Tpe|jjpV36h3)QcNbQ}C_#DBY5K-eHN!3W9jez1tk@mYW zpR+(~sJ?|rBF$#VCpH`Tq1dcyAf4r(!+h0+Y7B4=Tu2jbr)nai2-U8x!3=@KTTVr+ zV*vFIU9fn?36Zd6RvqaiZxzInvF#AWvb+LQ;c%GjHS;BBK)M1dlaZ=22&;EFFBTQh zOi;x*JB)}T>1;Vtb)JQUT!c@@oQ}@9KDw3;BuJR40-zDJg30x_mLFjjIn1ZEd7`eC zNnwO(-^~?(Xs}dUM8}wyuDhGEcRX)*lg$MNYn^Hn8~F1N*Z_hx^Q8Bv$0VjOpdT6; zn~eqo1ME52LqP~?khgMpt)8X&ybZG!7-}quC9i{UQyFBR3U3FP1Z95JsH3y9QxNuP z{QE@JFfVnO9J*jwUF}0KEO7!Pdv$-nJUpC^&GZ}kI@NGIxE?~aD=CBkxv<%Bh$T4g z#}1kp8yibse(g0hf!6|i@T+&;On>wJf1mDtIG*Nk!I%>@tF;X?Py^tf8qaTi?dz!I zLl7b`W<)dZ;U8PXRbzsFE@2*dD8ECh9j24g%~H+bG_b%(Z<&jcKWhjW zK!-!3Z^T4kWRUo%hYuc*24V@bpJ|FZDHOmQ;0Ce&yM|Y*)CpicTVqY;<^ofIupbf+ zH=_NuPktz&^Dke_66>bW$Tbib*E0k|KZjVv=NuDn;0oSzKi5P9S#z=O{u%A_hcd3r zJ=%_EWyz2ltPFU)BAyZxL?Z6WJd z{OMDm2d|IU={TGR``~=j^Vp|xAMX_YoEOLBTF`1Xw_d=@zMid@ z?h79n-<;l~g}_(l4$v>}x3(Fp4!v~%&a2PJo^#foLY(dU%I4nz$J>|-#~;cA*SNCM zAtxkyz?a_YXnloNjP#>mV^EN3J9H@;tklp+9>p$%FK9Hf0jXS$($$eKVlI1{zW?U` zK*Zn}=Xr;-u#TVIRBZ=o=v*gNmiibN(Q>2|GBVHt9^x%}hdVeUM028SD3~dD%fywa z_n}oLk1uCqC`6W{m1`E=Ciuo2l?wao1dw6y3!1OmAg*T@npe%30@%-IZeLCJ-s_oD zAm4|k9}(fJMQTqhBPKlQk#lGoJ>3Wel!QHn3uuXZPR4J=Fx>`XxLRQ}GB8wxo>tCYcK8wjok%%|?yvJw&1SRsc7 z##ndV$M8(f>L#UHEjDyv42vP1ynE{|LO1yib^n4t*#kVik9mE0dIziw$|X4|4>0VU z5KN$zQy1%z7R+ttA8R3--W@Q5UCij43&&}Z0tk~I&T~;^ zMC#MYa7XVWFC4P|4UKguP6i-i-9r{}9~=->Yx^2f3|iGfcW~7-qdZn4dy0iD0(m)6 zzT?>%GgLatpodwY7W5doP^;w$GNiP~vn~F~AedV6s~!NssDLK#AdeSO^bUv+51F05 z8YCbeQmi^;t=dPMQtvs81h0MPGq3LuvR+FXn=%9)Z&16`bh}HfXWC=~GfCW>!)I7+ zj69^Tip_|e!V8Med)W-1Ak3lvYWjsxOKijX22qkhRRAQdMdMRI-rZ#&!JuTEYDgR; z!lf6tCrLU_>H>(LOsB261M5MFNQX+SNF>t5bbmsGYJo$4aOJ42CzfayyvNm%*J1!y68a9=MP zHXAWVbWgq>91R3!a}&1!m=u|C`3ai>>y*Mj>J^)U1!Ue-cuQMEr4YTf4<^g-|H38- zjLD7K-QGh<+BT$fgl>pYIXTsKOhSwmUUaxtA7y^)4T$I{T?`EZN4p){v&QcK)I0QIaB(-xcYNTHpoEa4VnqbW3KF|e+@ z8#B){s#%xnD*U<})CQEUW~x^R>}$l=X_XeGJOe`5>Z}7MV-BlP#}fQXz_wNRYIF^R z)s7Spue|zd`pTETMn4+Ut=l)#Pu}<`LjNo&AnKSSFk|5>M3ZLzIm0pUTJZY%`qHyk zp9zM)2eTi4c%R^sah?q!SFWNT5>ypjmWr`*VCbI0LNz9RizL)R;3Oo~oEPy_k=&d! 
zV*rfqu(tO05M%@^E-fyGnP0QM{?r}z$qI@>L?y$N^cC8_5{i$p3WKX9a#A0d8&(>0sFIg@Z8&u{c?QHnL=*}&OXQX zZNv5F?+_N*7x8>QwquOr?B!o)Lbo5AjN0k85iwq2#fDCVFi&6HYqp!M4PAh+XWoo# z%nG}qIPv-E3`KjoV^pfc^2W@m94eT_4-eBnpLIN&QzV7xvmc(VVbEIcv$~avcBWuD z64%*Id#`hxogLrXiicYU()?trYpFHD5GB2%y>dC+ufH>EHjw|B;^m z)OiGr&Gg_An8=4zEy7Z}g{7?yCYnRAd-8~CkKhkl$rZN^ec8hp(1t~@Z?KCrZi{4s zcmf?X=!Pgqgi!+o6zD7!Zhl6S=_@pwVn=4Z50O-P%l2MoSP`^;2yUdC%1P&_wzH0L z!aT@Ub`QHDe$^~HE2bZBj&nD zmWqjmx3KQ-fZaKl@-;oUfJrBGyn*616a5cb(L^dc3{M>`Jpt_3#lJ}qm{E!<d3Uf-=MW5=+PdH?l%>o@glU;p z_=~e2G>x;!nibhQB`S0t5M{ngO2AzJ@mP5P&V!jESPvi&9xmuTZ79bW99Km&@g_p( z9B%bva;clp{>Z=%_|z8Qe&Yfv;IHy1D|og5S!g4lr*vfp{^(!IGZz1GKh(g-G=RBr zOg&YBpnUbkOQ8TyvOW`cSFz?2VZ4YCNm?P*38ssr@<%`xD8_-8qewBKW}-VUU#1!r z)w~p5jco3s07omSXjW%YgoyOj(AiGz@NIHXS{>AqzzszATXpYSz8^U^iouk? z5d?0}fwG#pNi_miQ7Jc1P^@qiS%Xyt%W-{e6Y!}q*Lf~(Wb@=+79X@ruF{QJ&IbW9 zDj0~)q{Bi^B9*4+ZhL_}ir1!RpdT1U6VC>hjGVI1o>s2*NQw<=qELojCS_6S%F^5t zsWjgHa;lyo9WHByOIudyZ=H<=tcTcA31X3mWfn(SDqq(I zF^6E7CWcB_6%a7$Tf$_BJVtvqKVB`RgY%7@5NOgDp=oa7#@=B=c<^+UmX*zM-9DC@ zCs-q73JMRLS{77Gk%Etkf)rE&-`vv7hHt=1-V7#$P;N}?>uUrOK=e+IjSj_)hQj5> zJ!C#L&5C0xvY@3sHkcM{($`q6Ov0@@`r75|>6ziHxW6ei3C>Hvc|25(KP4YavRZ)-EH;n!uRJw5$j; zJTM%@UyE9`H}NuRxQQPvJ#I`P0U@Zyx6)+VS8$b*&>Mdc#rWJm{a<#C(zR zOL**y-421(?)wOaB9pWoe{&%{i@*57zhs{L_A$}r(++#T>%o2YCEDgAU$!5EjPE^e zUd(mes-W#pW>M#H2vITs`xlB$^n_bGbR&Z#;}EI5OE7?E{5M zs`Etl!K_cHaI{4v+@o9Dq}q5C*4LgPlwgSU>?3mKt(Y4b_c>?m!JQ{0>YhP4D5lY| zu~1~Bffix8aAgGH2;)!-w2&+hy0Z0l@>dp6=j5v>d$qWD6j#o&}8vGTkk# zVk#Q3w(OsnZ0C#lW^z~$){^TK+N8h2G@NT`n|MmtbqLQ)au{aB+U|=(WDdm|Dm1tz zTRXry_J9!p`>7Hhgm>1pJYZZCQ}@!$=2SYwXs1#p6=-WX7el0xo;-v%^Dz z%n%+{5ZZeC`pLuHm45y+3dU?hXURoaI9^Xb`SFj^+yp5~YOfI7j*T7m8qIm*+Rws|cOsT`a21(T{Y5kVV@XlZ0S+rbHqb z6tYevtuPwTx56mhkNs>ks*~zC@LLf)!1`baTqOGU7=U~$T^62*8WfGj z-l}xz+E@g~Jif!eKsYxbVr_0abzOj`0E<~ASZ5Q3AW!6MJxTa0n+m{?u$4d*iajLo=9qBbOm^^;|l&7 z5Zq6+N-o)T(qYmNuQgHw6;?4wA)JW6mcG>!VV|XAI;PUw=dkG_Bh?}AJpbE+uNSc{ z@8SyC0PEO-wV?#eL#xQnY8?si5kwI@j}Q0_6yks%Tdc+#V57_{la5y5;7|%AgjRak z0Qc$U%2jkR(M?Q9CkFs`Z%G_k4--ywRvAJMC59Ua^{zu>twA_5F@F=pTU_fg6Dl~o zM0vw%B6W?ZlKCt}vVUvHmme>_J&wn~NyeiN}x9^b9%Z)@OJYT64AT z&N&#z9M=L3!iqtPFcux0orDs?oh6TkVbhKa7Ww9QrcoeT;7xB~x(lmwi5=7GuTUS> zKwOzweoi)|OhUnhg;#<)a4b*}b75Y+e1#H~Ptp^Ja6C`q0wr>@D9;|DFmc>DytwW- z*jr%8w!n&=;2ssSRDqHpxP2FFn;U;&aSjN}V!D6-X8P!(n_=OTlNteQ07Z^M0j-4p zH;^XjNQTBn?SgVrXTeq32U}PtEH*%yUs73|@RqBB&6u%k>A$RWZfcFYl`Zr|WuY#Zp_pc>ZEUC)Yvrdpf!Z4(JY?r_4*` zvWoT9yP)8p;H4XojYDHHWnYR2{_>r34cYM;f5b)v~ zKFx%GnPfbZ-@g1?;V@j-{PvlV9*C9{h{D=x`}85M<#(nG&?lZPu~D#z`)Dt-erMN4 zXZS2U6WkYLvMu|}I|W}yYM&I8Z7+I7d(OKr39K)l{hP0BjHmO&cOk5Ezirx1W(~(8 z%RkS;Coeh?ZP}d9<9DRlVq&w__&;;3lIT9+>bTWDD@Ysk5D~b3%4hb^Hngm2F^)*o zOq%CI|5NbeIW*-S(Uauo&X)>@?S=)OwaZ?9=`|TWJ5zp~bMMP`h;w+_jBR_P58x*R zj?+G$&6(e4W9B|Bz;xJdM8^>{{L_B;QsHnt_&-0KyXX%0&|d}rs(P3c`(l$)s1H6O z>%qDZw;boUoTxPD)pi?}|EDlFg;(e@3DZ@Knt{ie-bVqVs9^>Ayo8WfPbP;R7~18_ zLnz#)A0rK2OJ^GFA4$(X^KAO^E3c;eMBH0&Kx2t><(!-2I%uSOg}tLEwyg{06eCjs zIKnbH9fd|{oKfgvu)$LT>I0K?UJYK_1pX)_$SPsSSU|}q(5m}7TFU?ppB@sNf40^7TTn5bJ7$)z0>%nmz^a!YUsH~_wXh5jJxMo@y zjW%W&XvMm^vNexULCd*oaFSYcqtN**r?gf^*CR&)|8*jVSLS#w-cd6(+`N68=xGYa zkj-HO`0dlV#|Y4~@LEcqc3g*kf}3>|j!{p1j7WY7&!sQYmN0H{{yfX_FZU^wAhq-# zr1MuzijK$ju$M|pLov4E$z)pHoMsN8-^>%O#X7c#!YbvT2eHh56L`@8dD)wg zi%8$aSut})9|(oj!b0rH3-#@k7(%9`l=epiAr?y*a-nSvS1Pqc=@kU@wel^7ll7LXs76))EziT#BJDj!O- zH3(>|G=LUSgiQ&h=0bqaK=-LC}m87Rlk-$n(5Uk4*y}of@D&n|P5* zRvzP-`)pQIU>PSOZB&^s_Ni2k@#HhVH}<+hr+h-;ehrsw1NDZ>ij;4hrnK&&#R6DR zh(aCK>SHYThdW@>5P+>pAk#Y{!qqZ}78+>4i%`o%>b|V0=s=*=lFjI#n#XAOg@OlD z5fJMzC3(#idIH3x3Sy_r!({v7-j6v@rCkV{DIi#Q?8p#S78k*Hd$Bw{yiNV*xwcs7 
z)n&X2Kv4P#NU^$v0WS)KCia)m7YKNeTHIkX;x0ZWn)vWw zi)(S8U~O67z*7LxUZPsd8r5kQNj*_yQ|Sivkm)9@CD)eo=1YZWm%NgPcsh0=xK>@W zQiSeiJpj6~6*g;$VPFl=VmBdj1g8-dt+i$kZSjETtDu++x=3`X*nNXu@QTuRe-tQ< zt_HFN!!S^$7ChQsX@&|ekJ)6BSO_hOlfSh*<=CJ{rIJr1^sp z61|IXq!r!h*nKwf(kxaMh^LUF67ERzaCulvaj*fCH7Do}pP3&}E7t1r9PtFq`RXDY z6?Ynp!vnKV>JU$}R(&mZ!fV?EoLw^&)@o5$>KZwW z;p*QdX*4A&pU$BjOSIV+(Vm1@Cah&kr2c*@&OZ54fYS2mwa#IT zFTVNpo`^a1JKN-1x4O?e8@mhPZlJMvjw_=uY^t{sQTt3ddsOC9pSB1ttfU}}LdX0J z#+4F@&GF#f`{<6n4GWnNN*S>GVj00y%zi^#b^5}~p9Uhbk@|-^(kowlo}fVUf1ae_ z-u^VwH^4n53=?5!!hgb&nK16wQnC{Zb0)i9)pFj|8J93^vGYgjx<7tP@wTzW0#jpffLCyF}`?QF41O16%4%yGNumVU679EkGB{y=dIn z7ru0ZN@g=CI@9nh42u|s2tmxz3IWiQ6O-vzZ~ZdeAvR!XdLE$%o(-JL zb9jW6b%_AX$L)XdGd%i@zYv2z*(Vc9F;t$pOEecg_L2(x)uqv z9=X!VUT#ah{Z+&fXl-WCv6qTeq-!B&at&|8F3*>4%A;1NfeLYM`!04Ms~r<-*pmMA zKmA9kv#TTBefU1~3MrB~C{QS22?){(&wWB{!ra^{LT4@wJ=31rhpN)R<>oYWwKKJy zLzu*Bzo{Vncs70i`+t{y`h%aRpZ(y?^z*mgz`}ov{^AYbIbz?ND9OBsWmE(03UfGq zcOl)sJCSbQA5W{uBxYGr5j6Iq8YQO&d9;tq!KJg)|;DHB<y%Rr-oR(Tl{bSY1wD z7}d#GfBDe~^6&mS9#@o(R$?g{dy&hCkzJfwU=P4=J)ewcg#ht3AYc%2l_wxXgz4#Sc$cK?4Ugw z39o?d0(a+!Dcud85RBIrm83wD0LdT)3WTPD$*92^pGdKDgjRiGYypGP`fV=C4O}66 zxNOv{wOAjA7H%FX7#uFO;}Vs;P#TU7sbGcXy^ZjCgq25S;snd}+HwsTs4fl}LiS-J zcqueRHd{~oATBx=9C!~4a4fMRmu6I&fk6*-zLXj$%ezj0&Aqm~wgf(H1NRRa?8Xv@ zO=5!ph>%r{Kzm4k6iO5lOuDKNE}TUIRte(};|i@q5y%!93s2b18Qp21L-b|Fqo zDC0zEB~O|QZ*WEkM+hWUIjhQuDfBlv%cvlcBQ;GGDUQl1ykO$x%8EVavkJ?Q7H=R= zWOk4biOlLyuEZtK*5SC5cujWz}M_WsUjAjJ;?m;5@ z2f#nFt~)bqZg_ni`A+KzfRQO4((fErm4OT0z}Lj$l3fYw6i=L%0Uqpl9D3kcih_48m$%)CwMKn_k)SQsM;EY`;0|1?d6Z`bV*8~~8fkMl7pCJG_RBgs(#=gfc3ShEPhMf|1il=OF}27+b_0a35Xc#g{zS@6LYWQ{KnM@=XYyaar`| ztT)kK^egLwm->ol#&7=id;Sw5l)+cBr^f0zIeeKZD47yKSAOGVAF}>wX?4EO6c9f? zn-Ax~zWCfve*4(dw&J(nee!`%&Dsv@n{pa2;GT z6)~P3eRj@2nL{UuHg#X0DF}SvwvB7-tIRmYPaDpMt!SlkKBO&Lkq&x;?%WY@CpIcMC;^I{E7=P;W%=fjclkNxRST0OMHB}He6c?ldq`3goeIcgqd!U{YR^419^EZDARz;-8{L|(|7}*sV!Iy&E+ip z-XSK+mvEea@W;QKp1snW{)pe-`n}(!{@`L-J9z6!kxoL)@>JzY0{ zG%%SA!M&QDuV@OT?~r_;3+dkd2LxIZa0#8z`p7PU9%a}dAfN**XD&nAsc`)P!SMPQLPaSq17N% z9uNw&@pbNZ&jtU3b_0h3M=dUUdWN#8^l9|jfe6Tyr<;Clk+Q9;4!rF@2YZtpgS?_qPX$={Y8z#znv6}0Snuyo%#onvAierk zWBb0C-o5p95T&}-nxHL*$PPIYoHQc%SE}TaC;h;&AZt|A13N@fSb~RFafX{IG%_A(dcW(12fHyT*IKJ)rjqkN)Bku;@=>q1zEHnJ@hKeB^eHBca_Hgj@QF(VFUH760I$KlAzkozS8m0ktfm z7GVQ;4}o%UJlO1vhJ|o)c5$hStLyI=hOkl7dTlne&_*<;4qSB#GFmrf7Kde6)`9(q zr?afC)*&lRtRrmiAtYO5Kr8Su49}8m>+6=(gTU!J!7H#sy>x4G>()C&@INY09~v!b zbG?~Et923PslcmZQr*h}w?Np9Ex<|w;V|g-h_-gO@MN5v$5Ne7?|t||`sJIqIee6% zoVM;b<&kIiup|%@F;C`)O}q~X@(P4eb%M#)DianO1YQ*a{~f}-iU*Wr({0Fx5l$l( z{RsF`AK4Ftc&x2WLL{~!c3OS4HiP#f(3ZatcIk5zHXuU)LO&2dw6`2VJ6NCd9P|#X z2~^^!Z^Ql9Oh%z0#!CRvwM!(bZ8@I~Dwk4Oew~AY^{5UnLODVQii2xop4UoUIw|yAKMD+{usG4*1fPGlSIFQ59rg;c21+im@w8BvxR)e-ytC-beSy~M| zshe9*$N_}%2#chigDsNpird_!(%c40**==}Ch6BU$)I2gu0x3T309gitOurQJ;YmN zEQWHnDFfNY96rWLdsfF2W)rjuDouCxQNjz{;La}j1y)XNCbyLytp7aC?L4G? z&etAE9EdH`{qHlJ3FlL&qlO4G_SomhF2?vxvaVNq+e|^^x*iw-qmHl?@*~r0OqC2< zdhYmyd3j#5bL+W8c*d9zp4^yYy*N;A&YMcTC(eH{7p|jgLqm?$X>o4`WR5-w6O5H} zzPV1}q!uf;f}CDP8MGMkm`l!^inGQPJ>Ob?At51nWOL2AN06}X;BN&oyulkC!wGY% zcS2A0%KCJ8SzDXh5!m&>7t;b5Xp<9&E1$!9x->=H01BB3s|vv4>f_)W3YuA!GnES9 z5D@kP9*Ofq-Rb=Ke7ZUJ@6xT=pQK0g@200~4+ucsV)G*)6T7vv`6NxueVk@iKPJ^A zDLx+(z)O`(t1rd;As|*EfI4RR%>Kb1S$S447U3;Haa^53u*TwRz^pJj1(tg9b{i0D z;&_n(XXn(d+emDe`{433&!lTuhMj!*!Zw+4s_^FSA1$#jk4c_ClsbUHC~xdy$yZ3! 
ziYtv!c=DVD&Cm_Xe$;Tc4BTiFV_^=C_%c@G+Vrt|o$?$I^fNz3<`WdoeBT z-ARoD2>!^LLR>umE90cgCBL>XZskR@H%Xb~&n1kcZ9S$XETsqU&8G)9A4A(tjeP7E zX`XPMa+GxyXARXkya!=~qaDY><+0X9&>9Q}?@I#)V1nppSMeCEMZmh4dZD|Pf(Asy zIjsd@P{kQQ-b21I)+cy1XE!_Z$cxB=JsqPU5}+Z-vN;lmI9Fy+JA`IQ<4u>UmtHQV zf+Xz_zTAcpwy_sjkwutL05)ck7nMh7(|w}nVx1i02cRp=xvtDCOmT;}gQ!%9k~lOr z2;u|T2ZN>OrILN&{;Guj?Jc5QNUzvO!uMOKpe8Csn54-MD*mJYdhGSGip(4!Fp0l88;d@z&b8J$`QO!tu~HeW?9}%f@_b2op_*IAZTQXfR>%+0~Uj1h8_s> z27)}on1K>%FS-vsXaPzIa`4XJ`sw^dk~YHCqF*yKjd>k_T50Y{GD3AzS_0mhL!)13#mDPf2eY7Z6=uoStrPN z1%a21it-{9#$eW+T{iyG1Nu+;QVzdZ*?{_hjnsk`44RQuk;TVo`x+E4qq&8kRBQbQ zzgS^4i5Bu$lR^wWm7Y8Iwba=>kY<)YN}DGiLwwe-qEJnXnbZO#W2(lApipRE6%b5& zcOdiyiCRGwIu`^~+-wM_%uhM)$gci2@(lNpnWHT%5M?>y1kk{1@lcR-4-YApBu)@y zl0^pp=2O8+ma`!$j+?;x4PsSjO%2t?#uc7gO?zhrwpZgNY z`McA{4h?Ni`tN(cT6&flz$UFeo zVk!-h!Iq|$ATCxvlJJW|lsP+2e|AwKRL%op#yPTRN5p^FGJ8~^ zKtig)z}^cll9@ObGZ00V584gUC5Vz1Ui!?B{r8zpioYd3Hl)xevx!%Ho~=z-p?J?W zi+PB9PVe>F?7c#f?Ph&aVaPt$SnMMdcfLP6PZC+z z;5T2{^*j&ZOb>$3&V+Xvj|?CaG`%OTg~qWnXrIaDHa$Fgib8RMkUB3~jKiwrJzf<4 zef!8e9+9mNfAg$zCPe|uDRT;0UVeOIS8UAZIXYgfhg0L&Kbd3pr`Lrt;r+2NUc^t_ z;wDf~aUK;&r2ssNud_AuDtZQOIJ+kV281ogZVJX7evHe~xI}Nm3f)Tb0n3e>!9!1- z-U6#EUAo+#YWh(Sau3t&{>N!z^*(z!6G+YJz6C5}9}68lVGMXKaY1*sbYTs{63yfo zlvhRIrSlpooJklVHhL5|Loa1pw}6S5CbERVLfoy4OSt7eiiCl>QRoEY1yckq2a}6& zL3(1`PFS2(Mj5z**4aK{;7$UsSgl~zF;C7Rm%C0{0weV?lP9fWB@hyz+gRD1H&<6n zrOLg$%Ow=Qo9!z)!{i&sAmq$clR3P(u7}rQ7lq%*V>$2ANakgKpPCHdr^}%KS1y64 zZ|y~JLui8rn)}_n`+6OPLbf(_AmHENUdB&J+vU|6h6t3T4DU2>3dd=R&8^KnEU;#A zgU3_xa$u+xtoeQztB%T)3k;COq6shELy;*`X;gk{VvEUfDn1dgXdrk48C7tkGY_6a zpsOTSxG*}Bu0MB~%s)@DhB|4L5#;-=@BSf4D6slY&mhEu9)L%h&Cl2fm5J`&j)3?n z*b7Cy^C3n)6ifxF6O=@QmB+xWE3nSaPQ%Ym(!f|d+4}0DxQ@b*8S8{{86Th$78A?* zY$Ky=!}C2;?k&%V=jI4uy=$~L4WV34J$Xu6PYNEPAT=~2VCk*E_;f)r9AwwaoX;5H4M8K-0VfRq2RM{1D0?3Yz2RgT<4Dz1mS1piWL zuMv5})UpN*gP)_l1I~dNxJ;W#Ibh&H#Mna&lKMO#b5q2px+1S;lw@eMX4ZGF!njml}^})6QDYgDGWoVMu|$! zzPwtR#k9rzx&QSjZICK+cbU|q^ORa9`&V;EL%MhKUix4Em;XKe?Co3dU1%ya$XFPu zR0WXo9D8;f7@eh+#o2qjq^VVWUf+UZ1kAU9r%?m1LalV!X*83dYn3CTUo%Qu1zsns z#U6Mr_%R6EF_;*JPBO>HSj0rAbomesH~VJHB!Ie^_*lJ^8z_{P3T{JbDbJytf^twX z2`?#hk!guDD4&;ZZ{uC5Kv~g>&r_iVA_kBniB*tZdiLuy0yrr4zVce<0PEOEs$2UH zG;4PWxsJJO0j(jgP)@9^u0>fhu1Pe5-T;*~m7dH)wM7=Q=GLavM0w&p0;i2*vf5}J zRmBW|-dj_S!yD#&@SXE1-Eo~N{_rm@zpe|^V8+pbcCdjRDNiogNM(iPb^nO0Fl5@P zB_(HFQ!#b+f=_G#VzYv!1xvTklBRAf6Sb6-^pG&wmRH>*XlLTd6s1xrsIZ`QBRiDk zq*KbFG3pkKC~7U_EVwS&E`m9MVIfFat9^yVl8GY3(yl^@{Z_y^I#EJ|5V8RjsH)Ml z+L(Ym+D<)zH75jRn0@FFPeif6pb^s7nS^~T8oGi_rPVqq*2G3_;ngX&x zt9jZmb(Mjn7J<=a& zDwc7mJ30xpL3osjFz;*x4~T-S*@MVB#Ky=~P=b)L5~hu?LS-zk<3Nc)l`xq7rk0$< z_ov@TONE__T-Wq)y_UW}hzme|2s{!7qm|2)Q`SDH&y7+M zZ4jL3FbWXL74xa!qV}JquiM8Xqs4)P4^b$AyY^_N`CIuj>&7|zqse@*6e%dEE7oFoWFTrCJZyLoX<>1&03Sl_?==;%uV#l zrn5eWHX#9(cAWOXJAd;P&*1|x^7ie_+aV!Ucye6!{FA<&{+%%|`|R0(JFt$9*&k_& zzim6N;CI0N=vT<+tfQZ+WJzef*5^t*71*OAv@2nip}S85^1)+d*ane#o%1QAKQ{SxbBd+V5TaS3gkY@x#9-CvF|Vr|GAGetnjhp458jDXfCdIW_&I` zF-H1FC;T$@1l`IQxu0<@%`tAT<+o!z?W6BAg)Tb6wb{#&*?(W00Qv)DW*vqD=HE|L zQgxM^?|mBvrG?IY8^Q1F)10wnoW=I8W|(3{2!?H7Z*zKbJFqwstF?)T*mWBn86_iF zOPVL3e|^r(XG}8v9vQrxw$>DE^}zOlFW*E^+yQdf5>*JdiAT}{Th4kMVVJV=;?f+r zZ5aC@Fw-g+sfvQau%0)^Do@he_WzLe9zd2}*L~;h+^=)aGu=Ho05gL`WFiwJM2Msa z28e_tE76iyRg@)J!D|^+-c)7Ut6F=#TP}N7+SO{;ilj_YB1IA)7yx3BW`IdOGu_iU z=bZQVJ6|_J@4i8!U%&T-``vr)iT`uX997Vq7b7IDrQ>%kEs)?^zK`eUQ>G>i1!WUs zV|($EpQxxaOK1wxzmP_M114rSP5myj9ah=rS&I4l&-mj=G`DecM2a@E3B z{M)mjHE`%_$SH2X-0L{di8h4W0jN_271(mLhG&^d3#pQWhj1GUL#Ke-!Kg(0L6Ui! 
z;MtXJDE(X)UR&SVP9(UNs(Dlhr09iZl1qVxl+oDe^5rY(`g=n-edv7xB(tWz3`bHq z`B~&^)vO~!;I}v|Hh0h)P^#C-MG!8Qw?h>ip4~h#p$zgZi$@p@X{c)emQ|LHbRH)k zkX#BNN>()*=i?u|kL00VcEc9bEP6be!b-y^*v8PbZjb;ny3QsPw_=~^teEPZ1 zKb>BF`x}gf^T;79WnRiCV_uFCutZJ-^+n=(!j-6R0WO>95-^pH!}P#FNfU}IRLEyn z_Ul;4_&P=(25aBhCmG)(`3g1idWqAeZj zI!UpR+mMMgsv4b6zxyx#oAjY4KA2v3{-@Je|75D^pwc7fZj$sojkkMc0$D(`X$Sf4 zUj6R3zMtND?E=vUkOM?tbOs8+UMG)lhiX056)mhECTD3Y@}J~Z9I<(f-8%OgF}|ET zohHsnF>5|K!h8_%t72|8fi(}^97?x(sem;@InbeH45(QQ1&V+W%`iG+Eb1JHAL-sP!` z)4b3L`XAJ^lw(lQunU9QszlP?8U||Qcri!v4*6#(JfM(7?`ql+t2b|upSegA7Nth z`Ym{zdS1cvzjgXW@on;^W;`?E+?fu-M|nX+dYS5@oh~)zTNqTExMr~MgoHMhs0GVJ z*OH_n4&RcLViIS;9*z-SnE(R{FM=~!jtV8$zSSfFS2l6KGN4AlEQ<1Ma)4sQ%eK9Z zEhzV7EnBuf7%!?Bxw^3l^+qdpcJNm0f@QQSgi!(&fE<|G#zx9Z8iHoCFH@3gVIIYI zoseGv)rJr_)mRr*c=5ARVr)c0DOQm&lixCTjm;P!)S1;&Vm;vk7Ei2p0b?vQ6vMSk znZW=0m%l_akAvyG8?U9W{N0y%UIxe*^vV1GVBIo)n3WApEO%+$;e5ER1Y`1x49~j8 zq1=FP(_&j&f_U+sg@UJ*4%9|3KH?@e{=gWaRg#G_hM_W}z-A`~IqBs_u>sq+wgAIe z5buW}GllkzXU0fHB5YhpC5fiW6C^2}Lzv-#LU1pWn~`PHBQPN{mJkBuSvFxz;Oz3K z(VI4^K*fO5+dLQszzzw26+~e44!OY%t19fp&CZhCw?tK++4R@}VRY#2_uEjq5VZMJSg@4g?YBLM zg5SkABwUS;he7E$t@Eq*p$?(9$fli|#!!SINM{NJujjRV4mYb+O7!N*RQ0NgQ}%cn zfdXHMN+gVJ{UhJ z%Gs6U5D#^R|G%&Bf`wJFaE%2VZ(s8yo5)XKEhLypgW) z333L8Mp4elo5JZNFA{RljFUz_bo2Tsm|2Ye*pJXO-nij`Nyc29uJyn}W@d?Gx8Pg= zT0&kxv3mfx-`q5Nd4^KgD1eg_)4^j+h}1jgQ&qC(tutSVcMgUa5jr(5_iIFSFzhYD z5bMVRMd?ZuC+FRw6U8{36skr-@_`S!?(Rvr4H0K**Mo-^vSh5V`~EnPg^)lR1!2MS7^Mn1fQCjrBg6?g;g3QL8Y-a zHB}_%qGafhv-EH&6&&;^MkLsrD4xs6djq9Ib#xuO=SX_~XFiv1KXWE^bam06hvt|a zTdAz=-Bf*GG)=;9My{CT46gxZa^>w&&Z80>1sU>*8e5uC=itFZMXBp3IO-#Gkv@Q8 z2duRaWpa6n^5Im4ni-wP0cR;}cqROJ1&5b<#I<*CrG64R*Mv9m>{AnSRDimQLyn>p zIOcSYSgT!_X(abcgRFt{Xdw?Ml^-dV8GwxQ;in$Eh>>K%Z$31-3^HLEB*RXDb@of> zrdt{bnHo}fDkPpA|i=R#BU;Uf(Ln>1pK8ypW9OHxNMh!}w zim1^C`wxhe2uBFNltE)73u&NdGCa5y=s7xXl;2@sKvO!fRDgwUaeHJb*mm>~6KZ!E zD`|WDLVlE_h6cI{gR00wKN%gGCf{ZRhu=avbnJBc$N%Ju=}6mw^xMDt zo0K?RNS&u~>~@q;sSDWmB+e%K2+a)9Xo`G46;t(ti1xFG=SQd-gM+GyYE=1DZBh=o zAi_|ddGKdaZC*?I(y#r;bmL7D=IQKRo@?N{%pp~RQLtkUDKt_7-OdrMSYN?7#B*QQ zUYXjCcSJ=V<(x1Yc1q|QCRp3?c#~sma0HZ78dwOE1xB`YyE8{yR#gIeRSu15z^su! 
z@T=$FdNQR93$Q?y#j(jz3>Wwq`j&Dm94^p+j<9g%z%)7zG^eMYdL&JZj^fd04`Tor z8Pn-0FZ4&p!{Y2>YNT_sMwy>2Poa;I|Jl-29md4O0M;GrtnTLX$RDJ=3d(^m<8+v( z>d;s(1_V{9%Hb^=(68632O1^x&8UIoDu3ODw#w>Br0$?PB>Z81n0-BpvCbhI?9*wW zY4w$LF*s%-lSw|8h#2m=Mt;)@duxk*U=-A`xbNj7>SQ_Stq7xLo6^FY7=Zv5LVxDz zvCHWmjG>+W(To4~pBKFFPw#qhfl>l%TwU#FjZx0Anv3pSOeSNdToy_#N(b9p=%Q7P z%aJlK6wh5*L7S`PAYX(3p))ffc$UU>MG%N@tdV(9X40K%mVdLY_+|r#l*nrgtuVFa1A%`6tW+l+UxtMbK^H$M~qOFEiw-0Aw)(xa282 z;VQ(lq6wgZLI_Yr<^r?{fdzTRrs#IxCPR8_jWqOCtcG=KWnt;86W1bp+hhdGDe2GM*5+~lk8mIAyXLJ@^Od@++Z(v3_-U@gJzv94ENM#ZO()0@J4iX6*)&FW)->+; zRSGXlLGnN?1{)e!R5}N6zkr{T5`JS<6$92Wt8Qv)0_vvDD&dfYxdGf+BUl0bWPuH& z@u{0>aI81=4P8zHH!f1k>>^1aqiKV?aQjjKfZK#067DHAgN{Q7$nI{!tu~Q{*@ed9GcF;=gkGV;aMK{kv`+G% z2{i!S5q>TwZEZe`c2ARlC0b5SGgpkm@P%~|=fov&tSX@zKqD4oDTx~8lqJypX9*!` zNogIfO0;;Tg`i34+~@Ld_e0^xW)dCGEbZ z`u;iYW3I8U?ec|Mwj7doKRQ19S{#OVVI6qmeSC%Ml>0j`YG~Pq{-4j_bo>{``)t1W zGmd><%gsLB*ZBRN?FHQ~x=*=AJRiJ^Idcs0iqH10ksLEyqynG?QA|@j6<4K2hd+&a4ri*QxRa%VH0x>1WU6I)&!XTF2bPx_J=u5t^m5 z=G<_V*c;x)>t}AGm^J1u3+FiR*)zvEoFC8c^Ty?TR&LKRd=cM8V*#!nZgSecI=;1S zN>JePmcHcG(tZ(1F-mG{UHZ5CMmKd<0u8N|=eJ(3*>csm;7Y(unlv^vFpWpyR+>i3 z&y|DMpw;UdRUzj2ZXT&U4nEVVsKnJ=K^)vB0t*0tlo!rzSYM1qx3X)zL8{>bc*Au-+7;BsmKRCCYq(;hy&b(9>~%8>b?wF80b6kD z?1@OOu#(3r+F3NW!PTslX`Ss#Dp0t-(wW+(2~~xhj|UYI0dwLROVQYD>Y!&pFF_v3 z9b@A+)1f<$q-Q_&6o+sv_@eW;woLc7k-xxGhHOo_bn(JfCW#U-WRUiZUP@zgly(~* zCR2EYE=OhQ>bs+9`UW1p>8ve%1sd-0<0sRhLrshgO&N<~Y(SaL{2@1s@#<&|7oqh^ zJ0^cDPl>6-Ge|Av=GN9oYLnnBUl(nY1^5#J5r1P+mfkU~9dWUS&UJ^{D%Jg`=OjX+ z%zb$^-XHduGBZ(wQ>#Gim^H}joAFkbV)>vk3AiVU6ze9PyGJX*F=-*p0|)5sxy%|Z zry;#wXy00gu2}(^x>k;s#sY3#Ysj*>`R!)Wy|1o=b;9%YU%-+gY+nWTvn5`iaETfc^qT03T84`{pKGrxC-G8R{!SQ%VIg8JXqw0^I1Z}nyE7B$s?kCqQuw&p~iU=uGHD@&Epc1p!rKnWr|<#?RuNGdVI zTOOh&@4mN6%8^5d;6-5e(CppDv5}!i^@UCGca*8z!ICPbESGgx#S>bO*)V|E%W9HW zN~pga;*oU@FB~#|p8fy@pZ}+Kyf{4t2y(g>Wu=I?>n=E+9o)E+5U$KgW(46vELCLm z5J;t*&J3U|TKlc!;#&Mqu)Z09b)===cKBY#1=tZcirur;AdZ53 zK--)juH`~9f=d_3jCvzo8G030IUxw#LDu35O(Up?@naI2f*&;}i6L#XI5g8>KFHR9 zky~O(7oR5YCgBgqqVbRqpiFV%@QOob`#Thm7>He^AZW0tf+wu z65JnzXmqI?e%M}z0g~u&?$q66=XAgzsqvZ1vaGIO>m_!@#ImVW(PWq^l8(1fz<+EA z1eu?~xHM++V7QC8?dN6&;b#KVreKO>^I&w9fb+7vk;s~{iE%a+zuf-#dDNc zz|cea-^Ro@PA`Gv7Z|rjVFtLmOX5nt88fuT%ZE5Bkc73eC4D(IF+swO_{-<1dhjZQNiBcXtMm>Dm~&#oWVtJS69JlSE)>~8BDdW@lg zP+s_6I`zGZkEJNqn4=BWPDM+FR^`%Xx9nHA1epW()4t;G!>RxVli?3bB0zfa+?kuq z`3QoIXOF$e@zQ6U=Y*+EaZSe{(+Pz#n?!JP9P4)+;pd@j@0}Zp=HBNHK_x~;$t3c_ z%W%Wqwc~GY6JzsWer${oH{?U(#@=WDrJ@nW1|RqwFscqlrgF#uu<-DCf`V;r2b~+*5>K#iX*R!yR zl_?6@hlag0AiUoKNO=pktqVq^Q`-wyW#(TKyFtQ8m5DKL-W7W?Yp_z%6 z8JZ)Z&1~c2M>}z#RHw1&Nk*^mCe^IIH5veHuvSix8G|vW{3$Qjz{v1MX0sy85a?Oi zDf|rL7@cSSFZM`!Q)xBpSAlP0g6`szXHTW>BS%tmdppi4csD$0WNa8PI^)4`nVOtO z{!!+w1Bbgd^Yw)-R)qj_50t}^e) z2C6h55ox(zhvN;+0qlehyj_$U9fR{I*z&o~L)}plPK8k?k?U=^z!Fm%;8jdUa6d|O z@;>QQOo)2PE{cxwbQ$bW1$Z3yjE!?pxlrs-Vo^4$aee2|ZCFOCh0Gv#4AHVCtXmGp zr~EkhE%%r}`Fs4S`ze{!ng%F;B;Bkbr`;d5=s6XXXBi#B@k85ItAZ2(3s9bH;=~ld zp2s?G6Hc1K5W01dk~!mxU`LxdCetw@REv_f!JaUrb?9(S>ONdc*S8A77j)2s#%!If z@+Wr3{fF%nMpSoqclxQ1e>T1J+JB<_&eu^uQDVVs=UP_N-S?hEFX2F3(z@xbQ@TR^ zZuhRClZ26xz|8GnAQ^V4B6Icj`|paRzYW4n8jx_jG}})C4a&TnL;b6%Rh$#}Cgjz~ zjR{=f(-EfV0yq86_by{J(Wn~CUjc^D61wFq)mWCtST|h#W#HA6OEoI`7&=ksoDC$l zWoa6jh6B)WkaaMXpXz$zfAbitbv)3vit*;H>DcDL6Kl(_z>+yTj^-EVs9$5_a%`lVvc?E^vYAwY)pGNp7HCyFJ zd8}p8Hf6_1NKlR|7u-=HL*N5;u3H5Y30q`sa%t!)C7RlSevqlW4%EX^RO`^ZyI&>T z`uFn1hC}FF@XR@JT2wJn2Mi|5HP;CXsr=jVFWNOLheCGj^ImxlG=)5?BFtD$h{w>G z&IrSv%5}TuiTN+aa4f`uVlxWG>I!_J;OSpJ@uDr=b^uAbQ)J`bXcHo|M$+O8gQpJY z?kXTBtjT3QTc)_Z8HX-NZjvL?iLo9mbjG8S%0cBjo)mi2$>aQEW_x?3OuV8!FB)b30nCL7);%4xnOr#LxKK 
zb;HZP!-Y0s+J|A1XUT@r^)Wd&%-Bq8A4}K!Ur)DgP>6SGiO(?ek^L^%q1r>~r|`hE=L(%lL8; z%B%|37EH>nTzXlxbc@LTEG8n72gq^X67epU@qFHYv3}T5? z#$|NY%{0@F=}LEWJ)WxayHL*OAuE&(7IqoHonXIG$^soccmM%6htTXxBSRzUMsF`` zOS1!%6ct4FG|ImLwnb&#xZ*P%Vt%pi%n79}j z;2Of)k_ckK@U1YHY;npD7=;pGx`Q`=c@s<;**0dYn_*U9mKo6-F*5lWd#*_y_#6SM zc9Btml2Oc%nI@I1rZIaF@9J+>MkegiIM@%p*Ix$xL5p zd~4g9kQhoD1~3L>40~ig_J9CBJS{wDFbSSPZ<)+V z#Z?@gg3dkfbu0@K_!Mpsbs#yOH!*I7X*MB#!~@2QGi4hwpzLdvs4TlQV{(3ZHgn-Y&yUgFws+5s=j`xn+s^s&M!K-vf*&sP?y!M_XyJ-y9qq93_ zWA+}gjxvqCZ$bg)9()u3yi|5={?@qLyT88$k;cB^Ix3y}bD^SXvV~sAePd&vkw@gl z#b;b`DIO$yUKMvdh+J6QhtXE&Stp0<6nxv5do^m1wo_qVL5$SM@qXV*d%2%lXqq{=C?4Ii?S0SXU%z^Fn*N%+P(lty6W;ZcZ> zo2-#2$bve6_fu08K<^zOpbzsNxya=&(`ECiux&8sSV#kK^5}NFV!!N7F|>^FeB}em>f&>x>u~ z>P`RpU;c~q=IiHq5@?fb>J@Od*1TLMW0nsCvpA1JKSIZ%b+93XFhK@vV66al&()naD9R6kGT%_g|gcPQyLZ z6#FN65mbc{UY$PMIQ+z{%zx+q3b3^s%;)^%EXuw$wM(I$A?9(JeXESz zq82LEUn+2PwL+^X`zCxYL_omqsw2$O`gm%1 zo~-3qK?X~73%O>k?-k}n7_s%)=fUl4QTk6MUE@&Qu>`$AQ2t;1*_YGTzwuJaYrq)E zqa-EGv6k2`ufF~Ydwwx}?6c3oTPjjtPaixO=OH{o1D}Ye9*`->< zTNng-XB1WftzD4S4Z;FpxpFL-yK98#^rqN~?%<)abo#ayLVI}EsikBAd=*>0NG_>H z#{!XsZJGq^;x*a8vbLx94h;r&AygIZd6@Jz;lm|cvju}yA*MSN6Dm`$V1X(K1LFgL z05(&1TYGx=frs$|@20E3QTlI=ok&NbopBXex4-)IZ>Rdw z157$?hBxU=4by%1;#&ad=#4!#3FDk(E(lSP0aeasv_@uhYQO_71WIM67!QKw3-q4u zu-onEVR&c(#!>}CuP3{;v8@FFI-Vqixet?tG&p z8cLLrMHwOHTnLrKtpfVNlb@t4PX`OTK^;wNXf|d_Hj21jIcryqj*X+N+c5?24@B%- z&JjlU$Bq~RHs`147y|Q~#*48{=*kXXu8qQRfd&sVV}w3o#+wL%jXB0PlZUrs4Ome- z=Te$rgeVKyMlu?+W~_}SquZcF#L@z5N(5sWKw=(i3iYyC@ktS3B142ZhnS%|Glun! zM>ZCQfrbBt;uJuah4xNlu2t^sCL-)h_|}f4mRNU$kUdFRGIyS<6@X9le6UUE-FfwZ5u|aQqUWmYP{MM$6g5-SXXC7zoa%P(2t>dO~ zK-e6GNyEj@P~YO391_nOWgs{MKEgBOp+X>pC)`mep;@m`p54Lz&40c^*!msUlUBW{ z3S;baPhjD#CF**s{A!S8ZH-lM`BpB*i?4q4((&Q3?5MbBJV<;i-Rv>R7=wTF9BNkH zF~*sBw7nnSa&MJamFT^DXh7`Wi{l-k5xOX6#yUrIgabUpEBLI=h1tUK;)^h(q!*45 z(&kHg2}L>fEwc;b)|n93*;BAp$UVp5hDSZl%-*Z8x=&+1I4;Xv<5~8O`H`W-cl*k^ zqjda^(Q9VU@AL6F221u8f2+N44F8MI!f5k4%-z0GG0SKP8at z#1x)p2Ojy8MsgvF)xY|k-%g)+ z?3r}s${Xndg}S$JFiivd5e`^~BGf0c2o!bc$;N zJH;^2(XQvsInk(!k}u4a5jWS}=$!jSa#ktT*wb4W9z*CVpS>yU#b}&+)@fCi2$wYq zAbrha7!|^&IXRpX8i=-7zvJ8;!Q+ZB;QMagigl`?2J`~i#mZ_8I-#vCc&havssO3@ zMrj(16rrE_fXknJ;equ07e0znU?%olI^Fpo{D9iuR2O*h_kSx|yhl`kiLk_7QE6E^ zcH(q;_Suh-N!*xv-yKZvym~YBUKk=OOJWz^o*Vs_)0O`BqAjo<{b?LfOE^JL3II&g zovD*rh`W^4!yz~_-HQhnS^+NNd|PK$ULf7&@cm>pb6H?Q=h@4WPKdi94t zM5f{(6o!C7S52nq!K1CI@!&zhy}@6%F?KYtf@jFqSO@pN4liGB-6f(B){(5Lr^^;f zXc=&caum)bj8;nmx|dqJajej+AfKc`*H2wU=#d7p3flx|gI;P(uFw=GDnl@C0vDXQ z3WTSDh$MPL@NGFf>z-3yJALEX@<=b7#0eW~s- zO&#+p#=L|ld2T%GG}T6;$es7zfibI-wh(3g9^g&$X=MHiGyn_*8LV^7$iv*N{B-LQ zrJ;sNxv1ZOnK|hG77$;e$fQ{a9pe@bf=v$gwNljgq>F6B|jE&mo9A5}}!bH7w z$fYl8t< zh_0|kjZsVI?BZl_^RB5k@VQ+|tuQk`wVN)#F$`_cm8?xi>_+OQ!Gg}s>pj=gQ6Os# zB*TpM4#l{Pz!sA{SxIYXP7lYg@Tdc4+n`h6?u=<}1g}5qE`7=GDye_Ggki4HpTT%+ zLSRw=41?%JdzX8X7RpLI@}+6u&R#y4xtlZXjQlWw4WpjADz7s%0#q%>{^6tCYzP@2{b29#{oQ z;l06w+C;#RrITG(O~y$*AxJ_UwnQ5n!Ggmhbn@u&C?RwHt(Vf(YvXKqlrMyXxY4pA zI#jjYojRyt*-+C(rdBRJefwkSY}egsabqlf>+P?kAH4qE^xA9R!g3o=^C)23JBG7% zQ`eEU)ZIZX%(4cEOeK^WRZMPmEt5za-D2DtPJE^$u;_emYI}W68zpDx&rYzzPGPtD zdk9_ifmbC=3pQ+X2?hgmDZzWz+0hw-e-fi;9p&FCh^!VOHSiD^IUU8gr((9Xx|^mA zM`=S$2n<0|R%WOZDA)7=%oQsU=BFYdLF%=Pj8RBRuR;{6$C%ng=&EFIu?}9dFr+^W zN`d7jK$%e?*As1TY86gHOhON8>+8s*Jq~W3vK^BsnPeu5X;;B45e5KbUs}Km$aPdW z^-6_u4bw9en5Xb_%flF~{b@gJ6>;ye?>#j`y6ezEpk4LkFX+J%&LR_5p@YugT(clb8WhS%+$U26s9W|E6VI&KvEzo!#a}gM6 z#_CkC&~cx@YO@$__WHU8q5#d|y`No}!iW%~ufoRvnQxh{GRJ4lLhu+(%r6e4XeX^< zHHSh}3&g|0%GK)7Vl{X;eM4R3xOMTFgtjj=#L@qv9VE% z1TvkFZnk3<%WSr#<-7FAxNrT==!UeUFbeD~>k?&A5O8XZDsDcs9tD+~eTC3jh>UbT 
z3uRQrnOAt@Lkah^>^v1xJ;R|KiHF5|?R#HdUt^08LO}bO-x-Bc+4ZOp)_&oOIgAL5 z@7V{wvMXil;JJ?4f7y#~L;LkQUfbg^*m!o=g53~@1=HrxOu^3Xkwq4`5+`PmpzNCd z?eM*pbn2kq|IF~do@X2nv5zRe%m^c|i9=(W;yg=J05##gV>SZfJ`)!%Z_lb8XgF{! zt{>y#_AwUj@7PryeNGh{|HZS%G2X{{aEz?M-fvEhzxiL#Gaq{Z|78>Du`v&vvHv`t z95c$r_)r~3;&ki*qzYwXUdc&XxNT=_@D7u&H_cm z>4UQEWvax(b9_YM@N{=JHKb2I^%Sj3ucep1`%3B^TB8kZB`^$XRL{Xv*b@znb(ANm zqbB57njd6u107r(U#El6SjZGZ-_2z4-G1MR)Y93+UZFG$Yh>n}_}n7lH$?G#iYUI1 zp&x4j{Rl}ZAU8k|4x{uSZLP3p3lP$l{m2wXqrRT&TIu-FU`LoE$KHGEJ#gjL z^hD7{ps(W0a4)-G^c3<2B?rR_FKr&BRWuCtG{con%Ui|@?oaSZW zP@sENX+4GJJ9pBldub8;U}w7XuG47}{)RN5BV|px=vEXjkjOfV6$KLCGVkiVzVHl40~eGwe`Op$8Y*TIyTmeb^DTfatinEUn!&;q$W1tQyo}tlUpt7`rr8$RL-&cDsU~JQ^3&_nZ5d~p8gI5+%;)#|yM(8qug`dZHH+mB% z69xUlDW&0!vTWp7xRPb&ZVmL~6eOem7JCa%{KN>E_|U9IFH9@8dlB%G3R))D(1d7* zF-{SYsni%i<_HDVkyX{2PdBrAAX0dqaXRgyJFen1w65(O5y8nxx{A>{+M3}jI11;` zUCfWk;qxmeXVl1x*}`SOb^XqI>|4-`x`p$v;oCsDr#iBVw;6j4HE+ASxHP#rbUsn1k4-?j7p~= z8k2_CQMlGNur^oPcvuYtEx=6CaDHm)v1nk(TY$>-wporQ6eW@`>e!qXLG_w_eZ3eI zFokU<0A~1|*Ir5ewBsu#vDI31#d*1O?!+faFuEuDa~Bk|5Nx2ig~@cJ;~>ddqv?PD z@*kwHe*Md&m=6a&Q=E1g3Yo2tAG-e|>At%@iqUYC#emU9qB5QllX4IwhSh{_Y>*%Z zeMU#FidqB%;k8mi+iMsS*C{7-$p(;X!HP=puDZ<$c!(#jHyc7VmF>jrh!B1DfBEP+a> zhNWC|l}h`ihaxB=gG_P|kRBkF5)~MQh#oR)OUgD&*tl$=5O^x-oSZ)Ku}`pmn?Qg@ zVzcXsC}fQ)*tsgGmRM3S+A6oS1eh52RzX&XMU(_atkjJzMZ8xLtbh>}MJW!v&LBli z!v4U0_fkTrpFZkCFbeVg%*Pimr=LzK!vf%;%ArJC(0OBqL>!_n0i9v|G9inEN1(xE5}DH(r!>K*<&JZ5=T8&dl5- z^KBOvBPWKT-IVMX6boM(wt64j8RBsD%$HH;)DT_{`cSbjf8fZW!-3s1^sGn4(kXf^ zbA*OX9A>CM14!OspIQb%W^?uG6?h7s1D;=FP>+^tya+wa!c?aT`3oOV`49ppyl4)E z$OK>&7HPpmbQx24lzBGUjY?U#LYc#WPO+9MJg%wAqe75j1hezGI9RfX-{C>#dyn&= zkQeR~zvUpWpZ%xzB0I)tSG?yKud8Cq0>=A&ueTE@T}wCKZ~x`@DnFsvLaFgxe60e_ z{rE0h{p=i%)$^{h>bcI1_l&FhhL?BtIDb^GvaxZF-!WGzfzjrgzZKk(c*qsRVj7;+ zXmp*uzVo2qT!Hs_k6`Frg8|?HMCpUsOP8T&Gu}|<*|+=`W8)O3BHJravF~Gg7){W) zfAEU)yaQv-9_ang+XE|K=l|3(v`;0JTRNScSq7+{MDbP zuYL2S)IYLGG5Iqn!ZmcFnn+9I6s|{EnnziC=gnJb@+MeY{Am&S zA?Cd6=up8jf)q8kd2U-l8wD_*$4T9vi#7J0^_Yc#XJiO%p#B$ z8!(ax&2j|>AIjbo#R?|zzEevnXbnOOZz*j$88iHee?mH})0r<6g-CfZc<|cR`t)?qlhRhn`KDkS|M)-CSO4;F=$PbovHLF zP|&nUZop-Xs>1Zq=k81o(D~`cz%4pjmZa9BK)8OgD?R;*1L@f5dX&=UG&r!B`fivM zSdbnfG1E4(!<1z?4l1A=nIJTA6=i;ol7b7vIQf8dj9#bA%S~h{9`!AhRTD!)VMb(6 z;S^aMqb={)EDB)-xeNJenmO2kN89aqVh*KsVFX?}1B|LFwI1uHfPY8ozWsPQeC8zW zkty(iw-}L%__tsJYtI_bf(Or`=y!CrM{U>&cydILkXK8Tp&6sI=n@86310OaN$R^8 zQ)@VlVozcSS?ztDXa$5GZPGe=H= zO&OMEc(#p}G}JpBu$bu~oIyCj+FB3utVC3Zj6d}BZRy;@2ap}iIeB+_ju*h}s|-TM z$V7PuXasaGBUCy$%aN~Z6tJHkLe~J%z>}e~tk7bY&aLt(##l_Tm(~-=c1lG}+`!71|3FE2}hg#G{Wk2Z5G-q=L$aYmtbDFwFazDD` zHi~;Oj@YVZbPLu^)inREV5=}&5}k*h#&;H7A$d&)7CuS$l;Cgk2$C^&cS5d>5M!gDcA zFIdlgwD50!R8PX9WI?x0G-h6lz@BY2fw!BWejW;_xd%l#A~Sfo zBr@x$LOa~YaHJkru=M0dsH7@j2sNMt^Bn{bH7XH&DtxY?kO{p^3S@@JOQt0xMB-%~ zU|x?s{&*1mE8qPt!q?*( z$``R4gFqC%;}Vaz%Y5#A?+x(3d=t-XzTCA`e|O%xU{tU{u~ z;NLO~lbWPI=}l$WwO_ypsISMn#JR;7L^UMZ31V_1n-`!7W;GFi7eUv)yoYPAA;5vL z=R*PF?@+Q)PJ$1hz$mnL+~6L`hmZ>N$9dd8*2n`nUIknF-!}x}1jopS!5q1d%5%Ua zcppl#*YzTCex|_1d5%wk#P{CQF*5wPwu+qf5cjL-aB3U_%Ma#ibTa713p*ZvagO8j zTG{itH@FUT;(EumeP(_%a6MjSz}Ai4K5+Y7XWxk6=J9zvLp*@hp^4a}s@3->}XNRjW2wpyi<5!{4FbD(Et2ig#$M3n6bLN9<(C_RvQacw-kH1H! 
z$I>>ueRd0a=fDM^aaG`~bp#k`+icu|KLf8ohiszJ#^BIMy4gR>x%iifIL{5_r}w7K zi^3yTRG#X{8OS?~mkX$yjT#(pLv4NV%XtjqdX!sfH*&RLitYowWv-LndX;Yt1ZhO2 zGRuj;3FANDMAEQ`6}|Lo?H2R5=suH$!j2LEQ&?d?IoB%qLM7H1yivHo{`2_;QCt(V zT_5qeb|ACHqxA0Bm?@?nNRiQDtJE-OTREzU;9T1Pt{Hhs><6nTxmZ@lah_GO^TCb^ z``iU~B_5i`ORMA-=qxCJA63;=r3apTD1Gy9zY)Ch)NMyob2qJ<$8M#kaBx-Q$^06) z{XhTXKTZGW7ym)}9oh$?fk0Nu+5{{&4!_&z~cqmzJ$JucfPR(}@gYqnx7q z&wk{?w0iDNZ%zF%5y^%0gYUeP1}`y=QSjU}X!y$1t7)3(&yjm^d?0VE;o<}9$M3br{O-aJXduW+4$fB4+Z@HEI1Ig8N)EN0^J0<}yj z+|hv-8;Da;G2UTSa6`e=fn;@Ww<4IZ`vk0feK&Bhg?Ew!!GlO<8Twv8@iC}=wY4T308(juXh?Q5e9 zx9%Dh5f`Wm*LmP%I@o!bHqEbdU+9L!?F}}dxO$U?mpJ~BM;_(pq4Zn-Z$w zAhka0(>ve)e!y9(+l&a$r>@&$323wo*@oj$XJEsDs&xEZ7di$l zj;S=cNu|S2fBy3rF|0F2pz|Q#FqvJr?liFDS~2fb9qR8JM|Y#@)G2jA>Id(DPHCgi z1zo$0a!lfY>poVO>heJZ5Zx%s)u4P=6QMGvXCURqD$|j}M`P}9^xg#GUcvkjf|I}%sWjm?v0M2A!Lf3CxOHIc~z8?d&3w&pCkisP=$!*2C+t)Lc z+C~~NJb~KQIn;P4QJ*j)P8|LRKt;qDtC(hnbd=9!PAk9_1q zL{-Kyz^DW@1$-FZrg66bEo_3Gc;w+H(;xrEUj!ni`*>|?Kim>fI4hbJyzraPy=V*A z#<~OPV8>B54%KlA3D4zId7$JFouKOBy)aE+F_T-gdD|Y3!3q)3dv8L_k|C%`APnOy z%&^KX#DZ$EjAA}Kwv@(i6@t&Efc-)_;Wx17?6tL}9GHGu$d-5Esek|pJ$g-lEFxS2 z9&l@iL?&DJp`e?K5Y;VU)Dq$jMOEjp63rKogZiD5z0GE`)Kd4s(|FixiENzUnVJxy z3kZ`}Nu0gJ!(T;l9_Jn;&{p8-!R235V_R1`r!=?G@u7>2ZIS;9Di_dhkru#jy#1fk zu;{M&7KaN&|&%NshvKi=a@^!CSpa2`M&;%w9pYPEwW0w0g%JtKlA| zjt*Gu>4Oh`Foe?t-W{U@D_G}t=2CGMtEvZ`i-huAz;8p2d1R=M5VBLDTUa6-40FL| z3a_GJ&{aGsX4*@HD%`?kR5DdIge=Ld@)01R7%?7|1n(n6YZbVVf;v<9c#Ci{c}l^r z^012_e)35YkAC=iI{*54#^d;idf@%G?3_Yw2f?a$T_)jt#ChC@E2t`X`M}W%ol!94 zlZbZuy9=}lZ^o)pGagpn)Tc4#<( zLQrC)#+(VFSr`?N3}H`2x^hz?&sI}0b<45ml$v1(Fs`tiQPk^5U_W*GY`}2Fv7q(# zsc2@NV==yLRD`#`jZ|6nqm*!}a2yp+qfnW`B}53X7EIAw`W#kAQQbT2sYQcv8j=Lw?Zeo84~7( z>AEwRhK2^9_qFiO1WbX*LtQ<|L-HF2jp`NPUHKdd5)6wx5&L~pS72?dW~_zFXg zqxWAv>;CJleFug?Y8w~WQ>{MfxURI-=iBpcKIQ^xo> ze(!|6F?h6#vj61#js2Uus zjuwrCQX;%ML?S9_BnD-?jKn?1XuH)iynUA9jUPPd`E+Z_2llXw3^Z3Gj8~q~=eBii z0ejG@A%!?Q!nwkGNSQ(sM3UieMO>qleOy5vgN_M}Cijvpe6xgm_$v-AS)|(%IY2GH1CoF-t`z zx;u^cn;5)7m7;^`H$MGq>5W@oOM@$YX`SL3N4t-}H=FR#vNvXcUrgXQpfc1pRrQK; ziy?2LoHr2NscS-SFaYRixdU$wxE8YCl9OVk4XfvYMyR8G>o$t{1UVU$H6S_EA|cBG ztC|DyG7YA`mGZ47k(Ofch#Xi1HsbM1$UI>~d6Xkx&s&UgllMG*SNg(l{sKwA-N@*L zboI^4D0@UqDOSHm6~Yaw${hy!ncxNXh*!mcN<#`z>&6yP*|?i_To6y(3O^;^t64&e7%XUQGiRrhqPW5=CPSk#~S`jbXirJY6SpV(HHuMg6PT%h67l zy;t8-9ggScp8G=j`Jelx^v!R49mwNUz?zK`pE`949fiC~j85UcGScWA2Hg~qkUKxK z7>2Hzqg!0(aQ%UDj2aw{6koaf>|G%zUO)eO`s!c*O?u@!uK?wHC%yaLl{5-OOV7T% z+mri1n+y#Pv!-^kBU*~?JdR#@0tezM(X8SAk@SgAeHMpA9^HKVk^QVEP$#Q9t<#3N z;3t3ii5FE`OYtyQvj|b29PF7%qtkSMvKxusz$+tfrO`ROlQ4)v6djXV)+yy89AXIz zZJ5n8JWObYq+64Fg)OMZ?tpn<*t3dn&AMzGlRp_n$x1thp=g{dNl9b6-_KZiu~- z4YLVcXagp@4W3f(yRa9nMimXiOCB5wy!@7b5=|&M5R}BI@V8rt7_u<1SP#8y_Gi~n zsipV99LZ~MoPU!!G|@XdZ;h;VX-CW-7lirX*^&V|KK>bsbt8mVQx%1qg}0UWdVTHa zCJbyIi&v|DrYmX#-&}N5>oUIRYj_ zw#WmY`!0yznKLBzm{`FF0Z;cts#^C2_> z6y`FG7V-v+5I8^H@4`Fp;N@3gB0qq{?Hku`U_6Y)y&Q`Q>e%=w5vyTnO1xAZUi`-g`{!qeKwN$+ zlZxNjxzcH@`5x_gl*$!@k9FXG@!RouPyWXde(A4lOkTs6v>OVML-5%6#Ql|BBaY#7 zkMSI7%ww{@_bw8j@Tq?W9s9kPkZwF)2JN-8&pbbV_$96T-Tsg3vH!iN=)skTc}g}9 zT$mfFkPEvKb`p^Q4$chG?AoD7OYh#4m)F+_?Dbd|4v;<~S`o*i9CKX_W*r_z za$+9&&I(-C%=bE_)P!zmXr$-Nq14uKgs3K^?ZB^BQ!iX+gy-nBKXe!mB8uU_;9zPx zRF~SiQ5@(*X%Cm~?lu$>=T8qOd)9~y|F-bP)Z)-tL11qY?QCgkjlD2Q9qe`F$1+hq zz4k`YG(yGfE7yYe!N(lCkHVEgYl9cz z;n$n*I?7)eZtxDYC7vlLBxYTnmGKE?fVqeg&m7M~#QKGC!n&A1xP-B^I>(#=8(o`U zOBc^yNbg;}MAGf|m^~sC7%}R$SFZI@ooO&lVwBYa!%=-G1^|jBRR-Xe|z}p0{x*4VKaQhzdgR%mtL~Dxf{T`GwD=U;3#R(g$uom+n1r zC)Mrlp-nA0a(Gkohy)0Y)3N9rs4xjTvE6PC=w%7#*ELlIT`XfLObrA-RZ#x9tda3waYis>)$>f 
zu$V?59Ctr{jxJ}8!}-NQ$a+;%)I>*?oypdSV$2eSf_;Z`%<{Ivl-+MCePu)&eIxzlUw;kA3l(vY zVYQ7lJTFcmazu8DSJM}M?*(XgDShwdAEw8i{vdgq)UF@uO_O7TX`Gyf6(B)PEgd8b zgRvi|rDy_YQUeLl?Z=QWz;;&I7c2AmX{?9xsH4yv?`O%kS%fAmW2xb8 zviJ=9Y%ktdw0TwtfwI?>K%Kfp!3{j{3!JMgI&!1~uX}O2LISRQ4@zQP zby7{H)+6LIkZ3*l9=UErQi@rZ1|ULfRF*6%Y2a9^1IjzppJXyr9RlL5qjMYBjfGPd zXW&1S0oQ4~0pGRLocl~3AR2&l;j~2LX6)Jm^D-BDVJkdi78)L!=!4&a>BsqIjzj_a zK|@mub6k>cUg{w~a4QX59z|y$X9&k|3FYit4j^-ZJ=u|M8E3C-yb3;*1M+3*(|X2l z4#gM-=fn3rk-qhfZ=f4YrAI&fq4d;;o=RtKzdhZ!aU%>m^YlWmWe=-ol(D|@E88XK zs&eSRV)QrQfi>t46i=W8de6mc>DH}*(7UV}sN+ID;Ti}h49F#O71sfpJUlp(-hTT6 z`4vQJ8fzmbqocheeeQEF#JawN&T#I`nJ`EuD22O4-FnNjTCAbqqrddTi^6a=P!jw4 zuM&!#1YR(nCZ{I|xy+>g$%S-%xF_8l?8k)wvo`98fN-3ypmYBcN9{)}spS zHvOqpL}aou@@hg@bzsd@;A~W}4D4csfe+m3fpJh$Q9@$_guIkF^PNeni<>iCa2+C~ zd|YEG@D5rqRszvsaq97A?iK)d;QK;gGaKNJbw^vhJRlWZXOn2h$k>(i&U@d3Ij}e|PD^5@JQAq3 zA!Zt9m9pVyNYWY^8pQir8Kue+V`*sINN4Xl7JUaG8w><-S$Jy<+wzPmqNS8Xt*@i) zF2;h@gC-|QuB9}T2G}x{bQFZngO!oCVfYG8l{n#1mdO-CggS{b??%vjK=m2~o{X!& zGMX@~O)jvzDbPdZ7X?B=iDIto4-YJ#>3wF01=He!L7eD<7#j#a%7xV-O!cORXBdmh zHOoAuw8JH#M+HbvnkosGWvzuqWYeet}imD zCchFHLOJq!Do~M7#C4-;3+Gt6G5q>4TfJw7SGNsoMFfJmCZWe<$1iU~cqg z+tPH$#CGV@#BB`<6Pbwx@*Q3V?sw$wGwJbB=<&JU|u!k`g;3#E}%v* z3lohNNyO8jJcGa#z7vU)lQZevcS#B)cR_)(gm*!P;xp)>6}BW6v!Rjd6pW#}vy1c3 z&|v5k%!_u*7_|zi9621eF7+4{sH}Fs#uc;5`Uw3}L5)OZl!;Jq`5kls0fiCknoCT+ zR4lnzD1>~r|GB>vh-!}eO85RToWv>8hwGUs_8gR5&g-#Hv+;qGhL3p9OkwuZy&sNI zarV3HRIUfjNo#TMpiGYDOYb3Vx;La@$DvXb*K*9<^GD-#WSk=ndQSE{`_Gk)(J^_A zOgVN=yhaS4bF$x==a~J=_+o?jSo||@#$y$fF_xHz>^VF$q9Xj(K#ae+V5a|j13mFR z6OIDz`MzPmXZv%{dCtlH^Z0CBfgi5v=lsh?l6~fN;>7`uJ$v5ld_NDq#rl+xvG}bKAg}TOpWFG7j>GWbSl6qRzoo;L#%6HSo$1t(GkBxmhj_Xz*BG9} zN)rDNjQ2cvF12^pqSQ~bUhoO7Z*Gme0m&2VCU0{+jJhrp0v{R1`0XDK51JwL1}haF{?Qu8A+@m+#%{0UMH@qy?|T`;2b* zj%(`Kwdg`%bK%p%S@q>331ZJm7AEaQlRxH3CxXYy-PeJ6JTtr%QDkCeT<&2a}OOO6X5o3rEE`yYgezQx88gw zU45?y$OHKLSt_38gNG-q+*sF0fs1Aod47t!Ff?dtGyz1HvfzDG-@DrXHU{01^xC(F z)7Ss>78Qdg@!I2D;NG^}ee{vD>8GCl5Z-HgK}`1(DcDYzE>5Sv`}>PH^zbUgd5FL;ZoB#z zI8#SkJM>P%IavJ)D(x&$3}fI53h`ScWz(m?=2CR@3SKWHrJM?j^%Qh5seh=C!V~1U zMgD@)7M|%#IchY`HQXj&0Td7#O#w|ur_;c-37n(QII`D$AmlEG(cEwdha6+-14gz& ze!&_#PBjp$BI@<`Ubuo$-Gbf%vI4%~ew6=+TQ8()V?GgdVKR}AL6pIq8VzLg7&Qb+ zQCLKSEP>4zA+U`OxC;zz^fCnqC}Y}jxSdE{K1%uHkzZ&28f8??Bhky7->H5t+=-H?v&%kDHz{s=;+b;U5TvJ+FhQ>s{C==!| zG|Vm81qrgrI@*%hJsqz-z8a2O_>qyyQmaHp9wl-}rpDT+8*Blg_j>Y5`D=~NL!z87 zeeyqVRBlDD3Id3HdcjdDC}ToJVOa#kLV3R7??7}yvVM`xTwaX zQcsqCDIH<+S>R$OO0S2w#}&fGTZAWzOcusKsRu(<3S6R;Fiku7`KsClJl>3f=3Tie z&fBqjB(N0)4G*v7u9AJ`u9K;>c`~i%hUrvHNRcq#fu?)XBX@i%-FE68P&x}~bnz9u z&4YN|ZT>Nl21W}(@I8G+qwL}>kn^$EZ)Ce}df;5y9 z&dZb%WGMpB1UJvuo2U>~m@3$!8Wd~n1&kQ1!Z_B6CTQ(y$RLt^BsQ?gw9sJeh)nr2 zn!rTyxs1c+F9?$bas?)Uis5B}^!Hcn}W` zwXLtdi}HUZ#vK+s=Wk<32-8@_Xp^~Z5T@75q<6$T6`OvrvV6vzLo0r!N8hB#pN!TSiG&nfOu^6pHJxuVm7QG=$4O}Y+O-)bY7{9KYr)?{onq<8vXxXu4uMyq20Qw%x&ZoNSFMmgsH^% zV$~Q6$FqmLuQ&6b^N|q*;|a~q!T5D{eQ7$*52c@XK`Sb92ys5k)K`vIN#?su;b#rG zh{~Uzsl;Xqb(|hF==l6h169v!DAtTC=$`A!sIxnBZQjJZ`I}?nbH?vA>{jaj)qsrq z^O<9h+2eOMp7<;1%6rC}c&eBIj^~zZkR0cq&bn zHEyo&NEM~^@Hc2re+&5Tt=%P{Y&TJMmWb@qizc^_4&V3lzn)Tt1t_fo0)VGIBAp^? zTtgrnASKwZYVN`e;H@`qQ66!K2CQ|3o_B|dWPr^&iS~$JV6{;xm%k|&m zRa{EtN9T<5;rp1mSWG4Y8i?WoJeANys5`gXe7 zcO`xMZ@-hi`tQGuV9QJ0M>^A^Pdo&kq6{r~Jk3%bhces{HFcoJ!J>f$7AsSaHL{6? 
zYD;^qDus)pFw zYdpI|!NMXKS4?DSX-toN;{NpL$4(Ja0;fyn>n+^>(WHruW8$URz77FMjdq@_1 zDHh8-Zr~NPcd>|u@U<-a!MR-f{&fg{K6T#(E{KQo36%Bgdy{T&U{L&Atm@I*y4VEsspsWOc!`08Ae>5dcXPes(h3Mx zFOVBl;e=&I!k?bDC6Wu=C_+;7PAeFfXJiD74FS6YQM+lwGU!1OWCMQs<3E|6`S4S; z555hh(1eI;Ea11)2_}v8xpw}obn)tC!fDfSJsC~(ZOQ;E2UM_H(jY+@pic;F$QM`4V-BJKn2L~eS$KU|hK?eb*T!jd1hnoYqmtO$r zk7J!wP1!|U<(5e~4Up|fcQGl>WoY@Wxap6i4lodANG+!uXvk=xJhVXN1x)7xOpc-= z2n7=}6%HXy?3WD|1cF(@{W?QUNVO~_d@254Bu z8R}A#n*y}JkH=aZt8<35c z#n9CN6r~ErOv^IX?lZ70`HtUv=Xg+PlCB0Ok>Mh*p@kFu)_rcpGSBSUA!Rw&azK6_ z7#xTl`4;CZ?Yt3(u!)tRWM>)Yqy-=8KhP-00`@muUoft254(MN-ntnboY-*y8a#0- zHDj{R5ZAkY1d6>YG)oN&iS~6?TQp zZL9}tp=-vu_MW;Le4jBRN06;ce9trcw`&|D@*qF+7cZG`y!N%G>@U9H3>fFWo7K9p zo~KqD79(rp`uOu-UV{s{*51#zOy=Gl;+{dre&I~}Eq$rQ$F+GMmPUM< z{m*Oqa_=b=Jl|t8ypPusfXv?78=qR3G@0Ee(}*J`&YZ;{(X@OnnX}jSIOi^&gEdhL zb&h?e{cAW*M`}U&7`y}GJhxUPrsqsz)0y#EYpnBL#@kBjgw`#X5G7xWwMlUZs0_F`(E;u5JF3UKw- za9`ATo|_qlrU86|sZ;6DDnLfTGpA_&F_7{r7RXwhOGi)EraL}x3~$AO)ZTa)a53O; z__d1MI^L>HU6{C`?f$i~OBQ{1DaGvVUI;xCeS7w^AB>6thEXSbC({~0Vj2iR1GrIf z8#+E7GJdPX>}@liHQurAgeg@NI7}WU4a7Py7vWi9ZO}EbV&tvx3XTz<>O6!c#zVm= zow00C1R-Ht{${J!$Pt6(H8Jh;V6F=fRNkW7O^5#C4iSl15!C zC6(Y|Km6to0cz7_3d?13knpMd3mPlLi#Y`@x&OU!lw4$GT`|vaBE8-Fjnp%7K3(p4 zE4}lBn`xO)zs+J+!7$u$H=z2~I=m!<&?a)i&VYm!o%X9ZYqBjaSJm zFF{5bvX~*O)%w`mcoEM%)`d=ok%1e3aU?G_Avd-OKYaRyr=fd5<^4A?x{$^Ed-wch zjD{KNY1Sc!R>O+>k<3Xda;lYU3imn>#J%$8#fdbJh@c0U;Ev{?JD`M(>0rf=t5t zrC-J94y;gcD z<+H_=fd1J(Auo6&L!>iPByK8Kq+98ZXN66HuVd`b{KK;^E(7ehT!ZBtifItg!o*ir zudvCc*!;892c>~PHCZn?z(jPsswr!w%+Ea57=nSTTFQq}(7nsFeq)}z=izdsb{Ekl zEX(@jSe{KcyFjnb1J&uw>BH%vqfe$YjUP=1>Q1FLinbP)QH!eRJWAIBN)mjE4W)pY z2aho|G3&m8LEvtOxMga^0>oKxu>Jr?nJ9=<*N0&RrjCtcD_7kx#+*nZSS?G}F7~DO zdd{c8k&EDxC_&Rpd}kDEcb4ptIV?_qaDx79a!`!XkY7UWQDSR38YmQ%!#oOR7=J@L zc%YFE9XHb3S1%G{EyDfESl6p*g-an?6yzx~MJ6U;V1!t7Pj9gZCA82Jlc8X>=&zYf z(=l!5w90uia?8R~d;Lk3RM+f_D-nauVGcAUmE43sUQ< zlHo|e*32qqeibl=Ow8yuejRNPSz~KzYPsqTDpG0Yvv9AW>?u%nlPiRzb+hc;du|eb zxn)^*3<(6c0#@NFv5Ez7oUXYz7zy8Fb%ROR@#1-9KAvNJFYASohmRlzM@IcX$7uNo z8CMD3jkcCUWO1E^CjlrNzs@rcf+2vhGDc)Bp#ttvh&!tiCBkg-DoSNRx0M)j1%QFq z0WC7xOmMJyxiI6?8s{xz(e3PpQ-G^D_&j@HkN#%18E-a5E*FI3)tPW!**RR8 zd;Mr!-iIN`K&5e&VXwz1;uw$1#vGU6GwH{5b9jd3vggmfg&)C$fBdz7ABW=gxK7^^R3@8XYN;i8+!5; zG_Ob?|Js34p;j7Yxg}nDYd(wjVm{Ok0V(HFr@Bx&e zte`j-XH_ysktC2Q3euKaDoT~1OxuiRRcs=BNvE6liR2}I2hDNtCiT0$riSit?^=5O zWy;M^wV;eJz$Olkre@sx%rBr;Xd0Xkxk4;Ibmx%SaqFf zNB<}bAo?;%2D`|>#+C*nyvPmqCF`{?h8_TITQ#JB9}}z!@ZvxB>7Pq~@EiXz-F5Q+ zZ|X2-{*bejsJV^t!Zff zdKz8oBbcL%;sQ;nWiN&~ctYEbrqoc^OmT{V^qJ3m8XRSNI&=Cg{b6qWf!F1(Q?ZMp zG^MyV%Jo9P1I%|0%V6KB1R;RJpWO>9tX~tAr-sR4`u>@-B$&tO#C=Ot+8m`)f*Y+qG+jnUaIfbCUT6jJH^dbW7qQ78eMtcdMBY5SG+izo@0rXFg za|S39Ra!$}1!*E_7na8P#S(6S9zkUq=K62;QMJshBG6+GH-ec;fNP*p%zUOTqXTmqXU7N(-!OdiM= zc_?8-eGgrsVAJ3@(~59hMNtt9Yco_OHF_71d-P=kgR?x%e$1oPu~?~LC(+QP4>T=P zDSR%EdDYmt@6b*+JhXE(W-F^avr852f*PlnuA#ZFB zaxi;ZjQLxyo~GY#r`O(iHE?1HgSnCI>y*?sT3pwna-AnrgPK@-4V-$IOxHJtfLdK$ zo$h76{A&8- z(?1g~&r$$tP~6@q^b>27jvq=}6) zX|({Wg&hRblY^kC7R8J+L1<-!lgmg1-a*qiOe9-J!0*BmFf$HG^8hhb98JE1p>ilu z>>*Mnxg|xy7*%Bku#8MXJVpV_jkEC6}dx;VFrel=VD!%r8vMm z;CFLo8MQ0Es1&RVD;?t(7iIKH5kNyfUm+^mQo|v1au^N9C>5=FKr(^42Afhb=9e$i35`mONrIU~=3ehQtWO6*hEcrDZ?x zx5PWeGeNNDI>?%w&qSO>J0>ekc4{_;3@}c8n>(# z8k6r#Qm-DgYGJ?xId$YCSZwZ2s#T-R`jmQojI#Z*sLaMQY`l4jEIiE)Bx7>i}8#S_^-Bfc95BoL^ra$#yC2WSc|U7WEs z9G8p7y6Bo`HnI~&+X`JlR9H{xi?1!=HT!P!L%;A+wi>I@qfu=fXlH6;&8$Pyq6p>ZSW_mNZo*e4`T%z%>(Z< zz&d(n?34Y*lLo_e@6MB}Zg{FU9X!&Kj^NhcdkC*DMw!7bqWtla_2czjA>sHk=b)v7 z5~@T+IvZBwS;q};M-@_?Q|HC8M4^vR5E(nuCJrf{9ecu$7_al@lG;yJ<`$yHW>YY4?u&>Yi!CK0|EGuy_oIE 
z@D#?J$EqNtuo(R3e%^rR1WbZ)mcFTpZ+xc)|b8#Rp-v0e=)uI`qxoDfZvd7 zbRCG0RjE3TRHT-DrNO)2KX;X}VhjRWGschWZvuPIyJu7Y`5YE!m4SRb0(Arhs>rRc zAfLECEW65Ha|;F;*3N0lyG7bn=S8>LYVZ16gj#4z%`_`NqVfQ`YY>fnFlsfUPTH=qH1* zFD2jRp-zI*&bf`!8nUy>tougVW#0qm8sDx*4)yLU-DFI;89b*7q11llB10}Chux$o zL^*5p_@_>SL+=3}PNXp?h&3{rtRvJ{0b>cPHVSY)a>@ur5saEe03?wpNByT&+qYlnu00Q&>gL8-|F zRZ$4aCAV||{@Ot~;+wIC9fx-X7Wac!E)gg)51OPJ1!N8+2i3hsN768X7}BsZgJdyC z=OXtk;4zvakk%Zt@_6Uavl-Rc;!tDEMzN|R*yhV$`EuIXxij{_ID1X`VHw%O%4jKuD$m9+t{dRPF!1x09#Ukiv@T|VCS$rmq>UJIKzchQl0K; ze>!c;-H{g7N77U}l?K*cN*5JTA0fmP^?+pz9!4oN@C>^ElevY7W?+^M8id) zU>*eb3X@Sluj3JbLE!6R#+6njVJbqZ!MHu%0m>Im))d zAR#)sDA(wad(o@;3MNmbz51%aY(`}=#;H|MZ<^PXf~6cB9g0+1<~5ugV($srUq_{D$xxc-~{1u6Jf7<{H}exeD;nU*^$k z?1xeQ66^Rpgw1Tg?jv9Ee?IrQda``3!oNzm_xQJGT_Mt|jc{dTei`@1yR0exX8*;v z+4Wu-N1$X;C?W}SZ5az|U8oa{piSs8+ zQ~`Jv!;1`aZLSCiMpv90VKifX89#hZyc!A$JPrke+AS+B-7-(oL z{-%wXZ}+wBy8kz=@}?$|7O7OZ4EaO@y3Sfg^=z2OGV=MdkHg~@3z9uA3!Fl!fQ|^a zQZ>6lLxe9e=1qsMKX5+ zx8{H+6*Q+;e|Vn$LN9X{)3#m9q{2CpddA)&$17_|+4d0CF#?j z{y4%Pg?{h|Fzif`q1$)ddnA44m!C}MF1?=`J193hK$a4$!Ok6Kw#enVPN4v4co9}w zQ3dkVi9X^5WF*Y>1O~Kce(l%O?>+k!<|;3}arz(9w_p7xWmWg4zy7nA)A&^kW2BQY zkgg2xxUk&CjpN?ct#$`TfP;nBApdMLg6I`CtnH07(FH?7$Q&&^{{ zG>8Hg%KlwXH!qTM@d|y*tz#}ZgG6M{1Mw*ZZez4`TSrqEZbo~T(~WbJRQ7_71{B7< zihQIYkv-`-TtWU?ChkH*)C$7*z>RU_BF-MyRFe8>8~o}sKlkbMPd@!q>9L3JOD7+^ zGo5_m9t=%X`D0Es*wg|K)5)wCM~_xH#dz{THhBJ3WTP+*@}rg5gg0OB>Pa_I-Za*B zUk8@T`1RP-A*>qVKLeU%W(LbX-XM_^MV!-e454*EbS4K;c7aUg3S~yY=xAw6M-LxJ zdpp|Gd#B#QouwxNqh1>j!iFjgyYMZK?vHm9v4g^JsJM z{YCcS8p>ZGN`?WKlhBJ*s>vaK1zq>dD>G=6koHewyxZ2+p1%C$UyF11+8b|%Qa3yT zTnxG%at}8jxT8J&+V4Ia*?Fv#w=g+RpDdHiJ|(zM?kD8z?0X-i=fC+)BJI!dIS{i! zAXJ>S8FKc!7d5JAkdnU&JubsYAJ%_pf&5jusu&s8VpDl{eM|+wv;ja4sQQ3N%w*yw zMWb4n0v-dQ)jFAvYi|wg+49O1)&(r-9Eh1oOEHq%xxNhx3X51!kDFCS14cRrR}#!i zD@I*abE+%vWB|ChmL}*Fn}^8R?xlCqaQc3l+2~EZ(>K6r;i_50Dg^wZkjTXqIy#DW zI!7SM>Jmh2iC`NvWi2cwWE%(bH-hH01}0ugy0%CdIUCdvYfoej@B^)%Cb|`KUI-Hi z!h!2@b)q`*m<{)jrhf8{4fGD7`Qe4&U=)$Ar?#<_*wJ8WNbRJ% zAy*h}-g`Sa*>1Z9rKJKT33t@A6xH*=bYLM_O1&c&QR;9> zF~!Cfd_nYAgx!%_~0Emxd9b6L|w_QuR0a~M@v<-ZG5R*2YbVHsrob=_j!buHaBt=_ zxDtCuiom+qVyekBTW$Nd;IdnwWgs@{k6U zdA*j<>gpQiE@P_hgZPoq$BV&nuL52C=Dine3CF<@yoc-kZ0on<6#gVZR5%V#55H9k zL$Hr)GXfeZST>)r)rOC@ExWOm4#&T>)bk1dapwPjFTeMdU6*~Y;GL_W>{mb8Z<=sC zKHF>J#rF!%{@`b&b(no0_xZO9EN%O@|M!0}5A-=`0Bvu@ z6}V@;z`GS<;(FdI2!*vazRo_iKMK)yE>n&+8L};~KJ+@Cm3`0M?r*>2a~Xj`Xgq-T z*?KE3Y^|BsJMQREJll4%m(O?ZnMc~ll(1~QV}AWCF7^3YJ3iAX@E)I0$oxn@WB%~( zk76~|9iB(Lm)AKT78L2bKAFy)zM1Cm3R-1paL`mR7d7yysIR$tXAEANS+{!PUZ@qTxF$BL2_cG6P zU)z?(uMMC!u9-(ioViMev`#@}5#z#Lx9(1V{I7m99Xhrny?^>bsw&u@stS*!)|wOG z~L7$ zi^yv6@{4Cu@5RaR#F~3kD9y=7ZcU&2)hE)q-tPt+ZV|6M8!I4+YP$<46PWg5T=|p~ z&c`@63!JPNJp1HLO3?xnR-nA&{)f^>fJxj!0PffR?*B|bI{kb)dGgV8`RYZ|0*yqq zslH1@8lD(ec?fv)_BK+*Fr-Fx`)z^{Rrw91S7v^*J66_jmz+OkpWp;fe+At zq{29hEYo+LlEA%l2z1Cfc;X8QelDdd*fg;%!vrnYQ8K-WtXQFu1xivaOI`O{CN z9sAnT>37bdj51Kp!XSe69Vd??ivjOZz{MLathlU-m`>0a{XOG}+D`B#Q$rS~XTSJN zI&!FRV)9?6lwko5rXN24-SpZIUZ%(c5GK+f!R*sCbEh%dOePDUK8#yX5i%18RV$c( z$_rD}A_s%9nQ2N`L+Q;tg0-Hd(Y^_Se34g>M+!dJxv1$6%cLf;zDyg1oflQ54G)2Sc3SKDbi04`3hqSX8 zPnhzpo(Dbpvt*lD1V(0zUrlW}UL|CB=0ymivP-Z_R?a?EW6UnEEFk4(Q_K(dfVIXV z>!22Zj?4fvDJrPO7(#j;TyGVPBnd7b;bJxcLRaoa8G&2kGO6Z(6+q#n0%23(&cv)s z%VT_|l?xX|KCp{Q65UqqPCbj~)8z8$R6qdI45@ieE&Lzk>8wdLRFSAI2J?x=R7BOX z5UA@j=W8W`T6iu4U#67o^cX@mTAqnr zjpkp5z-iIZ@?*e}8^A=&BR%t}>uOi(0Rkj0D?f*bY9=lp|b`1v&ILZpzria-UbTL*`SQ3+T7q|;oP&{(6zHh)dOVRua zi?hlsb1G=$PJ>sb+wb0$jvPHm(DZPcT$o5bs}LbJs*2Jami>aZ;;7ClbWczlY4HGzw)t|T24ZY`^n=$V+K8ru>S zTjrRT&m>`?U!LHHxREu8o(En=Y%Btqmcv@AXQql^GWI;y90JAoPN9AU0bpp5cnye# 
[GIT binary patch data (base85-encoded payload) omitted]
zD_=}UM~|heSKozKt);~=&$z}HFfC|6hmKxbZV3G_E{qV%9&b_-)U)b)-b+KzwLXCG z-B&*|@mXUd5}={kI0}3&g$oljXUTI~NiK>N-aMCXexUZqQac8-xAf_&R;r{ zrY7e?2G-)ZJbLUb<3$$buF{L4IZ8h6uwR=*V=M}C`shGvCVGwMD!uiC*O5&qwG?OQ zrLV!t8pH90GhiGjb@Y))-r?Dx|WX)l*jx{2?yf@g_mR|VU z^T3>l`hcEz`kj4J{-9&zCwgs(07{i@#FpKC1PL?RDE>}1T!D;2->bkin5v11 zvh%hc)V!^)Zcn{EL#e8DncXC70gzdlUD`v-w?}hD16D={C1_xhf{Dc-DE;=RZCOAX zvxsnZTyYH4>?mCrm1uzxuY|fZK_T>zV7e}C^LVLwG@Tz9N<-jLl(cK$kko+G1a+=UvTw%Oo&tK*cK-q^4w0 z)MI76bdPJE?xR7P5V4QT7JEC!tF)J5B5c5-xPRdaVk!<)>U29@6J3*XHOGZ{!}!%m z*hc=de3I^=eLJM_mEd4#!Dcoekil#y;Sgz4W{;}(aQ9b_BBj8L)OUwe&%mWhU8Fx! zOs%4l(_yXHz)CP*2cq_;sl2j0$y3$tE7E&!{fJTsa6w}0W;U4&;=PTn8XC%QVa$a+ zk_NI@=b`KV%WX733bz=elULvtXL zv76d)O;|J?)v0$F&DD%c*0Bnlke-NbLIul!RW}#eM_ff}V-2k}>9Ob9Q}^&5B}SUk z`~-L}B=Y$q7wAxU6PG08fgkAtH37%-Bb};g>;U4Q9tZ1i*c{#tIkV^3Mj)FHa1dJg zW-MCIRs6sTcmczb)(f>p=_aAV#f3%A=T2xTYJq~UA7!l_^Cp(6u2yR(bL+_D+sBi! z4n`!f0PL&hEfR*XE_HQUP`-tP1>4fof` z*FJO!^DFoAFK7r&-t(vS=-x{E79aO~{Pem9-C~rS~g@=K0 z_zYiuS9hiZ3wLK|H_#j7cJ8*B{b^XzhdTkr<5b))=j++hs`I(hq*sMy<5-~PD97>O zS<<%8bp5qT{F1zruf})z2bztG;yP{=r#5cR&pt1X8()wQDF=Mdd#crD<6O|W6s;?a*k5n)2LW_jn6mmej3){+(| z?5`KWmh#(pDga(^78omhrK`O&-MldgpT_e74QydO$!E+~7W?DA*=67{kar-9n9~;SSG{3ct?S}VIIo_HPD;Wx;ssF-uJOL!%7Ci>Il{-d>RId0fBk3E=RW&cFq&)W)xY{4^(Swn`KcoJ zBy0gGyGDD4jMmo0v$pWA|2ReCXUD-& zamz~hs3&J`dWte{o9X85JK;gJs>L#)|GQL!$W>9Tfam#4q<4&$;j$<2Oe&Y81KKkW z_oHEU(qxH({FIW~!M)BEe8-`C9-%X2j%VKWcfa)Xl&s8x%#xoegSEz`BYwIfWEZ29D^@$!i{MB&s=^wb%4k1 zhCU8RSQ?ucOJm6UW$;)w3(_KMYX`4{CA7J>9Q2L&rn_;_88HIh~JUMVbm3hp` zI>7P>)f|d&Yjc0<>pF(Hgn)(sjawAKGen_XGJ@&=g0=Nw8dA&8?3qwpd0f?gGch>& zD&x97fa%CN1X@`%-!`K%V#g_hE>BT%!x93`II*}sE7PL`$LN|xad2GBTU@(Fxf2(? zk(!PQC$R_YVVJZE<|1HkO(`>hovZ(Qq$AD~rCZsLurV2Cy`wJ`ztQ%h;J zDqW$}g0McMrA1mOhHJMFnPCy|qKCzfQm&+K6B&zwKV0~{E90!fO}L9B~%#JTqU3VVJ*#4(^Pjabc-pNWtb`eu{>WxuHiWMK_lzA7lpk<-B{azL1i49E9nWT9y2N`a{?(;+R2`63KMNl_tTzTT^U zpg&#y)H6JHEltf#hVmb-51A3MT-P=51~NyF3B(}t%M6Ghj1Z<8%({4v9vh|T@&M&3 zdg%hv&HCS^28ne+Y2_F*1$N>sAk37rl3d9MKg@4js%ld1alMdTX6>k81)7ifsGXy7{!=-!0A+y z)wPaN{m3V~)5vMUj+8g>eBS?s7y%;+RDuzv{S4&lOG(^tRpHGsA) z>5ZSf0lkuN!2C_r=xFaIoPi-r{lC@43?J2r%e0Y&|Umk z&@76LhO%_(IAabjT>Rnh;J^IpJMr_w&s4EpU(Z2&>XyX1dTyjK@2_IQlJFVVvwz-0 zeh>z(U*nZ2@!1olaIXpe%Fm7`FhPEw`}invLfKEU+{}pc48?c( zVFYj=p5=dGM8tPgO?dHKc?1^(4~Q1-glKgTRA6LoT6P0Rfwdp)pl8RqKF($v*wRLv z3Ef~Hj~p3=9-w2CfEM<72_sU4(g!$q>CCQLAA9s7;YRLBNJ@iT9O%gCXgE6l_}}~q z-?1zm0Bz)+JXyKr__jIw@+eDzJS_L4(j4Iz6l>?EOJDq>$wxX!l zj;+C=fpp~P5%9LN@E4RJjMQb^p6|c?A^dA0wV+6u*oKtjEJDwO-Yf7|&drm7sH>E9 zE@+r+!#{D=Zy_^Oj<#rxzBEs_z*!RLQ1-S7g|<*%S@&t_a0}z8F&(9gQ)@Sw)8?kw!;Oq~7knIC~$wavL}T@|N-~I*619(!IP{ zS-MNITYGygN)XstjH#FY-q+L5e(7geqg;CR)xSm&+~$n7P%pYCeeCk%a4Z}g)K<1Z zjEZ@bzT({+6(4e74_B8+Ji|#4iD>ZC1L(rvgJ)v!__Iz0=`f5T)+WZod2_9$YhUu? z_?f+uS9`|z3k)u`W&2nQRv-^n(au$0|JvVyUo@nTKDvP+RZffJ8)*?2_s**FL3eg_ zYVCs_;n$yd@)^#5E*zWNIBY!EhQlhU&X73;pbZt<2mgfwg7wmQvj@H8=<2GI)dU5* zvsw~I+Hq{5W1z5_V90Hw)20J0zG65BHF{ zjN2SOV%BjLdw7TwK&P6p2kT)5zu@d@NO-LxczDvPI{^ z=&K25K6KDnGu%=;!VnBK>i7|2(@e;vju1sNwMzHduL4PU4qn-!5g4F-?xk_+ikRG> zpGCJeg=~A{-7D$l6vm2pU7o2! 
zKz-rF$z$pBFFl*?-@nWFcGEh>fsQ+Iy*22URbn9aD8m!XpTnX#?g6f4K*nY5EE5?C z49X=ChZ3z0gK$ykI7D6Ty-Zwhm_YVTHX6WxFCINg#S{TDZ;~m&)Bu1DftVRYj&9M3 zWFDp73||y7GEt$B5K8&_9VSm3QvXxYpk8Qg#Xwr)=R3r1wgKr!1|DOHtXJ2IMQgzG zF8eNUZ&X?~4P}S-RVs|hz32a28r6*1&V?AYvAs|DyOh z&)h-5+AG=i5oU)FQQ5ZiiZvP60hfEIExz6WW7f;h)u~qI$f5J_7&j^a@;Bvsl`JQ_ zUsOO;9)3HfM5!RxHkbiI4^IK=76-@75D15L&SO#JL0A`Py=>-_annWS(N0ECWd+t8 zV0_CQ8IjRjyst+_=8iF7@nV^Yv?lA8jU=YRGWcqBLwfKA)T zG!jth8im&kg~o_YHH^@GgkS;;%-5kk_6kF>p3LkP6v5%aVJ4H+xLl?N-0eHFp&WT2 z%_6FW=`@jzU5!E91{ipjvNY0-HjKu#F;0!=gHj9UP{yF*K`fC4u+Aq_2_xYyp0?xt zC!*WV67fM6&kn0wHKjGl8Bf&gagTIr=L z@%#XJb1L+_>)|pUDQ2e~WA`N6vnp38517zRzRPfVhKO85##`*GW_q&H?8)oONuF&xd#nD8! z+4-5YwmgF;lQJ&+tS1|e&$=K-%r}wu@?G0)j~+XgHs3=ou-BeBWz6X_NBJD*7C5~; z{~|PTq@09j)ug}pv%ge_KpqiIW*_w8SK@T@drTwN)Q*#Z1K_C|yB0T>fm+tcrX`+L zYY^^(A*HHa##^A(o0~LimkaA$=aCxw}+kAo{J*F;`km!@FHz ztfV;%bP|pdwH@n^h>iy)9^mjr&h_OdhlsNGqz;th|M;D6F=mqVsJxJGJ|9hY z#Ov8XPGiu>MUiOv_fDRY#52H9Ys+GUTYdIp&N} zlY>qMzz;3Z{p|XWUrFaskOz(prseHb9B&|1_)Y`ou8t(ZcdmaJncy1G8ptKaGc&$| zQwm2fI^G7=M076gf~;69E~Wk>1F&TAkDJhy^`uDv#rToNH30VE8REXApKhFR%8=Q6 zduiY_kqqQm&qWa$IGjQTN%+hiT~U9OyfzGa2Dm!0mfA>~vBX@Nv+Q%?>~VJN>}sjN zPzC%B>;4wX|7$;dHJp>Yg~J_Olh?v`jHEKCh)!O+axHz~=U+&tPn=IbdHqdvbsX<2 z(j3=;G0*+nFZ|5Q4%*F<62ptnm-YIE2Zbrg90UY^hW(@ahwF$x_mFqxm;E)RDL9#=!!zSihIGC#K* zj91G|X3rT5=0`)g)KH6{hrsk2TbNv?C?nG&E<9ilMJ+(U-i%Fk9x8?^s(Nw+kTwG1 zIzgz|0pivX&pvPy4GpR| zGdG2;59e2j$CJX#P*`}5p^Ff_5Tsc@dfr+nM)Tte7}N&3CerAr*MTy3^Nw z>1*lAmG{#tul^vEQbT-YEboe{1=a*6NNdJbN(F2KZ;{eH%pML8r9w>xsv>Sow3!x$ zY!8aKhYDaOo>)sI6<}~%)PU173+b^Y&YM4c=a8fnL!Y*H4TXih^pxt!(9rXl(w&Fd zt}O7uSdR=i%i4#OdbmDjkpT{eiKsP19aw~k$qh$Fj;4{L$H+cD64}qQ^Gie&CeqZ@ zeT<;R7>|2oH>&`>?&3K|$kqc|_3+yiqXvb)f1n2sRBu@Jjl-n~-;%J>j_jNw6+vKeYBaw?x@rg?77atb-j*;)hov z6gxlmkF*FKM_8L>+lASig||ay*9zr4{MQhwJ{8f*DjSrHqJEGnrKwf zcbpo#`(zi=V|^FzK8p4Rk+E7ppeav}d+NA30UjIshQAhJOQ>r4{?R} zk#C@wJkWT;k-?Wk7QpbL=)7z6;HA;QSNL`&E99TyX@=&cm-t>>uX3Ns702ZFVlI9^ zUVP5aPQiaOZz75%Z}>;vEN!_i!K+=fh!p#9){wh}LBk~Dto!+c`~FmZu~uP($CLTg z3nU!x8}rS^;u_g<*n4Vld0%UJYOq`4&XOi3eXZdwwd9mnmT+Xq_uGjKb#?a={TWDS zPM@IDWN$h$IK;WxO_P(8>EdH2;Ul9cIyfUV47p!NR}ba%>eBbV_kH9Ht=TE=trBW< zQrQvSedP(~t%L{I@7c*L#>aaqaz?MJ@LrZJiN{ziKa_))>~NMf=A)H8_i}Aqd0`>G zXYEgE$>c0EVU>F(L$TP(SssTJv#9K z4I$zPpSKvh=Oc_o*4b=L4a2#~#gHl1(pp=f{VVXv4WK{MfT~yL@eT_wA~Im4&8W2m zVU5a7V25wN`5xn~NaGV9a%QQUIZVbs2KeNSIrsrk4H|stu?_jk{+R$VOYKmraGX8; zD8}4S)OTIS2%=s?j89t9GrbBdL}OnVh7J>>rvk%`1j$Q{dMVqDmKX(%iY7ct!z@qv zGP_(h0lnxS=}o5|J(12naXg)W>TJ63+-SP=-05`TnWO3a<L*z zc2&f&YO{w;ywde3`Z_IUKN!cM3Wy&*T5ckVk&a6+H7@&=q6DiNXgM z9yC|MDH#S`oL}_;yy@^qj(YGe*RTx2!ib;=IXY2w(fEoCbbi*c{LCjGPmeruf#j5T z(#-5aI(oLBDi7#cUY5H-9=e8a&j2(KQdtgI2*{1g?n?9 z(VB>)fhH1Pq~8*2BE-|=z_z}g)Y;dSme-e|^8M5|Li83$&FTWMRw7MP_vce9#-4JG zVWV#}qyKQe(NWEe_CFnf5&7nfm61#G0UpD-6Xr+sWz}dN1|$wfBi}{}4{@#rR7yL5 zaYwQe{2U3u_>7{n_9QqcZ{$xp3_P3Cygq>v)lh=l^O@_ao+h6P1BCTa_l-(2o@v%r zo~0x8hu?mU%>Q{3EubA_?Y@bT(6r97+%J6frI)#mHxN$z-~+vh-Ey=#ZA~V=*1^Pe z{FO0czQjW-QF;w?D*V|}lr-0f%{w66SGvzak&%&lpjhBM9IC}FEK=}{2hPWQ&P~#XJ<+DwZBkbz)+#fq@G!J$omf4@Dwm;j z0of({3s`YLptT)^n@CkumFy zQu%Mh9j6D)dVgALVt(~3nXRkm-7F-Tj6&Y)H40UDgaaA&H$zYl7Sq?uU&H#K@ zX$X|h;ZV?*%9kVty=8ovbL4lj-!)u4+veS9xXSdlH;Ed;!#H>m8+I4kJLtzu&Pl+sHXEo$%>_S68?KyXZ`0I}Xx#=Yfjwnib z1@9`%)1uMbQv`+K%*n|^A`2Q0&?oeq1N+)P+zEZ}qOfpg;fr}-1~D%bUJV)<_tHFF z4abcEVZ6n8ZlJAncksMT>fd=D?ToWU)Z@wrAF@KO1>eOP;Gr~M=IH40G%|7&rGeI> zc=o>V#TWUvkgi?7h1ZSJXYfvQemr|N;?R&ade=z-?G}VgN5e3XfkakwN9f@SH$%Uc zTGAL$=(sOog!ycKcD%l%L(fEr0oFsFrxEEmRfgTOP-e5?#^<3}Bb8&XSqqK+zqw;( zXm}^#Cp;i%E*{1gocqJ5Lny%f&SQDsbL!YKB}jY1=eyuu=jZY!ZX4HV04rB~=~;9z 
z3cKJl(4u1t_3lq(9DHIB)bOYw)5#ILAMnV(FP+)#5*So>77p zU@|IZMSkz4k+kJO&d|v%kVH4Co+;5L}bCTHu6qoYYFf5Ax^0S zl%x`+zIN#@R^SW^9~2I{G{2mF{%c=NfBzf5#`%@M7U+t#MwJQp2j@h%B2vX&h+5K- zC&4(R`EXwF+1;x%X?n&eA<^u*7RH2gjrK#I9D4%%ItyM{u?&O`Rv_OZzjmcG_;A_B(HH6R; z;D>h7Tthc8ucwtN0+zFDB=4i%Dh7H}cxg0Prp_Y&3v(+(JIos*hopk)EOhf~rEq=U zNLxDn$YA>TXU?WieBqJw=nH4*fHji3$>cT5-J0bZbla2-tOHF@(*%SJ17K-}{0wp_ zTj3{*3rolZ_^(bQbc`Jwj5;o@y3h_3nI(%?I6kdiEZoX7W5f}tb|~$xtp}PHpJ0?4 zMdy6rtP$#(dGvF1%c4W$3RO|m4LB1joM!E6=$pM=1zhR?hogIFDGKFh7@F}s@2-rb zP4C=fx_bQv5o2n!!q2;hnlQBCgG8|Qr~pz0jUJF|sL~&l3cv8>{(EL@>Fx7(a4uTX z&Rjz0d5FWO0vKtjVi5&hIYXAeXPmuL_E(cj&^I)ItN@nUxsy(uruOdiYT90|OmkBh z@#uP16R}o!ExftJx-XH#XT5ka`|C77unS~BgH-4dt!iyXS4%_>08?PRP->|I{g<_{ zTWM`O`__)U@5E_NhP;rn8XEUl1)+}mz>&j-9un{=tqFO%b77qzU7QIh^*qOED3b#@ z0WGLg=7F3CbLK3%Ql3fwGk+rZ1#4K#!a@#AhZbA7 zDn^Olb8Vem;lN-eGQIeQ-(ny#Qbt!pVOiATv|gU)UYW-Te?BcbTXEtNQlV?k!s;Sn zmVvN1Nh1zM;@!I1PEeTk4A?qw?xd~XnH^8_tK%pd;F~Ze+Uo`(lr@U;uR&-K7@mwu zyrFiy*eU3x5F<%=lg|{64Pb2ZOkAQoJb)qW0jXl}r_9tyNn?DhLe&bIwCpTHCaCFiS@dFCNN=N-)c1QUM{<$+(A}>O5bvtV|S5 z=VOIZGnOf{2<5%vu#wL(0)PkUMRIJyOj^ky`2V_Kc5o?(X&6Od$vz1gKLAjNH~ zZ~?01d7?5sf5Kgj10W^qzA-LrcT5BxNfMBEIvxqjG=lTO6eD%Nm=RZg( zivB)>w^5^VVG2bB0a3+Lp^;-mh@w>^+OdRy z6@)S-16w4<)Hro5RbBLZ$JdfsX(4WnxZogxTuT=xlGiFEEbo)NtYyO(&rO;-h`G=t z_S~}tS%CR<5UqL{_c*yG!iW2zz@pVHEMfdDqqW8M7ryWXtan4{0|dtR-+vE93>e+3 zgX#3ch+$mdO9S0rv=G$PQc8d}yIXW?(etB%rJ_|Pd5MPv^tDol|#D#al6vq?=Ven8|UBI3VE%$KY2(|3d@;p7( zFGH88LEG6;0%n0%X%w~Yw($rQ5d2Fdf(_wDEmGKd5et%I7#3l!kqVbDU7xG$*_1*C zMh8QkR8maTyf-#Z%hG!Rhr0By-Mq#9C|)oNOMaBG9)_R#cVY@}QB> zk_GC~s^}C@3T-=C!t$0!sa%)w&x#kCDwcC-*y!1|q^ZfXm-Z@|Rt zE@csb5R|1U?78&CSXFUVN*A>@J!DUgv$ANyp`b0}@WTk@Ibi6hxdG4OUs%mK1-$~n zFb)_^=K12X0~+Ko8RG^A{EUJ9;n)H%6uJ-F16#O2I#~^@t23Fj7#4pn4z?r>c<0=et+l8ghMI=(4KPY8U0ztLV2(2Vn1{g0xXuN#!X*7ur7xSCt8!rRjN4;g z{0jc2H8S&dc%vWk9N;!9^T9y)=j9_R77Q43bGU5H*~p5{hH$$3)^yaO-k{J!$*@Jj z3vyM9)Rvx0*RS6QZ_pkF4GU8&5~8O;rv6A6>^2g#cOewOWAxnC_rXud!|5C74TW`j^nUD_;k{ZAi9kqKKfaoJ`r+Nc>MR0piRU+B zh%h|0k=Sl!gGNdU86PkH8$z>%x6Y6jZUTqdYiJ<{5xmW7d~bM(9>9I@Im)}K;2`9k zIC+`^4mU$sUs}SzA)Fy>-nW(_YsupoFLh%0X&{FX{%kiW;Yv8OM)Kt!VzI@5z(d#? z1X9lO)_W3}lm^e>fuod}!ciAxRZ4VJTw7mA|MA!VL%Q+tohZ8B1n=D<^h1xw?Bslu zOj?}5GEErlwad2x-^D-iUgJ^&mi~I`4L|b8`Si8to(+(ko}5f~XXn$amoBBTxx3)6 zh7Sx#8(9$yEw+1%QLBtW;;(s6HFfZ%Em{e)p85vh3yVubv7~e}r!lno~n=r9!XF zx{Oe6-PaoUdo9*=VN$-Lv2T}xB?WTJ+K*spMc`f_v}Ol*R{`$sZj12PiLtvZ3t9?( zsVJ#bRv6n3wpGZK%Cqh5@ZtK6boQYKLgv%B=5^+?C?}MtPYvP&b1VZnMC-D3xJ|MZ z4wvuKr7hxo0DI=2!!Y^Ls|#u91PRN?P8vPs+4~fBIeqdWlmN(O#$zQ>SJuo{dDW* z2kD)UUQajgz5`KOq2GF)iF1&el7VQ&jBbHA$ImZF0iCHP843V=nt>G|&aU%z8 zO}DUy19(;ku{r|a5*s6f;9TP+pem>v&#dNbK163v3&rXw;!aH^ErbOQPbOp)7(FIl zZ<1Rhf{u)-He&r16p1$mKp+vZU+!ZRn`U1GAQ`Q$DJw7)5F~ZyXz4L=mTAP{>E^N~ zX$Li*x==Q>xGd4`ZLR+4W zZ$x8~94@m{pixk#eQpD}5eki1l1XCMKy=DQsye zaW2BJN@;{W01qmrhKC6gj$t_2I^k&RR%@cJCm+Z96q6$1mV>piLOtuqS*MLKn_AAR z*xVXn5hh6H32$gDl3d?&ie2r82x1in3_Q208gn5eT2$Di(DUj%l|QhYwPQ6i>|t(( zy1^t4YfPm~cvY|!Zpz@DRyN($)AzX^R4BtVgrCg_Puxr66MCoce88M1sWP@o zm7tDx+~@#Rtzi`~-$5FoNKfuh&judboaD~EdkD@Fdr$zSRwCejhBpYrt>Y}h-RmB+ zo-oL8zR^Y(eD;i;r}LLa`F&W$S!FN}?i0#=@kxv=^;R*D-<~sPpTjEVGeH*MU1rRU zUMDgfyu=UbpX(0K5%0a$>msbt_k5-Tppe8{uJe8^aDqg*=@wkMG4%RC@bxqlR#~$*LxdA4I?-N_UfHGIaqJL!g7zvLf;gJ@W&OLMr zPeE5CSz3i=9!u&1Mz?tcIhDT_X=kaIy>ox6(nptxxyCALNTd~~T(^7Zv$Yy(m1MWv z+Bp9LillpMa_c$;wWaMDLjfMRtoJD4MeQUe@QTndgcHL#3{z|AYK3QHVKX+;P{>u$ z&*NPvcJyI%gJ)R0oujE?)7|%83Lo@zcSi*+&+4JiJPhv9$rC|M8WBb=!UZwe@1z zrXYd&4IF=1dwT{5&m?Tb{hYpiAKZqxGaYlo^uW808jO-EP!}G2FdZlHzlSiH+t+C? 
zPDPp5U!}a?la%J(5TysX>n>j zElq8wUwYveS?_kbacwMp{N6q2IL0hI&6vg`EYZ%JhIn=Lbf%%xcx5{YF@?V7yK2(W z3w^1!qydk5%&cn@f+bxpBCp8Dtm&yoAVPwv!XyS664a=%O_)=Tg16TKYZ?Xh((GW} z-ruEbXdnQ44bQL{LkXCw#gJDaELS>Y6)1UXsZgK^MEWekKHtMYzV^*aJM!WxT3PEfp}XXhyPTnN+@QSxgn@@l|(Vn z-e7T)8X`6-B&p8>XVuIxkI_^wtht>7$55<*d6RkXT_@^-vx7qL1J8r^F<|5jNZM{exWJ&cZd;Es^xdT3*3S4X;d@yYbm zr=H?`Z&EB^0q+47DVLCWfq9dx5MY>5c!!yqjYSND6wggS z&Jih6U~L-7jdl=*U;M^b)6HwwP^@SwMd)gwj>cKgjNDgV{PN3T;sd(F9VB%WsrVNM zs^EYC5T9`fPEPCGCocsVJ{LQT`>IFyX(@E2mWJ-s(%4DoiEfG^cZQp@87uv2Wj0;8 z@oFTGIA+lDL~rknDH3-l5a_YMV<9LJ22f9&pH+MAUZEli?R9;wE>B;!YjndT_B;|K z!$m{Y6x*<_Kma2IfolxA(S7N9oF_7Tv|6-6kM1km|4BGymRd**BNF(x3A)qDDPyy= zo1si={?JFe8YFRV+}zCi$@nr15YAFhSs009qN!rt-8Y6J5Qn)kwoF~CvPpsBg#rk0 z#&(cYjlgfUk(0-daz7PKW+?}Tg;B;UVz@TDeRyV8wJEl4<5{SNh@s)*YRaKC8zvP5 zkuf)L6ZmT{{(}E(9czaHhhiea z@R^8YA@F8Okkvp87@qxAfE0ESs$qbhfqUT~?rj~4To&6dtf&AnPrqkK3+f&nImWm( z>Gd~W$GVMMm~fe>Gy^e|;OR|roE%DBAR~vHm4?G=QQgEG^jg_68yF)VTuY9Xq1+Q; zQ|qY7;W8R~r{~Ps;Lm~z4Rcd502?7ToFR&{&rpi7M4%R8EvYgvs6oJ><5_LQ5QRdx zKgqM{XpKZa&(LHpyPfQCztF2NDGs}%;UWg6Rgc6m6(W-z6#(`%0&DUyo;Wv;eiTACZUT@hr+R; z87r$OmUG@g*aOLphv9ziYa~I(Cdh0uAy+sH8pYmf@rh6so+#YI8lg{|M`(Wvfp{o5 z5IrW+fZ-t&MZ9B2heu)#!m%{T>Q3j+pJ)F0G&V7o=15>%Lz!~Eo`oR@!cxQ;GcOfn z>4}PjG{C$RnL`N2z_B3U;f3W|z~>L;V9(6C^VZYkZ29qk<758^%mep3?71-RUhTVQ zB9z?rlNZ$%0aV^jATWO#3@%u<>=5~c?=bR}c&1$-6| z$^_SD=MfY9WS*`$GpfWaz(1|N3N1W@|2P)*K-$QQIr(8IB|P8xc>cWMdwv384%ZR4 zd)L9;+#4GwIbeNpha-D|>yU2vn_q4bzq0N`^35z4#{81b?g2vgidgxnG&Vj?a`1gZ z)w0#o}wAGYWN9 zTd_e-_Yvm9c*6Va)MV-$=wgiebmGDTsiC7eLQ~oXyGY#Zg|Q+?V>lZ)HIPOg8c9b^ zjiiwWhSJcPfz)-pJ#`*wO}(dDQ`?co)PII5S=}{MIO$1Ge(D@8Q}JMKPo=ps9+c&U zRE?**wU0viZN%B5e8UFuD3Z7j4v&yL`j7sPzXfig%#*x1hjl)q(X5+wXu$A{3MC42 zVBAQXN)!|R zGdF%e=)SOc2sN3)YgqAidWXX7S3mq9ojP_Dul-DV?VT$TqOpioetv!lBSmW(raf{6 z^Q{$Y?4^Pivzt)Euf6nq8k-s;p8p=9ud@+23^dQ-ILHennos|gB1bLCvrae_C09M!w=wb}3E$Qu%CfPBoLbzA7xBtb*UVApB zEEa(CFdt1=-y}55(#TdGl1^l($sG7kxYcM-*9>)rx2Qa*B$P>@)}W<<%nqT-Y|@V+ z(G-t<>QQj#4kP0|MS&4_BwyydWma(Ln}#i99=lZ?6F({!Tthr77gUl^Lo?qMS*yqzB?tm<;({hkT<?w};vf|oX4x^a5g~pR8ONd<{kK?~{5yAu&+wME04O%ym4$r=nt z@^(vwx-arD6&pQW+b9QC{_)&$OsYDr$3xCNA#bASKKb;;^vp9)QK_sy$ed%|#apFU zz0^aK0%V5VPrvw6FAL+DC4>z^pa^UV+=4-c@C7pxZ8#vc$w*F8fMzZt(~rL#D@Xje zp+b$h?o_JnO{Ka%fRK(2bk4ba>(w;BG|9%H?XfURan_UBX{bn8IN3QPDvHf+JeF^@ z<&~_5R+=!G)$*+Nf>@Y>z|jFpS63ES zk2@7h@G7A=4Ml_(JSn>cin0;r;JmerxmbG}C`{Wf*-jt2*+pDkc53&FbCl>r(E8$Y zUnIG%GhoGd)om(8@L&i6RtAKSJn?x|2LviB{Y7#W$6+&WRNjP>gvFn!?lOjFl*f_1nctLfgx%A zH&F8$H;piWB6^4V*wT^>2=Uf7eM#_~xR0*iH7V3v1@1Tz`C~IAJ9v zm7>FOBAn9IF_>5y)}&vV z`H5qvfm3UK1G}CBXS+hUN@oZ0`QWJ?w6xOdv5A(`z;tGe&`6+=g9Vp4cs`Lg!0Ieb zokLJ_NhS+ds7eDH1WuqWSV8r6S7Xu600z7Ort8V=B1bnBdwSsoEEWaP`Wg>CeKx!X zO(^_!ZqyqaiNgq-(S8?PUmz@KY8+44;tIbf)7U*?_U}#*B1O0x{9ub@RP(R)fp_!e z7BHwh@yNr3*sZ6DMLI4*i%*?Dkw(spq?u*%01-eHGNaQVdtHX6{=;ASoA4eBHI< z{it`;~wC_tX3DzJ_p*QU)JY_?KSj39wfC7Q&)l855b6Jtpx2z46YwX&!}Tno_VO z44?9rMUus*s6Mlcfun!8ivlIkL_$xk=2yay(nfxWc}WL=6mZf)TlkLtrZjr@4qXW^ zpg`856pRHOv*&=&Y&mmzob+)GuYtd}>AtmxcXnxJBi;FM3WFL2A}BE#RmeC?6;Z36 z8Rp_E>lp+8uZ3R$7mehtwIWkUf6Px~H^EPS;71z&R%y*{x6T~$k_H@=_uUAEfxoja z1ro8{+apJhrY?*^Mlc9ZMouV426h9B!nP=>K}k(&OQ}(d*sRO zP|JUj^If4@;n`<~*#|vBg#U~K(WvmC}k-z-JpHFsx z_r5Ylr9z+pZ)lBi$dz3ZYj$n;K^!>Nj|zhOE`p85HC;j0TdgPV3m5@V(Fp5`2r%tI z{M7ihp4Fq>mD(LoP{r*JkwjLHvDRtD+77WYMvdd<`@Qs&7hg)BdGe>|4gNv8d3!uH zwjB$CZ}JX7T5K$CBZLwY`l3A@!m7ZsjdFrm5^Gh1l=;iVv;+cUXlM{g{u%46>&SRX zw@=~6kSc5JM$9ey=|2Udj7c(U_Z>iM1W`&HkRbtMV`J&oozV!jP~-G@TDQG7k45LP zb5F;<-MsfU&m>HQaF1nrqFcdb4-4uV$fFg&a$OQQ!$j1mo2j2`BClFw4NBVK8Asz9 
z*2R_vvt=oPm|>;kyn(EM1>qqWHSRFyEH~Lfu!JrH{2(NI5@T}eHuWQ9xzZKxwz<$g1BKkF?aSpk_WMdZ{X_IB4fMWP6Tl)lUUx82?bMDpd1<+ z!Xkx2i#09amiW`UPAtqVR9R|mjP{(3#04s3i|01TX~G)SaA47Ir4(W-LVtH3R!-*X z_*&;(!~7gg&hWm?E#G0^&D()lQc|tFi9(EozDc>&eGqbou!l_wG4BCK51&1n`a7F( zzh7f*D=j>^UBvoHeoLtxg$Z2P02WLF))KZw;{7Il>7yxK1%Uu7%qH-% zfif_27lmK~#d}f<56qnXG$y|;XH^V@0Tk9oLey&V@4p|!*9t3N`l+8vr%#{8 zE7Ou*`IA@DpZwrYL-=nXCV3yXei=o;-!%v%%?LJCxUItl4J=xy!P2OD1cVDcath-L zx1p;lpoy>ta}vt@+)&A|k^*tQf->`Y_5xN}r?lKPIi5eljY?>|KM;f!aYK;i#_aZt zH)Nr7C{KjdNg|{(!fFWP?4$S-N(%4YWBwO3K~IMJ##2}h>p-cJ$;SGeCEu$Mc)ib! zYk0EPhZn|m^0_ac<-ZK8T%q%DevT`>^cwadCN5*;Z$3Z7E58Wuhc`MF-^EyQ1K+tG z=fOW;(nrIS4!z-l&j(|>Pd=}KWcOC^r?Z{f=XeKINO>T_03m`f>tYcBi?j56#icsS zfg((sS78Ma#7wSfgx-xqpWuUp32CL4E?S*M!MBxu6eum*e8cKcnM9?1zMT-)5qeaH^B*Ik{a1ua79$~gGR`(-TR z`{XligWHc@Je#O55S3as;AIN79AEU{>FDTYe~3OP8q>_YXwW2l>xq5swYP{oXioq5 z?|nNx@Zj0>=38%uqNBk^xg>Ce-BiJ=Ewm8h{ZNFM*3_0zgb2Bz(x6I%ioI3mEU9gL zeFH*pdrv++e)8e;yMOq71VZE!ybcOm#`4=Tc#)962KMdIr|2bsEMjB=gAp1ulrBDg zFJb&?Idg_@+ z)AGh#dgSaQgt6?Ww+TJvm^mYrU1^WJU6_+ziO&eTz?AdITL3>fH<(f_X+*A?MSIH^?*7lcC361=dIa!&U1Mn2RW? z@~0K}TZwSzufF(vdh^Xo@IyVLLZrrIc{&pU_990H{w|(JE~(lJa*7@g6$HJRq12&h z3nwacXFg4NY7(w3FfwMVUzDmUaG?$bS!JhMc>;c;kX=J=CRQpuXBtb{806+vyfGRj z<<$|&$sTLKj;IfK&xNNSA_VA(G%|DoW6%)cqDzF0P9lqb3@lNCwsn9ob_$Ikt4ZhT znux0D?!w4RJ_sJBi0@~<(h?ih*}Tw_vWA*QWiY0(gCT8)u(cfwZSJk~2DHZDPCf_c zv_@2fMrIFAqe2&6{M^@m_Jx--Vy!^0RVx!yBqshHE_N1e0*=hgLA7=OhCzTNhM8pr zVBpj2$M=V?fD-8mK z|6l*-Z&MeSWMC}%%gfUUQggv{YN&XkMZdfS5o0a(8!(B`83+fETGv;BP7Ru~ z;@QF-3<}^10nU8@FxAvVKFh^%1BBl&CX@q&(df(0SSnxj2zPn|R3L0TcK7aG0LdMO z@bVnXOPQzO{UVX^lpkDv3j$gPG1^IEx2X_Ah{W7I%G6OZZJknyTBr07SaaB}zh>vJ zub|!DwMqes62w8nd7L*uFLTK(0$S=&s}VFa;UGIv=P6+kqC`0vepH00)8b@JWQ8S# zU_3J1Z#l9#2;~Y4XN3;3mV#TSN|3zUE_ykH-klASJFysOeO;kcsAUu@7(@1PaXE)F z6%6j&zL&0DyUo5S*tka!9QIu$AS{;<3}HlAl@R5L4HI_p_k{K>XnRN2``7oU7OT-<<4X!a6AH(G*hWf~Pc^8vakL0a1kj)N2`2MV0lBhJyEy=AaD zHFi^aw1eg_SaQs95#|jATqSn-J~6J#5E8v^v(uwA^BQNb@PN^UaGUXsTQ<2&*Rb@U zK*gRNs()=q6$<7;NZd0|KAKJ*8K#fH+c3o$6#S~xL#SAZY6(^@Xzr!L2Bn2dAiQp7 zD+aAlMs#}(Ts$DCpcy#DnXn!$Q|^=DGd^>F3x#2ciW8%RsZA}zgmUT5^_3u&Ihd}Y zOOP>?9_R#?o=C3Z%%y2fSjQBk4qq|~G@mP3P-%)|BU0oRvba^i|=-?>xW5x&OE_;q?x#(BgS8Sggu5MICG}0Po`&L7$kD z_{qnS-@x5odx*1+7Gv^U24oLZ=%f6PS{wuadAXu4tHYVOiX|Rf;Hx0Zy*cO_tLQud~JuJ01%wR;cs(Rg#sf z=xbApfe4I^wR1u2OIv3L0-H)Q!n!miy}bW_c{sY^)Y^>A)tr6(!|UnBs~@GAtK+Fm ze5kErCHTe~c%4=tH@!$)Vn<&~OxO~3N`@xD6_9pLoIaJl^{u~?9z6S4`f!mX2^cfD z%?7D4YDK}c$s9_=@pa>h?da}|@1|+CGCn>_oH2xxgVwG2gI8Y-?aQF=Afg^96A%z8 zFxe+Q`+S&S4esA0JNu3A{#j%c?-C5YPl>$>=3&b{>Eb9-CZ-xQ@1MW*I-1KCL}V~k z5!Y3ZweB2R#SL7->$p5j>MP)8be&4xx@D)P#?#wByab`pTwY6sfZ5bbDJYXJ){vxi z-)@q;(9uWI7|9Bi1v;T&`qjeYhHoz(l7qu9?-;YK#V4knIQ z6R?MY5R+D-KB>~LO&mm6;Z-;=PB;fM*`eq@@ zQIi~^u!)5ha)vdH$|*1oiGvm@g}Q2j=d~1uW{c#nW+0;4VayoTHQdM&U)^f?Ml7Bc z%F_`GrG-x`vJ!qEE^FEaE(|xwAt*@Hw-F$Bv633Vze}QnL?y~Za;>2o3KLeJ8N5Ky zeJ5HGH940ifxUaUd-7zzV`X(MdE$v}TxIg2%>?!dFUeOEm@X~B%2viAyG<qmMqGUU=aJN-6<&;8iP0D0c1C0;FrMhJ#a}Sf+OPI>Mx9supH8 zeGA#q`HqIvcZ{Z8X!yoED;P*TO=R=l+*i~H;3(c0R9=YYKdH0oY{abqY-YCknL&`@ zmt_M5BFz#(?EwyN4C)vV>HiLZGycxr6V993YWp#55W|C%utMOqF*mjcRgzczlEP`; zZKWK50ag13e|bB=QboBJsiXcF=T<-h6a+(JWiurlUM#>W-C4g;vnexAZ@>Se^hba3 zy$Ecy5c?kL@ILjvl=^%Z)hFX&_nc#+N5SzJcn5mJ3 zw~(?bv~}EKzlZvVc@E3d=Ot3A>Z3YGk!4K<&Y2a28qY%fEr@U_bF>~Ndw_~;V7o!3 z!PVHKsM{5yLbeafnEiLrU@G&BU#7r2S>MYG8$lopPScbVo!eMTn95)zN;-CEGx0;_ zVc~4E>-49o(MUj?oA9SrqK{GJC?HX4PHUoGtZ;H@dWaxtuCD-JVM2i8X!595J3-x6 zAk>Fy0|{Lc7Ux$AsK))ENiMiOdk|&?XqF8k7@q5aMPMK}DRY+dx=HSnMn-*wN>OI| zDeZ;!g?Cu@fknl4{9!I;Z5!(b*u;Fem1jY8;Dq}0Jxe7l1A>>>1KiRSOMaL#BM(qE 
zht6`C(<%^ciL3jf77?-=13qx>q3HIqv`Xh1O$QKf;Z(DaT1XI3<{Pdj=D)Lxq)-E~ z!fX$8p*i?B?lQDzn}cXtQtxu@##EUNuXHZl7KjBaP;$cJ_}j*-AaMvVOT^GE5O4SH zo0n5PjT8DH@Oe!8S8ftZfOa!+Z8_Z~JL~q<1%5B%o29nx63GA(JQ$HREj5D}8zX+p zD1gj3C*uJPCKfJoXtQlieQACM4H(nhh`GlccF>x|h4)gmf{?Z@e`IWrwN+ooHuIJ@Ht&cKZr2j4J~ZmVW-vf8n#~ zx#ylsmnaqTo4@s2>HYUEhhSp~Fd2G(UvK=LoVo|inMH$Yz>G?%GEBfF${01(5m4R1 zHSF(RteZUq26biWmMQ~F0+{6_bX$0UVVz-LLK+39#Eb05OJVQK+wokFf|&GMhNw9) zbN_K2pJk?fXgOfyKKtXdSbn_3?MOJ{{4I_x56JO+Mm?AVb3PCpyg`A;S0v={c%LC= zI7W;Q?~$%JAGr{3c^p(7oPjKa;NjCtzsr z>@RJI>-^q_`4_)3-sLrZ;x2Lle>jwQE+w=cpd0BgUo4wHnUiDlFQj|#V`h!G?dodC zpoG^%3@C7BHz0S)I45Iz+-vDfNa@tNR2#72i_hQ0dbC2vNE4<)sNh*En3dgc`4cA> zLh+%6E|vm)iMh#O_;Fl?DFy3DB!Z`lx5mycQU1nQgc3Ze1?|<6ZU!7nOWkYF3(#QB z`)Ph+KHa)}hkCoy@F#c$!fXw+5i%L&qT~fWPX)j=m5GmqW5(Y_tPItZ#?I7)Jqj@JM?7`X9fRe&g4FJH7qp$7x|~IX(T< z)9KpvkJGKY_i(qOq`=!-yMd3IEXl#WM6r*t6=n@#5XTSCkfRoqXf|{o0g`sjxFy{$ z`JH>JP#+d7X!j;oE9rkbv7=?`p@z16fFFSitbrwBb)^P4a);%h^6*BJM$~MTm2AhW zi$SE!6U$^H220^cqLe4)8{$ko*{f~cE$OK*JjoudK&wf(Q?_Cs3b@s*zp8skq z%ffAjOryzsgXS{Zh9Q6p(k^3?bp=4JSOYvBd7%#c5*8wX2|lN66ZM>VPGlg%*A&qE zhH&McKvC+%os30!V*$AdD^y=o4@s-ky_c`B2It|wh!bYN9A2eL8N+U4#^Q93w5Kn= z_*{DL?GJIOwWrTM|4gh|w~#W{A~17*W}Y}XGq9V>4&erm8PjU@%;18r$?W>80E(dZwTu>I5c~AoH9)llqc*-by&O_ zIJ^E+#O@AvgKI63V7brrD$1aB$Q#@2%{Ges<`yOMzKR0iCWc83FhwE=6fvRbk6Ixp*rKe}C=eo}ia-w2v;@3prV|9OUd4e8(hufz zmN8#W&yf+qv~2V0Lj5O$FdNa9w&8PT&6=pgFJ>?%=6{hWbZv%Gh}4p>Hi41?k*f|< zp_*-GUeju%A4)DwteEi0_6QM2r`O{%9Sxo z60Eal&Zl!{F0%IsZJ3!#^%TCr4~jDb+_H+WnD zWf?nnp7pKMO1hfwxWU5%Xkd5>o(e&X?LSXc!AN{i7^*;oYVlu-dnyJ7_Z~a-6pa_^ z)5PdaidoOb`atq>?-39 zcx4vCJBnu=3_OqETp&JxuPHU4s20Ewc;=imQQAL4rG8yFyQlU8^RLsD>+hv^uY7>t-&iS?3|tSm zPrv)!?;_kUaQ0a(;xN54{;{JYslBZorHZzn6d7+|zY4@X3{c~)L{o@7t)qu}k8%d| zvB%_Vz-_oFfK8i@R5&N~O)YfdquGC9*5;3(&vIK}OCdN++`yY~Bt3HOBCIf?;4FO% z;Y7IQyD+UYj%|W{@AwKe!nEUwYYMsi7kH|m9%DKWo>8sAK)~?(xL=&{9JE#$M*w~W zzKAy6bUa zEAR`mHl1*8Jjr9=JqO8qyVaR=;?}5`?rGH{TefToJm|H}Mm)>RKtLQB0s| zqP?Sw?p|~0<2T+%ac)a}y~D(<630snPodDp92H0rFpc|UeJc0jK_lS+{eyjk7Lep5 z9rj$9i-&cYZ|1ul3ARR*F@Q}YUG}_<`e1C_y3qoQUcO}V`Uwd1RmKCWIei1iE{FE&qzasjRcH~ zHQvyqE)5UTY_B&7#CYBIUR-{716C;DZle`p$xexNbqJNzK%S#q(06|K`{|YM;ElLX zc5kPKkaFszc4%WWt$Z6$z@e=}uShTC^jf+Mhg*JB4dboytRT&LjSH|LpEdy5>8@jne5|-cX6PsF%r$PB6 z<~VCk9VbKe)943#VRZ!9T>+zfC}B^ zjPDj071RUA08Z&h-%(to$Y0aAdN&ZDEl=cODEM=O2n8HwxGfBd2YeD^C`%b5r%P8D z5D$!L{2Sl;8A=aPIb#R@fxNMXk+QezBzzAK3OHh{t%?h`F(vNh{506P4^b+JCmXlg z(3(%b@XOyM+@&hL39L0@b${T@X<&6Qc)!VnwZMc*hOx8*BgfJ&ed}+gySG0Ajx}Ji z-_&B#z_-7HVH@~T`8MQ}qeOqUQGfxP zbr&dDi_BUZqF^e3ZCL~bt1MO9!874stD`}02J9J!poT6Y$dCklFf@=k4k^G$;?Ci6 zC~I5OJR0q+KpRp5cdhLNb`hIF;eG1zEs-IryXE-c88+Xz8MRGrVcrsT=bWb~yU1CG zrM-bHVar{_=MYIZ1iWHHDLp_dIMd{1e8KQoOH7C7864?+xA4=eF^SY#Z>%|K8I?SN z#wM&) zvk>x?Xr!R#?LOBrhdN>N;S0w|y&405HO7A?WXpP1prp)fySNKFas zG&@DYpv8nQiR*yj<2kUKnF6y78;nKsev3Hkty!|oEc@bqsf-B+kO!<5nPqfSMhE<< zEa)DLy$$9n;Ra3sh8zl;Eji8Pv-_IQuzOVmV0$flspUc^hHH*ePbfQQ!}VDXy! 
z&U{>B5Yadm_TFSHkA!nHOA9pR11*CPQ2rT+sDj;yl4sH89B$mEHe8Ck2nbm7SUT5N zK@V6n=e-x%gX}M{Kl|djLDE|CIoB1eX$!<^ts3v8posFWRn^iadUYC!FY}yKJleb3 zz*Chg1E%qy!L%AE*RZyZ51WnI00tCpwb0o~#c8NqdA(;JaZVTytg~Be6%J4cV9sWY zCz2Bsc{kVCZ%9_0FZT$qK0K5G4$HkT%S?t~EYyV)BkAA%%U?_PSI5%-`r+@eW~_^p zcd)}+FUbO_H8p0CBpWg31q0Gc{SaZC5diD|mxPU*h zv7Lu!;Y*xVS$7}>O7Bvk@mgQ5FVLIceiu0I7t%zp7sj()jn862eC^$#D6sEfNj$GQ z51Y&VR`mS-#a0xSx z?fPj$4EJ2cp_vTUdT@ychUaJpD{nIiIv2n2sdVzmhqAq8Y(3QOckn~*6=waB-WM^X zCP$5bsRTkk*3lab|HCZnd#Ql=v#5s8ifN4~!lvi|(Ji#l1XsjF#&I1L@5- zewfB?-AS)~_Xm+^u!-lcMC?>2rN+8@h7rUmIo90AUK3_iSq#O%(rpTH8XtT5J1DH* zj$jPGhi)i%S`V`uTzdA)Uu94C(wUP-DA%~1{_yvH4C59)a@QhkOMc)+;UlUPJiZi5OV67bAvo#0u;cja9&rNtMkQMas*B9o@mQ zOr1(T#o|rrKGtJmiY4IciOXzm=R7GJ==0qZ%hD;1$=}7vp#7X%16CE=K-a@J)=^%n zF*GtW`1~%Vt9psu&Mep};vw^~H{yhKxa}xWnS&pKpH{1|DMEy%aK;+WJZTk>mae9# zOfg4G=?jmaOF#LQ&qgzVyL81HAD>DeUj8us(f3{nI#LF{EsZD-4~53P;&~cI#ZytW zr7w#s9RH9e60FZQz6#{RuCiIcnsNru*8m|}Dc`AR+tp0k$X9_g?uBu(lUUOy=4R9I zgDt_Ew-DCrs(RA5|It4td7&lUnYaVrg2zeUSfBf&tY?^!hKXI`oE^WDfcs+z_#a>W zAWdQTQW<;y{mV21n#JIQGRU*#RV%dIZtLnyFaGrx)8%*HPFJtpgeF)f(O`d*9q>k# znLF#SU9>W;<9|dbS{n++=Rfz^@F44TS;r{)&;FlZO%Gl;jW>LgcG~X*97@>r0Gf!h zLe~4t=oHGswA6?sg(K4lfwE-jwAVtfgt3VtE%LZH^MNJL9hJ`WcR zi}#?G5&jjdjPY70&dv@w*6_;GqOoXj-7?ze0-1#N7uV8jb^|L5hto%%ZaL6kLImfDPlaeJkk3<^v$kfkR*v1o1T>M8UZVO&`l;D}))(LmSz~ zZkmO`>nS-SfLTm;ge*)6aRqCDu0U>$JqT-;d&;!5oNQ=$1&dw8bjdj4f zURtdU&eb(sr$r6wzGvZlTyITSU?dvGFIJHrUr<1X>1!cLJB?UA2tUGF8OWz}7%? zo=Yf44x(9$aBlrjiM@xSkRT%13R-}zqTS;LM8psciCS2uSht0W%?x(^y6Gf{mV#JA zBjuwIkYx;Jjw?)uBFg$DBE~Mtn5;i)_VcE}{J=vU<5bnKFRW2;swPBCi?2cwlj3}U zSFGDRtgC`j=lY{e00i2-62AT0IcmApLT3_(&y&e1$VbT!tn7}dWd$^dGgg>yA!A7b zWypiXMZr_#HVi0-1wlNL_-ZwhnX71M<%s%%tWky?UJa%zj;L^pYo_~Kq+8|L@%b*Sx@}mnlg1*5`JxFGFnlV*Pi48qF z5-oA7jCDk`w3O5a_Y`D)$n#Zn`w2mt?7*^wl*dE}1&qYAx45=~Vl_%$2xEL+zy)q$ zYJ1jm5r0dFbCd`&qzS>8oyZ{o*xbTBHa@Z*#@5l*!g^7*gjJ_gAxXv36SQu$*@hmo zCdM`%RnL{obr)rn0}VP73NUzR>(Q(ox6BCviuL~5|NAded4DDShu{0vw6MRJYWIkH zCeG7vG_AL0t*gKo;BBc2m0uaoCTBYfbBM4)=u0OhWW?o*7oJFupZQekY#a{v_#B}c zhLZ??8eTRKc*@YLxm9Q^o#kxnt#WLGh3UBo%94x+%_tP1Bb=>smzo_~hyh=Ai5J3z z-}?$0&xcj)alH80Ifl|7?=yy~&@3$r;Vl@k^czIvCESOR0p(Dbm)3bko~8Hkjt9#q zIU^bN!S|sMFh(fQ&^=*FhQ-rk4&JXYC%!wVfBT&Rs?S!47cU&sF%&ioN$^@87ck|m zt|xmWYvei&HCpC*8{b9UM;{S;;<&txopK&L!|&q%QW3^-4B#nysZ4Oae`XUsxF);h z&_m?x)*$SQGtSG7Vr(C0l;KTQvB1neN?MU6YzCTesOgS^AKXJpyd$29!J@W2&1D5^YJCQIwCMose&cA1rjEYETg=nn@-bmndLkZ33-(qh;l`A;jIXRL zrFBZ}6roABEmky9IoGRbs7w`ewm`H#&lP*nMRZq&JgkHVnWqR0V(oJ*!K2qPMR^rY&wm&Yhut+99Fe9$zBfHZ`; z?dG@A@ae1pRH3>#U3qsRefra%OH&wH6qZAwcCp~O^w_64=NyS#Hjq-$GIHehJwoOP zVU|DFo8?PVki32u?}*`8mc{zoH(pH7fBnnpfByI1O*cpH;&nmppuqqTK@t&FJ5cC$ z%g7Auqja$X4->BT@Zi1vCx1-X(1&S@e1k?ROf+@ckfkLh zwRD%Km+0trpBxVIE;zeIyl~Qx4Gaal$Wys*eDmusvwyMi5(q7;LDbkV1w3O(>y6)CNsAW4D`7rL)l_$f}(;$GF0Rf7gqUh+n4O()kyK&dN69oBI%2~?)Jg^X!lP#?o?IPFo;?t*ZEB$< z)OkcG7Gt`0!w-ZNU(V=Yr%=AJ@RQN6HPg~xnK?*fDW z>g8Xh2zYz?wcq|f(PiJLg_1>0pkp%F2>@SF&5+TTm?8`FLuVGbC3930YTwZg)U%`%Y@ko z{p8uajh~P_`OfuZ0#0^6Loz1!2yg-4KE-Eo?(CsAOJ9Am@8epo%l5{b{odWjWA+}k2PWQggds_?Fo3X<#e$D zt35QpXG-H7Q!kHMzYR29kp3DbwubS@GIX&7{wJTZPP2F(C0D@DX-d^35*i=;^yBH7 z7rv0TQ2JS9&^xO*m@~C-?|!=R?lme*%tm~-oUv>YCQr8fm@jLGOGAuBwz0)N!pRnf zundO4BhQvATEL60vwgrQxX67bbgJ0ZK<8Abq{-rj@MWl|GDU=K9q>l2x(#^i?^A2^ z9YP=$r_5hU)CQ*(adRi_wg>T?_0v>9x(rETje157&&53xW@Z-_vUze|Kf3fo+BUBQ zT`DV!aF1{#XO*WEs9I77PkHcDoJBLWHy4>WXYR&o%+lx~BW=cT;9e`VTe3!&(=#D> z$;*syS259hBR;4AY#?xIe9;5vZwth0trieqrR)WY0v3AtO%t7eu0{1Fyh z=)GMH=ND)x%pUqPPfKEJlomM~Y2qxE8)n8y28C`9o^DH%Sk`AJ%(KK3g4fro5`Yuy z)w=;+3a-#Pg=b3!HjyKsQm{cYr)rE{qW<T4{th 
z8rUdp@|=(dO(1r^E#G5u^(JyqJ)v67U4*tm>lRT4TvW8^R<2XNM5tF86+Zn5I>JE# z8u4EE(-@|qPa0w9Owj+3Gq&iuhFr!!yOvGdUO~QHPY*uyaJn;oC&JN;))3Z=9?%P5 zw1Dug5-tpj>(VbhGn;I#aziHxx_!rb(iXB}2VSe8!HaQw&-0Bi@P zew<1&)X~Q92So?(tk;b~$oVgVCmJ2y_1rJ~;!7{9g$dAti<=dP5!);A=Py3vt2iv* zMyDq>z=WGwge+V5x)Y9J0)`fVHIU9e-0@KEHAZfa39RBR$gbt#2yzyZU)h}HV2D?y zNPZ21ZfMw88w3)2ji2ee5X44nyD`>^nhI7mgwQBBE9_?NZ+Q?TNd$Jj5_Ur;ghaL6 zJvud!)dZnZXaq56S1Ut^x|1x-!`}x0vjVNSVR5f1TuHo|=thzUA+Yr$a1b*IQZG?G zpoVPy99sMW$htz3i^!d^;Mh$7&%ZN<<{T81b)gRqQ&WyGrKs(MLbFZck6I>|f^YyY z0OgCbp z;{NjdwoYRcvEJ5Xln7N5(sAX=b*#AT5zh;OpN-aXtMFlIk7^JNITWFVy@V3WoVAcx zu-Cs;-Xx;yFevj7f><)k=)`uQGM0OyK(0r?6D=@D ziMiw1CBy_NKUaW{gdYg?212KTlESQslbP!-6XsMt6<#+f5ffDp5YR-dEnKiRcG!fN zg=L>PYJrTpc_>@fhm(m3r=c|QcZuAK_OAA@#tP@IGlL(6TlWQTl%Xx)l;fC$>$91B zFlhpr^Q#zhMcVO3$TWnzBN)j7zS5&FX%#a&&T zA*}MXo;Vv2?4dYgZwFd4p$k9{SW;;dzmVL)0metj4o^)mN6(tiEsRVL-p-C=z+qj? zZ3izHwkVztfb${R0(&ekglxSV;(I$a z6%|UCs8wpB*x?#CU{rhHy=7Jm394rO&eKL1Io;vRO-q&)APl{7{~br6cZry5TZMk< z*}szh{qO%u8pkqkf~RwYOhK?=fP_LxNjj1)1Lk9_ zkSDTIzJmvkTdXvJ^`~~PnQ^PrrFY*)A;5#hvJ_y18?C3()1_q_n=EGn8)p$#F?QRz zDiCtO?7viFF>B~^IOktC4`-p76 zEms5C%3#aSdA2VZu`kDB$T%~-o-qeU@I8M>@BHy|wv$HzzkJPVvgbITxDI8=HTj3* z#vRf@n5di~28d+_J;?}gc5gNZ))8~g?vCm4zt}tO_3v!#Y+k}~@JI01p}8{F;;Ihs z&CnFwJ(!KF(+bLkf4P3%XZL%fypNq{9vTdUb&n{Dbnrf4AA%jNxS^FrJeDPd_Dr}S z-kJ@PX4{dj01R8JHfk6%hkXK>;r=NTz{p{cR>ljcPD9yGIG*#k zbL(Du{dc~XZePAmeEMqCCX{AD1nEpQN$uU4w2knw2j7DPLeHSj?6V1#>v)UpdL^uR zKwH4b(WlSQDG*DP>xQ1o`x_~o-%h8%8Vm_H-?>5ibBtd52v9Wu2vRmpY_3J}8vyil90(;UDZWE+KH z_eB1(LJN0cMw4c|6Q=o0jNg?Wi$hD;ev zP%W)yXQUxgCV8lI{g9TqR`E}vU-?ViGTc4uoXPn@^Upjs0&RwCdJdFfY^d!@-+bxk z(#^Z0A;{|~vcZjrFWpng(#XT(p-J#Y_^G&?XKW24%AOH!)z*Mg)}8K6ji>QDQ`ASN zfd+IG=y%bf@GB>1K(r3&8Zt?-4Ml^*^=<_Ifg_zHFt?LrUL=ab<{^ZNu}8w)9(+m} zXdflQ8s!V~w7SOY(12WJPEai}vTNHwzE*xHLNgwF@-dXVT5@8L3%~jEFTTui%D;nr zh4v)Ta~lKrC1z4Z*Ue`v*aukz-vpcpOJLCoAtH2Gr?80Y4UkYdu7~8|8!}{KszO>J z1O*lVpjjtU2A3}ZbcL00ooU6;D&}D>p*0$-Z!)M0S%>+7SIh^(gw|IOxUqfbn#vb+VC)4oQ4JsYXaKcUxf3u82OVJl?lF|w1G`(RFr zbFXk_BB1Lvb6>`%b;*@EhX0pP7Xz6fYJ?2+V7(QF&1o7D}&GxMUj9Dpr9{s}S?&KjzLrJkw`+rgaGXR=;X*9|SMD zsL((?P#OUQ17q84|LDGZrh3$jYvnPlPom4n5F|wmSU<<`=7nm}UOQAR3OAY#Zl)a6OIr3A9D-5R+8s$Eu73c1Q(S(y()$|IiCO+ zp0DC8Jq_WVzpd&PvAI5%r}(|2$tQ#XD}kAZaQO0DKMzj=c;I|g9<~80hT|ujv*bqW zeBhhU=jDBVFHDJJD&L0|OYX|d zSi6I@w|6mLg)~5yGnVEp6OLhLy1CiK2tR8^5d7TFeI=bHac~l0a+U9zp*MzOu1+qd z@BNqGPcv8U2A`D2ii>(ec_JP>!w;av;8#=<(HjfyNsH@M^w_IB;dAD2l}P}uhnGD1 z!qbsw;L@c1F2tOLR)qhV@#%Eq($$O>Ld!Sx$U-x$jcX{Z>wN6-C&DUith6{AG&77R zRLmlzqJ$ytN3Z-@y8QZ^=>~?XZFro;LP}H^QrT}ruqhSE3t%0QqoCjk-daRLgP`G< z?o){{$&s_iQ#;Ym*$ML>p+4TW)PYK&q8&>+2<<`B%HOq4OI zUB5J+MnAyvPK)07YMXOnI2^4Tk!wAGjX^cZ|;0mpiox5JL*i^%|lojxX#-6fYdO9FZjGcuL zO2gM}am;fYfpy8HHN&l4F76~~|G;s|ry{d#EY_rVUb_OHs>h=?oi4xgVRXbZCo+rz zo(udyS{t|xZIy<|_so@35o^R^AECwUb8mMK`;1|b>PCiX*`2O|meqQ7_pyZPMYpu7 z#@&6wn+BJU2_n4w&C;0URDLP;Yvisu)op@6<@pQI(~C6 zc+%Ax@UVuWIYpy_dX(aCQi?S9*M8||Up9mS(>JFMgW>>#=m4ZLPLW1}Xt<3GVH<%^ zOT1x1BA86s7}=|M2I2JWu-dc##Xp5`-rX!F)>{s_7KO=q*A_C(*Yhf4Nr@MTRBCQS%QKD?q(o~r&aMs*i!MJl zh_w?=Lvp2pjTH+58DS9ny8sZxKdgHUrV_A5;s1hh@K~3b5#Pzsm6}ULd8z#L*on{`er3UIgVI^N#nE}@+nTMxA0cVSKTh3SS zlFF!FI|V*15x%NW3M8r~ZhIc$Tztp9?zisQsA0_+XaQAtsfWPog~0I)VWVooGJ8+l zqty+}4QQl!!O3&PKQ~)i4R7PJRaDHGA-0oi%uCv^*#nA~F5hqov-c|3DkXaOGFLo= z9{4Dt5Q+zuX)6)tldEpRLIyo0E?-DF+`13h)oi4KwzjEM6K_c!~lLRcbS|w6KQG;H?*xYuQD(&7R%!(6CHLUdw0OB^s% zBeRV05qlJZC0}|b(n9yh_rjcT3-BDkn&ZP%`P(qX4NC?>Z=)O~^vBB;PIF%w{XG7T zgB}4|i=lVIySU8#p)fFFz$E(=RmgZ=RGDKQ8BX|EHVGv_Wkn28N%a{q8*m~7bnxE~ zg>Cmgi=hYJSic4rQ6~fd-gu4o1g(LNinGo)3|qo0{|-Zrg2wWh&vo%}UF-tmeDawX 
zn2pPkaOMX%$rYI}@7MAco*6jqGR!+d5_iV`v8k3aJ`iH@h?TZBzuQMRVOHOVji>8t6D@BMkY z`p!o@1Ac=wxdwq;t7t8IY^Nsq!Y*{(h9RDd=K;~6#4}b1#^yUwKNRGkXfQx37f-dq0ZEshP8OX8cD%@Bi-SZRUnh^7Moh|UQpnG`>g_%;Ra(dmkpyH zXs{yRR!*=;h#@P+(*sfPOXLn{@GkVR@8keh`Q{J`w-63h#HyeP*FL@x_w*1}w6;XK z^9ee$B6FQQcZ{{{P)^dOLK+68)fRUk)E7@n6N-yQS-p-{hLV;pQm{eQ#zsz%O!x}; z;`t;0K=PQAiZ}2L-^v=4Km#=dkw+$8vBaPQ7N;n4zRGkZJXcYrki9e#LuE0Nu?bTy zlOMs2&}5W$=4T${nKP%-L+2la=dY*UzAhB3hBSDzKW*m;HA28%rf^JoQuq*;RXF68 zJemKb*MV^WsfGh-gV#mQ1J|xE!T%7stSBkJl25C2Sx&F#XekYxf>#&SrRhBhkPnmv=(wR*c&OZ)uKsqC^82E#_4Ld%%D`VEH$VYdqQ%W7=em;;*Ili` z{MOPYP&ygowY)a!p>v%N3QaGhDbN>!9F`R(>H_y2$M*;ZmZy|SXRdi%ObA0{Mxa&E z;S1OL)W+L{2GP;WkRh2I@qhINGPMaCu>Urhxoluc<8;89#l>YcpdH!}?}BLCyAU4c z@3SISQxXTp2f_l$ZkT|Ok^6v(*fg*4ALM`og-8Gl3gKMLe|r#=PMB>W5-acCGyg#? z)JjP}kpdb+DO@pnzxGQ6>-ZNcTwU<74?TNkj|7(@#Gg_u2?VGw6O))B%3b z;9!Xe5m?VSeD6`H3TrFEv|$7$8VNX-KD0{0UbX$u$v{UCaV~k3kVX=PnjtL9xZmzm zlCiwK1YxFg4gCpJawIU0Yix9}4F=G_`Q{Kl+yjMmEpuAvTmcKIV1a2si?-^E@y-eg zCRH2mpzBoNj8GZ?Sn1mN8_%ae;w53-;0SCfsYk`n-DeTR*3#x9sCWv@Rv_?MDsLuZ zS22KluJEdbIS-+<8qU6Zgi`2BAzL_8aC5IDNE;-n8m^`_NMfcXTf!?5Q>Y0S9Ea#! zJR(_;^dJONTLOa?!NePKO9?Jv@DzF5xBuV21VjJg<{tky*k)OZ(%m9Mb8ozJkB$! zDzWZik_?-B`3&WG`tCLg0E7&;Hc-VpQkutWxMa;`Xp6YnN;Qi1Zd`mSym&4g-1F3u zcKY$Q)Y@N{=I;^zi(tDnzlz0@7#u2W*i%G83rmi;T;%FtM#9*}vg?2zAuLJTR zQzbV>Cy9B&^XNi0%QNuM`1}wIz;*Z4CP>bssf4nkpjW1r`@PY0WAsK;_0URb#WUg1 z7K*}gxQ#_-p0D!l8F^S4GiN@4g=dwq@x%8rDrh%~n%4>^!K}H?YqJVNu10vufIz`j zA;gx7GX_iJ4Y+Q@Z>hA5w-0w*nS9E~cn7w=O!XGaj4puaJyyhtA9 z_1QdpK0|v$_H6I<11I5g`Q=N#_v5dqI3}X=$^;7bgsbB!;Yecdcl;=z2l4k?{>wbf z*~nhzJK@fI{QZ|tJ$UP9jNw0DF+YAEKEuC#TE=TxAM{EOUp$>d6^CpXB@Q=YWi-xT z&sl4CGsX&XmY@NnA6+Ak{5~)=pI-a(SMh!=rFN>pl?by?v5)Xt6?Cr6nlsUr5T0T` zf;|i^@-=C(&r*@HoURI|-dy)iW!}bL91?Us3u9|U*c0Bhz>mE>nij{+onhL%SV0(T zq|XXU8pIT~>!DSX)I%Q}9E$a5SYolj)=rd1eC~74q+7RdrvCnZtjt>Mg=eW4a1760 zT$`g>)>L}<+{0lIxqbaMNv~Mt36p82hPo}#>kEVoQR#07@8}kUQ@JLLoX~xb(m8II z+ley3;FWKs?C^40n1D}X`PPD}k;(ixd&c+@w(FsZ{U>_JJ3|(c7NZHd-&s zV}*AGH$zMec?y|?J(gyuNPOcTd^J7u#f$0kmCLRM4?TO!{#g8kBj+qP)7i()q<{FU z|0EqfdK6E{HE0MSI?%ro29b`Qt~At#unDc+GJIzS0||V(XQ(Set>1tBCi!u|jf>o%T*Ao7Q$VF>lKF}95FQ-`SU@K(=MR4i!vZn%u@;ND`=CKW-mbx{S}QhQTIna4 zAI}Lm<-Q`YaN)7@&@K#v(4=kPwU*6x@7wUqs9>(!XbakHsQ4G2{d{`jiN^^~BdL|2 z+4~FW)R~iMU~mBIAUqd^ZH>M|tF(#_ezm;=T;%p>l zl!9%V6}lC7xdnKDzt^m$Gmm!$eAv-;ZIWUc;oawg1EkJj#qU1WmAX!~Qj;IJqEJPl zjlDyesc1;Us4WiPM?Te{s_|wGn6(c@;5st3vWfDAQ5<@W3>gbv>~=G6!?(>JT_V{U zkU(KjdF9Lk?{r@Lc=CRr;B zFeN182y+1u6rMdyNv5&8TRb<)Ofiqc+yDx&met{!)wgz(0V+aWB5C6{$Jp!F5eO#R!rHDJY)h)N`twB#eY?_H-MHWID)kST1j zj&sBn3&5|u@=8W<0Zz-Q86KhVW>`t&4xrp*;VTfGgL#FpYL5WIU<`w*r{ao?84?@_ zUb~Bs&NOOv!4^CmYYXf@rK}1yg%FPHXPyt)vQrCfe;8IZm&S9wa=JgYoSwH6|jORARMSD7@J+8vcWoT zxIMyi3Ap0+M3+28`iD!2n&iHuu0j6vB-bimyY<$zjgi1U%pxBB(NJ=)}qM?bPOGj(99)CTX|? ztrm|K=NBWytQpThC$+nGX_&K0SX?7x8v|=@P}wsWJn=Ip*_pbG4BN0Lm1-K??w*UZQThFUGxa7wc3tOT=IQKGs;Q}@Qc0<%X2zam;)%;~MPn&mBrA?=TB24iq6vZ^ zw(dsn`@X#1yXX7<3n=qQ0NwB1ckem>`S(>=`R z{wEBSBGPiCA#o2KMk3hC$?^^fh4dY7Q4+sipuJDy9Qup5H*#Rp*a-;~~yERH%!s&VNm(&5GQ3X;;*HJvS@e((pY}FGT5N9wSp&y+<@)Y38 znKUyjOhxK|=$iFZM%sjHCgX)S>xng>XmYO61r5;dV)$;^OCA2-fWoc!U*&uBJ2%qe zDD^TZ>e7hc^?u?+=nLRa)Jwr%cTiZB4Mti!QD|qUrcu&ttm>6_t5T&Av}K=iP`vyYae zr=a!L;9VMQ&ZF6PMi7RGyg*t!AvO890smu?u#xU8vdiJ{ ze1C5rXb#<>j#fN*c&T+^cJ6ELXbc5!pNO?t5-Q7{Y4B6{z#%D(Kl+LHrmY%0$~YXB z$8fs98f(cP$BhF_kJ2Z8;eSd$^zk1_zxTO+pT6{8{|ugoa|3-x+I#frXVSR~4^vBY zly1;XX?|uUqN#e3-}eLWOIKfgE2h}$3=wa9nK6;0i(1rRfkRHgwSJ_UCM6ami^Hqb z83lKiR^2aaD32#&WmY$g4z@!p~rch!PH{RFBT5*lkLaihM*?u!<19>PSC*MJt5ZcsrTnFoZju+?O&QRhXh zjAh1xjYirpcBS|I==)L+lYNWmPkYo(*u8OkZauBu*KOGK3}UT5Ax(3? 
zJnBXg4y1+Q#k9M?vsa-h^q&(tL?$}$9ijv6kZ!d}Hoy`yv38L?+cYsT`@+=j4I)Y! zEz*~?nHk5@JPu-RAuQd8ZO#LS*{gez#^f7OV+oDL=<_&3c(&hz4+R~5>gRv@3fIG0 zWLN+KpedYH$RZs`M1{G^i0uC#UWnj=04#(z2k!5@TtFr)5OUrHBJ-0_$~kvFNu3RZ3mH_hOe>`W^6d99V_`=D zI05LOi0SEB?g`UDm?%_i<>?6StcSYD27;kjS!{ZQ$601)4!hSDQFv|l=zb%d%e_TP zTab=TOu*3S_`%xB@D^0r(o|iU??wcMGYGSd^$IhM9<4d&515N?=OA;!Ye7_p*%!GfVV45dv;FgqiQgN=HqpV@uKeOjPxrjtg1 z%DY~SA_d6i)2X1R<6rvh*GY5CasfP1nGH&tN}tPTLy`9nBT&gari088Yts{oyL!1Gy@cRy_GG#@@!n4)$U`X5$YbvAQi+X;R}%TMu{ zvobQaNuFhNyyA=`$}nO$_h9ka#kq5`bb_P@wqVPW3L*r*)TmLyubBCmxPeg=N> zVzWpQTbP(5qxBLwXJC{P#mPu&|DlC)Fd5)EARek~(SDsSz%Zj4_I+2F)4B=`ERgWZ z5y_K9^1oJ{u!C3HoIpbcy0i;eh5cs>Y4AZ^c*Mty46d#7ogLh4{%5RRpt?Va)Z7rIwi9M# zmJ+!P1XRaLBP>8woQEy<-J{XCqoVz8Ju&TOdcxIWZB)a|)UL{DrSx;3{E4XaT%JWxQvRL7tx#Kf!HE<-fP|qqz8O8%E=L1wMoO zgs{sjp8IzV>(4J_$RBHDVLp;QaIFjXSMTl5u3ZlAd;A*VGJzw!vYYaAe4JgM{g~km zdr~L_4*3wS6VC2cEj_-++lJZYoQPs7`pJjETTsRHsm2DTEJ2Uv<{UxC@i-oB;TzzF z`5`7)i~TIkMadJM7k_2CwgC!Dh36T930SgV8Iw$oAHvgPp^I9aZt@+vW7ztw08<}* z?RJ_U#v0(PT&U+RjQBrbqi03{XyNKT2^@k!?-Hh)nMQLIJ~CeRsX*9+Ml5bAqA=9c zFkQjw2y;X300fJT`l+A!XlkxGNZtyoEMch%v@_Lhm(LyufiwFQZv?%!q!Prh9Up@uU<}fM+hPE z%tLsk^etZBxjvj0Osr=g^4%IjyVWWTDo?y8+KS_@IruJbG`F^-)>Cbfe;OVot_qql zLc^L&6VSt=B50wHXRFsm;V#wLl5vN9I-1seH1X!W&p#I4;A^kH8HxF}@i8o~mTT`^ zv7(H7+Vo%V(Bd-fp>UHgFHr>x&oMPXS}&sbR8q@j>cj2|_n`N4)+S2+08BwCC(r3M zmi7!=>Foj(p@@XVz;kr3M;}-AQc4IvCiP(vm?L&Vp~cXXt$bJIl{23p%uE-UPyXW1 zr_0Yhnr6vQotmDC(8U&^mlI@TaeyEi~mrOXbGFxB+k^aKa1jBekD<7&2EGh+25$L2$oFLJh*^p%AbKW;|Ro zxt*%JaNj`emF46K;nLoiXMEx^h($tvRUTwQTa{@X6m5!}^;)YTnYSDlx(A)*f$;ob z9J~iIRx`RZ$0V^TDm!(N=XhQ#)m6GrC6XHEmDiCIFp1k>6D?OpU|in-wCo%}AiQM; z#y`=f)h=Za9EM|Xtzr?_GAVnCZLLt1!E=O5jKM>3IzsD|a2p%QJ>Wi880l8r;eDBK zxYzIcb9A&!x0a;^3tjCS;y!f3%-34KUoi5%=+^IcHXL$_bfc0%q z-56DqTxa#5CgmEF&o-%y+}@(W?G|I^unrD)I1kS5fTYq$?niO+e0+{X=~z2wE7&du z(qr!w46M_$ind8MU5>TFpoO0TL94>E(xt9u9n2orgE=xL^x`y+eOHlKYS=?dGb%D+ ziqWf4Lc-dqB;Uck$#6w@lh_q_wlJ_#b(;q7%d^C0sa9HGbnX%iTHFm4034L4n_KDq zNi~dS|33Hdk5$*YIXqXdS7~uzK=yB*3Qa5_?rGkYXR8HbK9tFXn(zm}HhP&M80;gf zU<^frvlD0di}$r!z<=Q3zvbXwSkurNhljF^yB5XMuguKOqRbU(*)fuqY0I$Fhvmq^OhDtwYDKP?EA1>(?7>6@OOSX7}Dd4|v=%i>?}MHmi-yvCS9$C8==s#|W0ie^8Pp~^%REHYj# zs0e33W41`}K3OXb4F_PsTBR9VY}r~0O_y^}fcUwVZ#k_YYEs)k1;P_SeTsD%?}j<< z;`&mkY!YKVEM2GVM^k#>d_M|I0p(zsWar5=KS^tp6((&pxNn3OE>K;F2b|UF5-u5H zy@pL}rE~ybP7z{SBA=iNhF?q1 z(!}smS{tLVi8d=17l9=KWAsIS7H6eq`3u|*npI&ooaVWER-T<~ z#RMLOGO6Lf%X>iZ@wsOue-jSECMz7vt_v4&z>Ld*nQOT3wsT!9&fNxGNh97-WmU^L z%K>6-;J5Uvp&?wq{{lAN!*hxCxLB?U1C8rg$@eeKeh}x`3!e#cU)k=nYk5S_5f77< zNxp~(;m37+7VV5&EpLRe>zls#hi%2v+&{=0>y6J`dt3ti16qDPdymaz?miwC`(pvw zeKn$WTW4#HX>EL0y0xdXMs9!5}v)mcLrn96zhn{uMLw<-1G3Ran)8lMS1U`S8e5dpH6dE{fY z)8Y)A*d~EIS|1_*J@-aJG1c_zUZ0mKz#ou#t{JR{R~1w1=>{Y}o-*jFfj*M+!?Wq{ zf8rCV8HM(@ul(yYc55=^@@IeLv#F!2Jzc+c9lnGzi8H{QP-){32FbC8G39^*b0~M| zFZ|5AA4_M?4vF8164<)`DeQ67C9k5f+sl(GZGcikk51|}z$6^yhJ+<`2^<_F~ ztDz_E&qzjl6M6c*F(j5$Oivv1Q)NeF+^C-jFC~ejy zPvVHg-B>HUXov7?=xFf&8hF3?PNu5Jf7F9dKiPzL*8EGpNQauc*XGmg)D%S(o9Smh z{!{5(|5Icd*KbiN|{=(7JGSRjMnRR}y_J*{BDyL&ebg@(#(Z zbJPql`KPHrRl!h>4#*i=Bo4zsuf~RU7!0oO1BEY^Rw&*gB+4ZUcMZvF#?C7!I@rP5 zv?o$u#~{`c`48j@>DAKBtu?jHgrr>*a-aYMf-pg9;US>dS|CsqIIw=dW7tFFRJgNT zegX^_#y0oWlXK&*$qriF%GD57FCQgbU6k8t3()U)asXyn)_mfp>aH`aP-({ zsybO6EZCR&=Z__L=sspyWDwc~ zf^8;G`>(=P1Hi`b#=|m!299HRQ~{gFWq_bqS+-8%kL8?UCEgS7?Iaw%91`;Z3V9Jl zj()nVhontL7TW8xgztpw9%19-#be9qb=~V7kbyZ}!=%Z5tZbEbl@49wdN*xFQ4dp4 z=z_w*Jbb3-Xa%a*SYad19?^mUw@oPB8lcAPJa_R$;sSCo8ZHD73h4ilcx`@L*6WSi zg+Ef|YK6wVCXz=e1=Lfpg%QnXRVc9Nv7B#xde3{FN^iXRN*JFj#8K@L=|0Sfvxp|c z&wDv}Jl@?bDIxjt5$jq#SLh?1N;xC^RtU)L=|LNSE7TDew2j!p?4 
za6k+VtCSNJU~$ra`~f&Jr|b{BglF`fKFvtOOBg(?!+Y`|4MCImZ3A|=s~1q^1|wgd z&An=ftydR_6yT*Z=c$(VJtySa?BiLlYd=UI{8$guwv1tCEj-p%m!s8AeLF&=g`R!; z8>|=Hp?`2iC4|GcGKW;i%sfvkY#%){kbd^#Kb)G03@kwdRkqt;d=@_2MvUh ztiUyJC}PY1;TgVyNAa(`5Yh@f_I;K5Aa8I-o@Sn0=CE*$Tqhm)uei^jpbLdWP=|LA z$5gK6pBjzX{iJhoE22TX@4ft%&+;c>shqYrNc;-k#3$a#m%nEem_1wi5k?#**F%%{ zzTeZs*-=g5t*Ie1NNQ#w0zG4=H$5BYtTkoO{8IjjKRfse}PRYFwKkJy+{6;}KW zJykF)gGx<|319MCWxX%=9T5*;z>JV|lsjbNRY&^iPk$mU@6ftPg&3tQ@P~b?OnMgn z9_`?e55hJaamWR8lys6fJ*|z3*D_jp-6qoIWYSFe=ga(mcIaFfKChEI^vI)+(r)W) zdg-fQOMm)D-@#!>evSN$2c06{==6hU0$0N^f_z+C*hufZIg<7<_)p+PwM6ICMM@RE z!kItE?9X8)mGGp23#S8)yEy;C<>8s|ndfNgLoIZxtlP%ZTgHAOp_?^!b(3sPG2P}e zxg_vJ_iosCk(NG(({MMP%oo#FI_&;L0JEfEBD2W0NIzx8dD$3^%V zq5gv+MqXV^;o9HDnSx`c5}m;+;#%^R2M2o6-~C%Zl%9L;>GXv!{t@!hLI`m-2z*0EtG}VVnhPp_6zEKpoc!F)-%IIUE71Q9lznWE$g|nS@gf$jkmNp3Wvpi$=g!THmgp7N0T&T6a5nO|fteB0fDr{6B42z5 zp<~3V_LQ_nRC*rHg|eDntd;H@}=H_7c}A3j4K zU_pp^>7JeVXZ3Y9iB39VbI_Ii&KzNNL%$P6Tfp2x0v!zENfiY@D)2f+S)D+p92O$$=RSw*IrD#VH!U{>K708b4ClH3&wdhRPIM|XWP?oy&Ctt5il zgLHnKJQ6H;tC-4F)?fr&7M>c{ukey^Ju{mS8$F<+08UKXgDDVuEfZ3DjrK{7+@;lz zXPrfdRD4jvAsCISDm+OJW6*nPPK~fs35bwAa1wSBiAJZ0zjHt+jpRAQzbdC*XO*vF z)6lT(BxJ5uBV|>X?2qS3RQ4wg!Gy=!A`~asS!Jfs)yTZ>@%A~wadmZ{c27+)qunsz z!ps`UuO@^dY{3)TNJywoqdP^lu4VKZ3<}2Zj&Ys%EWt!|6?;=)yMGNVje}Kk%T8#h zANfzxhe{!Pl@VkNNQF{`QMv&(DDI}Lgp5W3)0^oz=y)w9=~5;TEvK2U~@eTJ-fLoo!Z3Cu< zz%g=}IH#xv;~b2HsEDxbfUVvRg>q#bd3p37E0E4&f6L}}8WT5=bV_sM9O*3tQG(`W zWO~vcp&i!6hc5v$7%i~T5Y184;?y0D1J`SW=GbJ@z!4SI3Ox51cZ8s7X=>a|qvTF1 zf6EKyI0M$w#dkF_>JdCjGkB&H7*_FBrq-@Pdh*4_^zbwFY2X12<2D+250C@Z*%V&4 z;szK7Ua1IF!qoreFZ@F4>FfaL$5hr%rHKiWiRsfpH2uk!zL@^{Yp>9P3FaG4G(#M|uU>Yg@N&1sN|8n~7%X4XE@g&`y9SIy>nqOvb(`jaYhD2rtDBwNX zWJHomrAC?f1qQ+gzzc9Y5^C93RG_f|oj&p}g-zr*f=A+o$>7m>0hXjelqtaM7is}? zf&2IINB+bGaSewb&$ur){9Y)CvRgh=aq}MW;u&#fyk`_IGt`kId>FEd2l)N_FJ)FJ z{k(5v$^%gN@((Ys;|JG%?+fw``IYam^^9KSrCj5^Oq~!m-o-EacU;aoPjn9PU-k^% zmG0x-(s^7J-3UrlekHco9?YtT^?1)wJT&H8`D}LMCFl+r6{5tlLmntg95}8j7;*Hed#2XM`&aieb-4R z4gKP0KLfwnz!+EqH{~(3gyLKu7#xa7x6TAfK?i}jSB(soB6N%zmDiAQDxx}F1yP8V zNpuog7AfPEZ5F?TvdnoFNcEDgOjjy5l?WqBnzMb5kwmi$j9#jzRoRIJKBvKvxa_CGZ)fe_qnuvybL3u zPZI~Pg0^mT6jc}p+qHNHX?L)Mfw1M22}-r8xL8Di(A%xHrT0W7FFd)dE(!qdg=Rbu z1&4dB0GKjTnJ8O8VZ;jKp!EKQMF)61NIl1}FyMIadwTjg0Q0Fg^&arMAu8eH7xX^q zEtSb;AxH$FF4j1BJfc{8j1I!;lM$lO!i08dk#fYSQm8PLReP&d12glJb;`82FcF38 zmS`Yt4^kC6#i`!>s9t3s5t$MDIM*C0L>d=*I;%-@FcCc+SEJArQDX3>V+hzt*eYL% z$tq4B9cOK#{5tJ^_6QGZWT?c;K#V9^9xoB=Ne#h)k_~#rx@A0i=nwZWipU*whyth> z3?SeREG>c8%Z#%Z1jaKXj8GI!-d9*D9KzGc`8j1w;*wZgzc{9`8f}62*=i=+wx~E2 zd7h{kjl20wBT52xO;%2|b(|sh97zb{I(cm6|p)b#%G`#BW#i)~E7P^huTiZ`L`P=rx-~vLTQ{ii z4#5xH#UOD~gOz$^R|BlM#_KRtRK#SU_i`A(Vb4llQO4NI#WXQBfs(Pp`J!kL{{@Sn zAp{k4U`k*^GehpB2lk_!lbOqB)>oM|KevftMl_9dvYXc&gNRb!>4cmO=n@0$kg0?_ zjnJ?~IHinR+@VKco)CN;f`NW-;JF5O9s^|)Wq!8^K0<@i?g4^wY-Tq7_iug&flI#b zz4U|zTBzu8QYqEQ+QInU#*oYvJQR5$X$(XItWXx;HfQOrH*=7drnv_m{iv8Ht;gQD z3QOCyK;-EkfASxu-~8lnr0!Eac-RioAAI5W(i>NAA*@J|QV3V?rXT*;a|mEtlI^D9 z;Th7fIKMg83SblrGI*elF|m_|dVA8Zed0G#d!awQJ^7XNzy9x^OJl>Y)PQ-#N12v={|0oMg0~Bu*exVG*_xLUD5vex!c&IC| zYB+=>dsoItLq1BUfd9#h5aS_2i#(Uoh9`Gcb8_uZ@i2ngSI<6Ir@@}r)4-}mE4ne-B_0F)UGg^I>H z`Ome-OZ)-H6xu5Pd=zk3hJ+m9W}z^|_0V$cPlFA3yyBWh4wP5HU_(0T5E zl(vp1QeDeQ>g=hUPAO6t z$7$yR{fe*3J*N#?^}hq}(n+Xse)F{vxU!Mp^3+Ad_TdN4q*FK^Zx2tTk>R_f?vOfz zu4B56IIH7K2W0%NKEitU@D}@T69XPxztN;kc!qc??g>lv7thQyu{ZYeIPIT^%Aerz zw~|=pUfl6MR`TvL^7izZA)Gb6(duUxqr&f*PCayXkZD+_(&)_*oE}gCB&I&3VXJJ- zqmS4xx{|g)`35McY&X?3!`I-wYcufG=)#Z{-zk4vT8NqeBOT4QB?{WNI|=WFuEyHf zO2}r5_9ANoc^ggn#ckeGXXrX#pW6Cq3x@-6j|_?WijFk;&Sd)I|Mv@l!z%T12R0e> 
zC2aVGX9Q`6%#b#ub#Z$g47AdO7t%&4Dz!)wvN!FhE9775*22jv|rF(l)0 z{H%=k-ix%wd+^=o($nudPjqYqop*!%5G7}A;uOjbOp4HHW#LNJPIh0{w z8$l@hjbHoB6`v4HGus+pbPXT|4-HQP%pjN`gzFkb5MRL%c|+kVGw^zqG9k_1d=4{3 zAu+_uM{E)Gm_v?h_*}-%MJ(DdyOp<$V{L9j@r7ZOu{bJRZ%uwJP0ozNBw%5E*P@mD-~uxD-vRo3Gyun)wBexT41nC*lBZ9%h+pGWWA@) z#dFP`T5Qw6_okL9D&#~Y+)}*5$1r)LGNI^!VD8PaayhDNtMNwLo~MNMT5!3?{B$EE zk%OmYsq#<3k}VVzFk#@Zc3Uwiv~Aq)1S#JSuL67V{-z75TxJXw~jSN^Uw}2QX3OI~>5=#VjJ@afn|YltvVIq*V)!hGM|&g6P0YLvyA? z1)R861}-D=a*aB@(4ow5ZhD@?4awSZudzO!?-@qTt~y;wmFa)gl8KALguHi ziQTe)&%0=kQgBQoO*&Fos2CA|2&2RA9C1FG_lxs}U~SiA8zZ*O^?LTCN23bXw{YGl z%l=%6_eH}`1Je9HnTe4d_hn`JCSDj7sziiBL;Hwm&Yp#1?t{Ork#S=xFydPnQ2ZW7 z59@Nh{Ld>pM=@FEDqnE#)<&2PXBai6z99^^@lFXS&nfd-dS6B)Qq6tLgt8dLOK3m( z+af3`P^=Z0rW^@>&sFAWtR`crlNV1vx?>^?dYMOIr0B#Kat)DsxK9E2UEmbd1buq8jqI5CV9coH(X;RJQnI zAN&xhjm_!zKmU8_|NPzGO*d|j)6O7u83esqa zv`3UvexuQ9I-n7oCoWw`S3dRYbOgwy>o>ode(QIBJFU>3L#6GAw1bXrs^=(dYa(T; z>eMl#QM*#-1=1W^F$4yPe03C4FR2=5PIsieGSVwVaGvATJ5D;o57sPyv+&bmKY5ed zDyW0f{PPi9CgCq-CESHAH@2 zD&&6!Zd}J&bX0{b3Vg-L;(Q<`-sijSLB!_0>^a#zeKV^4S%cSPsYpKFd-;WI9l_&$ zR&X2nRJI=PDUD{HPcCOo!Q1}-hJ%mhIt9P?kxyzE-LF+BVZZSt{&Fp@DfpDyKZ4!$ za&f$u87@LX-1F~@XQ?2>f5OgB`PaShgFE0JzVDgIXTU*z_audD@Ja|-*j6Ab@{9y` z!KO6nc@HNHYd<8?pd(HXu{=wsm~}=DBG5Q2MG4eN#{*JFUk7~1HDr1HPh5UHJ#cOa zZ~an4+J%FBOC^bI5M4M*y=O?LAtJCwEt3khd{o(EDvHY0r6(SxFK~YvM80=-cj2+F zr>L2BWGFC>p*5{jIa8$1_MN-KJZqVV{&Kqh<}hhZE2$>m!ZgnQG;;MW^-`NIAmCw& zMJ2i7))my^!LOo8ymW0eG@L+zKhO^be1&=hhR?3)bHGsLd2N0z-G1d3(Q#WRl~Ri| z0N$y{L2vBK^HwJ3XrEU?bYllc!7+3iSrgs^UZGxOQwuX9be74#8(UR4i6#In`;i*u zC?l6@4&!5Br~~DPxs&5-5t64S&llmg*o!gClp zdp2FZd>LHcOCuvA;0o}e`-R4jFX>vmG$WuE#v9+|46Q|w$C{ZSJ?2}E=qo({Otx_z z;eflnkw$Nf;YTrRK?$JCnD1GzCdOM8`^`tPOfQS7-rRs2;Nca3N z@8Iu%iTqBy3z;w9$OtL$-m9FRW}d(ND2Ji@7IkEW?vr%xxsH@;p(~9wqUh2E`r9}4 z^`)M3=Td!7N2=>-OBD>#=zZjD>Uy{*4L)%$J^1`XY2YH`nFrCgfAgRI(iH({ahNNL zTta{x(^EheH!b6d|Db4@kD)=S`#!^s1$qw-B)nA;7H*SnVawb4*sHt=d0ZZBaR?z?DnsT$5srFH3!I75zQVc0d-sKk1Q0E0R8;xT+O@B_?*freD(i3*6aKQJ>0 zNqDnaa8D-T{et-UIgcmG+`;giL!1^rHBw|#OE^`7NVu!v*itD+q^BCCR2*M{VJWy( z+=YDfy5;*ucv@PUgJ8`EbYx(Ae>+_Tq#NlYBjk2wpzA@iw^)QU#TnM)`V{1LxzRH& z?%nUXso;z}Sri=McVED}=XkMHCu(W*xLt8}pRj4IR-I+f&-szg^X!dk=G2|4wRM2xET@rE<8kHW_A*dxTZ zVcJECus~T-W>752^WR)PNt2^HY5ewjT9_un$9l~DEz;R%a_lg@eQh~S-B$50NvF@% zQ%AshNu4u=V`E99ihi9Csq|>xq{Z8?mVoU#^%j*dS-X!IO0}Re_@KrKk+1b>oD58! 
zsc$C`?m7D}R|j|7ZUf%EwOX9O$Gt_#Ax%`Rrm#8hp4WJ$Si2oqMPy6;6@Y+OS3{146i( z{9GcU7dxMUZzR?-f>8yIAHs`>2q>g1ZqyhO*HzRUilM&2`4Yq<8n_ zeKS53>*1_@CnD^AM>*;nUc+Z`uecv)$-7}_03%lz&*z@^`CwLHZXtVCqvXR?-ZX;v zDZb;KU61z_mcEsta-p#=< zQ9vJXU~Onne!r!;1%6+ZCPv4|e_u^=^Yf%~+B6<7JuT8UNF}Mjd#(e~5$C3wEKf2< zc404l;Mw=3VWMiYp zUvtv%AqEET=`b;J+|W`_YEoHRo28#CMbky1yt%5nboGsE>HOt0Fi9K~D12RmZ8QO3 zG87StaIV5DO)=IOIE1&?(fp#8Xhr|o0rp96XHtnqsh!xSOHT>>!k09m!F50c(tAq3 z_3$h8m9nbC^wbLvr;q>i-%1ZY`apW>$!BnI)TKEJtsUINNd`_zsyfkP4g8|BXah&f zb02&vz5Dsc(>w3nVBcAk#pWvl=fTSlrH}r=N78@%5B~>^CE)$=SVvQ@5&5Xaz^fsh zah}O%Mf9*DxK|`&B2O_n4@8b-29}DuJlc`G<+bQQ>MRTJ5agG%VU4EeIWsepEw*e6Q=Oa@;HYxX;&ju|ye=B|gRf;2N%uZfAl-^Qf2Kjs z4bxzM1;t3T0^ftbs1u0C+)rE)7VlUf)aiHRRpJ<11VEJrNAKq zkS+Iwr~##1SG^-)T!Q$1*DeBjPjV{sFiDM!GQ5v^Ol%dVXGS0f?noTKpg?$3 zV5s0mSS=z5>;)cEz@rn&D{$7+?H9zb@S5^{6??l=NKknc83nDK3EWSujqrijE`mz?Ls26^SUUs1?OD1 z%7R5!84X&bM&S^~aW!}*J?x^iM((D#%U*XkVe2q}Tr`pwUI;_)&*zoE#JqeNny?lB zgr6_h#Zve;V`%)wQcw;sSnaN5$D>oEx^#AQbI(cyIl_(Bo*E^l6xeY`uX72?s=aZ& zk91Iq(Ns?AN@@8ahJ86+K?ZQt!u07`E4*tcMAR@3FOJNFJ%^%0o_==zSs|KmwMqI3 zxI%C`JX>~~I{iRB!kPOKfzT*+%G@TQUcec`MP@Bs)gw$z^U+wEotlsInV5itQK&(r za-bmb`HYtdVi;@0N%RayL?N?Hvl5wM)pS2hL7h1ssg&xnNa9X2I<2sL{W9;r- zTGHJ~7cUH@_kZBo^w86N>C8hYAWd_O`Cj32#z9sQicWKM`5^ZnMa(vHdV3Es%4~O~q_y4gba(DXS|Q3-T}m2MIbn6YhI-)Oa*?Vb?piP!vVe2s1Y)0h=ln&S5O3vk z8E$(%zOPBBB*_G)zi0H}JJ}hEcb=gy@j+O{P5E0o72g%08V2H!Mu*p_e3}Xo-teG{ zj1K&+&rwi^K;=2v@7$mFG(_T_9=>?w8d-ey9_xa(T!rg9K_18>eM$eZJTM?qdAu+G z`HDUAJq1D*vB~a}$w=?U%k_F^_LBXYtt)WJXR;P~rO`O=s&e4(@{;iSvknc3NX_9n zuG7zax@&ch*_xGK^1e*@=Y!12Mek5{|Hpv&4y2gZ#XC>vmc;XWujgWUR+%$2=P{ zl))qHq~}^ZJI~OwF?y@hWtH}c!*_<$J6EqUX^D4D$O7{r8?x6thV4!Re;orjJ zzd@&#_OAAH;qqBh14;1#46_)~D@6K)Lj}=_=2pC)^^M@APA+%{_#=E&dIP`!Igj!; za_x5Nf?qY`BpkUt9EzG}W%Nc~X++g?RHm6Ca`Ux2G>P9!jVSO9r|fY~69M>}5fSwx zjph1w3~ro^5FAdc%=24ETi}u-d5#fqXp!@*YapGdw+?3liaAD-4G3}!z?rx`o%$ba zgAZdcU>vmcHp1UYbtKZ?iAPt%O`~BSf!)A_xFQ)1Q+MewH%~(qV6;ij`1;yrL~)#c zx(m&pRMOdu!sqUxb|zjCDWs`@0}X!eAOBbyINOq5zxqnLeRGQTtc&o_>h#LDzXzPq zQ&8Fu@e~DZa^_L*h;rcQT=VE#IvR*P%%`@t7WgY&({MbR8vNl8emI@!9!mfD_y3So zAksh$BycvSW?BsF`PRbYwsBs?&>VP7o;;p1e+igcW)qp6225snpz=78h-9k5T-KH8I z1E(k6-xvJD8ow=)DpsbU6<{Hc3*8T#ti?#OMkK41l7ETYE+*q!+nl?!ifD@{hI*5R zxCVmzCQ?YT64 zeJS1k?pT_5YbKRl`Ded&C73FUk%jn@h!p%=o$)%;lh#*&2}Mq2wKpFB=P6~m0I@!WB6s3!Sv_Y-u`h^i?y&8_ z%ys7}d>$f7P_q{lMqpp4CVB%LB`niR%%KK~z#rp7I0{pYG&?P@i&Ra|=95wsE#BY6 zD@3E!^r82?7t5X^bcopVF%6}Zkaq}7g^@_e|Eh&Sh$4dcfZ{8hW!~%d@`aIGdmtr< z-!iLZ!q$fbRK+b2tW2Lp_=JlwMwj#j9%$7yD4i}A~}SljI)ce*0I zr~k(oKU$N%d-qFeo@q5zrL>vMwK2hw0&oR0YtsO#rI&g+II}Rn0GeiWV0{4x&K~7M z@UIAq8zJCx&RQk$zNgIZNDIRF{=aZN0Tl0vU*dH3%FaTMyU|Jcpt9(`!Qj)4$oZXs zKZ=;N?6bmAW$`Fy<#VKULGN0mCDSQWT%{|<^4w~-lhG#XI; z9YAjmq&b3q>{e4|+S{o{`m^U#Z5NZrI#FCZN!h^At-<)Mp{rT}W!4<#WqSD6Hq^jp zNbjMDKabKWAFQQNHAh>|O6r~R>yJO?nVMNH$rIfp$``rH)FniiKTuZen|829=}zfXJ`z&z@SC&TW7RU=RNkg z3yz$iGgUO#rgP6cnfhLMI=%lVeJ{M^eJSB}TkEXe0tQzh;Lyzq{qaz?E$|6@ss!gPJ+)G0tw@oigj@kfQ zEbhTvots^5`%2cM!msk=9?eC>j=)MpaHue${3!Uc4lpV}dI@Ck?#2B={xCGx6w15| z4dtf2s};oLTAp1;rxTf70R|Y}aS+#bk~K{rC5U-)Sgjm@c(D+-6HgK^xR-vqSnuTH< zp#V`}bkMq)dQU@8Dn~s31J8aGre2q>55FDfCIdE_Y|(1YIIUdq_*%RU>j*)7!rDv|5sy45`y%ICs7R=sORpARJ3J-pvLj=`0HZUH_~-e!mQ6-so2mi7MaU9CBV@QZ zrokc|8kJF*RRICFq1Sj{y5;0gfR#Op^~P{(m?y<$2uz1<=?UfCU?ftq=_xHJ0}5*m zklDGp@SH^*4g#dA#4(fL4X_F2jPaZ*TN|r-FEFl%Vzl)hAq4R})?*0d0X3h8g2x&) zxNVPFPjN3CGi?#(T%5~B8c$A72X05fH4D&-C=BFHo!uSbacXH{N*)7Wz~(4mG#x6! 
[GIT binary patch data omitted]
diff --git a/research/compression/image_encoder/msssim.py b/research/compression/image_encoder/msssim.py
deleted file mode 100644
index f07a37127..000000000
--- a/research/compression/image_encoder/msssim.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Python implementation of MS-SSIM.
-
-Usage:
-
-python msssim.py --original_image=original.png --compared_image=distorted.png
-"""
-import numpy as np
-from scipy import signal
-from scipy.ndimage.filters import convolve
-import tensorflow as tf
-
-
-tf.flags.DEFINE_string('original_image', None, 'Path to PNG image.')
-tf.flags.DEFINE_string('compared_image', None, 'Path to PNG image.')
-FLAGS = tf.flags.FLAGS
-
-
-def _FSpecialGauss(size, sigma):
-  """Function to mimic the 'fspecial' gaussian MATLAB function."""
-  radius = size // 2
-  offset = 0.0
-  start, stop = -radius, radius + 1
-  if size % 2 == 0:
-    offset = 0.5
-    stop -= 1
-  x, y = np.mgrid[offset + start:stop, offset + start:stop]
-  assert len(x) == size
-  g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
-  return g / g.sum()
-
-
-def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11,
-                       filter_sigma=1.5, k1=0.01, k2=0.03):
-  """Return the Structural Similarity Map between `img1` and `img2`.
-
-  This function attempts to match the functionality of ssim_index_new.m by
-  Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
-
-  Arguments:
-    img1: Numpy array holding the first RGB image batch.
-    img2: Numpy array holding the second RGB image batch.
-    max_val: the dynamic range of the images (i.e., the difference between the
-      maximum and the minimum allowed values).
-    filter_size: Size of blur kernel to use (will be reduced for small images).
-    filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
-      for small images).
-    k1: Constant used to maintain stability in the SSIM calculation (0.01 in
-      the original paper).
-    k2: Constant used to maintain stability in the SSIM calculation (0.03 in
-      the original paper).
-
-  Returns:
-    Pair containing the mean SSIM and contrast sensitivity between `img1` and
-    `img2`.
-
-  Raises:
-    RuntimeError: If input images don't have the same shape or don't have four
-      dimensions: [batch_size, height, width, depth].
-  """
-  if img1.shape != img2.shape:
-    raise RuntimeError('Input images must have the same shape (%s vs. %s).' %
-                       (img1.shape, img2.shape))
-  if img1.ndim != 4:
-    raise RuntimeError('Input images must have four dimensions, not %d' %
-                       img1.ndim)
-
-  img1 = img1.astype(np.float64)
-  img2 = img2.astype(np.float64)
-  _, height, width, _ = img1.shape
-
-  # Filter size can't be larger than height or width of images.
-  size = min(filter_size, height, width)
-
-  # Scale down sigma if a smaller filter size is used.
-  sigma = size * filter_sigma / filter_size if filter_size else 0
-
-  if filter_size:
-    window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
-    mu1 = signal.fftconvolve(img1, window, mode='valid')
-    mu2 = signal.fftconvolve(img2, window, mode='valid')
-    sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
-    sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
-    sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
-  else:
-    # Empty blur kernel so no need to convolve.
-    mu1, mu2 = img1, img2
-    sigma11 = img1 * img1
-    sigma22 = img2 * img2
-    sigma12 = img1 * img2
-
-  mu11 = mu1 * mu1
-  mu22 = mu2 * mu2
-  mu12 = mu1 * mu2
-  sigma11 -= mu11
-  sigma22 -= mu22
-  sigma12 -= mu12
-
-  # Calculate intermediate values used by both ssim and cs_map.
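-  # For dynamic range L = max_val, c1 = (k1 * L)**2 and c2 = (k2 * L)**2
-  # stabilize the ratios below, and the per-pixel SSIM map is
-  #   ((2*mu1*mu2 + c1) * (2*sigma12 + c2)) /
-  #     ((mu1**2 + mu2**2 + c1) * (sigma11 + sigma22 + c2)),
-  # while the contrast sensitivity cs keeps only the factor v1 / v2.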
-  c1 = (k1 * max_val) ** 2
-  c2 = (k2 * max_val) ** 2
-  v1 = 2.0 * sigma12 + c2
-  v2 = sigma11 + sigma22 + c2
-  ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
-  cs = np.mean(v1 / v2)
-  return ssim, cs
-
-
-def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5,
-                   k1=0.01, k2=0.03, weights=None):
-  """Return the MS-SSIM score between `img1` and `img2`.
-
-  This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
-  Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
-  similarity for image quality assessment" (2003).
-  Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
-
-  Author's MATLAB implementation:
-  http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
-
-  Arguments:
-    img1: Numpy array holding the first RGB image batch.
-    img2: Numpy array holding the second RGB image batch.
-    max_val: the dynamic range of the images (i.e., the difference between the
-      maximum and the minimum allowed values).
-    filter_size: Size of blur kernel to use (will be reduced for small images).
-    filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
-      for small images).
-    k1: Constant used to maintain stability in the SSIM calculation (0.01 in
-      the original paper).
-    k2: Constant used to maintain stability in the SSIM calculation (0.03 in
-      the original paper).
-    weights: List of weights for each level; if none, use five levels and the
-      weights from the original paper.
-
-  Returns:
-    MS-SSIM score between `img1` and `img2`.
-
-  Raises:
-    RuntimeError: If input images don't have the same shape or don't have four
-      dimensions: [batch_size, height, width, depth].
-  """
-  if img1.shape != img2.shape:
-    raise RuntimeError('Input images must have the same shape (%s vs. %s).' %
-                       (img1.shape, img2.shape))
-  if img1.ndim != 4:
-    raise RuntimeError('Input images must have four dimensions, not %d' %
-                       img1.ndim)
-
-  # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
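-  # The final score combines the contrast sensitivity (cs) of the first
-  # (levels - 1) scales with the full SSIM of the coarsest scale:
-  #   MS-SSIM = (prod_{j < levels-1} cs_j ** w_j) * ssim_{levels-1} ** w_{levels-1},
-  # where each scale is a 2x2 box-filtered and downsampled version of the
-  # previous one.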
-  weights = np.array(weights if weights else
-                     [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
-  levels = weights.size
-  downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
-  im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
-  mssim = np.array([])
-  mcs = np.array([])
-  for _ in range(levels):
-    ssim, cs = _SSIMForMultiScale(
-        im1, im2, max_val=max_val, filter_size=filter_size,
-        filter_sigma=filter_sigma, k1=k1, k2=k2)
-    mssim = np.append(mssim, ssim)
-    mcs = np.append(mcs, cs)
-    filtered = [convolve(im, downsample_filter, mode='reflect')
-                for im in [im1, im2]]
-    im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
-  return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
-          (mssim[levels-1] ** weights[levels-1]))
-
-
-def main(_):
-  if FLAGS.original_image is None or FLAGS.compared_image is None:
-    print('\nUsage: python msssim.py --original_image=original.png '
-          '--compared_image=distorted.png\n\n')
-    return
-
-  if not tf.gfile.Exists(FLAGS.original_image):
-    print('\nCannot find --original_image.\n')
-    return
-
-  if not tf.gfile.Exists(FLAGS.compared_image):
-    print('\nCannot find --compared_image.\n')
-    return
-
-  with tf.gfile.FastGFile(FLAGS.original_image, 'rb') as image_file:
-    img1_str = image_file.read()
-  with tf.gfile.FastGFile(FLAGS.compared_image, 'rb') as image_file:
-    img2_str = image_file.read()
-
-  input_img = tf.placeholder(tf.string)
-  decoded_image = tf.expand_dims(tf.image.decode_png(input_img, channels=3), 0)
-
-  with tf.Session() as sess:
-    img1 = sess.run(decoded_image, feed_dict={input_img: img1_str})
-    img2 = sess.run(decoded_image, feed_dict={input_img: img2_str})
-
-  print(MultiScaleSSIM(img1, img2, max_val=255))
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/deep_contextual_bandits/README.md b/research/deep_contextual_bandits/README.md
deleted file mode 100644
index b81309af5..000000000
--- a/research/deep_contextual_bandits/README.md
+++ /dev/null
@@ -1,444 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Deep Bayesian Bandits Library
-
-This library corresponds to the *[Deep Bayesian Bandits Showdown: An Empirical
-Comparison of Bayesian Deep Networks for Thompson
-Sampling](https://arxiv.org/abs/1802.09127)* paper, published in
-[ICLR](https://iclr.cc/) 2018. We provide a benchmark to test decision-making
-algorithms for contextual bandits. In particular, the current library implements
-a variety of algorithms (many of them based on approximate Bayesian Neural
-Networks and Thompson sampling), and a number of real and synthetic data
-problems exhibiting a diverse set of properties.
-
-It is a Python library that uses [TensorFlow](https://www.tensorflow.org/).
-
-We encourage contributors to add new approximate Bayesian Neural Networks or,
-more generally, contextual bandits algorithms to the library. Also, we would
-like to extend the data sources over time, so we warmly encourage contributions
-on this front too!
-
-Please, use the following when citing the code or the paper:
-
-```
-@article{riquelme2018deep, title={Deep Bayesian Bandits Showdown: An Empirical
-Comparison of Bayesian Deep Networks for Thompson Sampling},
-author={Riquelme, Carlos and Tucker, George and Snoek, Jasper},
-journal={International Conference on Learning Representations, ICLR.}, year={2018}}
-```
-
-**Contact**. This repository is maintained by [Carlos Riquelme](http://rikel.me) ([rikel](https://github.com/rikel)). Feel free to reach out directly at [rikel@google.com](mailto:rikel@google.com) with any questions or comments.
-
-
-We first briefly introduce contextual bandits and Thompson sampling, enumerate
-the implemented algorithms, and describe the available data sources. Then, we
-provide a simple complete example illustrating how to use the library.
-
-## Contextual Bandits
-
-Contextual bandits are a rich decision-making framework where an algorithm has
-to choose among a set of *k* actions at every time step *t*, after observing
-a context (or side-information) denoted by *X<sub>t</sub>*. The general
-pseudocode for the process if we use algorithm **A** is as follows:
-
-```
-At time t = 1, ..., T:
-    1. Observe new context: X_t
-    2. Choose action: a_t = A.action(X_t)
-    3. Observe reward: r_t
-    4. Update internal state of the algorithm: A.update((X_t, a_t, r_t))
-```
-
-The goal is to maximize the total sum of rewards: ∑<sub>t</sub> r<sub>t</sub>.
-
-For example, each *X<sub>t</sub>* could encode the properties of a specific
-user (and the time or day), and we may have to choose an ad, discount coupon,
-treatment, hyper-parameters, or version of a website to show or provide to the
-user. Hopefully, over time, we will learn how to match each type of user to the
-most beneficial personalized action under some metric (the reward).
-
-## Thompson Sampling
-
-Thompson Sampling is a meta-algorithm that chooses an action for the contextual
-bandit in a statistically efficient manner, simultaneously finding the best arm
-while attempting to incur low cost. Informally speaking, we assume the expected
-reward is given by some function
-**E**[r<sub>t</sub> | X<sub>t</sub>, a<sub>t</sub>] = f(X<sub>t</sub>, a<sub>t</sub>).
-Unfortunately, function **f** is unknown, as otherwise we could just choose the
-action with highest expected value:
-a<sub>t</sub><sup>*</sup> = arg max<sub>i</sub> f(X<sub>t</sub>, a<sub>i</sub>).
-
-The idea behind Thompson Sampling is based on keeping a posterior distribution
-π<sub>t</sub> over functions in some family f ∈ F after observing the first
-*t-1* datapoints. Then, at time *t*, we sample one potential explanation of
-the underlying process: f<sub>t</sub> ∼ π<sub>t</sub>, and act optimally
-(i.e., greedily) *according to f<sub>t</sub>*. In other words, we choose
-a<sub>t</sub> = arg max<sub>i</sub> f<sub>t</sub>(X<sub>t</sub>, a<sub>i</sub>).
-Finally, we update our posterior distribution with the new collected
-datapoint (X<sub>t</sub>, a<sub>t</sub>, r<sub>t</sub>).
-
-The main issue is that keeping an updated posterior π<sub>t</sub> (or, even,
-sampling from it) is often intractable for highly parameterized models like
-deep neural networks. The algorithms we list in the next section provide
-tractable *approximations* that can be used in combination with Thompson
-Sampling to solve the contextual bandit problem.
-
-## Algorithms
-
-The Deep Bayesian Bandits library includes the following algorithms (see the
-[paper](https://arxiv.org/abs/1802.09127) for further details):
-
-1.  **Linear Algorithms**. As a powerful baseline, we provide linear algorithms.
-    In particular, we focus on the exact Bayesian linear regression
-    implementation, while it is easy to derive the greedy OLS version (possibly,
-    with epsilon-greedy exploration). The algorithm is implemented in
-    *linear_full_posterior_sampling.py*, and it is instantiated as follows:
-
-    ```
-    linear_full = LinearFullPosteriorSampling('MyLinearTS', my_hparams)
-    ```
-
-2.  **Neural Linear**. We introduce an algorithm we call Neural Linear, which
-    operates by learning a neural network to map contexts to rewards for each
-    action and, simultaneously, updating a Bayesian linear regression in the
-    last layer (i.e., the one that maps the final representation **z** to the
-    rewards **r**). Thompson Sampling samples the linear parameters
-    β<sub>i</sub> for each action *i*, but keeps the network that computes the
-    representation. Then, both parts (network and Bayesian linear regression)
-    are updated, possibly at different frequencies. The algorithm is implemented
-    in *neural_linear_sampling.py*, and we create an algorithm instance like
-    this:
-
-    ```
-    neural_linear = NeuralLinearPosteriorSampling('MyNLinear', my_hparams)
-    ```
-
-3.  **Neural Greedy**. Another standard benchmark is to train a neural network
-    that maps contexts to rewards and, at each time *t*, act greedily
-    according to the current model. In particular, this approach does *not*
-    explicitly use Thompson Sampling. However, due to stochastic gradient
-    descent, there is still some randomness in its output. It is
-    straightforward to add epsilon-greedy exploration to choose random
-    actions with probability ε ∈ (0, 1). The algorithm is
-    implemented in *neural_bandit_model.py*, and it is used together with
-    *PosteriorBNNSampling* (defined in *posterior_bnn_sampling.py*) by calling:
-
-    ```
-    neural_greedy = PosteriorBNNSampling('MyNGreedy', my_hparams, 'RMSProp')
-    ```
-
-4.  **Stochastic Variational Inference**, Bayes by Backpropagation. We implement
-    a Bayesian neural network by modeling each individual weight posterior as a
-    univariate Gaussian distribution:
-    w<sub>ij</sub> ∼ N(μ<sub>ij</sub>, σ<sub>ij</sub><sup>2</sup>).
-    Thompson sampling then samples a network at each time step
-    by sampling each weight independently. The variational approach consists in
-    maximizing a proxy for maximum likelihood of the observed data, the ELBO or
-    variational lower bound, to fit the values of μ<sub>ij</sub>,
-    σ<sub>ij</sub><sup>2</sup> for every *i, j*.
-
-    See [Weight Uncertainty in Neural
-    Networks](https://arxiv.org/abs/1505.05424).
-
-    The BNN algorithm is implemented in *variational_neural_bandit_model.py*,
-    and it is used together with *PosteriorBNNSampling* (defined in
-    *posterior_bnn_sampling.py*) by calling:
-
-    ```
-    bbb = PosteriorBNNSampling('myBBB', my_hparams, 'Variational')
-    ```
-
-5.  **Expectation-Propagation**, Black-box alpha-divergence minimization.
-    The family of expectation-propagation algorithms is based on the message
-    passing framework. They iteratively approximate the posterior by updating a
-    single approximation factor (or site) at a time, which usually corresponds
-    to the likelihood of one data point. We focus on methods that directly
-    optimize the global EP objective via stochastic gradient descent, as, for
-    instance, Power EP. For further details, see the original paper below.
-
-    See [Black-box alpha-divergence
-    Minimization](https://arxiv.org/abs/1511.03243).
-
-    We create an instance of the algorithm like this:
-
-    ```
-    bb_adiv = PosteriorBNNSampling('MyEP', my_hparams, 'AlphaDiv')
-    ```
-
-6.  **Dropout**. Dropout is a training technique where the output of each neuron
-    is independently zeroed out with probability *p* at each forward pass.
-    Once the network has been trained, dropout can still be used to obtain a
-    distribution of predictions for a specific input. Following the best action
-    with respect to the random dropout prediction can be interpreted as an
-    implicit form of Thompson sampling. The code for dropout is the same as for
-    Neural Greedy (see above), but we need to set two hyper-parameters:
-    *use_dropout=True* and *keep_prob*, the probability of keeping each
-    neuron's output (i.e., *1 - p*), which takes the desired value in (0, 1).
-    Then:
-
-    ```
-    dropout = PosteriorBNNSampling('MyDropout', my_hparams, 'RMSProp')
-    ```
-
-7.  **Monte Carlo Methods**. To be added soon.
-
-8.  **Bootstrapped Networks**. This algorithm trains simultaneously and in
-    parallel **q** neural networks based on different datasets D<sub>1</sub>,
-    ..., D<sub>q</sub>. The way those datasets are collected is by adding each
-    new collected datapoint (X<sub>t</sub>, a<sub>t</sub>, r<sub>t</sub>) to
-    each dataset *D<sub>i</sub>* independently and with probability
-    p ∈ (0, 1]. Therefore, the main hyperparameters of the
-    algorithm are **(q, p)**. In order to choose an action for a new context,
-    one of the **q** networks is first selected with uniform probability (i.e.,
-    *1/q*). Then, the best action according to the *selected* network is
-    played.
-
-    See [Deep Exploration via Bootstrapped
-    DQN](https://arxiv.org/abs/1602.04621).
-
-    The algorithm is implemented in *bootstrapped_bnn_sampling.py*, and we
-    instantiate it as (where *my_hparams* contains both **q** and **p**):
-
-    ```
-    bootstrap = BootstrappedBNNSampling('MyBoot', my_hparams)
-    ```
-
-9.  **Parameter-Noise**. Another approach to approximate a distribution over
-    neural networks (or more generally, models) that map contexts to rewards
-    consists in randomly perturbing a point estimate trained by Stochastic
-    Gradient Descent on the data. The Parameter-Noise algorithm uses a heuristic
-    to control the amount of noise σ<sub>t</sub><sup>2</sup> it adds
-    independently to the parameters representing a neural network:
-    θ<sub>t</sub>' = θ<sub>t</sub> + ε, where
-    ε ∼ N(0, σ<sub>t</sub><sup>2</sup> Id).
-    After using θ<sub>t</sub>' for decision making, the following SGD
-    training steps start again from θ<sub>t</sub>. The key hyperparameters to
-    set are those controlling the noise heuristic.
-
-    See [Parameter Space Noise for
-    Exploration](https://arxiv.org/abs/1706.01905).
-
-    The algorithm is implemented in *parameter_noise_sampling.py*, and we create
-    an instance by calling:
-
-    ```
-    parameter_noise = ParameterNoiseSampling('MyParamNoise', my_hparams)
-    ```
-
-10. **Gaussian Processes**. Another standard benchmark is Gaussian Processes;
-    see *Gaussian Processes for Machine Learning* by Rasmussen and Williams for
-    an introduction. To model the expected reward of different actions, we fit a
-    multitask GP.
-
-    See [Multi-task Gaussian Process
-    Prediction](http://papers.nips.cc/paper/3189-multi-task-gaussian-process-prediction.pdf).
-
-    Our implementation is provided in *multitask_gp.py*, and it is instantiated
-    as follows:
-
-    ```
-    gp = PosteriorBNNSampling('MyMultitaskGP', my_hparams, 'GP')
-    ```
-
-In the code snippet at the bottom, we show how to instantiate some of these
-algorithms, how to run the contextual bandit simulator, and how to display the
-high-level results.
-
-## Data
-
-In the paper we use two types of contextual datasets: synthetic and based on
-real-world data.
-
-We provide functions that sample problems from those datasets. In the case of
-real-world data, you first need to download the raw datasets, and pass their
-paths to the functions. Links for the datasets are provided below.
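-
-All of these problems are consumed through the interaction loop from the
-Contextual Bandits section above. As a minimal illustration (a simplified
-sketch, not a replacement for the real simulator in *run_contextual_bandit*),
-the snippet below assumes the dataset layout used throughout the library,
-where each row stores the context followed by the reward of every action,
-together with the *action*/*update* interface of the algorithms:
-
-```
-import numpy as np
-
-def run_one_algorithm(dataset, algo, num_actions):
-  """Plays `algo` over `dataset` and returns the collected rewards."""
-  h_rewards = []
-  for row in dataset:
-    context = row[:-num_actions]          # 1. observe new context X_t
-    action = algo.action(context)         # 2. choose action a_t
-    reward = row[-num_actions:][action]   # 3. observe reward of the chosen arm
-    algo.update(context, action, reward)  # 4. update internal state
-    h_rewards.append(reward)
-  return np.array(h_rewards)
-```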
- -### Synthetic Datasets - -Synthetic datasets are contained in the *synthetic_data_sampler.py* file. In -particular, it includes: - -1. **Linear data**. Provides a number of linear arms, and Gaussian contexts. - -2. **Sparse linear data**. Provides a number of sparse linear arms, and - Gaussian contexts. - -3. **Wheel bandit data**. Provides sampled data from the wheel bandit data, see - [Section 5.4](https://arxiv.org/abs/1802.09127) in the paper. - -### Real-World Datasets - -Real-world data generating functions are contained in the *data_sampler.py* -file. - -In particular, it includes: - -1. **Mushroom data**. Each incoming context represents a different type of - mushroom, and the actions are eat or no-eat. Eating an edible mushroom - provides positive reward, while eating a poisonous one provides positive - reward with probability *p*, and a large negative reward with probability - *1-p*. All the rewards, and the value of *p* are customizable. The - [dataset](https://archive.ics.uci.edu/ml/datasets/mushroom) is part of the - UCI repository, and the bandit problem was proposed in Blundell et al. - (2015). Data is available [here](https://storage.googleapis.com/bandits_datasets/mushroom.data) - or alternatively [here](https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/), - use the *agaricus-lepiota.data* file. - -2. **Stock data**. We created the Financial Dataset by pulling the stock prices - of *d = 21* publicly traded companies in NYSE and Nasdaq, for the last 14 - years (*n = 3713*). For each day, the context was the price difference - between the beginning and end of the session for each stock. We - synthetically created the arms to be a linear combination of the contexts, - representing *k = 8* different potential portfolios. Data is available - [here](https://storage.googleapis.com/bandits_datasets/raw_stock_contexts). - -3. **Jester data**. We create a recommendation system bandit problem as - follows. The Jester Dataset (Goldberg et al., 2001) provides continuous - ratings in *[-10, 10]* for 100 jokes from a total of 73421 users. We find - a *complete* subset of *n = 19181* users rating all 40 jokes. Following - Riquelme et al. (2017), we take *d = 32* of the ratings as the context of - the user, and *k = 8* as the arms. The agent recommends one joke, and - obtains the reward corresponding to the rating of the user for the selected - joke. Data is available [here](https://storage.googleapis.com/bandits_datasets/jester_data_40jokes_19181users.npy). - -4. **Statlog data**. The Shuttle Statlog Dataset (Asuncion & Newman, 2007) - provides the value of *d = 9* indicators during a space shuttle flight, - and the goal is to predict the state of the radiator subsystem of the - shuttle. There are *k = 7* possible states, and if the agent selects the - right state, then reward 1 is generated. Otherwise, the agent obtains no - reward (*r = 0*). The most interesting aspect of the dataset is that one - action is the optimal one in 80% of the cases, and some algorithms may - commit to this action instead of further exploring. In this case, the number - of contexts is *n = 43500*. Data is available [here](https://storage.googleapis.com/bandits_datasets/shuttle.trn) or alternatively - [here](https://archive.ics.uci.edu/ml/datasets/Statlog+\(Shuttle\)), use - *shuttle.trn* file. - -5. **Adult data**. 
The Adult Dataset (Kohavi, 1996; Asuncion & Newman, 2007) - comprises personal information from the US Census Bureau database, and the - standard prediction task is to determine if a person makes over 50K a year - or not. However, we consider the *k = 14* different occupations as - feasible actions, based on *d = 94* covariates (many of them binarized). - As in previous datasets, the agent obtains a reward of 1 for making the - right prediction, and 0 otherwise. The total number of contexts is *n = - 45222*. Data is available [here](https://storage.googleapis.com/bandits_datasets/adult.full) or alternatively - [here](https://archive.ics.uci.edu/ml/datasets/adult), use *adult.data* - file. - -6. **Census data**. The US Census (1990) Dataset (Asuncion & Newman, 2007) - contains a number of personal features (age, native language, education...) - which we summarize in *d = 389* covariates, including binary dummy - variables for categorical features. Our goal again is to predict the - occupation of the individual among *k = 9* classes. The agent obtains - reward 1 for making the right prediction, and 0 otherwise. Data is available - [here](https://storage.googleapis.com/bandits_datasets/USCensus1990.data.txt) or alternatively [here](https://archive.ics.uci.edu/ml/datasets/US+Census+Data+\(1990\)), use - *USCensus1990.data.txt* file. - -7. **Covertype data**. The Covertype Dataset (Asuncion & Newman, 2007) - classifies the cover type of northern Colorado forest areas in *k = 7* - classes, based on *d = 54* features, including elevation, slope, aspect, - and soil type. Again, the agent obtains reward 1 if the correct class is - selected, and 0 otherwise. Data is available [here](https://storage.googleapis.com/bandits_datasets/covtype.data) or alternatively - [here](https://archive.ics.uci.edu/ml/datasets/covertype), use - *covtype.data* file. - -In datasets 4-7, each feature of the dataset is normalized first. - -## Usage: Basic Example - -This library requires Tensorflow, Numpy, and Pandas. - -The file *example_main.py* provides a complete example on how to use the -library. We run the code: - -``` - python example_main.py -``` - -**Do not forget to** configure the routes to the data files at the top of *example_main.py*. - -For example, we can run the Mushroom bandit for 2000 contexts on a few -algorithms as follows: - -``` - # Problem parameters - num_contexts = 2000 - - # Choose data source among: - # {linear, sparse_linear, mushroom, financial, jester, - # statlog, adult, covertype, census, wheel} - data_type = 'mushroom' - - # Create dataset - sampled_vals = sample_data(data_type, num_contexts) - dataset, opt_rewards, opt_actions, num_actions, context_dim = sampled_vals - - # Define hyperparameters and algorithms - hparams_linear = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - a0=6, - b0=6, - lambda_prior=0.25, - initial_pulls=2) - - hparams_dropout = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - reset_lr=True, - lr_decay_rate=0.5, - training_freq=50, - training_epochs=100, - keep_prob=0.80, - use_dropout=True) - - ### Create hyper-parameter configurations for other algorithms - [...] 
- - algos = [ - UniformSampling('Uniform Sampling', hparams), - PosteriorBNNSampling('Dropout', hparams_dropout, 'RMSProp'), - PosteriorBNNSampling('BBB', hparams_bbb, 'Variational'), - NeuralLinearPosteriorSampling('NeuralLinear', hparams_nlinear), - LinearFullPosteriorSampling('LinFullPost', hparams_linear), - BootstrappedBNNSampling('BootRMS', hparams_boot), - ParameterNoiseSampling('ParamNoise', hparams_pnoise), - ] - - # Run contextual bandit problem - t_init = time.time() - results = run_contextual_bandit(context_dim, num_actions, dataset, algos) - _, h_rewards = results - - # Display results - display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, data_type) - -``` - -The previous code leads to final results that look like: - -``` ---------------------------------------------------- ---------------------------------------------------- -mushroom bandit completed after 69.8401839733 seconds. ---------------------------------------------------- - 0) LinFullPost | total reward = 4365.0. - 1) NeuralLinear | total reward = 4110.0. - 2) Dropout | total reward = 3430.0. - 3) ParamNoise | total reward = 3270.0. - 4) BootRMS | total reward = 3050.0. - 5) BBB | total reward = 2505.0. - 6) Uniform Sampling | total reward = -4930.0. ---------------------------------------------------- -Optimal total reward = 5235. -Frequency of optimal actions (action, frequency): -[[0, 953], [1, 1047]] ---------------------------------------------------- ---------------------------------------------------- -``` diff --git a/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py b/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py deleted file mode 100644 index 5b9c0ebd0..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/bb_alpha_divergence_model.py +++ /dev/null @@ -1,373 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Bayesian NN using expectation propagation (Black-Box Alpha-Divergence). - -See https://arxiv.org/abs/1511.03243 for details. -All formulas used in this implementation are derived in: -https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/. 
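-
-In particular, build_model below minimizes the black-box alpha energy,
-estimated with K Monte Carlo samples of the network weights on a minibatch of
-size B drawn from the N observed points:
-
-  energy = log Z_prior - log Z_q
-           - N / (B * alpha) * sum_n [logsumexp_k(alpha * (log lik_nk
-                                                           - log f_k)) - log K]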
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import numpy as np -import tensorflow as tf -from absl import flags - -from bandits.core.bayesian_nn import BayesianNN - - -FLAGS = flags.FLAGS -tfd = tf.contrib.distributions # update to: tensorflow_probability.distributions - - -def log_gaussian(x, mu, sigma, reduce_sum=True): - res = tfd.Normal(mu, sigma).log_prob(x) - if reduce_sum: - return tf.reduce_sum(res) - else: - return res - - -class BBAlphaDivergence(BayesianNN): - """Implements an approximate Bayesian NN via Black-Box Alpha-Divergence.""" - - def __init__(self, hparams, name): - - self.name = name - self.hparams = hparams - - self.alpha = getattr(self.hparams, 'alpha', 1.0) - self.num_mc_nn_samples = getattr(self.hparams, 'num_mc_nn_samples', 10) - - self.n_in = self.hparams.context_dim - self.n_out = self.hparams.num_actions - self.layers = self.hparams.layer_sizes - self.batch_size = self.hparams.batch_size - - self.show_training = self.hparams.show_training - self.freq_summary = self.hparams.freq_summary - self.verbose = getattr(self.hparams, 'verbose', True) - - self.cleared_times_trained = self.hparams.cleared_times_trained - self.initial_training_steps = self.hparams.initial_training_steps - self.training_schedule = np.linspace(self.initial_training_steps, - self.hparams.training_epochs, - self.cleared_times_trained) - - self.times_trained = 0 - self.initialize_model() - - def initialize_model(self): - """Builds and initialize the model.""" - - self.num_w = 0 - self.num_b = 0 - - self.weights_m = {} - self.weights_std = {} - self.biases_m = {} - self.biases_std = {} - - self.h_max_var = [] - - if self.hparams.use_sigma_exp_transform: - self.sigma_transform = tfd.bijectors.Exp() - else: - self.sigma_transform = tfd.bijectors.Softplus() - - # Build the graph corresponding to the Bayesian NN instance. - self.graph = tf.Graph() - - with self.graph.as_default(): - - self.sess = tf.Session() - self.x = tf.placeholder(shape=[None, self.n_in], - dtype=tf.float32, name='x') - self.y = tf.placeholder(shape=[None, self.n_out], - dtype=tf.float32, name='y') - self.weights = tf.placeholder(shape=[None, self.n_out], - dtype=tf.float32, name='w') - self.data_size = tf.placeholder(tf.float32, shape=(), name='data_size') - - self.prior_variance = self.hparams.prior_variance - if self.prior_variance < 0: - # if not fixed, we learn the prior. 
- self.prior_variance = self.sigma_transform.forward( - self.build_mu_variable([1, 1])) - - self.build_model() - self.sess.run(tf.global_variables_initializer()) - - def build_mu_variable(self, shape): - """Returns a mean variable initialized as N(0, 0.05).""" - return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) - - def build_sigma_variable(self, shape, init=-5.): - """Returns a sigma variable initialized as N(init, 0.05).""" - # Initialize sigma to be very small initially to encourage MAP opt first - return tf.Variable(tf.random_normal(shape, init, 0.05)) - - def build_layer(self, input_x, shape, layer_id, activation_fn=tf.nn.relu): - """Builds a layer with N(mean, std) for each weight, and samples from it.""" - - w_mu = self.build_mu_variable(shape) - w_sigma = self.sigma_transform.forward(self.build_sigma_variable(shape)) - - w_noise = tf.random_normal(shape) - w = w_mu + w_sigma * w_noise - - b_mu = self.build_mu_variable([1, shape[1]]) - b_sigma = self.sigma_transform.forward( - self.build_sigma_variable([1, shape[1]])) - - b_noise = tf.random_normal([1, shape[1]]) - b = b_mu + b_sigma * b_noise - - # Create outputs - output_h = activation_fn(tf.matmul(input_x, w) + b) - - # Store means and stds - self.weights_m[layer_id] = w_mu - self.weights_std[layer_id] = w_sigma - self.biases_m[layer_id] = b_mu - self.biases_std[layer_id] = b_sigma - - return output_h - - def sample_neural_network(self, activation_fn=tf.nn.relu): - """Samples a nn from posterior, computes data log lk and log f factor.""" - - with self.graph.as_default(): - - log_f = 0 - n = self.data_size - input_x = self.x - - for layer_id in range(self.total_layers): - - # load mean and std of each weight - w_mu = self.weights_m[layer_id] - w_sigma = self.weights_std[layer_id] - b_mu = self.biases_m[layer_id] - b_sigma = self.biases_std[layer_id] - - # sample weights from Gaussian distribution - shape = w_mu.shape - w_noise = tf.random_normal(shape) - b_noise = tf.random_normal([1, int(shape[1])]) - w = w_mu + w_sigma * w_noise - b = b_mu + b_sigma * b_noise - - # compute contribution to log_f - t1 = w * w_mu / (n * w_sigma ** 2) - t2 = (0.5 * w ** 2 / n) * (1 / self.prior_variance - 1 / w_sigma ** 2) - log_f += tf.reduce_sum(t1 + t2) - - t1 = b * b_mu / (n * b_sigma ** 2) - t2 = (0.5 * b ** 2 / n) * (1 / self.prior_variance - 1 / b_sigma ** 2) - log_f += tf.reduce_sum(t1 + t2) - - if layer_id < self.total_layers - 1: - output_h = activation_fn(tf.matmul(input_x, w) + b) - else: - output_h = tf.matmul(input_x, w) + b - - input_x = output_h - - # compute log likelihood of the observed reward under the sampled nn - log_likelihood = log_gaussian( - self.y, output_h, self.noise_sigma, reduce_sum=False) - weighted_log_likelihood = tf.reduce_sum(log_likelihood * self.weights, -1) - - return log_f, weighted_log_likelihood - - def log_z_q(self): - """Computes log-partition function of current posterior parameters.""" - - with self.graph.as_default(): - - log_z_q = 0 - - for layer_id in range(self.total_layers): - - w_mu = self.weights_m[layer_id] - w_sigma = self.weights_std[layer_id] - b_mu = self.biases_m[layer_id] - b_sigma = self.biases_std[layer_id] - - w_term = 0.5 * tf.reduce_sum(w_mu ** 2 / w_sigma ** 2) - w_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(w_sigma)) - - b_term = 0.5 * tf.reduce_sum(b_mu ** 2 / b_sigma ** 2) - b_term += 0.5 * tf.reduce_sum(tf.log(2 * np.pi) + 2 * tf.log(b_sigma)) - - log_z_q += w_term + b_term - - return log_z_q - - def log_z_prior(self): - """Computes log-partition function of the 
prior parameters."""
-    num_params = self.num_w + self.num_b
-    return num_params * 0.5 * tf.log(2 * np.pi * self.prior_variance)
-
-  def log_alpha_likelihood_ratio(self, activation_fn=tf.nn.relu):
-    """Computes the alpha-scaled likelihood-ratio term over k MC samples."""
-
-    # each nn sample returns (log f, log likelihoods)
-    nn_samples = [
-        self.sample_neural_network(activation_fn)
-        for _ in range(self.num_mc_nn_samples)
-    ]
-    nn_log_f_samples = [elt[0] for elt in nn_samples]
-    nn_log_lk_samples = [elt[1] for elt in nn_samples]
-
-    # we stack the (log f, log likelihoods) from the k nn samples
-    nn_log_f_stack = tf.stack(nn_log_f_samples)      # shape: [k]
-    nn_log_lk_stack = tf.stack(nn_log_lk_samples)    # shape: [k, N]
-    # Broadcast each sampled network's log f term across the batch, so that
-    # row i holds the (constant) log f of sampled network i.
-    nn_f_tile = tf.tile(tf.reshape(nn_log_f_stack, [-1, 1]),
-                        [1, self.batch_size])
-
-    # now both the log f and log likelihood terms have shape: k x N
-    # apply formula in https://www.overleaf.com/12837696kwzjxkyhdytk#/49028744/
-    nn_log_ratio = nn_log_lk_stack - nn_f_tile
-    nn_log_ratio = self.alpha * tf.transpose(nn_log_ratio)
-    logsumexp_value = tf.reduce_logsumexp(nn_log_ratio, -1)
-    log_k_scalar = tf.log(tf.cast(self.num_mc_nn_samples, tf.float32))
-    log_k = log_k_scalar * tf.ones([self.batch_size])
-
-    return tf.reduce_sum(logsumexp_value - log_k, -1)
-
-  def build_model(self, activation_fn=tf.nn.relu):
-    """Defines the actual NN model with fully connected layers.
-
-    The loss is computed for partial feedback settings (bandits), so only
-    the observed outcome is backpropagated (see weighted loss).
-    Selects the optimizer and, finally, it also initializes the graph.
-
-    Args:
-      activation_fn: Activation function for the neural network.
-    """
-
-    print('Initializing model {}.'.format(self.name))
-
-    # Build terms for the noise sigma estimation for each action.
-    noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
-                      + self.sigma_transform.inverse(self.hparams.noise_sigma))
-    noise_sigma_sigma = self.sigma_transform.forward(
-        self.build_sigma_variable([1, self.n_out]))
-
-    pre_noise_sigma = noise_sigma_mu + tf.random_normal(
-        [1, self.n_out]) * noise_sigma_sigma
-    self.noise_sigma = self.sigma_transform.forward(pre_noise_sigma)
-
-    # Build network.
-    input_x = self.x
-    n_in = self.n_in
-    self.total_layers = len(self.layers) + 1
-    if self.layers[0] == 0:
-      self.total_layers = 1
-
-    for l_number, n_nodes in enumerate(self.layers):
-      if n_nodes > 0:
-        h = self.build_layer(input_x, [n_in, n_nodes], l_number)
-        # Count this layer's parameters before n_in is overwritten, so that
-        # the [n_in, n_nodes] weight matrix is tallied correctly.
-        self.num_w += n_in * n_nodes
-        self.num_b += n_nodes
-        input_x = h
-        n_in = n_nodes
-
-    self.y_pred = self.build_layer(input_x, [n_in, self.n_out],
-                                   self.total_layers - 1,
-                                   activation_fn=lambda x: x)
-    # Include the output layer so that log_z_prior counts the same set of
-    # parameters as log_z_q.
-    self.num_w += n_in * self.n_out
-    self.num_b += self.n_out
-
-    # Compute energy function based on sampled nn's
-    log_coeff = self.data_size / (self.batch_size * self.alpha)
-    log_ratio = log_coeff * self.log_alpha_likelihood_ratio(activation_fn)
-    logzprior = self.log_z_prior()
-    logzq = self.log_z_q()
-    energy = logzprior - logzq - log_ratio
-
-    self.loss = energy
-    self.global_step = tf.train.get_or_create_global_step()
-    self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize(
-        self.loss, global_step=self.global_step)
-
-    # Useful for debugging
-    sq_loss = tf.squared_difference(self.y_pred, self.y)
-    weighted_sq_loss = self.weights * sq_loss
-    self.cost = tf.reduce_sum(weighted_sq_loss) / self.batch_size
-
-    # Create tensorboard metrics
-    self.create_summaries()
-    self.summary_writer = tf.summary.FileWriter('{}/graph_{}'.format(
-        FLAGS.logdir, self.name), self.sess.graph)
-
-  def create_summaries(self):
-    tf.summary.scalar('loss', self.loss)
-    tf.summary.scalar('cost', self.cost)
-    self.summary_op = tf.summary.merge_all()
-
-  def assign_lr(self):
-    """Resets the learning rate in dynamic schedules for subsequent trainings.
-
-    In bandit settings, the dataset expands over time. Then, we need to
-    re-train the network with the new data. Those algorithms that do not keep
-    the step constant can reset it at the start of each training process.
-    """
-
-    decay_steps = 1
-    if self.hparams.activate_decay:
-      current_gs = self.sess.run(self.global_step)
-      with self.graph.as_default():
-        self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
-                                              self.global_step - current_gs,
-                                              decay_steps,
-                                              self.hparams.lr_decay_rate)
-
-  def train(self, data, num_steps):
-    """Trains the BNN for num_steps, using the data in 'data'.
-
-    Args:
-      data: ContextualDataset object that provides the data.
-      num_steps: Number of minibatches to train the network for.
-    """
-
-    if self.times_trained < self.cleared_times_trained:
-      num_steps = int(self.training_schedule[self.times_trained])
-      self.times_trained += 1
-
-    if self.verbose:
-      print('Training {} for {} steps...'.format(self.name, num_steps))
-
-    with self.graph.as_default():
-
-      for step in range(num_steps):
-        x, y, w = data.get_batch_with_weights(self.hparams.batch_size)
-        _, summary, global_step, loss = self.sess.run(
-            [self.train_op, self.summary_op, self.global_step, self.loss],
-            feed_dict={self.x: x, self.y: y, self.weights: w,
                       self.data_size: data.num_points()})
-
-        weights_l = self.sess.run(self.weights_std[0])
-        self.h_max_var.append(np.max(weights_l))
-
-        if step % self.freq_summary == 0:
-          if self.show_training:
-            print('step: {}, loss: {}'.format(step, loss))
-            sys.stdout.flush()
-          self.summary_writer.add_summary(summary, global_step)
diff --git a/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py b/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py
deleted file mode 100644
index cb87c2335..000000000
--- a/research/deep_contextual_bandits/bandits/algorithms/bf_variational_neural_bandit_model.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Bayesian NN using factorized VI (Bayes By Backprop, Blundell et al. 2015).
-
-See https://arxiv.org/abs/1505.05424 for details.
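-
-Training minimizes the negative ELBO,
-
-  loss = -(1/B) * sum_i E_q[log p(y_i | x_i, w)] + KL(q(w) || p(w)) / n,
-
-where B is the minibatch size and the KL term is scaled by the dataset size n,
-so the likelihood estimate and the complexity penalty stay on the same
-per-example scale (see build_model below).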
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf -# import tensorflow_probability as tfp - -from absl import flags -from bandits.core.bayesian_nn import BayesianNN - - -FLAGS = flags.FLAGS -# tfd = tfp.distributions -tfd = tf.contrib.distributions -tfl = tf.contrib.layers - - -def log_gaussian(x, mu, sigma, reduce_sum=True): - """Returns log Gaussian pdf.""" - res = tfd.Normal(mu, sigma).log_prob(x) - if reduce_sum: - return tf.reduce_sum(res) - else: - return res - - -def analytic_kl(mu_1, sigma_1, mu_2, sigma_2): - """KL for two Gaussian distributions with diagonal covariance matrix.""" - kl = tfd.kl_divergence(tfd.MVNDiag(mu_1, sigma_1), tfd.MVNDiag(mu_2, sigma_2)) - return kl - - -class BfVariationalNeuralBanditModel(BayesianNN): - """Implements an approximate Bayesian NN using Variational Inference.""" - - def __init__(self, hparams, name="BBBNN"): - - self.name = name - self.hparams = hparams - - self.n_in = self.hparams.context_dim - self.n_out = self.hparams.num_actions - self.layers = self.hparams.layer_sizes - self.init_scale = self.hparams.init_scale - self.f_num_points = None - if "f_num_points" in hparams: - self.f_num_points = self.hparams.f_num_points - - self.cleared_times_trained = self.hparams.cleared_times_trained - self.initial_training_steps = self.hparams.initial_training_steps - self.training_schedule = np.linspace(self.initial_training_steps, - self.hparams.training_epochs, - self.cleared_times_trained) - self.verbose = getattr(self.hparams, "verbose", True) - - self.weights_m = {} - self.weights_std = {} - self.biases_m = {} - self.biases_std = {} - - self.times_trained = 0 - - if self.hparams.use_sigma_exp_transform: - self.sigma_transform = tf.exp - self.inverse_sigma_transform = np.log - else: - self.sigma_transform = tf.nn.softplus - self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y)) - - # Whether to use the local reparameterization trick to compute the loss. - # See details in https://arxiv.org/abs/1506.02557 - self.use_local_reparameterization = True - - self.build_graph() - - def build_mu_variable(self, shape): - """Returns a mean variable initialized as N(0, 0.05).""" - return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) - - def build_sigma_variable(self, shape, init=-5.): - """Returns a sigma variable initialized as N(init, 0.05).""" - # Initialize sigma to be very small initially to encourage MAP opt first - return tf.Variable(tf.random_normal(shape, init, 0.05)) - - def build_layer(self, input_x, input_x_local, shape, - layer_id, activation_fn=tf.nn.relu): - """Builds a variational layer, and computes KL term. - - Args: - input_x: Input to the variational layer. - input_x_local: Input when the local reparameterization trick was applied. - shape: [number_inputs, number_outputs] for the layer. - layer_id: Number of layer in the architecture. - activation_fn: Activation function to apply. - - Returns: - output_h: Output of the variational layer. - output_h_local: Output when local reparameterization trick was applied. - neg_kl: Negative KL term for the layer. 
- """ - - w_mu = self.build_mu_variable(shape) - w_sigma = self.sigma_transform(self.build_sigma_variable(shape)) - w_noise = tf.random_normal(shape) - w = w_mu + w_sigma * w_noise - - b_mu = self.build_mu_variable([1, shape[1]]) - b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]])) - b = b_mu - - # Store means and stds - self.weights_m[layer_id] = w_mu - self.weights_std[layer_id] = w_sigma - self.biases_m[layer_id] = b_mu - self.biases_std[layer_id] = b_sigma - - # Create outputs - output_h = activation_fn(tf.matmul(input_x, w) + b) - - if self.use_local_reparameterization: - # Use analytic KL divergence wrt the prior - neg_kl = -analytic_kl(w_mu, w_sigma, - 0., tf.to_float(np.sqrt(2./shape[0]))) - else: - # Create empirical KL loss terms - log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0]))) - log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma)) - neg_kl = log_p - log_q - - # Apply local reparameterization trick: sample activations pre nonlinearity - m_h = tf.matmul(input_x_local, w_mu) + b - v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma)) - output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h)) - output_h_local = activation_fn(output_h_local) - - return output_h, output_h_local, neg_kl - - def build_action_noise(self): - """Defines a model for additive noise per action, and its KL term.""" - - # Define mean and std variables (log-normal dist) for each action. - noise_sigma_mu = (self.build_mu_variable([1, self.n_out]) - + self.inverse_sigma_transform(self.hparams.noise_sigma)) - noise_sigma_sigma = self.sigma_transform( - self.build_sigma_variable([1, self.n_out])) - - pre_noise_sigma = (noise_sigma_mu - + tf.random_normal([1, self.n_out]) * noise_sigma_sigma) - self.noise_sigma = self.sigma_transform(pre_noise_sigma) - - # Compute KL for additive noise sigma terms. - if getattr(self.hparams, "infer_noise_sigma", False): - neg_kl_term = log_gaussian( - pre_noise_sigma, - self.inverse_sigma_transform(self.hparams.noise_sigma), - self.hparams.prior_sigma - ) - neg_kl_term -= log_gaussian(pre_noise_sigma, - noise_sigma_mu, - noise_sigma_sigma) - else: - neg_kl_term = 0. - - return neg_kl_term - - def build_model(self, activation_fn=tf.nn.relu): - """Defines the actual NN model with fully connected layers. - - The loss is computed for partial feedback settings (bandits), so only - the observed outcome is backpropagated (see weighted loss). - Selects the optimizer and, finally, it also initializes the graph. - - Args: - activation_fn: the activation function used in the nn layers. - """ - - def weight_prior(dtype, shape, c, d, e): - del c, d, e - return tfd.Independent( - tfd.Normal(loc=tf.zeros(shape, dtype), - scale=tf.to_float(np.sqrt(2) / shape[0])), - reinterpreted_batch_ndims=tf.size(shape)) - - if self.verbose: - print("Initializing model {}.".format(self.name)) - - # Compute model additive noise for each action with log-normal distribution - neg_kl_term = self.build_action_noise() - - # Build variational network using self.x as input. - input_x = self.x - - # Create Keras model using DenseLocalReparameterization (prior N(0, 1)). 
- model_layers = [ - tfl.DenseLocalReparameterization( - n_nodes, - activation=tf.nn.relu, - kernel_prior_fn=weight_prior - ) - for n_nodes in self.layers if n_nodes > 0 - ] - - output_layer = tfl.DenseLocalReparameterization( - self.n_out, - activation=lambda x: x, - kernel_prior_fn=weight_prior - ) - model_layers.append(output_layer) - - model = tf.keras.Sequential(model_layers) - self.y_pred = model(input_x) - - # Compute KL term - neg_kl_term -= tf.add_n(model.losses) - - # Compute log likelihood (with learned or fixed noise level) - if getattr(self.hparams, "infer_noise_sigma", False): - log_likelihood = log_gaussian( - self.y, self.y_pred, self.noise_sigma, reduce_sum=False) - else: - log_likelihood = log_gaussian( - self.y, self.y_pred, self.hparams.noise_sigma, reduce_sum=False) - - # Only take into account observed outcomes (bandits setting) - batch_size = tf.to_float(tf.shape(self.x)[0]) - weighted_log_likelihood = tf.reduce_sum( - log_likelihood * self.weights) / batch_size - - # The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL - elbo = weighted_log_likelihood + (neg_kl_term / self.n) - - self.loss = -elbo - self.global_step = tf.train.get_or_create_global_step() - self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize( - self.loss, global_step=self.global_step) - - # Create tensorboard metrics - self.create_summaries() - self.summary_writer = tf.summary.FileWriter( - "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) - - def build_graph(self): - """Defines graph, session, placeholders, and model. - - Placeholders are: n (size of the dataset), x and y (context and observed - reward for each action), and weights (one-hot encoding of selected action - for each context, i.e., only possibly non-zero element in each y). - """ - - self.graph = tf.Graph() - with self.graph.as_default(): - - self.sess = tf.Session() - - self.n = tf.placeholder(shape=[], dtype=tf.float32) - - self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32) - self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) - self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) - - self.build_model() - self.sess.run(tf.global_variables_initializer()) - - def create_summaries(self): - """Defines summaries including mean loss, and global step.""" - - with self.graph.as_default(): - with tf.name_scope(self.name + "_summaries"): - tf.summary.scalar("loss", self.loss) - tf.summary.scalar("global_step", self.global_step) - self.summary_op = tf.summary.merge_all() - - def assign_lr(self): - """Resets the learning rate in dynamic schedules for subsequent trainings. - - In bandits settings, we do expand our dataset over time. Then, we need to - re-train the network with the new data. The algorithms that do not keep - the step constant, can reset it at the start of each *training* process. - """ - - decay_steps = 1 - if self.hparams.activate_decay: - current_gs = self.sess.run(self.global_step) - with self.graph.as_default(): - self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, - self.global_step - current_gs, - decay_steps, - self.hparams.lr_decay_rate) - - def train(self, data, num_steps): - """Trains the BNN for num_steps, using the data in 'data'. - - Args: - data: ContextualDataset object that provides the data. - num_steps: Number of minibatches to train the network for. - - Returns: - losses: Loss history during training. 
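    Example (illustration only; hyperparameter values assumed): with
    cleared_times_trained=3, initial_training_steps=100 and
    training_epochs=1000, the schedule np.linspace(100, 1000, 3) equals
    [100., 550., 1000.], so the first three calls train for 100, 550 and
    1000 steps; later calls use the num_steps argument unchanged.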
- """ - - if self.times_trained < self.cleared_times_trained: - num_steps = int(self.training_schedule[self.times_trained]) - self.times_trained += 1 - - losses = [] - - with self.graph.as_default(): - - if self.verbose: - print("Training {} for {} steps...".format(self.name, num_steps)) - - for step in range(num_steps): - x, y, weights = data.get_batch_with_weights(self.hparams.batch_size) - _, summary, global_step, loss = self.sess.run( - [self.train_op, self.summary_op, self.global_step, self.loss], - feed_dict={ - self.x: x, - self.y: y, - self.weights: weights, - self.n: data.num_points(self.f_num_points), - }) - - losses.append(loss) - - if step % self.hparams.freq_summary == 0: - if self.hparams.show_training: - print("{} | step: {}, loss: {}".format( - self.name, global_step, loss)) - self.summary_writer.add_summary(summary, global_step) - - return losses diff --git a/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py deleted file mode 100644 index 7c44b681c..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/bootstrapped_bnn_sampling.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contextual algorithm based on boostrapping neural networks.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - -from bandits.core.bandit_algorithm import BanditAlgorithm -from bandits.core.contextual_dataset import ContextualDataset -from bandits.algorithms.neural_bandit_model import NeuralBanditModel - - -class BootstrappedBNNSampling(BanditAlgorithm): - """Thompson Sampling algorithm based on training several neural networks.""" - - def __init__(self, name, hparams, optimizer='RMS'): - """Creates a BootstrappedSGDSampling object based on a specific optimizer. - - hparams.q: Number of models that are independently trained. - hparams.p: Prob of independently including each datapoint in each model. - - Args: - name: Name given to the instance. - hparams: Hyperparameters for each individual model. - optimizer: Neural network optimization algorithm. 
- """ - - self.name = name - self.hparams = hparams - self.optimizer_n = optimizer - - self.training_freq = hparams.training_freq - self.training_epochs = hparams.training_epochs - self.t = 0 - - self.q = hparams.q - self.p = hparams.p - - self.datasets = [ - ContextualDataset(hparams.context_dim, - hparams.num_actions, - hparams.buffer_s) - for _ in range(self.q) - ] - - self.bnn_boot = [ - NeuralBanditModel(optimizer, hparams, '{}-{}-bnn'.format(name, i)) - for i in range(self.q) - ] - - def action(self, context): - """Selects action for context based on Thompson Sampling using one BNN.""" - - if self.t < self.hparams.num_actions * self.hparams.initial_pulls: - # round robin until each action has been taken "initial_pulls" times - return self.t % self.hparams.num_actions - - # choose model uniformly at random - model_index = np.random.randint(self.q) - - with self.bnn_boot[model_index].graph.as_default(): - c = context.reshape((1, self.hparams.context_dim)) - output = self.bnn_boot[model_index].sess.run( - self.bnn_boot[model_index].y_pred, - feed_dict={self.bnn_boot[model_index].x: c}) - return np.argmax(output) - - def update(self, context, action, reward): - """Updates the data buffer, and re-trains the BNN every self.freq_update.""" - - self.t += 1 - for i in range(self.q): - # include the data point with probability p independently in each dataset - if np.random.random() < self.p or self.t < 2: - self.datasets[i].add(context, action, reward) - - if self.t % self.training_freq == 0: - # update all the models: - for i in range(self.q): - if self.hparams.reset_lr: - self.bnn_boot[i].assign_lr() - self.bnn_boot[i].train(self.datasets[i], self.training_epochs) diff --git a/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py deleted file mode 100644 index d5ad6e3ed..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/fixed_policy_sampling.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contextual bandit algorithm that selects an action at random.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - -from bandits.core.bandit_algorithm import BanditAlgorithm - - -class FixedPolicySampling(BanditAlgorithm): - """Defines a baseline; returns an action at random with probs given by p.""" - - def __init__(self, name, p, hparams): - """Creates a FixedPolicySampling object. - - Args: - name: Name of the algorithm. - p: Vector of normalized probabilities corresponding to sampling each arm. - hparams: Hyper-parameters, including the number of arms (num_actions). - - Raises: - ValueError: when p dimension does not match the number of actions. 
- """ - - self.name = name - self.p = p - self.hparams = hparams - - if len(p) != self.hparams.num_actions: - raise ValueError('Policy needs k probabilities.') - - def action(self, context): - """Selects an action at random according to distribution p.""" - return np.random.choice(range(self.hparams.num_actions), p=self.p) diff --git a/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py deleted file mode 100644 index 15ef8fa9b..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/linear_full_posterior_sampling.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contextual algorithm that keeps a full linear posterior for each arm.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from scipy.stats import invgamma - -from bandits.core.bandit_algorithm import BanditAlgorithm -from bandits.core.contextual_dataset import ContextualDataset - - -class LinearFullPosteriorSampling(BanditAlgorithm): - """Thompson Sampling with independent linear models and unknown noise var.""" - - def __init__(self, name, hparams): - """Initialize posterior distributions and hyperparameters. - - Assume a linear model for each action i: reward = context^T beta_i + noise - Each beta_i has a Gaussian prior (lambda parameter), each sigma2_i (noise - level) has an inverse Gamma prior (a0, b0 parameters). Mean, covariance, - and precision matrices are initialized, and the ContextualDataset created. - - Args: - name: Name of the algorithm. - hparams: Hyper-parameters of the algorithm. - """ - - self.name = name - self.hparams = hparams - - # Gaussian prior for each beta_i - self._lambda_prior = self.hparams.lambda_prior - - self.mu = [ - np.zeros(self.hparams.context_dim + 1) - for _ in range(self.hparams.num_actions) - ] - - self.cov = [(1.0 / self.lambda_prior) * np.eye(self.hparams.context_dim + 1) - for _ in range(self.hparams.num_actions)] - - self.precision = [ - self.lambda_prior * np.eye(self.hparams.context_dim + 1) - for _ in range(self.hparams.num_actions) - ] - - # Inverse Gamma prior for each sigma2_i - self._a0 = self.hparams.a0 - self._b0 = self.hparams.b0 - - self.a = [self._a0 for _ in range(self.hparams.num_actions)] - self.b = [self._b0 for _ in range(self.hparams.num_actions)] - - self.t = 0 - self.data_h = ContextualDataset(hparams.context_dim, - hparams.num_actions, - intercept=True) - - def action(self, context): - """Samples beta's from posterior, and chooses best action accordingly. - - Args: - context: Context for which the action need to be chosen. - - Returns: - action: Selected action for the context. 
- """ - - # Round robin until each action has been selected "initial_pulls" times - if self.t < self.hparams.num_actions * self.hparams.initial_pulls: - return self.t % self.hparams.num_actions - - # Sample sigma2, and beta conditional on sigma2 - sigma2_s = [ - self.b[i] * invgamma.rvs(self.a[i]) - for i in range(self.hparams.num_actions) - ] - - try: - beta_s = [ - np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i]) - for i in range(self.hparams.num_actions) - ] - except np.linalg.LinAlgError as e: - # Sampling could fail if covariance is not positive definite - print('Exception when sampling from {}.'.format(self.name)) - print('Details: {} | {}.'.format(e.message, e.args)) - d = self.hparams.context_dim + 1 - beta_s = [ - np.random.multivariate_normal(np.zeros((d)), np.eye(d)) - for i in range(self.hparams.num_actions) - ] - - # Compute sampled expected values, intercept is last component of beta - vals = [ - np.dot(beta_s[i][:-1], context.T) + beta_s[i][-1] - for i in range(self.hparams.num_actions) - ] - - return np.argmax(vals) - - def update(self, context, action, reward): - """Updates action posterior using the linear Bayesian regression formula. - - Args: - context: Last observed context. - action: Last observed action. - reward: Last observed reward. - """ - - self.t += 1 - self.data_h.add(context, action, reward) - - # Update posterior of action with formulas: \beta | x,y ~ N(mu_q, cov_q) - x, y = self.data_h.get_data(action) - - # The algorithm could be improved with sequential update formulas (cheaper) - s = np.dot(x.T, x) - - # Some terms are removed as we assume prior mu_0 = 0. - precision_a = s + self.lambda_prior * np.eye(self.hparams.context_dim + 1) - cov_a = np.linalg.inv(precision_a) - mu_a = np.dot(cov_a, np.dot(x.T, y)) - - # Inverse Gamma posterior update - a_post = self.a0 + x.shape[0] / 2.0 - b_upd = 0.5 * (np.dot(y.T, y) - np.dot(mu_a.T, np.dot(precision_a, mu_a))) - b_post = self.b0 + b_upd - - # Store new posterior distributions - self.mu[action] = mu_a - self.cov[action] = cov_a - self.precision[action] = precision_a - self.a[action] = a_post - self.b[action] = b_post - - @property - def a0(self): - return self._a0 - - @property - def b0(self): - return self._b0 - - @property - def lambda_prior(self): - return self._lambda_prior diff --git a/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py b/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py deleted file mode 100644 index 0c35dfaea..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/multitask_gp.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""A Multitask Gaussian process.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import flags -from absl import logging - -import numpy as np -import tensorflow as tf -from bandits.core.bayesian_nn import BayesianNN - -FLAGS = flags.FLAGS -tfd = tf.contrib.distributions - -class MultitaskGP(BayesianNN): - """Implements a Gaussian process with multi-task outputs. - - Optimizes the hyperparameters over the log marginal likelihood. - Uses a Matern 3/2 + linear covariance and returns - sampled predictions for test inputs. The outputs are optionally - correlated where the correlation structure is learned through latent - embeddings of the tasks. - """ - - def __init__(self, hparams): - self.name = "MultiTaskGP" - self.hparams = hparams - - self.n_in = self.hparams.context_dim - self.n_out = self.hparams.num_outputs - self.keep_fixed_after_max_obs = self.hparams.keep_fixed_after_max_obs - - self._show_training = self.hparams.show_training - self._freq_summary = self.hparams.freq_summary - - # Dimensionality of the latent task vectors - self.task_latent_dim = self.hparams.task_latent_dim - - # Maximum number of observations to include - self.max_num_points = self.hparams.max_num_points - - if self.hparams.learn_embeddings: - self.learn_embeddings = self.hparams.learn_embeddings - else: - self.learn_embeddings = False - - # create the graph corresponding to the BNN instance - self.graph = tf.Graph() - with self.graph.as_default(): - # store a new session for the graph - self.sess = tf.Session() - - with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE): - self.n = tf.placeholder(shape=[], dtype=tf.float64) - self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64) - self.x_in = tf.placeholder(shape=[None, self.n_in], dtype=tf.float64) - self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float64) - self.weights = tf.placeholder(shape=[None, self.n_out], - dtype=tf.float64) - - self.build_model() - self.sess.run(tf.global_variables_initializer()) - - def atleast_2d(self, x, dims): - return tf.reshape(tf.expand_dims(x, axis=0), (-1, dims)) - - def sq_dist(self, x, x2): - a2 = tf.reduce_sum(tf.square(x), 1) - b2 = tf.reduce_sum(tf.square(x2), 1) - sqdists = tf.expand_dims(a2, 1) + b2 - 2.0 * tf.matmul(x, tf.transpose(x2)) - return sqdists - - # Covariance between outputs - def task_cov(self, x, x2): - """Squared Exponential Covariance Kernel over latent task embeddings.""" - # Index into latent task vectors - x_vecs = tf.gather(self.task_vectors, tf.argmax(x, axis=1), axis=0) - x2_vecs = tf.gather(self.task_vectors, tf.argmax(x2, axis=1), axis=0) - r = self.sq_dist(self.atleast_2d(x_vecs, self.task_latent_dim), - self.atleast_2d(x2_vecs, self.task_latent_dim)) - return tf.exp(-r) - - def cov(self, x, x2): - """Matern 3/2 + Linear Gaussian Process Covariance Function.""" - ls = tf.clip_by_value(self.length_scales, -5.0, 5.0) - ls_lin = tf.clip_by_value(self.length_scales_lin, -5.0, 5.0) - r = self.sq_dist(self.atleast_2d(x, self.n_in)/tf.nn.softplus(ls), - self.atleast_2d(x2, self.n_in)/tf.nn.softplus(ls)) - r = tf.clip_by_value(r, 0, 1e8) - - # Matern 3/2 Covariance - matern = (1.0 + tf.sqrt(3.0*r + 1e-16)) * tf.exp(-tf.sqrt(3.0*r + 1e-16)) - # Linear Covariance - lin = tf.matmul(x / tf.nn.softplus(ls_lin), - x2 / tf.nn.softplus(ls_lin), transpose_b=True) - return (tf.nn.softplus(self.amplitude) * matern + - 
tf.nn.softplus(self.amplitude_linear) * lin) - - def build_model(self): - """Defines the GP model. - - The loss is computed for partial feedback settings (bandits), so only - the observed outcome is backpropagated (see weighted loss). - Selects the optimizer and, finally, it also initializes the graph. - """ - - logging.info("Initializing model %s.", self.name) - self.global_step = tf.train.get_or_create_global_step() - - # Define state for the model (inputs, etc.) - self.x_train = tf.get_variable( - "training_data", - initializer=tf.ones( - [self.hparams.batch_size, self.n_in], dtype=tf.float64), - validate_shape=False, - trainable=False) - self.y_train = tf.get_variable( - "training_labels", - initializer=tf.zeros([self.hparams.batch_size, 1], dtype=tf.float64), - validate_shape=False, - trainable=False) - self.weights_train = tf.get_variable( - "weights_train", - initializer=tf.ones( - [self.hparams.batch_size, self.n_out], dtype=tf.float64), - validate_shape=False, - trainable=False) - self.input_op = tf.assign(self.x_train, self.x_in, validate_shape=False) - self.input_w_op = tf.assign( - self.weights_train, self.weights, validate_shape=False) - - self.input_std = tf.get_variable( - "data_standard_deviation", - initializer=tf.ones([1, self.n_out], dtype=tf.float64), - dtype=tf.float64, - trainable=False) - self.input_mean = tf.get_variable( - "data_mean", - initializer=tf.zeros([1, self.n_out], dtype=tf.float64), - dtype=tf.float64, - trainable=True) - - # GP Hyperparameters - self.noise = tf.get_variable( - "noise", initializer=tf.cast(0.0, dtype=tf.float64)) - self.amplitude = tf.get_variable( - "amplitude", initializer=tf.cast(1.0, dtype=tf.float64)) - self.amplitude_linear = tf.get_variable( - "linear_amplitude", initializer=tf.cast(1.0, dtype=tf.float64)) - self.length_scales = tf.get_variable( - "length_scales", initializer=tf.zeros([1, self.n_in], dtype=tf.float64)) - self.length_scales_lin = tf.get_variable( - "length_scales_linear", - initializer=tf.zeros([1, self.n_in], dtype=tf.float64)) - - # Latent embeddings of the different outputs for task covariance - self.task_vectors = tf.get_variable( - "latent_task_vectors", - initializer=tf.random_normal( - [self.n_out, self.task_latent_dim], dtype=tf.float64)) - - # Normalize outputs across each dimension - # Since we have different numbers of observations across each task, we - # normalize by their respective counts. 
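# Illustration (not from the original file) of the per-task statistics
# computed below: the one-hot `weights` mark which task each reward
# belongs to, and zero-count tasks are clamped to 1 to avoid dividing
# by zero.
import numpy as np

weights = np.array([[1., 0.], [1., 0.], [0., 1.], [1., 0.]])  # one-hot tasks
y = np.array([[2., 0.], [4., 0.], [0., 5.], [6., 0.]])        # masked rewards
counts = np.maximum(weights.sum(axis=0), 1.0)                 # [3., 1.]
means = y.sum(axis=0) / counts                                # [4., 5.]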
- index_counts = self.atleast_2d(tf.reduce_sum(self.weights, axis=0), - self.n_out) - index_counts = tf.where(index_counts > 0, index_counts, - tf.ones(tf.shape(index_counts), dtype=tf.float64)) - self.mean_op = tf.assign(self.input_mean, - tf.reduce_sum(self.y, axis=0) / index_counts) - self.var_op = tf.assign( - self.input_std, tf.sqrt(1e-4 + tf.reduce_sum(tf.square( - self.y - tf.reduce_sum(self.y, axis=0) / index_counts), axis=0) - / index_counts)) - - with tf.control_dependencies([self.var_op]): - y_normed = self.atleast_2d( - (self.y - self.input_mean) / self.input_std, self.n_out) - y_normed = self.atleast_2d(tf.boolean_mask(y_normed, self.weights > 0), 1) - self.out_op = tf.assign(self.y_train, y_normed, validate_shape=False) - - # Observation noise - alpha = tf.nn.softplus(self.noise) + 1e-6 - - # Covariance - with tf.control_dependencies([self.input_op, self.input_w_op, self.out_op]): - self.self_cov = (self.cov(self.x_in, self.x_in) * - self.task_cov(self.weights, self.weights) + - tf.eye(tf.shape(self.x_in)[0], dtype=tf.float64) * alpha) - - self.chol = tf.cholesky(self.self_cov) - self.kinv = tf.cholesky_solve(self.chol, tf.eye(tf.shape(self.x_in)[0], - dtype=tf.float64)) - - self.input_inv = tf.Variable( - tf.eye(self.hparams.batch_size, dtype=tf.float64), - validate_shape=False, - trainable=False) - self.input_cov_op = tf.assign(self.input_inv, self.kinv, - validate_shape=False) - - # Log determinant by taking the singular values along the diagonal - # of self.chol - with tf.control_dependencies([self.input_cov_op]): - logdet = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(self.chol) + 1e-16)) - - # Log Marginal likelihood - self.marginal_ll = -tf.reduce_sum(-0.5 * tf.matmul( - tf.transpose(y_normed), tf.matmul(self.kinv, y_normed)) - 0.5 * logdet - - 0.5 * self.n * np.log(2 * np.pi)) - - zero = tf.cast(0., dtype=tf.float64) - one = tf.cast(1., dtype=tf.float64) - standard_normal = tfd.Normal(loc=zero, scale=one) - - # Loss is marginal likelihood and priors - self.loss = tf.reduce_sum( - self.marginal_ll - - (standard_normal.log_prob(self.amplitude) + - standard_normal.log_prob(tf.exp(self.noise)) + - standard_normal.log_prob(self.amplitude_linear) + - tfd.Normal(loc=zero, scale=one * 10.).log_prob( - self.task_vectors)) - ) - - # Optimizer for hyperparameters - optimizer = tf.train.AdamOptimizer(learning_rate=self.hparams.lr) - vars_to_optimize = [ - self.amplitude, self.length_scales, self.length_scales_lin, - self.amplitude_linear, self.noise, self.input_mean - ] - - if self.learn_embeddings: - vars_to_optimize.append(self.task_vectors) - grads = optimizer.compute_gradients(self.loss, vars_to_optimize) - self.train_op = optimizer.apply_gradients(grads, - global_step=self.global_step) - - # Predictions for test data - self.y_mean, self.y_pred = self.posterior_mean_and_sample(self.x) - - # create tensorboard metrics - self.create_summaries() - self.summary_writer = tf.summary.FileWriter("{}/graph_{}".format( - FLAGS.logdir, self.name), self.sess.graph) - self.check = tf.add_check_numerics_ops() - - def posterior_mean_and_sample(self, candidates): - """Draw samples for test predictions. - - Given a Tensor of 'candidates' inputs, returns samples from the posterior - and the posterior mean prediction for those inputs. - - Args: - candidates: A (num-examples x num-dims) Tensor containing the inputs for - which to return predictions. 
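    The standard GP identities used below, in this notation: with training
    kernel K, cross-covariance K*, and test kernel K**, the posterior is
      mean = K* K^{-1} y_train
      cov  = K** - K* K^{-1} K*^T,
    and a sample is mean + sqrt(cov) eps with eps ~ N(0, I), where the
    matrix square root is taken via an SVD for numerical stability.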
- Returns: - y_mean: The posterior mean prediction given these inputs - y_sample: A sample from the posterior of the outputs given these inputs - """ - # Cross-covariance for test predictions - w = tf.identity(self.weights_train) - inds = tf.squeeze( - tf.reshape( - tf.tile( - tf.reshape(tf.range(self.n_out), (self.n_out, 1)), - (1, tf.shape(candidates)[0])), (-1, 1))) - - cross_cov = self.cov(tf.tile(candidates, [self.n_out, 1]), self.x_train) - cross_task_cov = self.task_cov(tf.one_hot(inds, self.n_out), w) - cross_cov *= cross_task_cov - - # Test mean prediction - y_mean = tf.matmul(cross_cov, tf.matmul(self.input_inv, self.y_train)) - - # Test sample predictions - # Note this can be done much more efficiently using Kronecker products - # if all tasks are fully observed (which we won't assume) - test_cov = ( - self.cov(tf.tile(candidates, [self.n_out, 1]), - tf.tile(candidates, [self.n_out, 1])) * - self.task_cov(tf.one_hot(inds, self.n_out), - tf.one_hot(inds, self.n_out)) - - tf.matmul(cross_cov, - tf.matmul(self.input_inv, - tf.transpose(cross_cov)))) - - # Get the matrix square root through an SVD for drawing samples - # This seems more numerically stable than the Cholesky - s, _, v = tf.svd(test_cov, full_matrices=True) - test_sqrt = tf.matmul(v, tf.matmul(tf.diag(s), tf.transpose(v))) - - y_sample = ( - tf.matmul( - test_sqrt, - tf.random_normal([tf.shape(test_sqrt)[0], 1], dtype=tf.float64)) + - y_mean) - - y_sample = ( - tf.transpose(tf.reshape(y_sample, - (self.n_out, -1))) * self.input_std + - self.input_mean) - - return y_mean, y_sample - - def create_summaries(self): - with self.graph.as_default(): - tf.summary.scalar("loss", self.loss) - tf.summary.scalar("log_noise", self.noise) - tf.summary.scalar("log_amp", self.amplitude) - tf.summary.scalar("log_amp_lin", self.amplitude_linear) - tf.summary.histogram("length_scales", self.length_scales) - tf.summary.histogram("length_scales_lin", self.length_scales_lin) - self.summary_op = tf.summary.merge_all() - - def train(self, data, num_steps): - """Trains the GP for num_steps, using the data in 'data'. - - Args: - data: ContextualDataset object that provides the data. - num_steps: Number of minibatches to train the network for. 
- """ - - logging.info("Training %s for %d steps...", self.name, num_steps) - for step in range(num_steps): - numpts = min(data.num_points(None), self.max_num_points) - if numpts >= self.max_num_points and self.keep_fixed_after_max_obs: - x = data.contexts[:numpts, :] - y = data.rewards[:numpts, :] - weights = np.zeros((x.shape[0], self.n_out)) - for i, val in enumerate(data.actions[:numpts]): - weights[i, val] = 1.0 - else: - x, y, weights = data.get_batch_with_weights(numpts) - - ops = [ - self.global_step, self.summary_op, self.loss, self.noise, - self.amplitude, self.amplitude_linear, self.length_scales, - self.length_scales_lin, self.input_cov_op, self.input_op, self.var_op, - self.input_w_op, self.out_op, self.train_op - ] - - res = self.sess.run(ops, - feed_dict={self.x: x, - self.x_in: x, - self.y: y, - self.weights: weights, - self.n: numpts, - }) - - if step % self._freq_summary == 0: - if self._show_training: - logging.info("step: %d, loss: %g noise: %f amp: %f amp_lin: %f", - step, res[2], res[3], res[4], res[5]) - summary = res[1] - global_step = res[0] - self.summary_writer.add_summary(summary, global_step=global_step) diff --git a/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py b/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py deleted file mode 100644 index 99d7cd4dc..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/neural_bandit_model.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Define a family of neural network architectures for bandits. - -The network accepts different type of optimizers that could lead to different -approximations of the posterior distribution or simply to point estimates. 
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from absl import flags -from bandits.core.bayesian_nn import BayesianNN - -FLAGS = flags.FLAGS - - -class NeuralBanditModel(BayesianNN): - """Implements a neural network for bandit problems.""" - - def __init__(self, optimizer, hparams, name): - """Saves hyper-params and builds the Tensorflow graph.""" - - self.opt_name = optimizer - self.name = name - self.hparams = hparams - self.verbose = getattr(self.hparams, "verbose", True) - self.times_trained = 0 - self.build_model() - - def build_layer(self, x, num_units): - """Builds a layer with input x; dropout and layer norm if specified.""" - - init_s = self.hparams.init_scale - - layer_n = getattr(self.hparams, "layer_norm", False) - dropout = getattr(self.hparams, "use_dropout", False) - - nn = tf.contrib.layers.fully_connected( - x, - num_units, - activation_fn=self.hparams.activation, - normalizer_fn=None if not layer_n else tf.contrib.layers.layer_norm, - normalizer_params={}, - weights_initializer=tf.random_uniform_initializer(-init_s, init_s) - ) - - if dropout: - nn = tf.nn.dropout(nn, self.hparams.keep_prob) - - return nn - - def forward_pass(self): - - init_s = self.hparams.init_scale - - scope_name = "prediction_{}".format(self.name) - with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE): - nn = self.x - for num_units in self.hparams.layer_sizes: - if num_units > 0: - nn = self.build_layer(nn, num_units) - - y_pred = tf.layers.dense( - nn, - self.hparams.num_actions, - kernel_initializer=tf.random_uniform_initializer(-init_s, init_s)) - - return nn, y_pred - - def build_model(self): - """Defines the actual NN model with fully connected layers. - - The loss is computed for partial feedback settings (bandits), so only - the observed outcome is backpropagated (see weighted loss). - Selects the optimizer and, finally, it also initializes the graph. 
- """ - - # create and store the graph corresponding to the BNN instance - self.graph = tf.Graph() - - with self.graph.as_default(): - - # create and store a new session for the graph - self.sess = tf.Session() - - with tf.name_scope(self.name): - - self.global_step = tf.train.get_or_create_global_step() - - # context - self.x = tf.placeholder( - shape=[None, self.hparams.context_dim], - dtype=tf.float32, - name="{}_x".format(self.name)) - - # reward vector - self.y = tf.placeholder( - shape=[None, self.hparams.num_actions], - dtype=tf.float32, - name="{}_y".format(self.name)) - - # weights (1 for selected action, 0 otherwise) - self.weights = tf.placeholder( - shape=[None, self.hparams.num_actions], - dtype=tf.float32, - name="{}_w".format(self.name)) - - # with tf.variable_scope("prediction_{}".format(self.name)): - self.nn, self.y_pred = self.forward_pass() - self.loss = tf.squared_difference(self.y_pred, self.y) - self.weighted_loss = tf.multiply(self.weights, self.loss) - self.cost = tf.reduce_sum(self.weighted_loss) / self.hparams.batch_size - - if self.hparams.activate_decay: - self.lr = tf.train.inverse_time_decay( - self.hparams.initial_lr, self.global_step, - 1, self.hparams.lr_decay_rate) - else: - self.lr = tf.Variable(self.hparams.initial_lr, trainable=False) - - # create tensorboard metrics - self.create_summaries() - self.summary_writer = tf.summary.FileWriter( - "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) - - tvars = tf.trainable_variables() - grads, _ = tf.clip_by_global_norm( - tf.gradients(self.cost, tvars), self.hparams.max_grad_norm) - - self.optimizer = self.select_optimizer() - - self.train_op = self.optimizer.apply_gradients( - zip(grads, tvars), global_step=self.global_step) - - self.init = tf.global_variables_initializer() - - self.initialize_graph() - - def initialize_graph(self): - """Initializes all variables.""" - - with self.graph.as_default(): - if self.verbose: - print("Initializing model {}.".format(self.name)) - self.sess.run(self.init) - - def assign_lr(self): - """Resets the learning rate in dynamic schedules for subsequent trainings. - - In bandits settings, we do expand our dataset over time. Then, we need to - re-train the network with the new data. The algorithms that do not keep - the step constant, can reset it at the start of each *training* process. - """ - - decay_steps = 1 - if self.hparams.activate_decay: - current_gs = self.sess.run(self.global_step) - with self.graph.as_default(): - self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, - self.global_step - current_gs, - decay_steps, - self.hparams.lr_decay_rate) - - def select_optimizer(self): - """Selects optimizer. To be extended (SGLD, KFAC, etc).""" - return tf.train.RMSPropOptimizer(self.lr) - - def create_summaries(self): - """Defines summaries including mean loss, learning rate, and global step.""" - - with self.graph.as_default(): - with tf.name_scope(self.name + "_summaries"): - tf.summary.scalar("cost", self.cost) - tf.summary.scalar("lr", self.lr) - tf.summary.scalar("global_step", self.global_step) - self.summary_op = tf.summary.merge_all() - - def train(self, data, num_steps): - """Trains the network for num_steps, using the provided data. - - Args: - data: ContextualDataset object that provides the data. - num_steps: Number of minibatches to train the network for. 
- """ - - if self.verbose: - print("Training {} for {} steps...".format(self.name, num_steps)) - - with self.graph.as_default(): - - for step in range(num_steps): - x, y, w = data.get_batch_with_weights(self.hparams.batch_size) - _, cost, summary, lr = self.sess.run( - [self.train_op, self.cost, self.summary_op, self.lr], - feed_dict={self.x: x, self.y: y, self.weights: w}) - - if step % self.hparams.freq_summary == 0: - if self.hparams.show_training: - print("{} | step: {}, lr: {}, loss: {}".format( - self.name, step, lr, cost)) - self.summary_writer.add_summary(summary, step) - - self.times_trained += 1 diff --git a/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py deleted file mode 100644 index 43fc55161..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/neural_linear_sampling.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Thompson Sampling with linear posterior over a learnt deep representation.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from scipy.stats import invgamma - -from bandits.core.bandit_algorithm import BanditAlgorithm -from bandits.core.contextual_dataset import ContextualDataset -from bandits.algorithms.neural_bandit_model import NeuralBanditModel - - -class NeuralLinearPosteriorSampling(BanditAlgorithm): - """Full Bayesian linear regression on the last layer of a deep neural net.""" - - def __init__(self, name, hparams, optimizer='RMS'): - - self.name = name - self.hparams = hparams - self.latent_dim = self.hparams.layer_sizes[-1] - - # Gaussian prior for each beta_i - self._lambda_prior = self.hparams.lambda_prior - - self.mu = [ - np.zeros(self.latent_dim) - for _ in range(self.hparams.num_actions) - ] - - self.cov = [(1.0 / self.lambda_prior) * np.eye(self.latent_dim) - for _ in range(self.hparams.num_actions)] - - self.precision = [ - self.lambda_prior * np.eye(self.latent_dim) - for _ in range(self.hparams.num_actions) - ] - - # Inverse Gamma prior for each sigma2_i - self._a0 = self.hparams.a0 - self._b0 = self.hparams.b0 - - self.a = [self._a0 for _ in range(self.hparams.num_actions)] - self.b = [self._b0 for _ in range(self.hparams.num_actions)] - - # Regression and NN Update Frequency - self.update_freq_lr = hparams.training_freq - self.update_freq_nn = hparams.training_freq_network - - self.t = 0 - self.optimizer_n = optimizer - - self.num_epochs = hparams.training_epochs - self.data_h = ContextualDataset(hparams.context_dim, - hparams.num_actions, - intercept=False) - self.latent_h = ContextualDataset(self.latent_dim, - hparams.num_actions, - intercept=False) - self.bnn = NeuralBanditModel(optimizer, hparams, '{}-bnn'.format(name)) - - def action(self, context): - 
"""Samples beta's from posterior, and chooses best action accordingly.""" - - # Round robin until each action has been selected "initial_pulls" times - if self.t < self.hparams.num_actions * self.hparams.initial_pulls: - return self.t % self.hparams.num_actions - - # Sample sigma2, and beta conditional on sigma2 - sigma2_s = [ - self.b[i] * invgamma.rvs(self.a[i]) - for i in range(self.hparams.num_actions) - ] - - try: - beta_s = [ - np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i]) - for i in range(self.hparams.num_actions) - ] - except np.linalg.LinAlgError as e: - # Sampling could fail if covariance is not positive definite - print('Exception when sampling for {}.'.format(self.name)) - print('Details: {} | {}.'.format(e.message, e.args)) - d = self.latent_dim - beta_s = [ - np.random.multivariate_normal(np.zeros((d)), np.eye(d)) - for i in range(self.hparams.num_actions) - ] - - # Compute last-layer representation for the current context - with self.bnn.graph.as_default(): - c = context.reshape((1, self.hparams.context_dim)) - z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c}) - - # Apply Thompson Sampling to last-layer representation - vals = [ - np.dot(beta_s[i], z_context.T) for i in range(self.hparams.num_actions) - ] - return np.argmax(vals) - - def update(self, context, action, reward): - """Updates the posterior using linear bayesian regression formula.""" - - self.t += 1 - self.data_h.add(context, action, reward) - c = context.reshape((1, self.hparams.context_dim)) - z_context = self.bnn.sess.run(self.bnn.nn, feed_dict={self.bnn.x: c}) - self.latent_h.add(z_context, action, reward) - - # Retrain the network on the original data (data_h) - if self.t % self.update_freq_nn == 0: - - if self.hparams.reset_lr: - self.bnn.assign_lr() - self.bnn.train(self.data_h, self.num_epochs) - - # Update the latent representation of every datapoint collected so far - new_z = self.bnn.sess.run(self.bnn.nn, - feed_dict={self.bnn.x: self.data_h.contexts}) - self.latent_h.replace_data(contexts=new_z) - - # Update the Bayesian Linear Regression - if self.t % self.update_freq_lr == 0: - - # Find all the actions to update - actions_to_update = self.latent_h.actions[:-self.update_freq_lr] - - for action_v in np.unique(actions_to_update): - - # Update action posterior with formulas: \beta | z,y ~ N(mu_q, cov_q) - z, y = self.latent_h.get_data(action_v) - - # The algorithm could be improved with sequential formulas (cheaper) - s = np.dot(z.T, z) - - # Some terms are removed as we assume prior mu_0 = 0. 
- precision_a = s + self.lambda_prior * np.eye(self.latent_dim) - cov_a = np.linalg.inv(precision_a) - mu_a = np.dot(cov_a, np.dot(z.T, y)) - - # Inverse Gamma posterior update - a_post = self.a0 + z.shape[0] / 2.0 - b_upd = 0.5 * np.dot(y.T, y) - b_upd -= 0.5 * np.dot(mu_a.T, np.dot(precision_a, mu_a)) - b_post = self.b0 + b_upd - - # Store new posterior distributions - self.mu[action_v] = mu_a - self.cov[action_v] = cov_a - self.precision[action_v] = precision_a - self.a[action_v] = a_post - self.b[action_v] = b_post - - @property - def a0(self): - return self._a0 - - @property - def b0(self): - return self._b0 - - @property - def lambda_prior(self): - return self._lambda_prior diff --git a/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py deleted file mode 100644 index 19944ad57..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/parameter_noise_sampling.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contextual algorithm based on Thompson Sampling + direct noise injection.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from scipy.special import logsumexp -import tensorflow as tf - -from absl import flags - -from bandits.core.bandit_algorithm import BanditAlgorithm -from bandits.core.contextual_dataset import ContextualDataset -from bandits.algorithms.neural_bandit_model import NeuralBanditModel - -FLAGS = flags.FLAGS - - -class ParameterNoiseSampling(BanditAlgorithm): - """Parameter Noise Sampling algorithm based on adding noise to net params. 
- - Described in https://arxiv.org/abs/1706.01905 - """ - - def __init__(self, name, hparams): - """Creates the algorithm, and sets up the adaptive Gaussian noise.""" - - self.name = name - self.hparams = hparams - self.verbose = getattr(self.hparams, 'verbose', True) - self.noise_std = getattr(self.hparams, 'noise_std', 0.005) - self.eps = getattr(self.hparams, 'eps', 0.05) - self.d_samples = getattr(self.hparams, 'd_samples', 300) - self.optimizer = getattr(self.hparams, 'optimizer', 'RMS') - - # keep track of noise heuristic statistics - self.std_h = [self.noise_std] - self.eps_h = [self.eps] - self.kl_h = [] - self.t = 0 - - self.freq_update = hparams.training_freq - self.num_epochs = hparams.training_epochs - - self.data_h = ContextualDataset(hparams.context_dim, hparams.num_actions, - hparams.buffer_s) - self.bnn = NeuralBanditModel(self.optimizer, hparams, '{}-bnn'.format(name)) - - with self.bnn.graph.as_default(): - - # noise-injection std placeholder - self.bnn.noise_std_ph = tf.placeholder(tf.float32, shape=()) - - # create noise corruption op; adds noise to all weights - tvars = tf.trainable_variables() - self.bnn.noisy_grads = [ - tf.random_normal(v.get_shape(), 0, self.bnn.noise_std_ph) - for v in tvars - ] - - # add noise to all params, then compute prediction, then subtract. - with tf.control_dependencies(self.bnn.noisy_grads): - self.bnn.noise_add_ops = [ - tvars[i].assign_add(n) for i, n in enumerate(self.bnn.noisy_grads) - ] - with tf.control_dependencies(self.bnn.noise_add_ops): - # we force the prediction for 'y' to be recomputed after adding noise - self.bnn.noisy_nn, self.bnn.noisy_pred_val = self.bnn.forward_pass() - - self.bnn.noisy_pred = tf.identity(self.bnn.noisy_pred_val) - with tf.control_dependencies([tf.identity(self.bnn.noisy_pred)]): - self.bnn.noise_sub_ops = [ - tvars[i].assign_add(-n) - for i, n in enumerate(self.bnn.noisy_grads) - ] - - def action(self, context): - """Selects action based on Thompson Sampling *after* adding noise.""" - - if self.t < self.hparams.num_actions * self.hparams.initial_pulls: - # round robin until each action has been taken "initial_pulls" times - return self.t % self.hparams.num_actions - - with self.bnn.graph.as_default(): - # run noise prediction op to choose action, and subtract noise op after. - c = context.reshape((1, self.hparams.context_dim)) - output, _ = self.bnn.sess.run( - [self.bnn.noisy_pred, self.bnn.noise_sub_ops], - feed_dict={self.bnn.x: c, - self.bnn.noise_std_ph: self.noise_std}) - return np.argmax(output) - - def update(self, context, action, reward): - """Updates the data buffer, and re-trains the BNN and noise level.""" - - self.t += 1 - self.data_h.add(context, action, reward) - - if self.t % self.freq_update == 0: - self.bnn.train(self.data_h, self.num_epochs) - self.update_noise() - - def update_noise(self): - """Increase noise if distance btw original and corrupted distrib small.""" - - kl = self.compute_distance() - delta = -np.log1p(- self.eps + self.eps / self.hparams.num_actions) - - if kl < delta: - self.noise_std *= 1.01 - else: - self.noise_std /= 1.01 - - self.eps *= 0.99 - - if self.verbose: - print('Update eps={} | kl={} | std={} | delta={} | increase={}.'.format( - self.eps, kl, self.noise_std, delta, kl < delta)) - - # store noise-injection statistics for inspection: std, KL, eps. 
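# Sketch (not from the original file) of the adaptation rule above: the
# KL target delta shrinks as eps decays, steering the injected noise
# toward perturbations that change the chosen action with probability
# roughly eps.
import numpy as np

def adapt(noise_std, eps, kl, num_actions):
  delta = -np.log1p(-eps + eps / num_actions)
  noise_std = noise_std * 1.01 if kl < delta else noise_std / 1.01
  return noise_std, eps * 0.99
# The histories appended below record this trajectory for inspection.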
- self.std_h.append(self.noise_std) - self.kl_h.append(kl) - self.eps_h.append(self.eps) - - def compute_distance(self): - """Computes empirical KL for original and corrupted output distributions.""" - - random_inputs, _ = self.data_h.get_batch(self.d_samples) - y_model = self.bnn.sess.run( - self.bnn.y_pred, - feed_dict={ - self.bnn.x: random_inputs, - self.bnn.noise_std_ph: self.noise_std - }) - y_noisy, _ = self.bnn.sess.run( - [self.bnn.noisy_pred, self.bnn.noise_sub_ops], - feed_dict={ - self.bnn.x: random_inputs, - self.bnn.noise_std_ph: self.noise_std - }) - - if self.verbose: - # display how often original & perturbed models propose different actions - s = np.sum([np.argmax(y_model[i, :]) == np.argmax(y_noisy[i, :]) - for i in range(y_model.shape[0])]) - print('{} | % of agreement btw original / corrupted actions: {}.'.format( - self.name, s / self.d_samples)) - - kl = self.compute_kl_with_logits(y_model, y_noisy) - return kl - - def compute_kl_with_logits(self, logits1, logits2): - """Computes KL from logits samples from two distributions.""" - - def exp_times_diff(a, b): - return np.multiply(np.exp(a), a - b) - - logsumexp1 = logsumexp(logits1, axis=1) - logsumexp2 = logsumexp(logits2, axis=1) - logsumexp_diff = logsumexp2 - logsumexp1 - - exp_diff = exp_times_diff(logits1, logits2) - exp_diff = np.sum(exp_diff, axis=1) - - inv_exp_sum = np.sum(np.exp(logits1), axis=1) - term1 = np.divide(exp_diff, inv_exp_sum) - - kl = term1 + logsumexp_diff - kl = np.maximum(kl, 0.0) - kl = np.nan_to_num(kl) - return np.mean(kl) diff --git a/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py deleted file mode 100644 index 0f0c5d365..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/posterior_bnn_sampling.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Contextual bandit algorithm based on Thompson Sampling and a Bayesian NN.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - -from bandits.core.bandit_algorithm import BanditAlgorithm -from bandits.algorithms.bb_alpha_divergence_model import BBAlphaDivergence -from bandits.algorithms.bf_variational_neural_bandit_model import BfVariationalNeuralBanditModel -from bandits.core.contextual_dataset import ContextualDataset -from bandits.algorithms.multitask_gp import MultitaskGP -from bandits.algorithms.neural_bandit_model import NeuralBanditModel -from bandits.algorithms.variational_neural_bandit_model import VariationalNeuralBanditModel - - -class PosteriorBNNSampling(BanditAlgorithm): - """Posterior Sampling algorithm based on a Bayesian neural network.""" - - def __init__(self, name, hparams, bnn_model='RMSProp'): - """Creates a PosteriorBNNSampling object based on a specific optimizer. - - The algorithm has two basic tools: an Approx BNN and a Contextual Dataset. - The Bayesian Network keeps the posterior based on the optimizer iterations. - - Args: - name: Name of the algorithm. - hparams: Hyper-parameters of the algorithm. - bnn_model: Type of BNN. By default RMSProp (point estimate). - """ - - self.name = name - self.hparams = hparams - self.optimizer_n = hparams.optimizer - - self.training_freq = hparams.training_freq - self.training_epochs = hparams.training_epochs - self.t = 0 - self.data_h = ContextualDataset(hparams.context_dim, hparams.num_actions, - hparams.buffer_s) - - # to be extended with more BNNs (BB alpha-div, GPs, SGFS, constSGD...) - bnn_name = '{}-bnn'.format(name) - if bnn_model == 'Variational': - self.bnn = VariationalNeuralBanditModel(hparams, bnn_name) - elif bnn_model == 'AlphaDiv': - self.bnn = BBAlphaDivergence(hparams, bnn_name) - elif bnn_model == 'Variational_BF': - self.bnn = BfVariationalNeuralBanditModel(hparams, bnn_name) - elif bnn_model == 'GP': - self.bnn = MultitaskGP(hparams) - else: - self.bnn = NeuralBanditModel(self.optimizer_n, hparams, bnn_name) - - def action(self, context): - """Selects action for context based on Thompson Sampling using the BNN.""" - - if self.t < self.hparams.num_actions * self.hparams.initial_pulls: - # round robin until each action has been taken "initial_pulls" times - return self.t % self.hparams.num_actions - - with self.bnn.graph.as_default(): - c = context.reshape((1, self.hparams.context_dim)) - output = self.bnn.sess.run(self.bnn.y_pred, feed_dict={self.bnn.x: c}) - return np.argmax(output) - - def update(self, context, action, reward): - """Updates data buffer, and re-trains the BNN every training_freq steps.""" - - self.t += 1 - self.data_h.add(context, action, reward) - - if self.t % self.training_freq == 0: - if self.hparams.reset_lr: - self.bnn.assign_lr() - self.bnn.train(self.data_h, self.training_epochs) diff --git a/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py b/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py deleted file mode 100644 index 15c073fbe..000000000 --- a/research/deep_contextual_bandits/bandits/algorithms/uniform_sampling.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Contextual bandit algorithm that selects an action uniformly at random."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-from bandits.core.bandit_algorithm import BanditAlgorithm
-
-
-class UniformSampling(BanditAlgorithm):
-  """Defines a baseline; returns one action uniformly at random."""
-
-  def __init__(self, name, hparams):
-    """Creates a UniformSampling object.
-
-    Args:
-      name: Name of the algorithm.
-      hparams: Hyper-parameters, including the number of arms (num_actions).
-    """
-
-    self.name = name
-    self.hparams = hparams
-
-  def action(self, context):
-    """Selects an action uniformly at random."""
-    return np.random.choice(range(self.hparams.num_actions))
diff --git a/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py b/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py
deleted file mode 100644
index 7700c08ba..000000000
--- a/research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Bayesian NN using factorized VI (Bayes By Backprop, Blundell et al. 2015).
-
-See https://arxiv.org/abs/1505.05424 for details.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-from absl import flags
-from bandits.core.bayesian_nn import BayesianNN
-
-FLAGS = flags.FLAGS
-
-
-def log_gaussian(x, mu, sigma, reduce_sum=True):
-  """Returns log Gaussian pdf."""
-  res = (-0.5 * np.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) /
-         (2 * tf.square(sigma)))
-  if reduce_sum:
-    return tf.reduce_sum(res)
-  else:
-    return res
-
-
-def analytic_kl(mu_1, sigma_1, mu_2, sigma_2):
-  """KL for two Gaussian distributions with diagonal covariance matrix."""
-  sigma_1_sq = tf.square(sigma_1)
-  sigma_2_sq = tf.square(sigma_2)
-
-  t1 = tf.square(mu_1 - mu_2) / (2. * sigma_2_sq)
-  t2 = (sigma_1_sq/sigma_2_sq - 1. - tf.log(sigma_1_sq) + tf.log(sigma_2_sq))/2.
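# The two terms above are the elementwise closed form
#   KL(N(mu_1, sigma_1^2) || N(mu_2, sigma_2^2))
#     = (mu_1 - mu_2)^2 / (2 sigma_2^2)
#       + (sigma_1^2 / sigma_2^2 - 1 - log sigma_1^2 + log sigma_2^2) / 2,
# e.g. KL(N(0, 1) || N(1, 1)) = 0.5; summing over coordinates (below)
# gives the KL between the two diagonal Gaussians.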
- return tf.reduce_sum(t1 + t2) - - -class VariationalNeuralBanditModel(BayesianNN): - """Implements an approximate Bayesian NN using Variational Inference.""" - - def __init__(self, hparams, name="BBBNN"): - - self.name = name - self.hparams = hparams - - self.n_in = self.hparams.context_dim - self.n_out = self.hparams.num_actions - self.layers = self.hparams.layer_sizes - self.init_scale = self.hparams.init_scale - self.f_num_points = None - if "f_num_points" in hparams: - self.f_num_points = self.hparams.f_num_points - - self.cleared_times_trained = self.hparams.cleared_times_trained - self.initial_training_steps = self.hparams.initial_training_steps - self.training_schedule = np.linspace(self.initial_training_steps, - self.hparams.training_epochs, - self.cleared_times_trained) - self.verbose = getattr(self.hparams, "verbose", True) - - self.weights_m = {} - self.weights_std = {} - self.biases_m = {} - self.biases_std = {} - - self.times_trained = 0 - - if self.hparams.use_sigma_exp_transform: - self.sigma_transform = tf.exp - self.inverse_sigma_transform = np.log - else: - self.sigma_transform = tf.nn.softplus - self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y)) - - # Whether to use the local reparameterization trick to compute the loss. - # See details in https://arxiv.org/abs/1506.02557 - self.use_local_reparameterization = True - - self.build_graph() - - def build_mu_variable(self, shape): - """Returns a mean variable initialized as N(0, 0.05).""" - return tf.Variable(tf.random_normal(shape, 0.0, 0.05)) - - def build_sigma_variable(self, shape, init=-5.): - """Returns a sigma variable initialized as N(init, 0.05).""" - # Initialize sigma to be very small initially to encourage MAP opt first - return tf.Variable(tf.random_normal(shape, init, 0.05)) - - def build_layer(self, input_x, input_x_local, shape, - layer_id, activation_fn=tf.nn.relu): - """Builds a variational layer, and computes KL term. - - Args: - input_x: Input to the variational layer. - input_x_local: Input when the local reparameterization trick was applied. - shape: [number_inputs, number_outputs] for the layer. - layer_id: Number of layer in the architecture. - activation_fn: Activation function to apply. - - Returns: - output_h: Output of the variational layer. - output_h_local: Output when local reparameterization trick was applied. - neg_kl: Negative KL term for the layer. 
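-
-    The weight prior is a zero-mean Gaussian with variance 2 / number_inputs.
-    With the local reparameterization trick the KL term is computed in closed
-    form against this prior; otherwise it is estimated from sampled weights.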
- """ - - w_mu = self.build_mu_variable(shape) - w_sigma = self.sigma_transform(self.build_sigma_variable(shape)) - w_noise = tf.random_normal(shape) - w = w_mu + w_sigma * w_noise - - b_mu = self.build_mu_variable([1, shape[1]]) - b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]])) - b = b_mu - - # Store means and stds - self.weights_m[layer_id] = w_mu - self.weights_std[layer_id] = w_sigma - self.biases_m[layer_id] = b_mu - self.biases_std[layer_id] = b_sigma - - # Create outputs - output_h = activation_fn(tf.matmul(input_x, w) + b) - - if self.use_local_reparameterization: - # Use analytic KL divergence wrt the prior - neg_kl = -analytic_kl(w_mu, w_sigma, - 0., tf.to_float(np.sqrt(2./shape[0]))) - else: - # Create empirical KL loss terms - log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0]))) - log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma)) - neg_kl = log_p - log_q - - # Apply local reparameterization trick: sample activations pre nonlinearity - m_h = tf.matmul(input_x_local, w_mu) + b - v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma)) - output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h)) - output_h_local = activation_fn(output_h_local) - - return output_h, output_h_local, neg_kl - - def build_action_noise(self): - """Defines a model for additive noise per action, and its KL term.""" - - # Define mean and std variables (log-normal dist) for each action. - noise_sigma_mu = (self.build_mu_variable([1, self.n_out]) - + self.inverse_sigma_transform(self.hparams.noise_sigma)) - noise_sigma_sigma = self.sigma_transform( - self.build_sigma_variable([1, self.n_out])) - - pre_noise_sigma = (noise_sigma_mu - + tf.random_normal([1, self.n_out]) * noise_sigma_sigma) - self.noise_sigma = self.sigma_transform(pre_noise_sigma) - - # Compute KL for additive noise sigma terms. - if getattr(self.hparams, "infer_noise_sigma", False): - neg_kl_term = log_gaussian( - pre_noise_sigma, - self.inverse_sigma_transform(self.hparams.noise_sigma), - self.hparams.prior_sigma - ) - neg_kl_term -= log_gaussian(pre_noise_sigma, - noise_sigma_mu, - noise_sigma_sigma) - else: - neg_kl_term = 0. - - return neg_kl_term - - def build_model(self, activation_fn=tf.nn.relu): - """Defines the actual NN model with fully connected layers. - - The loss is computed for partial feedback settings (bandits), so only - the observed outcome is backpropagated (see weighted loss). - Selects the optimizer and, finally, it also initializes the graph. - - Args: - activation_fn: the activation function used in the nn layers. - """ - - if self.verbose: - print("Initializing model {}.".format(self.name)) - neg_kl_term, l_number = 0, 0 - use_local_reparameterization = self.use_local_reparameterization - - # Compute model additive noise for each action with log-normal distribution - neg_kl_term += self.build_action_noise() - - # Build network. 
- input_x = self.x - input_local = self.x - n_in = self.n_in - - for l_number, n_nodes in enumerate(self.layers): - if n_nodes > 0: - h, h_local, neg_kl = self.build_layer(input_x, input_local, - [n_in, n_nodes], l_number) - - neg_kl_term += neg_kl - input_x, input_local = h, h_local - n_in = n_nodes - - # Create last linear layer - h, h_local, neg_kl = self.build_layer(input_x, input_local, - [n_in, self.n_out], - l_number + 1, - activation_fn=lambda x: x) - neg_kl_term += neg_kl - - self.y_pred = h - self.y_pred_local = h_local - - # Compute log likelihood (with learned or fixed noise level) - if getattr(self.hparams, "infer_noise_sigma", False): - log_likelihood = log_gaussian( - self.y, self.y_pred_local, self.noise_sigma, reduce_sum=False) - else: - y_hat = self.y_pred_local if use_local_reparameterization else self.y_pred - log_likelihood = log_gaussian( - self.y, y_hat, self.hparams.noise_sigma, reduce_sum=False) - - # Only take into account observed outcomes (bandits setting) - batch_size = tf.to_float(tf.shape(self.x)[0]) - weighted_log_likelihood = tf.reduce_sum( - log_likelihood * self.weights) / batch_size - - # The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL - elbo = weighted_log_likelihood + (neg_kl_term / self.n) - - self.loss = -elbo - self.global_step = tf.train.get_or_create_global_step() - self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize( - self.loss, global_step=self.global_step) - - # Create tensorboard metrics - self.create_summaries() - self.summary_writer = tf.summary.FileWriter( - "{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph) - - def build_graph(self): - """Defines graph, session, placeholders, and model. - - Placeholders are: n (size of the dataset), x and y (context and observed - reward for each action), and weights (one-hot encoding of selected action - for each context, i.e., only possibly non-zero element in each y). - """ - - self.graph = tf.Graph() - with self.graph.as_default(): - - self.sess = tf.Session() - - self.n = tf.placeholder(shape=[], dtype=tf.float32) - - self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32) - self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) - self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32) - - self.build_model() - self.sess.run(tf.global_variables_initializer()) - - def create_summaries(self): - """Defines summaries including mean loss, and global step.""" - - with self.graph.as_default(): - with tf.name_scope(self.name + "_summaries"): - tf.summary.scalar("loss", self.loss) - tf.summary.scalar("global_step", self.global_step) - self.summary_op = tf.summary.merge_all() - - def assign_lr(self): - """Resets the learning rate in dynamic schedules for subsequent trainings. - - In bandits settings, we do expand our dataset over time. Then, we need to - re-train the network with the new data. The algorithms that do not keep - the step constant, can reset it at the start of each *training* process. - """ - - decay_steps = 1 - if self.hparams.activate_decay: - current_gs = self.sess.run(self.global_step) - with self.graph.as_default(): - self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr, - self.global_step - current_gs, - decay_steps, - self.hparams.lr_decay_rate) - - def train(self, data, num_steps): - """Trains the BNN for num_steps, using the data in 'data'. - - Args: - data: ContextualDataset object that provides the data. - num_steps: Number of minibatches to train the network for. 
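-        Note: while times_trained < cleared_times_trained, num_steps is
-        overridden by the corresponding entry of the training schedule.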
- - Returns: - losses: Loss history during training. - """ - - if self.times_trained < self.cleared_times_trained: - num_steps = int(self.training_schedule[self.times_trained]) - self.times_trained += 1 - - losses = [] - - with self.graph.as_default(): - - if self.verbose: - print("Training {} for {} steps...".format(self.name, num_steps)) - - for step in range(num_steps): - x, y, weights = data.get_batch_with_weights(self.hparams.batch_size) - _, summary, global_step, loss = self.sess.run( - [self.train_op, self.summary_op, self.global_step, self.loss], - feed_dict={ - self.x: x, - self.y: y, - self.weights: weights, - self.n: data.num_points(self.f_num_points), - }) - - losses.append(loss) - - if step % self.hparams.freq_summary == 0: - if self.hparams.show_training: - print("{} | step: {}, loss: {}".format( - self.name, global_step, loss)) - self.summary_writer.add_summary(summary, global_step) - - return losses diff --git a/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py b/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py deleted file mode 100644 index cae4e1676..000000000 --- a/research/deep_contextual_bandits/bandits/core/bandit_algorithm.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Define the abstract class for contextual bandit algorithms.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -class BanditAlgorithm(object): - """A bandit algorithm must be able to do two basic operations. - - 1. Choose an action given a context. - 2. Update its internal model given a triple (context, played action, reward). - """ - - def action(self, context): - pass - - def update(self, context, action, reward): - pass diff --git a/research/deep_contextual_bandits/bandits/core/bayesian_nn.py b/research/deep_contextual_bandits/bandits/core/bayesian_nn.py deleted file mode 100644 index 310961591..000000000 --- a/research/deep_contextual_bandits/bandits/core/bayesian_nn.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Define the abstract class for Bayesian Neural Networks.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -class BayesianNN(object): - """A Bayesian neural network keeps a distribution over neural nets.""" - - def __init__(self, optimizer): - pass - - def build_model(self): - pass - - def train(self, data): - pass - - def sample(self, steps): - pass diff --git a/research/deep_contextual_bandits/bandits/core/contextual_bandit.py b/research/deep_contextual_bandits/bandits/core/contextual_bandit.py deleted file mode 100644 index 984673789..000000000 --- a/research/deep_contextual_bandits/bandits/core/contextual_bandit.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Define a contextual bandit from which we can sample and compute rewards. - -We can feed the data, sample a context, its reward for a specific action, and -also the optimal action for a given context. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - - -def run_contextual_bandit(context_dim, num_actions, dataset, algos): - """Run a contextual bandit problem on a set of algorithms. - - Args: - context_dim: Dimension of the context. - num_actions: Number of available actions. - dataset: Matrix where every row is a context + num_actions rewards. - algos: List of algorithms to use in the contextual bandit instance. - - Returns: - h_actions: Matrix with actions: size (num_context, num_algorithms). - h_rewards: Matrix with rewards: size (num_context, num_algorithms). - """ - - num_contexts = dataset.shape[0] - - # Create contextual bandit - cmab = ContextualBandit(context_dim, num_actions) - cmab.feed_data(dataset) - - h_actions = np.empty((0, len(algos)), float) - h_rewards = np.empty((0, len(algos)), float) - - # Run the contextual bandit process - for i in range(num_contexts): - context = cmab.context(i) - actions = [a.action(context) for a in algos] - rewards = [cmab.reward(i, action) for action in actions] - - for j, a in enumerate(algos): - a.update(context, actions[j], rewards[j]) - - h_actions = np.vstack((h_actions, np.array(actions))) - h_rewards = np.vstack((h_rewards, np.array(rewards))) - - return h_actions, h_rewards - - -class ContextualBandit(object): - """Implements a Contextual Bandit with d-dimensional contexts and k arms.""" - - def __init__(self, context_dim, num_actions): - """Creates a contextual bandit object. - - Args: - context_dim: Dimension of the contexts. - num_actions: Number of arms for the multi-armed bandit. - """ - - self._context_dim = context_dim - self._num_actions = num_actions - - def feed_data(self, data): - """Feeds the data (contexts + rewards) to the bandit object. 
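-
-    Contexts are later served in the stored order until reset() reshuffles it.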
- - Args: - data: Numpy array with shape [n, d+k], where n is the number of contexts, - d is the dimension of each context, and k the number of arms (rewards). - - Raises: - ValueError: when data dimensions do not correspond to the object values. - """ - - if data.shape[1] != self.context_dim + self.num_actions: - raise ValueError('Data dimensions do not match.') - - self._number_contexts = data.shape[0] - self.data = data - self.order = range(self.number_contexts) - - def reset(self): - """Randomly shuffle the order of the contexts to deliver.""" - self.order = np.random.permutation(self.number_contexts) - - def context(self, number): - """Returns the number-th context.""" - return self.data[self.order[number]][:self.context_dim] - - def reward(self, number, action): - """Returns the reward for the number-th context and action.""" - return self.data[self.order[number]][self.context_dim + action] - - def optimal(self, number): - """Returns the optimal action (in hindsight) for the number-th context.""" - return np.argmax(self.data[self.order[number]][self.context_dim:]) - - @property - def context_dim(self): - return self._context_dim - - @property - def num_actions(self): - return self._num_actions - - @property - def number_contexts(self): - return self._number_contexts diff --git a/research/deep_contextual_bandits/bandits/core/contextual_dataset.py b/research/deep_contextual_bandits/bandits/core/contextual_dataset.py deleted file mode 100644 index 9fae7629c..000000000 --- a/research/deep_contextual_bandits/bandits/core/contextual_dataset.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Define a data buffer for contextual bandit algorithms.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - - -class ContextualDataset(object): - """The buffer is able to append new data, and sample random minibatches.""" - - def __init__(self, context_dim, num_actions, buffer_s=-1, intercept=False): - """Creates a ContextualDataset object. - - The data is stored in attributes: contexts and rewards. - The sequence of taken actions are stored in attribute actions. - - Args: - context_dim: Dimension of the contexts. - num_actions: Number of arms for the multi-armed bandit. - buffer_s: Size of buffer for training. Only last buffer_s will be - returned as minibatch. If buffer_s = -1, all data will be used. - intercept: If True, it adds a constant (1.0) dimension to each context X, - at the end. - """ - - self._context_dim = context_dim - self._num_actions = num_actions - self._contexts = None - self._rewards = None - self.actions = [] - self.buffer_s = buffer_s - self.intercept = intercept - - def add(self, context, action, reward): - """Adds a new triplet (context, action, reward) to the dataset. 
- - The reward for the actions that weren't played is assumed to be zero. - - Args: - context: A d-dimensional vector with the context. - action: Integer between 0 and k-1 representing the chosen arm. - reward: Real number representing the reward for the (context, action). - """ - - if self.intercept: - c = np.array(context[:]) - c = np.append(c, 1.0).reshape((1, self.context_dim + 1)) - else: - c = np.array(context[:]).reshape((1, self.context_dim)) - - if self.contexts is None: - self.contexts = c - else: - self.contexts = np.vstack((self.contexts, c)) - - r = np.zeros((1, self.num_actions)) - r[0, action] = reward - if self.rewards is None: - self.rewards = r - else: - self.rewards = np.vstack((self.rewards, r)) - - self.actions.append(action) - - def replace_data(self, contexts=None, actions=None, rewards=None): - if contexts is not None: - self.contexts = contexts - if actions is not None: - self.actions = actions - if rewards is not None: - self.rewards = rewards - - def get_batch(self, batch_size): - """Returns a random minibatch of (contexts, rewards) with batch_size.""" - n, _ = self.contexts.shape - if self.buffer_s == -1: - # use all the data - ind = np.random.choice(range(n), batch_size) - else: - # use only buffer (last buffer_s observations) - ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size) - return self.contexts[ind, :], self.rewards[ind, :] - - def get_data(self, action): - """Returns all (context, reward) where the action was played.""" - n, _ = self.contexts.shape - ind = np.array([i for i in range(n) if self.actions[i] == action]) - return self.contexts[ind, :], self.rewards[ind, action] - - def get_data_with_weights(self): - """Returns all observations with one-hot weights for actions.""" - weights = np.zeros((self.contexts.shape[0], self.num_actions)) - a_ind = np.array([(i, val) for i, val in enumerate(self.actions)]) - weights[a_ind[:, 0], a_ind[:, 1]] = 1.0 - return self.contexts, self.rewards, weights - - def get_batch_with_weights(self, batch_size): - """Returns a random mini-batch with one-hot weights for actions.""" - n, _ = self.contexts.shape - if self.buffer_s == -1: - # use all the data - ind = np.random.choice(range(n), batch_size) - else: - # use only buffer (last buffer_s obs) - ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size) - - weights = np.zeros((batch_size, self.num_actions)) - sampled_actions = np.array(self.actions)[ind] - a_ind = np.array([(i, val) for i, val in enumerate(sampled_actions)]) - weights[a_ind[:, 0], a_ind[:, 1]] = 1.0 - return self.contexts[ind, :], self.rewards[ind, :], weights - - def num_points(self, f=None): - """Returns number of points in the buffer (after applying function f).""" - if f is not None: - return f(self.contexts.shape[0]) - return self.contexts.shape[0] - - @property - def context_dim(self): - return self._context_dim - - @property - def num_actions(self): - return self._num_actions - - @property - def contexts(self): - return self._contexts - - @contexts.setter - def contexts(self, value): - self._contexts = value - - @property - def actions(self): - return self._actions - - @actions.setter - def actions(self, value): - self._actions = value - - @property - def rewards(self): - return self._rewards - - @rewards.setter - def rewards(self, value): - self._rewards = value diff --git a/research/deep_contextual_bandits/bandits/data/data_sampler.py b/research/deep_contextual_bandits/bandits/data/data_sampler.py deleted file mode 100644 index 55d1bae38..000000000 --- 
a/research/deep_contextual_bandits/bandits/data/data_sampler.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Functions to create bandit problems from datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import pandas as pd -import tensorflow as tf - - -def one_hot(df, cols): - """Returns one-hot encoding of DataFrame df including columns in cols.""" - for col in cols: - dummies = pd.get_dummies(df[col], prefix=col, drop_first=False) - df = pd.concat([df, dummies], axis=1) - df = df.drop(col, axis=1) - return df - - -def sample_mushroom_data(file_name, - num_contexts, - r_noeat=0, - r_eat_safe=5, - r_eat_poison_bad=-35, - r_eat_poison_good=5, - prob_poison_bad=0.5): - """Samples bandit game from Mushroom UCI Dataset. - - Args: - file_name: Route of file containing the original Mushroom UCI dataset. - num_contexts: Number of points to sample, i.e. (context, action rewards). - r_noeat: Reward for not eating a mushroom. - r_eat_safe: Reward for eating a non-poisonous mushroom. - r_eat_poison_bad: Reward for eating a poisonous mushroom if harmed. - r_eat_poison_good: Reward for eating a poisonous mushroom if not harmed. - prob_poison_bad: Probability of being harmed by eating a poisonous mushroom. - - Returns: - dataset: Sampled matrix with n rows: (context, eat_reward, no_eat_reward). - opt_vals: Vector of expected optimal (reward, action) for each context. - - We assume r_eat_safe > r_noeat, and r_eat_poison_good > r_eat_poison_bad. 
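-
-  For example, with the default arguments the expected reward of eating a
-  poisonous mushroom is 0.5 * (-35) + 0.5 * 5 = -15 < r_noeat = 0, so the
-  optimal policy is to eat a mushroom if and only if it is edible.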
- """ - - # first two cols of df encode whether mushroom is edible or poisonous - df = pd.read_csv(file_name, header=None) - df = one_hot(df, df.columns) - ind = np.random.choice(range(df.shape[0]), num_contexts, replace=True) - - contexts = df.iloc[ind, 2:] - no_eat_reward = r_noeat * np.ones((num_contexts, 1)) - random_poison = np.random.choice( - [r_eat_poison_bad, r_eat_poison_good], - p=[prob_poison_bad, 1 - prob_poison_bad], - size=num_contexts) - eat_reward = r_eat_safe * df.iloc[ind, 0] - eat_reward += np.multiply(random_poison, df.iloc[ind, 1]) - eat_reward = eat_reward.values.reshape((num_contexts, 1)) - - # compute optimal expected reward and optimal actions - exp_eat_poison_reward = r_eat_poison_bad * prob_poison_bad - exp_eat_poison_reward += r_eat_poison_good * (1 - prob_poison_bad) - opt_exp_reward = r_eat_safe * df.iloc[ind, 0] + max( - r_noeat, exp_eat_poison_reward) * df.iloc[ind, 1] - - if r_noeat > exp_eat_poison_reward: - # actions: no eat = 0 ; eat = 1 - opt_actions = df.iloc[ind, 0] # indicator of edible - else: - # should always eat (higher expected reward) - opt_actions = np.ones((num_contexts, 1)) - - opt_vals = (opt_exp_reward.values, opt_actions.values) - - return np.hstack((contexts, no_eat_reward, eat_reward)), opt_vals - - -def sample_stock_data(file_name, context_dim, num_actions, num_contexts, - sigma, shuffle_rows=True): - """Samples linear bandit game from stock prices dataset. - - Args: - file_name: Route of file containing the stock prices dataset. - context_dim: Context dimension (i.e. vector with the price of each stock). - num_actions: Number of actions (different linear portfolio strategies). - num_contexts: Number of contexts to sample. - sigma: Vector with additive noise levels for each action. - shuffle_rows: If True, rows from original dataset are shuffled. - - Returns: - dataset: Sampled matrix with rows: (context, reward_1, ..., reward_k). - opt_vals: Vector of expected optimal (reward, action) for each context. - """ - - with tf.gfile.Open(file_name, 'r') as f: - contexts = np.loadtxt(f, skiprows=1) - - if shuffle_rows: - np.random.shuffle(contexts) - contexts = contexts[:num_contexts, :] - - betas = np.random.uniform(-1, 1, (context_dim, num_actions)) - betas /= np.linalg.norm(betas, axis=0) - - mean_rewards = np.dot(contexts, betas) - noise = np.random.normal(scale=sigma, size=mean_rewards.shape) - rewards = mean_rewards + noise - - opt_actions = np.argmax(mean_rewards, axis=1) - opt_rewards = [mean_rewards[i, a] for i, a in enumerate(opt_actions)] - return np.hstack((contexts, rewards)), (np.array(opt_rewards), opt_actions) - - -def sample_jester_data(file_name, context_dim, num_actions, num_contexts, - shuffle_rows=True, shuffle_cols=False): - """Samples bandit game from (user, joke) dense subset of Jester dataset. - - Args: - file_name: Route of file containing the modified Jester dataset. - context_dim: Context dimension (i.e. vector with some ratings from a user). - num_actions: Number of actions (number of joke ratings to predict). - num_contexts: Number of contexts to sample. - shuffle_rows: If True, rows from original dataset are shuffled. - shuffle_cols: Whether or not context/action jokes are randomly shuffled. - - Returns: - dataset: Sampled matrix with rows: (context, rating_1, ..., rating_k). - opt_vals: Vector of deterministic optimal (reward, action) for each context. 
- """ - - with tf.gfile.Open(file_name, 'rb') as f: - dataset = np.load(f) - - if shuffle_cols: - dataset = dataset[:, np.random.permutation(dataset.shape[1])] - if shuffle_rows: - np.random.shuffle(dataset) - dataset = dataset[:num_contexts, :] - - assert context_dim + num_actions == dataset.shape[1], 'Wrong data dimensions.' - - opt_actions = np.argmax(dataset[:, context_dim:], axis=1) - opt_rewards = np.array([dataset[i, context_dim + a] - for i, a in enumerate(opt_actions)]) - - return dataset, (opt_rewards, opt_actions) - - -def sample_statlog_data(file_name, num_contexts, shuffle_rows=True, - remove_underrepresented=False): - """Returns bandit problem dataset based on the UCI statlog data. - - Args: - file_name: Route of file containing the Statlog dataset. - num_contexts: Number of contexts to sample. - shuffle_rows: If True, rows from original dataset are shuffled. - remove_underrepresented: If True, removes arms with very few rewards. - - Returns: - dataset: Sampled matrix with rows: (context, action rewards). - opt_vals: Vector of deterministic optimal (reward, action) for each context. - - https://archive.ics.uci.edu/ml/datasets/Statlog+(Shuttle) - """ - - with tf.gfile.Open(file_name, 'r') as f: - data = np.loadtxt(f) - - num_actions = 7 # some of the actions are very rarely optimal. - - # Shuffle data - if shuffle_rows: - np.random.shuffle(data) - data = data[:num_contexts, :] - - # Last column is label, rest are features - contexts = data[:, :-1] - labels = data[:, -1].astype(int) - 1 # convert to 0 based index - - if remove_underrepresented: - contexts, labels = remove_underrepresented_classes(contexts, labels) - - return classification_to_bandit_problem(contexts, labels, num_actions) - - -def sample_adult_data(file_name, num_contexts, shuffle_rows=True, - remove_underrepresented=False): - """Returns bandit problem dataset based on the UCI adult data. - - Args: - file_name: Route of file containing the Adult dataset. - num_contexts: Number of contexts to sample. - shuffle_rows: If True, rows from original dataset are shuffled. - remove_underrepresented: If True, removes arms with very few rewards. - - Returns: - dataset: Sampled matrix with rows: (context, action rewards). - opt_vals: Vector of deterministic optimal (reward, action) for each context. - - Preprocessing: - * drop rows with missing values - * convert categorical variables to 1 hot encoding - - https://archive.ics.uci.edu/ml/datasets/census+income - """ - with tf.gfile.Open(file_name, 'r') as f: - df = pd.read_csv(f, header=None, - na_values=[' ?']).dropna() - - num_actions = 14 - - if shuffle_rows: - df = df.sample(frac=1) - df = df.iloc[:num_contexts, :] - - labels = df[6].astype('category').cat.codes.as_matrix() - df = df.drop([6], axis=1) - - # Convert categorical variables to 1 hot encoding - cols_to_transform = [1, 3, 5, 7, 8, 9, 13, 14] - df = pd.get_dummies(df, columns=cols_to_transform) - - if remove_underrepresented: - df, labels = remove_underrepresented_classes(df, labels) - contexts = df.as_matrix() - - return classification_to_bandit_problem(contexts, labels, num_actions) - - -def sample_census_data(file_name, num_contexts, shuffle_rows=True, - remove_underrepresented=False): - """Returns bandit problem dataset based on the UCI census data. - - Args: - file_name: Route of file containing the Census dataset. - num_contexts: Number of contexts to sample. - shuffle_rows: If True, rows from original dataset are shuffled. - remove_underrepresented: If True, removes arms with very few rewards. 
- - Returns: - dataset: Sampled matrix with rows: (context, action rewards). - opt_vals: Vector of deterministic optimal (reward, action) for each context. - - Preprocessing: - * drop rows with missing labels - * convert categorical variables to 1 hot encoding - - Note: this is the processed (not the 'raw') dataset. It contains a subset - of the raw features and they've all been discretized. - - https://archive.ics.uci.edu/ml/datasets/US+Census+Data+%281990%29 - """ - # Note: this dataset is quite large. It will be slow to load and preprocess. - with tf.gfile.Open(file_name, 'r') as f: - df = (pd.read_csv(f, header=0, na_values=['?']) - .dropna()) - - num_actions = 9 - - if shuffle_rows: - df = df.sample(frac=1) - df = df.iloc[:num_contexts, :] - - # Assuming what the paper calls response variable is the label? - labels = df['dOccup'].astype('category').cat.codes.as_matrix() - # In addition to label, also drop the (unique?) key. - df = df.drop(['dOccup', 'caseid'], axis=1) - - # All columns are categorical. Convert to 1 hot encoding. - df = pd.get_dummies(df, columns=df.columns) - - if remove_underrepresented: - df, labels = remove_underrepresented_classes(df, labels) - contexts = df.as_matrix() - - return classification_to_bandit_problem(contexts, labels, num_actions) - - -def sample_covertype_data(file_name, num_contexts, shuffle_rows=True, - remove_underrepresented=False): - """Returns bandit problem dataset based on the UCI Cover_Type data. - - Args: - file_name: Route of file containing the Covertype dataset. - num_contexts: Number of contexts to sample. - shuffle_rows: If True, rows from original dataset are shuffled. - remove_underrepresented: If True, removes arms with very few rewards. - - Returns: - dataset: Sampled matrix with rows: (context, action rewards). - opt_vals: Vector of deterministic optimal (reward, action) for each context. - - Preprocessing: - * drop rows with missing labels - * convert categorical variables to 1 hot encoding - - https://archive.ics.uci.edu/ml/datasets/Covertype - """ - with tf.gfile.Open(file_name, 'r') as f: - df = (pd.read_csv(f, header=0, na_values=['?']) - .dropna()) - - num_actions = 7 - - if shuffle_rows: - df = df.sample(frac=1) - df = df.iloc[:num_contexts, :] - - # Assuming what the paper calls response variable is the label? - # Last column is label. - labels = df[df.columns[-1]].astype('category').cat.codes.as_matrix() - df = df.drop([df.columns[-1]], axis=1) - - # All columns are either quantitative or already converted to 1 hot. 
-  if remove_underrepresented:
-    df, labels = remove_underrepresented_classes(df, labels)
-  contexts = df.as_matrix()
-
-  return classification_to_bandit_problem(contexts, labels, num_actions)
-
-
-def classification_to_bandit_problem(contexts, labels, num_actions=None):
-  """Normalize contexts and encode deterministic rewards."""
-
-  if num_actions is None:
-    num_actions = np.max(labels) + 1
-  num_contexts = contexts.shape[0]
-
-  # Due to random subsampling in small problems, some features may be constant
-  sstd = safe_std(np.std(contexts, axis=0, keepdims=True)[0, :])
-
-  # Normalize features
-  contexts = ((contexts - np.mean(contexts, axis=0, keepdims=True)) / sstd)
-
-  # One hot encode labels as rewards
-  rewards = np.zeros((num_contexts, num_actions))
-  rewards[np.arange(num_contexts), labels] = 1.0
-
-  return contexts, rewards, (np.ones(num_contexts), labels)
-
-
-def safe_std(values):
-  """Replaces zero std values with ones (to avoid dividing by zero)."""
-  return np.array([val if val != 0.0 else 1.0 for val in values])
-
-
-def remove_underrepresented_classes(features, labels, thresh=0.0005):
-  """Removes classes when number of datapoints fraction is below a threshold."""
-
-  # Threshold doesn't seem to agree with https://arxiv.org/pdf/1706.04687.pdf
-  # Example: for Covertype, they report 4 classes after filtering, we get 7?
-  total_count = labels.shape[0]
-  unique, counts = np.unique(labels, return_counts=True)
-  ratios = counts.astype('float') / total_count
-  vals_and_ratios = dict(zip(unique, ratios))
-  print('Unique classes and their ratio of total: %s' % vals_and_ratios)
-  keep = [vals_and_ratios[v] >= thresh for v in labels]
-  return features[keep], labels[np.array(keep)]
diff --git a/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py b/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py
deleted file mode 100644
index c7de48aba..000000000
--- a/research/deep_contextual_bandits/bandits/data/synthetic_data_sampler.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Several functions to sample contextual data."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-
-def sample_contextual_data(num_contexts, dim_context, num_actions, sigma):
-  """Samples independent Gaussian data.
-
-  There is nothing to learn here as the rewards do not depend on the context.
-
-  Args:
-    num_contexts: Number of contexts to sample.
-    dim_context: Dimension of the contexts.
-    num_actions: Number of arms for the multi-armed bandit.
-    sigma: Standard deviation of the independent Gaussian samples.
-
-  Returns:
-    data: A [num_contexts, dim_context + num_actions] numpy array with the data.
- """ - size_data = [num_contexts, dim_context + num_actions] - return np.random.normal(scale=sigma, size=size_data) - - -def sample_linear_data(num_contexts, dim_context, num_actions, sigma=0.0): - """Samples data from linearly parameterized arms. - - The reward for context X and arm j is given by X^T beta_j, for some latent - set of parameters {beta_j : j = 1, ..., k}. The beta's are sampled uniformly - at random, the contexts are Gaussian, and sigma-noise is added to the rewards. - - Args: - num_contexts: Number of contexts to sample. - dim_context: Dimension of the contexts. - num_actions: Number of arms for the multi-armed bandit. - sigma: Standard deviation of the additive noise. Set to zero for no noise. - - Returns: - data: A [n, d+k] numpy array with the data. - betas: Latent parameters that determine expected reward for each arm. - opt: (optimal_rewards, optimal_actions) for all contexts. - """ - - betas = np.random.uniform(-1, 1, (dim_context, num_actions)) - betas /= np.linalg.norm(betas, axis=0) - contexts = np.random.normal(size=[num_contexts, dim_context]) - rewards = np.dot(contexts, betas) - opt_actions = np.argmax(rewards, axis=1) - rewards += np.random.normal(scale=sigma, size=rewards.shape) - opt_rewards = np.array([rewards[i, act] for i, act in enumerate(opt_actions)]) - return np.hstack((contexts, rewards)), betas, (opt_rewards, opt_actions) - - -def sample_sparse_linear_data(num_contexts, dim_context, num_actions, - sparse_dim, sigma=0.0): - """Samples data from sparse linearly parameterized arms. - - The reward for context X and arm j is given by X^T beta_j, for some latent - set of parameters {beta_j : j = 1, ..., k}. The beta's are sampled uniformly - at random, the contexts are Gaussian, and sigma-noise is added to the rewards. - Only s components out of d are non-zero for each arm's beta. - - Args: - num_contexts: Number of contexts to sample. - dim_context: Dimension of the contexts. - num_actions: Number of arms for the multi-armed bandit. - sparse_dim: Dimension of the latent subspace (sparsity pattern dimension). - sigma: Standard deviation of the additive noise. Set to zero for no noise. - - Returns: - data: A [num_contexts, dim_context+num_actions] numpy array with the data. - betas: Latent parameters that determine expected reward for each arm. - opt: (optimal_rewards, optimal_actions) for all contexts. - """ - - flatten = lambda l: [item for sublist in l for item in sublist] - sparse_pattern = flatten( - [[(j, i) for j in np.random.choice(range(dim_context), - sparse_dim, - replace=False)] - for i in range(num_actions)]) - betas = np.random.uniform(-1, 1, (dim_context, num_actions)) - mask = np.zeros((dim_context, num_actions)) - for elt in sparse_pattern: - mask[elt] = 1 - betas = np.multiply(betas, mask) - betas /= np.linalg.norm(betas, axis=0) - contexts = np.random.normal(size=[num_contexts, dim_context]) - rewards = np.dot(contexts, betas) - opt_actions = np.argmax(rewards, axis=1) - rewards += np.random.normal(scale=sigma, size=rewards.shape) - opt_rewards = np.array([rewards[i, act] for i, act in enumerate(opt_actions)]) - return np.hstack((contexts, rewards)), betas, (opt_rewards, opt_actions) - - -def sample_wheel_bandit_data(num_contexts, delta, mean_v, std_v, - mu_large, std_large): - """Samples from Wheel bandit game (see https://arxiv.org/abs/1802.09127). - - Args: - num_contexts: Number of points to sample, i.e. (context, action rewards). - delta: Exploration parameter: high reward in one region if norm above delta. 
- mean_v: Mean reward for each action if context norm is below delta. - std_v: Gaussian reward std for each action if context norm is below delta. - mu_large: Mean reward for optimal action if context norm is above delta. - std_large: Reward std for optimal action if context norm is above delta. - - Returns: - dataset: Sampled matrix with n rows: (context, action rewards). - opt_vals: Vector of expected optimal (reward, action) for each context. - """ - - context_dim = 2 - num_actions = 5 - - data = [] - rewards = [] - opt_actions = [] - opt_rewards = [] - - # sample uniform contexts in unit ball - while len(data) < num_contexts: - raw_data = np.random.uniform(-1, 1, (int(num_contexts / 3), context_dim)) - - for i in range(raw_data.shape[0]): - if np.linalg.norm(raw_data[i, :]) <= 1: - data.append(raw_data[i, :]) - - contexts = np.stack(data)[:num_contexts, :] - - # sample rewards - for i in range(num_contexts): - r = [np.random.normal(mean_v[j], std_v[j]) for j in range(num_actions)] - if np.linalg.norm(contexts[i, :]) >= delta: - # large reward in the right region for the context - r_big = np.random.normal(mu_large, std_large) - if contexts[i, 0] > 0: - if contexts[i, 1] > 0: - r[0] = r_big - opt_actions.append(0) - else: - r[1] = r_big - opt_actions.append(1) - else: - if contexts[i, 1] > 0: - r[2] = r_big - opt_actions.append(2) - else: - r[3] = r_big - opt_actions.append(3) - else: - opt_actions.append(np.argmax(mean_v)) - - opt_rewards.append(r[opt_actions[-1]]) - rewards.append(r) - - rewards = np.stack(rewards) - opt_rewards = np.array(opt_rewards) - opt_actions = np.array(opt_actions) - - return np.hstack((contexts, rewards)), (opt_rewards, opt_actions) diff --git a/research/deep_contextual_bandits/example_main.py b/research/deep_contextual_bandits/example_main.py deleted file mode 100644 index c71a5aa26..000000000 --- a/research/deep_contextual_bandits/example_main.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple example of contextual bandits simulation. - -Code corresponding to: -Deep Bayesian Bandits Showdown: An Empirical Comparison of Bayesian Deep Networks -for Thompson Sampling, by Carlos Riquelme, George Tucker, and Jasper Snoek. 
-https://arxiv.org/abs/1802.09127 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import time -from absl import app -from absl import flags -import numpy as np -import os -import tensorflow as tf - -from bandits.algorithms.bootstrapped_bnn_sampling import BootstrappedBNNSampling -from bandits.core.contextual_bandit import run_contextual_bandit -from bandits.data.data_sampler import sample_adult_data -from bandits.data.data_sampler import sample_census_data -from bandits.data.data_sampler import sample_covertype_data -from bandits.data.data_sampler import sample_jester_data -from bandits.data.data_sampler import sample_mushroom_data -from bandits.data.data_sampler import sample_statlog_data -from bandits.data.data_sampler import sample_stock_data -from bandits.algorithms.fixed_policy_sampling import FixedPolicySampling -from bandits.algorithms.linear_full_posterior_sampling import LinearFullPosteriorSampling -from bandits.algorithms.neural_linear_sampling import NeuralLinearPosteriorSampling -from bandits.algorithms.parameter_noise_sampling import ParameterNoiseSampling -from bandits.algorithms.posterior_bnn_sampling import PosteriorBNNSampling -from bandits.data.synthetic_data_sampler import sample_linear_data -from bandits.data.synthetic_data_sampler import sample_sparse_linear_data -from bandits.data.synthetic_data_sampler import sample_wheel_bandit_data -from bandits.algorithms.uniform_sampling import UniformSampling - -# Set up your file routes to the data files. -base_route = os.getcwd() -data_route = 'contextual_bandits/datasets' - -FLAGS = flags.FLAGS -FLAGS.set_default('alsologtostderr', True) -flags.DEFINE_string('logdir', '/tmp/bandits/', 'Base directory to save output') -flags.DEFINE_string( - 'mushroom_data', - os.path.join(base_route, data_route, 'mushroom.data'), - 'Directory where Mushroom data is stored.') -flags.DEFINE_string( - 'financial_data', - os.path.join(base_route, data_route, 'raw_stock_contexts'), - 'Directory where Financial data is stored.') -flags.DEFINE_string( - 'jester_data', - os.path.join(base_route, data_route, 'jester_data_40jokes_19181users.npy'), - 'Directory where Jester data is stored.') -flags.DEFINE_string( - 'statlog_data', - os.path.join(base_route, data_route, 'shuttle.trn'), - 'Directory where Statlog data is stored.') -flags.DEFINE_string( - 'adult_data', - os.path.join(base_route, data_route, 'adult.full'), - 'Directory where Adult data is stored.') -flags.DEFINE_string( - 'covertype_data', - os.path.join(base_route, data_route, 'covtype.data'), - 'Directory where Covertype data is stored.') -flags.DEFINE_string( - 'census_data', - os.path.join(base_route, data_route, 'USCensus1990.data.txt'), - 'Directory where Census data is stored.') - - -def sample_data(data_type, num_contexts=None): - """Sample data from given 'data_type'. - - Args: - data_type: Dataset from which to sample. - num_contexts: Number of contexts to sample. - - Returns: - dataset: Sampled matrix with rows: (context, reward_1, ..., reward_num_act). - opt_rewards: Vector of expected optimal reward for each context. - opt_actions: Vector of optimal action for each context. - num_actions: Number of available actions. - context_dim: Dimension of each context. 
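-
-  For the fixed datasets, num_contexts is capped at the number of available
-  rows (e.g. 3713 for financial, 19181 for jester, 45222 for adult).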
- """ - - if data_type == 'linear': - # Create linear dataset - num_actions = 8 - context_dim = 10 - noise_stds = [0.01 * (i + 1) for i in range(num_actions)] - dataset, _, opt_linear = sample_linear_data(num_contexts, context_dim, - num_actions, sigma=noise_stds) - opt_rewards, opt_actions = opt_linear - elif data_type == 'sparse_linear': - # Create sparse linear dataset - num_actions = 7 - context_dim = 10 - noise_stds = [0.01 * (i + 1) for i in range(num_actions)] - num_nnz_dims = int(context_dim / 3.0) - dataset, _, opt_sparse_linear = sample_sparse_linear_data( - num_contexts, context_dim, num_actions, num_nnz_dims, sigma=noise_stds) - opt_rewards, opt_actions = opt_sparse_linear - elif data_type == 'mushroom': - # Create mushroom dataset - num_actions = 2 - context_dim = 117 - file_name = FLAGS.mushroom_data - dataset, opt_mushroom = sample_mushroom_data(file_name, num_contexts) - opt_rewards, opt_actions = opt_mushroom - elif data_type == 'financial': - num_actions = 8 - context_dim = 21 - num_contexts = min(3713, num_contexts) - noise_stds = [0.01 * (i + 1) for i in range(num_actions)] - file_name = FLAGS.financial_data - dataset, opt_financial = sample_stock_data(file_name, context_dim, - num_actions, num_contexts, - noise_stds, shuffle_rows=True) - opt_rewards, opt_actions = opt_financial - elif data_type == 'jester': - num_actions = 8 - context_dim = 32 - num_contexts = min(19181, num_contexts) - file_name = FLAGS.jester_data - dataset, opt_jester = sample_jester_data(file_name, context_dim, - num_actions, num_contexts, - shuffle_rows=True, - shuffle_cols=True) - opt_rewards, opt_actions = opt_jester - elif data_type == 'statlog': - file_name = FLAGS.statlog_data - num_actions = 7 - num_contexts = min(43500, num_contexts) - sampled_vals = sample_statlog_data(file_name, num_contexts, - shuffle_rows=True) - contexts, rewards, (opt_rewards, opt_actions) = sampled_vals - dataset = np.hstack((contexts, rewards)) - context_dim = contexts.shape[1] - elif data_type == 'adult': - file_name = FLAGS.adult_data - num_actions = 14 - num_contexts = min(45222, num_contexts) - sampled_vals = sample_adult_data(file_name, num_contexts, - shuffle_rows=True) - contexts, rewards, (opt_rewards, opt_actions) = sampled_vals - dataset = np.hstack((contexts, rewards)) - context_dim = contexts.shape[1] - elif data_type == 'covertype': - file_name = FLAGS.covertype_data - num_actions = 7 - num_contexts = min(150000, num_contexts) - sampled_vals = sample_covertype_data(file_name, num_contexts, - shuffle_rows=True) - contexts, rewards, (opt_rewards, opt_actions) = sampled_vals - dataset = np.hstack((contexts, rewards)) - context_dim = contexts.shape[1] - elif data_type == 'census': - file_name = FLAGS.census_data - num_actions = 9 - num_contexts = min(150000, num_contexts) - sampled_vals = sample_census_data(file_name, num_contexts, - shuffle_rows=True) - contexts, rewards, (opt_rewards, opt_actions) = sampled_vals - dataset = np.hstack((contexts, rewards)) - context_dim = contexts.shape[1] - elif data_type == 'wheel': - delta = 0.95 - num_actions = 5 - context_dim = 2 - mean_v = [1.0, 1.0, 1.0, 1.0, 1.2] - std_v = [0.05, 0.05, 0.05, 0.05, 0.05] - mu_large = 50 - std_large = 0.01 - dataset, opt_wheel = sample_wheel_bandit_data(num_contexts, delta, - mean_v, std_v, - mu_large, std_large) - opt_rewards, opt_actions = opt_wheel - - return dataset, opt_rewards, opt_actions, num_actions, context_dim - - -def display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, name): - """Displays summary 
statistics of the performance of each algorithm.""" - - print('---------------------------------------------------') - print('---------------------------------------------------') - print('{} bandit completed after {} seconds.'.format( - name, time.time() - t_init)) - print('---------------------------------------------------') - - performance_pairs = [] - for j, a in enumerate(algos): - performance_pairs.append((a.name, np.sum(h_rewards[:, j]))) - performance_pairs = sorted(performance_pairs, - key=lambda elt: elt[1], - reverse=True) - for i, (name, reward) in enumerate(performance_pairs): - print('{:3}) {:20}| \t \t total reward = {:10}.'.format(i, name, reward)) - - print('---------------------------------------------------') - print('Optimal total reward = {}.'.format(np.sum(opt_rewards))) - print('Frequency of optimal actions (action, frequency):') - print([[elt, list(opt_actions).count(elt)] for elt in set(opt_actions)]) - print('---------------------------------------------------') - print('---------------------------------------------------') - - -def main(_): - - # Problem parameters - num_contexts = 2000 - - # Data type in {linear, sparse_linear, mushroom, financial, jester, - # statlog, adult, covertype, census, wheel} - data_type = 'mushroom' - - # Create dataset - sampled_vals = sample_data(data_type, num_contexts) - dataset, opt_rewards, opt_actions, num_actions, context_dim = sampled_vals - - # Define hyperparameters and algorithms - hparams = tf.contrib.training.HParams(num_actions=num_actions) - - hparams_linear = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - a0=6, - b0=6, - lambda_prior=0.25, - initial_pulls=2) - - hparams_rms = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - reset_lr=True, - lr_decay_rate=0.5, - training_freq=50, - training_epochs=100, - p=0.95, - q=3) - - hparams_dropout = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - reset_lr=True, - lr_decay_rate=0.5, - training_freq=50, - training_epochs=100, - use_dropout=True, - keep_prob=0.80) - - hparams_bbb = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - use_sigma_exp_transform=True, - cleared_times_trained=10, - initial_training_steps=100, - noise_sigma=0.1, - reset_lr=False, - training_freq=50, - training_epochs=100) - - hparams_nlinear = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - reset_lr=True, - lr_decay_rate=0.5, - training_freq=1, - training_freq_network=50, - training_epochs=100, - a0=6, - b0=6, - 
lambda_prior=0.25) - - hparams_nlinear2 = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - reset_lr=True, - lr_decay_rate=0.5, - training_freq=10, - training_freq_network=50, - training_epochs=100, - a0=6, - b0=6, - lambda_prior=0.25) - - hparams_pnoise = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - reset_lr=True, - lr_decay_rate=0.5, - training_freq=50, - training_epochs=100, - noise_std=0.05, - eps=0.1, - d_samples=300, - ) - - hparams_alpha_div = tf.contrib.training.HParams(num_actions=num_actions, - context_dim=context_dim, - init_scale=0.3, - activation=tf.nn.relu, - layer_sizes=[50], - batch_size=512, - activate_decay=True, - initial_lr=0.1, - max_grad_norm=5.0, - show_training=False, - freq_summary=1000, - buffer_s=-1, - initial_pulls=2, - optimizer='RMS', - use_sigma_exp_transform=True, - cleared_times_trained=10, - initial_training_steps=100, - noise_sigma=0.1, - reset_lr=False, - training_freq=50, - training_epochs=100, - alpha=1.0, - k=20, - prior_variance=0.1) - - hparams_gp = tf.contrib.training.HParams(num_actions=num_actions, - num_outputs=num_actions, - context_dim=context_dim, - reset_lr=False, - learn_embeddings=True, - max_num_points=1000, - show_training=False, - freq_summary=1000, - batch_size=512, - keep_fixed_after_max_obs=True, - training_freq=50, - initial_pulls=2, - training_epochs=100, - lr=0.01, - buffer_s=-1, - initial_lr=0.001, - lr_decay_rate=0.0, - optimizer='RMS', - task_latent_dim=5, - activate_decay=False) - - algos = [ - UniformSampling('Uniform Sampling', hparams), - UniformSampling('Uniform Sampling 2', hparams), - FixedPolicySampling('fixed1', [0.75, 0.25], hparams), - FixedPolicySampling('fixed2', [0.25, 0.75], hparams), - PosteriorBNNSampling('RMS', hparams_rms, 'RMSProp'), - PosteriorBNNSampling('Dropout', hparams_dropout, 'RMSProp'), - PosteriorBNNSampling('BBB', hparams_bbb, 'Variational'), - NeuralLinearPosteriorSampling('NeuralLinear', hparams_nlinear), - NeuralLinearPosteriorSampling('NeuralLinear2', hparams_nlinear2), - LinearFullPosteriorSampling('LinFullPost', hparams_linear), - BootstrappedBNNSampling('BootRMS', hparams_rms), - ParameterNoiseSampling('ParamNoise', hparams_pnoise), - PosteriorBNNSampling('BBAlphaDiv', hparams_alpha_div, 'AlphaDiv'), - PosteriorBNNSampling('MultitaskGP', hparams_gp, 'GP'), - ] - - # Run contextual bandit problem - t_init = time.time() - results = run_contextual_bandit(context_dim, num_actions, dataset, algos) - _, h_rewards = results - - # Display results - display_results(algos, opt_rewards, opt_actions, h_rewards, t_init, data_type) - -if __name__ == '__main__': - app.run(main) diff --git a/research/domain_adaptation/README.md b/research/domain_adaptation/README.md deleted file mode 100644 index e8a2b8379..000000000 --- a/research/domain_adaptation/README.md +++ /dev/null @@ -1,124 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 
1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-## Introduction
-This is the code used for two domain adaptation papers.
-
-The `domain_separation` directory contains code for the "Domain Separation
-Networks" paper by Bousmalis K., Trigeorgis G., et al. which was presented at
-NIPS 2016. The paper can be found here: https://arxiv.org/abs/1608.06019.
-
-The `pixel_domain_adaptation` directory contains the code used for the
-"Unsupervised Pixel-Level Domain Adaptation with Generative Adversarial
-Networks" paper by Bousmalis K., et al. (presented at CVPR 2017). The paper can
-be found here: https://arxiv.org/abs/1612.05424. PixelDA aims to perform domain
-adaptation by transferring the visual style of the target domain (which has few
-or no labels) to a source domain (which has many labels). This is accomplished
-using a Generative Adversarial Network (GAN).
-
-### Other implementations
-* [Simplified-DSN](https://github.com/AmirHussein96/Simplified-DSN):
-  An unofficial implementation of the [Domain Separation Networks paper](https://arxiv.org/abs/1608.06019).
-
-## Contact
-The domain separation code was open-sourced
-by [Konstantinos Bousmalis](https://github.com/bousmalis)
-(konstantinos@google.com), while the pixel-level domain adaptation code was
-open-sourced by [David Dohan](https://github.com/dmrd) (ddohan@google.com).
-
-## Installation
-You will need to have the following installed on your machine before trying out the DSN code.
-
-* TensorFlow 1.x: https://www.tensorflow.org/install/
-* Bazel: https://bazel.build/
-
-## Initial setup
-In order to run the MNIST to MNIST-M experiments, you will need to set the
-data directory:
-
-```
-$ export DSN_DATA_DIR=/your/dir
-```
-
-Add models and models/slim to your `$PYTHONPATH` (assumes $PWD is /models):
-
-```
-$ export PYTHONPATH=$PYTHONPATH:$PWD:$PWD/slim
-```
-
-## Getting the datasets
-
-You can fetch the MNIST data by running
-
-```
-$ bazel run slim:download_and_convert_data -- --dataset_dir $DSN_DATA_DIR --dataset_name=mnist
-```
-
-The MNIST-M dataset is available online [here](http://bit.ly/2nrlUAJ). Once it is downloaded and extracted into your data directory, create TFRecord files by running:
-```
-$ bazel run domain_adaptation/datasets:download_and_convert_mnist_m -- --dataset_dir $DSN_DATA_DIR
-```
-
-## Running PixelDA from MNIST to MNIST-M
-You can run PixelDA as follows (using TensorBoard to examine the results):
-
-```
-$ bazel run domain_adaptation/pixel_domain_adaptation:pixelda_train -- --dataset_dir $DSN_DATA_DIR --source_dataset mnist --target_dataset mnist_m
-```
-
-Run evaluation with:
-```
-$ bazel run domain_adaptation/pixel_domain_adaptation:pixelda_eval -- --dataset_dir $DSN_DATA_DIR --source_dataset mnist --target_dataset mnist_m --target_split_name test
-```
-
-The MNIST-M results in the paper were run with the following hparams flag:
-```
---hparams arch=resnet,domain_loss_weight=0.135603587834,num_training_examples=16000000,style_transfer_loss_weight=0.0113173311334,task_loss_in_g_weight=0.0100959947002,task_tower=mnist,task_tower_in_g_step=true
-```
-
-### A note on terminology/language of the code:
-
-The components of the network can be grouped into two parts,
-corresponding to the elements that are jointly optimized: the generator
-component and the discriminator component.
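For intuition, here is a minimal, purely illustrative TF1 sketch of such a jointly optimized pair. This is not the PixelDA architecture; every layer size and name below is invented for the example:

```
# Illustrative only: a tiny generator/discriminator pair and the two losses
# that are optimized jointly. All shapes and layer sizes are hypothetical.
import tensorflow as tf  # TF 1.x

def generator(noise):
  """Maps a noise vector to a fake 28x28 RGB image in [-1, 1]."""
  with tf.variable_scope('generator'):
    net = tf.layers.dense(noise, 7 * 7 * 32, activation=tf.nn.relu)
    net = tf.reshape(net, [-1, 7, 7, 32])
    net = tf.layers.conv2d_transpose(net, 16, 4, strides=2, padding='same',
                                     activation=tf.nn.relu)
    return tf.layers.conv2d_transpose(net, 3, 4, strides=2, padding='same',
                                      activation=tf.nn.tanh)

def discriminator(images, reuse=False):
  """Returns a logit per image: high for real target images, low for fakes."""
  with tf.variable_scope('discriminator', reuse=reuse):
    net = tf.layers.conv2d(images, 16, 4, strides=2, padding='same',
                           activation=tf.nn.leaky_relu)
    net = tf.layers.conv2d(net, 32, 4, strides=2, padding='same',
                           activation=tf.nn.leaky_relu)
    return tf.layers.dense(tf.layers.flatten(net), 1)

real_images = tf.placeholder(tf.float32, [32, 28, 28, 3])
fake_images = generator(tf.random_normal([32, 64]))
real_logits = discriminator(real_images)
fake_logits = discriminator(fake_images, reuse=True)

# The discriminator learns to separate real from generated images...
d_loss = (tf.losses.sigmoid_cross_entropy(tf.ones_like(real_logits), real_logits)
          + tf.losses.sigmoid_cross_entropy(tf.zeros_like(fake_logits), fake_logits))
# ...while the generator learns to fool it.
g_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(fake_logits), fake_logits)
```

The two descriptions below map onto `generator` and `discriminator` in this sketch.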
- -The generator component takes either an image or noise vector and produces an -output image. - -The discriminator component takes the generated images and the target images -and attempts to discriminate between them. - -## Running DSN code for adapting MNIST to MNIST-M - -Then you need to build the binaries with Bazel: - -``` -$ bazel build -c opt domain_adaptation/domain_separation/... -``` - -You can then train with the following command: - -``` -$ ./bazel-bin/domain_adaptation/domain_separation/dsn_train \ - --similarity_loss=dann_loss \ - --basic_tower=dann_mnist \ - --source_dataset=mnist \ - --target_dataset=mnist_m \ - --learning_rate=0.0117249 \ - --gamma_weight=0.251175 \ - --weight_decay=1e-6 \ - --layers_to_regularize=fc3 \ - --nouse_separation \ - --master="" \ - --dataset_dir=${DSN_DATA_DIR} \ - -v --use_logging -``` - -Evaluation can be invoked with the following command: - -``` -$ ./bazel-bin/domain_adaptation/domain_separation/dsn_eval \ - -v --dataset mnist_m --split test --num_examples=9001 \ - --dataset_dir=${DSN_DATA_DIR} -``` diff --git a/research/domain_adaptation/WORKSPACE b/research/domain_adaptation/WORKSPACE deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/domain_adaptation/__init__.py b/research/domain_adaptation/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/domain_adaptation/datasets/BUILD b/research/domain_adaptation/datasets/BUILD deleted file mode 100644 index 067a79374..000000000 --- a/research/domain_adaptation/datasets/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -# Domain Adaptation Scenarios Datasets - -package( - default_visibility = [ - ":internal", - ], -) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = [ - "//domain_adaptation/...", - ], -) - -py_library( - name = "dataset_factory", - srcs = ["dataset_factory.py"], - deps = [ - ":mnist_m", - "//slim:mnist", - ], -) - -py_binary( - name = "download_and_convert_mnist_m", - srcs = ["download_and_convert_mnist_m.py"], - deps = [ - - "//slim:dataset_utils", - ], -) - -py_binary( - name = "mnist_m", - srcs = ["mnist_m.py"], - deps = [ - - "//slim:dataset_utils", - ], -) diff --git a/research/domain_adaptation/datasets/__init__.py b/research/domain_adaptation/datasets/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/domain_adaptation/datasets/dataset_factory.py b/research/domain_adaptation/datasets/dataset_factory.py deleted file mode 100644 index 4ca1b41c4..000000000 --- a/research/domain_adaptation/datasets/dataset_factory.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
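As a quick reference, here is a minimal usage sketch for the `provide_batch` factory defined below; the data directory and sizes are hypothetical, and the MNIST TFRecords are assumed to already exist:

# Usage sketch only -- not part of dataset_factory.py. Assumes TF 1.x and that
# the MNIST TFRecords were created as described in the README above.
import tensorflow as tf

from domain_adaptation.datasets import dataset_factory

images, labels = dataset_factory.provide_batch(
    'mnist', 'train', '/your/dir',
    num_readers=4, batch_size=32, num_preprocessing_threads=4)

# MonitoredTrainingSession starts the queue runners that tf.train.batch needs.
with tf.train.MonitoredTrainingSession() as sess:
  image_batch, class_batch = sess.run([images, labels['classes']])
  print(image_batch.shape)  # (32, 32, 32, 3): MNIST resized to 32x32 RGB
  print(class_batch.shape)  # (32, 10): one-hot class labels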
-
-"""A factory-pattern class which returns image/label pairs."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-import tensorflow as tf
-
-from slim.datasets import mnist
-from domain_adaptation.datasets import mnist_m
-
-slim = tf.contrib.slim
-
-
-def get_dataset(dataset_name,
-                split_name,
-                dataset_dir,
-                file_pattern=None,
-                reader=None):
-  """Given a dataset name and a split_name returns a Dataset.
-
-  Args:
-    dataset_name: String, the name of the dataset.
-    split_name: A train/test split name.
-    dataset_dir: The directory where the dataset files are stored.
-    file_pattern: The file pattern to use for matching the dataset source files.
-    reader: The subclass of tf.ReaderBase. If left as `None`, then the default
-      reader defined by each dataset is used.
-
-  Returns:
-    A tf-slim `Dataset`.
-
-  Raises:
-    ValueError: if `dataset_name` isn't recognized.
-  """
-  dataset_name_to_module = {'mnist': mnist, 'mnist_m': mnist_m}
-  if dataset_name not in dataset_name_to_module:
-    raise ValueError('Unknown dataset name: %s.' % dataset_name)
-
-  return dataset_name_to_module[dataset_name].get_split(split_name, dataset_dir,
-                                                        file_pattern, reader)
-
-
-def provide_batch(dataset_name, split_name, dataset_dir, num_readers,
-                  batch_size, num_preprocessing_threads):
-  """Provides a batch of images and corresponding labels.
-
-  Args:
-    dataset_name: String, the name of the dataset.
-    split_name: A train/test split name.
-    dataset_dir: The directory where the dataset files are stored.
-    num_readers: The number of readers used by DatasetDataProvider.
-    batch_size: The size of the batch requested.
-    num_preprocessing_threads: The number of preprocessing threads for
-      tf.train.batch.
-
-  Returns:
-    A batch of
-      images: tensor of [batch_size, height, width, channels].
-      labels: dictionary of labels.
-  """
-  dataset = get_dataset(dataset_name, split_name, dataset_dir)
-  provider = slim.dataset_data_provider.DatasetDataProvider(
-      dataset,
-      num_readers=num_readers,
-      common_queue_capacity=20 * batch_size,
-      common_queue_min=10 * batch_size)
-  [image, label] = provider.get(['image', 'label'])
-
-  # Convert images to float32 and rescale from [0, 1] to [-1, 1].
-  image = tf.image.convert_image_dtype(image, tf.float32)
-  image -= 0.5
-  image *= 2
-
-  # Load the data.
-  labels = {}
-  images, labels['classes'] = tf.train.batch(
-      [image, label],
-      batch_size=batch_size,
-      num_threads=num_preprocessing_threads,
-      capacity=5 * batch_size)
-  labels['classes'] = slim.one_hot_encoding(labels['classes'],
-                                            dataset.num_classes)
-
-  # Convert mnist to RGB and 32x32 so that it can match mnist_m.
-  if dataset_name == 'mnist':
-    images = tf.image.grayscale_to_rgb(images)
-    images = tf.image.resize_images(images, [32, 32])
-  return images, labels
diff --git a/research/domain_adaptation/datasets/download_and_convert_mnist_m.py b/research/domain_adaptation/datasets/download_and_convert_mnist_m.py
deleted file mode 100644
index 3b5004d3d..000000000
--- a/research/domain_adaptation/datasets/download_and_convert_mnist_m.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-r"""Downloads and converts MNIST-M data to TFRecords of TF-Example protos.
-
-This module downloads the MNIST-M data, uncompresses it, reads the files
-that make up the MNIST-M data and creates two TFRecord datasets: one for train
-and one for test. Each TFRecord dataset is comprised of a set of TF-Example
-protocol buffers, each of which contain a single image and label.
-
-The script should take about a minute to run.
-
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-import random
-import sys
-
-# Dependency imports
-import numpy as np
-from six.moves import urllib
-import tensorflow as tf
-
-from slim.datasets import dataset_utils
-
-tf.app.flags.DEFINE_string(
-    'dataset_dir', None,
-    'The directory where the output TFRecords and temporary files are saved.')
-
-FLAGS = tf.app.flags.FLAGS
-
-_IMAGE_SIZE = 32
-_NUM_CHANNELS = 3
-
-# The number of images in the training set.
-_NUM_TRAIN_SAMPLES = 59001
-
-# The number of images to be kept from the training set for the validation set.
-_NUM_VALIDATION = 1000
-
-# The number of images in the test set.
-_NUM_TEST_SAMPLES = 9001
-
-# Seed for repeatability.
-_RANDOM_SEED = 0
-
-# The names of the classes.
-_CLASS_NAMES = [
-    'zero',
-    'one',
-    'two',
-    'three',
-    'four',
-    'five',
-    'six',
-    'seven',
-    'eight',
-    'nine',
-]
-
-
-class ImageReader(object):
-  """Helper class that provides TensorFlow image coding utilities."""
-
-  def __init__(self):
-    # Initializes function that decodes RGB PNG data.
-    self._decode_png_data = tf.placeholder(dtype=tf.string)
-    self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
-
-  def read_image_dims(self, sess, image_data):
-    image = self.decode_png(sess, image_data)
-    return image.shape[0], image.shape[1]
-
-  def decode_png(self, sess, image_data):
-    image = sess.run(
-        self._decode_png, feed_dict={self._decode_png_data: image_data})
-    assert len(image.shape) == 3
-    assert image.shape[2] == 3
-    return image
-
-
-def _convert_dataset(split_name, filenames, filename_to_class_id, dataset_dir):
-  """Converts the given filenames to a TFRecord dataset.
-
-  Args:
-    split_name: The name of the split, one of 'train', 'valid' or 'test'.
-    filenames: A list of absolute paths to png images.
-    filename_to_class_id: A dictionary from filenames (strings) to class ids
-      (integers).
-    dataset_dir: The directory where the converted datasets are stored.
-  """
-  print('Converting the {} split.'.format(split_name))
-  # Train and validation splits are both in the train directory.
-  if split_name in ['train', 'valid']:
-    png_directory = os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train')
-  elif split_name == 'test':
-    png_directory = os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test')
-
-  with tf.Graph().as_default():
-    image_reader = ImageReader()
-
-    with tf.Session('') as sess:
-      output_filename = _get_output_filename(dataset_dir, split_name)
-
-      with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
-        for filename in filenames:
-          # Read the PNG bytes (binary mode, so this also works on Python 3):
-          image_data = tf.gfile.FastGFile(
-              os.path.join(png_directory, filename), 'rb').read()
-          height, width = image_reader.read_image_dims(sess, image_data)
-
-          class_id = filename_to_class_id[filename]
-          example = dataset_utils.image_to_tfexample(image_data, 'png', height,
-                                                     width, class_id)
-          tfrecord_writer.write(example.SerializeToString())
-
-  sys.stdout.write('\n')
-  sys.stdout.flush()
-
-
-def _extract_labels(label_filename):
-  """Extract the labels into a dict of filenames to int labels.
-
-  Args:
-    label_filename: The filename of the MNIST-M labels.
-
-  Returns:
-    A dictionary of filenames to int labels.
-  """
-  print('Extracting labels from: ', label_filename)
-  label_file = tf.gfile.FastGFile(label_filename, 'r').readlines()
-  label_lines = [line.rstrip('\n').split() for line in label_file]
-  labels = {}
-  for line in label_lines:
-    assert len(line) == 2
-    labels[line[0]] = int(line[1])
-  return labels
-
-
-def _get_output_filename(dataset_dir, split_name):
-  """Creates the output filename.
-
-  Args:
-    dataset_dir: The directory where the temporary files are stored.
-    split_name: The name of the train/test split.
-
-  Returns:
-    An absolute file path.
-  """
-  return '%s/mnist_m_%s.tfrecord' % (dataset_dir, split_name)
-
-
-def _get_filenames(dataset_dir):
-  """Returns a list of image file names in `dataset_dir`.
-
-  Args:
-    dataset_dir: A directory containing a set of PNG-encoded MNIST-M images.
-
-  Returns:
-    A list of image file names, relative to `dataset_dir`.
-  """
-  photo_filenames = []
-  for filename in os.listdir(dataset_dir):
-    photo_filenames.append(filename)
-  return photo_filenames
-
-
-def run(dataset_dir):
-  """Runs the download and conversion operation.
-
-  Args:
-    dataset_dir: The dataset directory where the dataset is stored.
-  """
-  if not tf.gfile.Exists(dataset_dir):
-    tf.gfile.MakeDirs(dataset_dir)
-
-  train_filename = _get_output_filename(dataset_dir, 'train')
-  testing_filename = _get_output_filename(dataset_dir, 'test')
-
-  if tf.gfile.Exists(train_filename) and tf.gfile.Exists(testing_filename):
-    print('Dataset files already exist. Exiting without re-creating them.')
-    return
-
-  # TODO(konstantinos): Add download and cleanup functionality
-
-  train_validation_filenames = _get_filenames(
-      os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train'))
-  test_filenames = _get_filenames(
-      os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test'))
-
-  # Divide into train and validation:
-  random.seed(_RANDOM_SEED)
-  random.shuffle(train_validation_filenames)
-  train_filenames = train_validation_filenames[_NUM_VALIDATION:]
-  validation_filenames = train_validation_filenames[:_NUM_VALIDATION]
-
-  train_validation_filenames_to_class_ids = _extract_labels(
-      os.path.join(dataset_dir, 'mnist_m', 'mnist_m_train_labels.txt'))
-  test_filenames_to_class_ids = _extract_labels(
-      os.path.join(dataset_dir, 'mnist_m', 'mnist_m_test_labels.txt'))
-
-  # Convert the train, validation, and test sets.
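  # Note: 'train' and 'valid' are disjoint slices of the same shuffled
  # directory listing, and 'test' comes from its own directory, so the three
  # TFRecord files do not overlap.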
-  _convert_dataset('train', train_filenames,
-                   train_validation_filenames_to_class_ids, dataset_dir)
-  _convert_dataset('valid', validation_filenames,
-                   train_validation_filenames_to_class_ids, dataset_dir)
-  _convert_dataset('test', test_filenames, test_filenames_to_class_ids,
-                   dataset_dir)
-
-  # Finally, write the labels file:
-  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
-  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
-
-  print('\nFinished converting the MNIST-M dataset!')
-
-
-def main(_):
-  assert FLAGS.dataset_dir
-  run(FLAGS.dataset_dir)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/domain_adaptation/datasets/mnist_m.py b/research/domain_adaptation/datasets/mnist_m.py
deleted file mode 100644
index fab6c443c..000000000
--- a/research/domain_adaptation/datasets/mnist_m.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Provides data for the MNIST-M dataset.
-
-The dataset script used to create the dataset can be found at:
-research/domain_adaptation/datasets/download_and_convert_mnist_m.py
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-# Dependency imports
-import tensorflow as tf
-
-from slim.datasets import dataset_utils
-
-slim = tf.contrib.slim
-
-_FILE_PATTERN = 'mnist_m_%s.tfrecord'
-
-_SPLITS_TO_SIZES = {'train': 58001, 'valid': 1000, 'test': 9001}
-
-_NUM_CLASSES = 10
-
-_ITEMS_TO_DESCRIPTIONS = {
-    'image': 'A [32 x 32 x 3] RGB image.',
-    'label': 'A single integer between 0 and 9',
-}
-
-
-def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
-  """Gets a dataset tuple with instructions for reading MNIST-M.
-
-  Args:
-    split_name: A train/test split name.
-    dataset_dir: The base directory of the dataset sources.
-    file_pattern: The file pattern to use for matching the dataset source files.
-    reader: The subclass of tf.ReaderBase. If left as `None`, then the default
-      reader defined by each dataset is used.
-
-  Returns:
-    A `Dataset` namedtuple.
-
-  Raises:
-    ValueError: if `split_name` is not a valid train/test split.
-  """
-  if split_name not in _SPLITS_TO_SIZES:
-    raise ValueError('split name %s was not recognized.' % split_name)
-
-  if not file_pattern:
-    file_pattern = _FILE_PATTERN
-  file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
-
-  # Allowing None in the signature so that dataset_factory can use the default.
- if reader is None: - reader = tf.TFRecordReader - - keys_to_features = { - 'image/encoded': - tf.FixedLenFeature((), tf.string, default_value=''), - 'image/format': - tf.FixedLenFeature((), tf.string, default_value='png'), - 'image/class/label': - tf.FixedLenFeature( - [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), - } - - items_to_handlers = { - 'image': slim.tfexample_decoder.Image(shape=[32, 32, 3], channels=3), - 'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]), - } - - decoder = slim.tfexample_decoder.TFExampleDecoder( - keys_to_features, items_to_handlers) - - labels_to_names = None - if dataset_utils.has_labels(dataset_dir): - labels_to_names = dataset_utils.read_label_file(dataset_dir) - - return slim.dataset.Dataset( - data_sources=file_pattern, - reader=reader, - decoder=decoder, - num_samples=_SPLITS_TO_SIZES[split_name], - num_classes=_NUM_CLASSES, - items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, - labels_to_names=labels_to_names) diff --git a/research/domain_adaptation/domain_separation/BUILD b/research/domain_adaptation/domain_separation/BUILD deleted file mode 100644 index 14dceda27..000000000 --- a/research/domain_adaptation/domain_separation/BUILD +++ /dev/null @@ -1,157 +0,0 @@ -# Domain Separation Networks - -package( - default_visibility = [ - ":internal", - ], -) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = [ - "//domain_adaptation/...", - ], -) - -py_library( - name = "models", - srcs = [ - "models.py", - ], - deps = [ - ":utils", - ], -) - -py_library( - name = "losses", - srcs = [ - "losses.py", - ], - deps = [ - ":grl_op_grads_py", - ":grl_op_shapes_py", - ":grl_ops", - ":utils", - ], -) - -py_test( - name = "losses_test", - srcs = [ - "losses_test.py", - ], - deps = [ - ":losses", - ":utils", - ], -) - -py_library( - name = "dsn", - srcs = [ - "dsn.py", - ], - deps = [ - ":grl_op_grads_py", - ":grl_op_shapes_py", - ":grl_ops", - ":losses", - ":models", - ":utils", - ], -) - -py_test( - name = "dsn_test", - srcs = [ - "dsn_test.py", - ], - deps = [ - ":dsn", - ], -) - -py_binary( - name = "dsn_train", - srcs = [ - "dsn_train.py", - ], - deps = [ - ":dsn", - ":models", - "//domain_adaptation/datasets:dataset_factory", - ], -) - -py_binary( - name = "dsn_eval", - srcs = [ - "dsn_eval.py", - ], - deps = [ - ":dsn", - ":models", - "//domain_adaptation/datasets:dataset_factory", - ], -) - -py_test( - name = "models_test", - srcs = [ - "models_test.py", - ], - deps = [ - ":models", - "//domain_adaptation/datasets:dataset_factory", - ], -) - -py_library( - name = "utils", - srcs = [ - "utils.py", - ], - deps = [ - ], -) - -py_library( - name = "grl_op_grads_py", - srcs = [ - "grl_op_grads.py", - ], - deps = [ - ":grl_ops", - ], -) - -py_library( - name = "grl_op_shapes_py", - srcs = [ - "grl_op_shapes.py", - ], - deps = [ - ], -) - -py_library( - name = "grl_ops", - srcs = ["grl_ops.py"], - data = ["_grl_ops.so"], -) - -py_test( - name = "grl_ops_test", - size = "small", - srcs = ["grl_ops_test.py"], - deps = [ - ":grl_op_grads_py", - ":grl_op_shapes_py", - ":grl_ops", - ], -) diff --git a/research/domain_adaptation/domain_separation/__init__.py b/research/domain_adaptation/domain_separation/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/domain_adaptation/domain_separation/_grl_ops.so b/research/domain_adaptation/domain_separation/_grl_ops.so deleted file mode 100755 index 
4c35473760a76dcb743d58f45eddccecb5f5161e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 26002: base85-encoded binary data for the deleted _grl_ops.so omitted]

diff --git a/research/domain_adaptation/domain_separation/dsn.py b/research/domain_adaptation/domain_separation/dsn.py
deleted file mode 100644
index 3018e8a79..000000000
--- a/research/domain_adaptation/domain_separation/dsn.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Functions to create a DSN model and add the different losses to it.
-
-Specifically, in this file we define the:
-  - Shared Encoding Similarity Loss Module, with:
-    - The MMD Similarity method
-    - The Correlation Similarity method
-    - The Gradient Reversal (Domain-Adversarial) method
-  - Difference Loss Module
-  - Reconstruction Loss Module
-  - Task Loss Module
-"""
-from functools import partial
-
-import tensorflow as tf
-
-import losses
-import models
-import utils
-
-slim = tf.contrib.slim
-
-
-################################################################################
-# HELPER FUNCTIONS
-################################################################################
-def dsn_loss_coefficient(params):
-  """The global_step-dependent weight that turns on the DSN losses.
-
-  Args:
-    params: A dictionary of parameters. Expecting 'domain_separation_startpoint'.
-
-  Returns:
-    A weight that effectively enables or disables the DSN-related losses,
-    i.e. similarity, difference, and reconstruction losses: 1e-10 while
-    global_step < 'domain_separation_startpoint', and 1.0 afterwards.
-  """
-  return tf.where(
-      tf.less(slim.get_or_create_global_step(),
-              params['domain_separation_startpoint']), 1e-10, 1.0)
-
-
-################################################################################
-# MODEL CREATION
-################################################################################
-def create_model(source_images, source_labels, domain_selection_mask,
-                 target_images, target_labels, similarity_loss, params,
-                 basic_tower_name):
-  """Creates a DSN model.
-
-  Args:
-    source_images: images from the source domain, a tensor of size
-      [batch_size, height, width, channels]
-    source_labels: a dictionary of name, tensor pairs. 'classes' is one-
-      hot for the number of classes.
-    domain_selection_mask: a boolean tensor of size [batch_size, ] which denotes
-      the labeled images that belong to the source domain.
-    target_images: images from the target domain, a tensor of size
-      [batch_size, height, width, channels].
-    target_labels: a dictionary of name, tensor pairs.
-    similarity_loss: The type of method to use for encouraging
-      the codes from the shared encoder to be similar.
-    params: A dictionary of parameters. Expecting 'weight_decay',
-      'layers_to_regularize', 'use_separation', 'domain_separation_startpoint',
-      'alpha_weight', 'beta_weight', 'gamma_weight', 'recon_loss_name',
-      'decoder_name', 'encoder_name'
-    basic_tower_name: the name of the tower to use for the shared encoder.
-
-  Raises:
-    ValueError: if the arch is not one of the available architectures.
-  """
-  network = getattr(models, basic_tower_name)
-  num_classes = source_labels['classes'].get_shape().as_list()[1]
-
-  # Make sure we are using the appropriate number of classes.
-  network = partial(network, num_classes=num_classes)
-
-  # Add the classification/pose estimation loss to the source domain.
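  # add_task_loss also returns the source tower endpoints; the endpoint named
  # by params['layers_to_regularize'] is reused below as the shared
  # representation that the similarity and difference losses operate on.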
- source_endpoints = add_task_loss(source_images, source_labels, network, - params) - - if similarity_loss == 'none': - # No domain adaptation, we can stop here. - return - - with tf.variable_scope('towers', reuse=True): - target_logits, target_endpoints = network( - target_images, weight_decay=params['weight_decay'], prefix='target') - - # Plot target accuracy of the train set. - target_accuracy = utils.accuracy( - tf.argmax(target_logits, 1), tf.argmax(target_labels['classes'], 1)) - - if 'quaternions' in target_labels: - target_quaternion_loss = losses.log_quaternion_loss( - target_labels['quaternions'], target_endpoints['quaternion_pred'], - params) - tf.summary.scalar('eval/Target quaternions', target_quaternion_loss) - - tf.summary.scalar('eval/Target accuracy', target_accuracy) - - source_shared = source_endpoints[params['layers_to_regularize']] - target_shared = target_endpoints[params['layers_to_regularize']] - - # When using the semisupervised model we include labeled target data in the - # source classifier. We do not want to include these target domain when - # we use the similarity loss. - indices = tf.range(0, source_shared.get_shape().as_list()[0]) - indices = tf.boolean_mask(indices, domain_selection_mask) - add_similarity_loss(similarity_loss, - tf.gather(source_shared, indices), - tf.gather(target_shared, indices), params) - - if params['use_separation']: - add_autoencoders( - source_images, - source_shared, - target_images, - target_shared, - params=params,) - - -def add_similarity_loss(method_name, - source_samples, - target_samples, - params, - scope=None): - """Adds a loss encouraging the shared encoding from each domain to be similar. - - Args: - method_name: the name of the encoding similarity method to use. Valid - options include `dann_loss', `mmd_loss' or `correlation_loss'. - source_samples: a tensor of shape [num_samples, num_features]. - target_samples: a tensor of shape [num_samples, num_features]. - params: a dictionary of parameters. Expecting 'gamma_weight'. - scope: optional name scope for summary tags. - Raises: - ValueError: if `method_name` is not recognized. - """ - weight = dsn_loss_coefficient(params) * params['gamma_weight'] - method = getattr(losses, method_name) - method(source_samples, target_samples, weight, scope) - - -def add_reconstruction_loss(recon_loss_name, images, recons, weight, domain): - """Adds a reconstruction loss. - - Args: - recon_loss_name: The name of the reconstruction loss. - images: A `Tensor` of size [batch_size, height, width, 3]. - recons: A `Tensor` whose size matches `images`. - weight: A scalar coefficient for the loss. - domain: The name of the domain being reconstructed. - - Raises: - ValueError: If `recon_loss_name` is not recognized. - """ - if recon_loss_name == 'sum_of_pairwise_squares': - loss_fn = tf.contrib.losses.mean_pairwise_squared_error - elif recon_loss_name == 'sum_of_squares': - loss_fn = tf.contrib.losses.mean_squared_error - else: - raise ValueError('recon_loss_name value [%s] not recognized.' % - recon_loss_name) - - loss = loss_fn(recons, images, weight) - assert_op = tf.Assert(tf.is_finite(loss), [loss]) - with tf.control_dependencies([assert_op]): - tf.summary.scalar('losses/%s Recon Loss' % domain, loss) - - -def add_autoencoders(source_data, source_shared, target_data, target_shared, - params): - """Adds the encoders/decoders for our domain separation model w/ incoherence. 
-
-  Args:
-    source_data: images from the source domain, a tensor of size
-      [batch_size, height, width, channels]
-    source_shared: a tensor with first dimension batch_size
-    target_data: images from the target domain, a tensor of size
-      [batch_size, height, width, channels]
-    target_shared: a tensor with first dimension batch_size
-    params: A dictionary of parameters. Expecting 'layers_to_regularize',
-      'beta_weight', 'alpha_weight', 'recon_loss_name', 'decoder_name',
-      'encoder_name', 'weight_decay'
-  """
-
-  def normalize_images(images):
-    images -= tf.reduce_min(images)
-    return images / tf.reduce_max(images)
-
-  def concat_operation(shared_repr, private_repr):
-    return shared_repr + private_repr
-
-  mu = dsn_loss_coefficient(params)
-
-  # The layer to concatenate the networks at.
-  concat_layer = params['layers_to_regularize']
-
-  # The coefficient for modulating the private/shared difference loss.
-  difference_loss_weight = params['beta_weight'] * mu
-
-  # The reconstruction weight.
-  recon_loss_weight = params['alpha_weight'] * mu
-
-  # The reconstruction loss to use.
-  recon_loss_name = params['recon_loss_name']
-
-  # The decoder/encoder to use.
-  decoder_name = params['decoder_name']
-  encoder_name = params['encoder_name']
-
-  _, height, width, _ = source_data.get_shape().as_list()
-  code_size = source_shared.get_shape().as_list()[-1]
-  weight_decay = params['weight_decay']
-
-  encoder_fn = getattr(models, encoder_name)
-  # Encode the source and target images with their private encoders.
-  with tf.variable_scope('source_encoder'):
-    source_endpoints = encoder_fn(
-        source_data, code_size, weight_decay=weight_decay)
-
-  with tf.variable_scope('target_encoder'):
-    target_endpoints = encoder_fn(
-        target_data, code_size, weight_decay=weight_decay)
-
-  decoder_fn = getattr(models, decoder_name)
-
-  decoder = partial(
-      decoder_fn,
-      height=height,
-      width=width,
-      channels=source_data.get_shape().as_list()[-1],
-      weight_decay=weight_decay)
-
-  # Decode each domain from its shared + private code.
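  # All reconstructions below reuse one decoder (reuse=True), so source and
  # target share decoder weights; decoding with one code zeroed out isolates
  # the contribution of the other code for the image summaries further down.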
- source_private = source_endpoints[concat_layer] - target_private = target_endpoints[concat_layer] - with tf.variable_scope('decoder'): - source_recons = decoder(concat_operation(source_shared, source_private)) - - with tf.variable_scope('decoder', reuse=True): - source_private_recons = decoder( - concat_operation(tf.zeros_like(source_private), source_private)) - source_shared_recons = decoder( - concat_operation(source_shared, tf.zeros_like(source_shared))) - - with tf.variable_scope('decoder', reuse=True): - target_recons = decoder(concat_operation(target_shared, target_private)) - target_shared_recons = decoder( - concat_operation(target_shared, tf.zeros_like(target_shared))) - target_private_recons = decoder( - concat_operation(tf.zeros_like(target_private), target_private)) - - losses.difference_loss( - source_private, - source_shared, - weight=difference_loss_weight, - name='Source') - losses.difference_loss( - target_private, - target_shared, - weight=difference_loss_weight, - name='Target') - - add_reconstruction_loss(recon_loss_name, source_data, source_recons, - recon_loss_weight, 'source') - add_reconstruction_loss(recon_loss_name, target_data, target_recons, - recon_loss_weight, 'target') - - # Add summaries - source_reconstructions = tf.concat( - axis=2, - values=map(normalize_images, [ - source_data, source_recons, source_shared_recons, - source_private_recons - ])) - target_reconstructions = tf.concat( - axis=2, - values=map(normalize_images, [ - target_data, target_recons, target_shared_recons, - target_private_recons - ])) - tf.summary.image( - 'Source Images:Recons:RGB', - source_reconstructions[:, :, :, :3], - max_outputs=10) - tf.summary.image( - 'Target Images:Recons:RGB', - target_reconstructions[:, :, :, :3], - max_outputs=10) - - if source_reconstructions.get_shape().as_list()[3] == 4: - tf.summary.image( - 'Source Images:Recons:Depth', - source_reconstructions[:, :, :, 3:4], - max_outputs=10) - tf.summary.image( - 'Target Images:Recons:Depth', - target_reconstructions[:, :, :, 3:4], - max_outputs=10) - - -def add_task_loss(source_images, source_labels, basic_tower, params): - """Adds a classification and/or pose estimation loss to the model. - - Args: - source_images: images from the source domain, a tensor of size - [batch_size, height, width, channels] - source_labels: labels from the source domain, a tensor of size [batch_size]. - or a tuple of (quaternions, class_labels) - basic_tower: a function that creates the single tower of the model. - params: A dictionary of parameters. Expecting 'weight_decay', 'pose_weight'. - Returns: - The source endpoints. - - Raises: - RuntimeError: if basic tower does not support pose estimation. - """ - with tf.variable_scope('towers'): - source_logits, source_endpoints = basic_tower( - source_images, weight_decay=params['weight_decay'], prefix='Source') - - if 'quaternions' in source_labels: # We have pose estimation as well - if 'quaternion_pred' not in source_endpoints: - raise RuntimeError('Please use a model for estimation e.g. 
pose_mini') - - loss = losses.log_quaternion_loss(source_labels['quaternions'], - source_endpoints['quaternion_pred'], - params) - - assert_op = tf.Assert(tf.is_finite(loss), [loss]) - with tf.control_dependencies([assert_op]): - quaternion_loss = loss - tf.summary.histogram('log_quaternion_loss_hist', quaternion_loss) - slim.losses.add_loss(quaternion_loss * params['pose_weight']) - tf.summary.scalar('losses/quaternion_loss', quaternion_loss) - - classification_loss = tf.losses.softmax_cross_entropy( - source_labels['classes'], source_logits) - - tf.summary.scalar('losses/classification_loss', classification_loss) - return source_endpoints diff --git a/research/domain_adaptation/domain_separation/dsn_eval.py b/research/domain_adaptation/domain_separation/dsn_eval.py deleted file mode 100644 index b6cccdfcc..000000000 --- a/research/domain_adaptation/domain_separation/dsn_eval.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# pylint: disable=line-too-long -"""Evaluation for Domain Separation Networks (DSNs).""" -# pylint: enable=line-too-long -import math - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from domain_adaptation.datasets import dataset_factory -from domain_adaptation.domain_separation import losses -from domain_adaptation.domain_separation import models - -slim = tf.contrib.slim - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_integer('batch_size', 32, - 'The number of images in each batch.') - -tf.app.flags.DEFINE_string('master', '', - 'BNS name of the TensorFlow master to use.') - -tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/', - 'Directory where the model was written to.') - -tf.app.flags.DEFINE_string( - 'eval_dir', '/tmp/da/', - 'Directory where we should write the tf summaries to.') - -tf.app.flags.DEFINE_string('dataset_dir', None, - 'The directory where the dataset files are stored.') - -tf.app.flags.DEFINE_string('dataset', 'mnist_m', - 'Which dataset to test on: "mnist", "mnist_m".') - -tf.app.flags.DEFINE_string('split', 'valid', - 'Which portion to test on: "valid", "test".') - -tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.') - -tf.app.flags.DEFINE_string('basic_tower', 'dann_mnist', - 'The basic tower building block.') - -tf.app.flags.DEFINE_bool('enable_precision_recall', False, - 'If True, precision and recall for each class will ' - 'be added to the metrics.') - -tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.') - - -def quaternion_metric(predictions, labels): - params = {'batch_size': FLAGS.batch_size, 'use_logging': False} - logcost = losses.log_quaternion_loss_batch(predictions, labels, params) - return slim.metrics.streaming_mean(logcost) - - -def angle_diff(true_q, pred_q): - angles = 2 * ( - 180.0 / - np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1))) - return 
angles - - -def provide_batch_fn(): - """ The provide_batch function to use. """ - return dataset_factory.provide_batch - - -def main(_): - g = tf.Graph() - with g.as_default(): - # Load the data. - images, labels = provide_batch_fn()( - FLAGS.dataset, FLAGS.split, FLAGS.dataset_dir, 4, FLAGS.batch_size, 4) - - num_classes = labels['classes'].get_shape().as_list()[1] - - tf.summary.image('eval_images', images, max_outputs=3) - - # Define the model: - with tf.variable_scope('towers'): - basic_tower = getattr(models, FLAGS.basic_tower) - predictions, endpoints = basic_tower( - images, - num_classes=num_classes, - is_training=False, - batch_norm_params=None) - metric_names_to_values = {} - - # Define the metrics: - if 'quaternions' in labels: # Also have to evaluate pose estimation! - quaternion_loss = quaternion_metric(labels['quaternions'], - endpoints['quaternion_pred']) - - angle_errors, = tf.py_func( - angle_diff, [labels['quaternions'], endpoints['quaternion_pred']], - [tf.float32]) - - metric_names_to_values[ - 'Angular mean error'] = slim.metrics.streaming_mean(angle_errors) - metric_names_to_values['Quaternion Loss'] = quaternion_loss - - accuracy = tf.contrib.metrics.streaming_accuracy( - tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1)) - - predictions = tf.argmax(predictions, 1) - labels = tf.argmax(labels['classes'], 1) - metric_names_to_values['Accuracy'] = accuracy - - if FLAGS.enable_precision_recall: - for i in xrange(num_classes): - index_map = tf.one_hot(i, depth=num_classes) - name = 'PR/Precision_{}'.format(i) - metric_names_to_values[name] = slim.metrics.streaming_precision( - tf.gather(index_map, predictions), tf.gather(index_map, labels)) - name = 'PR/Recall_{}'.format(i) - metric_names_to_values[name] = slim.metrics.streaming_recall( - tf.gather(index_map, predictions), tf.gather(index_map, labels)) - - names_to_values, names_to_updates = slim.metrics.aggregate_metric_map( - metric_names_to_values) - - # Create the summary ops such that they also print out to std output: - summary_ops = [] - for metric_name, metric_value in names_to_values.iteritems(): - op = tf.summary.scalar(metric_name, metric_value) - op = tf.Print(op, [metric_value], metric_name) - summary_ops.append(op) - - # This ensures that we make a single pass over all of the data. - num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)) - - # Setup the global step. - slim.get_or_create_global_step() - slim.evaluation.evaluation_loop( - FLAGS.master, - checkpoint_dir=FLAGS.checkpoint_dir, - logdir=FLAGS.eval_dir, - num_evals=num_batches, - eval_op=names_to_updates.values(), - summary_op=tf.summary.merge(summary_ops)) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/domain_adaptation/domain_separation/dsn_test.py b/research/domain_adaptation/domain_separation/dsn_test.py deleted file mode 100644 index 3d687398a..000000000 --- a/research/domain_adaptation/domain_separation/dsn_test.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for DSN model assembly functions.""" - -import numpy as np -import tensorflow as tf - -import dsn - - -class HelperFunctionsTest(tf.test.TestCase): - - def testBasicDomainSeparationStartPoint(self): - with self.test_session() as sess: - # Test for when global_step < domain_separation_startpoint - step = tf.contrib.slim.get_or_create_global_step() - sess.run(tf.global_variables_initializer()) # global_step = 0 - params = {'domain_separation_startpoint': 2} - weight = dsn.dsn_loss_coefficient(params) - weight_np = sess.run(weight) - self.assertAlmostEqual(weight_np, 1e-10) - - step_op = tf.assign_add(step, 1) - step_np = sess.run(step_op) # global_step = 1 - weight = dsn.dsn_loss_coefficient(params) - weight_np = sess.run(weight) - self.assertAlmostEqual(weight_np, 1e-10) - - # Test for when global_step >= domain_separation_startpoint - step_np = sess.run(step_op) # global_step = 2 - tf.logging.info(step_np) - weight = dsn.dsn_loss_coefficient(params) - weight_np = sess.run(weight) - self.assertAlmostEqual(weight_np, 1.0) - - -class DsnModelAssemblyTest(tf.test.TestCase): - - def _testBuildDefaultModel(self): - images = tf.to_float(np.random.rand(32, 28, 28, 1)) - labels = {} - labels['classes'] = tf.one_hot( - tf.to_int32(np.random.randint(0, 9, (32))), 10) - - params = { - 'use_separation': True, - 'layers_to_regularize': 'fc3', - 'weight_decay': 0.0, - 'ps_tasks': 1, - 'domain_separation_startpoint': 1, - 'alpha_weight': 1, - 'beta_weight': 1, - 'gamma_weight': 1, - 'recon_loss_name': 'sum_of_squares', - 'decoder_name': 'small_decoder', - 'encoder_name': 'default_encoder', - } - return images, labels, params - - def testBuildModelDann(self): - images, labels, params = self._testBuildDefaultModel() - - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'dann_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 6) - - def testBuildModelDannSumOfPairwiseSquares(self): - images, labels, params = self._testBuildDefaultModel() - - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'dann_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 6) - - def testBuildModelDannMultiPSTasks(self): - images, labels, params = self._testBuildDefaultModel() - params['ps_tasks'] = 10 - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'dann_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 6) - - def testBuildModelMmd(self): - images, labels, params = self._testBuildDefaultModel() - - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'mmd_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 6) - - def testBuildModelCorr(self): - images, labels, params = self._testBuildDefaultModel() - - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'correlation_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - 
self.assertEqual(len(loss_tensors), 6) - - def testBuildModelNoDomainAdaptation(self): - images, labels, params = self._testBuildDefaultModel() - params['use_separation'] = False - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none', - params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 1) - self.assertEqual(len(tf.contrib.losses.get_regularization_losses()), 0) - - def testBuildModelNoAdaptationWeightDecay(self): - images, labels, params = self._testBuildDefaultModel() - params['use_separation'] = False - params['weight_decay'] = 1e-5 - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none', - params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 1) - self.assertTrue(len(tf.contrib.losses.get_regularization_losses()) >= 1) - - def testBuildModelNoSeparation(self): - images, labels, params = self._testBuildDefaultModel() - params['use_separation'] = False - with self.test_session(): - dsn.create_model(images, labels, - tf.cast(tf.ones([32,]), tf.bool), images, labels, - 'dann_loss', params, 'dann_mnist') - loss_tensors = tf.contrib.losses.get_losses() - self.assertEqual(len(loss_tensors), 2) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/domain_adaptation/domain_separation/dsn_train.py b/research/domain_adaptation/domain_separation/dsn_train.py deleted file mode 100644 index 5e364ad30..000000000 --- a/research/domain_adaptation/domain_separation/dsn_train.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Training for Domain Separation Networks (DSNs).""" -from __future__ import division - -import tensorflow as tf - -from domain_adaptation.datasets import dataset_factory -import dsn - -slim = tf.contrib.slim -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_integer('batch_size', 32, - 'The number of images in each batch.') - -tf.app.flags.DEFINE_string('source_dataset', 'pose_synthetic', - 'Source dataset to train on.') - -tf.app.flags.DEFINE_string('target_dataset', 'pose_real', - 'Target dataset to train on.') - -tf.app.flags.DEFINE_string('target_labeled_dataset', 'none', - 'Target dataset to train on.') - -tf.app.flags.DEFINE_string('dataset_dir', None, - 'The directory where the dataset files are stored.') - -tf.app.flags.DEFINE_string('master', '', - 'BNS name of the TensorFlow master to use.') - -tf.app.flags.DEFINE_string('train_log_dir', '/tmp/da/', - 'Directory where to write event logs.') - -tf.app.flags.DEFINE_string( - 'layers_to_regularize', 'fc3', - 'Comma-separated list of layer names to use MMD regularization on.') - -tf.app.flags.DEFINE_float('learning_rate', .01, 'The learning rate') - -tf.app.flags.DEFINE_float('alpha_weight', 1e-6, - 'The coefficient for scaling the reconstruction ' - 'loss.') - -tf.app.flags.DEFINE_float( - 'beta_weight', 1e-6, - 'The coefficient for scaling the private/shared difference loss.') - -tf.app.flags.DEFINE_float( - 'gamma_weight', 1e-6, - 'The coefficient for scaling the shared encoding similarity loss.') - -tf.app.flags.DEFINE_float('pose_weight', 0.125, - 'The coefficient for scaling the pose loss.') - -tf.app.flags.DEFINE_float( - 'weight_decay', 1e-6, - 'The coefficient for the L2 regularization applied for all weights.') - -tf.app.flags.DEFINE_integer( - 'save_summaries_secs', 60, - 'The frequency with which summaries are saved, in seconds.') - -tf.app.flags.DEFINE_integer( - 'save_interval_secs', 60, - 'The frequency with which the model is saved, in seconds.') - -tf.app.flags.DEFINE_integer( - 'max_number_of_steps', None, - 'The maximum number of gradient steps. Use None to train indefinitely.') - -tf.app.flags.DEFINE_integer( - 'domain_separation_startpoint', 1, - 'The global step to add the domain separation losses.') - -tf.app.flags.DEFINE_integer( - 'bipartite_assignment_top_k', 3, - 'The number of top-k matches to use in bipartite matching adaptation.') - -tf.app.flags.DEFINE_float('decay_rate', 0.95, 'Learning rate decay factor.') - -tf.app.flags.DEFINE_integer('decay_steps', 20000, 'Learning rate decay steps.') - -tf.app.flags.DEFINE_float('momentum', 0.9, 'The momentum value.') - -tf.app.flags.DEFINE_bool('use_separation', False, - 'Use our domain separation model.') - -tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.') - -tf.app.flags.DEFINE_integer( - 'ps_tasks', 0, - 'The number of parameter servers. If the value is 0, then the parameters ' - 'are handled locally by the worker.') - -tf.app.flags.DEFINE_integer( - 'num_readers', 4, - 'The number of parallel readers that read data from the dataset.') - -tf.app.flags.DEFINE_integer('num_preprocessing_threads', 4, - 'The number of threads used to create the batches.') - -tf.app.flags.DEFINE_integer( - 'task', 0, - 'The Task ID. 
This value is used when training with multiple workers to ' - 'identify each worker.') - -tf.app.flags.DEFINE_string('decoder_name', 'small_decoder', - 'The decoder to use.') -tf.app.flags.DEFINE_string('encoder_name', 'default_encoder', - 'The encoder to use.') - -################################################################################ -# Flags that control the architecture and losses -################################################################################ -tf.app.flags.DEFINE_string( - 'similarity_loss', 'grl', - 'The method to use for encouraging the common encoder codes to be ' - 'similar, one of "grl", "mmd", "corr".') - -tf.app.flags.DEFINE_string('recon_loss_name', 'sum_of_pairwise_squares', - 'The name of the reconstruction loss.') - -tf.app.flags.DEFINE_string('basic_tower', 'pose_mini', - 'The basic tower building block.') - -def provide_batch_fn(): - """ The provide_batch function to use. """ - return dataset_factory.provide_batch - -def main(_): - model_params = { - 'use_separation': FLAGS.use_separation, - 'domain_separation_startpoint': FLAGS.domain_separation_startpoint, - 'layers_to_regularize': FLAGS.layers_to_regularize, - 'alpha_weight': FLAGS.alpha_weight, - 'beta_weight': FLAGS.beta_weight, - 'gamma_weight': FLAGS.gamma_weight, - 'pose_weight': FLAGS.pose_weight, - 'recon_loss_name': FLAGS.recon_loss_name, - 'decoder_name': FLAGS.decoder_name, - 'encoder_name': FLAGS.encoder_name, - 'weight_decay': FLAGS.weight_decay, - 'batch_size': FLAGS.batch_size, - 'use_logging': FLAGS.use_logging, - 'ps_tasks': FLAGS.ps_tasks, - 'task': FLAGS.task, - } - g = tf.Graph() - with g.as_default(): - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): - # Load the data. - source_images, source_labels = provide_batch_fn()( - FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, - FLAGS.batch_size, FLAGS.num_preprocessing_threads) - target_images, target_labels = provide_batch_fn()( - FLAGS.target_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers, - FLAGS.batch_size, FLAGS.num_preprocessing_threads) - - # In the unsupervised case all the samples in the labeled - # domain are from the source domain. - domain_selection_mask = tf.fill((source_images.get_shape().as_list()[0],), - True) - - # When using the semisupervised model we include labeled target data in - # the source labelled data. - if FLAGS.target_labeled_dataset != 'none': - # 1000 is the maximum number of labelled target samples that exists in - # the datasets. - target_semi_images, target_semi_labels = provide_batch_fn()( - FLAGS.target_labeled_dataset, 'train', FLAGS.batch_size) - - # Calculate the proportion of source domain samples in the semi- - # supervised setting, so that the proportion is set accordingly in the - # batches. 
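The code that follows draws one uniform random value per batch element and keeps the source example whenever it falls below `proportion`, so the mixed batch preserves the source/target ratio in expectation; tf.where with a 1-D boolean mask selects along the batch (outer) dimension. A standalone sketch of the same selection logic (illustrative shapes, not part of the patch):

import tensorflow as tf

batch_size = 4
source = tf.zeros([batch_size, 28, 28, 1])       # stand-in source images
target_semi = tf.ones([batch_size, 28, 28, 1])   # stand-in labeled target images
proportion = 0.75                                # expected fraction of source rows

mask = tf.random_uniform((batch_size,)) < proportion
mixed = tf.where(mask, source, target_semi)      # row-wise selection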
- proportion = float(source_labels['num_train_samples']) / ( - source_labels['num_train_samples'] + - target_semi_labels['num_train_samples']) - - rnd_tensor = tf.random_uniform( - (target_semi_images.get_shape().as_list()[0],)) - - domain_selection_mask = rnd_tensor < proportion - source_images = tf.where(domain_selection_mask, source_images, - target_semi_images) - source_class_labels = tf.where(domain_selection_mask, - source_labels['classes'], - target_semi_labels['classes']) - - if 'quaternions' in source_labels: - source_pose_labels = tf.where(domain_selection_mask, - source_labels['quaternions'], - target_semi_labels['quaternions']) - (source_images, source_class_labels, source_pose_labels, - domain_selection_mask) = tf.train.shuffle_batch( - [ - source_images, source_class_labels, source_pose_labels, - domain_selection_mask - ], - FLAGS.batch_size, - 50000, - 5000, - num_threads=1, - enqueue_many=True) - - else: - (source_images, source_class_labels, - domain_selection_mask) = tf.train.shuffle_batch( - [source_images, source_class_labels, domain_selection_mask], - FLAGS.batch_size, - 50000, - 5000, - num_threads=1, - enqueue_many=True) - source_labels = {} - source_labels['classes'] = source_class_labels - if 'quaternions' in source_labels: - source_labels['quaternions'] = source_pose_labels - - slim.get_or_create_global_step() - tf.summary.image('source_images', source_images, max_outputs=3) - tf.summary.image('target_images', target_images, max_outputs=3) - - dsn.create_model( - source_images, - source_labels, - domain_selection_mask, - target_images, - target_labels, - FLAGS.similarity_loss, - model_params, - basic_tower_name=FLAGS.basic_tower) - - # Configure the optimization scheme: - learning_rate = tf.train.exponential_decay( - FLAGS.learning_rate, - slim.get_or_create_global_step(), - FLAGS.decay_steps, - FLAGS.decay_rate, - staircase=True, - name='learning_rate') - - tf.summary.scalar('learning_rate', learning_rate) - tf.summary.scalar('total_loss', tf.losses.get_total_loss()) - - opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) - tf.logging.set_verbosity(tf.logging.INFO) - # Run training. - loss_tensor = slim.learning.create_train_op( - slim.losses.get_total_loss(), - opt, - summarize_gradients=True, - colocate_gradients_with_ops=True) - slim.learning.train( - train_op=loss_tensor, - logdir=FLAGS.train_log_dir, - master=FLAGS.master, - is_chief=FLAGS.task == 0, - number_of_steps=FLAGS.max_number_of_steps, - save_summaries_secs=FLAGS.save_summaries_secs, - save_interval_secs=FLAGS.save_interval_secs) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/domain_adaptation/domain_separation/grl_op_grads.py b/research/domain_adaptation/domain_separation/grl_op_grads.py deleted file mode 100644 index fcd85ba2b..000000000 --- a/research/domain_adaptation/domain_separation/grl_op_grads.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
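With the flag defaults above (learning_rate=0.01, decay_steps=20000, decay_rate=0.95) and staircase=True, tf.train.exponential_decay multiplies the rate by 0.95 once per 20000 global steps. A quick check of the staircase arithmetic in plain Python (illustrative only):

def staircase_lr(step, base=0.01, decay_steps=20000, decay_rate=0.95):
  # Mirrors tf.train.exponential_decay with staircase=True.
  return base * decay_rate ** (step // decay_steps)

assert staircase_lr(19999) == 0.01                  # still in the first step
assert abs(staircase_lr(20000) - 0.0095) < 1e-12    # one decay applied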
-# ============================================================================== - -"""Gradients for operators defined in grl_ops.py.""" -import tensorflow as tf - - -@tf.RegisterGradient("GradientReversal") -def _GradientReversalGrad(_, grad): - """The gradients for `gradient_reversal`. - - Args: - _: The `gradient_reversal` `Operation` that we are differentiating, - which we can use to find the inputs and outputs of the original op. - grad: Gradient with respect to the output of the `gradient_reversal` op. - - Returns: - Gradient with respect to the input of `gradient_reversal`, which is simply - the negative of the input gradient. - - """ - return tf.negative(grad) diff --git a/research/domain_adaptation/domain_separation/grl_op_kernels.cc b/research/domain_adaptation/domain_separation/grl_op_kernels.cc deleted file mode 100644 index ba30128f1..000000000 --- a/research/domain_adaptation/domain_separation/grl_op_kernels.cc +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2016 The TensorFlow Authors All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file contains the implementations of the ops registered in -// grl_ops.cc. - -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/types.pb.h" - -namespace tensorflow { - -// The gradient reversal op is used in domain adversarial training. It behaves -// as the identity op during forward propagation, and multiplies its input by -1 -// during backward propagation. -class GradientReversalOp : public OpKernel { - public: - explicit GradientReversalOp(OpKernelConstruction* context) - : OpKernel(context) {} - - // Gradient reversal op behaves as the identity op during forward - // propagation. Compute() function copied from the IdentityOp::Compute() - // function here: third_party/tensorflow/core/kernels/identity_op.h. - void Compute(OpKernelContext* context) override { - if (IsRefType(context->input_dtype(0))) { - context->forward_ref_input_to_ref_output(0, 0); - } else { - context->set_output(0, context->input(0)); - } - } -}; - -REGISTER_KERNEL_BUILDER(Name("GradientReversal").Device(DEVICE_CPU), - GradientReversalOp); - -} // namespace tensorflow diff --git a/research/domain_adaptation/domain_separation/grl_op_shapes.py b/research/domain_adaptation/domain_separation/grl_op_shapes.py deleted file mode 100644 index 52773c680..000000000 --- a/research/domain_adaptation/domain_separation/grl_op_shapes.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
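Together, the C++ kernel above (identity in the forward pass) and the Python gradient registration (tf.negative on the incoming gradient) implement gradient reversal. For reference, the same behaviour can be sketched without a compiled op using tf.custom_gradient, available in TensorFlow 1.7+ (an illustrative alternative, not the op removed by this patch):

import tensorflow as tf

@tf.custom_gradient
def gradient_reversal(x):
  """Acts as the identity in the forward pass; negates gradients backward."""
  def grad(dy):
    return tf.negative(dy)
  return tf.identity(x), grad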
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Shape inference for operators defined in grl_ops.cc.""" diff --git a/research/domain_adaptation/domain_separation/grl_ops.cc b/research/domain_adaptation/domain_separation/grl_ops.cc deleted file mode 100644 index d441c2b48..000000000 --- a/research/domain_adaptation/domain_separation/grl_ops.cc +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright 2016 The TensorFlow Authors All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Contains custom ops. - -#include "tensorflow/core/framework/common_shape_fns.h" -#include "tensorflow/core/framework/op.h" - -namespace tensorflow { - -// This custom op is used by adversarial training. -REGISTER_OP("GradientReversal") - .Input("input: float") - .Output("output: float") - .SetShapeFn(shape_inference::UnchangedShape) - .Doc(R"doc( -This op copies the input to the output during forward propagation, and -negates the input during backward propagation. - -input: Tensor. -output: Tensor, copied from input. -)doc"); - -} // namespace tensorflow diff --git a/research/domain_adaptation/domain_separation/grl_ops.py b/research/domain_adaptation/domain_separation/grl_ops.py deleted file mode 100644 index 50447247b..000000000 --- a/research/domain_adaptation/domain_separation/grl_ops.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""GradientReversal op Python library.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os.path - -import tensorflow as tf - -tf.logging.info(tf.resource_loader.get_data_files_path()) -_grl_ops_module = tf.load_op_library( - os.path.join(tf.resource_loader.get_data_files_path(), - '_grl_ops.so')) -gradient_reversal = _grl_ops_module.gradient_reversal diff --git a/research/domain_adaptation/domain_separation/grl_ops_test.py b/research/domain_adaptation/domain_separation/grl_ops_test.py deleted file mode 100644 index b431a6c02..000000000 --- a/research/domain_adaptation/domain_separation/grl_ops_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for grl_ops.""" - -#from models.domain_adaptation.domain_separation import grl_op_grads # pylint: disable=unused-import -#from models.domain_adaptation.domain_separation import grl_op_shapes # pylint: disable=unused-import -import tensorflow as tf - -import grl_op_grads -import grl_ops - -FLAGS = tf.app.flags.FLAGS - - -class GRLOpsTest(tf.test.TestCase): - - def testGradientReversalOp(self): - with tf.Graph().as_default(): - with self.test_session(): - # Test that in forward prop, gradient reversal op acts as the - # identity operation. - examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0]) - output = grl_ops.gradient_reversal(examples) - expected_output = examples - self.assertAllEqual(output.eval(), expected_output.eval()) - - # Test that shape inference works as expected. - self.assertAllEqual(output.get_shape(), expected_output.get_shape()) - - # Test that in backward prop, gradient reversal op multiplies - # gradients by -1. - examples = tf.constant([[1.0]]) - w = tf.get_variable(name='w', shape=[1, 1]) - b = tf.get_variable(name='b', shape=[1]) - init_op = tf.global_variables_initializer() - init_op.run() - features = tf.nn.xw_plus_b(examples, w, b) - # Construct two outputs: features layer passes directly to output1, but - # features layer passes through a gradient reversal layer before - # reaching output2. - output1 = features - output2 = grl_ops.gradient_reversal(features) - gold = tf.constant([1.0]) - loss1 = gold - output1 - loss2 = gold - output2 - opt = tf.train.GradientDescentOptimizer(learning_rate=0.01) - grads_and_vars_1 = opt.compute_gradients(loss1, - tf.trainable_variables()) - grads_and_vars_2 = opt.compute_gradients(loss2, - tf.trainable_variables()) - self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2)) - for i in range(len(grads_and_vars_1)): - g1 = grads_and_vars_1[i][0] - g2 = grads_and_vars_2[i][0] - # Verify that gradients of loss1 are the negative of gradients of - # loss2. 
- self.assertAllEqual(tf.negative(g1).eval(), g2.eval()) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/domain_adaptation/domain_separation/losses.py b/research/domain_adaptation/domain_separation/losses.py deleted file mode 100644 index 0d882340d..000000000 --- a/research/domain_adaptation/domain_separation/losses.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Domain Adaptation Loss Functions. - -The following domain adaptation loss functions are defined: - -- Maximum Mean Discrepancy (MMD). - Relevant paper: - Gretton, Arthur, et al., - "A kernel two-sample test." - The Journal of Machine Learning Research, 2012 - -- Correlation Loss on a batch. -""" -from functools import partial -import tensorflow as tf - -import grl_op_grads # pylint: disable=unused-import -import grl_op_shapes # pylint: disable=unused-import -import grl_ops -import utils -slim = tf.contrib.slim - - -################################################################################ -# SIMILARITY LOSS -################################################################################ -def maximum_mean_discrepancy(x, y, kernel=utils.gaussian_kernel_matrix): - r"""Computes the Maximum Mean Discrepancy (MMD) of two samples: x and y. - - Maximum Mean Discrepancy (MMD) is a distance-measure between the samples of - the distributions of x and y. Here we use the kernel two sample estimate - using the empirical mean of the two distributions. - - MMD^2(P, Q) = || \E{\phi(x)} - \E{\phi(y)} ||^2 - = \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }, - - where K = <\phi(x), \phi(y)>, - is the desired kernel function, in this case a radial basis kernel. - - Args: - x: a tensor of shape [num_samples, num_features] - y: a tensor of shape [num_samples, num_features] - kernel: a function which computes the kernel in MMD. Defaults to the - GaussianKernelMatrix. - - Returns: - a scalar denoting the squared maximum mean discrepancy loss. - """ - with tf.name_scope('MaximumMeanDiscrepancy'): - # \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) } - cost = tf.reduce_mean(kernel(x, x)) - cost += tf.reduce_mean(kernel(y, y)) - cost -= 2 * tf.reduce_mean(kernel(x, y)) - - # We do not allow the loss to become negative. - cost = tf.where(cost > 0, cost, 0, name='value') - return cost - - -def mmd_loss(source_samples, target_samples, weight, scope=None): - """Adds a similarity loss term, the MMD between two representations. - - This Maximum Mean Discrepancy (MMD) loss is calculated with a number of - different Gaussian kernels. - - Args: - source_samples: a tensor of shape [num_samples, num_features]. - target_samples: a tensor of shape [num_samples, num_features]. - weight: the weight of the MMD loss. - scope: optional name scope for summary tags. - - Returns: - a scalar tensor representing the MMD loss value. 
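maximum_mean_discrepancy() above computes the biased estimate MMD^2(P, Q) = E[k(x, x)] + E[k(y, y)] - 2 E[k(x, y)], clipped at zero. It can be sanity-checked against a direct NumPy computation for a single-sigma Gaussian kernel, using the same exp(-||a - b||^2 / (2 sigma)) convention as utils.gaussian_kernel_matrix (a minimal sketch with made-up data, not part of the patch):

import numpy as np

def rbf_kernel(a, b, sigma=1.0):
  # Pairwise Gaussian kernel matrix between the rows of a and b.
  sq_dists = ((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=-1)
  return np.exp(-sq_dists / (2.0 * sigma))

x = np.random.randn(8, 3)
y = np.random.randn(8, 3)
mmd2 = (rbf_kernel(x, x).mean() + rbf_kernel(y, y).mean()
        - 2.0 * rbf_kernel(x, y).mean())
mmd2 = max(mmd2, 0.0)  # the TF version also clips negative estimates to zero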
- """ - sigmas = [ - 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, - 1e3, 1e4, 1e5, 1e6 - ] - gaussian_kernel = partial( - utils.gaussian_kernel_matrix, sigmas=tf.constant(sigmas)) - - loss_value = maximum_mean_discrepancy( - source_samples, target_samples, kernel=gaussian_kernel) - loss_value = tf.maximum(1e-4, loss_value) * weight - assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value]) - with tf.control_dependencies([assert_op]): - tag = 'MMD Loss' - if scope: - tag = scope + tag - tf.summary.scalar(tag, loss_value) - tf.losses.add_loss(loss_value) - - return loss_value - - -def correlation_loss(source_samples, target_samples, weight, scope=None): - """Adds a similarity loss term, the correlation between two representations. - - Args: - source_samples: a tensor of shape [num_samples, num_features] - target_samples: a tensor of shape [num_samples, num_features] - weight: a scalar weight for the loss. - scope: optional name scope for summary tags. - - Returns: - a scalar tensor representing the correlation loss value. - """ - with tf.name_scope('corr_loss'): - source_samples -= tf.reduce_mean(source_samples, 0) - target_samples -= tf.reduce_mean(target_samples, 0) - - source_samples = tf.nn.l2_normalize(source_samples, 1) - target_samples = tf.nn.l2_normalize(target_samples, 1) - - source_cov = tf.matmul(tf.transpose(source_samples), source_samples) - target_cov = tf.matmul(tf.transpose(target_samples), target_samples) - - corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight - - assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss]) - with tf.control_dependencies([assert_op]): - tag = 'Correlation Loss' - if scope: - tag = scope + tag - tf.summary.scalar(tag, corr_loss) - tf.losses.add_loss(corr_loss) - - return corr_loss - - -def dann_loss(source_samples, target_samples, weight, scope=None): - """Adds the domain adversarial (DANN) loss. - - Args: - source_samples: a tensor of shape [num_samples, num_features]. - target_samples: a tensor of shape [num_samples, num_features]. - weight: the weight of the loss. - scope: optional name scope for summary tags. - - Returns: - a scalar tensor representing the correlation loss value. - """ - with tf.variable_scope('dann'): - batch_size = tf.shape(source_samples)[0] - samples = tf.concat(axis=0, values=[source_samples, target_samples]) - samples = slim.flatten(samples) - - domain_selection_mask = tf.concat( - axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))]) - - # Perform the gradient reversal and be careful with the shape. 
- grl = grl_ops.gradient_reversal(samples) - grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1])) - - grl = slim.fully_connected(grl, 100, scope='fc1') - logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2') - - domain_predictions = tf.sigmoid(logits) - - domain_loss = tf.losses.log_loss( - domain_selection_mask, domain_predictions, weights=weight) - - domain_accuracy = utils.accuracy( - tf.round(domain_predictions), domain_selection_mask) - - assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss]) - with tf.control_dependencies([assert_op]): - tag_loss = 'losses/domain_loss' - tag_accuracy = 'losses/domain_accuracy' - if scope: - tag_loss = scope + tag_loss - tag_accuracy = scope + tag_accuracy - - tf.summary.scalar(tag_loss, domain_loss) - tf.summary.scalar(tag_accuracy, domain_accuracy) - - return domain_loss - - -################################################################################ -# DIFFERENCE LOSS -################################################################################ -def difference_loss(private_samples, shared_samples, weight=1.0, name=''): - """Adds the difference loss between the private and shared representations. - - Args: - private_samples: a tensor of shape [num_samples, num_features]. - shared_samples: a tensor of shape [num_samples, num_features]. - weight: the weight of the incoherence loss. - name: the name of the tf summary. - """ - private_samples -= tf.reduce_mean(private_samples, 0) - shared_samples -= tf.reduce_mean(shared_samples, 0) - - private_samples = tf.nn.l2_normalize(private_samples, 1) - shared_samples = tf.nn.l2_normalize(shared_samples, 1) - - correlation_matrix = tf.matmul( - private_samples, shared_samples, transpose_a=True) - - cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight - cost = tf.where(cost > 0, cost, 0, name='value') - - tf.summary.scalar('losses/Difference Loss {}'.format(name), - cost) - assert_op = tf.Assert(tf.is_finite(cost), [cost]) - with tf.control_dependencies([assert_op]): - tf.losses.add_loss(cost) - - -################################################################################ -# TASK LOSS -################################################################################ -def log_quaternion_loss_batch(predictions, labels, params): - """A helper function to compute the error between quaternions. - - Args: - predictions: A Tensor of size [batch_size, 4]. - labels: A Tensor of size [batch_size, 4]. - params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. - - Returns: - A Tensor of size [batch_size], denoting the error between the quaternions. 
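Because q and -q describe the same rotation, the quaternion loss whose body follows scores the absolute dot product, log(1e-4 + 1 - |<predictions, labels>|) per example, which is minimal when the quaternions agree up to sign. A NumPy sketch of the per-example computation (illustrative, with hypothetical inputs):

import numpy as np

def log_quaternion_loss_batch_np(predictions, labels):
  # predictions, labels: [batch_size, 4] arrays with unit-norm rows.
  dots = np.sum(predictions * labels, axis=1)
  return np.log(1e-4 + 1.0 - np.abs(dots))

q = np.array([[1.0, 0.0, 0.0, 0.0]])
print(log_quaternion_loss_batch_np(q, q))    # ~log(1e-4): the minimum
print(log_quaternion_loss_batch_np(q, -q))   # identical: sign-invariant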
- """ - use_logging = params['use_logging'] - assertions = [] - if use_logging: - assertions.append( - tf.Assert( - tf.reduce_all( - tf.less( - tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), - 1e-4)), - ['The l2 norm of each prediction quaternion vector should be 1.'])) - assertions.append( - tf.Assert( - tf.reduce_all( - tf.less( - tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), - ['The l2 norm of each label quaternion vector should be 1.'])) - - with tf.control_dependencies(assertions): - product = tf.multiply(predictions, labels) - internal_dot_products = tf.reduce_sum(product, [1]) - - if use_logging: - internal_dot_products = tf.Print( - internal_dot_products, - [internal_dot_products, tf.shape(internal_dot_products)], - 'internal_dot_products:') - - logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) - return logcost - - -def log_quaternion_loss(predictions, labels, params): - """A helper function to compute the mean error between batches of quaternions. - - The caller is expected to add the loss to the graph. - - Args: - predictions: A Tensor of size [batch_size, 4]. - labels: A Tensor of size [batch_size, 4]. - params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. - - Returns: - A Tensor of size 1, denoting the mean error between batches of quaternions. - """ - use_logging = params['use_logging'] - logcost = log_quaternion_loss_batch(predictions, labels, params) - logcost = tf.reduce_sum(logcost, [0]) - batch_size = params['batch_size'] - logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss') - if use_logging: - logcost = tf.Print( - logcost, [logcost], '[logcost]', name='log_quaternion_loss_print') - return logcost diff --git a/research/domain_adaptation/domain_separation/losses_test.py b/research/domain_adaptation/domain_separation/losses_test.py deleted file mode 100644 index 46e50301b..000000000 --- a/research/domain_adaptation/domain_separation/losses_test.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for DSN losses.""" -from functools import partial - -import numpy as np -import tensorflow as tf - -import losses -import utils - - -def MaximumMeanDiscrepancySlow(x, y, sigmas): - num_samples = x.get_shape().as_list()[0] - - def AverageGaussianKernel(x, y, sigmas): - result = 0 - for sigma in sigmas: - dist = tf.reduce_sum(tf.square(x - y)) - result += tf.exp((-1.0 / (2.0 * sigma)) * dist) - return result / num_samples**2 - - total = 0 - - for i in range(num_samples): - for j in range(num_samples): - total += AverageGaussianKernel(x[i, :], x[j, :], sigmas) - total += AverageGaussianKernel(y[i, :], y[j, :], sigmas) - total += -2 * AverageGaussianKernel(x[i, :], y[j, :], sigmas) - - return total - - -class LogQuaternionLossTest(tf.test.TestCase): - - def test_log_quaternion_loss_batch(self): - with self.test_session(): - predictions = tf.random_uniform((10, 4), seed=1) - predictions = tf.nn.l2_normalize(predictions, 1) - labels = tf.random_uniform((10, 4), seed=1) - labels = tf.nn.l2_normalize(labels, 1) - params = {'batch_size': 10, 'use_logging': False} - x = losses.log_quaternion_loss_batch(predictions, labels, params) - self.assertTrue(((10,) == tf.shape(x).eval()).all()) - - -class MaximumMeanDiscrepancyTest(tf.test.TestCase): - - def test_mmd_name(self): - with self.test_session(): - x = tf.random_uniform((2, 3), seed=1) - kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) - loss = losses.maximum_mean_discrepancy(x, x, kernel) - - self.assertEquals(loss.op.name, 'MaximumMeanDiscrepancy/value') - - def test_mmd_is_zero_when_inputs_are_same(self): - with self.test_session(): - x = tf.random_uniform((2, 3), seed=1) - kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) - self.assertEquals(0, losses.maximum_mean_discrepancy(x, x, kernel).eval()) - - def test_fast_mmd_is_similar_to_slow_mmd(self): - with self.test_session(): - x = tf.constant(np.random.normal(size=(2, 3)), tf.float32) - y = tf.constant(np.random.rand(2, 3), tf.float32) - - cost_old = MaximumMeanDiscrepancySlow(x, y, [1.]).eval() - kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([1.])) - cost_new = losses.maximum_mean_discrepancy(x, y, kernel).eval() - - self.assertAlmostEqual(cost_old, cost_new, delta=1e-5) - - def test_multiple_sigmas(self): - with self.test_session(): - x = tf.constant(np.random.normal(size=(2, 3)), tf.float32) - y = tf.constant(np.random.rand(2, 3), tf.float32) - - sigmas = tf.constant([2., 5., 10, 20, 30]) - kernel = partial(utils.gaussian_kernel_matrix, sigmas=sigmas) - cost_old = MaximumMeanDiscrepancySlow(x, y, [2., 5., 10, 20, 30]).eval() - cost_new = losses.maximum_mean_discrepancy(x, y, kernel=kernel).eval() - - self.assertAlmostEqual(cost_old, cost_new, delta=1e-5) - - def test_mmd_is_zero_when_distributions_are_same(self): - - with self.test_session(): - x = tf.random_uniform((1000, 10), seed=1) - y = tf.random_uniform((1000, 10), seed=3) - - kernel = partial(utils.gaussian_kernel_matrix, sigmas=tf.constant([100.])) - loss = losses.maximum_mean_discrepancy(x, y, kernel=kernel).eval() - - self.assertAlmostEqual(0, loss, delta=1e-4) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/domain_adaptation/domain_separation/models.py b/research/domain_adaptation/domain_separation/models.py deleted file mode 100644 index 04ccaf82e..000000000 --- a/research/domain_adaptation/domain_separation/models.py +++ /dev/null @@ -1,443 +0,0 @@ -# 
Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Contains different architectures for the different DSN parts. - -We define here the modules that can be used in the different parts of the DSN -model. -- shared encoder (dsn_cropped_linemod, dann_xxxx) -- private encoder (default_encoder) -- decoder (large_decoder, gtsrb_decoder, small_decoder) -""" -import tensorflow as tf - -#from models.domain_adaptation.domain_separation -import utils - -slim = tf.contrib.slim - - -def default_batch_norm_params(is_training=False): - """Returns default batch normalization parameters for DSNs. - - Args: - is_training: whether or not the model is training. - - Returns: - a dictionary that maps batch norm parameter names (strings) to values. - """ - return { - # Decay for the moving averages. - 'decay': 0.5, - # epsilon to prevent 0s in variance. - 'epsilon': 0.001, - 'is_training': is_training - } - - -################################################################################ -# PRIVATE ENCODERS -################################################################################ -def default_encoder(images, code_size, batch_norm_params=None, - weight_decay=0.0): - """Encodes the given images to codes of the given size. - - Args: - images: a tensor of size [batch_size, height, width, 1]. - code_size: the number of hidden units in the code layer of the classifier. - batch_norm_params: a dictionary that maps batch norm parameter names to - values. - weight_decay: the value for the weight decay coefficient. - - Returns: - end_points: the code of the input. - """ - end_points = {} - with slim.arg_scope( - [slim.conv2d, slim.fully_connected], - weights_regularizer=slim.l2_regularizer(weight_decay), - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_params): - with slim.arg_scope([slim.conv2d], kernel_size=[5, 5], padding='SAME'): - net = slim.conv2d(images, 32, scope='conv1') - net = slim.max_pool2d(net, [2, 2], 2, scope='pool1') - net = slim.conv2d(net, 64, scope='conv2') - net = slim.max_pool2d(net, [2, 2], 2, scope='pool2') - - net = slim.flatten(net) - end_points['flatten'] = net - net = slim.fully_connected(net, code_size, scope='fc1') - end_points['fc3'] = net - return end_points - - -################################################################################ -# DECODERS -################################################################################ -def large_decoder(codes, - height, - width, - channels, - batch_norm_params=None, - weight_decay=0.0): - """Decodes the codes to a fixed output size. - - Args: - codes: a tensor of size [batch_size, code_size]. - height: the height of the output images. - width: the width of the output images. - channels: the number of the output channels. - batch_norm_params: a dictionary that maps batch norm parameter names to - values. 
- weight_decay: the value for the weight decay coefficient. - - Returns: - recons: the reconstruction tensor of shape [batch_size, height, width, 3]. - """ - with slim.arg_scope( - [slim.conv2d, slim.fully_connected], - weights_regularizer=slim.l2_regularizer(weight_decay), - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_params): - net = slim.fully_connected(codes, 600, scope='fc1') - batch_size = net.get_shape().as_list()[0] - net = tf.reshape(net, [batch_size, 10, 10, 6]) - - net = slim.conv2d(net, 32, [5, 5], scope='conv1_1') - - net = tf.image.resize_nearest_neighbor(net, (16, 16)) - - net = slim.conv2d(net, 32, [5, 5], scope='conv2_1') - - net = tf.image.resize_nearest_neighbor(net, (32, 32)) - - net = slim.conv2d(net, 32, [5, 5], scope='conv3_2') - - output_size = [height, width] - net = tf.image.resize_nearest_neighbor(net, output_size) - - with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]): - net = slim.conv2d(net, channels, activation_fn=None, scope='conv4_1') - - return net - - -def gtsrb_decoder(codes, - height, - width, - channels, - batch_norm_params=None, - weight_decay=0.0): - """Decodes the codes to a fixed output size. This decoder is specific to GTSRB - - Args: - codes: a tensor of size [batch_size, 100]. - height: the height of the output images. - width: the width of the output images. - channels: the number of the output channels. - batch_norm_params: a dictionary that maps batch norm parameter names to - values. - weight_decay: the value for the weight decay coefficient. - - Returns: - recons: the reconstruction tensor of shape [batch_size, height, width, 3]. - - Raises: - ValueError: When the input code size is not 100. - """ - batch_size, code_size = codes.get_shape().as_list() - if code_size != 100: - raise ValueError('The code size used as an input to the GTSRB decoder is ' - 'expected to be 100.') - - with slim.arg_scope( - [slim.conv2d, slim.fully_connected], - weights_regularizer=slim.l2_regularizer(weight_decay), - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, - normalizer_params=batch_norm_params): - net = codes - net = tf.reshape(net, [batch_size, 10, 10, 1]) - net = slim.conv2d(net, 32, [3, 3], scope='conv1_1') - - # First upsampling 20x20 - net = tf.image.resize_nearest_neighbor(net, [20, 20]) - - net = slim.conv2d(net, 32, [3, 3], scope='conv2_1') - - output_size = [height, width] - # Final upsampling 40 x 40 - net = tf.image.resize_nearest_neighbor(net, output_size) - - with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]): - net = slim.conv2d(net, 16, scope='conv3_1') - net = slim.conv2d(net, channels, activation_fn=None, scope='conv3_2') - - return net - - -def small_decoder(codes, - height, - width, - channels, - batch_norm_params=None, - weight_decay=0.0): - """Decodes the codes to a fixed output size. - - Args: - codes: a tensor of size [batch_size, code_size]. - height: the height of the output images. - width: the width of the output images. - channels: the number of the output channels. - batch_norm_params: a dictionary that maps batch norm parameter names to - values. - weight_decay: the value for the weight decay coefficient. - - Returns: - recons: the reconstruction tensor of shape [batch_size, height, width, 3]. 
-  """
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      weights_regularizer=slim.l2_regularizer(weight_decay),
-      activation_fn=tf.nn.relu,
-      normalizer_fn=slim.batch_norm,
-      normalizer_params=batch_norm_params):
-    net = slim.fully_connected(codes, 300, scope='fc1')
-    batch_size = net.get_shape().as_list()[0]
-    net = tf.reshape(net, [batch_size, 10, 10, 3])
-
-    net = slim.conv2d(net, 16, [3, 3], scope='conv1_1')
-    net = slim.conv2d(net, 16, [3, 3], scope='conv1_2')
-
-    output_size = [height, width]
-    net = tf.image.resize_nearest_neighbor(net, output_size)
-
-    with slim.arg_scope([slim.conv2d], kernel_size=[3, 3]):
-      net = slim.conv2d(net, 16, scope='conv2_1')
-      net = slim.conv2d(net, channels, activation_fn=None, scope='conv2_2')
-
-  return net
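Each decoder above shares the signature decoder(codes, height, width, channels, batch_norm_params=None, weight_decay=0.0) and follows the same fully-connected -> reshape -> conv -> nearest-neighbor-resize pattern. A usage sketch mirroring the shapes exercised by models_test.py further below (illustrative only; assumes this models module is importable):

import numpy as np
import tensorflow as tf

import models  # this file, as imported by the tests below

codes = tf.to_float(np.random.rand(32, 100))
recons = models.small_decoder(codes, height=28, width=28, channels=3)
# recons has static shape [32, 28, 28, 3].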
-
-
-################################################################################
-# SHARED ENCODERS
-################################################################################
-def dann_mnist(images,
-               weight_decay=0.0,
-               prefix='model',
-               num_classes=10,
-               **kwargs):
-  """Creates a convolutional MNIST model.
-
-  Note that this model implements the architecture for MNIST proposed in:
-  Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN),
-  JMLR 2015
-
-  Args:
-    images: the MNIST digits, a tensor of size [batch_size, 28, 28, 1].
-    weight_decay: the value for the weight decay coefficient.
-    prefix: name of the model to use when prefixing tags.
-    num_classes: the number of output classes to use.
-    **kwargs: Placeholder for keyword arguments used by other shared encoders.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-  end_points = {}
-
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      weights_regularizer=slim.l2_regularizer(weight_decay),
-      activation_fn=tf.nn.relu,):
-    with slim.arg_scope([slim.conv2d], padding='SAME'):
-      end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')
-      end_points['pool1'] = slim.max_pool2d(
-          end_points['conv1'], [2, 2], 2, scope='pool1')
-      end_points['conv2'] = slim.conv2d(
-          end_points['pool1'], 48, [5, 5], scope='conv2')
-      end_points['pool2'] = slim.max_pool2d(
-          end_points['conv2'], [2, 2], 2, scope='pool2')
-      end_points['fc3'] = slim.fully_connected(
-          slim.flatten(end_points['pool2']), 100, scope='fc3')
-      end_points['fc4'] = slim.fully_connected(
-          slim.flatten(end_points['fc3']), 100, scope='fc4')
-
-  logits = slim.fully_connected(
-      end_points['fc4'], num_classes, activation_fn=None, scope='fc5')
-
-  return logits, end_points
-
-
-def dann_svhn(images,
-              weight_decay=0.0,
-              prefix='model',
-              num_classes=10,
-              **kwargs):
-  """Creates the convolutional SVHN model.
-
-  Note that this model implements the architecture for SVHN proposed in:
-  Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN),
-  JMLR 2015
-
-  Args:
-    images: the SVHN digits, a tensor of size [batch_size, 32, 32, 3].
-    weight_decay: the value for the weight decay coefficient.
-    prefix: name of the model to use when prefixing tags.
-    num_classes: the number of output classes to use.
-    **kwargs: Placeholder for keyword arguments used by other shared encoders.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-
-  end_points = {}
-
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      weights_regularizer=slim.l2_regularizer(weight_decay),
-      activation_fn=tf.nn.relu,):
-    with slim.arg_scope([slim.conv2d], padding='SAME'):
-
-      end_points['conv1'] = slim.conv2d(images, 64, [5, 5], scope='conv1')
-      end_points['pool1'] = slim.max_pool2d(
-          end_points['conv1'], [3, 3], 2, scope='pool1')
-      end_points['conv2'] = slim.conv2d(
-          end_points['pool1'], 64, [5, 5], scope='conv2')
-      end_points['pool2'] = slim.max_pool2d(
-          end_points['conv2'], [3, 3], 2, scope='pool2')
-      end_points['conv3'] = slim.conv2d(
-          end_points['pool2'], 128, [5, 5], scope='conv3')
-
-      end_points['fc3'] = slim.fully_connected(
-          slim.flatten(end_points['conv3']), 3072, scope='fc3')
-      end_points['fc4'] = slim.fully_connected(
-          slim.flatten(end_points['fc3']), 2048, scope='fc4')
-
-  logits = slim.fully_connected(
-      end_points['fc4'], num_classes, activation_fn=None, scope='fc5')
-
-  return logits, end_points
-
-
-def dann_gtsrb(images,
-               weight_decay=0.0,
-               prefix='model',
-               num_classes=43,
-               **kwargs):
-  """Creates the convolutional GTSRB model.
-
-  Note that this model implements the architecture for GTSRB proposed in:
-  Y. Ganin et al., Domain-Adversarial Training of Neural Networks (DANN),
-  JMLR 2015
-
-  Args:
-    images: the GTSRB images, a tensor of size [batch_size, 40, 40, 3].
-    weight_decay: the value for the weight decay coefficient.
-    prefix: name of the model to use when prefixing tags.
-    num_classes: the number of output classes to use.
-    **kwargs: Placeholder for keyword arguments used by other shared encoders.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-
-  end_points = {}
-
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      weights_regularizer=slim.l2_regularizer(weight_decay),
-      activation_fn=tf.nn.relu,):
-    with slim.arg_scope([slim.conv2d], padding='SAME'):
-
-      end_points['conv1'] = slim.conv2d(images, 96, [5, 5], scope='conv1')
-      end_points['pool1'] = slim.max_pool2d(
-          end_points['conv1'], [2, 2], 2, scope='pool1')
-      end_points['conv2'] = slim.conv2d(
-          end_points['pool1'], 144, [3, 3], scope='conv2')
-      end_points['pool2'] = slim.max_pool2d(
-          end_points['conv2'], [2, 2], 2, scope='pool2')
-      end_points['conv3'] = slim.conv2d(
-          end_points['pool2'], 256, [5, 5], scope='conv3')
-      end_points['pool3'] = slim.max_pool2d(
-          end_points['conv3'], [2, 2], 2, scope='pool3')
-
-      end_points['fc3'] = slim.fully_connected(
-          slim.flatten(end_points['pool3']), 512, scope='fc3')
-
-  logits = slim.fully_connected(
-      end_points['fc3'], num_classes, activation_fn=None, scope='fc4')
-
-  return logits, end_points
-
-
-def dsn_cropped_linemod(images,
-                        weight_decay=0.0,
-                        prefix='model',
-                        num_classes=11,
-                        batch_norm_params=None,
-                        is_training=False):
-  """Creates the convolutional pose estimation model for Cropped Linemod.
-
-  Args:
-    images: the Cropped Linemod samples, a tensor of size
-      [batch_size, 64, 64, 4].
-    weight_decay: the value for the weight decay coefficient.
-    prefix: name of the model to use when prefixing tags.
-    num_classes: the number of output classes to use.
-    batch_norm_params: a dictionary that maps batch norm parameter names to
-      values.
-    is_training: specifies whether or not we're currently training the model.
-      This variable will determine the behaviour of the dropout layer.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
- a dictionary with key/values the layer names and tensors. - """ - - end_points = {} - - tf.summary.image('{}/input_images'.format(prefix), images) - with slim.arg_scope( - [slim.conv2d, slim.fully_connected], - weights_regularizer=slim.l2_regularizer(weight_decay), - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm if batch_norm_params else None, - normalizer_params=batch_norm_params): - with slim.arg_scope([slim.conv2d], padding='SAME'): - end_points['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1') - end_points['pool1'] = slim.max_pool2d( - end_points['conv1'], [2, 2], 2, scope='pool1') - end_points['conv2'] = slim.conv2d( - end_points['pool1'], 64, [5, 5], scope='conv2') - end_points['pool2'] = slim.max_pool2d( - end_points['conv2'], [2, 2], 2, scope='pool2') - net = slim.flatten(end_points['pool2']) - end_points['fc3'] = slim.fully_connected(net, 128, scope='fc3') - net = slim.dropout( - end_points['fc3'], 0.5, is_training=is_training, scope='dropout') - - with tf.variable_scope('quaternion_prediction'): - predicted_quaternion = slim.fully_connected( - net, 4, activation_fn=tf.nn.tanh) - predicted_quaternion = tf.nn.l2_normalize(predicted_quaternion, 1) - logits = slim.fully_connected( - net, num_classes, activation_fn=None, scope='fc4') - end_points['quaternion_pred'] = predicted_quaternion - - return logits, end_points diff --git a/research/domain_adaptation/domain_separation/models_test.py b/research/domain_adaptation/domain_separation/models_test.py deleted file mode 100644 index 69d1a2725..000000000 --- a/research/domain_adaptation/domain_separation/models_test.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for DSN components.""" - -import numpy as np -import tensorflow as tf - -#from models.domain_adaptation.domain_separation -import models - - -class SharedEncodersTest(tf.test.TestCase): - - def _testSharedEncoder(self, - input_shape=[5, 28, 28, 1], - model=models.dann_mnist, - is_training=True): - images = tf.to_float(np.random.rand(*input_shape)) - - with self.test_session() as sess: - logits, _ = model(images) - sess.run(tf.global_variables_initializer()) - logits_np = sess.run(logits) - return logits_np - - def testBuildGRLMnistModel(self): - logits = self._testSharedEncoder(model=getattr(models, - 'dann_mnist')) - self.assertEqual(logits.shape, (5, 10)) - self.assertTrue(np.any(logits)) - - def testBuildGRLSvhnModel(self): - logits = self._testSharedEncoder(model=getattr(models, - 'dann_svhn')) - self.assertEqual(logits.shape, (5, 10)) - self.assertTrue(np.any(logits)) - - def testBuildGRLGtsrbModel(self): - logits = self._testSharedEncoder([5, 40, 40, 3], - getattr(models, 'dann_gtsrb')) - self.assertEqual(logits.shape, (5, 43)) - self.assertTrue(np.any(logits)) - - def testBuildPoseModel(self): - logits = self._testSharedEncoder([5, 64, 64, 4], - getattr(models, 'dsn_cropped_linemod')) - self.assertEqual(logits.shape, (5, 11)) - self.assertTrue(np.any(logits)) - - def testBuildPoseModelWithBatchNorm(self): - images = tf.to_float(np.random.rand(10, 64, 64, 4)) - - with self.test_session() as sess: - logits, _ = getattr(models, 'dsn_cropped_linemod')( - images, batch_norm_params=models.default_batch_norm_params(True)) - sess.run(tf.global_variables_initializer()) - logits_np = sess.run(logits) - self.assertEqual(logits_np.shape, (10, 11)) - self.assertTrue(np.any(logits_np)) - - -class EncoderTest(tf.test.TestCase): - - def _testEncoder(self, batch_norm_params=None, channels=1): - images = tf.to_float(np.random.rand(10, 28, 28, channels)) - - with self.test_session() as sess: - end_points = models.default_encoder( - images, 128, batch_norm_params=batch_norm_params) - sess.run(tf.global_variables_initializer()) - private_code = sess.run(end_points['fc3']) - self.assertEqual(private_code.shape, (10, 128)) - self.assertTrue(np.any(private_code)) - self.assertTrue(np.all(np.isfinite(private_code))) - - def testEncoder(self): - self._testEncoder() - - def testEncoderMultiChannel(self): - self._testEncoder(None, 4) - - def testEncoderIsTrainingBatchNorm(self): - self._testEncoder(models.default_batch_norm_params(True)) - - def testEncoderBatchNorm(self): - self._testEncoder(models.default_batch_norm_params(False)) - - -class DecoderTest(tf.test.TestCase): - - def _testDecoder(self, - height=64, - width=64, - channels=4, - batch_norm_params=None, - decoder=models.small_decoder): - codes = tf.to_float(np.random.rand(32, 100)) - - with self.test_session() as sess: - output = decoder( - codes, - height=height, - width=width, - channels=channels, - batch_norm_params=batch_norm_params) - sess.run(tf.global_variables_initializer()) - output_np = sess.run(output) - self.assertEqual(output_np.shape, (32, height, width, channels)) - self.assertTrue(np.any(output_np)) - self.assertTrue(np.all(np.isfinite(output_np))) - - def testSmallDecoder(self): - self._testDecoder(28, 28, 4, None, getattr(models, 'small_decoder')) - - def testSmallDecoderThreeChannels(self): - self._testDecoder(28, 28, 3) - - def testSmallDecoderBatchNorm(self): - self._testDecoder(28, 28, 4, models.default_batch_norm_params(False)) - - def 
testSmallDecoderIsTrainingBatchNorm(self): - self._testDecoder(28, 28, 4, models.default_batch_norm_params(True)) - - def testLargeDecoder(self): - self._testDecoder(32, 32, 4, None, getattr(models, 'large_decoder')) - - def testLargeDecoderThreeChannels(self): - self._testDecoder(32, 32, 3, None, getattr(models, 'large_decoder')) - - def testLargeDecoderBatchNorm(self): - self._testDecoder(32, 32, 4, - models.default_batch_norm_params(False), - getattr(models, 'large_decoder')) - - def testLargeDecoderIsTrainingBatchNorm(self): - self._testDecoder(32, 32, 4, - models.default_batch_norm_params(True), - getattr(models, 'large_decoder')) - - def testGtsrbDecoder(self): - self._testDecoder(40, 40, 3, None, getattr(models, 'large_decoder')) - - def testGtsrbDecoderBatchNorm(self): - self._testDecoder(40, 40, 4, - models.default_batch_norm_params(False), - getattr(models, 'gtsrb_decoder')) - - def testGtsrbDecoderIsTrainingBatchNorm(self): - self._testDecoder(40, 40, 4, - models.default_batch_norm_params(True), - getattr(models, 'gtsrb_decoder')) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/domain_adaptation/domain_separation/utils.py b/research/domain_adaptation/domain_separation/utils.py deleted file mode 100644 index e144ee861..000000000 --- a/research/domain_adaptation/domain_separation/utils.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Auxiliary functions for domain adaptation related losses. -""" -import math -import tensorflow as tf - - -def create_summaries(end_points, prefix='', max_images=3, use_op_name=False): - """Creates a tf summary per endpoint. - - If the endpoint is a 4 dimensional tensor it displays it as an image - otherwise if it is a two dimensional one it creates a histogram summary. - - Args: - end_points: a dictionary of name, tf tensor pairs. - prefix: an optional string to prefix the summary with. - max_images: the maximum number of images to display per summary. - use_op_name: Use the op name as opposed to the shorter end_points key. 
-  """
-  for layer_name in end_points:
-    if use_op_name:
-      name = end_points[layer_name].op.name
-    else:
-      name = layer_name
-    if len(end_points[layer_name].get_shape().as_list()) == 4:
-      # if it's an actual image do not attempt to reshape it
-      if end_points[layer_name].get_shape().as_list()[-1] == 1 or end_points[
-          layer_name].get_shape().as_list()[-1] == 3:
-        visualization_image = end_points[layer_name]
-      else:
-        visualization_image = reshape_feature_maps(end_points[layer_name])
-      tf.summary.image(
-          '{}/{}'.format(prefix, name),
-          visualization_image,
-          max_outputs=max_images)
-    elif len(end_points[layer_name].get_shape().as_list()) == 3:
-      images = tf.expand_dims(end_points[layer_name], 3)
-      tf.summary.image(
-          '{}/{}'.format(prefix, name),
-          images,
-          max_outputs=max_images)
-    elif len(end_points[layer_name].get_shape().as_list()) == 2:
-      tf.summary.histogram('{}/{}'.format(prefix, name), end_points[layer_name])
-
-
-def reshape_feature_maps(features_tensor):
-  """Reshape activations for tf.summary.image visualization.
-
-  Args:
-    features_tensor: a tensor of activations with a square number of feature
-      maps, e.g. 4, 9, 16, etc.
-  Returns:
-    A composite image with all the feature maps that can be passed as an
-    argument to tf.summary.image.
-  """
-  assert len(features_tensor.get_shape().as_list()) == 4
-  num_filters = features_tensor.get_shape().as_list()[-1]
-  assert num_filters > 0
-  num_filters_sqrt = math.sqrt(num_filters)
-  assert num_filters_sqrt.is_integer(
-  ), 'Number of filters should be a square number but got {}'.format(
-      num_filters)
-  num_filters_sqrt = int(num_filters_sqrt)
-  conv_summary = tf.unstack(features_tensor, axis=3)
-  conv_one_row = tf.concat(axis=2, values=conv_summary[0:num_filters_sqrt])
-  ind = 1
-  conv_final = conv_one_row
-  for ind in range(1, num_filters_sqrt):
-    conv_one_row = tf.concat(axis=2,
-                             values=conv_summary[
-                                 ind * num_filters_sqrt + 0:ind *
-                                 num_filters_sqrt + num_filters_sqrt])
-    conv_final = tf.concat(
-        axis=1, values=[tf.squeeze(conv_final), tf.squeeze(conv_one_row)])
-  conv_final = tf.expand_dims(conv_final, -1)
-  return conv_final
-
-
-def accuracy(predictions, labels):
-  """Calculates the classification accuracy.
-
-  Args:
-    predictions: the predicted values, a tensor whose size matches 'labels'.
-    labels: the ground truth values, a tensor of any size.
-
-  Returns:
-    a tensor whose value on evaluation returns the total accuracy.
-  """
-  return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))
-
-
-def compute_upsample_values(input_tensor, upsample_height, upsample_width):
-  """Compute values for an upsampling op (ops.BatchCropAndResize).
-
-  Args:
-    input_tensor: image tensor with shape [batch, height, width, in_channels]
-    upsample_height: integer
-    upsample_width: integer
-
-  Returns:
-    grid_centers: tensor with shape [batch, 1]
-    crop_sizes: tensor with shape [batch, 1]
-    output_height: integer
-    output_width: integer
-  """
-  batch, input_height, input_width, _ = input_tensor.shape
-
-  height_half = input_height / 2.
-  width_half = input_width / 2.
-  grid_centers = tf.constant(batch * [[height_half, width_half]])
-  crop_sizes = tf.constant(batch * [[input_height, input_width]])
-  output_height = input_height * upsample_height
-  output_width = input_width * upsample_width
-
-  return grid_centers, tf.to_float(crop_sizes), output_height, output_width
-
-
-def compute_pairwise_distances(x, y):
-  """Computes the squared pairwise Euclidean distances between x and y.
-
-  Args:
-    x: a tensor of shape [num_x_samples, num_features]
-    y: a tensor of shape [num_y_samples, num_features]
-
-  Returns:
-    a distance matrix of dimensions [num_x_samples, num_y_samples].
-
-  Raises:
-    ValueError: if the inputs do not match the specified dimensions.
-  """
-
-  if not len(x.get_shape()) == len(y.get_shape()) == 2:
-    raise ValueError('Both inputs should be matrices.')
-
-  if x.get_shape().as_list()[1] != y.get_shape().as_list()[1]:
-    raise ValueError('The number of features should be the same.')
-
-  norm = lambda x: tf.reduce_sum(tf.square(x), 1)
-
-  # By making the `inner' dimensions of the two matrices equal to 1 using
-  # broadcasting then we are essentially subtracting every pair of rows
-  # of x and y.
-  # x will be num_samples x num_features x 1,
-  # and y will be 1 x num_features x num_samples (after broadcasting).
-  # After the subtraction we will get a
-  # num_x_samples x num_features x num_y_samples matrix.
-  # The resulting dist will be of shape num_y_samples x num_x_samples,
-  # and thus we need to transpose it again.
-  return tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))
-
-
-def gaussian_kernel_matrix(x, y, sigmas):
-  r"""Computes a Gaussian Radial Basis Kernel between the samples of x and y.
-
-  We create a sum of multiple Gaussian kernels each having a width sigma_i.
-
-  Args:
-    x: a tensor of shape [num_samples, num_features]
-    y: a tensor of shape [num_samples, num_features]
-    sigmas: a tensor of floats which denote the widths of each of the
-      Gaussians in the kernel.
-  Returns:
-    A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel.
-  """
-  beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))
-
-  dist = compute_pairwise_distances(x, y)
-
-  s = tf.matmul(beta, tf.reshape(dist, (1, -1)))
-
-  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))
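compute_pairwise_distances() relies on broadcasting, expanding x against y transposed so that every pair of rows is subtracted in one op, and gaussian_kernel_matrix() then sums exp(-dist / (2 sigma_i)) over all provided widths. A NumPy cross-check of the same quantities (a sketch with made-up data, not part of the patch):

import numpy as np

x = np.random.randn(5, 3)
y = np.random.randn(5, 3)

# Broadcasting form of ||x_i - y_j||^2, as in compute_pairwise_distances.
dist = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=-1)

sigmas = np.array([1.0, 5.0, 10.0])
beta = 1.0 / (2.0 * sigmas[:, None])                  # [num_sigmas, 1]
kernel = np.exp(-beta * dist.reshape(1, -1)).sum(0)   # sum over the sigmas
kernel = kernel.reshape(dist.shape)                   # [5, 5] multi-sigma RBF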
diff --git a/research/domain_adaptation/pixel_domain_adaptation/BUILD b/research/domain_adaptation/pixel_domain_adaptation/BUILD
deleted file mode 100644
index 2bc8d4a49..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/BUILD
+++ /dev/null
@@ -1,90 +0,0 @@
-# Description:
-#   Contains code for domain-adaptation style transfer.
- -package( - default_visibility = [ - ":internal", - ], -) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = [ - "//domain_adaptation/...", - ], -) - -py_library( - name = "pixelda_preprocess", - srcs = ["pixelda_preprocess.py"], - deps = [ - - ], -) - -py_test( - name = "pixelda_preprocess_test", - srcs = ["pixelda_preprocess_test.py"], - deps = [ - ":pixelda_preprocess", - - ], -) - -py_library( - name = "pixelda_model", - srcs = [ - "pixelda_model.py", - "pixelda_task_towers.py", - "hparams.py", - ], - deps = [ - - ], -) - -py_library( - name = "pixelda_utils", - srcs = ["pixelda_utils.py"], - deps = [ - - ], -) - -py_library( - name = "pixelda_losses", - srcs = ["pixelda_losses.py"], - deps = [ - - ], -) - -py_binary( - name = "pixelda_train", - srcs = ["pixelda_train.py"], - deps = [ - ":pixelda_losses", - ":pixelda_model", - ":pixelda_preprocess", - ":pixelda_utils", - - "//domain_adaptation/datasets:dataset_factory", - ], -) - -py_binary( - name = "pixelda_eval", - srcs = ["pixelda_eval.py"], - deps = [ - ":pixelda_losses", - ":pixelda_model", - ":pixelda_preprocess", - ":pixelda_utils", - - "//domain_adaptation/datasets:dataset_factory", - ], -) diff --git a/research/domain_adaptation/pixel_domain_adaptation/README.md b/research/domain_adaptation/pixel_domain_adaptation/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD b/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD deleted file mode 100644 index c41a4ffee..000000000 --- a/research/domain_adaptation/pixel_domain_adaptation/baselines/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -licenses(["notice"]) # Apache 2.0 - -py_binary( - name = "baseline_train", - srcs = ["baseline_train.py"], - deps = [ - - "//domain_adaptation/datasets:dataset_factory", - "//domain_adaptation/pixel_domain_adaptation:pixelda_model", - "//domain_adaptation/pixel_domain_adaptation:pixelda_preprocess", - ], -) - -py_binary( - name = "baseline_eval", - srcs = ["baseline_eval.py"], - deps = [ - - "//domain_adaptation/datasets:dataset_factory", - "//domain_adaptation/pixel_domain_adaptation:pixelda_model", - "//domain_adaptation/pixel_domain_adaptation:pixelda_preprocess", - ], -) diff --git a/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md b/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md deleted file mode 100644 index d61195ad2..000000000 --- a/research/domain_adaptation/pixel_domain_adaptation/baselines/README.md +++ /dev/null @@ -1,60 +0,0 @@ -The best baselines are obtainable via the following configuration: - - -## MNIST => MNIST_M - -Accuracy: -MNIST-Train: 99.9 -MNIST_M-Train: 63.9 -MNIST_M-Valid: 63.9 -MNIST_M-Test: 63.6 - -Learning Rate = 0.0001 -Weight Decay = 0.0 -Number of Steps: 105,000 - -## MNIST => USPS - -Accuracy: -MNIST-Train: 100.0 -USPS-Train: 82.8 -USPS-Valid: 82.8 -USPS-Test: 78.9 - -Learning Rate = 0.0001 -Weight Decay = 0.0 -Number of Steps: 22,000 - -## MNIST_M => MNIST - -Accuracy: -MNIST_M-Train: 100 -MNIST-Train: 98.5 -MNIST-Valid: 98.5 -MNIST-Test: 98.1 - -Learning Rate = 0.001 -Weight Decay = 0.0 -Number of Steps: 604,400 - -## MNIST_M => MNIST_M - -Accuracy: -MNIST_M-Train: 100.0 -MNIST_M-Valid: 96.6 -MNIST_M-Test: 96.4 - -Learning Rate = 0.001 -Weight Decay = 0.0 -Number of Steps: 139,400 - -## USPS => USPS - -Accuracy: -USPS-Train: 100.0 -USPS-Valid: 100.0 -USPS-Test: 96.5 - -Learning Rate = 0.001 -Weight Decay = 
0.0 -Number of Steps: 67,000 diff --git a/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py b/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py deleted file mode 100644 index 6b7ef6452..000000000 --- a/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_eval.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Evals the classification/pose baselines.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from functools import partial - -import math - -# Dependency imports - -import tensorflow as tf - -from domain_adaptation.datasets import dataset_factory -from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess -from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers - -flags = tf.app.flags -FLAGS = flags.FLAGS - -slim = tf.contrib.slim - -flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') - -flags.DEFINE_string( - 'checkpoint_dir', None, 'The location of the checkpoint files.') - -flags.DEFINE_string( - 'eval_dir', None, 'The directory where evaluation logs are written.') - -flags.DEFINE_integer('batch_size', 32, 'The number of samples per batch.') - -flags.DEFINE_string('dataset_name', None, 'The name of the dataset.') - -flags.DEFINE_string('dataset_dir', None, - 'The directory where the data is stored.') - -flags.DEFINE_string('split_name', None, 'The name of the train/test split.') - -flags.DEFINE_integer('eval_interval_secs', 60 * 5, - 'How often (in seconds) to run evaluation.') - -flags.DEFINE_integer( - 'num_readers', 4, - 'The number of parallel readers that read data from the dataset.') - -def main(unused_argv): - tf.logging.set_verbosity(tf.logging.INFO) - hparams = tf.contrib.training.HParams() - hparams.weight_decay_task_classifier = 0.0 - - if FLAGS.dataset_name in ['mnist', 'mnist_m', 'usps']: - hparams.task_tower = 'mnist' - else: - raise ValueError('Unknown dataset %s' % FLAGS.dataset_name) - - if not tf.gfile.Exists(FLAGS.eval_dir): - tf.gfile.MakeDirs(FLAGS.eval_dir) - - with tf.Graph().as_default(): - dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.split_name, - FLAGS.dataset_dir) - num_classes = dataset.num_classes - num_samples = dataset.num_samples - - preprocess_fn = partial(pixelda_preprocess.preprocess_classification, - is_training=False) - - images, labels = dataset_factory.provide_batch( - FLAGS.dataset_name, - FLAGS.split_name, - dataset_dir=FLAGS.dataset_dir, - num_readers=FLAGS.num_readers, - batch_size=FLAGS.batch_size, - num_preprocessing_threads=FLAGS.num_readers) - - # Define the model - logits, _ = pixelda_task_towers.add_task_specific_model( - images, hparams, num_classes=num_classes, is_training=True) - - ##################### - # Define the losses # - ##################### - if 'classes' in labels: - one_hot_labels = labels['classes'] - loss = tf.losses.softmax_cross_entropy( - 
onehot_labels=one_hot_labels, logits=logits) - tf.summary.scalar('losses/Classification_Loss', loss) - else: - raise ValueError('Only support classification for now.') - - total_loss = tf.losses.get_total_loss() - - predictions = tf.reshape(tf.argmax(logits, 1), shape=[-1]) - class_labels = tf.argmax(labels['classes'], 1) - - metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({ - 'Mean_Loss': - tf.contrib.metrics.streaming_mean(total_loss), - 'Accuracy': - tf.contrib.metrics.streaming_accuracy(predictions, - tf.reshape( - class_labels, - shape=[-1])), - 'Recall_at_5': - tf.contrib.metrics.streaming_recall_at_k(logits, class_labels, 5), - }) - - tf.summary.histogram('outputs/Predictions', predictions) - tf.summary.histogram('outputs/Ground_Truth', class_labels) - - for name, value in metrics_to_values.iteritems(): - tf.summary.scalar(name, value) - - num_batches = int(math.ceil(num_samples / float(FLAGS.batch_size))) - - slim.evaluation.evaluation_loop( - master=FLAGS.master, - checkpoint_dir=FLAGS.checkpoint_dir, - logdir=FLAGS.eval_dir, - num_evals=num_batches, - eval_op=metrics_to_updates.values(), - eval_interval_secs=FLAGS.eval_interval_secs) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py b/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py deleted file mode 100644 index 8c92bd81a..000000000 --- a/research/domain_adaptation/pixel_domain_adaptation/baselines/baseline_train.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Trains the classification/pose baselines.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from functools import partial - -# Dependency imports - -import tensorflow as tf - -from domain_adaptation.datasets import dataset_factory -from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess -from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers - -flags = tf.app.flags -FLAGS = flags.FLAGS - -slim = tf.contrib.slim - -flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') - -flags.DEFINE_integer('task', 0, 'The task ID.') - -flags.DEFINE_integer('num_ps_tasks', 0, - 'The number of parameter servers. 
If the value is 0, then ' - 'the parameters are handled locally by the worker.') - -flags.DEFINE_integer('batch_size', 32, 'The number of samples per batch.') - -flags.DEFINE_string('dataset_name', None, 'The name of the dataset.') - -flags.DEFINE_string('dataset_dir', None, - 'The directory where the data is stored.') - -flags.DEFINE_string('split_name', None, 'The name of the train/test split.') - -flags.DEFINE_float('learning_rate', 0.001, 'The initial learning rate.') - -flags.DEFINE_integer( - 'learning_rate_decay_steps', 20000, - 'The frequency, in steps, at which the learning rate is decayed.') - -flags.DEFINE_float('learning_rate_decay_factor', - 0.95, - 'The factor with which the learning rate is decayed.') - -flags.DEFINE_float('adam_beta1', 0.5, 'The beta1 value for the AdamOptimizer') - -flags.DEFINE_float('weight_decay', 1e-5, - 'The L2 coefficient on the model weights.') - -flags.DEFINE_string( - 'logdir', None, 'The location of the logs and checkpoints.') - -flags.DEFINE_integer('save_interval_secs', 600, - 'How often, in seconds, we save the model to disk.') - -flags.DEFINE_integer('save_summaries_secs', 600, - 'How often, in seconds, we compute the summaries.') - -flags.DEFINE_integer( - 'num_readers', 4, - 'The number of parallel readers that read data from the dataset.') - -flags.DEFINE_float( - 'moving_average_decay', 0.9999, - 'The amount of decay to use for moving averages.') - - -def main(unused_argv): - tf.logging.set_verbosity(tf.logging.INFO) - hparams = tf.contrib.training.HParams() - hparams.weight_decay_task_classifier = FLAGS.weight_decay - - if FLAGS.dataset_name in ['mnist', 'mnist_m', 'usps']: - hparams.task_tower = 'mnist' - else: - raise ValueError('Unknown dataset %s' % FLAGS.dataset_name) - - with tf.Graph().as_default(): - with tf.device( - tf.train.replica_device_setter(FLAGS.num_ps_tasks, merge_devices=True)): - dataset = dataset_factory.get_dataset(FLAGS.dataset_name, - FLAGS.split_name, FLAGS.dataset_dir) - num_classes = dataset.num_classes - - preprocess_fn = partial(pixelda_preprocess.preprocess_classification, - is_training=True) - - images, labels = dataset_factory.provide_batch( - FLAGS.dataset_name, - FLAGS.split_name, - dataset_dir=FLAGS.dataset_dir, - num_readers=FLAGS.num_readers, - batch_size=FLAGS.batch_size, - num_preprocessing_threads=FLAGS.num_readers) - # preprocess_fn=preprocess_fn) - - # Define the model - logits, _ = pixelda_task_towers.add_task_specific_model( - images, hparams, num_classes=num_classes, is_training=True) - - # Define the losses - if 'classes' in labels: - one_hot_labels = labels['classes'] - loss = tf.losses.softmax_cross_entropy( - onehot_labels=one_hot_labels, logits=logits) - tf.summary.scalar('losses/Classification_Loss', loss) - else: - raise ValueError('Only support classification for now.') - - total_loss = tf.losses.get_total_loss() - tf.summary.scalar('losses/Total_Loss', total_loss) - - # Setup the moving averages - moving_average_variables = slim.get_model_variables() - variable_averages = tf.train.ExponentialMovingAverage( - FLAGS.moving_average_decay, slim.get_or_create_global_step()) - tf.add_to_collection( - tf.GraphKeys.UPDATE_OPS, - variable_averages.apply(moving_average_variables)) - - # Specify the optimization scheme: - learning_rate = tf.train.exponential_decay( - FLAGS.learning_rate, - slim.get_or_create_global_step(), - FLAGS.learning_rate_decay_steps, - FLAGS.learning_rate_decay_factor, - staircase=True) - - optimizer = tf.train.AdamOptimizer(learning_rate, beta1=FLAGS.adam_beta1) - - 
train_op = slim.learning.create_train_op(total_loss, optimizer) - - slim.learning.train( - train_op, - FLAGS.logdir, - master=FLAGS.master, - is_chief=(FLAGS.task == 0), - save_summaries_secs=FLAGS.save_summaries_secs, - save_interval_secs=FLAGS.save_interval_secs) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/domain_adaptation/pixel_domain_adaptation/hparams.py b/research/domain_adaptation/pixel_domain_adaptation/hparams.py deleted file mode 100644 index ba9539f7d..000000000 --- a/research/domain_adaptation/pixel_domain_adaptation/hparams.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define model HParams.""" -import tensorflow as tf - - -def create_hparams(hparam_string=None): - """Create model hyperparameters. Parse nondefault from given string.""" - hparams = tf.contrib.training.HParams( - # The name of the architecture to use. - arch='resnet', - lrelu_leakiness=0.2, - batch_norm_decay=0.9, - weight_decay=1e-5, - normal_init_std=0.02, - generator_kernel_size=3, - discriminator_kernel_size=3, - - # Stop training after this many examples are processed - # If none, train indefinitely - num_training_examples=0, - - # Apply data augmentation to datasets - # Applies only in training job - augment_source_images=False, - augment_target_images=False, - - # Discriminator - # Number of filters in first layer of discriminator - num_discriminator_filters=64, - discriminator_conv_block_size=1, # How many convs to have at each size - discriminator_filter_factor=2.0, # Multiply # filters by this each layer - # Add gaussian noise with this stddev to every hidden layer of D - discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1 - # If true, add this gaussian noise to input images to D as well - discriminator_image_noise=False, - discriminator_first_stride=1, # Stride in first conv of discriminator - discriminator_do_pooling=False, # If true, replace stride 2 with avg pool - discriminator_dropout_keep_prob=0.9, # keep probability for dropout - - # DCGAN Generator - # Number of filters in generator decoder last layer (repeatedly halved - # from 1st layer) - num_decoder_filters=64, - # Number of filters in generator encoder 1st layer (repeatedly doubled - # after 1st layer) - num_encoder_filters=64, - - # This is the shape to which the noise vector is projected (if we're - # transferring from noise). - # Write this way instead of [4, 4, 64] for hparam search flexibility - projection_shape_size=4, - projection_shape_channels=64, - - # Indicates the method by which we enlarge the spatial representation - # of an image. Possible values include: - # - resize_conv: Performs a nearest neighbor resize followed by a conv. - # - conv2d_transpose: Performs a conv2d_transpose. 
- upsample_method='resize_conv', - - # Visualization - summary_steps=500, # Output image summary every N steps - - ################################### - # Task Classifier Hyperparameters # - ################################### - - # Which task-specific prediction tower to use. Possible choices are: - # none: No task tower. - # doubling_pose_estimator: classifier + quaternion regressor. - # [conv + pool]* + FC - # Classifiers used in DSN paper: - # gtsrb: Classifier used for GTSRB - # svhn: Classifier used for SVHN - # mnist: Classifier used for MNIST - # pose_mini: Classifier + regressor used for pose_mini - task_tower='doubling_pose_estimator', - weight_decay_task_classifier=1e-5, - source_task_loss_weight=1.0, - transferred_task_loss_weight=1.0, - - # Number of private layers in doubling_pose_estimator task tower - num_private_layers=2, - - # The weight for the log quaternion loss we use for source and transferred - # samples of the cropped_linemod dataset. - # In the DSN work, 1/8 of the classifier weight worked well for our log - # quaternion loss - source_pose_weight=0.125 * 2.0, - transferred_pose_weight=0.125 * 1.0, - - # If set to True, the style transfer network also attempts to change its - # weights to maximize the performance of the task tower. If set to False, - # then the style transfer network only attempts to change its weights to - # make the transferred images more likely according to the domain - # classifier. - task_tower_in_g_step=True, - task_loss_in_g_weight=1.0, # Weight of task loss in G - - ######################################### - # 'simple` generator arch model hparams # - ######################################### - simple_num_conv_layers=1, - simple_conv_filters=8, - - ######################### - # Resnet Hyperparameters# - ######################### - resnet_blocks=6, # Number of resnet blocks - resnet_filters=64, # Number of filters per conv in resnet blocks - # If true, add original input back to result of convolutions inside the - # resnet arch. If false, it turns into a simple stack of conv/relu/BN - # layers. - resnet_residuals=True, - - ####################################### - # The residual / interpretable model. # - ####################################### - res_int_blocks=2, # The number of residual blocks. - res_int_convs=2, # The number of conv calls inside each block. - res_int_filters=64, # The number of filters used by each convolution. - - #################### - # Latent variables # - #################### - # if true, then generate random noise and project to input for generator - noise_channel=True, - # The number of dimensions in the input noise vector. - noise_dims=10, - - # If true, then one hot encode source image class and project as an - # additional channel for the input to generator. This gives the generator - # access to the class, which may help generation performance. - condition_on_source_class=False, - - ######################## - # Loss Hyperparameters # - ######################## - domain_loss_weight=1.0, - style_transfer_loss_weight=1.0, - - ######################################################################## - # Encourages the transferred images to be similar to the source images # - # using a configurable metric. # - ######################################################################## - - # The weight of the loss function encouraging the source and transferred - # images to be similar. If set to 0, then the loss function is not used. 
- transferred_similarity_loss_weight=0.0,
-
- # The type of loss used to encourage transferred and source image
- # similarity. Valid values include:
- # mpse: Mean Pairwise Squared Error
- # mse: Mean Squared Error
- # hinged_mse: Computes the mean squared error using squared differences
- # greater than hparams.transferred_similarity_max_diff.
- # hinged_mae: Computes the mean absolute error using absolute
- # differences greater than hparams.transferred_similarity_max_diff.
- transferred_similarity_loss='mpse',
-
- # The maximum allowable difference between the source and target images.
- # This value is used, in effect, to produce a hinge loss. Note that the
- # range of values should be between 0 and 1.
- transferred_similarity_max_diff=0.4,
-
- ################################
- # Optimization Hyperparameters #
- ################################
- learning_rate=0.001,
- batch_size=32,
- lr_decay_steps=20000,
- lr_decay_rate=0.95,
-
- # Recommendation from the DCGAN paper:
- adam_beta1=0.5,
- clip_gradient_norm=5.0,
-
- # The number of times we run the discriminator train_op in a row.
- discriminator_steps=1,
-
- # The number of times we run the generator train_op in a row.
- generator_steps=1)
-
- if hparam_string:
- tf.logging.info('Parsing command line hparams: %s', hparam_string)
- hparams.parse(hparam_string)
-
- tf.logging.info('Final parsed hparams: %s', hparams.values())
- return hparams
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py
deleted file mode 100644
index 23824249a..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_eval.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-r"""Evaluates the PIXELDA model.
-
--- Compile the model for CPU.
-$ bazel build -c opt third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation:pixelda_eval
-
--- Compile the model for GPU.
-$ bazel build -c opt --copt=-mavx --config=cuda \
- third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation:pixelda_eval
-
--- Run the evaluation.
-$ ./bazel-bin/third_party/tensorflow_models/domain_adaptation/pixel_domain_adaptation/pixelda_eval \
- --source_dataset=mnist \
- --target_dataset=mnist_m \
- --dataset_dir=/tmp/datasets/ \
- --alsologtostderr
-
--- Visualize the results.
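The --hparams flag defined further below is parsed by create_hparams from hparams.py above; a minimal sketch of an override string (the specific values are hypothetical):

# Unspecified fields keep the defaults defined in create_hparams.
hparams = create_hparams('arch=resnet,batch_size=16,resnet_blocks=4')
assert hparams.arch == 'resnet'
assert hparams.batch_size == 16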
-$ bash learning/brain/tensorboard/tensorboard.sh \ - --port 2222 --logdir=/tmp/pixelda/ -""" -from functools import partial -import math - -# Dependency imports - -import tensorflow as tf - -from domain_adaptation.datasets import dataset_factory -from domain_adaptation.pixel_domain_adaptation import pixelda_model -from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess -from domain_adaptation.pixel_domain_adaptation import pixelda_utils -from domain_adaptation.pixel_domain_adaptation import pixelda_losses -from domain_adaptation.pixel_domain_adaptation.hparams import create_hparams - -slim = tf.contrib.slim - -flags = tf.app.flags -FLAGS = flags.FLAGS - -flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.') - -flags.DEFINE_string('checkpoint_dir', '/tmp/pixelda/', - 'Directory where the model was written to.') - -flags.DEFINE_string('eval_dir', '/tmp/pixelda/', - 'Directory where the results are saved to.') - -flags.DEFINE_integer('eval_interval_secs', 60, - 'The frequency, in seconds, with which evaluation is run.') - -flags.DEFINE_string('target_split_name', 'test', - 'The name of the train/test split.') -flags.DEFINE_string('source_split_name', 'train', 'Split for source dataset.' - ' Defaults to train.') - -flags.DEFINE_string('source_dataset', 'mnist', - 'The name of the source dataset.') - -flags.DEFINE_string('target_dataset', 'mnist_m', - 'The name of the target dataset.') - -flags.DEFINE_string( - 'dataset_dir', - '', # None, - 'The directory where the datasets can be found.') - -flags.DEFINE_integer( - 'num_readers', 4, - 'The number of parallel readers that read data from the dataset.') - -flags.DEFINE_integer('num_preprocessing_threads', 4, - 'The number of threads used to create the batches.') - -# HParams - -flags.DEFINE_string('hparams', '', 'Comma separated hyperparameter values') - - -def run_eval(run_dir, checkpoint_dir, hparams): - """Runs the eval loop. - - Args: - run_dir: The directory where eval specific logs are placed - checkpoint_dir: The directory where the checkpoints are stored - hparams: The hyperparameters struct. - - Raises: - ValueError: if hparams.arch is not recognized. 
- """ - for checkpoint_path in slim.evaluation.checkpoints_iterator( - checkpoint_dir, FLAGS.eval_interval_secs): - with tf.Graph().as_default(): - ######################### - # Preprocess the inputs # - ######################### - target_dataset = dataset_factory.get_dataset( - FLAGS.target_dataset, - split_name=FLAGS.target_split_name, - dataset_dir=FLAGS.dataset_dir) - target_images, target_labels = dataset_factory.provide_batch( - FLAGS.target_dataset, FLAGS.target_split_name, FLAGS.dataset_dir, - FLAGS.num_readers, hparams.batch_size, - FLAGS.num_preprocessing_threads) - num_target_classes = target_dataset.num_classes - target_labels['class'] = tf.argmax(target_labels['classes'], 1) - del target_labels['classes'] - - if hparams.arch not in ['dcgan']: - source_dataset = dataset_factory.get_dataset( - FLAGS.source_dataset, - split_name=FLAGS.source_split_name, - dataset_dir=FLAGS.dataset_dir) - num_source_classes = source_dataset.num_classes - source_images, source_labels = dataset_factory.provide_batch( - FLAGS.source_dataset, FLAGS.source_split_name, FLAGS.dataset_dir, - FLAGS.num_readers, hparams.batch_size, - FLAGS.num_preprocessing_threads) - source_labels['class'] = tf.argmax(source_labels['classes'], 1) - del source_labels['classes'] - if num_source_classes != num_target_classes: - raise ValueError( - 'Input and output datasets must have same number of classes') - else: - source_images = None - source_labels = None - - #################### - # Define the model # - #################### - end_points = pixelda_model.create_model( - hparams, - target_images, - source_images=source_images, - source_labels=source_labels, - is_training=False, - num_classes=num_target_classes) - - ####################### - # Metrics & Summaries # - ####################### - names_to_values, names_to_updates = create_metrics(end_points, - source_labels, - target_labels, hparams) - pixelda_utils.summarize_model(end_points) - pixelda_utils.summarize_transferred_grid( - end_points['transferred_images'], source_images, name='Transferred') - if 'source_images_recon' in end_points: - pixelda_utils.summarize_transferred_grid( - end_points['source_images_recon'], - source_images, - name='Source Reconstruction') - pixelda_utils.summarize_images(target_images, 'Target') - - for name, value in names_to_values.iteritems(): - tf.summary.scalar(name, value) - - # Use the entire split by default - num_examples = target_dataset.num_samples - - num_batches = math.ceil(num_examples / float(hparams.batch_size)) - global_step = slim.get_or_create_global_step() - - result = slim.evaluation.evaluate_once( - master=FLAGS.master, - checkpoint_path=checkpoint_path, - logdir=run_dir, - num_evals=num_batches, - eval_op=names_to_updates.values(), - final_op=names_to_values) - - -def to_degrees(log_quaternion_loss): - """Converts a log quaternion distance to an angle. - - Args: - log_quaternion_loss: The log quaternion distance between two - unit quaternions (or a batch of pairs of quaternions). - - Returns: - The angle in degrees of the implied angle-axis representation. - """ - return tf.acos(-(tf.exp(log_quaternion_loss) - 1)) * 2 * 180 / math.pi - - -def create_metrics(end_points, source_labels, target_labels, hparams): - """Create metrics for the model. - - Args: - end_points: A dictionary of end point name to tensor - source_labels: Labels for source images. batch_size x 1 - target_labels: Labels for target images. batch_size x 1 - hparams: The hyperparameters struct. 
- - Returns: - Tuple of (names_to_values, names_to_updates), dictionaries that map a metric - name to its value and update op, respectively - - """ - ########################################### - # Evaluate the Domain Prediction Accuracy # - ########################################### - batch_size = hparams.batch_size - names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ - ('eval/Domain_Accuracy-Transferred'): - tf.contrib.metrics.streaming_accuracy( - tf.to_int32( - tf.round(tf.sigmoid(end_points[ - 'transferred_domain_logits']))), - tf.zeros(batch_size, dtype=tf.int32)), - ('eval/Domain_Accuracy-Target'): - tf.contrib.metrics.streaming_accuracy( - tf.to_int32( - tf.round(tf.sigmoid(end_points['target_domain_logits']))), - tf.ones(batch_size, dtype=tf.int32)) - }) - - ################################ - # Evaluate the task classifier # - ################################ - if 'source_task_logits' in end_points: - metric_name = 'eval/Task_Accuracy-Source' - names_to_values[metric_name], names_to_updates[ - metric_name] = tf.contrib.metrics.streaming_accuracy( - tf.argmax(end_points['source_task_logits'], 1), - source_labels['class']) - - if 'transferred_task_logits' in end_points: - metric_name = 'eval/Task_Accuracy-Transferred' - names_to_values[metric_name], names_to_updates[ - metric_name] = tf.contrib.metrics.streaming_accuracy( - tf.argmax(end_points['transferred_task_logits'], 1), - source_labels['class']) - - if 'target_task_logits' in end_points: - metric_name = 'eval/Task_Accuracy-Target' - names_to_values[metric_name], names_to_updates[ - metric_name] = tf.contrib.metrics.streaming_accuracy( - tf.argmax(end_points['target_task_logits'], 1), - target_labels['class']) - - ########################################################################## - # Pose data-specific losses. 
- ##########################################################################
- if 'quaternion' in source_labels.keys():
- params = {}
- params['use_logging'] = False
- params['batch_size'] = batch_size
-
- angle_loss_source = to_degrees(
- pixelda_losses.log_quaternion_loss_batch(end_points[
- 'source_quaternion'], source_labels['quaternion'], params))
- angle_loss_transferred = to_degrees(
- pixelda_losses.log_quaternion_loss_batch(end_points[
- 'transferred_quaternion'], source_labels['quaternion'], params))
- angle_loss_target = to_degrees(
- pixelda_losses.log_quaternion_loss_batch(end_points[
- 'target_quaternion'], target_labels['quaternion'], params))
-
- metric_name = 'eval/Angle_Loss-Source'
- names_to_values[metric_name], names_to_updates[
- metric_name] = slim.metrics.mean(angle_loss_source)
-
- metric_name = 'eval/Angle_Loss-Transferred'
- names_to_values[metric_name], names_to_updates[
- metric_name] = slim.metrics.mean(angle_loss_transferred)
-
- metric_name = 'eval/Angle_Loss-Target'
- names_to_values[metric_name], names_to_updates[
- metric_name] = slim.metrics.mean(angle_loss_target)
-
- return names_to_values, names_to_updates
-
-
-def main(_):
- tf.logging.set_verbosity(tf.logging.INFO)
- hparams = create_hparams(FLAGS.hparams)
- run_eval(
- run_dir=FLAGS.eval_dir,
- checkpoint_dir=FLAGS.checkpoint_dir,
- hparams=hparams)
-
-
-if __name__ == '__main__':
- tf.app.run()
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py
deleted file mode 100644
index cf39765d4..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_losses.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Defines the various loss functions in use by the PIXELDA model."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-
-def add_domain_classifier_losses(end_points, hparams):
- """Adds losses related to the domain-classifier.
-
- Args:
- end_points: A map of network end point names to `Tensors`.
- hparams: The hyperparameters struct.
-
- Returns:
- loss: A `Tensor` representing the total domain-classifier loss.
- """
- if hparams.domain_loss_weight == 0:
- tf.logging.info(
- 'Domain classifier loss weight is 0, so not creating losses.')
- return 0
-
- # The domain prediction loss is minimized with respect to the domain
- # classifier features only. Its aim is to predict the domain of the images.
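Spelled out, the two sigmoid cross-entropy terms constructed below reduce to the standard GAN discriminator objective (restated here for clarity, with D the sigmoid of the domain logits):

    d_loss = -log(1 - D(transferred)) - log(D(target))

so transferred images are pushed toward the 'fake' label (0) and real target images toward the 'real' label (1).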
- # Note: 1 = 'real image' label, 0 = 'fake image' label - transferred_domain_loss = tf.losses.sigmoid_cross_entropy( - multi_class_labels=tf.zeros_like(end_points['transferred_domain_logits']), - logits=end_points['transferred_domain_logits']) - tf.summary.scalar('Domain_loss_transferred', transferred_domain_loss) - - target_domain_loss = tf.losses.sigmoid_cross_entropy( - multi_class_labels=tf.ones_like(end_points['target_domain_logits']), - logits=end_points['target_domain_logits']) - tf.summary.scalar('Domain_loss_target', target_domain_loss) - - # Compute the total domain loss: - total_domain_loss = transferred_domain_loss + target_domain_loss - total_domain_loss *= hparams.domain_loss_weight - tf.summary.scalar('Domain_loss_total', total_domain_loss) - - return total_domain_loss - -def log_quaternion_loss_batch(predictions, labels, params): - """A helper function to compute the error between quaternions. - - Args: - predictions: A Tensor of size [batch_size, 4]. - labels: A Tensor of size [batch_size, 4]. - params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. - - Returns: - A Tensor of size [batch_size], denoting the error between the quaternions. - """ - use_logging = params['use_logging'] - assertions = [] - if use_logging: - assertions.append( - tf.Assert( - tf.reduce_all( - tf.less( - tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), - 1e-4)), - ['The l2 norm of each prediction quaternion vector should be 1.'])) - assertions.append( - tf.Assert( - tf.reduce_all( - tf.less( - tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)), - ['The l2 norm of each label quaternion vector should be 1.'])) - - with tf.control_dependencies(assertions): - product = tf.multiply(predictions, labels) - internal_dot_products = tf.reduce_sum(product, [1]) - - if use_logging: - internal_dot_products = tf.Print(internal_dot_products, [ - internal_dot_products, - tf.shape(internal_dot_products) - ], 'internal_dot_products:') - - logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products)) - return logcost - - -def log_quaternion_loss(predictions, labels, params): - """A helper function to compute the mean error between batches of quaternions. - - The caller is expected to add the loss to the graph. - - Args: - predictions: A Tensor of size [batch_size, 4]. - labels: A Tensor of size [batch_size, 4]. - params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'. - - Returns: - A Tensor of size 1, denoting the mean error between batches of quaternions. - """ - use_logging = params['use_logging'] - logcost = log_quaternion_loss_batch(predictions, labels, params) - logcost = tf.reduce_sum(logcost, [0]) - batch_size = params['batch_size'] - logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss') - if use_logging: - logcost = tf.Print( - logcost, [logcost], '[logcost]', name='log_quaternion_loss_print') - return logcost - -def _quaternion_loss(labels, predictions, weight, batch_size, domain, - add_summaries): - """Creates a Quaternion Loss. - - Args: - labels: The true quaternions. - predictions: The predicted quaternions. - weight: A scalar weight. - batch_size: The size of the batches. - domain: The name of the domain from which the labels were taken. - add_summaries: Whether or not to add summaries for the losses. - - Returns: - A `Tensor` representing the loss. 
- """ - assert domain in ['Source', 'Transferred'] - - params = {'use_logging': False, 'batch_size': batch_size} - loss = weight * log_quaternion_loss(labels, predictions, params) - - if add_summaries: - assert_op = tf.Assert(tf.is_finite(loss), [loss]) - with tf.control_dependencies([assert_op]): - tf.summary.histogram( - 'Log_Quaternion_Loss_%s' % domain, loss, collections='losses') - tf.summary.scalar( - 'Task_Quaternion_Loss_%s' % domain, loss, collections='losses') - - return loss - - -def _add_task_specific_losses(end_points, source_labels, num_classes, hparams, - add_summaries=False): - """Adds losses related to the task-classifier. - - Args: - end_points: A map of network end point names to `Tensors`. - source_labels: A dictionary of output labels to `Tensors`. - num_classes: The number of classes used by the classifier. - hparams: The hyperparameters struct. - add_summaries: Whether or not to add the summaries. - - Returns: - loss: A `Tensor` representing the total task-classifier loss. - """ - # TODO(ddohan): Make sure the l2 regularization is added to the loss - - one_hot_labels = slim.one_hot_encoding(source_labels['class'], num_classes) - total_loss = 0 - - if 'source_task_logits' in end_points: - loss = tf.losses.softmax_cross_entropy( - onehot_labels=one_hot_labels, - logits=end_points['source_task_logits'], - weights=hparams.source_task_loss_weight) - if add_summaries: - tf.summary.scalar('Task_Classifier_Loss_Source', loss) - total_loss += loss - - if 'transferred_task_logits' in end_points: - loss = tf.losses.softmax_cross_entropy( - onehot_labels=one_hot_labels, - logits=end_points['transferred_task_logits'], - weights=hparams.transferred_task_loss_weight) - if add_summaries: - tf.summary.scalar('Task_Classifier_Loss_Transferred', loss) - total_loss += loss - - ######################### - # Pose specific losses. # - ######################### - if 'quaternion' in source_labels: - total_loss += _quaternion_loss( - source_labels['quaternion'], - end_points['source_quaternion'], - hparams.source_pose_weight, - hparams.batch_size, - 'Source', - add_summaries) - - total_loss += _quaternion_loss( - source_labels['quaternion'], - end_points['transferred_quaternion'], - hparams.transferred_pose_weight, - hparams.batch_size, - 'Transferred', - add_summaries) - - if add_summaries: - tf.summary.scalar('Task_Loss_Total', total_loss) - - return total_loss - - -def _transferred_similarity_loss(reconstructions, - source_images, - weight=1.0, - method='mse', - max_diff=0.4, - name='similarity'): - """Computes a loss encouraging similarity between source and transferred. - - Args: - reconstructions: A `Tensor` of shape [batch_size, height, width, channels] - source_images: A `Tensor` of shape [batch_size, height, width, channels]. - weight: Multiple similarity loss by this weight before returning - method: One of: - mpse = Mean Pairwise Squared Error - mse = Mean Squared Error - hinged_mse = Computes the mean squared error using squared differences - greater than hparams.transferred_similarity_max_diff - hinged_mae = Computes the mean absolute error using absolute - differences greater than hparams.transferred_similarity_max_diff. - max_diff: Maximum unpenalized difference for hinged losses - name: Identifying name to use for creating summaries - - - Returns: - A `Tensor` representing the transferred similarity loss. - - Raises: - ValueError: if `method` is not recognized. 
- """ - if weight == 0: - return 0 - - source_channels = source_images.shape.as_list()[-1] - reconstruction_channels = reconstructions.shape.as_list()[-1] - - # Convert grayscale source to RGB if target is RGB - if source_channels == 1 and reconstruction_channels != 1: - source_images = tf.tile(source_images, [1, 1, 1, reconstruction_channels]) - if reconstruction_channels == 1 and source_channels != 1: - reconstructions = tf.tile(reconstructions, [1, 1, 1, source_channels]) - - if method == 'mpse': - reconstruction_similarity_loss_fn = ( - tf.contrib.losses.mean_pairwise_squared_error) - elif method == 'masked_mpse': - - def masked_mpse(predictions, labels, weight): - """Masked mpse assuming we have a depth to create a mask from.""" - assert labels.shape.as_list()[-1] == 4 - mask = tf.to_float(tf.less(labels[:, :, :, 3:4], 0.99)) - mask = tf.tile(mask, [1, 1, 1, 4]) - predictions *= mask - labels *= mask - tf.image_summary('masked_pred', predictions) - tf.image_summary('masked_label', labels) - return tf.contrib.losses.mean_pairwise_squared_error( - predictions, labels, weight) - - reconstruction_similarity_loss_fn = masked_mpse - elif method == 'mse': - reconstruction_similarity_loss_fn = tf.contrib.losses.mean_squared_error - elif method == 'hinged_mse': - - def hinged_mse(predictions, labels, weight): - diffs = tf.square(predictions - labels) - diffs = tf.maximum(0.0, diffs - max_diff) - return tf.reduce_mean(diffs) * weight - - reconstruction_similarity_loss_fn = hinged_mse - elif method == 'hinged_mae': - - def hinged_mae(predictions, labels, weight): - diffs = tf.abs(predictions - labels) - diffs = tf.maximum(0.0, diffs - max_diff) - return tf.reduce_mean(diffs) * weight - - reconstruction_similarity_loss_fn = hinged_mae - else: - raise ValueError('Unknown reconstruction loss %s' % method) - - reconstruction_similarity_loss = reconstruction_similarity_loss_fn( - reconstructions, source_images, weight) - - name = '%s_Similarity_(%s)' % (name, method) - tf.summary.scalar(name, reconstruction_similarity_loss) - return reconstruction_similarity_loss - - -def g_step_loss(source_images, source_labels, end_points, hparams, num_classes): - """Configures the loss function which runs during the g-step. - - Args: - source_images: A `Tensor` of shape [batch_size, height, width, channels]. - source_labels: A dictionary of `Tensors` of shape [batch_size]. Valid keys - are 'class' and 'quaternion'. - end_points: A map of the network end points. - hparams: The hyperparameters struct. - num_classes: Number of classes for classifier loss - - Returns: - A `Tensor` representing a loss function. - - Raises: - ValueError: if hparams.transferred_similarity_loss_weight is non-zero but - hparams.transferred_similarity_loss is invalid. - """ - generator_loss = 0 - - ################################################################ - # Adds a loss which encourages the discriminator probabilities # - # to be high (near one). - ################################################################ - - # As per the GAN paper, maximize the log probs, instead of minimizing - # log(1-probs). Since we're minimizing, we'll minimize -log(probs) which is - # the same thing. 
- style_transfer_loss = tf.losses.sigmoid_cross_entropy(
- logits=end_points['transferred_domain_logits'],
- multi_class_labels=tf.ones_like(end_points['transferred_domain_logits']),
- weights=hparams.style_transfer_loss_weight)
- tf.summary.scalar('Style_transfer_loss', style_transfer_loss)
- generator_loss += style_transfer_loss
-
- # Optimizes the style transfer network to produce transferred images similar
- # to the source images.
- generator_loss += _transferred_similarity_loss(
- end_points['transferred_images'],
- source_images,
- weight=hparams.transferred_similarity_loss_weight,
- method=hparams.transferred_similarity_loss,
- name='transferred_similarity')
-
- # Optimizes the style transfer network to maximize classification accuracy.
- if source_labels is not None and hparams.task_tower_in_g_step:
- generator_loss += _add_task_specific_losses(
- end_points, source_labels, num_classes,
- hparams) * hparams.task_loss_in_g_weight
-
- return generator_loss
-
-
-def d_step_loss(end_points, source_labels, num_classes, hparams):
- """Configures the losses during the D-Step.
-
- Note that during the D-step, the model optimizes both the domain (binary)
- classifier and the task classifier.
-
- Args:
- end_points: A map of the network end points.
- source_labels: A dictionary of output labels to `Tensors`.
- num_classes: The number of classes used by the classifier.
- hparams: The hyperparameters struct.
-
- Returns:
- A `Tensor` representing the value of the D-step loss.
- """
- domain_classifier_loss = add_domain_classifier_losses(end_points, hparams)
-
- task_classifier_loss = 0
- if source_labels is not None:
- task_classifier_loss = _add_task_specific_losses(
- end_points, source_labels, num_classes, hparams, add_summaries=True)
-
- return domain_classifier_loss + task_classifier_loss
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py
deleted file mode 100644
index 16b550a62..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_model.py
+++ /dev/null
@@ -1,713 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Contains the Domain Adaptation via Style Transfer (PixelDA) model components.
-
-A number of details in the implementation make reference to the following
-work:
-
-- "Unsupervised Representation Learning with Deep Convolutional
- Generative Adversarial Networks"
- https://arxiv.org/abs/1511.06434
-
-This paper makes several architecture recommendations:
-1. Use strided convs in discriminator, fractional-strided convs in generator
-2. batchnorm everywhere
-3. remove fully connected layers for deep models
-4. ReLu for all layers in generator, except tanh on output
-5. 
LeakyReLu for everything in discriminator -""" -import functools -import math - -# Dependency imports -import numpy as np - -import tensorflow as tf - -slim = tf.contrib.slim - -from domain_adaptation.pixel_domain_adaptation import pixelda_task_towers - - -def create_model(hparams, - target_images, - source_images=None, - source_labels=None, - is_training=False, - noise=None, - num_classes=None): - """Create a GAN model. - - Arguments: - hparams: HParam object specifying model params - target_images: A `Tensor` of size [batch_size, height, width, channels]. It - is assumed that the images are [-1, 1] normalized. - source_images: A `Tensor` of size [batch_size, height, width, channels]. It - is assumed that the images are [-1, 1] normalized. - source_labels: A `Tensor` of size [batch_size] of categorical labels between - [0, num_classes] - is_training: whether model is currently training - noise: If None, model generates its own noise. Otherwise use provided. - num_classes: Number of classes for classification - - Returns: - end_points dict with model outputs - - Raises: - ValueError: unknown hparams.arch setting - """ - if num_classes is None and hparams.arch in ['resnet', 'simple']: - raise ValueError('Num classes must be provided to create task classifier') - - if target_images.dtype != tf.float32: - raise ValueError('target_images must be tf.float32 and [-1, 1] normalized.') - if source_images is not None and source_images.dtype != tf.float32: - raise ValueError('source_images must be tf.float32 and [-1, 1] normalized.') - - ########################### - # Create latent variables # - ########################### - latent_vars = dict() - - if hparams.noise_channel: - noise_shape = [hparams.batch_size, hparams.noise_dims] - if noise is not None: - assert noise.shape.as_list() == noise_shape - tf.logging.info('Using provided noise') - else: - tf.logging.info('Using random noise') - noise = tf.random_uniform( - shape=noise_shape, - minval=-1, - maxval=1, - dtype=tf.float32, - name='random_noise') - latent_vars['noise'] = noise - - #################### - # Create generator # - #################### - - with slim.arg_scope( - [slim.conv2d, slim.conv2d_transpose, slim.fully_connected], - normalizer_params=batch_norm_params(is_training, - hparams.batch_norm_decay), - weights_initializer=tf.random_normal_initializer( - stddev=hparams.normal_init_std), - weights_regularizer=tf.contrib.layers.l2_regularizer( - hparams.weight_decay)): - with slim.arg_scope([slim.conv2d], padding='SAME'): - if hparams.arch == 'dcgan': - end_points = dcgan( - target_images, latent_vars, hparams, scope='generator') - elif hparams.arch == 'resnet': - end_points = resnet_generator( - source_images, - target_images.shape.as_list()[1:4], - hparams=hparams, - latent_vars=latent_vars) - elif hparams.arch == 'residual_interpretation': - end_points = residual_interpretation_generator( - source_images, is_training=is_training, hparams=hparams) - elif hparams.arch == 'simple': - end_points = simple_generator( - source_images, - target_images, - is_training=is_training, - hparams=hparams, - latent_vars=latent_vars) - elif hparams.arch == 'identity': - # Pass through unmodified, besides changing # channels - # Used to calculate baseline numbers - # Also set `generator_steps=0` for baseline - if hparams.generator_steps: - raise ValueError('Must set generator_steps=0 for identity arch. 
Is %s' - % hparams.generator_steps) - transferred_images = source_images - source_channels = source_images.shape.as_list()[-1] - target_channels = target_images.shape.as_list()[-1] - if source_channels == 1 and target_channels == 3: - transferred_images = tf.tile(source_images, [1, 1, 1, 3]) - if source_channels == 3 and target_channels == 1: - transferred_images = tf.image.rgb_to_grayscale(source_images) - end_points = {'transferred_images': transferred_images} - else: - raise ValueError('Unknown architecture: %s' % hparams.arch) - - ##################### - # Domain Classifier # - ##################### - if hparams.arch in [ - 'dcgan', 'resnet', 'residual_interpretation', 'simple', 'identity', - ]: - - # Add a discriminator for these architectures - end_points['transferred_domain_logits'] = predict_domain( - end_points['transferred_images'], - hparams, - is_training=is_training, - reuse=False) - end_points['target_domain_logits'] = predict_domain( - target_images, - hparams, - is_training=is_training, - reuse=True) - - ################### - # Task Classifier # - ################### - if hparams.task_tower != 'none' and hparams.arch in [ - 'resnet', 'residual_interpretation', 'simple', 'identity', - ]: - with tf.variable_scope('discriminator'): - with tf.variable_scope('task_tower'): - end_points['source_task_logits'], end_points[ - 'source_quaternion'] = pixelda_task_towers.add_task_specific_model( - source_images, - hparams, - num_classes=num_classes, - is_training=is_training, - reuse_private=False, - private_scope='source_task_classifier', - reuse_shared=False) - end_points['transferred_task_logits'], end_points[ - 'transferred_quaternion'] = ( - pixelda_task_towers.add_task_specific_model( - end_points['transferred_images'], - hparams, - num_classes=num_classes, - is_training=is_training, - reuse_private=False, - private_scope='transferred_task_classifier', - reuse_shared=True)) - end_points['target_task_logits'], end_points[ - 'target_quaternion'] = pixelda_task_towers.add_task_specific_model( - target_images, - hparams, - num_classes=num_classes, - is_training=is_training, - reuse_private=True, - private_scope='transferred_task_classifier', - reuse_shared=True) - # Remove any endpoints with None values - return dict((k, v) for k, v in end_points.iteritems() if v is not None) - - -def batch_norm_params(is_training, batch_norm_decay): - return { - 'is_training': is_training, - # Decay for the moving averages. - 'decay': batch_norm_decay, - # epsilon to prevent 0s in variance. - 'epsilon': 0.001, - } - - -def lrelu(x, leakiness=0.2): - """Relu, with optional leaky support.""" - return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') - - -def upsample(net, num_filters, scale=2, method='resize_conv', scope=None): - """Performs spatial upsampling of the given features. - - Args: - net: A `Tensor` of shape [batch_size, height, width, filters]. - num_filters: The number of output filters. - scale: The scale of the upsampling. Must be a positive integer greater or - equal to two. - method: The method by which the features are upsampled. Valid options - include 'resize_conv' and 'conv2d_transpose'. - scope: An optional variable scope. - - Returns: - A new set of features of shape - [batch_size, height*scale, width*scale, num_filters]. 
-
- Raises:
- ValueError: if `method` is not valid or `scale` is less than two.
- """
- if scale < 2:
- raise ValueError('scale must be greater or equal to two.')
-
- with tf.variable_scope(scope, 'upsample', [net]):
- if method == 'resize_conv':
- net = tf.image.resize_nearest_neighbor(
- net, [net.shape.as_list()[1] * scale,
- net.shape.as_list()[2] * scale],
- align_corners=True,
- name='resize')
- return slim.conv2d(net, num_filters, stride=1, scope='conv')
- elif method == 'conv2d_transpose':
- return slim.conv2d_transpose(net, num_filters, scope='deconv')
- else:
- raise ValueError('Upsample method [%s] was not recognized.' % method)
-
-
-def project_latent_vars(hparams, proj_shape, latent_vars, combine_method='sum'):
- """Generate noise and project to input volume size.
-
- Args:
- hparams: The hyperparameter HParams struct.
- proj_shape: Shape to project noise (not including batch size).
- latent_vars: dictionary of `'key': Tensor of shape [batch_size, N]`
- combine_method: How to combine the projected values.
- sum = project to volume then sum
- concat = concatenate along last dimension (i.e. channel)
-
- Returns:
- If combine_method=sum, a `Tensor` of size `hparams.projection_shape`
- If combine_method=concat and there are N latent vars, a `Tensor` of size
- `hparams.projection_shape`, with the last channel multiplied by N
-
- Raises:
- ValueError: if combine_method is not one of sum/concat.
- """
- values = []
- for var in latent_vars:
- with tf.variable_scope(var):
- # Project & reshape noise to a HxWxC input
- projected = slim.fully_connected(
- latent_vars[var],
- np.prod(proj_shape),
- activation_fn=tf.nn.relu,
- normalizer_fn=slim.batch_norm)
- values.append(tf.reshape(projected, [hparams.batch_size] + proj_shape))
-
- if combine_method == 'sum':
- result = values[0]
- for value in values[1:]:
- result += value
- elif combine_method == 'concat':
- # Concatenate along last axis
- result = tf.concat(values, len(proj_shape))
- else:
- raise ValueError('Unknown combine_method %s' % combine_method)
-
- tf.logging.info('Latent variables projected to size %s volume', result.shape)
-
- return result
-
-
-def resnet_block(net, hparams):
- """Create a resnet block."""
- net_in = net
- net = slim.conv2d(
- net,
- hparams.resnet_filters,
- stride=1,
- normalizer_fn=slim.batch_norm,
- activation_fn=tf.nn.relu)
- net = slim.conv2d(
- net,
- hparams.resnet_filters,
- stride=1,
- normalizer_fn=slim.batch_norm,
- activation_fn=None)
- if hparams.resnet_residuals:
- net += net_in
- return net
-
-
-def resnet_stack(images, output_shape, hparams, scope=None):
- """Create a resnet style transfer block.
-
- Args:
- images: [batch-size, height, width, channels] image tensor to feed as input
- output_shape: output image shape in form [height, width, channels]
- hparams: hparams object
- scope: Variable scope
-
- Returns:
- Images after processing with resnet blocks.
- """ - end_points = {} - if hparams.noise_channel: - # separate the noise for visualization - end_points['noise'] = images[:, :, :, -1] - assert images.shape.as_list()[1:3] == output_shape[0:2] - - with tf.variable_scope(scope, 'resnet_style_transfer', [images]): - with slim.arg_scope( - [slim.conv2d], - normalizer_fn=slim.batch_norm, - kernel_size=[hparams.generator_kernel_size] * 2, - stride=1): - net = slim.conv2d( - images, - hparams.resnet_filters, - normalizer_fn=None, - activation_fn=tf.nn.relu) - for block in range(hparams.resnet_blocks): - net = resnet_block(net, hparams) - end_points['resnet_block_{}'.format(block)] = net - - net = slim.conv2d( - net, - output_shape[-1], - kernel_size=[1, 1], - normalizer_fn=None, - activation_fn=tf.nn.tanh, - scope='conv_out') - end_points['transferred_images'] = net - return net, end_points - - -def predict_domain(images, - hparams, - is_training=False, - reuse=False, - scope='discriminator'): - """Creates a discriminator for a GAN. - - Args: - images: A `Tensor` of size [batch_size, height, width, channels]. It is - assumed that the images are centered between -1 and 1. - hparams: hparam object with params for discriminator - is_training: Specifies whether or not we're training or testing. - reuse: Whether to reuse variable scope - scope: An optional variable_scope. - - Returns: - [batch size, 1] - logit output of discriminator. - """ - with tf.variable_scope(scope, 'discriminator', [images], reuse=reuse): - lrelu_partial = functools.partial(lrelu, leakiness=hparams.lrelu_leakiness) - with slim.arg_scope( - [slim.conv2d], - kernel_size=[hparams.discriminator_kernel_size] * 2, - activation_fn=lrelu_partial, - stride=2, - normalizer_fn=slim.batch_norm): - - def add_noise(hidden, scope_num=None): - if scope_num: - hidden = slim.dropout( - hidden, - hparams.discriminator_dropout_keep_prob, - is_training=is_training, - scope='dropout_%s' % scope_num) - if hparams.discriminator_noise_stddev == 0: - return hidden - return hidden + tf.random_normal( - hidden.shape.as_list(), - mean=0.0, - stddev=hparams.discriminator_noise_stddev) - - # As per the recommendation of the DCGAN paper, we don't use batch norm - # on the discriminator input (https://arxiv.org/pdf/1511.06434v2.pdf). 
-      if hparams.discriminator_image_noise:
-        images = add_noise(images)
-      net = slim.conv2d(
-          images,
-          hparams.num_discriminator_filters,
-          normalizer_fn=None,
-          stride=hparams.discriminator_first_stride,
-          scope='conv1_stride%s' % hparams.discriminator_first_stride)
-      net = add_noise(net, 1)
-
-      block_id = 2
-      # Repeatedly stack
-      #   discriminator_conv_block_size-1 conv layers with stride 1
-      #   followed by a stride 2 layer
-      # Add (optional) noise at every point
-      while net.shape.as_list()[1] > hparams.projection_shape_size:
-        num_filters = int(hparams.num_discriminator_filters *
-                          (hparams.discriminator_filter_factor**(block_id - 1)))
-        for conv_id in range(1, hparams.discriminator_conv_block_size):
-          net = slim.conv2d(
-              net,
-              num_filters,
-              stride=1,
-              scope='conv_%s_%s' % (block_id, conv_id))
-        if hparams.discriminator_do_pooling:
-          net = slim.conv2d(
-              net, num_filters, scope='conv_%s_prepool' % block_id)
-          net = slim.avg_pool2d(
-              net, kernel_size=[2, 2], stride=2, scope='pool_%s' % block_id)
-        else:
-          net = slim.conv2d(
-              net, num_filters, scope='conv_%s_stride2' % block_id)
-        net = add_noise(net, block_id)
-        block_id += 1
-      net = slim.flatten(net)
-      net = slim.fully_connected(
-          net,
-          1,
-          # Models with BN here generally produce noise
-          normalizer_fn=None,
-          activation_fn=None,
-          scope='fc_logit_out')  # Returns logits!
-  return net
-
-
-def dcgan_generator(images, output_shape, hparams, scope=None):
-  """Transforms the visual style of the input images.
-
-  Args:
-    images: A `Tensor` of shape [batch_size, height, width, channels].
-    output_shape: A list or tuple of 3 elements: the output height, width and
-      number of channels.
-    hparams: hparams object with generator parameters
-    scope: Scope to place generator inside
-
-  Returns:
-    A `Tensor` of shape [batch_size, height, width, output_channels] which
-    represents the result of style transfer.
-
-  Raises:
-    ValueError: If `output_shape` is not a list or tuple or if it doesn't have
-      three elements or if `output_shape` or `images` aren't square.
-  """
-  if not isinstance(output_shape, (tuple, list)):
-    raise ValueError('output_shape must be a tuple or list.')
-  elif len(output_shape) != 3:
-    raise ValueError('output_shape must have three elements.')
-
-  if output_shape[0] != output_shape[1]:
-    raise ValueError('output_shape must be square')
-  if images.shape.as_list()[1] != images.shape.as_list()[2]:
-    raise ValueError('images height and width must match.')
-
-  outdim = output_shape[0]
-  indim = images.shape.as_list()[1]
-  num_iterations = int(math.ceil(math.log(float(outdim) / float(indim), 2.0)))
-
-  with slim.arg_scope(
-      [slim.conv2d, slim.conv2d_transpose],
-      kernel_size=[hparams.generator_kernel_size] * 2,
-      stride=2):
-    with tf.variable_scope(scope or 'generator'):
-
-      net = images
-
-      # Repeatedly halve the filter count, ending with
-      # hparams.num_decoder_filters in the last deconv layer.
-      for i in range(num_iterations):
-        num_filters = hparams.num_decoder_filters * 2**(num_iterations - i - 1)
-        net = slim.conv2d_transpose(net, num_filters, scope='deconv_%s' % i)
-
-      # Crop down to desired size (e.g. 32x32 -> 28x28). Use integer
-      # division so the slice indices stay integers.
-      dif = net.shape.as_list()[1] - outdim
-      low = dif // 2
-      high = net.shape.as_list()[1] - low
-      net = net[:, low:high, low:high, :]
-
-      # No batch norm on generator output
-      net = slim.conv2d(
-          net,
-          output_shape[2],
-          kernel_size=[1, 1],
-          stride=1,
-          normalizer_fn=None,
-          activation_fn=tf.tanh,
-          scope='conv_out')
-  return net
-
-
-def dcgan(target_images, latent_vars, hparams, scope='dcgan'):
-  """Creates the PixelDA model.
- - Args: - target_images: A `Tensor` of shape [batch_size, height, width, 3] - sampled from the image domain to which we want to transfer. - latent_vars: dictionary of 'key': Tensor of shape [batch_size, N] - hparams: The hyperparameter map. - scope: Surround generator component with this scope - - Returns: - A dictionary of model outputs. - """ - proj_shape = [ - hparams.projection_shape_size, hparams.projection_shape_size, - hparams.projection_shape_channels - ] - source_volume = project_latent_vars( - hparams, proj_shape, latent_vars, combine_method='concat') - - ################################################### - # Transfer the source images to the target style. # - ################################################### - with tf.variable_scope(scope, 'generator', [target_images]): - transferred_images = dcgan_generator( - source_volume, - output_shape=target_images.shape.as_list()[1:4], - hparams=hparams) - assert transferred_images.shape.as_list() == target_images.shape.as_list() - - return {'transferred_images': transferred_images} - - -def resnet_generator(images, output_shape, hparams, latent_vars=None): - """Creates a ResNet-based generator. - - Args: - images: A `Tensor` of shape [batch_size, height, width, num_channels] - sampled from the image domain from which we want to transfer - output_shape: A length-3 array indicating the height, width and channels of - the output. - hparams: The hyperparameter map. - latent_vars: dictionary of 'key': Tensor of shape [batch_size, N] - - Returns: - A dictionary of model outputs. - """ - with tf.variable_scope('generator'): - if latent_vars: - noise_channel = project_latent_vars( - hparams, - proj_shape=images.shape.as_list()[1:3] + [1], - latent_vars=latent_vars, - combine_method='concat') - images = tf.concat([images, noise_channel], 3) - - transferred_images, end_points = resnet_stack( - images, - output_shape=output_shape, - hparams=hparams, - scope='resnet_stack') - end_points['transferred_images'] = transferred_images - - return end_points - - -def residual_interpretation_block(images, hparams, scope): - """Learns a residual image which is added to the incoming image. - - Args: - images: A `Tensor` of size [batch_size, height, width, 3] - hparams: The hyperparameters struct. - scope: The name of the variable op scope. - - Returns: - The updated images. - """ - with tf.variable_scope(scope): - with slim.arg_scope( - [slim.conv2d], - normalizer_fn=None, - kernel_size=[hparams.generator_kernel_size] * 2): - - net = images - for _ in range(hparams.res_int_convs): - net = slim.conv2d( - net, hparams.res_int_filters, activation_fn=tf.nn.relu) - net = slim.conv2d(net, 3, activation_fn=tf.nn.tanh) - - # Add the residual - images += net - - # Clip the output - images = tf.maximum(images, -1.0) - images = tf.minimum(images, 1.0) - return images - - -def residual_interpretation_generator(images, - is_training, - hparams, - latent_vars=None): - """Creates a generator producing purely residual transformations. - - A residual generator differs from the resnet generator in that each 'block' of - the residual generator produces a residual image. Consequently, the 'progress' - of the model generation process can be directly observed at inference time, - making it easier to diagnose and understand. - - Args: - images: A `Tensor` of shape [batch_size, height, width, num_channels] - sampled from the image domain from which we want to transfer. It is - assumed that the images are centered between -1 and 1. 
-    is_training: whether or not the model is training.
-    hparams: The hyperparameter map.
-    latent_vars: dictionary of 'key': Tensor of shape [batch_size, N]
-
-  Returns:
-    A dictionary of model outputs.
-  """
-  end_points = {}
-
-  with tf.variable_scope('generator'):
-    if latent_vars:
-      projected_latent = project_latent_vars(
-          hparams,
-          proj_shape=images.shape.as_list()[1:3] + [images.shape.as_list()[-1]],
-          latent_vars=latent_vars,
-          combine_method='sum')
-      images += projected_latent
-    with tf.variable_scope(None, 'residual_style_transfer', [images]):
-      for i in range(hparams.res_int_blocks):
-        images = residual_interpretation_block(images, hparams,
-                                               'residual_%d' % i)
-        end_points['transferred_images_%d' % i] = images
-
-      end_points['transferred_images'] = images
-
-  return end_points
-
-
-def simple_generator(source_images, target_images, is_training, hparams,
-                     latent_vars):
-  """Simple generator architecture (stack of convs) for trying small models."""
-  end_points = {}
-  with tf.variable_scope('generator'):
-    feed_source_images = source_images
-
-    if latent_vars:
-      projected_latent = project_latent_vars(
-          hparams,
-          proj_shape=source_images.shape.as_list()[1:3] + [1],
-          latent_vars=latent_vars,
-          combine_method='concat')
-      feed_source_images = tf.concat([source_images, projected_latent], 3)
-
-    ###################################################
-    # Transfer the source images to the target style. #
-    ###################################################
-    with slim.arg_scope(
-        [slim.conv2d],
-        normalizer_fn=slim.batch_norm,
-        stride=1,
-        kernel_size=[hparams.generator_kernel_size] * 2):
-      net = feed_source_images
-
-      # N convolutions, skipping batch norm on the first layer.
-      for i in range(hparams.simple_num_conv_layers):
-        normalizer_fn = None
-        if i != 0:
-          normalizer_fn = slim.batch_norm
-        net = slim.conv2d(
-            net,
-            hparams.simple_conv_filters,
-            normalizer_fn=normalizer_fn,
-            activation_fn=tf.nn.relu)
-
-      # Project back to the right number of image channels.
-      net = slim.conv2d(
-          net,
-          target_images.shape.as_list()[-1],
-          kernel_size=[1, 1],
-          stride=1,
-          normalizer_fn=None,
-          activation_fn=tf.tanh,
-          scope='conv_out')
-
-    transferred_images = net
-    assert transferred_images.shape.as_list() == target_images.shape.as_list()
-    end_points['transferred_images'] = transferred_images
-
-  return end_points
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py
deleted file mode 100644
index 747c17b18..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Contains functions for preprocessing the inputs."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-
-import tensorflow as tf
-
-
-def preprocess_classification(image, labels, is_training=False):
-  """Preprocesses the image and labels for classification purposes.
-
-  Preprocessing includes shifting the images to be 0-centered between -1 and 1.
-  This is not only a popular method of preprocessing (inception) but is also
-  the mechanism used by DSNs.
-
-  Args:
-    image: A `Tensor` of size [height, width, 3].
-    labels: A dictionary of labels.
-    is_training: Whether or not we're training the model.
-
-  Returns:
-    The preprocessed image and labels.
-  """
-  # If the image is uint8, this will scale it to 0-1.
-  image = tf.image.convert_image_dtype(image, tf.float32)
-  image -= 0.5
-  image *= 2
-
-  return image, labels
-
-
-def preprocess_style_transfer(image,
-                              labels,
-                              augment=False,
-                              size=None,
-                              is_training=False):
-  """Preprocesses the image and labels for style transfer purposes.
-
-  Args:
-    image: A `Tensor` of size [height, width, 3].
-    labels: A dictionary of labels.
-    augment: Whether to apply data augmentation to inputs
-    size: The height and width to which images should be resized. If left as
-      `None`, then no resizing is performed
-    is_training: Whether or not we're training the model
-
-  Returns:
-    The preprocessed image and labels. Scaled to [-1, 1].
-  """
-  # If the image is uint8, this will scale it to 0-1.
-  image = tf.image.convert_image_dtype(image, tf.float32)
-  if augment and is_training:
-    image = image_augmentation(image)
-
-  if size:
-    image = resize_image(image, size)
-
-  image -= 0.5
-  image *= 2
-
-  return image, labels
-
-
-def image_augmentation(image):
-  """Performs data augmentation by randomly permuting the inputs.
-
-  Args:
-    image: A float `Tensor` of size [height, width, channels] with values
-      in range [0, 1].
-
-  Returns:
-    The mutated image.
-  """
-  # Apply photometric data augmentation (contrast etc.)
-  num_channels = image.shape.as_list()[-1]
-  if num_channels == 4:
-    # Only augment image part
-    image, depth = image[:, :, 0:3], image[:, :, 3:4]
-  elif num_channels == 1:
-    image = tf.image.grayscale_to_rgb(image)
-  image = tf.image.random_brightness(image, max_delta=0.1)
-  image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
-  image = tf.image.random_hue(image, max_delta=0.032)
-  image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
-  image = tf.clip_by_value(image, 0, 1.0)
-  if num_channels == 4:
-    image = tf.concat([image, depth], 2)
-  elif num_channels == 1:
-    image = tf.image.rgb_to_grayscale(image)
-  return image
-
-
-def resize_image(image, size=None):
-  """Resize image to target size.
-
-  Args:
-    image: A `Tensor` of size [height, width, 3].
-    size: (height, width) to resize image to.
-
-  Returns:
-    resized image
-
-  Raises:
-    ValueError: if `size` is None.
-  """
-  if size is None:
-    raise ValueError('Must specify size')
-
-  if image.shape.as_list()[:2] == list(size):
-    # Don't resize if not necessary
-    return image
-  image = tf.expand_dims(image, 0)
-  image = tf.image.resize_images(image, size)
-  image = tf.squeeze(image, 0)
-  return image
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py
deleted file mode 100644
index 73f8c7ff0..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_preprocess_test.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for domain_adaptation.pixel_domain_adaptation.pixelda_preprocess."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-
-import tensorflow as tf
-
-from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess
-
-
-class PixelDAPreprocessTest(tf.test.TestCase):
-
-  def assert_preprocess_classification_is_centered(self, dtype, is_training):
-    tf.set_random_seed(0)
-
-    if dtype == tf.uint8:
-      image = tf.random_uniform((100, 200, 3), maxval=255, dtype=tf.int64)
-      image = tf.cast(image, tf.uint8)
-    else:
-      image = tf.random_uniform((100, 200, 3), maxval=1.0, dtype=dtype)
-
-    labels = {}
-    image, labels = pixelda_preprocess.preprocess_classification(
-        image, labels, is_training=is_training)
-
-    with self.test_session() as sess:
-      np_image = sess.run(image)
-
-      self.assertTrue(np_image.min() <= -0.95)
-      self.assertTrue(np_image.min() >= -1.0)
-      self.assertTrue(np_image.max() >= 0.95)
-      self.assertTrue(np_image.max() <= 1.0)
-
-  def testPreprocessClassificationZeroCentersUint8DuringTrain(self):
-    self.assert_preprocess_classification_is_centered(
-        tf.uint8, is_training=True)
-
-  def testPreprocessClassificationZeroCentersUint8DuringTest(self):
-    self.assert_preprocess_classification_is_centered(
-        tf.uint8, is_training=False)
-
-  def testPreprocessClassificationZeroCentersFloatDuringTrain(self):
-    self.assert_preprocess_classification_is_centered(
-        tf.float32, is_training=True)
-
-  def testPreprocessClassificationZeroCentersFloatDuringTest(self):
-    self.assert_preprocess_classification_is_centered(
-        tf.float32, is_training=False)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py
deleted file mode 100644
index 1cb42e2d8..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Task towers for PixelDA model."""
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-
-def add_task_specific_model(images,
-                            hparams,
-                            num_classes=10,
-                            is_training=False,
-                            reuse_private=False,
-                            private_scope=None,
-                            reuse_shared=False,
-                            shared_scope=None):
-  """Create a classifier for the given images.
-
-  The classifier is composed of a few 'private' layers followed by a few
-  'shared' layers. This lets us account for different image 'style', while
-  sharing the last few layers as 'content' layers.
-
-  Args:
-    images: A `Tensor` of size [batch_size, height, width, 3].
-    hparams: model hparams
-    num_classes: The number of output classes.
-    is_training: whether model is training
-    reuse_private: Whether or not to reuse the private weights, which are the
-      first few layers in the classifier
-    private_scope: The name of the variable_scope for the private (unshared)
-      components of the classifier.
-    reuse_shared: Whether or not to reuse the shared weights, which are the last
-      few layers in the classifier
-    shared_scope: The name of the variable_scope for the shared components of
-      the classifier.
-
-  Returns:
-    The logits, a `Tensor` of shape [batch_size, num_classes], and the
-    quaternion prediction (or None if the tower does not predict pose).
-
-  Raises:
-    ValueError: If hparams.task_tower is an unknown value
-  """
-
-  model = hparams.task_tower
-  # Make sure the classifier name shows up in graph
-  shared_scope = shared_scope or (model + '_shared')
-  kwargs = {
-      'num_classes': num_classes,
-      'is_training': is_training,
-      'reuse_private': reuse_private,
-      'reuse_shared': reuse_shared,
-  }
-
-  if private_scope:
-    kwargs['private_scope'] = private_scope
-  if shared_scope:
-    kwargs['shared_scope'] = shared_scope
-
-  quaternion_pred = None
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      activation_fn=tf.nn.relu,
-      weights_regularizer=tf.contrib.layers.l2_regularizer(
-          hparams.weight_decay_task_classifier)):
-    with slim.arg_scope([slim.conv2d], padding='SAME'):
-      if model == 'doubling_pose_estimator':
-        logits, quaternion_pred = doubling_cnn_class_and_quaternion(
-            images, num_private_layers=hparams.num_private_layers, **kwargs)
-      elif model == 'mnist':
-        logits, _ = mnist_classifier(images, **kwargs)
-      elif model == 'svhn':
-        logits, _ = svhn_classifier(images, **kwargs)
-      elif model == 'gtsrb':
-        logits, _ = gtsrb_classifier(images, **kwargs)
-      elif model == 'pose_mini':
-        logits, quaternion_pred = pose_mini_tower(images, **kwargs)
-      else:
-        raise ValueError('Unknown task tower %s' % model)
-
-  return logits, quaternion_pred
-
-
-#####################################
-# Classifiers used in the DSN paper #
-#####################################
-
-
-def mnist_classifier(images,
-                     is_training=False,
-                     num_classes=10,
-                     reuse_private=False,
-                     private_scope='mnist',
-                     reuse_shared=False,
-                     shared_scope='task_model'):
-  """Creates the convolutional MNIST model from the gradient reversal paper.
-
-  Note that since the output is a set of 'logits', the values fall in the
-  interval of (-infinity, infinity). Consequently, to convert the outputs to a
-  probability distribution over the characters, one will need to convert them
-  using the softmax function:
-    logits, endpoints = mnist_classifier(images, is_training=False)
-    predictions = tf.nn.softmax(logits)
-
-  Args:
-    images: the MNIST digits, a tensor of size [batch_size, 28, 28, 1].
-    is_training: specifies whether or not we're currently training the model.
-      This variable will determine the behaviour of the dropout layer.
-    num_classes: the number of output classes to use.
-    reuse_private: Whether or not to reuse the private components of the model.
-    private_scope: The name of the private scope.
-    reuse_shared: Whether or not to reuse the shared components of the model.
-    shared_scope: The name of the shared scope.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-
-  net = {}
-
-  with tf.variable_scope(private_scope, reuse=reuse_private):
-    net['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')
-    net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1')
-
-  with tf.variable_scope(shared_scope, reuse=reuse_shared):
-    net['conv2'] = slim.conv2d(net['pool1'], 48, [5, 5], scope='conv2')
-    net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2')
-    net['fc3'] = slim.fully_connected(
-        slim.flatten(net['pool2']), 100, scope='fc3')
-    net['fc4'] = slim.fully_connected(
-        slim.flatten(net['fc3']), 100, scope='fc4')
-    logits = slim.fully_connected(
-        net['fc4'], num_classes, activation_fn=None, scope='fc5')
-  return logits, net
-
-
-def svhn_classifier(images,
-                    is_training=False,
-                    num_classes=10,
-                    reuse_private=False,
-                    private_scope=None,
-                    reuse_shared=False,
-                    shared_scope='task_model'):
-  """Creates the convolutional SVHN model from the gradient reversal paper.
-
-  Note that since the output is a set of 'logits', the values fall in the
-  interval of (-infinity, infinity). Consequently, to convert the outputs to a
-  probability distribution over the characters, one will need to convert them
-  using the softmax function:
-    logits, endpoints = svhn_classifier(images, is_training=False)
-    predictions = tf.nn.softmax(logits)
-
-  Args:
-    images: the SVHN digits, a tensor of size [batch_size, 40, 40, 3].
-    is_training: specifies whether or not we're currently training the model.
-      This variable will determine the behaviour of the dropout layer.
-    num_classes: the number of output classes to use.
-    reuse_private: Whether or not to reuse the private components of the model.
-    private_scope: The name of the private scope.
-    reuse_shared: Whether or not to reuse the shared components of the model.
-    shared_scope: The name of the shared scope.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-
-  net = {}
-
-  with tf.variable_scope(private_scope, reuse=reuse_private):
-    net['conv1'] = slim.conv2d(images, 64, [5, 5], scope='conv1')
-    net['pool1'] = slim.max_pool2d(net['conv1'], [3, 3], 2, scope='pool1')
-
-  with tf.variable_scope(shared_scope, reuse=reuse_shared):
-    net['conv2'] = slim.conv2d(net['pool1'], 64, [5, 5], scope='conv2')
-    net['pool2'] = slim.max_pool2d(net['conv2'], [3, 3], 2, scope='pool2')
-    net['conv3'] = slim.conv2d(net['pool2'], 128, [5, 5], scope='conv3')
-
-    net['fc3'] = slim.fully_connected(
-        slim.flatten(net['conv3']), 3072, scope='fc3')
-    net['fc4'] = slim.fully_connected(
-        slim.flatten(net['fc3']), 2048, scope='fc4')
-
-    logits = slim.fully_connected(
-        net['fc4'], num_classes, activation_fn=None, scope='fc5')
-
-  return logits, net
-
-
-def gtsrb_classifier(images,
-                     is_training=False,
-                     num_classes=43,
-                     reuse_private=False,
-                     private_scope='gtsrb',
-                     reuse_shared=False,
-                     shared_scope='task_model'):
-  """Creates the convolutional GTSRB model from the gradient reversal paper.
-
-  Note that since the output is a set of 'logits', the values fall in the
-  interval of (-infinity, infinity). Consequently, to convert the outputs to a
-  probability distribution over the classes, one will need to convert them
-  using the softmax function:
-    logits, endpoints = gtsrb_classifier(images, is_training=False)
-    predictions = tf.nn.softmax(logits)
-
-  Args:
-    images: the GTSRB traffic sign images, a tensor of size
-      [batch_size, 40, 40, 3].
-    is_training: specifies whether or not we're currently training the model.
-      This variable will determine the behaviour of the dropout layer.
-    num_classes: the number of output classes to use.
-    reuse_private: Whether or not to reuse the private components of the model.
-    private_scope: The name of the private scope.
-    reuse_shared: Whether or not to reuse the shared components of the model.
-    shared_scope: The name of the shared scope.
-
-  Returns:
-    the output logits, a tensor of size [batch_size, num_classes].
-    a dictionary with key/values the layer names and tensors.
-  """
-
-  net = {}
-
-  with tf.variable_scope(private_scope, reuse=reuse_private):
-    net['conv1'] = slim.conv2d(images, 96, [5, 5], scope='conv1')
-    net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1')
-  with tf.variable_scope(shared_scope, reuse=reuse_shared):
-    net['conv2'] = slim.conv2d(net['pool1'], 144, [3, 3], scope='conv2')
-    net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2')
-    net['conv3'] = slim.conv2d(net['pool2'], 256, [5, 5], scope='conv3')
-    net['pool3'] = slim.max_pool2d(net['conv3'], [2, 2], 2, scope='pool3')
-
-    net['fc3'] = slim.fully_connected(
-        slim.flatten(net['pool3']), 512, scope='fc3')
-    logits = slim.fully_connected(
-        net['fc3'], num_classes, activation_fn=None, scope='fc4')
-
-  return logits, net
-
-
-#########################
-# pose_mini task towers #
-#########################
-
-
-def pose_mini_tower(images,
-                    num_classes=11,
-                    is_training=False,
-                    reuse_private=False,
-                    private_scope='pose_mini',
-                    reuse_shared=False,
-                    shared_scope='task_model'):
-  """Task tower for the pose_mini dataset."""
-
-  with tf.variable_scope(private_scope, reuse=reuse_private):
-    net = slim.conv2d(images, 32, [5, 5], scope='conv1')
-    net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1')
-  with tf.variable_scope(shared_scope, reuse=reuse_shared):
-    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
-    net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2')
-    net = slim.flatten(net)
-
-    net = slim.fully_connected(net, 128, scope='fc3')
-    net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout')
-    with tf.variable_scope('quaternion_prediction'):
-      quaternion_pred = slim.fully_connected(
-          net, 4, activation_fn=tf.tanh, scope='fc_q')
-      quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1)
-
-    logits = slim.fully_connected(
-        net, num_classes, activation_fn=None, scope='fc4')
-
-  return logits, quaternion_pred
-
-
-def doubling_cnn_class_and_quaternion(images,
-                                      num_private_layers=1,
-                                      num_classes=10,
-                                      is_training=False,
-                                      reuse_private=False,
-                                      private_scope='doubling_cnn',
-                                      reuse_shared=False,
-                                      shared_scope='task_model'):
-  """Alternate conv, pool while doubling filter count."""
-  net = images
-  depth = 32
-  layer_id = 1
-
-  with tf.variable_scope(private_scope, reuse=reuse_private):
-    while num_private_layers > 0 and net.shape.as_list()[1] > 5:
-      net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id)
-      net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id)
-      depth *= 2
-      layer_id += 1
-      num_private_layers -= 1
-
-  with tf.variable_scope(shared_scope, reuse=reuse_shared):
-    while net.shape.as_list()[1] > 5:
-      net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id)
-      net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id)
-      depth *= 2
-      layer_id += 1
-
-    net = slim.flatten(net)
-    net = slim.fully_connected(net, 100, scope='fc1')
-    net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout')
-    quaternion_pred = slim.fully_connected(
-        net, 4, activation_fn=tf.tanh, scope='fc_q')
-    quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1)
-
-    logits = slim.fully_connected(
-        net, num_classes, activation_fn=None, scope='fc_logits')
-
-  return logits, quaternion_pred
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py
deleted file mode 100644
index 4ca072cce..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_train.py
+++ /dev/null
@@ -1,409 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-r"""Trains the PixelDA model."""
-
-from functools import partial
-import os
-
-# Dependency imports
-
-import tensorflow as tf
-
-from domain_adaptation.datasets import dataset_factory
-from domain_adaptation.pixel_domain_adaptation import pixelda_losses
-from domain_adaptation.pixel_domain_adaptation import pixelda_model
-from domain_adaptation.pixel_domain_adaptation import pixelda_preprocess
-from domain_adaptation.pixel_domain_adaptation import pixelda_utils
-from domain_adaptation.pixel_domain_adaptation.hparams import create_hparams
-
-slim = tf.contrib.slim
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
-
-flags.DEFINE_integer(
-    'ps_tasks', 0,
-    'The number of parameter servers. If the value is 0, then the parameters '
-    'are handled locally by the worker.')
-
-flags.DEFINE_integer(
-    'task', 0,
-    'The Task ID. This value is used when training with multiple workers to '
-    'identify each worker.')
-
-flags.DEFINE_string('train_log_dir', '/tmp/pixelda/',
-                    'Directory where to write event logs.')
-
-flags.DEFINE_integer(
-    'save_summaries_steps', 500,
-    'The frequency with which summaries are saved, in steps.')
-
-flags.DEFINE_integer('save_interval_secs', 300,
-                     'The frequency with which the model is saved, in seconds.')
-
-flags.DEFINE_boolean('summarize_gradients', False,
-                     'Whether to summarize model gradients')
-
-flags.DEFINE_integer(
-    'print_loss_steps', 100,
-    'The frequency with which the losses are printed, in steps.')
-
-flags.DEFINE_string('source_dataset', 'mnist', 'The name of the source dataset.'
-                    ' If hparams="arch=dcgan", this flag is ignored.')
-
-flags.DEFINE_string('target_dataset', 'mnist_m',
-                    'The name of the target dataset.')
-
-flags.DEFINE_string('source_split_name', 'train',
-                    'Name of the train split for the source.')
-
-flags.DEFINE_string('target_split_name', 'train',
-                    'Name of the train split for the target.')
-
-flags.DEFINE_string('dataset_dir', '',
-                    'The directory where the datasets can be found.')
-
-flags.DEFINE_integer(
-    'num_readers', 4,
-    'The number of parallel readers that read data from the dataset.')
-
-flags.DEFINE_integer('num_preprocessing_threads', 4,
-                     'The number of threads used to create the batches.')
-
-# HParams
-
-flags.DEFINE_string('hparams', '', 'Comma separated hyperparameter values')
-
-
-def _get_vars_and_update_ops(hparams, scope):
-  """Returns the variables and update ops for a particular variable scope.
-
-  Args:
-    hparams: The hyperparameters struct.
-    scope: The variable scope.
-
-  Returns:
-    A tuple consisting of trainable variables and update ops.
-  """
-  is_trainable = lambda x: x in tf.trainable_variables()
-  # Wrap in list() so the result is usable under both Python 2 and 3.
-  var_list = list(filter(is_trainable, slim.get_model_variables(scope)))
-  global_step = slim.get_or_create_global_step()
-
-  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
-
-  tf.logging.info('All variables for scope: %s',
-                  slim.get_model_variables(scope))
-  tf.logging.info('Trainable variables for scope: %s', var_list)
-
-  return var_list, update_ops
-
-
-def _train(discriminator_train_op,
-           generator_train_op,
-           logdir,
-           master='',
-           is_chief=True,
-           scaffold=None,
-           hooks=None,
-           chief_only_hooks=None,
-           save_checkpoint_secs=600,
-           save_summaries_steps=100,
-           hparams=None):
-  """Runs the training loop.
-
-  Args:
-    discriminator_train_op: A `Tensor` that, when executed, will apply the
-      gradients and return the loss value for the discriminator.
-    generator_train_op: A `Tensor` that, when executed, will apply the
-      gradients and return the loss value for the generator.
-    logdir: The directory where the graph and checkpoints are saved.
-    master: The URL of the master.
-    is_chief: Specifies whether or not the training is being run by the primary
-      replica during replica training.
-    scaffold: A `tf.train.Scaffold` instance.
-    hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
-      training loop.
-    chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
-      inside the training loop for the chief trainer only.
-    save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
-      using a default checkpoint saver. If `save_checkpoint_secs` is set to
-      `None`, then the default checkpoint saver isn't used.
-    save_summaries_steps: The frequency, in number of global steps, that the
-      summaries are written to disk using a default summary saver. If
-      `save_summaries_steps` is set to `None`, then the default summary saver
-      isn't used.
-    hparams: The hparams struct.
-
-  Returns:
-    the value of the loss function after training.
-
-  Raises:
-    ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or
-      `save_summaries_steps` is not `None`.
-  """
-  global_step = slim.get_or_create_global_step()
-
-  scaffold = scaffold or tf.train.Scaffold()
-
-  hooks = hooks or []
-
-  if is_chief:
-    session_creator = tf.train.ChiefSessionCreator(
-        scaffold=scaffold, checkpoint_dir=logdir, master=master)
-
-    if chief_only_hooks:
-      hooks.extend(chief_only_hooks)
-    hooks.append(tf.train.StepCounterHook(output_dir=logdir))
-
-    if save_summaries_steps:
-      if logdir is None:
-        raise ValueError(
-            'logdir cannot be None when save_summaries_steps is not None')
-      hooks.append(
-          tf.train.SummarySaverHook(
-              scaffold=scaffold,
-              save_steps=save_summaries_steps,
-              output_dir=logdir))
-
-    if save_checkpoint_secs:
-      if logdir is None:
-        raise ValueError(
-            'logdir cannot be None when save_checkpoint_secs is not None')
-      hooks.append(
-          tf.train.CheckpointSaverHook(
-              logdir, save_secs=save_checkpoint_secs, scaffold=scaffold))
-  else:
-    session_creator = tf.train.WorkerSessionCreator(
-        scaffold=scaffold, master=master)
-
-  with tf.train.MonitoredSession(
-      session_creator=session_creator, hooks=hooks) as session:
-    loss = None
-    while not session.should_stop():
-      # Run the discriminator train op hparams.discriminator_steps times.
-      for _ in range(hparams.discriminator_steps):
-        if session.should_stop():
-          return loss
-        loss, np_global_step = session.run(
-            [discriminator_train_op, global_step])
-        if np_global_step % FLAGS.print_loss_steps == 0:
-          tf.logging.info('Step %d: Discriminator Loss = %.2f', np_global_step,
-                          loss)
-
-      # Run the generator train op hparams.generator_steps times.
-      for _ in range(hparams.generator_steps):
-        if session.should_stop():
-          return loss
-        loss, np_global_step = session.run([generator_train_op, global_step])
-        if np_global_step % FLAGS.print_loss_steps == 0:
-          tf.logging.info('Step %d: Generator Loss = %.2f', np_global_step,
-                          loss)
-    return loss
-
-
-def run_training(run_dir, checkpoint_dir, hparams):
-  """Runs the training loop.
-
-  Args:
-    run_dir: The directory where training specific logs are placed
-    checkpoint_dir: The directory where the checkpoints and log files are
-      stored.
-    hparams: The hyperparameters struct.
-
-  Raises:
-    ValueError: if hparams.arch is not recognized.
-  """
-  for path in [run_dir, checkpoint_dir]:
-    if not tf.gfile.Exists(path):
-      tf.gfile.MakeDirs(path)
-
-  # Serialize hparams to log dir
-  hparams_filename = os.path.join(checkpoint_dir, 'hparams.json')
-  with tf.gfile.FastGFile(hparams_filename, 'w') as f:
-    f.write(hparams.to_json())
-
-  with tf.Graph().as_default():
-    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
-      global_step = slim.get_or_create_global_step()
-
-      #########################
-      # Preprocess the inputs #
-      #########################
-      target_dataset = dataset_factory.get_dataset(
-          FLAGS.target_dataset,
-          split_name='train',
-          dataset_dir=FLAGS.dataset_dir)
-      target_images, _ = dataset_factory.provide_batch(
-          FLAGS.target_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
-          hparams.batch_size, FLAGS.num_preprocessing_threads)
-      num_target_classes = target_dataset.num_classes
-
-      if hparams.arch not in ['dcgan']:
-        source_dataset = dataset_factory.get_dataset(
-            FLAGS.source_dataset,
-            split_name='train',
-            dataset_dir=FLAGS.dataset_dir)
-        num_source_classes = source_dataset.num_classes
-        source_images, source_labels = dataset_factory.provide_batch(
-            FLAGS.source_dataset, 'train', FLAGS.dataset_dir, FLAGS.num_readers,
-            hparams.batch_size, FLAGS.num_preprocessing_threads)
-        # Data provider provides 1 hot labels, but we expect categorical.
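-        # tf.argmax over the class dimension converts the one-hot 'classes'
-        # entry into integer class ids stored under a new 'class' key.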
- source_labels['class'] = tf.argmax(source_labels['classes'], 1) - del source_labels['classes'] - if num_source_classes != num_target_classes: - raise ValueError( - 'Source and Target datasets must have same number of classes. ' - 'Are %d and %d' % (num_source_classes, num_target_classes)) - else: - source_images = None - source_labels = None - - #################### - # Define the model # - #################### - end_points = pixelda_model.create_model( - hparams, - target_images, - source_images=source_images, - source_labels=source_labels, - is_training=True, - num_classes=num_target_classes) - - ################################# - # Get the variables to optimize # - ################################# - generator_vars, generator_update_ops = _get_vars_and_update_ops( - hparams, 'generator') - discriminator_vars, discriminator_update_ops = _get_vars_and_update_ops( - hparams, 'discriminator') - - ######################## - # Configure the losses # - ######################## - generator_loss = pixelda_losses.g_step_loss( - source_images, - source_labels, - end_points, - hparams, - num_classes=num_target_classes) - discriminator_loss = pixelda_losses.d_step_loss( - end_points, source_labels, num_target_classes, hparams) - - ########################### - # Create the training ops # - ########################### - learning_rate = hparams.learning_rate - if hparams.lr_decay_steps: - learning_rate = tf.train.exponential_decay( - learning_rate, - slim.get_or_create_global_step(), - decay_steps=hparams.lr_decay_steps, - decay_rate=hparams.lr_decay_rate, - staircase=True) - tf.summary.scalar('Learning_rate', learning_rate) - - - if hparams.discriminator_steps == 0: - discriminator_train_op = tf.no_op() - else: - discriminator_optimizer = tf.train.AdamOptimizer( - learning_rate, beta1=hparams.adam_beta1) - - discriminator_train_op = slim.learning.create_train_op( - discriminator_loss, - discriminator_optimizer, - update_ops=discriminator_update_ops, - variables_to_train=discriminator_vars, - clip_gradient_norm=hparams.clip_gradient_norm, - summarize_gradients=FLAGS.summarize_gradients) - - if hparams.generator_steps == 0: - generator_train_op = tf.no_op() - else: - generator_optimizer = tf.train.AdamOptimizer( - learning_rate, beta1=hparams.adam_beta1) - generator_train_op = slim.learning.create_train_op( - generator_loss, - generator_optimizer, - update_ops=generator_update_ops, - variables_to_train=generator_vars, - clip_gradient_norm=hparams.clip_gradient_norm, - summarize_gradients=FLAGS.summarize_gradients) - - ############# - # Summaries # - ############# - pixelda_utils.summarize_model(end_points) - pixelda_utils.summarize_transferred_grid( - end_points['transferred_images'], source_images, name='Transferred') - if 'source_images_recon' in end_points: - pixelda_utils.summarize_transferred_grid( - end_points['source_images_recon'], - source_images, - name='Source Reconstruction') - pixelda_utils.summaries_color_distributions(end_points['transferred_images'], - 'Transferred') - pixelda_utils.summaries_color_distributions(target_images, 'Target') - - if source_images is not None: - pixelda_utils.summarize_transferred(source_images, - end_points['transferred_images']) - pixelda_utils.summaries_color_distributions(source_images, 'Source') - pixelda_utils.summaries_color_distributions( - tf.abs(source_images - end_points['transferred_images']), - 'Abs(Source_minus_Transferred)') - - number_of_steps = None - if hparams.num_training_examples: - # Want to control by amount of data seen, not # steps 
-      # Use integer division so number_of_steps stays an integer step count.
-      number_of_steps = hparams.num_training_examples // hparams.batch_size
-
-    hooks = [tf.train.StepCounterHook(),]
-
-    chief_only_hooks = [
-        tf.train.CheckpointSaverHook(
-            saver=tf.train.Saver(),
-            checkpoint_dir=run_dir,
-            save_secs=FLAGS.save_interval_secs)
-    ]
-
-    if number_of_steps:
-      hooks.append(tf.train.StopAtStepHook(last_step=number_of_steps))
-
-    _train(
-        discriminator_train_op,
-        generator_train_op,
-        logdir=run_dir,
-        master=FLAGS.master,
-        is_chief=FLAGS.task == 0,
-        hooks=hooks,
-        chief_only_hooks=chief_only_hooks,
-        save_checkpoint_secs=None,
-        save_summaries_steps=FLAGS.save_summaries_steps,
-        hparams=hparams)
-
-
-def main(_):
-  tf.logging.set_verbosity(tf.logging.INFO)
-  hparams = create_hparams(FLAGS.hparams)
-  run_training(
-      run_dir=FLAGS.train_log_dir,
-      checkpoint_dir=FLAGS.train_log_dir,
-      hparams=hparams)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py b/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py
deleted file mode 100644
index 28e8006f2..000000000
--- a/research/domain_adaptation/pixel_domain_adaptation/pixelda_utils.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2017 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utilities for PixelDA model."""
-import math
-
-# Dependency imports
-
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-flags = tf.app.flags
-FLAGS = flags.FLAGS
-
-
-def remove_depth(images):
-  """Takes a batch of images and removes the depth channel if present."""
-  if images.shape.as_list()[-1] == 4:
-    return images[:, :, :, 0:3]
-  return images
-
-
-def image_grid(images, max_grid_size=4):
-  """Given images and N, return first N^2 images as an NxN image grid.
-
-  Args:
-    images: a `Tensor` of size [batch_size, height, width, channels]
-    max_grid_size: Maximum image grid height/width
-
-  Returns:
-    Single image batch, of dim [1, h*n, w*n, c]
-  """
-  images = remove_depth(images)
-  batch_size = images.shape.as_list()[0]
-  grid_size = min(int(math.sqrt(batch_size)), max_grid_size)
-  assert images.shape.as_list()[0] >= grid_size * grid_size
-
-  # If we have a depth channel
-  if images.shape.as_list()[-1] == 4:
-    # Extract the depth channel before the color channels are sliced away.
-    depth = tf.image.grayscale_to_rgb(
-        images[:grid_size * grid_size, :, :, 3:4])
-    images = images[:grid_size * grid_size, :, :, 0:3]
-
-    width = images.shape.as_list()[2]
-    images = tf.reshape(images, [-1, width, 3])
-    split = tf.split(images, grid_size, 0)
-    depth = tf.reshape(depth, [-1, width, 3])
-    depth_split = tf.split(depth, grid_size, 0)
-    grid = tf.concat(split + depth_split, 1)
-    return tf.expand_dims(grid, 0)
-  else:
-    images = images[:grid_size * grid_size, :, :, :]
-    images = tf.reshape(
-        images, [-1, images.shape.as_list()[2],
-                 images.shape.as_list()[3]])
-    split = tf.split(images, grid_size, 0)
-    grid = tf.concat(split, 1)
-    return tf.expand_dims(grid, 0)
-
-
-def source_and_output_image_grid(output_images,
-                                 source_images=None,
-                                 max_grid_size=4):
-  """Create NxN image grid for output, concatenate source grid if given.
-
-  Makes grid out of output_images and, if provided, source_images, and
-  concatenates them.
-
-  Args:
-    output_images: [batch_size, h, w, c] tensor of images
-    source_images: optional [batch_size, h, w, c] tensor of images
-    max_grid_size: Image grid height/width
-
-  Returns:
-    Single image batch, of dim [1, h*n, w*n, c]
-  """
-  output_grid = image_grid(output_images, max_grid_size=max_grid_size)
-  if source_images is not None:
-    source_grid = image_grid(source_images, max_grid_size=max_grid_size)
-    # Make sure they have the same # of channels before concat
-    # Assumes either 1 or 3 channels
-    if output_grid.shape.as_list()[-1] != source_grid.shape.as_list()[-1]:
-      if output_grid.shape.as_list()[-1] == 1:
-        output_grid = tf.tile(output_grid, [1, 1, 1, 3])
-      if source_grid.shape.as_list()[-1] == 1:
-        source_grid = tf.tile(source_grid, [1, 1, 1, 3])
-    output_grid = tf.concat([output_grid, source_grid], 1)
-  return output_grid
-
-
-def summarize_model(end_points):
-  """Summarizes the given model via its end_points.
-
-  Args:
-    end_points: A dictionary of end_point names to `Tensor`.
-  """
-  tf.summary.histogram('domain_logits_transferred',
-                       tf.sigmoid(end_points['transferred_domain_logits']))
-
-  tf.summary.histogram('domain_logits_target',
-                       tf.sigmoid(end_points['target_domain_logits']))
-
-
-def summarize_transferred_grid(transferred_images,
-                               source_images=None,
-                               name='Transferred'):
-  """Produces a visual grid summarization of the image transference.
-
-  Args:
-    transferred_images: A `Tensor` of size [batch_size, height, width, c].
-    source_images: A `Tensor` of size [batch_size, height, width, c].
-    name: Name to use in summary name
-  """
-  if source_images is not None:
-    grid = source_and_output_image_grid(transferred_images, source_images)
-  else:
-    grid = image_grid(transferred_images)
-  tf.summary.image('%s_Images_Grid' % name, grid, max_outputs=1)
-
-
-def summarize_transferred(source_images,
-                          transferred_images,
-                          max_images=20,
-                          name='Transferred'):
-  """Produces a visual summary of the image transference.
-
-  This summary displays the source image, transferred image, and a grayscale
-  difference image which highlights the differences between input and output.
-
-  Args:
-    source_images: A `Tensor` of size [batch_size, height, width, channels].
-    transferred_images: A `Tensor` of size
-      [batch_size, height, width, channels].
-    max_images: The number of images to show.
-    name: Name to use in summary name
-
-  Raises:
-    ValueError: If number of channels in source and target are incompatible
-  """
-  source_channels = source_images.shape.as_list()[-1]
-  transferred_channels = transferred_images.shape.as_list()[-1]
-  if source_channels < transferred_channels:
-    if source_channels != 1:
-      raise ValueError(
-          'Source must be 1 channel or same # of channels as target')
-    source_images = tf.tile(source_images, [1, 1, 1, transferred_channels])
-  if transferred_channels < source_channels:
-    if transferred_channels != 1:
-      raise ValueError(
-          'Target must be 1 channel or same # of channels as source')
-    transferred_images = tf.tile(transferred_images, [1, 1, 1, source_channels])
-  diffs = tf.abs(source_images - transferred_images)
-  diffs = tf.reduce_max(diffs, reduction_indices=[3], keep_dims=True)
-  diffs = tf.tile(diffs, [1, 1, 1, max(source_channels, transferred_channels)])
-
-  transition_images = tf.concat([
-      source_images,
-      transferred_images,
-      diffs,
-  ], 2)
-
-  tf.summary.image(
-      '%s_difference' % name, transition_images, max_outputs=max_images)
-
-
-def summaries_color_distributions(images, name):
-  """Produces a histogram of the color distributions of the images.
-
-  Args:
-    images: A `Tensor` of size [batch_size, height, width, 3].
-    name: The name of the images being summarized.
-  """
-  tf.summary.histogram('color_values/%s' % name, images)
-
-
-def summarize_images(images, name):
-  """Produces a visual summary of the given images.
-
-  Args:
-    images: A `Tensor` of size [batch_size, height, width, 3].
-    name: The name of the images being summarized.
-  """
-  grid = image_grid(images)
-  tf.summary.image('%s_Images' % name, grid, max_outputs=1)
diff --git a/research/feelvos/CONTRIBUTING.md b/research/feelvos/CONTRIBUTING.md
deleted file mode 100644
index 939e5341e..000000000
--- a/research/feelvos/CONTRIBUTING.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# How to Contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution;
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult
-[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
-information on using pull requests.
-
-## Community Guidelines
-
-This project follows [Google's Open Source Community
-Guidelines](https://opensource.google.com/conduct/).
diff --git a/research/feelvos/LICENSE b/research/feelvos/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/research/feelvos/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/research/feelvos/README.md b/research/feelvos/README.md deleted file mode 100644 index 69017c8b1..000000000 --- a/research/feelvos/README.md +++ /dev/null @@ -1,102 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation - -FEELVOS is a fast model for video object segmentation which does not rely on fine-tuning on the -first frame. - -For details, please refer to our paper. If you find the code useful, please -also consider citing it. - -* FEELVOS: - -``` -@inproceedings{feelvos2019, - title={FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation}, - author={Paul Voigtlaender and Yuning Chai and Florian Schroff and Hartwig Adam and Bastian Leibe and Liang-Chieh Chen}, - booktitle={CVPR}, - year={2019} -} -``` - -## Dependencies - -FEELVOS requires a good GPU with around 12 GB of memory and depends on the following libraries: - -* TensorFlow -* Pillow -* Numpy -* Scipy -* scikit-image -* tf Slim (which is included in the "tensorflow/models/research/" checkout) -* DeepLab (which is included in the "tensorflow/models/research/" checkout) -* correlation_cost (optional, see below) - -For detailed steps to install TensorFlow, follow the [TensorFlow installation -instructions](https://www.tensorflow.org/install/). A typical user can install -TensorFlow using the following command: - -```bash -pip install tensorflow-gpu -``` - -The remaining libraries can also be installed with pip using: - -```bash -pip install pillow scipy scikit-image -``` - -## Dependency on correlation_cost - -For fast cross-correlation, we use correlation_cost as an external dependency. By default FEELVOS -will use a slow and memory-hungry fallback implementation without correlation_cost. If you care -about performance, you should set up correlation_cost by following the instructions in -correlation_cost/README and afterwards setting ```USE_CORRELATION_COST = True``` in -utils/embedding_utils.py. - -## Pre-trained Models - -We provide two pre-trained FEELVOS models, both based on Xception-65: - -* [Trained on DAVIS 2017](http://download.tensorflow.org/models/feelvos_davis17_trained.tar.gz) -* [Trained on DAVIS 2017 and YouTube-VOS](http://download.tensorflow.org/models/feelvos_davis17_and_youtubevos_trained.tar.gz) - -Additionally, we provide a [DeepLab checkpoint for Xception-65 pre-trained on ImageNet and COCO](http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz), -which can be used as an initialization for training FEELVOS.
- -## Pre-computed Segmentation Masks - -We provide [pre-computed segmentation masks](http://download.tensorflow.org/models/feelvos_precomputed_masks.zip) -for FEELVOS both for training with and without YouTube-VOS data for the following datasets: - -* DAVIS 2017 validation set -* DAVIS 2017 test-dev set -* YouTube-Objects dataset - -## Local Inference -For a demo of local inference on DAVIS 2017, run: - -```bash -# From tensorflow/models/research/feelvos -sh eval.sh -``` - -## Local Training -For a demo of local training on DAVIS 2017, run: - -```bash -# From tensorflow/models/research/feelvos -sh train.sh -``` - -## Contacts (Maintainers) -* Paul Voigtlaender, github: [pvoigtlaender](https://github.com/pvoigtlaender) -* Yuning Chai, github: [yuningchai](https://github.com/yuningchai) -* Liang-Chieh Chen, github: [aquariusjay](https://github.com/aquariusjay) - -## License - -All the code in the feelvos folder is covered by the [LICENSE](https://github.com/tensorflow/models/blob/master/LICENSE) -under tensorflow/models. Please refer to the LICENSE for details. diff --git a/research/feelvos/__init__.py b/research/feelvos/__init__.py deleted file mode 100644 index 6f1373443..000000000 --- a/research/feelvos/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/research/feelvos/common.py b/research/feelvos/common.py deleted file mode 100644 index 98f5a9ce3..000000000 --- a/research/feelvos/common.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Provides flags that are common to scripts. - -Common flags from train/vis_video.py are collected in this script.
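A typical invocation of a script sharing these flags might look like the
following (a sketch with illustrative values; vis_video.py is one of the
scripts referred to above):

  python vis_video.py --classification_loss=softmax_with_attention \
    --embedding_dimension=100 --k_nearest_neighbors=1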
-""" -import tensorflow as tf - -from deeplab import common - -flags = tf.app.flags - -flags.DEFINE_enum( - 'classification_loss', 'softmax_with_attention', - ['softmax', 'triplet', 'softmax_with_attention'], - 'Type of loss function used for classifying pixels, can be either softmax, ' - 'softmax_with_attention, or triplet.') - -flags.DEFINE_integer('k_nearest_neighbors', 1, - 'The number of nearest neighbors to use.') - -flags.DEFINE_integer('embedding_dimension', 100, 'The dimension used for the ' - 'learned embedding') - -flags.DEFINE_boolean('use_softmax_feedback', True, - 'Whether to give the softmax predictions of the last ' - 'frame as additional input to the segmentation head.') - -flags.DEFINE_boolean('sample_adjacent_and_consistent_query_frames', True, - 'If true, the query frames (all but the first frame ' - 'which is the reference frame) will be sampled such ' - 'that they are adjacent video frames and have the same ' - 'crop coordinates and flip augmentation. Note that if ' - 'use_softmax_feedback is True, this option will ' - 'automatically be activated.') - -flags.DEFINE_integer('embedding_seg_feature_dimension', 256, - 'The dimensionality used in the segmentation head layers.') - -flags.DEFINE_integer('embedding_seg_n_layers', 4, 'The number of layers in the ' - 'segmentation head.') - -flags.DEFINE_integer('embedding_seg_kernel_size', 7, 'The kernel size used in ' - 'the segmentation head.') - -flags.DEFINE_multi_integer('embedding_seg_atrous_rates', [], - 'The atrous rates to use for the segmentation head.') - -flags.DEFINE_boolean('normalize_nearest_neighbor_distances', True, - 'Whether to normalize the nearest neighbor distances ' - 'to [0,1] using sigmoid, scale and shift.') - -flags.DEFINE_boolean('also_attend_to_previous_frame', True, 'Whether to also ' - 'use nearest neighbor attention with respect to the ' - 'previous frame.') - -flags.DEFINE_bool('use_local_previous_frame_attention', True, - 'Whether to restrict the previous frame attention to a local ' - 'search window. Only has an effect, if ' - 'also_attend_to_previous_frame is True.') - -flags.DEFINE_integer('previous_frame_attention_window_size', 15, - 'The window size used for local previous frame attention,' - ' if use_local_previous_frame_attention is True.') - -flags.DEFINE_boolean('use_first_frame_matching', True, 'Whether to extract ' - 'features by matching to the reference frame. This should ' - 'always be true except for ablation experiments.') - -FLAGS = flags.FLAGS - -# Constants - -# Perform semantic segmentation predictions. -OUTPUT_TYPE = common.OUTPUT_TYPE - -# Semantic segmentation item names. -LABELS_CLASS = common.LABELS_CLASS -IMAGE = common.IMAGE -HEIGHT = common.HEIGHT -WIDTH = common.WIDTH -IMAGE_NAME = common.IMAGE_NAME -SOURCE_ID = 'source_id' -VIDEO_ID = 'video_id' -LABEL = common.LABEL -ORIGINAL_IMAGE = common.ORIGINAL_IMAGE -PRECEDING_FRAME_LABEL = 'preceding_frame_label' - -# Test set name. -TEST_SET = common.TEST_SET - -# Internal constants. -OBJECT_LABEL = 'object_label' - - -class VideoModelOptions(common.ModelOptions): - """Internal version of immutable class to hold model options.""" - - def __new__(cls, - outputs_to_num_classes, - crop_size=None, - atrous_rates=None, - output_stride=8): - """Constructor to set default values. - - Args: - outputs_to_num_classes: A dictionary from output type to the number of - classes. For example, for the task of semantic segmentation with 21 - semantic classes, we would have outputs_to_num_classes['semantic'] = 21. 
- crop_size: A tuple [crop_height, crop_width]. - atrous_rates: A list of atrous convolution rates for ASPP. - output_stride: The ratio of input to output spatial resolution. - - Returns: - A new VideoModelOptions instance. - """ - self = super(VideoModelOptions, cls).__new__( - cls, - outputs_to_num_classes, - crop_size, - atrous_rates, - output_stride) - # Add internal flags. - self.classification_loss = FLAGS.classification_loss - - return self - - -def parse_decoder_output_stride(): - """Parses decoder output stride. - - FEELVOS assumes decoder_output_stride = 4. Thus, this function is created for - this particular purpose. - - Returns: - An integer specifying the decoder_output_stride. - - Raises: - ValueError: If decoder_output_stride is None or contains more than one - element. - """ - if FLAGS.decoder_output_stride: - decoder_output_stride = [ - int(x) for x in FLAGS.decoder_output_stride] - if len(decoder_output_stride) != 1: - raise ValueError('Expected decoder_output_stride to have exactly one ' - 'element.') - decoder_output_stride = decoder_output_stride[0] - else: - raise ValueError('Expected flag decoder_output_stride not to be None.') - return decoder_output_stride diff --git a/research/feelvos/correlation_cost/README.md b/research/feelvos/correlation_cost/README.md deleted file mode 100644 index 6cdbe550c..000000000 --- a/research/feelvos/correlation_cost/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# correlation_cost - -FEELVOS uses correlation_cost as an optional dependency to improve the speed and reduce the memory -consumption of cross-correlation. - -## Installation - -Unfortunately we cannot provide the code for correlation_cost directly, so you -will have to copy some files from this pull request -https://github.com/tensorflow/tensorflow/pull/21392/. For your convenience we -prepared scripts to download and adjust the code automatically. - -In the best case, all you need to do is run build.sh with the path to your -CUDA installation (tested only with CUDA 9). -Note that the path should be to a folder containing the cuda folder, not to the -cuda folder itself, e.g. if your cuda is in /usr/local/cuda-9.0, you can create -a symlink /usr/local/cuda pointing to /usr/local/cuda-9.0 and then run - -```bash -sh build.sh /usr/local/ -``` - -This will: - -* Download the code via ```sh get_code.sh ``` -* Apply minor adjustments to the code via ```sh fix_code.sh``` -* Clone the dependencies cub and thrust from GitHub via ```sh clone_dependencies.sh``` -* Compile a shared library correlation_cost.so for correlation_cost via -```sh compile.sh "${CUDA_DIR}"``` - -Please review the licenses of correlation_cost, cub, and thrust. - -## Enabling correlation_cost -If you managed to create the correlation_cost.so file, then set -```USE_CORRELATION_COST = True``` in feelvos/utils/embedding_utils.py and try to run -```sh eval.sh```. diff --git a/research/feelvos/correlation_cost/build.sh b/research/feelvos/correlation_cost/build.sh deleted file mode 100755 index 37d9adb31..000000000 --- a/research/feelvos/correlation_cost/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to download and build the code for correlation_cost. -# -# Usage: -# sh ./build.sh cuda_dir -# Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself). -# -# - -if [ "$#" -ne 1 ]; then - echo "Illegal number of parameters, usage: ./build.sh cuda_dir" - echo "Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself)" - exit 1 -fi - -set -e -set -x - -sh ./get_code.sh -sh ./fix_code.sh -sh ./clone_dependencies.sh -sh ./compile.sh $1 diff --git a/research/feelvos/correlation_cost/clone_dependencies.sh b/research/feelvos/correlation_cost/clone_dependencies.sh deleted file mode 100755 index 9174313f5..000000000 --- a/research/feelvos/correlation_cost/clone_dependencies.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to clone the dependencies, i.e. cub and thrust, of correlation_cost from github. -# -# Usage: -# sh ./clone_dependencies.sh -# -# - -# Clone cub. -if [ ! -d cub ] ; then - git clone https://github.com/dmlc/cub.git -fi -# Clone thrust. -if [ ! -d thrust ] ; then - git clone https://github.com/thrust/thrust.git -fi diff --git a/research/feelvos/correlation_cost/compile.sh b/research/feelvos/correlation_cost/compile.sh deleted file mode 100755 index 6025292df..000000000 --- a/research/feelvos/correlation_cost/compile.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to compile the code for correlation_cost and create correlation_cost.so. -# -# Usage: -# sh ./compile.sh cuda_dir -# Where cuda_dir points to a directory containing the cuda folder (not the cuda folder itself). 
-# -# - -if [ "$#" -ne 1 ]; then - echo "Illegal number of parameters, usage: ./compile.sh cuda_dir" - exit 1 -fi -CUDA_DIR=$1 - -if [ ! -d "${CUDA_DIR}/cuda" ]; then - echo "cuda_dir must point to a directory containing the cuda folder, not to the cuda folder itself" - exit 1 -fi - -TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))') ) -CUB_DIR=cub -THRUST_DIR=thrust - -# Depending on the versions of your nvcc and gcc, the flag --expt-relaxed-constexpr might be required or should be removed. -# If nvcc complains about a too new gcc version, you can point it to another gcc -# version by using something like nvcc -ccbin /path/to/your/gcc6 -nvcc -std=c++11 --expt-relaxed-constexpr -I ./ -I ${CUB_DIR}/../ -I ${THRUST_DIR} -I ${CUDA_DIR}/ -c -o correlation_cost_op_gpu.o kernels/correlation_cost_op_gpu.cu.cc ${TF_CFLAGS[@]} -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC - -g++ -std=c++11 -I ./ -L ${CUDA_DIR}/cuda/lib64 -shared -o correlation_cost.so ops/correlation_cost_op.cc kernels/correlation_cost_op.cc correlation_cost_op_gpu.o ${TF_CFLAGS[@]} -fPIC -lcudart ${TF_LFLAGS[@]} -D GOOGLE_CUDA=1 diff --git a/research/feelvos/correlation_cost/fix_code.sh b/research/feelvos/correlation_cost/fix_code.sh deleted file mode 100755 index d4f285db3..000000000 --- a/research/feelvos/correlation_cost/fix_code.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to modify the downloaded code. -# -# Usage: -# sh ./fix_code.sh -# -# - -sed -i "s/tensorflow\/contrib\/correlation_cost\///g" kernels/correlation_cost_op_gpu.cu.cc -sed -i "s/tensorflow\/contrib\/correlation_cost\///g" kernels/correlation_cost_op.cc -sed -i "s/external\/cub_archive\//cub\//g" kernels/correlation_cost_op_gpu.cu.cc - -sed -i "s/from tensorflow.contrib.util import loader/import tensorflow as tf/g" python/ops/correlation_cost_op.py -grep -v "from tensorflow" python/ops/correlation_cost_op.py | grep -v resource_loader.get_path_to_datafile > correlation_cost_op.py.tmp && mv correlation_cost_op.py.tmp python/ops/correlation_cost_op.py -sed -i "s/array_ops/tf/g" python/ops/correlation_cost_op.py -sed -i "s/ops/tf/g" python/ops/correlation_cost_op.py -sed -i "s/loader.load_op_library(/tf.load_op_library('feelvos\/correlation_cost\/correlation_cost.so')/g" python/ops/correlation_cost_op.py -sed -i "s/gen_correlation_cost_op/_correlation_cost_op_so/g" python/ops/correlation_cost_op.py diff --git a/research/feelvos/correlation_cost/get_code.sh b/research/feelvos/correlation_cost/get_code.sh deleted file mode 100755 index 337142166..000000000 --- a/research/feelvos/correlation_cost/get_code.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to download the code for correlation_cost. -# -# Usage: -# sh ./get_code.sh -# -# - -mkdir -p kernels ops python/ops -touch __init__.py -touch python/__init__.py -touch python/ops/__init__.py -wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/ops/correlation_cost_op.cc -O ops/correlation_cost_op.cc -wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/python/ops/correlation_cost_op.py -O python/ops/correlation_cost_op.py -wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op.cc -O kernels/correlation_cost_op.cc -wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op.h -O kernels/correlation_cost_op.h -wget https://raw.githubusercontent.com/tensorflow/tensorflow/91b163b9bd8dd0f8c2631b4245a67dfd387536a6/tensorflow/contrib/correlation_cost/kernels/correlation_cost_op_gpu.cu.cc -O kernels/correlation_cost_op_gpu.cu.cc diff --git a/research/feelvos/datasets/__init__.py b/research/feelvos/datasets/__init__.py deleted file mode 100644 index 6f1373443..000000000 --- a/research/feelvos/datasets/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/research/feelvos/datasets/build_davis2017_data.py b/research/feelvos/datasets/build_davis2017_data.py deleted file mode 100644 index 5e093fc3b..000000000 --- a/research/feelvos/datasets/build_davis2017_data.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts DAVIS 2017 data to TFRecord file format with SequenceExample protos. -""" - -import io -import math -import os -from StringIO import StringIO -import numpy as np -import PIL -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string('data_folder', 'DAVIS2017/', - 'Folder containing the DAVIS 2017 data') - -tf.app.flags.DEFINE_string('imageset', 'val', - 'Which subset to use, either train or val') - -tf.app.flags.DEFINE_string( - 'output_dir', './tfrecord', - 'Path to save converted TFRecords of TensorFlow examples.') - -_NUM_SHARDS_TRAIN = 10 -_NUM_SHARDS_VAL = 1 - - -def read_image(path): - with open(path) as fid: - image_str = fid.read() - image = PIL.Image.open(io.BytesIO(image_str)) - w, h = image.size - return image_str, (h, w) - - -def read_annotation(path): - """Reads a single image annotation from a png image. - - Args: - path: Path to the png image. - - Returns: - png_string: The png encoded as string. - size: Tuple of (height, width). - """ - with open(path) as fid: - x = np.array(PIL.Image.open(fid)) - h, w = x.shape - im = PIL.Image.fromarray(x) - - output = StringIO() - im.save(output, format='png') - png_string = output.getvalue() - output.close() - - return png_string, (h, w) - - -def process_video(key, input_dir, anno_dir): - """Creates a SequenceExample for the video. - - Args: - key: Name of the video. - input_dir: Directory which contains the image files. - anno_dir: Directory which contains the annotation files. - - Returns: - The created SequenceExample. 
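  Example (a sketch of the proto layout assembled in the body below; all keys
  are set by this function):
    context: video_id, clip/frames, image/format, image/channels,
      image/height, image/width, segmentation/object/format,
      segmentation/object/height, segmentation/object/width
    feature_lists: image/encoded, segmentation/object/encoded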
- """ - frame_names = sorted(tf.gfile.ListDirectory(input_dir)) - anno_files = sorted(tf.gfile.ListDirectory(anno_dir)) - assert len(frame_names) == len(anno_files) - - sequence = tf.train.SequenceExample() - context = sequence.context.feature - features = sequence.feature_lists.feature_list - - for i, name in enumerate(frame_names): - image_str, image_shape = read_image( - os.path.join(input_dir, name)) - anno_str, anno_shape = read_annotation( - os.path.join(anno_dir, name[:-4] + '.png')) - image_encoded = features['image/encoded'].feature.add() - image_encoded.bytes_list.value.append(image_str) - segmentation_encoded = features['segmentation/object/encoded'].feature.add() - segmentation_encoded.bytes_list.value.append(anno_str) - - np.testing.assert_array_equal(np.array(image_shape), np.array(anno_shape)) - - if i == 0: - first_shape = np.array(image_shape) - else: - np.testing.assert_array_equal(np.array(image_shape), first_shape) - - context['video_id'].bytes_list.value.append(key.encode('ascii')) - context['clip/frames'].int64_list.value.append(len(frame_names)) - context['image/format'].bytes_list.value.append('JPEG') - context['image/channels'].int64_list.value.append(3) - context['image/height'].int64_list.value.append(first_shape[0]) - context['image/width'].int64_list.value.append(first_shape[1]) - context['segmentation/object/format'].bytes_list.value.append('PNG') - context['segmentation/object/height'].int64_list.value.append(first_shape[0]) - context['segmentation/object/width'].int64_list.value.append(first_shape[1]) - - return sequence - - -def convert(data_folder, imageset, output_dir, num_shards): - """Converts the specified subset of DAVIS 2017 to TFRecord format. - - Args: - data_folder: The path to the DAVIS 2017 data. - imageset: The subset to use, either train or val. - output_dir: Where to store the TFRecords. - num_shards: The number of shards used for storing the data. - """ - sets_file = os.path.join(data_folder, 'ImageSets', '2017', imageset + '.txt') - vids = [x.strip() for x in open(sets_file).readlines()] - num_vids = len(vids) - num_vids_per_shard = int(math.ceil(num_vids) / float(num_shards)) - for shard_id in range(num_shards): - output_filename = os.path.join( - output_dir, - '%s-%05d-of-%05d.tfrecord' % (imageset, shard_id, num_shards)) - with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: - start_idx = shard_id * num_vids_per_shard - end_idx = min((shard_id + 1) * num_vids_per_shard, num_vids) - for i in range(start_idx, end_idx): - print('Converting video %d/%d shard %d video %s' % ( - i + 1, num_vids, shard_id, vids[i])) - img_dir = os.path.join(data_folder, 'JPEGImages', '480p', vids[i]) - anno_dir = os.path.join(data_folder, 'Annotations', '480p', vids[i]) - example = process_video(vids[i], img_dir, anno_dir) - tfrecord_writer.write(example.SerializeToString()) - - -def main(unused_argv): - imageset = FLAGS.imageset - assert imageset in ('train', 'val') - if imageset == 'train': - num_shards = _NUM_SHARDS_TRAIN - else: - num_shards = _NUM_SHARDS_VAL - convert(FLAGS.data_folder, FLAGS.imageset, FLAGS.output_dir, num_shards) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/feelvos/datasets/download_and_convert_davis17.sh b/research/feelvos/datasets/download_and_convert_davis17.sh deleted file mode 100644 index 011be61ba..000000000 --- a/research/feelvos/datasets/download_and_convert_davis17.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# Script to download and preprocess the DAVIS 2017 dataset. -# -# Usage: -# bash ./download_and_convert_davis17.sh - -# Exit immediately if a command exits with a non-zero status. -set -e - -CURRENT_DIR=$(pwd) -WORK_DIR="./davis17" -mkdir -p "${WORK_DIR}" -cd "${WORK_DIR}" - -# Helper function to download and unpack the DAVIS 2017 dataset. -download_and_uncompress() { - local BASE_URL=${1} - local FILENAME=${2} - - if [ ! -f "${FILENAME}" ]; then - echo "Downloading ${FILENAME} to ${WORK_DIR}" - wget -nd -c "${BASE_URL}/${FILENAME}" - echo "Uncompressing ${FILENAME}" - unzip "${FILENAME}" - fi -} - -BASE_URL="https://data.vision.ee.ethz.ch/csergi/share/davis/" -FILENAME="DAVIS-2017-trainval-480p.zip" - -download_and_uncompress "${BASE_URL}" "${FILENAME}" - -cd "${CURRENT_DIR}" - -# Root path for DAVIS 2017 dataset. -DAVIS_ROOT="${WORK_DIR}/DAVIS" - -# Build TFRecords of the dataset. -# First, create output directory for storing TFRecords. -OUTPUT_DIR="${WORK_DIR}/tfrecord" -mkdir -p "${OUTPUT_DIR}" - -IMAGE_FOLDER="${DAVIS_ROOT}/JPEGImages" -LIST_FOLDER="${DAVIS_ROOT}/ImageSets/Segmentation" - -# Convert validation set. -if [ ! -f "${OUTPUT_DIR}/val-00000-of-00001.tfrecord" ]; then - echo "Converting DAVIS 2017 dataset (val)..." - python ./build_davis2017_data.py \ - --data_folder="${DAVIS_ROOT}" \ - --imageset=val \ - --output_dir="${OUTPUT_DIR}" -fi - -# Convert training set. -if [ ! -f "${OUTPUT_DIR}/train-00009-of-00010.tfrecord" ]; then - echo "Converting DAVIS 2017 dataset (train)..." - python ./build_davis2017_data.py \ - --data_folder="${DAVIS_ROOT}" \ - --imageset=train \ - --output_dir="${OUTPUT_DIR}" -fi diff --git a/research/feelvos/datasets/tfsequence_example_decoder.py b/research/feelvos/datasets/tfsequence_example_decoder.py deleted file mode 100644 index 2fa3e95d5..000000000 --- a/research/feelvos/datasets/tfsequence_example_decoder.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contains the TFSequenceExampleDecoder. - -The TFSequenceExampleDecoder is a DataDecoder used to decode TensorFlow -SequenceExample protos. In order to do so, each requested item must be paired -with one or more SequenceExample features that are parsed to produce the -Tensor-based manifestation of the item.
-""" - -import tensorflow as tf -slim = tf.contrib.slim -data_decoder = slim.data_decoder - - -class TFSequenceExampleDecoder(data_decoder.DataDecoder): - """A decoder for TensorFlow SequenceExamples. - - Decoding SequenceExample proto buffers is comprised of two stages: - (1) Example parsing and (2) tensor manipulation. - - In the first stage, the tf.parse_single_sequence_example function is called - with a list of FixedLenFeatures and SparseLenFeatures. These instances tell TF - how to parse the example. The output of this stage is a set of tensors. - - In the second stage, the resulting tensors are manipulated to provide the - requested 'item' tensors. - - To perform this decoding operation, a SequenceExampleDecoder is given a list - of ItemHandlers. Each ItemHandler indicates the set of features for stage 1 - and contains the instructions for post_processing its tensors for stage 2. - """ - - def __init__(self, keys_to_context_features, keys_to_sequence_features, - items_to_handlers): - """Constructs the decoder. - - Args: - keys_to_context_features: a dictionary from TF-SequenceExample context - keys to either tf.VarLenFeature or tf.FixedLenFeature instances. - See tensorflow's parsing_ops.py. - keys_to_sequence_features: a dictionary from TF-SequenceExample sequence - keys to either tf.VarLenFeature or tf.FixedLenSequenceFeature instances. - See tensorflow's parsing_ops.py. - items_to_handlers: a dictionary from items (strings) to ItemHandler - instances. Note that the ItemHandler's are provided the keys that they - use to return the final item Tensors. - - Raises: - ValueError: if the same key is present for context features and sequence - features. - """ - unique_keys = set() - unique_keys.update(keys_to_context_features) - unique_keys.update(keys_to_sequence_features) - if len(unique_keys) != ( - len(keys_to_context_features) + len(keys_to_sequence_features)): - # This situation is ambiguous in the decoder's keys_to_tensors variable. - raise ValueError('Context and sequence keys are not unique. \n' - ' Context keys: %s \n Sequence keys: %s' % - (list(keys_to_context_features.keys()), - list(keys_to_sequence_features.keys()))) - - self._keys_to_context_features = keys_to_context_features - self._keys_to_sequence_features = keys_to_sequence_features - self._items_to_handlers = items_to_handlers - - def list_items(self): - """See base class.""" - return self._items_to_handlers.keys() - - def decode(self, serialized_example, items=None): - """Decodes the given serialized TF-SequenceExample. - - Args: - serialized_example: a serialized TF-SequenceExample tensor. - items: the list of items to decode. These must be a subset of the item - keys in self._items_to_handlers. If `items` is left as None, then all - of the items in self._items_to_handlers are decoded. - - Returns: - the decoded items, a list of tensor. 
- """ - - context, feature_list = tf.parse_single_sequence_example( - serialized_example, self._keys_to_context_features, - self._keys_to_sequence_features) - - # Reshape non-sparse elements just once: - for k in self._keys_to_context_features: - v = self._keys_to_context_features[k] - if isinstance(v, tf.FixedLenFeature): - context[k] = tf.reshape(context[k], v.shape) - - if not items: - items = self._items_to_handlers.keys() - - outputs = [] - for item in items: - handler = self._items_to_handlers[item] - keys_to_tensors = { - key: context[key] if key in context else feature_list[key] - for key in handler.keys - } - outputs.append(handler.tensors_to_item(keys_to_tensors)) - return outputs diff --git a/research/feelvos/datasets/video_dataset.py b/research/feelvos/datasets/video_dataset.py deleted file mode 100644 index 17b62e989..000000000 --- a/research/feelvos/datasets/video_dataset.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Provides data from video object segmentation datasets. - -This file provides both images and annotations (instance segmentations) for -TensorFlow. Currently, we support the following datasets: - -1. DAVIS 2017 (https://davischallenge.org/davis2017/code.html). - -2. DAVIS 2016 (https://davischallenge.org/davis2016/code.html). - -3. YouTube-VOS (https://youtube-vos.org/dataset/download). -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import os.path -import tensorflow as tf -from feelvos.datasets import tfsequence_example_decoder - -slim = tf.contrib.slim -dataset = slim.dataset -tfexample_decoder = slim.tfexample_decoder - - -_ITEMS_TO_DESCRIPTIONS = { - 'image': 'A color image of varying height and width.', - 'labels_class': ('A semantic segmentation label whose size matches image.' - 'Its values range from 0 (background) to num_classes.'), -} - -# Named tuple to describe the dataset properties. -DatasetDescriptor = collections.namedtuple( - 'DatasetDescriptor', - ['splits_to_sizes', # Splits of the dataset into training, val, and test. - 'num_classes', # Number of semantic classes. - 'ignore_label', # Ignore label value. - ] -) - -_DAVIS_2016_INFORMATION = DatasetDescriptor( - splits_to_sizes={'train': [30, 1830], - 'val': [20, 1376]}, - num_classes=2, - ignore_label=255, -) - -_DAVIS_2017_INFORMATION = DatasetDescriptor( - splits_to_sizes={'train': [60, 4219], - 'val': [30, 2023], - 'test-dev': [30, 2037]}, - num_classes=None, # Number of instances per videos differ. - ignore_label=255, -) - -_YOUTUBE_VOS_2018_INFORMATION = DatasetDescriptor( - # Leave these sizes as None to allow for different splits into - # training and validation sets. - splits_to_sizes={'train': [None, None], - 'val': [None, None]}, - num_classes=None, # Number of instances per video differs. 
- ignore_label=255, -) - -_DATASETS_INFORMATION = { - 'davis_2016': _DAVIS_2016_INFORMATION, - 'davis_2017': _DAVIS_2017_INFORMATION, - 'youtube_vos_2018': _YOUTUBE_VOS_2018_INFORMATION, -} - -# Default file pattern of SSTable. Note we include '-' to avoid the confusion -# between `train-` and `trainval-` sets. -_FILE_PATTERN = '%s-*' - - -def get_dataset(dataset_name, - split_name, - dataset_dir, - file_pattern=None, - data_type='tf_sequence_example', - decode_video_frames=False): - """Gets an instance of slim Dataset. - - Args: - dataset_name: String, dataset name. - split_name: String, the train/val split name. - dataset_dir: String, the directory of the dataset sources. - file_pattern: String, file pattern of SSTable. - data_type: String, data type. Currently only 'tf_sequence_example' is - supported. - decode_video_frames: Boolean, whether to decode the images here. Not - decoding them here is useful if we subsample later. - - Returns: - An instance of slim Dataset. - - Raises: - ValueError: If the dataset_name or split_name is not recognized, or if - the data_type is not supported. - """ - if dataset_name not in _DATASETS_INFORMATION: - raise ValueError('The specified dataset is not supported yet.') - - splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes - - if split_name not in splits_to_sizes: - raise ValueError('data split name %s not recognized' % split_name) - - # Prepare the variables for different datasets. - num_classes = _DATASETS_INFORMATION[dataset_name].num_classes - ignore_label = _DATASETS_INFORMATION[dataset_name].ignore_label - - if file_pattern is None: - file_pattern = _FILE_PATTERN - file_pattern = os.path.join(dataset_dir, file_pattern % split_name) - if data_type == 'tf_sequence_example': - keys_to_context_features = { - 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), - 'image/height': tf.FixedLenFeature((), tf.int64, default_value=0), - 'image/width': tf.FixedLenFeature((), tf.int64, default_value=0), - 'segmentation/object/format': tf.FixedLenFeature( - (), tf.string, default_value='png'), - 'video_id': tf.FixedLenFeature((), tf.string, default_value='unknown') - } - label_name = 'class' if dataset_name == 'davis_2016' else 'object' - keys_to_sequence_features = { - 'image/encoded': tf.FixedLenSequenceFeature((), dtype=tf.string), - 'segmentation/{}/encoded'.format(label_name): - tf.FixedLenSequenceFeature((), tf.string), - } - items_to_handlers = { - 'height': tfexample_decoder.Tensor('image/height'), - 'width': tfexample_decoder.Tensor('image/width'), - 'video_id': tfexample_decoder.Tensor('video_id') - } - if decode_video_frames: - decode_image_handler = tfexample_decoder.Image( - image_key='image/encoded', - format_key='image/format', - channels=3, - repeated=True) - items_to_handlers['image'] = decode_image_handler - decode_label_handler = tfexample_decoder.Image( - image_key='segmentation/{}/encoded'.format(label_name), - format_key='segmentation/{}/format'.format(label_name), - channels=1, - repeated=True) - items_to_handlers['labels_class'] = decode_label_handler - else: - items_to_handlers['image/encoded'] = tfexample_decoder.Tensor( - 'image/encoded') - items_to_handlers[ - 'segmentation/object/encoded'] = tfexample_decoder.Tensor( - 'segmentation/{}/encoded'.format(label_name)) - decoder = tfsequence_example_decoder.TFSequenceExampleDecoder( - keys_to_context_features, keys_to_sequence_features, items_to_handlers) - else:
- raise ValueError('Unknown data type.') - - size = splits_to_sizes[split_name] - if isinstance(size, collections.Sequence): - num_videos = size[0] - num_samples = size[1] - else: - num_videos = 0 - num_samples = size - - return dataset.Dataset( - data_sources=file_pattern, - reader=tf.TFRecordReader, - decoder=decoder, - num_samples=num_samples, - num_videos=num_videos, - items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, - ignore_label=ignore_label, - num_classes=num_classes, - name=dataset_name, - multi_label=True) diff --git a/research/feelvos/eval.sh b/research/feelvos/eval.sh deleted file mode 100755 index 96cb7f409..000000000 --- a/research/feelvos/eval.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to locally run inference on DAVIS 2017. Users can also -# adapt this script for their own use case. See train.sh for an example of -# local training. -# -# Usage: -# # From the tensorflow/models/research/feelvos directory. -# sh ./eval.sh -# -# - -# Exit immediately if a command exits with a non-zero status. -set -e - -# Move one level up to the tensorflow/models/research directory. -cd .. - -# Update PYTHONPATH. -export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim:`pwd`/feelvos - -# Set up the working environment. -CURRENT_DIR=$(pwd) -WORK_DIR="${CURRENT_DIR}/feelvos" - -# Run embedding_utils_test first to make sure the PYTHONPATH is correctly set. -python "${WORK_DIR}"/utils/embedding_utils_test.py -v - -# Go to datasets folder and download and convert the DAVIS 2017 dataset. -DATASET_DIR="datasets" -cd "${WORK_DIR}/${DATASET_DIR}" -sh download_and_convert_davis17.sh - -# Go to models folder and download and unpack the DAVIS 2017 trained model. -MODELS_DIR="models" -mkdir -p "${WORK_DIR}/${MODELS_DIR}" -cd "${WORK_DIR}/${MODELS_DIR}" -if [ ! -d "feelvos_davis17_trained" ]; then - wget http://download.tensorflow.org/models/feelvos_davis17_trained.tar.gz - tar -xvf feelvos_davis17_trained.tar.gz - echo "model_checkpoint_path: \"model.ckpt-200004\"" > feelvos_davis17_trained/checkpoint - rm feelvos_davis17_trained.tar.gz -fi -CHECKPOINT_DIR="${WORK_DIR}/${MODELS_DIR}/feelvos_davis17_trained/" - -# Go back to the original directory. -cd "${CURRENT_DIR}" - -# Set up the working directories.
-DAVIS_FOLDER="davis17" -EXP_FOLDER="exp/eval_on_val_set" -VIS_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/${EXP_FOLDER}/eval" -mkdir -p ${VIS_LOGDIR} - -DAVIS_DATASET="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/tfrecord" - -python "${WORK_DIR}"/vis_video.py \ - --dataset=davis_2017 \ - --dataset_dir="${DAVIS_DATASET}" \ - --vis_logdir="${VIS_LOGDIR}" \ - --checkpoint_dir="${CHECKPOINT_DIR}" \ - --logtostderr \ - --atrous_rates=12 \ - --atrous_rates=24 \ - --atrous_rates=36 \ - --decoder_output_stride=4 \ - --model_variant=xception_65 \ - --multi_grid=1 \ - --multi_grid=1 \ - --multi_grid=1 \ - --output_stride=8 \ - --save_segmentations diff --git a/research/feelvos/input_preprocess.py b/research/feelvos/input_preprocess.py deleted file mode 100644 index 954c0b42e..000000000 --- a/research/feelvos/input_preprocess.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Prepare the data used for FEELVOS training/evaluation.""" -import tensorflow as tf - -from deeplab.core import feature_extractor -from deeplab.core import preprocess_utils - -# The probability of flipping the images and labels -# left-right during training -_PROB_OF_FLIP = 0.5 - -get_random_scale = preprocess_utils.get_random_scale -randomly_scale_image_and_label = ( - preprocess_utils.randomly_scale_image_and_label) - - -def preprocess_image_and_label(image, - label, - crop_height, - crop_width, - min_resize_value=None, - max_resize_value=None, - resize_factor=None, - min_scale_factor=1., - max_scale_factor=1., - scale_factor_step_size=0, - ignore_label=255, - is_training=True, - model_variant=None): - """Preprocesses the image and label. - - Args: - image: Input image. - label: Ground truth annotation label. - crop_height: The height value used to crop the image and label. - crop_width: The width value used to crop the image and label. - min_resize_value: Desired size of the smaller image side. - max_resize_value: Maximum allowed size of the larger image side. - resize_factor: Resized dimensions are multiple of factor plus one. - min_scale_factor: Minimum scale factor value. - max_scale_factor: Maximum scale factor value. - scale_factor_step_size: The step size from min scale factor to max scale - factor. The input is randomly scaled based on the value of - (min_scale_factor, max_scale_factor, scale_factor_step_size). - ignore_label: The label value which will be ignored for training and - evaluation. - is_training: If the preprocessing is used for training or not. - model_variant: Model variant (string) for choosing how to mean-subtract the - images. See feature_extractor.network_map for supported model variants. - - Returns: - original_image: Original image (could be resized). - processed_image: Preprocessed image. - label: Preprocessed ground truth segmentation label. - - Raises: - ValueError: Ground truth label not provided during training. 
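  Example (an illustrative call; the crop size here is arbitrary, and
  'xception_65' matches the model variant used elsewhere in this patch):

    original_image, processed_image, label = preprocess_image_and_label(
        image, label, crop_height=465, crop_width=465,
        min_scale_factor=0.5, max_scale_factor=2.0,
        scale_factor_step_size=0.25, is_training=True,
        model_variant='xception_65')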
- """ - if is_training and label is None: - raise ValueError('During training, label must be provided.') - if model_variant is None: - tf.logging.warning('Default mean-subtraction is performed. Please specify ' - 'a model_variant. See feature_extractor.network_map for ' - 'supported model variants.') - - # Keep reference to original image. - original_image = image - - processed_image = tf.cast(image, tf.float32) - - if label is not None: - label = tf.cast(label, tf.int32) - - # Resize image and label to the desired range. - if min_resize_value is not None or max_resize_value is not None: - [processed_image, label] = ( - preprocess_utils.resize_to_range( - image=processed_image, - label=label, - min_size=min_resize_value, - max_size=max_resize_value, - factor=resize_factor, - align_corners=True)) - # The `original_image` becomes the resized image. - original_image = tf.identity(processed_image) - - # Data augmentation by randomly scaling the inputs. - scale = get_random_scale( - min_scale_factor, max_scale_factor, scale_factor_step_size) - processed_image, label = randomly_scale_image_and_label( - processed_image, label, scale) - - processed_image.set_shape([None, None, 3]) - - if crop_height is not None and crop_width is not None: - # Pad image and label to have dimensions >= [crop_height, crop_width]. - image_shape = tf.shape(processed_image) - image_height = image_shape[0] - image_width = image_shape[1] - - target_height = image_height + tf.maximum(crop_height - image_height, 0) - target_width = image_width + tf.maximum(crop_width - image_width, 0) - - # Pad image with mean pixel value. - mean_pixel = tf.reshape( - feature_extractor.mean_pixel(model_variant), [1, 1, 3]) - processed_image = preprocess_utils.pad_to_bounding_box( - processed_image, 0, 0, target_height, target_width, mean_pixel) - - if label is not None: - label = preprocess_utils.pad_to_bounding_box( - label, 0, 0, target_height, target_width, ignore_label) - - # Randomly crop the image and label. - if is_training and label is not None: - processed_image, label = preprocess_utils.random_crop( - [processed_image, label], crop_height, crop_width) - - processed_image.set_shape([crop_height, crop_width, 3]) - - if label is not None: - label.set_shape([crop_height, crop_width, 1]) - - if is_training: - # Randomly left-right flip the image and label. - processed_image, label, _ = preprocess_utils.flip_dim( - [processed_image, label], _PROB_OF_FLIP, dim=1) - - return original_image, processed_image, label - - -def preprocess_images_and_labels_consistently(images, - labels, - crop_height, - crop_width, - min_resize_value=None, - max_resize_value=None, - resize_factor=None, - min_scale_factor=1., - max_scale_factor=1., - scale_factor_step_size=0, - ignore_label=255, - is_training=True, - model_variant=None): - """Preprocesses images and labels in a consistent way. - - Similar to preprocess_image_and_label, but works on a list of images - and a list of labels and uses the same crop coordinates and either flips - all images and labels or none of them. - - Args: - images: List of input images. - labels: List of ground truth annotation labels. - crop_height: The height value used to crop the image and label. - crop_width: The width value used to crop the image and label. - min_resize_value: Desired size of the smaller image side. - max_resize_value: Maximum allowed size of the larger image side. - resize_factor: Resized dimensions are multiple of factor plus one. - min_scale_factor: Minimum scale factor value. 
- max_scale_factor: Maximum scale factor value. - scale_factor_step_size: The step size from min scale factor to max scale - factor. The input is randomly scaled based on the value of - (min_scale_factor, max_scale_factor, scale_factor_step_size). - ignore_label: The label value which will be ignored for training and - evaluation. - is_training: If the preprocessing is used for training or not. - model_variant: Model variant (string) for choosing how to mean-subtract the - images. See feature_extractor.network_map for supported model variants. - - Returns: - original_images: Original images (could be resized). - processed_images: Preprocessed images. - labels: Preprocessed ground truth segmentation labels. - - Raises: - ValueError: Ground truth label not provided during training. - """ - if is_training and labels is None: - raise ValueError('During training, labels must be provided.') - if model_variant is None: - tf.logging.warning('Default mean-subtraction is performed. Please specify ' - 'a model_variant. See feature_extractor.network_map for ' - 'supported model variants.') - if labels is not None: - assert len(images) == len(labels) - num_imgs = len(images) - - # Keep reference to original images. - original_images = images - - processed_images = [tf.cast(image, tf.float32) for image in images] - - if labels is not None: - labels = [tf.cast(label, tf.int32) for label in labels] - - # Resize images and labels to the desired range. - if min_resize_value is not None or max_resize_value is not None: - processed_images, labels = zip(*[ - preprocess_utils.resize_to_range( - image=processed_image, - label=label, - min_size=min_resize_value, - max_size=max_resize_value, - factor=resize_factor, - align_corners=True) for processed_image, label - in zip(processed_images, labels)]) - # The `original_images` becomes the resized images. - original_images = [tf.identity(processed_image) - for processed_image in processed_images] - - # Data augmentation by randomly scaling the inputs. - scale = get_random_scale( - min_scale_factor, max_scale_factor, scale_factor_step_size) - processed_images, labels = zip( - *[randomly_scale_image_and_label(processed_image, label, scale) - for processed_image, label in zip(processed_images, labels)]) - - for processed_image in processed_images: - processed_image.set_shape([None, None, 3]) - - if crop_height is not None and crop_width is not None: - # Pad image and label to have dimensions >= [crop_height, crop_width]. - image_shape = tf.shape(processed_images[0]) - image_height = image_shape[0] - image_width = image_shape[1] - - target_height = image_height + tf.maximum(crop_height - image_height, 0) - target_width = image_width + tf.maximum(crop_width - image_width, 0) - - # Pad image with mean pixel value. - mean_pixel = tf.reshape( - feature_extractor.mean_pixel(model_variant), [1, 1, 3]) - processed_images = [preprocess_utils.pad_to_bounding_box( - processed_image, 0, 0, target_height, target_width, mean_pixel) - for processed_image in processed_images] - - if labels is not None: - labels = [preprocess_utils.pad_to_bounding_box( - label, 0, 0, target_height, target_width, ignore_label) - for label in labels] - - # Randomly crop the images and labels. 
- if is_training and labels is not None: - cropped = preprocess_utils.random_crop( - processed_images + labels, crop_height, crop_width) - assert len(cropped) == 2 * num_imgs - processed_images = cropped[:num_imgs] - labels = cropped[num_imgs:] - - for processed_image in processed_images: - processed_image.set_shape([crop_height, crop_width, 3]) - - if labels is not None: - for label in labels: - label.set_shape([crop_height, crop_width, 1]) - - if is_training: - # Randomly left-right flip the image and label. - res = preprocess_utils.flip_dim( - list(processed_images + labels), _PROB_OF_FLIP, dim=1) - maybe_flipped = res[:-1] - assert len(maybe_flipped) == 2 * num_imgs - processed_images = maybe_flipped[:num_imgs] - labels = maybe_flipped[num_imgs:] - - return original_images, processed_images, labels diff --git a/research/feelvos/model.py b/research/feelvos/model.py deleted file mode 100644 index f145f9161..000000000 --- a/research/feelvos/model.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Provides DeepLab model definition and helper functions. - -DeepLab is a deep learning system for semantic image segmentation with -the following features: - -(1) Atrous convolution to explicitly control the resolution at which -feature responses are computed within Deep Convolutional Neural Networks. - -(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at -multiple scales with filters at multiple sampling rates and effective -fields-of-view. - -(3) ASPP module augmented with image-level feature and batch normalization. - -(4) A simple yet effective decoder module to recover the object boundaries. - -See the following papers for more details: - -"Encoder-Decoder with Atrous Separable Convolution for Semantic Image -Segmentation" -Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. -(https://arxiv.org/abs/1802.02611) - -"Rethinking Atrous Convolution for Semantic Image Segmentation," -Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam -(https://arxiv.org/abs/1706.05587) - -"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, -Atrous Convolution, and Fully Connected CRFs", -Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, -Alan L Yuille (* equal contribution) -(https://arxiv.org/abs/1606.00915) - -"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected -CRFs" -Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, -Alan L.
Yuille (* equal contribution) -(https://arxiv.org/abs/1412.7062) -""" -import collections -import tensorflow as tf - -from deeplab import model -from feelvos import common -from feelvos.utils import embedding_utils -from feelvos.utils import train_utils - -slim = tf.contrib.slim - - -get_branch_logits = model.get_branch_logits -get_extra_layer_scopes = model.get_extra_layer_scopes -multi_scale_logits_v2 = model.multi_scale_logits -refine_by_decoder = model.refine_by_decoder -scale_dimension = model.scale_dimension -split_separable_conv2d = model.split_separable_conv2d - -MERGED_LOGITS_SCOPE = model.MERGED_LOGITS_SCOPE -IMAGE_POOLING_SCOPE = model.IMAGE_POOLING_SCOPE -ASPP_SCOPE = model.ASPP_SCOPE -CONCAT_PROJECTION_SCOPE = model.CONCAT_PROJECTION_SCOPE - - -def predict_labels(images, - model_options, - image_pyramid=None, - reference_labels=None, - k_nearest_neighbors=1, - embedding_dimension=None, - use_softmax_feedback=False, - initial_softmax_feedback=None, - embedding_seg_feature_dimension=256, - embedding_seg_n_layers=4, - embedding_seg_kernel_size=7, - embedding_seg_atrous_rates=None, - also_return_softmax_probabilities=False, - num_frames_per_video=None, - normalize_nearest_neighbor_distances=False, - also_attend_to_previous_frame=False, - use_local_previous_frame_attention=False, - previous_frame_attention_window_size=9, - use_first_frame_matching=True, - also_return_embeddings=False, - ref_embeddings=None): - """Predicts segmentation labels. - - Args: - images: A tensor of size [batch, height, width, channels]. - model_options: An InternalModelOptions instance to configure models. - image_pyramid: Input image scales for multi-scale feature extraction. - reference_labels: A tensor of size [batch, height, width, 1]. - ground truth labels used to perform a nearest neighbor query - k_nearest_neighbors: Integer, the number of neighbors to use for nearest - neighbor queries. - embedding_dimension: Integer, the dimension used for the learned embedding. - use_softmax_feedback: Boolean, whether to give the softmax predictions of - the last frame as additional input to the segmentation head. - initial_softmax_feedback: Float32 tensor, or None. Can be used to - initialize the softmax predictions used for the feedback loop. - Typically only useful for inference. Only has an effect if - use_softmax_feedback is True. - embedding_seg_feature_dimension: Integer, the dimensionality used in the - segmentation head layers. - embedding_seg_n_layers: Integer, the number of layers in the segmentation - head. - embedding_seg_kernel_size: Integer, the kernel size used in the - segmentation head. - embedding_seg_atrous_rates: List of integers of length - embedding_seg_n_layers, the atrous rates to use for the segmentation head. - also_return_softmax_probabilities: Boolean, if true, additionally return - the softmax probabilities as second return value. - num_frames_per_video: Integer, the number of frames per video. - normalize_nearest_neighbor_distances: Boolean, whether to normalize the - nearest neighbor distances to [0,1] using sigmoid, scale and shift. - also_attend_to_previous_frame: Boolean, whether to also use nearest - neighbor attention with respect to the previous frame. - use_local_previous_frame_attention: Boolean, whether to restrict the - previous frame attention to a local search window. - Only has an effect, if also_attend_to_previous_frame is True. 
- previous_frame_attention_window_size: Integer, the window size used for - local previous frame attention, if use_local_previous_frame_attention - is True. - use_first_frame_matching: Boolean, whether to extract features by matching - to the reference frame. This should always be true except for ablation - experiments. - also_return_embeddings: Boolean, whether to return the embeddings as well. - ref_embeddings: Tuple of - (first_frame_embeddings, previous_frame_embeddings), - each of shape [batch, height, width, embedding_dimension], or None. - - Returns: - A dictionary with keys specifying the output_type (e.g., semantic - prediction) and values storing Tensors representing predictions (argmax - over channels). Each prediction has size [batch, height, width]. - If also_return_softmax_probabilities is True, the second return value are - the softmax probabilities. - If also_return_embeddings is True, it will also return an embeddings - tensor of shape [batch, height, width, embedding_dimension]. - - Raises: - ValueError: If classification_loss is not softmax, softmax_with_attention, - nor triplet. - """ - if (model_options.classification_loss == 'triplet' and - reference_labels is None): - raise ValueError('Need reference_labels for triplet loss') - - if model_options.classification_loss == 'softmax_with_attention': - if embedding_dimension is None: - raise ValueError('Need embedding_dimension for softmax_with_attention ' - 'loss') - if reference_labels is None: - raise ValueError('Need reference_labels for softmax_with_attention loss') - res = ( - multi_scale_logits_with_nearest_neighbor_matching( - images, - model_options=model_options, - image_pyramid=image_pyramid, - is_training=False, - reference_labels=reference_labels, - clone_batch_size=1, - num_frames_per_video=num_frames_per_video, - embedding_dimension=embedding_dimension, - max_neighbors_per_object=0, - k_nearest_neighbors=k_nearest_neighbors, - use_softmax_feedback=use_softmax_feedback, - initial_softmax_feedback=initial_softmax_feedback, - embedding_seg_feature_dimension=embedding_seg_feature_dimension, - embedding_seg_n_layers=embedding_seg_n_layers, - embedding_seg_kernel_size=embedding_seg_kernel_size, - embedding_seg_atrous_rates=embedding_seg_atrous_rates, - normalize_nearest_neighbor_distances= - normalize_nearest_neighbor_distances, - also_attend_to_previous_frame=also_attend_to_previous_frame, - use_local_previous_frame_attention= - use_local_previous_frame_attention, - previous_frame_attention_window_size= - previous_frame_attention_window_size, - use_first_frame_matching=use_first_frame_matching, - also_return_embeddings=also_return_embeddings, - ref_embeddings=ref_embeddings - )) - if also_return_embeddings: - outputs_to_scales_to_logits, embeddings = res - else: - outputs_to_scales_to_logits = res - embeddings = None - else: - outputs_to_scales_to_logits = multi_scale_logits_v2( - images, - model_options=model_options, - image_pyramid=image_pyramid, - is_training=False, - fine_tune_batch_norm=False) - - predictions = {} - for output in sorted(outputs_to_scales_to_logits): - scales_to_logits = outputs_to_scales_to_logits[output] - original_logits = scales_to_logits[MERGED_LOGITS_SCOPE] - if isinstance(original_logits, list): - assert len(original_logits) == 1 - original_logits = original_logits[0] - logits = tf.image.resize_bilinear(original_logits, tf.shape(images)[1:3], - align_corners=True) - if model_options.classification_loss in ('softmax', - 'softmax_with_attention'): - predictions[output] = 
tf.argmax(logits, 3) - elif model_options.classification_loss == 'triplet': - # to keep this fast, we do the nearest neighbor assignment on the - # resolution at which the embedding is extracted and scale the result up - # afterwards - embeddings = original_logits - reference_labels_logits_size = tf.squeeze( - tf.image.resize_nearest_neighbor( - reference_labels[tf.newaxis], - train_utils.resolve_shape(embeddings)[1:3], - align_corners=True), axis=0) - nn_labels = embedding_utils.assign_labels_by_nearest_neighbors( - embeddings[0], embeddings[1:], reference_labels_logits_size, - k_nearest_neighbors) - predictions[common.OUTPUT_TYPE] = tf.image.resize_nearest_neighbor( - nn_labels, tf.shape(images)[1:3], align_corners=True) - else: - raise ValueError( - 'Only support softmax, triplet, or softmax_with_attention for ' - 'classification_loss.') - - if also_return_embeddings: - assert also_return_softmax_probabilities - return predictions, tf.nn.softmax(original_logits, axis=-1), embeddings - elif also_return_softmax_probabilities: - return predictions, tf.nn.softmax(original_logits, axis=-1) - else: - return predictions - - -def multi_scale_logits_with_nearest_neighbor_matching( - images, - model_options, - image_pyramid, - clone_batch_size, - reference_labels, - num_frames_per_video, - embedding_dimension, - max_neighbors_per_object, - weight_decay=0.0001, - is_training=False, - fine_tune_batch_norm=False, - k_nearest_neighbors=1, - use_softmax_feedback=False, - initial_softmax_feedback=None, - embedding_seg_feature_dimension=256, - embedding_seg_n_layers=4, - embedding_seg_kernel_size=7, - embedding_seg_atrous_rates=None, - normalize_nearest_neighbor_distances=False, - also_attend_to_previous_frame=False, - damage_initial_previous_frame_mask=False, - use_local_previous_frame_attention=False, - previous_frame_attention_window_size=9, - use_first_frame_matching=True, - also_return_embeddings=False, - ref_embeddings=None): - """Gets the logits for multi-scale inputs using nearest neighbor attention. - - Adjusted version of multi_scale_logits_v2 to support nearest neighbor - attention and a variable number of classes for each element of the batch. - The returned logits are all downsampled (due to max-pooling layers) - for both training and evaluation. - - Args: - images: A tensor of size [batch, height, width, channels]. - model_options: A ModelOptions instance to configure models. - image_pyramid: Input image scales for multi-scale feature extraction. - clone_batch_size: Integer, the number of videos on a batch. - reference_labels: The segmentation labels of the reference frame on which - attention is applied. - num_frames_per_video: Integer, the number of frames per video. - embedding_dimension: Integer, the dimension of the embedding. - max_neighbors_per_object: Integer, the maximum number of candidates - for the nearest neighbor query per object after subsampling. - Can be 0 for no subsampling. - weight_decay: The weight decay for model variables. - is_training: Is training or not. - fine_tune_batch_norm: Fine-tune the batch norm parameters or not. - k_nearest_neighbors: Integer, the number of nearest neighbors to use. - use_softmax_feedback: Boolean, whether to give the softmax predictions of - the last frame as additional input to the segmentation head. - initial_softmax_feedback: List of Float32 tensors, or None. - Can be used to initialize the softmax predictions used for the feedback - loop. Only has an effect if use_softmax_feedback is True. 
- embedding_seg_feature_dimension: Integer, the dimensionality used in the - segmentation head layers. - embedding_seg_n_layers: Integer, the number of layers in the segmentation - head. - embedding_seg_kernel_size: Integer, the kernel size used in the - segmentation head. - embedding_seg_atrous_rates: List of integers of length - embedding_seg_n_layers, the atrous rates to use for the segmentation head. - normalize_nearest_neighbor_distances: Boolean, whether to normalize the - nearest neighbor distances to [0,1] using sigmoid, scale and shift. - also_attend_to_previous_frame: Boolean, whether to also use nearest - neighbor attention with respect to the previous frame. - damage_initial_previous_frame_mask: Boolean, whether to artificially damage - the initial previous frame mask. Only has an effect if - also_attend_to_previous_frame is True. - use_local_previous_frame_attention: Boolean, whether to restrict the - previous frame attention to a local search window. - Only has an effect, if also_attend_to_previous_frame is True. - previous_frame_attention_window_size: Integer, the window size used for - local previous frame attention, if use_local_previous_frame_attention - is True. - use_first_frame_matching: Boolean, whether to extract features by matching - to the reference frame. This should always be true except for ablation - experiments. - also_return_embeddings: Boolean, whether to return the embeddings as well. - ref_embeddings: Tuple of - (first_frame_embeddings, previous_frame_embeddings), - each of shape [batch, height, width, embedding_dimension], or None. - - Returns: - outputs_to_scales_to_logits: A map of maps from output_type (e.g., - semantic prediction) to a dictionary of multi-scale logits names to - logits. For each output_type, the dictionary has keys which - correspond to the scales and values which correspond to the logits. - For example, if `scales` equals [1.0, 1.5], then the keys would - include 'merged_logits', 'logits_1.00' and 'logits_1.50'. - If also_return_embeddings is True, it will also return an embeddings - tensor of shape [batch, height, width, embedding_dimension]. - - Raises: - ValueError: If model_options doesn't specify crop_size and its - add_image_level_feature = True, since add_image_level_feature requires - crop_size information. - """ - # Setup default values. - if not image_pyramid: - image_pyramid = [1.0] - crop_height = ( - model_options.crop_size[0] - if model_options.crop_size else tf.shape(images)[1]) - crop_width = ( - model_options.crop_size[1] - if model_options.crop_size else tf.shape(images)[2]) - - # Compute the height, width for the output logits. - if model_options.decoder_output_stride: - logits_output_stride = min(model_options.decoder_output_stride) - else: - logits_output_stride = model_options.output_stride - logits_height = scale_dimension( - crop_height, - max(1.0, max(image_pyramid)) / logits_output_stride) - logits_width = scale_dimension( - crop_width, - max(1.0, max(image_pyramid)) / logits_output_stride) - - # Compute the logits for each scale in the image pyramid. 
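The logits resolution computed just above follows align_corners semantics via scale_dimension. A sketch of that helper for plain integers (deeplab's model.py version also accepts tensors), with the training defaults from this directory as a worked example:

def scale_dimension(dim, scale):
  # Align-corners scaling: the centers of the first and last pixels are
  # preserved, hence the -1/+1.
  return int((float(dim) - 1.0) * scale + 1.0)

# With a 465x465 train crop, an image pyramid of [1.0] and a decoder
# output stride of 4:
logits_size = scale_dimension(465, max(1.0, 1.0) / 4)
assert logits_size == 117  # (465 - 1) / 4 + 1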
- outputs_to_scales_to_logits = { - k: {} - for k in model_options.outputs_to_num_classes - } - - for image_scale in image_pyramid: - if image_scale != 1.0: - scaled_height = scale_dimension(crop_height, image_scale) - scaled_width = scale_dimension(crop_width, image_scale) - scaled_crop_size = [scaled_height, scaled_width] - scaled_images = tf.image.resize_bilinear( - images, scaled_crop_size, align_corners=True) - scaled_reference_labels = tf.image.resize_nearest_neighbor( - reference_labels, scaled_crop_size, align_corners=True - ) - if model_options.crop_size is None: - scaled_crop_size = None - if model_options.crop_size: - scaled_images.set_shape([None, scaled_height, scaled_width, 3]) - else: - scaled_crop_size = model_options.crop_size - scaled_images = images - scaled_reference_labels = reference_labels - - updated_options = model_options._replace(crop_size=scaled_crop_size) - res = embedding_utils.get_logits_with_matching( - scaled_images, - updated_options, - weight_decay=weight_decay, - reuse=tf.AUTO_REUSE, - is_training=is_training, - fine_tune_batch_norm=fine_tune_batch_norm, - reference_labels=scaled_reference_labels, - batch_size=clone_batch_size, - num_frames_per_video=num_frames_per_video, - embedding_dimension=embedding_dimension, - max_neighbors_per_object=max_neighbors_per_object, - k_nearest_neighbors=k_nearest_neighbors, - use_softmax_feedback=use_softmax_feedback, - initial_softmax_feedback=initial_softmax_feedback, - embedding_seg_feature_dimension=embedding_seg_feature_dimension, - embedding_seg_n_layers=embedding_seg_n_layers, - embedding_seg_kernel_size=embedding_seg_kernel_size, - embedding_seg_atrous_rates=embedding_seg_atrous_rates, - normalize_nearest_neighbor_distances= - normalize_nearest_neighbor_distances, - also_attend_to_previous_frame=also_attend_to_previous_frame, - damage_initial_previous_frame_mask=damage_initial_previous_frame_mask, - use_local_previous_frame_attention=use_local_previous_frame_attention, - previous_frame_attention_window_size= - previous_frame_attention_window_size, - use_first_frame_matching=use_first_frame_matching, - also_return_embeddings=also_return_embeddings, - ref_embeddings=ref_embeddings - ) - if also_return_embeddings: - outputs_to_logits, embeddings = res - else: - outputs_to_logits = res - embeddings = None - - # Resize the logits to have the same dimension before merging. - for output in sorted(outputs_to_logits): - if isinstance(outputs_to_logits[output], collections.Sequence): - outputs_to_logits[output] = [tf.image.resize_bilinear( - x, [logits_height, logits_width], align_corners=True) - for x in outputs_to_logits[output]] - else: - outputs_to_logits[output] = tf.image.resize_bilinear( - outputs_to_logits[output], [logits_height, logits_width], - align_corners=True) - - # Return when only one input scale. - if len(image_pyramid) == 1: - for output in sorted(model_options.outputs_to_num_classes): - outputs_to_scales_to_logits[output][ - MERGED_LOGITS_SCOPE] = outputs_to_logits[output] - if also_return_embeddings: - return outputs_to_scales_to_logits, embeddings - else: - return outputs_to_scales_to_logits - - # Save logits to the output map. - for output in sorted(model_options.outputs_to_num_classes): - outputs_to_scales_to_logits[output][ - 'logits_%.2f' % image_scale] = outputs_to_logits[output] - - # Merge the logits from all the multi-scale inputs. - for output in sorted(model_options.outputs_to_num_classes): - # Concatenate the multi-scale logits for each output type. 
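Each pass through the loop above yields one set of logits per pyramid scale, resized to the common logits resolution; the code that follows concatenates them along a new fifth axis and reduces with max or mean. The same merge in isolation, as a minimal sketch that ignores the list-of-logits bookkeeping of this file:

import tensorflow as tf

def merge_multi_scale_logits(per_scale_logits, merge_method='max'):
  # per_scale_logits: list of [batch, height, width, classes] tensors that
  # have already been resized to a common spatial resolution.
  stacked = tf.stack(per_scale_logits, axis=4)
  merge_fn = tf.reduce_max if merge_method == 'max' else tf.reduce_mean
  return merge_fn(stacked, axis=4)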
- all_logits = [ - [tf.expand_dims(l, axis=4)] - for logits in outputs_to_scales_to_logits[output].values() - for l in logits - ] - transposed = map(list, zip(*all_logits)) - all_logits = [tf.concat(t, 4) for t in transposed] - merge_fn = ( - tf.reduce_max - if model_options.merge_method == 'max' else tf.reduce_mean) - outputs_to_scales_to_logits[output][MERGED_LOGITS_SCOPE] = [merge_fn( - l, axis=4) for l in all_logits] - - if also_return_embeddings: - return outputs_to_scales_to_logits, embeddings - else: - return outputs_to_scales_to_logits diff --git a/research/feelvos/train.py b/research/feelvos/train.py deleted file mode 100644 index 16c085722..000000000 --- a/research/feelvos/train.py +++ /dev/null @@ -1,630 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Training script for the FEELVOS model. - -See model.py for more details and usage. -""" -import six -import tensorflow as tf - -from feelvos import common -from feelvos import model -from feelvos.datasets import video_dataset -from feelvos.utils import embedding_utils -from feelvos.utils import train_utils -from feelvos.utils import video_input_generator -from deployment import model_deploy - -slim = tf.contrib.slim -prefetch_queue = slim.prefetch_queue -flags = tf.app.flags -FLAGS = flags.FLAGS - -# Settings for multi-GPUs/multi-replicas training. - -flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.') - -flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.') - -flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.') - -flags.DEFINE_integer('startup_delay_steps', 15, - 'Number of training steps between replicas startup.') - -flags.DEFINE_integer('num_ps_tasks', 0, - 'The number of parameter servers. If the value is 0, then ' - 'the parameters are handled locally by the worker.') - -flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') - -flags.DEFINE_integer('task', 0, 'The task ID.') - -# Settings for logging. - -flags.DEFINE_string('train_logdir', None, - 'Where the checkpoint and logs are stored.') - -flags.DEFINE_integer('log_steps', 10, - 'Display logging information at every log_steps.') - -flags.DEFINE_integer('save_interval_secs', 1200, - 'How often, in seconds, we save the model to disk.') - -flags.DEFINE_integer('save_summaries_secs', 600, - 'How often, in seconds, we compute the summaries.') - -# Settings for training strategy. 
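The 'poly' learning rate policy selected by the flags below decays the base rate as base_learning_rate * (1 - step / training_number_of_steps) ** learning_power, optionally holding a small constant rate for the first slow_start_step steps. A sketch, assuming this matches train_utils.get_model_learning_rate:

import tensorflow as tf

def poly_learning_rate(base_learning_rate, global_step,
                       training_number_of_steps, learning_power,
                       slow_start_step=0, slow_start_learning_rate=1e-4):
  learning_rate = tf.train.polynomial_decay(
      base_learning_rate, global_step, training_number_of_steps,
      end_learning_rate=0.0, power=learning_power)
  # Employ a small constant learning rate during the slow-start phase.
  return tf.where(global_step < slow_start_step,
                  slow_start_learning_rate, learning_rate)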
-
-flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
-                  'Learning rate policy for training.')
-
-flags.DEFINE_float('base_learning_rate', 0.0007,
-                   'The base learning rate for model training.')
-
-flags.DEFINE_float('learning_rate_decay_factor', 0.1,
-                   'The rate to decay the base learning rate.')
-
-flags.DEFINE_integer('learning_rate_decay_step', 2000,
-                     'Decay the base learning rate at a fixed step.')
-
-flags.DEFINE_float('learning_power', 0.9,
-                   'The power value used in the poly learning policy.')
-
-flags.DEFINE_integer('training_number_of_steps', 200000,
-                     'The number of steps used for training.')
-
-flags.DEFINE_float('momentum', 0.9, 'The momentum value to use.')
-
-flags.DEFINE_integer('train_batch_size', 6,
-                     'The number of images in each batch during training.')
-
-flags.DEFINE_integer('train_num_frames_per_video', 3,
-                     'The number of frames used per video during training.')
-
-flags.DEFINE_float('weight_decay', 0.00004,
-                   'The value of the weight decay for training.')
-
-flags.DEFINE_multi_integer('train_crop_size', [465, 465],
-                           'Image crop size [height, width] during training.')
-
-flags.DEFINE_float('last_layer_gradient_multiplier', 1.0,
-                   'The gradient multiplier for last layers, which is used to '
-                   'boost the gradient of last layers if the value > 1.')
-
-flags.DEFINE_boolean('upsample_logits', True,
-                     'Upsample logits during training.')
-
-flags.DEFINE_integer('batch_capacity_factor', 16, 'Batch capacity factor.')
-
-flags.DEFINE_integer('num_readers', 1, 'Number of readers for data provider.')
-
-flags.DEFINE_integer('batch_num_threads', 1, 'Batch number of threads.')
-
-flags.DEFINE_integer('prefetch_queue_capacity_factor', 32,
-                     'Prefetch queue capacity factor.')
-
-flags.DEFINE_integer('prefetch_queue_num_threads', 1,
-                     'Prefetch queue number of threads.')
-
-flags.DEFINE_integer('train_max_neighbors_per_object', 1024,
-                     'The maximum number of candidates for the nearest '
-                     'neighbor query per object after subsampling.')
-
-# Settings for fine-tuning the network.
-
-flags.DEFINE_string('tf_initial_checkpoint', None,
-                    'The initial checkpoint in tensorflow format.')
-
-flags.DEFINE_boolean('initialize_last_layer', False,
-                     'Initialize the last layer.')
-
-flags.DEFINE_boolean('last_layers_contain_logits_only', False,
-                     'Only consider logits as last layers or not.')
-
-flags.DEFINE_integer('slow_start_step', 0,
-                     'Training model with small learning rate for few steps.')
-
-flags.DEFINE_float('slow_start_learning_rate', 1e-4,
-                   'Learning rate employed during slow start.')
-
-flags.DEFINE_boolean('fine_tune_batch_norm', False,
-                     'Fine tune the batch norm parameters or not.')
-
-flags.DEFINE_float('min_scale_factor', 1.,
-                   'Minimum scale factor for data augmentation.')
-
-flags.DEFINE_float('max_scale_factor', 1.3,
-                   'Maximum scale factor for data augmentation.')
-
-flags.DEFINE_float('scale_factor_step_size', 0,
-                   'Scale factor step size for data augmentation.')
-
-flags.DEFINE_multi_integer('atrous_rates', None,
-                           'Atrous rates for atrous spatial pyramid pooling.')
-
-flags.DEFINE_integer('output_stride', 8,
-                     'The ratio of input to output spatial resolution.')
-
-flags.DEFINE_boolean('sample_only_first_frame_for_finetuning', False,
-                     'Whether to only sample the first frame during '
-                     'fine-tuning. This should be False when using lucid data, '
-                     'but True when fine-tuning on the first frame only. Only '
-                     'has an effect if first_frame_finetuning is True.')
-
-flags.DEFINE_multi_integer('first_frame_finetuning', [0],
-                           'Whether to only sample the first frame for '
-                           'fine-tuning.')
-
-# Dataset settings.
-
-flags.DEFINE_multi_string('dataset', [], 'Name of the segmentation datasets.')
-
-flags.DEFINE_multi_float('dataset_sampling_probabilities', [],
-                         'A list of probabilities to sample each of the '
-                         'datasets.')
-
-flags.DEFINE_string('train_split', 'train',
                    'Which split of the dataset to be used for training.')
-
-flags.DEFINE_multi_string('dataset_dir', [], 'Where the datasets reside.')
-
-flags.DEFINE_multi_integer('three_frame_dataset', [0],
-                           'Whether the dataset has exactly three frames per '
-                           'video of which the first is to be used as reference'
-                           ' and the two others are consecutive frames to be '
-                           'used as query frames. '
-                           'Set true for pascal lucid data.')
-
-flags.DEFINE_boolean('damage_initial_previous_frame_mask', False,
-                     'Whether to artificially damage the initial previous '
-                     'frame mask. Only has an effect if '
-                     'also_attend_to_previous_frame is True.')
-
-flags.DEFINE_float('top_k_percent_pixels', 0.15, 'Float in [0.0, 1.0]. '
-                   'When its value < 1.0, only compute the loss for the top k '
-                   'percent pixels (e.g., the top 20% pixels). This is useful '
-                   'for hard pixel mining.')
-
-flags.DEFINE_integer('hard_example_mining_step', 100000,
-                     'The training step in which the hard example mining '
-                     'kicks off. Note that we gradually reduce the mining '
-                     'percent to the top_k_percent_pixels. For example, if '
-                     'hard_example_mining_step=100K and '
-                     'top_k_percent_pixels=0.25, then mining percent will '
-                     'gradually reduce from 100% to 25% until 100K steps '
-                     'after which we only mine top 25% pixels. Only has an '
-                     'effect if top_k_percent_pixels < 1.0.')
-
-
-def _build_deeplab(inputs_queue_or_samples, outputs_to_num_classes,
-                   ignore_label):
-  """Builds a clone of DeepLab.
-
-  Args:
-    inputs_queue_or_samples: A prefetch queue for images and labels, or
-      directly a dict of the samples.
-    outputs_to_num_classes: A map from output type to the number of classes.
-      For example, for the task of semantic segmentation with 21 semantic
-      classes, we would have outputs_to_num_classes['semantic'] = 21.
-    ignore_label: Ignore label.
-
-  Returns:
-    A map of maps from output_type (e.g., semantic prediction) to a
-    dictionary of multi-scale logits names to logits. For each output_type,
-    the dictionary has keys which correspond to the scales and values which
-    correspond to the logits. For example, if `scales` equals [1.0, 1.5],
-    then the keys would include 'merged_logits', 'logits_1.00' and
-    'logits_1.50'.
-
-  Raises:
-    ValueError: If classification_loss is not softmax, softmax_with_attention,
-      or triplet.
-  """
-  if hasattr(inputs_queue_or_samples, 'dequeue'):
-    samples = inputs_queue_or_samples.dequeue()
-  else:
-    samples = inputs_queue_or_samples
-  train_crop_size = (None if 0 in FLAGS.train_crop_size else
-                     FLAGS.train_crop_size)
-
-  model_options = common.VideoModelOptions(
-      outputs_to_num_classes=outputs_to_num_classes,
-      crop_size=train_crop_size,
-      atrous_rates=FLAGS.atrous_rates,
-      output_stride=FLAGS.output_stride)
-
-  if model_options.classification_loss == 'softmax_with_attention':
-    clone_batch_size = FLAGS.train_batch_size // FLAGS.num_clones
-
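The hard pixel mining configured by top_k_percent_pixels and hard_example_mining_step above amounts to ramping the mined fraction linearly from 100% down to top_k_percent_pixels over the first hard_example_mining_step steps, then averaging only the top-k per-pixel losses. A minimal sketch of that schedule (not the exact train_utils implementation):

import tensorflow as tf

def top_k_pixel_loss(pixel_losses, global_step, top_k_percent_pixels=0.15,
                     hard_example_mining_step=100000):
  # pixel_losses: float32 tensor of shape [num_pixels].
  num_pixels = tf.cast(tf.size(pixel_losses), tf.float32)
  ramp = tf.minimum(
      1.0, tf.cast(global_step, tf.float32) / hard_example_mining_step)
  # Mined fraction moves from 1.0 at step 0 to top_k_percent_pixels.
  current_fraction = 1.0 + ramp * (top_k_percent_pixels - 1.0)
  k = tf.maximum(tf.cast(num_pixels * current_fraction, tf.int32), 1)
  top_losses, _ = tf.nn.top_k(pixel_losses, k=k)
  return tf.reduce_mean(top_losses)

-    # Create summaries of ground truth labels.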
- for n in range(clone_batch_size): - tf.summary.image( - 'gt_label_%d' % n, - tf.cast(samples[common.LABEL][ - n * FLAGS.train_num_frames_per_video: - (n + 1) * FLAGS.train_num_frames_per_video], - tf.uint8) * 32, max_outputs=FLAGS.train_num_frames_per_video) - - if common.PRECEDING_FRAME_LABEL in samples: - preceding_frame_label = samples[common.PRECEDING_FRAME_LABEL] - init_softmax = [] - for n in range(clone_batch_size): - init_softmax_n = embedding_utils.create_initial_softmax_from_labels( - preceding_frame_label[n, tf.newaxis], - samples[common.LABEL][n * FLAGS.train_num_frames_per_video, - tf.newaxis], - common.parse_decoder_output_stride(), - reduce_labels=True) - init_softmax_n = tf.squeeze(init_softmax_n, axis=0) - init_softmax.append(init_softmax_n) - tf.summary.image('preceding_frame_label', - tf.cast(preceding_frame_label[n, tf.newaxis], - tf.uint8) * 32) - else: - init_softmax = None - - outputs_to_scales_to_logits = ( - model.multi_scale_logits_with_nearest_neighbor_matching( - samples[common.IMAGE], - model_options=model_options, - image_pyramid=FLAGS.image_pyramid, - weight_decay=FLAGS.weight_decay, - is_training=True, - fine_tune_batch_norm=FLAGS.fine_tune_batch_norm, - reference_labels=samples[common.LABEL], - clone_batch_size=FLAGS.train_batch_size // FLAGS.num_clones, - num_frames_per_video=FLAGS.train_num_frames_per_video, - embedding_dimension=FLAGS.embedding_dimension, - max_neighbors_per_object=FLAGS.train_max_neighbors_per_object, - k_nearest_neighbors=FLAGS.k_nearest_neighbors, - use_softmax_feedback=FLAGS.use_softmax_feedback, - initial_softmax_feedback=init_softmax, - embedding_seg_feature_dimension= - FLAGS.embedding_seg_feature_dimension, - embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, - embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, - embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, - normalize_nearest_neighbor_distances= - FLAGS.normalize_nearest_neighbor_distances, - also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, - damage_initial_previous_frame_mask= - FLAGS.damage_initial_previous_frame_mask, - use_local_previous_frame_attention= - FLAGS.use_local_previous_frame_attention, - previous_frame_attention_window_size= - FLAGS.previous_frame_attention_window_size, - use_first_frame_matching=FLAGS.use_first_frame_matching - )) - else: - outputs_to_scales_to_logits = model.multi_scale_logits_v2( - samples[common.IMAGE], - model_options=model_options, - image_pyramid=FLAGS.image_pyramid, - weight_decay=FLAGS.weight_decay, - is_training=True, - fine_tune_batch_norm=FLAGS.fine_tune_batch_norm) - - if model_options.classification_loss == 'softmax': - for output, num_classes in six.iteritems(outputs_to_num_classes): - train_utils.add_softmax_cross_entropy_loss_for_each_scale( - outputs_to_scales_to_logits[output], - samples[common.LABEL], - num_classes, - ignore_label, - loss_weight=1.0, - upsample_logits=FLAGS.upsample_logits, - scope=output) - elif model_options.classification_loss == 'triplet': - for output, _ in six.iteritems(outputs_to_num_classes): - train_utils.add_triplet_loss_for_each_scale( - FLAGS.train_batch_size // FLAGS.num_clones, - FLAGS.train_num_frames_per_video, - FLAGS.embedding_dimension, outputs_to_scales_to_logits[output], - samples[common.LABEL], scope=output) - elif model_options.classification_loss == 'softmax_with_attention': - labels = samples[common.LABEL] - batch_size = FLAGS.train_batch_size // FLAGS.num_clones - num_frames_per_video = FLAGS.train_num_frames_per_video - h, w = 
train_utils.resolve_shape(labels)[1:3] - labels = tf.reshape(labels, tf.stack( - [batch_size, num_frames_per_video, h, w, 1])) - # Strip the reference labels off. - if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: - n_ref_frames = 2 - else: - n_ref_frames = 1 - labels = labels[:, n_ref_frames:] - # Merge batch and time dimensions. - labels = tf.reshape(labels, tf.stack( - [batch_size * (num_frames_per_video - n_ref_frames), h, w, 1])) - - for output, num_classes in six.iteritems(outputs_to_num_classes): - train_utils.add_dynamic_softmax_cross_entropy_loss_for_each_scale( - outputs_to_scales_to_logits[output], - labels, - ignore_label, - loss_weight=1.0, - upsample_logits=FLAGS.upsample_logits, - scope=output, - top_k_percent_pixels=FLAGS.top_k_percent_pixels, - hard_example_mining_step=FLAGS.hard_example_mining_step) - else: - raise ValueError('Only support softmax, softmax_with_attention' - ' or triplet for classification_loss.') - - return outputs_to_scales_to_logits - - -def main(unused_argv): - # Set up deployment (i.e., multi-GPUs and/or multi-replicas). - config = model_deploy.DeploymentConfig( - num_clones=FLAGS.num_clones, - clone_on_cpu=FLAGS.clone_on_cpu, - replica_id=FLAGS.task, - num_replicas=FLAGS.num_replicas, - num_ps_tasks=FLAGS.num_ps_tasks) - - with tf.Graph().as_default(): - with tf.device(config.inputs_device()): - train_crop_size = (None if 0 in FLAGS.train_crop_size else - FLAGS.train_crop_size) - assert FLAGS.dataset - assert len(FLAGS.dataset) == len(FLAGS.dataset_dir) - if len(FLAGS.first_frame_finetuning) == 1: - first_frame_finetuning = (list(FLAGS.first_frame_finetuning) - * len(FLAGS.dataset)) - else: - first_frame_finetuning = FLAGS.first_frame_finetuning - if len(FLAGS.three_frame_dataset) == 1: - three_frame_dataset = (list(FLAGS.three_frame_dataset) - * len(FLAGS.dataset)) - else: - three_frame_dataset = FLAGS.three_frame_dataset - assert len(FLAGS.dataset) == len(first_frame_finetuning) - assert len(FLAGS.dataset) == len(three_frame_dataset) - datasets, samples_list = zip( - *[_get_dataset_and_samples(config, train_crop_size, dataset, - dataset_dir, bool(first_frame_finetuning_), - bool(three_frame_dataset_)) - for dataset, dataset_dir, first_frame_finetuning_, - three_frame_dataset_ in zip(FLAGS.dataset, FLAGS.dataset_dir, - first_frame_finetuning, - three_frame_dataset)]) - # Note that this way of doing things is wasteful since it will evaluate - # all branches but just use one of them. But let's do it anyway for now, - # since it's easy and will probably be fast enough. - dataset = datasets[0] - if len(samples_list) == 1: - samples = samples_list[0] - else: - probabilities = FLAGS.dataset_sampling_probabilities - if probabilities: - assert len(probabilities) == len(samples_list) - else: - # Default to uniform probabilities. - probabilities = [1.0 / len(samples_list) for _ in samples_list] - probabilities = tf.constant(probabilities) - logits = tf.log(probabilities[tf.newaxis]) - rand_idx = tf.squeeze(tf.multinomial(logits, 1, output_dtype=tf.int32), - axis=[0, 1]) - - def wrap(x): - def f(): - return x - return f - - samples = tf.case({tf.equal(rand_idx, idx): wrap(s) - for idx, s in enumerate(samples_list)}, - exclusive=True) - - # Prefetch_queue requires the shape to be known at graph creation time. - # So we only use it if we crop to a fixed size. 
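Stepping back to the dataset selection above: one dataset index is drawn per step from the sampling probabilities via a multinomial over log-probabilities, and tf.case routes the matching samples dict through. The same pattern in isolation, with hypothetical dataset names (only davis_2017 appears in this directory):

import tensorflow as tf

probabilities = tf.constant([0.5, 0.3, 0.2])
logits = tf.log(probabilities[tf.newaxis])
rand_idx = tf.squeeze(
    tf.multinomial(logits, 1, output_dtype=tf.int32), axis=[0, 1])

# Each branch must be wrapped in a callable; tf.case evaluates exactly
# one of them per session run.
branches = {
    tf.equal(rand_idx, 0): lambda: tf.constant('davis_2017'),
    tf.equal(rand_idx, 1): lambda: tf.constant('dataset_b'),
    tf.equal(rand_idx, 2): lambda: tf.constant('dataset_c'),
}
chosen_dataset = tf.case(branches, exclusive=True)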
- if train_crop_size is None: - inputs_queue = samples - else: - inputs_queue = prefetch_queue.prefetch_queue( - samples, - capacity=FLAGS.prefetch_queue_capacity_factor*config.num_clones, - num_threads=FLAGS.prefetch_queue_num_threads) - - # Create the global step on the device storing the variables. - with tf.device(config.variables_device()): - global_step = tf.train.get_or_create_global_step() - - # Define the model and create clones. - model_fn = _build_deeplab - if FLAGS.classification_loss == 'triplet': - embedding_dim = FLAGS.embedding_dimension - output_type_to_dim = {'embedding': embedding_dim} - else: - output_type_to_dim = {common.OUTPUT_TYPE: dataset.num_classes} - model_args = (inputs_queue, output_type_to_dim, dataset.ignore_label) - clones = model_deploy.create_clones(config, model_fn, args=model_args) - - # Gather update_ops from the first clone. These contain, for example, - # the updates for the batch_norm variables created by model_fn. - first_clone_scope = config.clone_scope(0) - update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) - - # Gather initial summaries. - summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) - - # Add summaries for model variables. - for model_var in tf.contrib.framework.get_model_variables(): - summaries.add(tf.summary.histogram(model_var.op.name, model_var)) - - # Add summaries for losses. - for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope): - summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss)) - - # Build the optimizer based on the device specification. - with tf.device(config.optimizer_device()): - learning_rate = train_utils.get_model_learning_rate( - FLAGS.learning_policy, - FLAGS.base_learning_rate, - FLAGS.learning_rate_decay_step, - FLAGS.learning_rate_decay_factor, - FLAGS.training_number_of_steps, - FLAGS.learning_power, - FLAGS.slow_start_step, - FLAGS.slow_start_learning_rate) - optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum) - summaries.add(tf.summary.scalar('learning_rate', learning_rate)) - - startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps - - with tf.device(config.variables_device()): - total_loss, grads_and_vars = model_deploy.optimize_clones( - clones, optimizer) - total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.') - summaries.add(tf.summary.scalar('total_loss', total_loss)) - - # Modify the gradients for biases and last layer variables. - last_layers = model.get_extra_layer_scopes( - FLAGS.last_layers_contain_logits_only) - grad_mult = train_utils.get_model_gradient_multipliers( - last_layers, FLAGS.last_layer_gradient_multiplier) - if grad_mult: - grads_and_vars = slim.learning.multiply_gradients(grads_and_vars, - grad_mult) - - with tf.name_scope('grad_clipping'): - grads_and_vars = slim.learning.clip_gradient_norms(grads_and_vars, 5.0) - - # Create histogram summaries for the gradients. - # We have too many summaries for mldash, so disable this one for now. - # for grad, var in grads_and_vars: - # summaries.add(tf.summary.histogram( - # var.name.replace(':0', '_0') + '/gradient', grad)) - - # Create gradient update op. - grad_updates = optimizer.apply_gradients(grads_and_vars, - global_step=global_step) - update_ops.append(grad_updates) - update_op = tf.group(*update_ops) - with tf.control_dependencies([update_op]): - train_tensor = tf.identity(total_loss, name='train_op') - - # Add the summaries from the first clone. 
These contain the summaries
-    # created by model_fn and either optimize_clones() or _gather_clone_loss().
-    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
-                                       first_clone_scope))
-
-    # Merge all summaries together.
-    summary_op = tf.summary.merge(list(summaries))
-
-    # Soft placement allows placing on CPU ops without GPU implementation.
-    session_config = tf.ConfigProto(allow_soft_placement=True,
-                                    log_device_placement=False)
-
-    # Start the training.
-    slim.learning.train(
-        train_tensor,
-        logdir=FLAGS.train_logdir,
-        log_every_n_steps=FLAGS.log_steps,
-        master=FLAGS.master,
-        number_of_steps=FLAGS.training_number_of_steps,
-        is_chief=(FLAGS.task == 0),
-        session_config=session_config,
-        startup_delay_steps=startup_delay_steps,
-        init_fn=train_utils.get_model_init_fn(FLAGS.train_logdir,
-                                              FLAGS.tf_initial_checkpoint,
-                                              FLAGS.initialize_last_layer,
-                                              last_layers,
-                                              ignore_missing_vars=True),
-        summary_op=summary_op,
-        save_summaries_secs=FLAGS.save_summaries_secs,
-        save_interval_secs=FLAGS.save_interval_secs)
-
-
-def _get_dataset_and_samples(config, train_crop_size, dataset_name,
-                             dataset_dir, first_frame_finetuning,
-                             three_frame_dataset):
-  """Creates dataset object and samples dict of tensor.
-
-  Args:
-    config: A DeploymentConfig.
-    train_crop_size: List of integers [crop_height, crop_width], or None if
-      no fixed crop is used.
-    dataset_name: String, the name of the dataset.
-    dataset_dir: String, the directory of the dataset.
-    first_frame_finetuning: Boolean, whether the used dataset is a dataset
-      for first frame fine-tuning.
-    three_frame_dataset: Boolean, whether the dataset has exactly three frames
-      per video of which the first is to be used as reference and the two
-      others are consecutive frames to be used as query frames.
-
-  Returns:
-    dataset: An instance of slim Dataset.
-    samples: A dictionary of tensors for semantic segmentation.
-  """
-
-  # Split the batch across GPUs.
-  assert FLAGS.train_batch_size % config.num_clones == 0, (
-      'Training batch size not divisible by number of clones (GPUs).')
-
-  clone_batch_size = FLAGS.train_batch_size // config.num_clones
-
-  if first_frame_finetuning:
-    train_split = 'val'
-  else:
-    train_split = FLAGS.train_split
-
-  data_type = 'tf_sequence_example'
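In numbers, using the settings quoted in train.sh below (num_clones=2, train_batch_size=6, train_num_frames_per_video=3), each clone receives 3 videos, i.e. 9 frames per step:

train_batch_size = 6
num_clones = 2
train_num_frames_per_video = 3
assert train_batch_size % num_clones == 0
clone_batch_size = train_batch_size // num_clones  # 3 videos per clone
frames_per_clone = clone_batch_size * train_num_frames_per_video  # 9 frames

-  # Get dataset-dependent information.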
- dataset = video_dataset.get_dataset( - dataset_name, - train_split, - dataset_dir=dataset_dir, - data_type=data_type) - - tf.gfile.MakeDirs(FLAGS.train_logdir) - tf.logging.info('Training on %s set', train_split) - - samples = video_input_generator.get( - dataset, - FLAGS.train_num_frames_per_video, - train_crop_size, - clone_batch_size, - num_readers=FLAGS.num_readers, - num_threads=FLAGS.batch_num_threads, - min_resize_value=FLAGS.min_resize_value, - max_resize_value=FLAGS.max_resize_value, - resize_factor=FLAGS.resize_factor, - min_scale_factor=FLAGS.min_scale_factor, - max_scale_factor=FLAGS.max_scale_factor, - scale_factor_step_size=FLAGS.scale_factor_step_size, - dataset_split=FLAGS.train_split, - is_training=True, - model_variant=FLAGS.model_variant, - batch_capacity_factor=FLAGS.batch_capacity_factor, - decoder_output_stride=common.parse_decoder_output_stride(), - first_frame_finetuning=first_frame_finetuning, - sample_only_first_frame_for_finetuning= - FLAGS.sample_only_first_frame_for_finetuning, - sample_adjacent_and_consistent_query_frames= - FLAGS.sample_adjacent_and_consistent_query_frames or - FLAGS.use_softmax_feedback, - remap_labels_to_reference_frame=True, - three_frame_dataset=three_frame_dataset, - add_prev_frame_label=not FLAGS.also_attend_to_previous_frame - ) - return dataset, samples - - -if __name__ == '__main__': - flags.mark_flag_as_required('train_logdir') - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run() diff --git a/research/feelvos/train.sh b/research/feelvos/train.sh deleted file mode 100755 index 63b7ea19d..000000000 --- a/research/feelvos/train.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# This script is used to run local training on DAVIS 2017. Users could also -# modify from this script for their use case. See eval.sh for an example of -# local inference with a pre-trained model. -# -# Note that this script runs local training with a single GPU and a smaller crop -# and batch size, while in the paper, we trained our models with 16 GPUS with -# --num_clones=2, --train_batch_size=6, --num_replicas=8, -# --training_number_of_steps=200000, --train_crop_size=465, -# --train_crop_size=465. -# -# Usage: -# # From the tensorflow/models/research/feelvos directory. -# sh ./train.sh -# -# - -# Exit immediately if a command exits with a non-zero status. -set -e - -# Move one-level up to tensorflow/models/research directory. -cd .. - -# Update PYTHONPATH. -export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim:`pwd`/feelvos - -# Set up the working environment. -CURRENT_DIR=$(pwd) -WORK_DIR="${CURRENT_DIR}/feelvos" - -# Set up the working directories. 
-DATASET_DIR="datasets"
-DAVIS_FOLDER="davis17"
-DAVIS_DATASET="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/tfrecord"
-EXP_FOLDER="exp/train"
-TRAIN_LOGDIR="${WORK_DIR}/${DATASET_DIR}/${DAVIS_FOLDER}/${EXP_FOLDER}/train"
-mkdir -p ${TRAIN_LOGDIR}
-
-# Go to datasets folder and download and convert the DAVIS 2017 dataset.
-DATASET_DIR="datasets"
-cd "${WORK_DIR}/${DATASET_DIR}"
-sh download_and_convert_davis17.sh
-
-# Go to models folder and download and unpack the COCO pre-trained model.
-MODELS_DIR="models"
-mkdir -p "${WORK_DIR}/${MODELS_DIR}"
-cd "${WORK_DIR}/${MODELS_DIR}"
-if [ ! -d "xception_65_coco_pretrained" ]; then
-  wget http://download.tensorflow.org/models/xception_65_coco_pretrained_2018_10_02.tar.gz
-  tar -xvf xception_65_coco_pretrained_2018_10_02.tar.gz
-  rm xception_65_coco_pretrained_2018_10_02.tar.gz
-fi
-INIT_CKPT="${WORK_DIR}/${MODELS_DIR}/xception_65_coco_pretrained/x65-b2u1s2p-d48-2-3x256-sc-cr300k_init.ckpt"
-
-# Go back to original directory.
-cd "${CURRENT_DIR}"
-
-python "${WORK_DIR}"/train.py \
-  --dataset=davis_2017 \
-  --dataset_dir="${DAVIS_DATASET}" \
-  --train_logdir="${TRAIN_LOGDIR}" \
-  --tf_initial_checkpoint="${INIT_CKPT}" \
-  --logtostderr \
-  --atrous_rates=6 \
-  --atrous_rates=12 \
-  --atrous_rates=18 \
-  --decoder_output_stride=4 \
-  --model_variant=xception_65 \
-  --multi_grid=1 \
-  --multi_grid=1 \
-  --multi_grid=1 \
-  --output_stride=16 \
-  --weight_decay=0.00004 \
-  --num_clones=1 \
-  --train_batch_size=1 \
-  --train_crop_size=300 \
-  --train_crop_size=300
diff --git a/research/feelvos/utils/__init__.py b/research/feelvos/utils/__init__.py
deleted file mode 100644
index 6f1373443..000000000
--- a/research/feelvos/utils/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
diff --git a/research/feelvos/utils/embedding_utils.py b/research/feelvos/utils/embedding_utils.py
deleted file mode 100644
index 233c70d93..000000000
--- a/research/feelvos/utils/embedding_utils.py
+++ /dev/null
@@ -1,1082 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Utilities for the instance embedding for segmentation.""" - -import numpy as np -import tensorflow as tf -from deeplab import model -from deeplab.core import preprocess_utils -from feelvos.utils import mask_damaging - -slim = tf.contrib.slim -resolve_shape = preprocess_utils.resolve_shape -WRONG_LABEL_PADDING_DISTANCE = 1e20 - -# With correlation_cost local matching will be much faster. But we provide a -# slow fallback for convenience. -USE_CORRELATION_COST = False -if USE_CORRELATION_COST: - # pylint: disable=g-import-not-at-top - from correlation_cost.python.ops import correlation_cost_op - - -def pairwise_distances(x, y): - """Computes pairwise squared l2 distances between tensors x and y. - - Args: - x: Tensor of shape [n, feature_dim]. - y: Tensor of shape [m, feature_dim]. - - Returns: - Float32 distances tensor of shape [n, m]. - """ - # d[i,j] = (x[i] - y[j]) * (x[i] - y[j])' - # = sum(x[i]^2, 1) + sum(y[j]^2, 1) - 2 * x[i] * y[j]' - xs = tf.reduce_sum(x * x, axis=1)[:, tf.newaxis] - ys = tf.reduce_sum(y * y, axis=1)[tf.newaxis, :] - d = xs + ys - 2 * tf.matmul(x, y, transpose_b=True) - return d - - -def pairwise_distances2(x, y): - """Computes pairwise squared l2 distances between tensors x and y. - - Naive implementation, high memory use. Could be useful to test the more - efficient implementation. - - Args: - x: Tensor of shape [n, feature_dim]. - y: Tensor of shape [m, feature_dim]. - - Returns: - distances of shape [n, m]. - """ - return tf.reduce_sum(tf.squared_difference( - x[:, tf.newaxis], y[tf.newaxis, :]), axis=-1) - - -def cross_correlate(x, y, max_distance=9): - """Efficiently computes the cross correlation of x and y. - - Optimized implementation using correlation_cost. - Note that we do not normalize by the feature dimension. - - Args: - x: Float32 tensor of shape [height, width, feature_dim]. - y: Float32 tensor of shape [height, width, feature_dim]. - max_distance: Integer, the maximum distance in pixel coordinates - per dimension which is considered to be in the search window. - - Returns: - Float32 tensor of shape [height, width, (2 * max_distance + 1) ** 2]. - """ - with tf.name_scope('cross_correlation'): - corr = correlation_cost_op.correlation_cost( - x[tf.newaxis], y[tf.newaxis], kernel_size=1, - max_displacement=max_distance, stride_1=1, stride_2=1, - pad=max_distance) - corr = tf.squeeze(corr, axis=0) - # This correlation implementation takes the mean over the feature_dim, - # but we want sum here, so multiply by feature_dim. - feature_dim = resolve_shape(x)[-1] - corr *= feature_dim - return corr - - -def local_pairwise_distances(x, y, max_distance=9): - """Computes pairwise squared l2 distances using a local search window. - - Optimized implementation using correlation_cost. - - Args: - x: Float32 tensor of shape [height, width, feature_dim]. - y: Float32 tensor of shape [height, width, feature_dim]. - max_distance: Integer, the maximum distance in pixel coordinates - per dimension which is considered to be in the search window. - - Returns: - Float32 distances tensor of shape - [height, width, (2 * max_distance + 1) ** 2]. 
- """ - with tf.name_scope('local_pairwise_distances'): - # d[i,j] = (x[i] - y[j]) * (x[i] - y[j])' - # = sum(x[i]^2, -1) + sum(y[j]^2, -1) - 2 * x[i] * y[j]' - corr = cross_correlate(x, y, max_distance=max_distance) - xs = tf.reduce_sum(x * x, axis=2)[..., tf.newaxis] - ys = tf.reduce_sum(y * y, axis=2)[..., tf.newaxis] - ones_ys = tf.ones_like(ys) - ys = cross_correlate(ones_ys, ys, max_distance=max_distance) - d = xs + ys - 2 * corr - # Boundary should be set to Inf. - boundary = tf.equal( - cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0) - d = tf.where(boundary, tf.fill(tf.shape(d), tf.constant(np.float('inf'))), - d) - return d - - -def local_pairwise_distances2(x, y, max_distance=9): - """Computes pairwise squared l2 distances using a local search window. - - Naive implementation using map_fn. - Used as a slow fallback for when correlation_cost is not available. - - Args: - x: Float32 tensor of shape [height, width, feature_dim]. - y: Float32 tensor of shape [height, width, feature_dim]. - max_distance: Integer, the maximum distance in pixel coordinates - per dimension which is considered to be in the search window. - - Returns: - Float32 distances tensor of shape - [height, width, (2 * max_distance + 1) ** 2]. - """ - with tf.name_scope('local_pairwise_distances2'): - padding_val = 1e20 - padded_y = tf.pad(y, [[max_distance, max_distance], - [max_distance, max_distance], [0, 0]], - constant_values=padding_val) - height, width, _ = resolve_shape(x) - dists = [] - for y_start in range(2 * max_distance + 1): - y_end = y_start + height - y_slice = padded_y[y_start:y_end] - for x_start in range(2 * max_distance + 1): - x_end = x_start + width - offset_y = y_slice[:, x_start:x_end] - dist = tf.reduce_sum(tf.squared_difference(x, offset_y), axis=2) - dists.append(dist) - dists = tf.stack(dists, axis=2) - return dists - - -def majority_vote(labels): - """Performs a label majority vote along axis 1. - - Second try, hopefully this time more efficient. - We assume that the labels are contiguous starting from 0. - It will also work for non-contiguous labels, but be inefficient. - - Args: - labels: Int tensor of shape [n, k] - - Returns: - The majority of labels along axis 1 - """ - max_label = tf.reduce_max(labels) - one_hot = tf.one_hot(labels, depth=max_label + 1) - summed = tf.reduce_sum(one_hot, axis=1) - majority = tf.argmax(summed, axis=1) - return majority - - -def assign_labels_by_nearest_neighbors(reference_embeddings, query_embeddings, - reference_labels, k=1): - """Segments by nearest neighbor query wrt the reference frame. - - Args: - reference_embeddings: Tensor of shape [height, width, embedding_dim], - the embedding vectors for the reference frame - query_embeddings: Tensor of shape [n_query_images, height, width, - embedding_dim], the embedding vectors for the query frames - reference_labels: Tensor of shape [height, width, 1], the class labels of - the reference frame - k: Integer, the number of nearest neighbors to use - - Returns: - The labels of the nearest neighbors as [n_query_frames, height, width, 1] - tensor - - Raises: - ValueError: If k < 1. 
- """ - if k < 1: - raise ValueError('k must be at least 1') - dists = flattened_pairwise_distances(reference_embeddings, query_embeddings) - if k == 1: - nn_indices = tf.argmin(dists, axis=1)[..., tf.newaxis] - else: - _, nn_indices = tf.nn.top_k(-dists, k, sorted=False) - reference_labels = tf.reshape(reference_labels, [-1]) - nn_labels = tf.gather(reference_labels, nn_indices) - if k == 1: - nn_labels = tf.squeeze(nn_labels, axis=1) - else: - nn_labels = majority_vote(nn_labels) - height = tf.shape(reference_embeddings)[0] - width = tf.shape(reference_embeddings)[1] - n_query_frames = query_embeddings.shape[0] - nn_labels = tf.reshape(nn_labels, [n_query_frames, height, width, 1]) - return nn_labels - - -def flattened_pairwise_distances(reference_embeddings, query_embeddings): - """Calculates flattened tensor of pairwise distances between ref and query. - - Args: - reference_embeddings: Tensor of shape [..., embedding_dim], - the embedding vectors for the reference frame - query_embeddings: Tensor of shape [n_query_images, height, width, - embedding_dim], the embedding vectors for the query frames. - - Returns: - A distance tensor of shape [reference_embeddings.size / embedding_dim, - query_embeddings.size / embedding_dim] - """ - embedding_dim = resolve_shape(query_embeddings)[-1] - reference_embeddings = tf.reshape(reference_embeddings, [-1, embedding_dim]) - first_dim = -1 - query_embeddings = tf.reshape(query_embeddings, [first_dim, embedding_dim]) - dists = pairwise_distances(query_embeddings, reference_embeddings) - return dists - - -def nearest_neighbor_features_per_object( - reference_embeddings, query_embeddings, reference_labels, - max_neighbors_per_object, k_nearest_neighbors, gt_ids=None, n_chunks=100): - """Calculates the distance to the nearest neighbor per object. - - For every pixel of query_embeddings calculate the distance to the - nearest neighbor in the (possibly subsampled) reference_embeddings per object. - - Args: - reference_embeddings: Tensor of shape [height, width, embedding_dim], - the embedding vectors for the reference frame. - query_embeddings: Tensor of shape [n_query_images, height, width, - embedding_dim], the embedding vectors for the query frames. - reference_labels: Tensor of shape [height, width, 1], the class labels of - the reference frame. - max_neighbors_per_object: Integer, the maximum number of candidates - for the nearest neighbor query per object after subsampling, - or 0 for no subsampling. - k_nearest_neighbors: Integer, the number of nearest neighbors to use. - gt_ids: Int tensor of shape [n_objs] of the sorted unique ground truth - ids in the first frame. If None, it will be derived from - reference_labels. - n_chunks: Integer, the number of chunks to use to save memory - (set to 1 for no chunking). - - Returns: - nn_features: A float32 tensor of nearest neighbor features of shape - [n_query_images, height, width, n_objects, feature_dim]. - gt_ids: An int32 tensor of the unique sorted object ids present - in the reference labels. 
- """ - with tf.name_scope('nn_features_per_object'): - reference_labels_flat = tf.reshape(reference_labels, [-1]) - if gt_ids is None: - ref_obj_ids, _ = tf.unique(reference_labels_flat) - ref_obj_ids = tf.contrib.framework.sort(ref_obj_ids) - gt_ids = ref_obj_ids - embedding_dim = resolve_shape(reference_embeddings)[-1] - reference_embeddings_flat = tf.reshape(reference_embeddings, - [-1, embedding_dim]) - - reference_embeddings_flat, reference_labels_flat = ( - subsample_reference_embeddings_and_labels(reference_embeddings_flat, - reference_labels_flat, - gt_ids, - max_neighbors_per_object)) - shape = resolve_shape(query_embeddings) - query_embeddings_flat = tf.reshape(query_embeddings, [-1, embedding_dim]) - nn_features = _nearest_neighbor_features_per_object_in_chunks( - reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, - gt_ids, k_nearest_neighbors, n_chunks) - nn_features_dim = resolve_shape(nn_features)[-1] - nn_features_reshaped = tf.reshape(nn_features, - tf.stack(shape[:3] + [tf.size(gt_ids), - nn_features_dim])) - return nn_features_reshaped, gt_ids - - -def _nearest_neighbor_features_per_object_in_chunks( - reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, - ref_obj_ids, k_nearest_neighbors, n_chunks): - """Calculates the nearest neighbor features per object in chunks to save mem. - - Uses chunking to bound the memory use. - - Args: - reference_embeddings_flat: Tensor of shape [n, embedding_dim], - the embedding vectors for the reference frame. - query_embeddings_flat: Tensor of shape [m, embedding_dim], the embedding - vectors for the query frames. - reference_labels_flat: Tensor of shape [n], the class labels of the - reference frame. - ref_obj_ids: int tensor of unique object ids in the reference labels. - k_nearest_neighbors: Integer, the number of nearest neighbors to use. - n_chunks: Integer, the number of chunks to use to save memory - (set to 1 for no chunking). - - Returns: - nn_features: A float32 tensor of nearest neighbor features of shape - [m, n_objects, feature_dim]. - """ - chunk_size = tf.cast(tf.ceil(tf.cast(tf.shape(query_embeddings_flat)[0], - tf.float32) / n_chunks), tf.int32) - wrong_label_mask = tf.not_equal(reference_labels_flat, - ref_obj_ids[:, tf.newaxis]) - all_features = [] - for n in range(n_chunks): - if n_chunks == 1: - query_embeddings_flat_chunk = query_embeddings_flat - else: - chunk_start = n * chunk_size - chunk_end = (n + 1) * chunk_size - query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end] - # Use control dependencies to make sure that the chunks are not processed - # in parallel which would prevent any peak memory savings. - with tf.control_dependencies(all_features): - features = _nn_features_per_object_for_chunk( - reference_embeddings_flat, query_embeddings_flat_chunk, - wrong_label_mask, k_nearest_neighbors - ) - all_features.append(features) - if n_chunks == 1: - nn_features = all_features[0] - else: - nn_features = tf.concat(all_features, axis=0) - return nn_features - - -def _nn_features_per_object_for_chunk( - reference_embeddings, query_embeddings, wrong_label_mask, - k_nearest_neighbors): - """Extracts features for each object using nearest neighbor attention. - - Args: - reference_embeddings: Tensor of shape [n_chunk, embedding_dim], - the embedding vectors for the reference frame. - query_embeddings: Tensor of shape [m_chunk, embedding_dim], the embedding - vectors for the query frames. 
-    wrong_label_mask: Boolean tensor of shape [n_objects, n], True where the
-      label of a reference pixel differs from the respective object id.
-    k_nearest_neighbors: Integer, the number of nearest neighbors to use.
-
-  Returns:
-    nn_features: A float32 tensor of nearest neighbor features of shape
-      [m_chunk, n_objects, feature_dim].
-  """
-  reference_embeddings_key = reference_embeddings
-  query_embeddings_key = query_embeddings
-  dists = flattened_pairwise_distances(reference_embeddings_key,
-                                       query_embeddings_key)
-  dists = (dists[:, tf.newaxis, :] +
-           tf.cast(wrong_label_mask[tf.newaxis, :, :], tf.float32) *
-           WRONG_LABEL_PADDING_DISTANCE)
-  if k_nearest_neighbors == 1:
-    features = tf.reduce_min(dists, axis=2, keepdims=True)
-  else:
-    # Find the k nearest neighbors.
-    dists, _ = tf.nn.top_k(-dists, k=k_nearest_neighbors)
-    dists = -dists
-    # If not enough real neighbors were found, pad with the farthest real
-    # neighbor.
-    valid_mask = tf.less(dists, WRONG_LABEL_PADDING_DISTANCE)
-    masked_dists = dists * tf.cast(valid_mask, tf.float32)
-    pad_dist = tf.tile(tf.reduce_max(masked_dists, axis=2)[..., tf.newaxis],
-                       multiples=[1, 1, k_nearest_neighbors])
-    dists = tf.where(valid_mask, dists, pad_dist)
-    # Take the mean of the distances.
-    features = tf.reduce_mean(dists, axis=2, keepdims=True)
-  return features
-
-
-def create_embedding_segmentation_features(features, feature_dimension,
-                                           n_layers, kernel_size, reuse,
-                                           atrous_rates=None):
-  """Extracts features which can be used to estimate the final segmentation.
-
-  Args:
-    features: Input features of shape [batch, height, width, feature_dim].
-    feature_dimension: Integer, the dimensionality used in the segmentation
-      head layers.
-    n_layers: Integer, the number of layers in the segmentation head.
-    kernel_size: Integer, the kernel size used in the segmentation head.
-    reuse: Reuse mode for the variable_scope.
-    atrous_rates: List of integers of length n_layers, the atrous rates to use.
-
-  Returns:
-    Features to be used to estimate the segmentation labels of shape
-    [batch, height, width, embedding_seg_feat_dim].
-  """
-  if atrous_rates is None or not atrous_rates:
-    atrous_rates = [1 for _ in range(n_layers)]
-  assert len(atrous_rates) == n_layers
-  with tf.variable_scope('embedding_seg', reuse=reuse):
-    for n in range(n_layers):
-      features = model.split_separable_conv2d(
-          features, feature_dimension, kernel_size=kernel_size,
-          rate=atrous_rates[n], scope='split_separable_conv2d_{}'.format(n))
-    return features
-
-
-def add_image_summaries(images, nn_features, logits, batch_size,
-                        prev_frame_nn_features=None):
-  """Adds image summaries of input images, attention features and logits.
-
-  Args:
-    images: Image tensor of shape [batch, height, width, channels].
-    nn_features: Nearest neighbor attention features of shape
-      [batch_size, height, width, n_objects, 1].
-    logits: Float32 tensor of logits.
-    batch_size: Integer, the number of videos per clone per mini-batch.
-    prev_frame_nn_features: Nearest neighbor attention features w.r.t. the
-      previous frame, of shape [batch_size, height, width, n_objects, 1].
-      Can be None.
-  """
-  # Separate reference and query images.
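# Illustrative sketch (editorial, not part of the original file): the frame
# layout assumed by the reshape below, in plain NumPy with placeholder sizes.
import numpy as np

batch_size_np, num_frames, h, w, c = 2, 4, 8, 8, 3  # hypothetical sizes
images_np = np.zeros((batch_size_np * num_frames, h, w, c), np.float32)
clips = images_np.reshape(batch_size_np, num_frames, h, w, c)
reference = clips[:, 0]  # first frame of each clip -> (batch, h, w, c)
queries = clips[:, 1:]   # remaining query frames -> (batch, T - 1, h, w, c)
# The tf.reshape and slicing below perform the same regrouping on the
# summary inputs.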
- reshaped_images = tf.reshape(images, tf.stack( - [batch_size, -1] + resolve_shape(images)[1:])) - reference_images = reshaped_images[:, 0] - query_images = reshaped_images[:, 1:] - query_images_reshaped = tf.reshape(query_images, tf.stack( - [-1] + resolve_shape(images)[1:])) - tf.summary.image('ref_images', reference_images, max_outputs=batch_size) - tf.summary.image('query_images', query_images_reshaped, max_outputs=10) - predictions = tf.cast( - tf.argmax(logits, axis=-1), tf.uint8)[..., tf.newaxis] - # Scale up so that we can actually see something. - tf.summary.image('predictions', predictions * 32, max_outputs=10) - # We currently only show the first dimension of the features for background - # and the first foreground object. - tf.summary.image('nn_fg_features', nn_features[..., 0:1, 0], - max_outputs=batch_size) - if prev_frame_nn_features is not None: - tf.summary.image('nn_fg_features_prev', prev_frame_nn_features[..., 0:1, 0], - max_outputs=batch_size) - tf.summary.image('nn_bg_features', nn_features[..., 1:2, 0], - max_outputs=batch_size) - if prev_frame_nn_features is not None: - tf.summary.image('nn_bg_features_prev', - prev_frame_nn_features[..., 1:2, 0], - max_outputs=batch_size) - - -def get_embeddings(images, model_options, embedding_dimension): - """Extracts embedding vectors for images. Should only be used for inference. - - Args: - images: A tensor of shape [batch, height, width, channels]. - model_options: A ModelOptions instance to configure models. - embedding_dimension: Integer, the dimension of the embedding. - - Returns: - embeddings: A tensor of shape [batch, height, width, embedding_dimension]. - """ - features, end_points = model.extract_features( - images, - model_options, - is_training=False) - - if model_options.decoder_output_stride is not None: - decoder_output_stride = min(model_options.decoder_output_stride) - if model_options.crop_size is None: - height = tf.shape(images)[1] - width = tf.shape(images)[2] - else: - height, width = model_options.crop_size - features = model.refine_by_decoder( - features, - end_points, - crop_size=[height, width], - decoder_output_stride=[decoder_output_stride], - decoder_use_separable_conv=model_options.decoder_use_separable_conv, - model_variant=model_options.model_variant, - is_training=False) - - with tf.variable_scope('embedding'): - embeddings = split_separable_conv2d_with_identity_initializer( - features, embedding_dimension, scope='split_separable_conv2d') - return embeddings - - -def get_logits_with_matching(images, - model_options, - weight_decay=0.0001, - reuse=None, - is_training=False, - fine_tune_batch_norm=False, - reference_labels=None, - batch_size=None, - num_frames_per_video=None, - embedding_dimension=None, - max_neighbors_per_object=0, - k_nearest_neighbors=1, - use_softmax_feedback=True, - initial_softmax_feedback=None, - embedding_seg_feature_dimension=256, - embedding_seg_n_layers=4, - embedding_seg_kernel_size=7, - embedding_seg_atrous_rates=None, - normalize_nearest_neighbor_distances=True, - also_attend_to_previous_frame=True, - damage_initial_previous_frame_mask=False, - use_local_previous_frame_attention=True, - previous_frame_attention_window_size=15, - use_first_frame_matching=True, - also_return_embeddings=False, - ref_embeddings=None): - """Gets the logits by atrous/image spatial pyramid pooling using attention. - - Args: - images: A tensor of size [batch, height, width, channels]. - model_options: A ModelOptions instance to configure models. 
-    weight_decay: The weight decay for model variables.
-    reuse: Reuse the model variables or not.
-    is_training: Is training or not.
-    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
-    reference_labels: The segmentation labels of the reference frame on which
-      attention is applied.
-    batch_size: Integer, the number of videos in a batch.
-    num_frames_per_video: Integer, the number of frames per video.
-    embedding_dimension: Integer, the dimension of the embedding.
-    max_neighbors_per_object: Integer, the maximum number of candidates
-      for the nearest neighbor query per object after subsampling.
-      Can be 0 for no subsampling.
-    k_nearest_neighbors: Integer, the number of nearest neighbors to use.
-    use_softmax_feedback: Boolean, whether to give the softmax predictions of
-      the last frame as additional input to the segmentation head.
-    initial_softmax_feedback: List of Float32 tensors, or None. Can be used to
-      initialize the softmax predictions used for the feedback loop.
-      Only has an effect if use_softmax_feedback is True.
-    embedding_seg_feature_dimension: Integer, the dimensionality used in the
-      segmentation head layers.
-    embedding_seg_n_layers: Integer, the number of layers in the segmentation
-      head.
-    embedding_seg_kernel_size: Integer, the kernel size used in the
-      segmentation head.
-    embedding_seg_atrous_rates: List of integers of length
-      embedding_seg_n_layers, the atrous rates to use for the segmentation
-      head.
-    normalize_nearest_neighbor_distances: Boolean, whether to normalize the
-      nearest neighbor distances to [0, 1] using sigmoid, scale and shift.
-    also_attend_to_previous_frame: Boolean, whether to also use nearest
-      neighbor attention with respect to the previous frame.
-    damage_initial_previous_frame_mask: Boolean, whether to artificially damage
-      the initial previous frame mask. Only has an effect if
-      also_attend_to_previous_frame is True.
-    use_local_previous_frame_attention: Boolean, whether to restrict the
-      previous frame attention to a local search window.
-      Only has an effect if also_attend_to_previous_frame is True.
-    previous_frame_attention_window_size: Integer, the window size used for
-      local previous frame attention, if use_local_previous_frame_attention
-      is True.
-    use_first_frame_matching: Boolean, whether to extract features by matching
-      to the reference frame. This should always be True except for ablation
-      experiments.
-    also_return_embeddings: Boolean, whether to return the embeddings as well.
-    ref_embeddings: Tuple of
-      (first_frame_embeddings, previous_frame_embeddings),
-      each of shape [batch, height, width, embedding_dimension], or None.
-
-  Returns:
-    outputs_to_logits: A map from output_type to logits.
-    If also_return_embeddings is True, it will also return an embeddings
-    tensor of shape [batch, height, width, embedding_dimension].
- """ - features, end_points = model.extract_features( - images, - model_options, - weight_decay=weight_decay, - reuse=reuse, - is_training=is_training, - fine_tune_batch_norm=fine_tune_batch_norm) - - if model_options.decoder_output_stride: - decoder_output_stride = min(model_options.decoder_output_stride) - if model_options.crop_size is None: - height = tf.shape(images)[1] - width = tf.shape(images)[2] - else: - height, width = model_options.crop_size - decoder_height = model.scale_dimension(height, 1.0 / decoder_output_stride) - decoder_width = model.scale_dimension(width, 1.0 / decoder_output_stride) - features = model.refine_by_decoder( - features, - end_points, - crop_size=[height, width], - decoder_output_stride=[decoder_output_stride], - decoder_use_separable_conv=model_options.decoder_use_separable_conv, - model_variant=model_options.model_variant, - weight_decay=weight_decay, - reuse=reuse, - is_training=is_training, - fine_tune_batch_norm=fine_tune_batch_norm) - - with tf.variable_scope('embedding', reuse=reuse): - embeddings = split_separable_conv2d_with_identity_initializer( - features, embedding_dimension, scope='split_separable_conv2d') - embeddings = tf.identity(embeddings, name='embeddings') - scaled_reference_labels = tf.image.resize_nearest_neighbor( - reference_labels, - resolve_shape(embeddings, 4)[1:3], - align_corners=True) - h, w = decoder_height, decoder_width - if num_frames_per_video is None: - num_frames_per_video = tf.size(embeddings) // ( - batch_size * h * w * embedding_dimension) - new_labels_shape = tf.stack([batch_size, -1, h, w, 1]) - reshaped_reference_labels = tf.reshape(scaled_reference_labels, - new_labels_shape) - new_embeddings_shape = tf.stack([batch_size, - num_frames_per_video, h, w, - embedding_dimension]) - reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape) - all_nn_features = [] - all_ref_obj_ids = [] - # To keep things simple, we do all this separate for each sequence for now. - for n in range(batch_size): - embedding = reshaped_embeddings[n] - if ref_embeddings is None: - n_chunks = 100 - reference_embedding = embedding[0] - if also_attend_to_previous_frame or use_softmax_feedback: - queries_embedding = embedding[2:] - else: - queries_embedding = embedding[1:] - else: - if USE_CORRELATION_COST: - n_chunks = 20 - else: - n_chunks = 500 - reference_embedding = ref_embeddings[0][n] - queries_embedding = embedding - reference_labels = reshaped_reference_labels[n][0] - nn_features_n, ref_obj_ids = nearest_neighbor_features_per_object( - reference_embedding, queries_embedding, reference_labels, - max_neighbors_per_object, k_nearest_neighbors, n_chunks=n_chunks) - if normalize_nearest_neighbor_distances: - nn_features_n = (tf.nn.sigmoid(nn_features_n) - 0.5) * 2 - all_nn_features.append(nn_features_n) - all_ref_obj_ids.append(ref_obj_ids) - - feat_dim = resolve_shape(features)[-1] - features = tf.reshape(features, tf.stack( - [batch_size, num_frames_per_video, h, w, feat_dim])) - if ref_embeddings is None: - # Strip the features for the reference frame. - if also_attend_to_previous_frame or use_softmax_feedback: - features = features[:, 2:] - else: - features = features[:, 1:] - - # To keep things simple, we do all this separate for each sequence for now. 
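# Illustrative sketch (editorial, not part of the original file): the
# (sigmoid(d) - 0.5) * 2 normalization applied above maps non-negative
# squared distances monotonically into [0, 1), with distance 0 mapping to 0.
import numpy as np

def normalize_dists_np(d):
  # Mirrors (tf.nn.sigmoid(nn_features_n) - 0.5) * 2 from the code above.
  return (1.0 / (1.0 + np.exp(-d)) - 0.5) * 2.0

# normalize_dists_np(np.array([0.0, 1.0, 10.0])) ~= [0.0, 0.46, 1.0]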
- outputs_to_logits = {output: [] for - output in model_options.outputs_to_num_classes} - for n in range(batch_size): - features_n = features[n] - nn_features_n = all_nn_features[n] - nn_features_n_tr = tf.transpose(nn_features_n, [3, 0, 1, 2, 4]) - n_objs = tf.shape(nn_features_n_tr)[0] - # Repeat features for every object. - features_n_tiled = tf.tile(features_n[tf.newaxis], - multiples=[n_objs, 1, 1, 1, 1]) - prev_frame_labels = None - if also_attend_to_previous_frame: - prev_frame_labels = reshaped_reference_labels[n, 1] - if is_training and damage_initial_previous_frame_mask: - # Damage the previous frame masks. - prev_frame_labels = mask_damaging.damage_masks(prev_frame_labels, - dilate=False) - tf.summary.image('prev_frame_labels', - tf.cast(prev_frame_labels[tf.newaxis], - tf.uint8) * 32) - initial_softmax_feedback_n = create_initial_softmax_from_labels( - prev_frame_labels, reshaped_reference_labels[n][0], - decoder_output_stride=None, reduce_labels=True) - elif initial_softmax_feedback is not None: - initial_softmax_feedback_n = initial_softmax_feedback[n] - else: - initial_softmax_feedback_n = None - if initial_softmax_feedback_n is None: - last_softmax = tf.zeros((n_objs, h, w, 1), dtype=tf.float32) - else: - last_softmax = tf.transpose(initial_softmax_feedback_n, [2, 0, 1])[ - ..., tf.newaxis] - assert len(model_options.outputs_to_num_classes) == 1 - output = model_options.outputs_to_num_classes.keys()[0] - logits = [] - n_ref_frames = 1 - prev_frame_nn_features_n = None - if also_attend_to_previous_frame or use_softmax_feedback: - n_ref_frames += 1 - if ref_embeddings is not None: - n_ref_frames = 0 - for t in range(num_frames_per_video - n_ref_frames): - to_concat = [features_n_tiled[:, t]] - if use_first_frame_matching: - to_concat.append(nn_features_n_tr[:, t]) - if use_softmax_feedback: - to_concat.append(last_softmax) - if also_attend_to_previous_frame: - assert normalize_nearest_neighbor_distances, ( - 'previous frame attention currently only works when normalized ' - 'distances are used') - embedding = reshaped_embeddings[n] - if ref_embeddings is None: - last_frame_embedding = embedding[t + 1] - query_embeddings = embedding[t + 2, tf.newaxis] - else: - last_frame_embedding = ref_embeddings[1][0] - query_embeddings = embedding - if use_local_previous_frame_attention: - assert query_embeddings.shape[0] == 1 - prev_frame_nn_features_n = ( - local_previous_frame_nearest_neighbor_features_per_object( - last_frame_embedding, - query_embeddings[0], - prev_frame_labels, - all_ref_obj_ids[n], - max_distance=previous_frame_attention_window_size) - ) - else: - prev_frame_nn_features_n, _ = ( - nearest_neighbor_features_per_object( - last_frame_embedding, query_embeddings, prev_frame_labels, - max_neighbors_per_object, k_nearest_neighbors, - gt_ids=all_ref_obj_ids[n])) - prev_frame_nn_features_n = (tf.nn.sigmoid( - prev_frame_nn_features_n) - 0.5) * 2 - prev_frame_nn_features_n_sq = tf.squeeze(prev_frame_nn_features_n, - axis=0) - prev_frame_nn_features_n_tr = tf.transpose( - prev_frame_nn_features_n_sq, [2, 0, 1, 3]) - to_concat.append(prev_frame_nn_features_n_tr) - features_n_concat_t = tf.concat(to_concat, axis=-1) - embedding_seg_features_n_t = ( - create_embedding_segmentation_features( - features_n_concat_t, embedding_seg_feature_dimension, - embedding_seg_n_layers, embedding_seg_kernel_size, - reuse or n > 0, atrous_rates=embedding_seg_atrous_rates)) - logits_t = model.get_branch_logits( - embedding_seg_features_n_t, - 1, - model_options.atrous_rates, - 
aspp_with_batch_norm=model_options.aspp_with_batch_norm, - kernel_size=model_options.logits_kernel_size, - weight_decay=weight_decay, - reuse=reuse or n > 0 or t > 0, - scope_suffix=output - ) - logits.append(logits_t) - prev_frame_labels = tf.transpose(tf.argmax(logits_t, axis=0), - [2, 0, 1]) - last_softmax = tf.nn.softmax(logits_t, axis=0) - logits = tf.stack(logits, axis=1) - logits_shape = tf.stack( - [n_objs, num_frames_per_video - n_ref_frames] + - resolve_shape(logits)[2:-1]) - logits_reshaped = tf.reshape(logits, logits_shape) - logits_transposed = tf.transpose(logits_reshaped, [1, 2, 3, 0]) - outputs_to_logits[output].append(logits_transposed) - - add_image_summaries( - images[n * num_frames_per_video: (n+1) * num_frames_per_video], - nn_features_n, - logits_transposed, - batch_size=1, - prev_frame_nn_features=prev_frame_nn_features_n) - if also_return_embeddings: - return outputs_to_logits, embeddings - else: - return outputs_to_logits - - -def subsample_reference_embeddings_and_labels( - reference_embeddings_flat, reference_labels_flat, ref_obj_ids, - max_neighbors_per_object): - """Subsamples the reference embedding vectors and labels. - - After subsampling, at most max_neighbors_per_object items will remain per - class. - - Args: - reference_embeddings_flat: Tensor of shape [n, embedding_dim], - the embedding vectors for the reference frame. - reference_labels_flat: Tensor of shape [n, 1], - the class labels of the reference frame. - ref_obj_ids: An int32 tensor of the unique object ids present - in the reference labels. - max_neighbors_per_object: Integer, the maximum number of candidates - for the nearest neighbor query per object after subsampling, - or 0 for no subsampling. - - Returns: - reference_embeddings_flat: Tensor of shape [n_sub, embedding_dim], - the subsampled embedding vectors for the reference frame. - reference_labels_flat: Tensor of shape [n_sub, 1], - the class labels of the reference frame. - """ - if max_neighbors_per_object == 0: - return reference_embeddings_flat, reference_labels_flat - same_label_mask = tf.equal(reference_labels_flat[tf.newaxis, :], - ref_obj_ids[:, tf.newaxis]) - max_neighbors_per_object_repeated = tf.tile( - tf.constant(max_neighbors_per_object)[tf.newaxis], - multiples=[tf.size(ref_obj_ids)]) - # Somehow map_fn on GPU caused trouble sometimes, so let's put it on CPU - # for now. - with tf.device('cpu:0'): - subsampled_indices = tf.map_fn(_create_subsampling_mask, - (same_label_mask, - max_neighbors_per_object_repeated), - dtype=tf.int64, - name='subsample_labels_map_fn', - parallel_iterations=1) - mask = tf.not_equal(subsampled_indices, tf.constant(-1, dtype=tf.int64)) - masked_indices = tf.boolean_mask(subsampled_indices, mask) - reference_embeddings_flat = tf.gather(reference_embeddings_flat, - masked_indices) - reference_labels_flat = tf.gather(reference_labels_flat, masked_indices) - return reference_embeddings_flat, reference_labels_flat - - -def _create_subsampling_mask(args): - """Creates boolean mask which can be used to subsample the labels. - - Args: - args: tuple of (label_mask, max_neighbors_per_object), where label_mask - is the mask to be subsampled and max_neighbors_per_object is a int scalar, - the maximum number of neighbors to be retained after subsampling. - - Returns: - The boolean mask for subsampling the labels. 
-  """
-  label_mask, max_neighbors_per_object = args
-  indices = tf.squeeze(tf.where(label_mask), axis=1)
-  shuffled_indices = tf.random_shuffle(indices)
-  subsampled_indices = shuffled_indices[:max_neighbors_per_object]
-  n_pad = max_neighbors_per_object - tf.size(subsampled_indices)
-  padded_label = -1
-  padding = tf.fill((n_pad,), tf.constant(padded_label, dtype=tf.int64))
-  padded = tf.concat([subsampled_indices, padding], axis=0)
-  return padded
-
-
-def conv2d_identity_initializer(scale=1.0, mean=0, stddev=3e-2):
-  """Creates an identity initializer for TensorFlow conv2d.
-
-  We add a small amount of normal noise to the initialization matrix.
-  Code copied from lcchen@.
-
-  Args:
-    scale: The scale coefficient for the identity weight matrix.
-    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
-      truncated normal distribution.
-    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
-      deviation of the truncated normal distribution.
-
-  Returns:
-    An identity initializer function for TensorFlow conv2d.
-  """
-  def _initializer(shape, dtype=tf.float32, partition_info=None):
-    """Returns the identity matrix scaled by `scale`.
-
-    Args:
-      shape: A tuple of int32 numbers indicating the shape of the initializing
-        matrix.
-      dtype: The data type of the initializing matrix.
-      partition_info: (Optional) variable_scope._PartitionInfo object holding
-        additional information about how the variable is partitioned. This
-        input is not used in our case, but is required by TensorFlow.
-
-    Returns:
-      An identity matrix.
-
-    Raises:
-      ValueError: If len(shape) != 4, or shape[0] != shape[1], or shape[0] is
-        not odd, or shape[1] is not odd.
-    """
-    del partition_info
-    if len(shape) != 4:
-      raise ValueError('Expect shape length to be 4.')
-    if shape[0] != shape[1]:
-      raise ValueError('Expect shape[0] = shape[1].')
-    if shape[0] % 2 != 1:
-      raise ValueError('Expect shape[0] to be odd value.')
-    if shape[1] % 2 != 1:
-      raise ValueError('Expect shape[1] to be odd value.')
-    weights = np.zeros(shape, dtype=np.float32)
-    # Integer division keeps the center indices ints under Python 3 as well.
-    center_y = shape[0] // 2
-    center_x = shape[1] // 2
-    min_channel = min(shape[2], shape[3])
-    for i in range(min_channel):
-      weights[center_y, center_x, i, i] = scale
-    return tf.constant(weights, dtype=dtype) + tf.truncated_normal(
-        shape, mean=mean, stddev=stddev, dtype=dtype)
-
-  return _initializer
-
-
-def split_separable_conv2d_with_identity_initializer(
-    inputs,
-    filters,
-    kernel_size=3,
-    rate=1,
-    weight_decay=0.00004,
-    scope=None):
-  """Splits a separable conv2d into depthwise and pointwise conv2d.
-
-  This operation differs from `tf.layers.separable_conv2d` as this operation
-  applies an activation function between the depthwise and pointwise conv2d.
-
-  Args:
-    inputs: Input tensor with shape [batch, height, width, channels].
-    filters: Number of filters in the 1x1 pointwise convolution.
-    kernel_size: A list of length 2: [kernel_height, kernel_width] of
-      the filters. Can be an int if both values are the same.
-    rate: Atrous convolution rate for the depthwise convolution.
-    weight_decay: The weight decay to use for regularizing the model.
-    scope: Optional scope for the operation.
-
-  Returns:
-    Computed features after split separable conv2d.
- """ - initializer = conv2d_identity_initializer() - outputs = slim.separable_conv2d( - inputs, - None, - kernel_size=kernel_size, - depth_multiplier=1, - rate=rate, - weights_initializer=initializer, - weights_regularizer=None, - scope=scope + '_depthwise') - return slim.conv2d( - outputs, - filters, - 1, - weights_initializer=initializer, - weights_regularizer=slim.l2_regularizer(weight_decay), - scope=scope + '_pointwise') - - -def create_initial_softmax_from_labels(last_frame_labels, reference_labels, - decoder_output_stride, reduce_labels): - """Creates initial softmax predictions from last frame labels. - - Args: - last_frame_labels: last frame labels of shape [1, height, width, 1]. - reference_labels: reference frame labels of shape [1, height, width, 1]. - decoder_output_stride: Integer, the stride of the decoder. Can be None, in - this case it's assumed that the last_frame_labels and reference_labels - are already scaled to the decoder output resolution. - reduce_labels: Boolean, whether to reduce the depth of the softmax one_hot - encoding to the actual number of labels present in the reference frame - (otherwise the depth will be the highest label index + 1). - - Returns: - init_softmax: the initial softmax predictions. - """ - if decoder_output_stride is None: - labels_output_size = last_frame_labels - reference_labels_output_size = reference_labels - else: - h = tf.shape(last_frame_labels)[1] - w = tf.shape(last_frame_labels)[2] - h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) - w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) - labels_output_size = tf.image.resize_nearest_neighbor( - last_frame_labels, [h_sub, w_sub], align_corners=True) - reference_labels_output_size = tf.image.resize_nearest_neighbor( - reference_labels, [h_sub, w_sub], align_corners=True) - if reduce_labels: - unique_labels, _ = tf.unique(tf.reshape(reference_labels_output_size, [-1])) - depth = tf.size(unique_labels) - else: - depth = tf.reduce_max(reference_labels_output_size) + 1 - one_hot_assertion = tf.assert_less(tf.reduce_max(labels_output_size), depth) - with tf.control_dependencies([one_hot_assertion]): - init_softmax = tf.one_hot(tf.squeeze(labels_output_size, - axis=-1), - depth=depth, - dtype=tf.float32) - return init_softmax - - -def local_previous_frame_nearest_neighbor_features_per_object( - prev_frame_embedding, query_embedding, prev_frame_labels, - gt_ids, max_distance=9): - """Computes nearest neighbor features while only allowing local matches. - - Args: - prev_frame_embedding: Tensor of shape [height, width, embedding_dim], - the embedding vectors for the last frame. - query_embedding: Tensor of shape [height, width, embedding_dim], - the embedding vectors for the query frames. - prev_frame_labels: Tensor of shape [height, width, 1], the class labels of - the previous frame. - gt_ids: Int Tensor of shape [n_objs] of the sorted unique ground truth - ids in the first frame. - max_distance: Integer, the maximum distance allowed for local matching. - - Returns: - nn_features: A float32 np.array of nearest neighbor features of shape - [1, height, width, n_objects, 1]. - """ - with tf.name_scope( - 'local_previous_frame_nearest_neighbor_features_per_object'): - if USE_CORRELATION_COST: - tf.logging.info('Using correlation_cost.') - d = local_pairwise_distances(query_embedding, prev_frame_embedding, - max_distance=max_distance) - else: - # Slow fallback in case correlation_cost is not available. 
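# Illustrative sketch (editorial, not part of the original file): a plain
# NumPy reference for what the fallback computes -- for every query pixel,
# the squared L2 distance to each previous-frame pixel inside a
# (2 * max_distance + 1)**2 search window; out-of-frame offsets stay inf.
import numpy as np

def local_pairwise_distances_np(x, y, max_distance):
  """x, y: (h, w, c) arrays; returns (h, w, (2 * max_distance + 1)**2)."""
  h, w, _ = x.shape
  win = 2 * max_distance + 1
  out = np.full((h, w, win * win), np.inf, np.float32)
  for dy in range(-max_distance, max_distance + 1):
    for dx in range(-max_distance, max_distance + 1):
      k = (dy + max_distance) * win + (dx + max_distance)
      for i in range(max(0, -dy), min(h, h - dy)):
        for j in range(max(0, -dx), min(w, w - dx)):
          diff = x[i, j] - y[i + dy, j + dx]
          out[i, j, k] = np.dot(diff, diff)
  return out
# The TF code below computes the same quantity without Python loops.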
- tf.logging.warn('correlation cost is not available, using slow fallback ' - 'implementation.') - d = local_pairwise_distances2(query_embedding, prev_frame_embedding, - max_distance=max_distance) - d = (tf.nn.sigmoid(d) - 0.5) * 2 - height = tf.shape(prev_frame_embedding)[0] - width = tf.shape(prev_frame_embedding)[1] - - # Create offset versions of the mask. - if USE_CORRELATION_COST: - # New, faster code with cross-correlation via correlation_cost. - # Due to padding we have to add 1 to the labels. - offset_labels = correlation_cost_op.correlation_cost( - tf.ones((1, height, width, 1)), - tf.cast(prev_frame_labels + 1, tf.float32)[tf.newaxis], - kernel_size=1, - max_displacement=max_distance, stride_1=1, stride_2=1, - pad=max_distance) - offset_labels = tf.squeeze(offset_labels, axis=0)[..., tf.newaxis] - # Subtract the 1 again and round. - offset_labels = tf.round(offset_labels - 1) - offset_masks = tf.equal( - offset_labels, - tf.cast(gt_ids, tf.float32)[tf.newaxis, tf.newaxis, tf.newaxis, :]) - else: - # Slower code, without dependency to correlation_cost - masks = tf.equal(prev_frame_labels, gt_ids[tf.newaxis, tf.newaxis]) - padded_masks = tf.pad(masks, - [[max_distance, max_distance], - [max_distance, max_distance], - [0, 0]]) - offset_masks = [] - for y_start in range(2 * max_distance + 1): - y_end = y_start + height - masks_slice = padded_masks[y_start:y_end] - for x_start in range(2 * max_distance + 1): - x_end = x_start + width - offset_mask = masks_slice[:, x_start:x_end] - offset_masks.append(offset_mask) - offset_masks = tf.stack(offset_masks, axis=2) - - pad = tf.ones((height, width, (2 * max_distance + 1) ** 2, tf.size(gt_ids))) - d_tiled = tf.tile(d[..., tf.newaxis], multiples=(1, 1, 1, tf.size(gt_ids))) - d_masked = tf.where(offset_masks, d_tiled, pad) - dists = tf.reduce_min(d_masked, axis=2) - dists = tf.reshape(dists, (1, height, width, tf.size(gt_ids), 1)) - return dists diff --git a/research/feelvos/utils/embedding_utils_test.py b/research/feelvos/utils/embedding_utils_test.py deleted file mode 100644 index ddebd7b4e..000000000 --- a/research/feelvos/utils/embedding_utils_test.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests for embedding utils.""" - -import unittest -import numpy as np -import tensorflow as tf -from feelvos.utils import embedding_utils - -if embedding_utils.USE_CORRELATION_COST: - # pylint: disable=g-import-not-at-top - from correlation_cost.python.ops import correlation_cost_op - - -class EmbeddingUtilsTest(tf.test.TestCase): - - def test_pairwise_distances(self): - x = np.arange(100, dtype=np.float32).reshape(20, 5) - y = np.arange(100, 200, dtype=np.float32).reshape(20, 5) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - x = tf.constant(x) - y = tf.constant(y) - d1 = embedding_utils.pairwise_distances(x, y) - d2 = embedding_utils.pairwise_distances2(x, y) - d1_val, d2_val = sess.run([d1, d2]) - self.assertAllClose(d1_val, d2_val) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_correlation_cost_one_dimensional(self): - a = np.array([[[[1.0], [2.0]], [[3.0], [4.0]]]]) - b = np.array([[[[2.0], [1.0]], [[4.0], [3.0]]]]) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - c = correlation_cost_op.correlation_cost( - a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1, - pad=1) - c = tf.squeeze(c, axis=0) - c_val = sess.run(c) - self.assertAllEqual(c_val.shape, (2, 2, 9)) - for y in range(2): - for x in range(2): - for dy in range(-1, 2): - for dx in range(-1, 2): - a_slice = a[0, y, x, 0] - if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: - b_slice = 0 - else: - b_slice = b[0, y + dy, x + dx, 0] - expected = a_slice * b_slice - dy0 = dy + 1 - dx0 = dx + 1 - self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_correlation_cost_two_dimensional(self): - a = np.array([[[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]]]) - b = np.array([[[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]]]) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - c = correlation_cost_op.correlation_cost( - a, b, kernel_size=1, max_displacement=1, stride_1=1, stride_2=1, - pad=1) - c = tf.squeeze(c, axis=0) - c_val = sess.run(c) - self.assertAllEqual(c_val.shape, (2, 2, 9)) - for y in range(2): - for x in range(2): - for dy in range(-1, 2): - for dx in range(-1, 2): - a_slice = a[0, y, x, :] - if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: - b_slice = 0 - else: - b_slice = b[0, y + dy, x + dx, :] - expected = (a_slice * b_slice).mean() - dy0 = dy + 1 - dx0 = dx + 1 - self.assertAlmostEqual(c_val[y, x, 3 * dy0 + dx0], expected) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_local_pairwise_distances_one_dimensional(self): - a = np.array([[[1.0], [2.0]], [[3.0], [4.0]]]) - b = np.array([[[2.0], [1.0]], [[4.0], [3.0]]]) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - a_tf = tf.constant(a, dtype=tf.float32) - b_tf = tf.constant(b, dtype=tf.float32) - d = embedding_utils.local_pairwise_distances(a_tf, b_tf, - max_distance=1) - d_val = sess.run(d) - for y in range(2): - for x in range(2): - for dy in range(-1, 2): - for dx in range(-1, 2): - a_slice = a[y, x, 0] - if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: - expected = np.float('inf') - else: - b_slice = b[y + dy, x + dx, 0] - expected = (a_slice - b_slice) ** 2 - dy0 = dy + 
1 - dx0 = dx + 1 - self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_local_pairwise_distances_shape(self): - a = np.zeros((4, 5, 2)) - b = np.zeros((4, 5, 2)) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - a_tf = tf.constant(a, dtype=tf.float32) - b_tf = tf.constant(b, dtype=tf.float32) - d = embedding_utils.local_pairwise_distances(a_tf, b_tf, max_distance=4) - d_val = sess.run(d) - self.assertAllEqual(d_val.shape, (4, 5, 81)) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_local_pairwise_distances_two_dimensional(self): - a = np.array([[[1.0, -5.0], [7.0, 2.0]], [[1.0, 3.0], [3.0, 4.0]]]) - b = np.array([[[2.0, 1.0], [0.0, -9.0]], [[4.0, 3.0], [3.0, 1.0]]]) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - a_tf = tf.constant(a, dtype=tf.float32) - b_tf = tf.constant(b, dtype=tf.float32) - d = embedding_utils.local_pairwise_distances(a_tf, b_tf, - max_distance=1) - d_val = sess.run(d) - for y in range(2): - for x in range(2): - for dy in range(-1, 2): - for dx in range(-1, 2): - a_slice = a[y, x, :] - if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: - expected = np.float('inf') - else: - b_slice = b[y + dy, x + dx, :] - expected = ((a_slice - b_slice) ** 2).sum() - dy0 = dy + 1 - dx0 = dx + 1 - self.assertAlmostEqual(d_val[y, x, 3 * dy0 + dx0], expected) - - @unittest.skipIf(not embedding_utils.USE_CORRELATION_COST, - 'depends on correlation_cost') - def test_local_previous_frame_nearest_neighbor_features_per_object(self): - prev_frame_embedding = np.array([[[1.0, -5.0], [7.0, 2.0]], - [[1.0, 3.0], [3.0, 4.0]]]) / 10 - query_embedding = np.array([[[2.0, 1.0], [0.0, -9.0]], - [[4.0, 3.0], [3.0, 1.0]]]) / 10 - prev_frame_labels = np.array([[[0], [1]], [[1], [0]]]) - gt_ids = np.array([0, 1]) - g = tf.Graph() - with g.as_default(): - with self.test_session(graph=g) as sess: - prev_frame_embedding_tf = tf.constant(prev_frame_embedding, - dtype=tf.float32) - query_embedding_tf = tf.constant(query_embedding, dtype=tf.float32) - embu = embedding_utils - dists = ( - embu.local_previous_frame_nearest_neighbor_features_per_object( - prev_frame_embedding_tf, query_embedding_tf, - prev_frame_labels, gt_ids, max_distance=1)) - dists = tf.squeeze(dists, axis=4) - dists = tf.squeeze(dists, axis=0) - dists_val = sess.run(dists) - for obj_id in gt_ids: - for y in range(2): - for x in range(2): - curr_min = 1.0 - for dy in range(-1, 2): - for dx in range(-1, 2): - # Attention: here we shift the prev frame embedding, - # not the query. - if y + dy < 0 or y + dy > 1 or x + dx < 0 or x + dx > 1: - continue - if prev_frame_labels[y + dy, x + dx, 0] != obj_id: - continue - prev_frame_slice = prev_frame_embedding[y + dy, x + dx, :] - query_frame_slice = query_embedding[y, x, :] - v_unnorm = ((prev_frame_slice - query_frame_slice) ** 2).sum() - v = ((1.0 / (1.0 + np.exp(-v_unnorm))) - 0.5) * 2 - curr_min = min(curr_min, v) - expected = curr_min - self.assertAlmostEqual(dists_val[y, x, obj_id], expected, - places=5) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/feelvos/utils/eval_utils.py b/research/feelvos/utils/eval_utils.py deleted file mode 100644 index 517ec0d78..000000000 --- a/research/feelvos/utils/eval_utils.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utility functions for evaluations.""" - -import numpy as np -import PIL -import tensorflow as tf - -pascal_colormap = [ - 0, 0, 0, - 0.5020, 0, 0, - 0, 0.5020, 0, - 0.5020, 0.5020, 0, - 0, 0, 0.5020, - 0.5020, 0, 0.5020, - 0, 0.5020, 0.5020, - 0.5020, 0.5020, 0.5020, - 0.2510, 0, 0, - 0.7529, 0, 0, - 0.2510, 0.5020, 0, - 0.7529, 0.5020, 0, - 0.2510, 0, 0.5020, - 0.7529, 0, 0.5020, - 0.2510, 0.5020, 0.5020, - 0.7529, 0.5020, 0.5020, - 0, 0.2510, 0, - 0.5020, 0.2510, 0, - 0, 0.7529, 0, - 0.5020, 0.7529, 0, - 0, 0.2510, 0.5020, - 0.5020, 0.2510, 0.5020, - 0, 0.7529, 0.5020, - 0.5020, 0.7529, 0.5020, - 0.2510, 0.2510, 0] - - -def save_segmentation_with_colormap(filename, img): - """Saves a segmentation with the pascal colormap as expected for DAVIS eval. - - Args: - filename: Where to store the segmentation. - img: A numpy array of the segmentation to be saved. - """ - if img.shape[-1] == 1: - img = img[..., 0] - - # Save with colormap. - colormap = (np.array(pascal_colormap) * 255).round().astype('uint8') - colormap_image = PIL.Image.new('P', (16, 16)) - colormap_image.putpalette(colormap) - pil_image = PIL.Image.fromarray(img.astype('uint8')) - pil_image_with_colormap = pil_image.quantize(palette=colormap_image) - with tf.gfile.GFile(filename, 'w') as f: - pil_image_with_colormap.save(f) - - -def save_embeddings(filename, embeddings): - with tf.gfile.GFile(filename, 'w') as f: - np.save(f, embeddings) - - -def calculate_iou(pred_labels, ref_labels): - """Calculates the intersection over union for binary segmentation. - - Args: - pred_labels: predicted segmentation labels. - ref_labels: reference segmentation labels. - - Returns: - The IoU between pred_labels and ref_labels - """ - if ref_labels.any(): - i = np.logical_and(pred_labels, ref_labels).sum() - u = np.logical_or(pred_labels, ref_labels).sum() - return i.astype('float') / u - else: - if pred_labels.any(): - return 0.0 - else: - return 1.0 - - -def calculate_multi_object_miou_tf(pred_labels, ref_labels): - """Calculates the mIoU for a batch of predicted and reference labels. - - Args: - pred_labels: Int32 tensor of shape [batch, height, width, 1]. - ref_labels: Int32 tensor of shape [batch, height, width, 1]. - - Returns: - The mIoU between pred_labels and ref_labels as float32 scalar tensor. - """ - - def calculate_multi_object_miou(pred_labels_, ref_labels_): - """Calculates the mIoU for predicted and reference labels in numpy. - - Args: - pred_labels_: int32 np.array of shape [batch, height, width, 1]. - ref_labels_: int32 np.array of shape [batch, height, width, 1]. - - Returns: - The mIoU between pred_labels_ and ref_labels_. 
- """ - assert len(pred_labels_.shape) == 4 - assert pred_labels_.shape[3] == 1 - assert pred_labels_.shape == ref_labels_.shape - ious = [] - for pred_label, ref_label in zip(pred_labels_, ref_labels_): - ids = np.setdiff1d(np.unique(ref_label), [0]) - if ids.size == 0: - continue - for id_ in ids: - iou = calculate_iou(pred_label == id_, ref_label == id_) - ious.append(iou) - if ious: - return np.cast['float32'](np.mean(ious)) - else: - return np.cast['float32'](1.0) - - miou = tf.py_func(calculate_multi_object_miou, [pred_labels, ref_labels], - tf.float32, name='calculate_multi_object_miou') - miou.set_shape(()) - return miou - - -def calculate_multi_object_ious(pred_labels, ref_labels, label_set): - """Calculates the intersection over union for binary segmentation. - - Args: - pred_labels: predicted segmentation labels. - ref_labels: reference segmentation labels. - label_set: int np.array of object ids. - - Returns: - float np.array of IoUs between pred_labels and ref_labels - for each object in label_set. - """ - # Background should not be included as object label. - return np.array([calculate_iou(pred_labels == label, ref_labels == label) - for label in label_set if label != 0]) diff --git a/research/feelvos/utils/mask_damaging.py b/research/feelvos/utils/mask_damaging.py deleted file mode 100644 index 74f3cdab5..000000000 --- a/research/feelvos/utils/mask_damaging.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utilities for artificially damaging segmentation masks.""" - -import numpy as np -from scipy.ndimage import interpolation -from skimage import morphology -from skimage import transform -import tensorflow as tf - - -def damage_masks(labels, shift=True, scale=True, rotate=True, dilate=True): - """Damages segmentation masks by random transformations. - - Args: - labels: Int32 labels tensor of shape (height, width, 1). - shift: Boolean, whether to damage the masks by shifting. - scale: Boolean, whether to damage the masks by scaling. - rotate: Boolean, whether to damage the masks by rotation. - dilate: Boolean, whether to damage the masks by dilation. - - Returns: - The damaged version of labels. - """ - def _damage_masks_np(labels_): - return damage_masks_np(labels_, shift, scale, rotate, dilate) - damaged_masks = tf.py_func(_damage_masks_np, [labels], tf.int32, - name='damage_masks') - damaged_masks.set_shape(labels.get_shape()) - return damaged_masks - - -def damage_masks_np(labels, shift=True, scale=True, rotate=True, dilate=True): - """Performs the actual mask damaging in numpy. - - Args: - labels: Int32 numpy array of shape (height, width, 1). - shift: Boolean, whether to damage the masks by shifting. - scale: Boolean, whether to damage the masks by scaling. - rotate: Boolean, whether to damage the masks by rotation. - dilate: Boolean, whether to damage the masks by dilation. 
-
-  Returns:
-    The damaged version of labels.
-  """
-  unique_labels = np.unique(labels)
-  unique_labels = np.setdiff1d(unique_labels, [0])
-  # Shuffle to get random depth ordering when combining together.
-  np.random.shuffle(unique_labels)
-  damaged_labels = np.zeros_like(labels)
-  for l in unique_labels:
-    obj_mask = (labels == l)
-    damaged_obj_mask = _damage_single_object_mask(obj_mask, shift, scale,
-                                                  rotate, dilate)
-    damaged_labels[damaged_obj_mask] = l
-  return damaged_labels
-
-
-def _damage_single_object_mask(mask, shift, scale, rotate, dilate):
-  """Performs mask damaging in numpy for a single object.
-
-  Args:
-    mask: Boolean numpy array of shape (height, width, 1).
-    shift: Boolean, whether to damage the masks by shifting.
-    scale: Boolean, whether to damage the masks by scaling.
-    rotate: Boolean, whether to damage the masks by rotation.
-    dilate: Boolean, whether to damage the masks by dilation.
-
-  Returns:
-    The damaged version of mask.
-  """
-  # For now we just do shifting and scaling. Better would be affine or thin
-  # plate spline transformations.
-  if shift:
-    mask = _shift_mask(mask)
-  if scale:
-    mask = _scale_mask(mask)
-  if rotate:
-    mask = _rotate_mask(mask)
-  if dilate:
-    mask = _dilate_mask(mask)
-  return mask
-
-
-def _shift_mask(mask, max_shift_factor=0.05):
-  """Damages a mask for a single object by randomly shifting it in numpy.
-
-  Args:
-    mask: Boolean numpy array of shape (height, width, 1).
-    max_shift_factor: Float scalar, the maximum factor for random shifting.
-
-  Returns:
-    The shifted version of mask.
-  """
-  nzy, nzx, _ = mask.nonzero()
-  h = nzy.max() - nzy.min()
-  w = nzx.max() - nzx.min()
-  size = np.sqrt(h * w)
-  offset = np.random.uniform(-size * max_shift_factor, size * max_shift_factor,
-                             2)
-  shifted_mask = interpolation.shift(np.squeeze(mask, axis=2),
-                                     offset, order=0).astype('bool')[...,
-                                                                     np.newaxis]
-  return shifted_mask
-
-
-def _scale_mask(mask, scale_amount=0.025):
-  """Damages a mask for a single object by randomly scaling it in numpy.
-
-  Args:
-    mask: Boolean numpy array of shape (height, width, 1).
-    scale_amount: Float scalar, the maximum factor for random scaling.
-
-  Returns:
-    The scaled version of mask.
-  """
-  nzy, nzx, _ = mask.nonzero()
-  cy = 0.5 * (nzy.max() - nzy.min())
-  cx = 0.5 * (nzx.max() - nzx.min())
-  scale_factor = np.random.uniform(1.0 - scale_amount, 1.0 + scale_amount)
-  shift = transform.SimilarityTransform(translation=[-cx, -cy])
-  inv_shift = transform.SimilarityTransform(translation=[cx, cy])
-  s = transform.SimilarityTransform(scale=[scale_factor, scale_factor])
-  m = (shift + (s + inv_shift)).inverse
-  scaled_mask = transform.warp(mask, m) > 0.5
-  return scaled_mask
-
-
-def _rotate_mask(mask, max_rot_degrees=3.0):
-  """Damages a mask for a single object by randomly rotating it in numpy.
-
-  Args:
-    mask: Boolean numpy array of shape (height, width, 1).
-    max_rot_degrees: Float scalar, the maximum number of degrees to rotate.
-
-  Returns:
-    The rotated version of mask.
-  """
-  cy = 0.5 * mask.shape[0]
-  cx = 0.5 * mask.shape[1]
-  rot_degrees = np.random.uniform(-max_rot_degrees, max_rot_degrees)
-  shift = transform.SimilarityTransform(translation=[-cx, -cy])
-  inv_shift = transform.SimilarityTransform(translation=[cx, cy])
-  r = transform.SimilarityTransform(rotation=np.deg2rad(rot_degrees))
-  m = (shift + (r + inv_shift)).inverse
-  rotated_mask = transform.warp(mask, m) > 0.5
-  return rotated_mask
-
-
-def _dilate_mask(mask, dilation_radius=5):
-  """Damages a mask for a single object by dilating it in numpy.
-
-  Args:
-    mask: Boolean numpy array of shape (height, width, 1).
-    dilation_radius: Integer, the radius of the used disk structure element.
-
-  Returns:
-    The dilated version of mask.
-  """
-  disk = morphology.disk(dilation_radius, dtype=np.bool)
-  dilated_mask = morphology.binary_dilation(
-      np.squeeze(mask, axis=2), selem=disk)[..., np.newaxis]
-  return dilated_mask
diff --git a/research/feelvos/utils/train_utils.py b/research/feelvos/utils/train_utils.py
deleted file mode 100644
index 02a04cd33..000000000
--- a/research/feelvos/utils/train_utils.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Utility functions for training."""
-import collections
-import six
-import tensorflow as tf
-
-from deeplab.core import preprocess_utils
-from deeplab.utils import train_utils
-from feelvos.utils import embedding_utils
-from feelvos.utils import eval_utils
-
-slim = tf.contrib.slim
-add_softmax_cross_entropy_loss_for_each_scale = (
-    train_utils.add_softmax_cross_entropy_loss_for_each_scale)
-get_model_gradient_multipliers = train_utils.get_model_gradient_multipliers
-get_model_learning_rate = train_utils.get_model_learning_rate
-resolve_shape = preprocess_utils.resolve_shape
-
-
-def add_triplet_loss_for_each_scale(batch_size, num_frames_per_video,
-                                    embedding_dim, scales_to_embeddings,
-                                    labels, scope):
-  """Adds triplet loss for logits of each scale.
-
-  Args:
-    batch_size: Int, the number of video chunks sampled per batch.
-    num_frames_per_video: Int, the number of frames per video.
-    embedding_dim: Int, the dimension of the learned embedding.
-    scales_to_embeddings: A map from embedding names for different scales to
-      embeddings. The embeddings have shape [batch, embeddings_height,
-      embeddings_width, embedding_dim].
-    labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
-    scope: String, the scope for the loss.
-
-  Raises:
-    ValueError: If labels is None.
-  """
-  if labels is None:
-    raise ValueError('No label for triplet loss.')
-  for scale, embeddings in six.iteritems(scales_to_embeddings):
-    loss_scope = None
-    if scope:
-      loss_scope = '%s_%s' % (scope, scale)
-    # Label is downsampled to the same size as logits.
-    scaled_labels = tf.image.resize_nearest_neighbor(
-        labels,
-        resolve_shape(embeddings, 4)[1:3],
-        align_corners=True)
-    # Reshape from [batch * num_frames, ...] to [batch, num_frames, ...].
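# Illustrative sketch (editorial, not part of the original file): the
# per-anchor hinge triplet loss assembled below, in plain NumPy. Anchors are
# sampled from the first frame; positives and negatives come from the
# remaining frames of the same video.
import numpy as np

def triplet_hinge_np(anchors, anchor_cls, pool, pool_cls, margin=1.0):
  d = ((anchors[:, None, :] - pool[None, :, :]) ** 2).sum(-1)
  same = anchor_cls[:, None] == pool_cls[None, :]
  pos_min = np.where(same, d, np.inf).min(axis=1)
  neg_min = np.where(~same, d, np.inf).min(axis=1)
  loss = np.maximum(pos_min - neg_min + margin, 0.0)
  # Anchors without any positive candidate do not contribute to the loss.
  loss = np.where(same.any(axis=1), loss, 0.0)
  return loss.mean()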
-    h = tf.shape(embeddings)[1]
-    w = tf.shape(embeddings)[2]
-    new_labels_shape = tf.stack([batch_size, num_frames_per_video, h, w, 1])
-    reshaped_labels = tf.reshape(scaled_labels, new_labels_shape)
-    new_embeddings_shape = tf.stack([batch_size, num_frames_per_video, h, w,
-                                     -1])
-    reshaped_embeddings = tf.reshape(embeddings, new_embeddings_shape)
-
-    with tf.name_scope(loss_scope):
-      total_loss = tf.constant(0, dtype=tf.float32)
-      for n in range(batch_size):
-        embedding = reshaped_embeddings[n]
-        label = reshaped_labels[n]
-        n_pixels = h * w
-        n_anchors_used = 256
-        sampled_anchor_indices = tf.random_shuffle(tf.range(n_pixels))[
-            :n_anchors_used]
-        anchors_pool = tf.reshape(embedding[0], [-1, embedding_dim])
-        anchors_pool_classes = tf.reshape(label[0], [-1])
-        anchors = tf.gather(anchors_pool, sampled_anchor_indices)
-        anchor_classes = tf.gather(anchors_pool_classes, sampled_anchor_indices)
-
-        pos_neg_pool = tf.reshape(embedding[1:], [-1, embedding_dim])
-        pos_neg_pool_classes = tf.reshape(label[1:], [-1])
-        dists = embedding_utils.pairwise_distances(anchors, pos_neg_pool)
-        pos_mask = tf.equal(anchor_classes[:, tf.newaxis],
-                            pos_neg_pool_classes[tf.newaxis, :])
-        neg_mask = tf.logical_not(pos_mask)
-        pos_mask_f = tf.cast(pos_mask, tf.float32)
-        neg_mask_f = tf.cast(neg_mask, tf.float32)
-        pos_dists = pos_mask_f * dists + 1e20 * neg_mask_f
-        neg_dists = neg_mask_f * dists + 1e20 * pos_mask_f
-        pos_dists_min = tf.reduce_min(pos_dists, axis=1)
-        neg_dists_min = tf.reduce_min(neg_dists, axis=1)
-        margin = 1.0
-        loss = tf.nn.relu(pos_dists_min - neg_dists_min + margin)
-        # Handle the case that no positive is present (per anchor).
-        any_pos = tf.reduce_any(pos_mask, axis=1)
-        loss *= tf.cast(any_pos, tf.float32)
-        # Average over anchors.
-        loss = tf.reduce_mean(loss, axis=0)
-        total_loss += loss
-      total_loss /= batch_size
-      # Scale the loss up a bit.
-      total_loss *= 3.0
-      tf.add_to_collection(tf.GraphKeys.LOSSES, total_loss)
-
-
-def add_dynamic_softmax_cross_entropy_loss_for_each_scale(
-    scales_to_logits, labels, ignore_label, loss_weight=1.0,
-    upsample_logits=True, scope=None, top_k_percent_pixels=1.0,
-    hard_example_mining_step=100000):
-  """Adds softmax cross entropy loss per scale for logits with varying classes.
-
-  Also adds summaries for mIoU.
-
-  Args:
-    scales_to_logits: A map from logits names for different scales to logits.
-      The logits are a list of length batch_size of tensors of shape
-      [time, logits_height, logits_width, num_classes].
-    labels: Groundtruth labels with shape [batch_size * time, image_height,
-      image_width, 1].
-    ignore_label: Integer, label to ignore.
-    loss_weight: Float, loss weight.
-    upsample_logits: Boolean, upsample logits or not.
-    scope: String, the scope for the loss.
-    top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its
-      value < 1.0, only compute the loss for the top k percent pixels (e.g.,
-      the top 20% pixels). This is useful for hard pixel mining.
-    hard_example_mining_step: An integer, the training step in which the
-      hard example mining kicks off. Note that we gradually reduce the
-      mining percent to the top_k_percent_pixels. For example, if
-      hard_example_mining_step=100K and top_k_percent_pixels=0.25, then
-      mining percent will gradually reduce from 100% to 25% until 100K steps
-      after which we only mine the top 25% pixels.
-
-  Raises:
-    ValueError: If label or logits is None.
- """ - if labels is None: - raise ValueError('No label for softmax cross entropy loss.') - - if top_k_percent_pixels < 0 or top_k_percent_pixels > 1: - raise ValueError('Unexpected value of top_k_percent_pixels.') - - for scale, logits in six.iteritems(scales_to_logits): - loss_scope = None - if scope: - loss_scope = '%s_%s' % (scope, scale) - - if upsample_logits: - # Label is not downsampled, and instead we upsample logits. - assert isinstance(logits, collections.Sequence) - logits = [tf.image.resize_bilinear( - x, - preprocess_utils.resolve_shape(labels, 4)[1:3], - align_corners=True) for x in logits] - scaled_labels = labels - else: - # Label is downsampled to the same size as logits. - assert isinstance(logits, collections.Sequence) - scaled_labels = tf.image.resize_nearest_neighbor( - labels, - preprocess_utils.resolve_shape(logits[0], 4)[1:3], - align_corners=True) - - batch_size = len(logits) - num_time = preprocess_utils.resolve_shape(logits[0])[0] - reshaped_labels = tf.reshape( - scaled_labels, ([batch_size, num_time] + - preprocess_utils.resolve_shape(scaled_labels)[1:])) - for n, logits_n in enumerate(logits): - labels_n = reshaped_labels[n] - labels_n = tf.reshape(labels_n, shape=[-1]) - not_ignore_mask = tf.to_float(tf.not_equal(labels_n, - ignore_label)) * loss_weight - num_classes_n = tf.shape(logits_n)[-1] - one_hot_labels = slim.one_hot_encoding( - labels_n, num_classes_n, on_value=1.0, off_value=0.0) - logits_n_flat = tf.reshape(logits_n, shape=[-1, num_classes_n]) - if top_k_percent_pixels == 1.0: - tf.losses.softmax_cross_entropy( - one_hot_labels, - logits_n_flat, - weights=not_ignore_mask, - scope=loss_scope) - else: - # Only compute the loss for top k percent pixels. - # First, compute the loss for all pixels. Note we do not put the loss - # to loss_collection and set reduction = None to keep the shape. - num_pixels = tf.to_float(tf.shape(logits_n_flat)[0]) - pixel_losses = tf.losses.softmax_cross_entropy( - one_hot_labels, - logits_n_flat, - weights=not_ignore_mask, - scope='pixel_losses', - loss_collection=None, - reduction=tf.losses.Reduction.NONE) - # Compute the top_k_percent pixels based on current training step. - if hard_example_mining_step == 0: - # Directly focus on the top_k pixels. - top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels) - else: - # Gradually reduce the mining percent to top_k_percent_pixels. - global_step = tf.to_float(tf.train.get_or_create_global_step()) - ratio = tf.minimum(1.0, global_step / hard_example_mining_step) - top_k_pixels = tf.to_int32( - (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels) - _, top_k_indices = tf.nn.top_k(pixel_losses, - k=top_k_pixels, - sorted=True, - name='top_k_percent_pixels') - # Compute the loss for the top k percent pixels. - tf.losses.softmax_cross_entropy( - tf.gather(one_hot_labels, top_k_indices), - tf.gather(logits_n_flat, top_k_indices), - weights=tf.gather(not_ignore_mask, top_k_indices), - scope=loss_scope) - - pred_n = tf.argmax(logits_n, axis=-1, output_type=tf.int32)[ - ..., tf.newaxis] - labels_n = labels[n * num_time: (n + 1) * num_time] - miou = eval_utils.calculate_multi_object_miou_tf(pred_n, labels_n) - tf.summary.scalar('miou', miou) - - -def get_model_init_fn(train_logdir, - tf_initial_checkpoint, - initialize_last_layer, - last_layers, - ignore_missing_vars=False): - """Gets the function initializing model variables from a checkpoint. - - Args: - train_logdir: Log directory for training. - tf_initial_checkpoint: TensorFlow checkpoint for initialization. 
- initialize_last_layer: Initialize last layer or not. - last_layers: Last layers of the model. - ignore_missing_vars: Ignore missing variables in the checkpoint. - - Returns: - Initialization function. - """ - if tf_initial_checkpoint is None: - tf.logging.info('Not initializing the model from a checkpoint.') - return None - - if tf.train.latest_checkpoint(train_logdir): - tf.logging.info('Ignoring initialization; other checkpoint exists') - return None - - tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint) - - # Variables that will not be restored. - exclude_list = ['global_step'] - if not initialize_last_layer: - exclude_list.extend(last_layers) - - variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list) - - if variables_to_restore: - return slim.assign_from_checkpoint_fn( - tf_initial_checkpoint, - variables_to_restore, - ignore_missing_vars=ignore_missing_vars) - return None diff --git a/research/feelvos/utils/video_input_generator.py b/research/feelvos/utils/video_input_generator.py deleted file mode 100644 index c0135e501..000000000 --- a/research/feelvos/utils/video_input_generator.py +++ /dev/null @@ -1,558 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Wrapper for providing semantic segmentation video data.""" - -import tensorflow as tf -from feelvos import input_preprocess -from feelvos import model -from feelvos.utils import mask_damaging -from feelvos.utils import train_utils - -slim = tf.contrib.slim -dataset_data_provider = slim.dataset_data_provider - - -MIN_LABEL_COUNT = 10 - - -def decode_image_sequence(tensor, image_format='jpeg', shape=None, - channels=3, raw_dtype=tf.uint8): - """Decodes a sequence of images. - - Args: - tensor: the tensor of strings to decode, shape: [num_images] - image_format: a string (possibly tensor) with the format of the image. - Options include 'jpeg', 'png', and 'raw'. - shape: a list or tensor of the decoded image shape for a single image. - channels: if 'shape' is None, the third dimension of the image is set to - this value. - raw_dtype: if the image is encoded as raw bytes, this is the method of - decoding the bytes into values. - Returns: - The decoded images with shape [time, height, width, channels]. - """ - handler = slim.tfexample_decoder.Image( - shape=shape, channels=channels, dtype=raw_dtype, repeated=True) - return handler.tensors_to_item({'image/encoded': tensor, - 'image/format': image_format}) - - -def _get_data(data_provider, dataset_split, video_frames_are_decoded): - """Gets data from data provider. - - Args: - data_provider: An object of slim.data_provider. - dataset_split: Dataset split. - video_frames_are_decoded: Boolean, whether the video frames are already - decoded - - Returns: - image: Image Tensor. - label: Label Tensor storing segmentation annotations. 
- object_label: An integer refers to object_label according to labelmap. If - the example has more than one object_label, take the first one. - image_name: Image name. - height: Image height. - width: Image width. - video_id: String tensor representing the name of the video. - - Raises: - ValueError: Failed to find label. - """ - - if video_frames_are_decoded: - image, = data_provider.get(['image']) - else: - image, = data_provider.get(['image/encoded']) - - # Some datasets do not contain image_name. - if 'image_name' in data_provider.list_items(): - image_name, = data_provider.get(['image_name']) - else: - image_name = tf.constant('') - - height, width = data_provider.get(['height', 'width']) - - label = None - if dataset_split != 'test': - if video_frames_are_decoded: - if 'labels_class' not in data_provider.list_items(): - raise ValueError('Failed to find labels.') - label, = data_provider.get(['labels_class']) - else: - key = 'segmentation/object/encoded' - if key not in data_provider.list_items(): - raise ValueError('Failed to find labels.') - label, = data_provider.get([key]) - - object_label = None - video_id, = data_provider.get(['video_id']) - - return image, label, object_label, image_name, height, width, video_id - - -def _has_foreground_and_background_in_first_frame(label, subsampling_factor): - """Checks if the labels have foreground and background in the first frame. - - Args: - label: Label tensor of shape [num_frames, height, width, 1]. - subsampling_factor: Integer, the subsampling factor. - - Returns: - Boolean, whether the labels have foreground and background in the first - frame. - """ - h, w = train_utils.resolve_shape(label)[1:3] - label_downscaled = tf.squeeze( - tf.image.resize_nearest_neighbor(label[0, tf.newaxis], - [h // subsampling_factor, - w // subsampling_factor], - align_corners=True), - axis=0) - is_bg = tf.equal(label_downscaled, 0) - is_fg = tf.logical_not(is_bg) - # Just using reduce_any was not robust enough, so lets make sure the count - # is above MIN_LABEL_COUNT. - fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32)) - bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32)) - has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT) - has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT) - return tf.logical_and(has_bg, has_fg) - - -def _has_foreground_and_background_in_first_frame_2(label, - decoder_output_stride): - """Checks if the labels have foreground and background in the first frame. - - Second attempt, this time we use the actual output dimension for resizing. - - Args: - label: Label tensor of shape [num_frames, height, width, 1]. - decoder_output_stride: Integer, the stride of the decoder output. - - Returns: - Boolean, whether the labels have foreground and background in the first - frame. - """ - h, w = train_utils.resolve_shape(label)[1:3] - h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) - w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) - label_downscaled = tf.squeeze( - tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub], - align_corners=True), axis=0) - is_bg = tf.equal(label_downscaled, 0) - is_fg = tf.logical_not(is_bg) - # Just using reduce_any was not robust enough, so lets make sure the count - # is above MIN_LABEL_COUNT. 
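
For intuition, the check just below reduces to simple pixel counting; a toy NumPy example at full resolution, using this module's `MIN_LABEL_COUNT` of 10:

```python
import numpy as np

label = np.zeros((5, 5), np.int32)  # first frame, downscaled, channel dropped
label[:2, :] = 1                    # 10 foreground pixels, 15 background
fg_count = int((label != 0).sum())
bg_count = int((label == 0).sum())
print(fg_count >= 10 and bg_count >= 10)  # True: the frame passes the filter
```
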
- fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32)) - bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32)) - has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT) - has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT) - return tf.logical_and(has_bg, has_fg) - - -def _has_enough_pixels_of_each_object_in_first_frame( - label, decoder_output_stride): - """Checks if for each object (incl. background) enough pixels are visible. - - During test time, we will usually not see a reference frame in which only - very few pixels of one object are visible. These cases can be problematic - during training, especially if more than the 1-nearest neighbor is used. - That's why this function can be used to detect and filter these cases. - - Args: - label: Label tensor of shape [num_frames, height, width, 1]. - decoder_output_stride: Integer, the stride of the decoder output. - - Returns: - Boolean, whether the labels have enough pixels of each object in the first - frame. - """ - h, w = train_utils.resolve_shape(label)[1:3] - h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride) - w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride) - label_downscaled = tf.squeeze( - tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub], - align_corners=True), axis=0) - _, _, counts = tf.unique_with_counts( - tf.reshape(label_downscaled, [-1])) - has_enough_pixels_per_object = tf.reduce_all( - tf.greater_equal(counts, MIN_LABEL_COUNT)) - return has_enough_pixels_per_object - - -def get(dataset, - num_frames_per_video, - crop_size, - batch_size, - min_resize_value=None, - max_resize_value=None, - resize_factor=None, - min_scale_factor=1., - max_scale_factor=1., - scale_factor_step_size=0, - preprocess_image_and_label=True, - num_readers=1, - num_threads=1, - dataset_split=None, - is_training=True, - model_variant=None, - batch_capacity_factor=32, - video_frames_are_decoded=False, - decoder_output_stride=None, - first_frame_finetuning=False, - sample_only_first_frame_for_finetuning=False, - sample_adjacent_and_consistent_query_frames=False, - remap_labels_to_reference_frame=True, - generate_prev_frame_mask_by_mask_damaging=False, - three_frame_dataset=False, - add_prev_frame_label=True): - """Gets the dataset split for semantic segmentation. - - This functions gets the dataset split for semantic segmentation. In - particular, it is a wrapper of (1) dataset_data_provider which returns the raw - dataset split, (2) input_preprcess which preprocess the raw data, and (3) the - Tensorflow operation of batching the preprocessed data. Then, the output could - be directly used by training, evaluation or visualization. - - Args: - dataset: An instance of slim Dataset. - num_frames_per_video: The number of frames used per video - crop_size: Image crop size [height, width]. - batch_size: Batch size. - min_resize_value: Desired size of the smaller image side. - max_resize_value: Maximum allowed size of the larger image side. - resize_factor: Resized dimensions are multiple of factor plus one. - min_scale_factor: Minimum scale factor value. - max_scale_factor: Maximum scale factor value. - scale_factor_step_size: The step size from min scale factor to max scale - factor. The input is randomly scaled based on the value of - (min_scale_factor, max_scale_factor, scale_factor_step_size). - preprocess_image_and_label: Boolean variable specifies if preprocessing of - image and label will be performed or not. - num_readers: Number of readers for data provider. - num_threads: Number of threads for batching data. 
- dataset_split: Dataset split. - is_training: Is training or not. - model_variant: Model variant (string) for choosing how to mean-subtract the - images. See feature_extractor.network_map for supported model variants. - batch_capacity_factor: Batch capacity factor affecting the training queue - batch capacity. - video_frames_are_decoded: Boolean, whether the video frames are already - decoded - decoder_output_stride: Integer, the stride of the decoder output. - first_frame_finetuning: Boolean, whether to only sample the first frame - for fine-tuning. - sample_only_first_frame_for_finetuning: Boolean, whether to only sample the - first frame during fine-tuning. This should be False when using lucid or - wonderland data, but true when fine-tuning on the first frame only. - Only has an effect if first_frame_finetuning is True. - sample_adjacent_and_consistent_query_frames: Boolean, if true, the query - frames (all but the first frame which is the reference frame) will be - sampled such that they are adjacent video frames and have the same - crop coordinates and flip augmentation. - remap_labels_to_reference_frame: Boolean, whether to remap the labels of - the query frames to match the labels of the (downscaled) reference frame. - If a query frame contains a label which is not present in the reference, - it will be mapped to background. - generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate - the masks used as guidance from the previous frame by damaging the - ground truth mask. - three_frame_dataset: Boolean, whether the dataset has exactly three frames - per video of which the first is to be used as reference and the two - others are consecutive frames to be used as query frames. - add_prev_frame_label: Boolean, whether to sample one more frame before the - first query frame to obtain a previous frame label. Only has an effect, - if sample_adjacent_and_consistent_query_frames is True and - generate_prev_frame_mask_by_mask_damaging is False. - - Returns: - A dictionary of batched Tensors for semantic segmentation. - - Raises: - ValueError: dataset_split is None, or Failed to find labels. - """ - if dataset_split is None: - raise ValueError('Unknown dataset split.') - if model_variant is None: - tf.logging.warning('Please specify a model_variant. See ' - 'feature_extractor.network_map for supported model ' - 'variants.') - - data_provider = dataset_data_provider.DatasetDataProvider( - dataset, - num_readers=num_readers, - num_epochs=None if is_training else 1, - shuffle=is_training) - image, label, object_label, image_name, height, width, video_id = _get_data( - data_provider, dataset_split, video_frames_are_decoded) - - sampling_is_valid = tf.constant(True) - if num_frames_per_video is not None: - total_num_frames = tf.shape(image)[0] - if first_frame_finetuning or three_frame_dataset: - if sample_only_first_frame_for_finetuning: - assert not sample_adjacent_and_consistent_query_frames, ( - 'this option does not make sense for sampling only first frame.') - # Sample the first frame num_frames_per_video times. - sel_indices = tf.tile(tf.constant(0, dtype=tf.int32)[tf.newaxis], - multiples=[num_frames_per_video]) - else: - if sample_adjacent_and_consistent_query_frames: - if add_prev_frame_label: - num_frames_per_video += 1 - # Since this is first frame fine-tuning, we'll for now assume that - # each sequence has exactly 3 images: the ref frame and 2 adjacent - # query frames. 
- assert num_frames_per_video == 3 - with tf.control_dependencies([tf.assert_equal(total_num_frames, 3)]): - sel_indices = tf.constant([1, 2], dtype=tf.int32) - else: - # Sample num_frames_per_video - 1 query frames which are not the - # first frame. - sel_indices = tf.random_shuffle( - tf.range(1, total_num_frames))[:(num_frames_per_video - 1)] - # Concat first frame as reference frame to the front. - sel_indices = tf.concat([tf.constant(0, dtype=tf.int32)[tf.newaxis], - sel_indices], axis=0) - else: - if sample_adjacent_and_consistent_query_frames: - if add_prev_frame_label: - # Sample one more frame which we can use to provide initial softmax - # feedback. - num_frames_per_video += 1 - ref_idx = tf.random_shuffle(tf.range(total_num_frames))[0] - sampling_is_valid = tf.greater_equal(total_num_frames, - num_frames_per_video) - def sample_query_start_idx(): - return tf.random_shuffle( - tf.range(total_num_frames - num_frames_per_video + 1))[0] - query_start_idx = tf.cond(sampling_is_valid, sample_query_start_idx, - lambda: tf.constant(0, dtype=tf.int32)) - def sample_sel_indices(): - return tf.concat( - [ref_idx[tf.newaxis], - tf.range( - query_start_idx, - query_start_idx + (num_frames_per_video - 1))], axis=0) - sel_indices = tf.cond( - sampling_is_valid, sample_sel_indices, - lambda: tf.zeros((num_frames_per_video,), dtype=tf.int32)) - else: - # Randomly sample some frames from the video. - sel_indices = tf.random_shuffle( - tf.range(total_num_frames))[:num_frames_per_video] - image = tf.gather(image, sel_indices, axis=0) - if not video_frames_are_decoded: - image = decode_image_sequence(image) - - if label is not None: - if num_frames_per_video is not None: - label = tf.gather(label, sel_indices, axis=0) - if not video_frames_are_decoded: - label = decode_image_sequence(label, image_format='png', channels=1) - - # Sometimes, label is saved as [num_frames_per_video, height, width] or - # [num_frames_per_video, height, width, 1]. We change it to be - # [num_frames_per_video, height, width, 1]. - if label.shape.ndims == 3: - label = tf.expand_dims(label, 3) - elif label.shape.ndims == 4 and label.shape.dims[3] == 1: - pass - else: - raise ValueError('Input label shape must be ' - '[num_frames_per_video, height, width],' - ' or [num_frames, height, width, 1]. ' - 'Got {}'.format(label.shape.ndims)) - label.set_shape([None, None, None, 1]) - - # Add size of first dimension since tf can't figure it out automatically. 
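
The adjacent-and-consistent sampling above is easier to see with concrete indices; a NumPy sketch of the same scheme (one random reference frame plus a consecutive query window):

```python
import numpy as np

total_num_frames, num_frames_per_video = 10, 3
ref_idx = np.random.randint(total_num_frames)
query_start = np.random.randint(total_num_frames - num_frames_per_video + 1)
sel_indices = np.concatenate(
    [[ref_idx],
     np.arange(query_start, query_start + num_frames_per_video - 1)])
print(sel_indices)  # e.g. [7 2 3]: reference frame 7, adjacent queries 2 and 3
```
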
- image.set_shape((num_frames_per_video, None, None, None)) - if label is not None: - label.set_shape((num_frames_per_video, None, None, None)) - - preceding_frame_label = None - if preprocess_image_and_label: - if num_frames_per_video is None: - raise ValueError('num_frame_per_video must be specified for preproc.') - original_images = [] - images = [] - labels = [] - if sample_adjacent_and_consistent_query_frames: - num_frames_individual_preproc = 1 - else: - num_frames_individual_preproc = num_frames_per_video - for frame_idx in range(num_frames_individual_preproc): - original_image_t, image_t, label_t = ( - input_preprocess.preprocess_image_and_label( - image[frame_idx], - label[frame_idx], - crop_height=crop_size[0] if crop_size is not None else None, - crop_width=crop_size[1] if crop_size is not None else None, - min_resize_value=min_resize_value, - max_resize_value=max_resize_value, - resize_factor=resize_factor, - min_scale_factor=min_scale_factor, - max_scale_factor=max_scale_factor, - scale_factor_step_size=scale_factor_step_size, - ignore_label=dataset.ignore_label, - is_training=is_training, - model_variant=model_variant)) - original_images.append(original_image_t) - images.append(image_t) - labels.append(label_t) - if sample_adjacent_and_consistent_query_frames: - imgs_for_preproc = [image[frame_idx] for frame_idx in - range(1, num_frames_per_video)] - labels_for_preproc = [label[frame_idx] for frame_idx in - range(1, num_frames_per_video)] - original_image_rest, image_rest, label_rest = ( - input_preprocess.preprocess_images_and_labels_consistently( - imgs_for_preproc, - labels_for_preproc, - crop_height=crop_size[0] if crop_size is not None else None, - crop_width=crop_size[1] if crop_size is not None else None, - min_resize_value=min_resize_value, - max_resize_value=max_resize_value, - resize_factor=resize_factor, - min_scale_factor=min_scale_factor, - max_scale_factor=max_scale_factor, - scale_factor_step_size=scale_factor_step_size, - ignore_label=dataset.ignore_label, - is_training=is_training, - model_variant=model_variant)) - original_images.extend(original_image_rest) - images.extend(image_rest) - labels.extend(label_rest) - assert len(original_images) == num_frames_per_video - assert len(images) == num_frames_per_video - assert len(labels) == num_frames_per_video - - if remap_labels_to_reference_frame: - # Remap labels to indices into the labels of the (downscaled) reference - # frame, or 0, i.e. background, for labels which are not present - # in the reference. - reference_labels = labels[0][tf.newaxis] - h, w = train_utils.resolve_shape(reference_labels)[1:3] - embedding_height = model.scale_dimension( - h, 1.0 / decoder_output_stride) - embedding_width = model.scale_dimension( - w, 1.0 / decoder_output_stride) - reference_labels_embedding_size = tf.squeeze( - tf.image.resize_nearest_neighbor( - reference_labels, tf.stack([embedding_height, embedding_width]), - align_corners=True), - axis=0) - # Get sorted unique labels in the reference frame. 
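
The remapping implemented just below has a compact NumPy analogue: each query label becomes its index in the reference frame's sorted label set, and labels absent from the reference fall back to 0 (background):

```python
import numpy as np

labels_in_ref_frame = np.array([0, 3, 7])       # sorted unique reference labels
query = np.array([[3, 7], [5, 0]])              # label 5 is absent from the ref
mask = query[..., None] == labels_in_ref_frame  # compare against each ref label
remapped = mask.argmax(-1) * mask.any(-1)       # absent labels map to 0
print(remapped)                                 # [[1 2]
                                                #  [0 0]]
```
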
- labels_in_ref_frame, _ = tf.unique( - tf.reshape(reference_labels_embedding_size, [-1])) - labels_in_ref_frame = tf.contrib.framework.sort(labels_in_ref_frame) - for idx in range(1, len(labels)): - ref_label_mask = tf.equal( - labels[idx], - labels_in_ref_frame[tf.newaxis, tf.newaxis, :]) - remapped = tf.argmax(tf.cast(ref_label_mask, tf.uint8), axis=-1, - output_type=tf.int32) - # Set to 0 if label is not present - is_in_ref = tf.reduce_any(ref_label_mask, axis=-1) - remapped *= tf.cast(is_in_ref, tf.int32) - labels[idx] = remapped[..., tf.newaxis] - - if sample_adjacent_and_consistent_query_frames: - if first_frame_finetuning and generate_prev_frame_mask_by_mask_damaging: - preceding_frame_label = mask_damaging.damage_masks(labels[1]) - elif add_prev_frame_label: - # Discard the image of the additional frame and take the label as - # initialization for softmax feedback. - original_images = [original_images[0]] + original_images[2:] - preceding_frame_label = labels[1] - images = [images[0]] + images[2:] - labels = [labels[0]] + labels[2:] - num_frames_per_video -= 1 - - original_image = tf.stack(original_images, axis=0) - image = tf.stack(images, axis=0) - label = tf.stack(labels, axis=0) - else: - if label is not None: - # Need to set label shape due to batching. - label.set_shape([num_frames_per_video, - None if crop_size is None else crop_size[0], - None if crop_size is None else crop_size[1], - 1]) - original_image = tf.to_float(tf.zeros_like(label)) - if crop_size is None: - height = tf.shape(image)[1] - width = tf.shape(image)[2] - else: - height = crop_size[0] - width = crop_size[1] - - sample = {'image': image, - 'image_name': image_name, - 'height': height, - 'width': width, - 'video_id': video_id} - if label is not None: - sample['label'] = label - - if object_label is not None: - sample['object_label'] = object_label - - if preceding_frame_label is not None: - sample['preceding_frame_label'] = preceding_frame_label - - if not is_training: - # Original image is only used during visualization. - sample['original_image'] = original_image - - if is_training: - if first_frame_finetuning: - keep_input = tf.constant(True) - else: - keep_input = tf.logical_and(sampling_is_valid, tf.logical_and( - _has_enough_pixels_of_each_object_in_first_frame( - label, decoder_output_stride), - _has_foreground_and_background_in_first_frame_2( - label, decoder_output_stride))) - - batched = tf.train.maybe_batch(sample, - keep_input=keep_input, - batch_size=batch_size, - num_threads=num_threads, - capacity=batch_capacity_factor * batch_size, - dynamic_pad=True) - else: - batched = tf.train.batch(sample, - batch_size=batch_size, - num_threads=num_threads, - capacity=batch_capacity_factor * batch_size, - dynamic_pad=True) - - # Flatten from [batch, num_frames_per_video, ...] to - # batch * num_frames_per_video, ...]. 
- cropped_height = train_utils.resolve_shape(batched['image'])[2] - cropped_width = train_utils.resolve_shape(batched['image'])[3] - if num_frames_per_video is None: - first_dim = -1 - else: - first_dim = batch_size * num_frames_per_video - batched['image'] = tf.reshape(batched['image'], - [first_dim, cropped_height, cropped_width, 3]) - if label is not None: - batched['label'] = tf.reshape(batched['label'], - [first_dim, cropped_height, cropped_width, 1]) - return batched diff --git a/research/feelvos/vis_video.py b/research/feelvos/vis_video.py deleted file mode 100644 index 211bccf52..000000000 --- a/research/feelvos/vis_video.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Segmentation results evaluation and visualization for videos using attention. -""" - -import math -import os -import time -import numpy as np - -import tensorflow as tf - -from feelvos import common -from feelvos import model -from feelvos.datasets import video_dataset -from feelvos.utils import embedding_utils -from feelvos.utils import eval_utils -from feelvos.utils import video_input_generator - - -slim = tf.contrib.slim -flags = tf.app.flags -FLAGS = flags.FLAGS - -flags.DEFINE_integer('eval_interval_secs', 60 * 5, - 'How often (in seconds) to run evaluation.') - -flags.DEFINE_string('master', '', 'BNS name of the tensorflow server') - -flags.DEFINE_integer('vis_batch_size', 1, - 'The number of images in each batch during evaluation.') - -flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.') - -flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.') - -flags.DEFINE_integer('output_stride', 8, - 'The ratio of input to output spatial resolution.') - -flags.DEFINE_string('dataset', 'davis_2016', - 'Name of the segmentation dataset.') - -flags.DEFINE_string('vis_split', 'val', - 'Which split of the dataset used for visualizing results') - -flags.DEFINE_string( - 'dataset_dir', - '/cns/is-d/home/lcchen/data/pascal_voc_seg/example_sstables', - 'Where the dataset resides.') - -flags.DEFINE_integer('num_vis_examples', -1, - 'Number of examples for visualization. If -1, use all ' - 'samples in the vis data.') - -flags.DEFINE_multi_integer('atrous_rates', None, - 'Atrous rates for atrous spatial pyramid pooling.') - -flags.DEFINE_bool('save_segmentations', False, 'Whether to save the ' - 'segmentation masks as ' - 'png images. Might be slow ' - 'on cns.') - -flags.DEFINE_bool('save_embeddings', False, 'Whether to save the embeddings as' - 'pickle. Might be slow on cns.') - -flags.DEFINE_bool('eval_once_and_quit', False, - 'Whether to just run the eval a single time and quit ' - 'afterwards. 
Otherwise, the eval is run in a loop with ' - 'new checkpoints.') - -flags.DEFINE_boolean('first_frame_finetuning', False, - 'Whether to only sample the first frame for fine-tuning.') - -# the folder where segmentations are saved. -_SEGMENTATION_SAVE_FOLDER = 'segmentation' -_EMBEDDINGS_SAVE_FOLDER = 'embeddings' - - -def _process_seq_data(segmentation_dir, embeddings_dir, seq_name, - predicted_labels, gt_labels, embeddings): - """Calculates the sequence IoU and optionally save the segmentation masks. - - Args: - segmentation_dir: Directory in which the segmentation results are stored. - embeddings_dir: Directory in which the embeddings are stored. - seq_name: String, the name of the sequence. - predicted_labels: Int64 np.array of shape [n_frames, height, width]. - gt_labels: Ground truth labels, Int64 np.array of shape - [n_frames, height, width]. - embeddings: Float32 np.array of embeddings of shape - [n_frames, decoder_height, decoder_width, embedding_dim], or None. - - Returns: - The IoU for the sequence (float). - """ - sequence_dir = os.path.join(segmentation_dir, seq_name) - tf.gfile.MakeDirs(sequence_dir) - embeddings_seq_dir = os.path.join(embeddings_dir, seq_name) - tf.gfile.MakeDirs(embeddings_seq_dir) - label_set = np.unique(gt_labels[0]) - ious = [] - assert len(predicted_labels) == len(gt_labels) - if embeddings is not None: - assert len(predicted_labels) == len(embeddings) - for t, (predicted_label, gt_label) in enumerate( - zip(predicted_labels, gt_labels)): - if FLAGS.save_segmentations: - seg_filename = os.path.join(segmentation_dir, seq_name, '%05d.png' % t) - eval_utils.save_segmentation_with_colormap(seg_filename, predicted_label) - if FLAGS.save_embeddings: - embedding_filename = os.path.join(embeddings_dir, seq_name, - '%05d.npy' % t) - assert embeddings is not None - eval_utils.save_embeddings(embedding_filename, embeddings[t]) - object_ious_t = eval_utils.calculate_multi_object_ious( - predicted_label, gt_label, label_set) - ious.append(object_ious_t) - # First and last frame are excluded in DAVIS eval. - seq_ious = np.mean(ious[1:-1], axis=0) - tf.logging.info('seq ious: %s %s', seq_name, seq_ious) - return seq_ious - - -def create_predictions(samples, reference_labels, first_frame_img, - model_options): - """Predicts segmentation labels for each frame of the video. - - Slower version than create_predictions_fast, but does support more options. - - Args: - samples: Dictionary of input samples. - reference_labels: Int tensor of shape [1, height, width, 1]. - first_frame_img: Float32 tensor of shape [height, width, 3]. - model_options: An InternalModelOptions instance to configure models. - - Returns: - predicted_labels: Int tensor of shape [time, height, width] of - predicted labels for each frame. - all_embeddings: Float32 tensor of shape - [time, height, width, embedding_dim], or None. - """ - - def predict(args, imgs): - """Predicts segmentation labels and softmax probabilities for each image. - - Args: - args: A tuple of (predictions, softmax_probabilities), where predictions - is an int tensor of shape [1, h, w] and softmax_probabilities is a - float32 tensor of shape [1, h_decoder, w_decoder, n_objects]. - imgs: Either a one-tuple of the image to predict labels for of shape - [h, w, 3], or pair of previous frame and current frame image. - - Returns: - predictions: The predicted labels as int tensor of shape [1, h, w]. - softmax_probabilities: The softmax probabilities of shape - [1, h_decoder, w_decoder, n_objects]. 
- """ - if FLAGS.save_embeddings: - last_frame_predictions, last_softmax_probabilities, _ = args - else: - last_frame_predictions, last_softmax_probabilities = args - - if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: - ref_labels_to_use = tf.concat( - [reference_labels, last_frame_predictions[..., tf.newaxis]], - axis=0) - else: - ref_labels_to_use = reference_labels - - predictions, softmax_probabilities = model.predict_labels( - tf.stack((first_frame_img,) + imgs), - model_options=model_options, - image_pyramid=FLAGS.image_pyramid, - embedding_dimension=FLAGS.embedding_dimension, - reference_labels=ref_labels_to_use, - k_nearest_neighbors=FLAGS.k_nearest_neighbors, - use_softmax_feedback=FLAGS.use_softmax_feedback, - initial_softmax_feedback=last_softmax_probabilities, - embedding_seg_feature_dimension= - FLAGS.embedding_seg_feature_dimension, - embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, - embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, - embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, - also_return_softmax_probabilities=True, - num_frames_per_video= - (3 if FLAGS.also_attend_to_previous_frame or - FLAGS.use_softmax_feedback else 2), - normalize_nearest_neighbor_distances= - FLAGS.normalize_nearest_neighbor_distances, - also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, - use_local_previous_frame_attention= - FLAGS.use_local_previous_frame_attention, - previous_frame_attention_window_size= - FLAGS.previous_frame_attention_window_size, - use_first_frame_matching=FLAGS.use_first_frame_matching - ) - predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.int32) - - if FLAGS.save_embeddings: - names = [n.name for n in tf.get_default_graph().as_graph_def().node] - embedding_names = [x for x in names if 'embeddings' in x] - # This will crash when multi-scale inference is used. - assert len(embedding_names) == 1, len(embedding_names) - embedding_name = embedding_names[0] + ':0' - embeddings = tf.get_default_graph().get_tensor_by_name(embedding_name) - return predictions, softmax_probabilities, embeddings - else: - return predictions, softmax_probabilities - - init_labels = tf.squeeze(reference_labels, axis=-1) - init_softmax = embedding_utils.create_initial_softmax_from_labels( - reference_labels, reference_labels, common.parse_decoder_output_stride(), - reduce_labels=False) - if FLAGS.save_embeddings: - decoder_height = tf.shape(init_softmax)[1] - decoder_width = tf.shape(init_softmax)[2] - n_frames = (3 if FLAGS.also_attend_to_previous_frame - or FLAGS.use_softmax_feedback else 2) - embeddings_init = tf.zeros((n_frames, decoder_height, decoder_width, - FLAGS.embedding_dimension)) - init = (init_labels, init_softmax, embeddings_init) - else: - init = (init_labels, init_softmax) - # Do not eval the first frame again but concat the first frame ground - # truth instead. 
- if FLAGS.also_attend_to_previous_frame or FLAGS.use_softmax_feedback: - elems = (samples[common.IMAGE][:-1], samples[common.IMAGE][1:]) - else: - elems = (samples[common.IMAGE][1:],) - res = tf.scan(predict, elems, - initializer=init, - parallel_iterations=1, - swap_memory=True) - if FLAGS.save_embeddings: - predicted_labels, _, all_embeddings = res - first_frame_embeddings = all_embeddings[0, 0, tf.newaxis] - other_frame_embeddings = all_embeddings[:, -1] - all_embeddings = tf.concat( - [first_frame_embeddings, other_frame_embeddings], axis=0) - else: - predicted_labels, _ = res - all_embeddings = None - predicted_labels = tf.concat([reference_labels[..., 0], - tf.squeeze(predicted_labels, axis=1)], - axis=0) - return predicted_labels, all_embeddings - - -def create_predictions_fast(samples, reference_labels, first_frame_img, - model_options): - """Predicts segmentation labels for each frame of the video. - - Faster version than create_predictions, but does not support all options. - - Args: - samples: Dictionary of input samples. - reference_labels: Int tensor of shape [1, height, width, 1]. - first_frame_img: Float32 tensor of shape [height, width, 3]. - model_options: An InternalModelOptions instance to configure models. - - Returns: - predicted_labels: Int tensor of shape [time, height, width] of - predicted labels for each frame. - all_embeddings: Float32 tensor of shape - [time, height, width, embedding_dim], or None. - - Raises: - ValueError: If FLAGS.save_embeddings is True, FLAGS.use_softmax_feedback is - False, or FLAGS.also_attend_to_previous_frame is False. - """ - if FLAGS.save_embeddings: - raise ValueError('save_embeddings does not work with ' - 'create_predictions_fast. Use the slower ' - 'create_predictions instead.') - if not FLAGS.use_softmax_feedback: - raise ValueError('use_softmax_feedback must be True for ' - 'create_predictions_fast. Use the slower ' - 'create_predictions instead.') - if not FLAGS.also_attend_to_previous_frame: - raise ValueError('also_attend_to_previous_frame must be True for ' - 'create_predictions_fast. Use the slower ' - 'create_predictions instead.') - # Extract embeddings for first frame and prepare initial predictions. - first_frame_embeddings = embedding_utils.get_embeddings( - first_frame_img[tf.newaxis], model_options, FLAGS.embedding_dimension) - init_labels = tf.squeeze(reference_labels, axis=-1) - init_softmax = embedding_utils.create_initial_softmax_from_labels( - reference_labels, reference_labels, common.parse_decoder_output_stride(), - reduce_labels=False) - init = (init_labels, init_softmax, first_frame_embeddings) - - def predict(args, img): - """Predicts segmentation labels and softmax probabilities for each image. - - Args: - args: tuple of - (predictions, softmax_probabilities, last_frame_embeddings), where - predictions is an int tensor of shape [1, h, w], - softmax_probabilities is a float32 tensor of shape - [1, h_decoder, w_decoder, n_objects], - and last_frame_embeddings is a float32 tensor of shape - [h_decoder, w_decoder, embedding_dimension]. - img: Image to predict labels for of shape [h, w, 3]. - - Returns: - predictions: The predicted labels as int tensor of shape [1, h, w]. - softmax_probabilities: The softmax probabilities of shape - [1, h_decoder, w_decoder, n_objects]. 
- """ - (last_frame_predictions, last_softmax_probabilities, - prev_frame_embeddings) = args - ref_labels_to_use = tf.concat( - [reference_labels, last_frame_predictions[..., tf.newaxis]], - axis=0) - - predictions, softmax_probabilities, embeddings = model.predict_labels( - img[tf.newaxis], - model_options=model_options, - image_pyramid=FLAGS.image_pyramid, - embedding_dimension=FLAGS.embedding_dimension, - reference_labels=ref_labels_to_use, - k_nearest_neighbors=FLAGS.k_nearest_neighbors, - use_softmax_feedback=FLAGS.use_softmax_feedback, - initial_softmax_feedback=last_softmax_probabilities, - embedding_seg_feature_dimension= - FLAGS.embedding_seg_feature_dimension, - embedding_seg_n_layers=FLAGS.embedding_seg_n_layers, - embedding_seg_kernel_size=FLAGS.embedding_seg_kernel_size, - embedding_seg_atrous_rates=FLAGS.embedding_seg_atrous_rates, - also_return_softmax_probabilities=True, - num_frames_per_video=1, - normalize_nearest_neighbor_distances= - FLAGS.normalize_nearest_neighbor_distances, - also_attend_to_previous_frame=FLAGS.also_attend_to_previous_frame, - use_local_previous_frame_attention= - FLAGS.use_local_previous_frame_attention, - previous_frame_attention_window_size= - FLAGS.previous_frame_attention_window_size, - use_first_frame_matching=FLAGS.use_first_frame_matching, - also_return_embeddings=True, - ref_embeddings=(first_frame_embeddings, prev_frame_embeddings) - ) - predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.int32) - return predictions, softmax_probabilities, embeddings - - # Do not eval the first frame again but concat the first frame ground - # truth instead. - # If you have a lot of GPU memory, you can try to set swap_memory=False, - # and/or parallel_iterations=2. - elems = samples[common.IMAGE][1:] - res = tf.scan(predict, elems, - initializer=init, - parallel_iterations=1, - swap_memory=True) - predicted_labels, _, _ = res - predicted_labels = tf.concat([reference_labels[..., 0], - tf.squeeze(predicted_labels, axis=1)], - axis=0) - return predicted_labels - - -def main(unused_argv): - if FLAGS.vis_batch_size != 1: - raise ValueError('Only batch size 1 is supported for now') - - data_type = 'tf_sequence_example' - # Get dataset-dependent information. - dataset = video_dataset.get_dataset( - FLAGS.dataset, - FLAGS.vis_split, - dataset_dir=FLAGS.dataset_dir, - data_type=data_type) - - # Prepare for visualization. - tf.gfile.MakeDirs(FLAGS.vis_logdir) - segmentation_dir = os.path.join(FLAGS.vis_logdir, _SEGMENTATION_SAVE_FOLDER) - tf.gfile.MakeDirs(segmentation_dir) - embeddings_dir = os.path.join(FLAGS.vis_logdir, _EMBEDDINGS_SAVE_FOLDER) - tf.gfile.MakeDirs(embeddings_dir) - num_vis_examples = (dataset.num_videos if (FLAGS.num_vis_examples < 0) - else FLAGS.num_vis_examples) - if FLAGS.first_frame_finetuning: - num_vis_examples = 1 - - tf.logging.info('Visualizing on %s set', FLAGS.vis_split) - g = tf.Graph() - with g.as_default(): - # Without setting device to CPU we run out of memory. 
- with tf.device('cpu:0'): - samples = video_input_generator.get( - dataset, - None, - None, - FLAGS.vis_batch_size, - min_resize_value=FLAGS.min_resize_value, - max_resize_value=FLAGS.max_resize_value, - resize_factor=FLAGS.resize_factor, - dataset_split=FLAGS.vis_split, - is_training=False, - model_variant=FLAGS.model_variant, - preprocess_image_and_label=False, - remap_labels_to_reference_frame=False) - samples[common.IMAGE] = tf.cast(samples[common.IMAGE], tf.float32) - samples[common.LABEL] = tf.cast(samples[common.LABEL], tf.int32) - first_frame_img = samples[common.IMAGE][0] - reference_labels = samples[common.LABEL][0, tf.newaxis] - gt_labels = tf.squeeze(samples[common.LABEL], axis=-1) - seq_name = samples[common.VIDEO_ID][0] - - model_options = common.VideoModelOptions( - outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes}, - crop_size=None, - atrous_rates=FLAGS.atrous_rates, - output_stride=FLAGS.output_stride) - - all_embeddings = None - predicted_labels = create_predictions_fast( - samples, reference_labels, first_frame_img, model_options) - # If you need more options like saving embeddings, replace the call to - # create_predictions_fast with create_predictions. - - tf.train.get_or_create_global_step() - saver = tf.train.Saver(slim.get_variables_to_restore()) - sv = tf.train.Supervisor(graph=g, - logdir=FLAGS.vis_logdir, - init_op=tf.global_variables_initializer(), - summary_op=None, - summary_writer=None, - global_step=None, - saver=saver) - num_batches = int( - math.ceil(num_vis_examples / float(FLAGS.vis_batch_size))) - last_checkpoint = None - - # Infinite loop to visualize the results when new checkpoint is created. - while True: - last_checkpoint = slim.evaluation.wait_for_new_checkpoint( - FLAGS.checkpoint_dir, last_checkpoint) - start = time.time() - tf.logging.info( - 'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', - time.gmtime())) - tf.logging.info('Visualizing with model %s', last_checkpoint) - - all_ious = [] - with sv.managed_session(FLAGS.master, - start_standard_services=False) as sess: - sv.start_queue_runners(sess) - sv.saver.restore(sess, last_checkpoint) - - for batch in range(num_batches): - ops = [predicted_labels, gt_labels, seq_name] - if FLAGS.save_embeddings: - ops.append(all_embeddings) - tf.logging.info('Visualizing batch %d / %d', batch + 1, num_batches) - res = sess.run(ops) - tf.logging.info('Forwarding done') - pred_labels_val, gt_labels_val, seq_name_val = res[:3] - if FLAGS.save_embeddings: - all_embeddings_val = res[3] - else: - all_embeddings_val = None - seq_ious = _process_seq_data(segmentation_dir, embeddings_dir, - seq_name_val, pred_labels_val, - gt_labels_val, all_embeddings_val) - all_ious.append(seq_ious) - all_ious = np.concatenate(all_ious, axis=0) - tf.logging.info('n_seqs %s, mIoU %f', all_ious.shape, all_ious.mean()) - tf.logging.info( - 'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S', - time.gmtime())) - result_dir = FLAGS.vis_logdir + '/results/' - tf.gfile.MakeDirs(result_dir) - with tf.gfile.GFile(result_dir + seq_name_val + '.txt', 'w') as f: - f.write(str(all_ious)) - if FLAGS.first_frame_finetuning or FLAGS.eval_once_and_quit: - break - time_to_next_eval = start + FLAGS.eval_interval_secs - time.time() - if time_to_next_eval > 0: - time.sleep(time_to_next_eval) - - -if __name__ == '__main__': - flags.mark_flag_as_required('checkpoint_dir') - flags.mark_flag_as_required('vis_logdir') - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run() diff --git 
a/research/fivo/.gitattributes b/research/fivo/.gitattributes deleted file mode 100644 index f706c0421..000000000 --- a/research/fivo/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.pkl binary -*.tfrecord binary diff --git a/research/fivo/.gitignore b/research/fivo/.gitignore deleted file mode 100644 index af2f53751..000000000 --- a/research/fivo/.gitignore +++ /dev/null @@ -1,104 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -.static_storage/ -.media/ -local_settings.py - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ diff --git a/research/fivo/README.md b/research/fivo/README.md deleted file mode 100644 index 36d355b1b..000000000 --- a/research/fivo/README.md +++ /dev/null @@ -1,215 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Filtering Variational Objectives - -This folder contains a TensorFlow implementation of the algorithms from - -Chris J. Maddison\*, Dieterich Lawson\*, George Tucker\*, Nicolas Heess, Mohammad Norouzi, Andriy Mnih, Arnaud Doucet, and Yee Whye Teh. "Filtering Variational Objectives." NIPS 2017. - -[https://arxiv.org/abs/1705.09279](https://arxiv.org/abs/1705.09279) - -This code implements 3 different bounds for training sequential latent variable models: the evidence lower bound (ELBO), the importance weighted auto-encoder bound (IWAE), and our bound, the filtering variational objective (FIVO). - -Additionally it contains several sequential latent variable model implementations: - -* Variational recurrent neural network (VRNN) -* Stochastic recurrent neural network (SRNN) -* Gaussian hidden Markov model with linear conditionals (GHMM) - -The VRNN and SRNN can be trained for sequence modeling of pianoroll and speech data. The GHMM is trainable on a synthetic dataset, useful as a simple example of an analytically tractable model. - -#### Directory Structure -The important parts of the code are organized as follows. 
-
-```
-run_fivo.py                  # main script, contains flag definitions
-fivo
-├─smc.py                     # a sequential Monte Carlo implementation
-├─bounds.py                  # code for computing each bound, uses smc.py
-├─runners.py                 # code for VRNN and SRNN training and evaluation
-├─ghmm_runners.py            # code for GHMM training and evaluation
-├─data
-| ├─datasets.py              # readers for pianoroll and speech datasets
-| ├─calculate_pianoroll_mean.py  # preprocesses the pianoroll datasets
-| └─create_timit_dataset.py      # preprocesses the TIMIT dataset
-└─models
-  ├─base.py                  # base classes used in other models
-  ├─vrnn.py                  # VRNN implementation
-  ├─srnn.py                  # SRNN implementation
-  └─ghmm.py                  # Gaussian hidden Markov model (GHMM) implementation
-bin
-├─run_train.sh               # an example script that runs training
-├─run_eval.sh                # an example script that runs evaluation
-├─run_sample.sh              # an example script that runs sampling
-├─run_tests.sh               # a script that runs all tests
-└─download_pianorolls.sh     # a script that downloads pianoroll files
-```
-
-### Pianorolls
-
-Requirements before we start:
-
-* TensorFlow (see [tensorflow.org](http://tensorflow.org) for how to install)
-* [scipy](https://www.scipy.org/)
-* [sonnet](https://github.com/deepmind/sonnet)
-
-
-#### Download the Data
-
-The pianoroll datasets are encoded as pickled sparse arrays and are available at [http://www-etud.iro.umontreal.ca/~boulanni/icml2012](http://www-etud.iro.umontreal.ca/~boulanni/icml2012). You can use the script `bin/download_pianorolls.sh` to download the files into a directory of your choosing.
-```
-export PIANOROLL_DIR=~/pianorolls
-mkdir $PIANOROLL_DIR
-sh bin/download_pianorolls.sh $PIANOROLL_DIR
-```
-
-#### Preprocess the Data
-
-The script `calculate_pianoroll_mean.py` loads a pianoroll pickle file, calculates the mean, updates the pickle file to include the mean under the key `train_mean`, and writes the file back to disk in-place. You should do this for all pianoroll datasets you wish to train on.
-
-```
-python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/piano-midi.de.pkl
-python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/nottingham.pkl
-python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/musedata.pkl
-python data/calculate_pianoroll_mean.py --in_file=$PIANOROLL_DIR/jsb.pkl
-```
-
-#### Training
-
-Now we can train a model. Here is the command for a standard training run, taken from `bin/run_train.sh`:
-```
-python run_fivo.py \
-  --mode=train \
-  --logdir=/tmp/fivo \
-  --model=vrnn \
-  --bound=fivo \
-  --summarize_every=100 \
-  --batch_size=4 \
-  --num_samples=4 \
-  --learning_rate=0.0001 \
-  --dataset_path="$PIANOROLL_DIR/jsb.pkl" \
-  --dataset_type="pianoroll"
-```
-
-You should see output that looks something like this (with extra logging cruft):
-
-```
-Saving checkpoints for 0 into /tmp/fivo/model.ckpt.
-Step 1, fivo bound per timestep: -11.322491
-global_step/sec: 7.49971
-Step 101, fivo bound per timestep: -11.399275
-global_step/sec: 8.04498
-Step 201, fivo bound per timestep: -11.174991
-global_step/sec: 8.03989
-Step 301, fivo bound per timestep: -11.073008
-```
-
-#### Evaluation
-
-You can also evaluate saved checkpoints. The `eval` mode loads a model checkpoint, tests its performance on all items in a dataset, and reports the log-likelihood averaged over the dataset.
For example here is a command, taken from `bin/run_eval.sh`, that will evaluate a JSB model on the test set: - -``` -python run_fivo.py \ - --mode=eval \ - --split=test \ - --alsologtostderr \ - --logdir=/tmp/fivo \ - --model=vrnn \ - --batch_size=4 \ - --num_samples=4 \ - --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ - --dataset_type="pianoroll" -``` - -You should see output like this: -``` -Restoring parameters from /tmp/fivo/model.ckpt-0 -Model restored from step 0, evaluating. -test elbo ll/t: -12.198834, iwae ll/t: -11.981187 fivo ll/t: -11.579776 -test elbo ll/seq: -748.564789, iwae ll/seq: -735.209206 fivo ll/seq: -710.577141 -``` -The evaluation script prints log-likelihood in both nats per timestep (ll/t) and nats per sequence (ll/seq) for all three bounds. - -#### Sampling - -You can also sample from trained models. The `sample` mode loads a model checkpoint, conditions the model on a prefix of a randomly chosen datapoint, samples a sequence of outputs from the conditioned model, and writes out the samples and prefix to a `.npz` file in `logdir`. For example here is a command that samples from a model trained on JSB, taken from `bin/run_sample.sh`: -``` -python run_fivo.py \ - --mode=sample \ - --alsologtostderr \ - --logdir="/tmp/fivo" \ - --model=vrnn \ - --bound=fivo \ - --batch_size=4 \ - --num_samples=4 \ - --split=test \ - --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ - --dataset_type="pianoroll" \ - --prefix_length=25 \ - --sample_length=50 -``` - -Here `num_samples` denotes the number of samples used when conditioning the model as well as the number of trajectories to sample for each prefix. - -You should see very little output. -``` -Restoring parameters from /tmp/fivo/model.ckpt-0 -Running local_init_op. -Done running local_init_op. -``` - -Loading the samples with `np.load` confirms that we conditioned the model on 4 -prefixes of length 25 and sampled 4 sequences of length 50 for each prefix. -``` ->>> import numpy as np ->>> x = np.load("/tmp/fivo/samples.npz") ->>> x[()]['prefixes'].shape -(25, 4, 88) ->>> x[()]['samples'].shape -(50, 4, 4, 88) -``` - -### Training on TIMIT - -The TIMIT speech dataset is available at the [Linguistic Data Consortium website](https://catalog.ldc.upenn.edu/LDC93S1), but is unfortunately not free. These instructions will proceed assuming you have downloaded the TIMIT archive and extracted it into the directory `$RAW_TIMIT_DIR`. - -#### Preprocess TIMIT - -We preprocess TIMIT (as described in our paper) and write it out to a series of TFRecord files. To prepare the TIMIT dataset use the script `create_timit_dataset.py` -``` -export $TIMIT_DIR=~/timit_dataset -mkdir $TIMIT_DIR -python data/create_timit_dataset.py \ - --raw_timit_dir=$RAW_TIMIT_DIR \ - --out_dir=$TIMIT_DIR -``` -You should see this exact output: -``` -4389 train / 231 valid / 1680 test -train mean: 0.006060 train std: 548.136169 -``` - -#### Training on TIMIT -This is very similar to training on pianoroll datasets, with just a few flags switched. -``` -python run_fivo.py \ - --mode=train \ - --logdir=/tmp/fivo \ - --model=vrnn \ - --bound=fivo \ - --summarize_every=100 \ - --batch_size=4 \ - --num_samples=4 \ - --learning_rate=0.0001 \ - --dataset_path="$TIMIT_DIR/train" \ - --dataset_type="speech" -``` -Evaluation and sampling are similar. - -### Tests -This codebase comes with a number of tests to verify correctness, runnable via `bin/run_tests.sh`. The tests are also useful to look at for examples of how to use the code. 
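
If you only want one module rather than the whole suite, the standard library runner works as well; a sketch, assuming you run it from the `research/fivo` directory so the `fivo` package is importable:

```python
import unittest

# Discover and run all *_test.py modules under the fivo package.
suite = unittest.defaultTestLoader.discover('fivo', pattern='*_test.py')
unittest.TextTestRunner(verbosity=2).run(suite)
```
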
- -### Contact - -This codebase is maintained by Dieterich Lawson. For questions and issues please open an issue on the tensorflow/models issues tracker and assign it to @dieterichlawson. diff --git a/research/fivo/bin/download_pianorolls.sh b/research/fivo/bin/download_pianorolls.sh deleted file mode 100644 index ef7050b4d..000000000 --- a/research/fivo/bin/download_pianorolls.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# A script to download the pianoroll datasets. -# Accepts one argument, the directory to put the files in - -if [ -z "$1" ] - then - echo "Error, must provide a directory to download the files to." - exit -fi - -echo "Downloading datasets into $1" -curl -s "http://www-etud.iro.umontreal.ca/~boulanni/Piano-midi.de.pickle" > $1/piano-midi.de.pkl -curl -s "http://www-etud.iro.umontreal.ca/~boulanni/Nottingham.pickle" > $1/nottingham.pkl -curl -s "http://www-etud.iro.umontreal.ca/~boulanni/MuseData.pickle" > $1/musedata.pkl -curl -s "http://www-etud.iro.umontreal.ca/~boulanni/JSB%20Chorales.pickle" > $1/jsb.pkl diff --git a/research/fivo/bin/run_eval.sh b/research/fivo/bin/run_eval.sh deleted file mode 100644 index b30bcedc2..000000000 --- a/research/fivo/bin/run_eval.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# An example of running evaluation. - -PIANOROLL_DIR=$HOME/pianorolls - -python run_fivo.py \ - --mode=eval \ - --logdir=/tmp/fivo \ - --model=vrnn \ - --batch_size=4 \ - --num_samples=4 \ - --split=test \ - --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ - --dataset_type="pianoroll" diff --git a/research/fivo/bin/run_sample.sh b/research/fivo/bin/run_sample.sh deleted file mode 100644 index e0c82a0cb..000000000 --- a/research/fivo/bin/run_sample.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# An example of sampling from the model. - -PIANOROLL_DIR=$HOME/pianorolls - -python run_fivo.py \ - --mode=sample \ - --alsologtostderr \ - --logdir="/tmp/fivo" \ - --model=vrnn \ - --bound=fivo \ - --batch_size=4 \ - --num_samples=4 \ - --split=test \ - --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ - --dataset_type="pianoroll" \ - --prefix_length=25 \ - --sample_length=50 diff --git a/research/fivo/bin/run_tests.sh b/research/fivo/bin/run_tests.sh deleted file mode 100644 index 2ea58f016..000000000 --- a/research/fivo/bin/run_tests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -python -m fivo.smc_test && \ -python -m fivo.bounds_test && \ -python -m fivo.nested_utils_test && \ -python -m fivo.data.datasets_test && \ -python -m fivo.models.ghmm_test && \ -python -m fivo.models.vrnn_test && \ -python -m fivo.models.srnn_test && \ -python -m fivo.ghmm_runners_test && \ -python -m fivo.runners_test diff --git a/research/fivo/bin/run_train.sh b/research/fivo/bin/run_train.sh deleted file mode 100644 index a84595977..000000000 --- a/research/fivo/bin/run_train.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# An example of running training. 
- -PIANOROLL_DIR=$HOME/pianorolls - -python run_fivo.py \ - --mode=train \ - --logdir=/tmp/fivo \ - --model=vrnn \ - --bound=fivo \ - --summarize_every=100 \ - --batch_size=4 \ - --num_samples=4 \ - --learning_rate=0.0001 \ - --dataset_path="$PIANOROLL_DIR/jsb.pkl" \ - --dataset_type="pianoroll" diff --git a/research/fivo/experimental/README.md b/research/fivo/experimental/README.md deleted file mode 100644 index 649de0ba9..000000000 --- a/research/fivo/experimental/README.md +++ /dev/null @@ -1 +0,0 @@ -An experimental codebase for running simple examples. diff --git a/research/fivo/experimental/bounds.py b/research/fivo/experimental/bounds.py deleted file mode 100644 index afc970c59..000000000 --- a/research/fivo/experimental/bounds.py +++ /dev/null @@ -1,673 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple - -import tensorflow as tf -import summary_utils as summ - -Loss = namedtuple("Loss", "name loss vars") -Loss.__new__.__defaults__ = (tf.GraphKeys.TRAINABLE_VARIABLES,) - - -def iwae(model, observation, num_timesteps, num_samples=1, - summarize=False): - """Compute the IWAE evidence lower bound. - - Args: - model: A callable that computes one timestep of the model. - observation: A shape [batch_size*num_samples, state_size] Tensor - containing z_n, the observation for each sequence in the batch. - num_timesteps: The number of timesteps in each sequence, an integer. - num_samples: The number of samples to use to compute the IWAE bound. - Returns: - log_p_hat: The IWAE estimator of the lower bound on the log marginal. - loss: A tensor that you can perform gradient descent on to optimize the - bound. - maintain_ema_op: A no-op included for compatibility with FIVO. - states: The sequence of states sampled. - """ - # Initialization - num_instances = tf.shape(observation)[0] - batch_size = tf.cast(num_instances / num_samples, tf.int32) - states = [model.zero_state(num_instances)] - log_weights = [] - log_weight_acc = tf.zeros([num_samples, batch_size], dtype=observation.dtype) - - for t in xrange(num_timesteps): - # run the model for one timestep - (zt, log_q_zt, log_p_zt, log_p_x_given_z, _) = model( - states[-1], observation, t) - # update accumulators - states.append(zt) - log_weight = log_p_zt + log_p_x_given_z - log_q_zt - log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) - if summarize: - weight_dist = tf.contrib.distributions.Categorical( - logits=tf.transpose(log_weight_acc, perm=[1, 0]), - allow_nan_stats=False) - weight_entropy = weight_dist.entropy() - weight_entropy = tf.reduce_mean(weight_entropy) - tf.summary.scalar("weight_entropy/%d" % t, weight_entropy) - log_weights.append(log_weight_acc) - # Compute the lower bound on the log evidence. 
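
For reference, the estimator computed just below is plain log-mean-exp arithmetic; a NumPy check with hypothetical numbers (scipy is already a listed requirement of this codebase):

```python
import numpy as np
from scipy.special import logsumexp

log_weight_acc = np.array([[-10.0, -12.0],   # num_samples=3 rows,
                           [-11.0, -11.5],   # batch_size=2 columns
                           [-9.5, -13.0]])
num_timesteps = 4
log_p_hat = (logsumexp(log_weight_acc, axis=0) -
             np.log(log_weight_acc.shape[0])) / num_timesteps
print(log_p_hat)  # per-sequence IWAE bound, in nats per timestep
```
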
-  log_p_hat = (tf.reduce_logsumexp(log_weight_acc, axis=0) -
-               tf.log(tf.cast(num_samples, observation.dtype))) / num_timesteps
-  loss = -tf.reduce_mean(log_p_hat)
-  losses = [Loss("log_p_hat", loss)]
-
-  # we clip off the initial state before returning.
-  # there are no emas for iwae, so we return a noop for that
-  return log_p_hat, losses, tf.no_op(), states[1:], log_weights
-
-
-def multinomial_resampling(log_weights, states, n, b):
-  """Resample states with multinomial resampling.
-
-  Args:
-    log_weights: A (n x b) Tensor representing a batch of b logits for an n-ary
-      Categorical distribution.
-    states: A list of (b*n x d) Tensors that will be resampled; particle k of
-      batch element i is stored in row k*b + i.
-
-  Returns:
-    resampled_states: A list of (b*n x d) Tensors resampled via multinomial sampling.
-    log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions.
-    resampling_parameters: The Tensor of parameters of the resampling distribution.
-    ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions.
-    resampling_dist: The distribution object for resampling.
-  """
-  log_weights = tf.convert_to_tensor(log_weights)
-  states = [tf.convert_to_tensor(state) for state in states]
-
-  resampling_parameters = tf.transpose(log_weights, perm=[1,0])
-  resampling_dist = tf.contrib.distributions.Categorical(logits=resampling_parameters)
-  ancestors = tf.stop_gradient(
-      resampling_dist.sample(sample_shape=n))
-  log_probs = resampling_dist.log_prob(ancestors)
-
-  offset = tf.expand_dims(tf.range(b), 0)
-  ancestor_inds = tf.reshape(ancestors * b + offset, [-1])
-
-  resampled_states = []
-  for state in states:
-    resampled_states.append(tf.gather(state, ancestor_inds))
-  return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist
-
-def stratified_resampling(log_weights, states, n, b):
-  """Resample states with stratified resampling.
-
-  Args:
-    log_weights: A (n x b) Tensor representing a batch of b logits for an n-ary
-      Categorical distribution.
-    states: A list of (b*n x d) Tensors that will be resampled; particle k of
-      batch element i is stored in row k*b + i.
-
-  Returns:
-    resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling.
-    log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions.
-    resampling_parameters: The Tensor of parameters of the resampling distribution.
-    ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions.
-    resampling_dist: The distribution object for resampling.
- """ - log_weights = tf.convert_to_tensor(log_weights) - states = [tf.convert_to_tensor(state) for state in states] - - log_weights = tf.transpose(log_weights, perm=[1,0]) - - probs = tf.nn.softmax( - tf.tile(tf.expand_dims(log_weights, axis=1), - [1, n, 1]) - ) - - cdfs = tf.concat([tf.zeros((b,n,1), dtype=probs.dtype), tf.cumsum(probs, axis=2)], 2) - - bins = tf.range(n, dtype=probs.dtype) / n - bins = tf.tile(tf.reshape(bins, [1,-1,1]), [b,1,n+1]) - - strat_cdfs = tf.minimum(tf.maximum((cdfs - bins) * n, 0.0), 1.0) - resampling_parameters = strat_cdfs[:,:,1:] - strat_cdfs[:,:,:-1] - - resampling_dist = tf.contrib.distributions.Categorical( - probs = resampling_parameters, - allow_nan_stats=False) - - ancestors = tf.stop_gradient( - resampling_dist.sample()) - log_probs = resampling_dist.log_prob(ancestors) - - ancestors = tf.transpose(ancestors, perm=[1,0]) - log_probs = tf.transpose(log_probs, perm=[1,0]) - - offset = tf.expand_dims(tf.range(b), 0) - ancestor_inds = tf.reshape(ancestors * b + offset, [-1]) - - resampled_states = [] - for state in states: - resampled_states.append(tf.gather(state, ancestor_inds)) - - return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist - -def systematic_resampling(log_weights, states, n, b): - """Resample states with systematic resampling. - - Args: - log_weights: A (n x b) Tensor representing a batch of b logits for n-ary - Categorical distribution. - states: A list of (b*n x d) Tensors that will be resample in from the groups - of every n-th row. - - Returns: - resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. - log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. - resampling_parameters: The Tensor of parameters of the resampling distribution. - ancestors: An (n x b) Tensor of integral indices representing the ancestry decisions. - resampling_dist: The distribution object for resampling. - """ - - log_weights = tf.convert_to_tensor(log_weights) - states = [tf.convert_to_tensor(state) for state in states] - - log_weights = tf.transpose(log_weights, perm=[1,0]) - - probs = tf.nn.softmax( - tf.tile(tf.expand_dims(log_weights, axis=1), - [1, n, 1]) - ) - - cdfs = tf.concat([tf.zeros((b,n,1), dtype=probs.dtype), tf.cumsum(probs, axis=2)], 2) - - bins = tf.range(n, dtype=probs.dtype) / n - bins = tf.tile(tf.reshape(bins, [1,-1,1]), [b,1,n+1]) - - strat_cdfs = tf.minimum(tf.maximum((cdfs - bins) * n, 0.0), 1.0) - resampling_parameters = strat_cdfs[:,:,1:] - strat_cdfs[:,:,:-1] - - resampling_dist = tf.contrib.distributions.Categorical( - probs=resampling_parameters, - allow_nan_stats=True) - - U = tf.random_uniform((b, 1, 1), dtype=probs.dtype) - - ancestors = tf.stop_gradient(tf.reduce_sum(tf.to_float(U > strat_cdfs[:,:,1:]), axis=-1)) - log_probs = resampling_dist.log_prob(ancestors) - - ancestors = tf.transpose(ancestors, perm=[1,0]) - log_probs = tf.transpose(log_probs, perm=[1,0]) - - offset = tf.expand_dims(tf.range(b, dtype=probs.dtype), 0) - ancestor_inds = tf.reshape(ancestors * b + offset, [-1]) - - resampled_states = [] - for state in states: - resampled_states.append(tf.gather(state, ancestor_inds)) - - return resampled_states, log_probs, resampling_parameters, ancestors, resampling_dist - - -def log_blend(inputs, weights): - """Blends state in the log space. - - Args: - inputs: A set of scalar states, one for each particle in each particle filter. - Should be [num_samples, batch_size]. - weights: A set of weights used to blend the state. 
Each set of weights - should be of dimension [num_samples] (one weight for each previous particle). - There should be one set of weights for each new particle in each particle filter. - Thus the shape should be [num_samples, batch_size, num_samples] where - the first axis indexes new particle and the last axis indexes old particles. - Returns: - blended: The blended states, a tensor of shape [num_samples, batch_size]. - """ - raw_max = tf.reduce_max(inputs, axis=0, keepdims=True) - my_max = tf.stop_gradient( - tf.where(tf.is_finite(raw_max), raw_max, tf.zeros_like(raw_max)) - ) - # Don't ask. - blended = tf.log(tf.einsum("ijk,kj->ij", weights, tf.exp(inputs - raw_max))) + my_max - return blended - - -def relaxed_resampling(log_weights, states, num_samples, batch_size, - log_r_x=None, blend_type="log", temperature=0.5, - straight_through=False): - """Resample states with relaxed resampling. - - Args: - log_weights: A (n x b) Tensor representing a batch of b logits for n-ary - Categorical distribution. - states: A list of (b*n x d) Tensors that will be resample in from the groups - of every n-th row. - - Returns: - resampled_states: A list of (b*n x d) Tensors resampled via stratified sampling. - log_probs: A (n x b) Tensor of the log probabilities of the ancestry decisions. - resampling_parameters: The Tensor of parameters of the resampling distribution. - ancestors: An (n x b x n) Tensor of relaxed one hot representations of the ancestry decisions. - resampling_dist: The distribution object for resampling. - """ - assert blend_type in ["log", "linear"], "Blend type must be 'log' or 'linear'." - log_weights = tf.convert_to_tensor(log_weights) - states = [tf.convert_to_tensor(state) for state in states] - state_dim = states[0].get_shape().as_list()[-1] - # weights are num_samples by batch_size, so we transpose to get a - # set of batch_size distributions over [0,num_samples). - resampling_parameters = tf.transpose(log_weights, perm=[1, 0]) - resampling_dist = tf.contrib.distributions.RelaxedOneHotCategorical( - temperature, - logits=resampling_parameters) - - # sample num_samples samples from the distribution, resulting in a - # [num_samples, batch_size, num_samples] Tensor that represents a set of - # [num_samples, batch_size] blending weights. The dimensions represent - # [sample index, batch index, blending weight index] - ancestors = resampling_dist.sample(sample_shape=num_samples) - if straight_through: - # Forward pass discrete choices, backwards pass soft choices - hard_ancestor_indices = tf.argmax(ancestors, axis=-1) - hard_ancestors = tf.one_hot(hard_ancestor_indices, num_samples, - dtype=ancestors.dtype) - ancestors = tf.stop_gradient(hard_ancestors - ancestors) + ancestors - log_probs = resampling_dist.log_prob(ancestors) - if log_r_x is not None and blend_type == "log": - log_r_x = tf.reshape(log_r_x, [num_samples, batch_size]) - log_r_x = log_blend(log_r_x, ancestors) - log_r_x = tf.reshape(log_r_x, [num_samples*batch_size]) - elif log_r_x is not None and blend_type == "linear": - # If blend type is linear just add log_r to the states that will be blended - # linearly. 
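# --- Illustrative sketch (not from the original bounds.py; assumes only NumPy,
# names are for exposition). The blending performed below forms each new
# particle as a convex combination of the old particles, with weights sampled
# from the relaxed categorical; einsum expresses the same transpose/matmul:
import numpy as np

n_smp, b_sz, d = 3, 2, 4
old = np.random.randn(n_smp * b_sz, d).reshape(n_smp, b_sz, d)
w = np.random.dirichlet(np.ones(n_smp), size=(n_smp, b_sz))  # rows sum to 1
# blended[k, i] = sum_j w[k, i, j] * old[j, i]
blended = np.einsum("kij,jid->kid", w, old).reshape(n_smp * b_sz, d)
# --- End sketch.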
-    states.append(log_r_x)
-
-  # transpose the 'indices' to be [batch_index, blending weight index, sample index]
-  ancestor_inds = tf.transpose(ancestors, perm=[1, 2, 0])
-  resampled_states = []
-  for state in states:
-    # state is currently [num_samples * batch_size, state_dim] so we reshape
-    # to [num_samples, batch_size, state_dim] and then transpose to
-    # [batch_size, state_size, num_samples]
-    state = tf.transpose(tf.reshape(state, [num_samples, batch_size, -1]), perm=[1, 2, 0])
-    # state is now (batch_size, state_size, num_samples)
-    # and ancestor is (batch index, blending weight index, sample index)
-    # multiplying these gives a matrix of size [batch_size, state_size, num_samples]
-    next_state = tf.matmul(state, ancestor_inds)
-    # transpose the state to be [num_samples, batch_size, state_size]
-    # and then reshape it to match the state format.
-    next_state = tf.reshape(tf.transpose(next_state, perm=[2,0,1]), [num_samples*batch_size, state_dim])
-    resampled_states.append(next_state)
-
-  new_dist = tf.contrib.distributions.Categorical(
-      logits=resampling_parameters)
-
-  if log_r_x is not None and blend_type == "linear":
-    # If blend type is linear pop off log_r that we added to the states.
-    log_r_x = tf.squeeze(resampled_states[-1])
-    resampled_states = resampled_states[:-1]
-  return resampled_states, log_probs, log_r_x, resampling_parameters, ancestors, new_dist
-
-
-def fivo(model,
-         observation,
-         num_timesteps,
-         resampling_schedule,
-         num_samples=1,
-         use_resampling_grads=True,
-         resampling_type="multinomial",
-         resampling_temperature=0.5,
-         aux=True,
-         summarize=False):
-  """Compute the FIVO evidence lower bound.
-
-  Args:
-    model: A callable that computes one timestep of the model.
-    observation: A shape [batch_size*num_samples, state_size] Tensor
-      containing z_n, the observation for each sequence in the batch.
-    num_timesteps: The number of timesteps in each sequence, an integer.
-    resampling_schedule: A list of booleans of length num_timesteps, containing
-      True if a resampling should occur on a specific timestep.
-    num_samples: The number of samples to use to compute the FIVO bound.
-    use_resampling_grads: Whether or not to include the resampling gradients
-      in the loss.
-    resampling_type: The type of resampling, one of "multinomial", "stratified",
-      "relaxed-logblend", "relaxed-linearblend", "relaxed-stateblend",
-      "relaxed-stateblend-st", or "systematic".
-    resampling_temperature: A positive temperature only used for relaxed
-      resampling.
-    aux: If true, compute the FIVO-AUX bound.
-    summarize: If true, write summaries of the resampling learning signal.
-  Returns:
-    log_p_hat: The FIVO estimator of the lower bound on the log marginal.
-    losses: A list of Loss tuples that can be minimized to optimize the bound.
-    maintain_ema_op: An op to update the baseline ema used for the resampling
-      gradients.
-    states: The sequence of states sampled.
-    log_weights: The accumulated log weights at each timestep, before
-      resampling.
- """ - # Initialization - num_instances = tf.cast(tf.shape(observation)[0], tf.int32) - batch_size = tf.cast(num_instances / num_samples, tf.int32) - states = [model.zero_state(num_instances)] - prev_state = states[0] - log_weight_acc = tf.zeros(shape=[num_samples, batch_size], dtype=observation.dtype) - prev_log_r_zt = tf.zeros([num_instances], dtype=observation.dtype) - log_weights = [] - log_weights_all = [] - log_p_hats = [] - resampling_log_probs = [] - for t in xrange(num_timesteps): - # run the model for one timestep - (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_zt) = model( - prev_state, observation, t) - # update accumulators - states.append(zt) - log_weight = log_p_zt + log_p_x_given_z - log_q_zt - if aux: - if t == num_timesteps - 1: - log_weight -= prev_log_r_zt - else: - log_weight += log_r_zt - prev_log_r_zt - prev_log_r_zt = log_r_zt - log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) - log_weights_all.append(log_weight_acc) - if resampling_schedule[t]: - - # These objects will be resampled - to_resample = [states[-1]] - if aux and "relaxed" not in resampling_type: - to_resample.append(prev_log_r_zt) - - # do the resampling - if resampling_type == "multinomial": - (resampled, - resampling_log_prob, - _, _, _) = multinomial_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size) - elif resampling_type == "stratified": - (resampled, - resampling_log_prob, - _, _, _) = stratified_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size) - elif resampling_type == "systematic": - (resampled, - resampling_log_prob, - _, _, _) = systematic_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size) - elif "relaxed" in resampling_type: - if aux: - if resampling_type == "relaxed-logblend": - (resampled, - resampling_log_prob, - prev_log_r_zt, - _, _, _) = relaxed_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size, - temperature=resampling_temperature, - log_r_x=prev_log_r_zt, - blend_type="log") - elif resampling_type == "relaxed-linearblend": - (resampled, - resampling_log_prob, - prev_log_r_zt, - _, _, _) = relaxed_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size, - temperature=resampling_temperature, - log_r_x=prev_log_r_zt, - blend_type="linear") - elif resampling_type == "relaxed-stateblend": - (resampled, - resampling_log_prob, - _, _, _, _) = relaxed_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size, - temperature=resampling_temperature) - # Calculate prev_log_r_zt from the post-resampling state - prev_r_zt = model.r.r_xn(resampled[0], t) - prev_log_r_zt = tf.reduce_sum( - prev_r_zt.log_prob(observation), axis=[1]) - elif resampling_type == "relaxed-stateblend-st": - (resampled, - resampling_log_prob, - _, _, _, _) = relaxed_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size, - temperature=resampling_temperature, - straight_through=True) - # Calculate prev_log_r_zt from the post-resampling state - prev_r_zt = model.r.r_xn(resampled[0], t) - prev_log_r_zt = tf.reduce_sum( - prev_r_zt.log_prob(observation), axis=[1]) - else: - (resampled, - resampling_log_prob, - _, _, _, _) = relaxed_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size, - temperature=resampling_temperature) - #if summarize: - # resampling_entropy = resampling_dist.entropy() - # resampling_entropy = tf.reduce_mean(resampling_entropy) - # tf.summary.scalar("weight_entropy/%d" % t, resampling_entropy) - - 
resampling_log_probs.append(tf.reduce_sum(resampling_log_prob, axis=0)) - prev_state = resampled[0] - if aux and "relaxed" not in resampling_type: - # Squeeze out the extra dim potentially added by resampling. - # prev_log_r_zt should always be [num_instances] - prev_log_r_zt = tf.squeeze(resampled[1]) - # Update the log p hat estimate, taking a log sum exp over the sample - # dimension. The appended tensor is [batch_size]. - log_p_hats.append( - tf.reduce_logsumexp(log_weight_acc, axis=0) - tf.log( - tf.cast(num_samples, dtype=observation.dtype))) - # reset the weights - log_weights.append(log_weight_acc) - log_weight_acc = tf.zeros_like(log_weight_acc) - else: - prev_state = states[-1] - # Compute the final weight update. If we just resampled this will be zero. - final_update = (tf.reduce_logsumexp(log_weight_acc, axis=0) - - tf.log(tf.cast(num_samples, dtype=observation.dtype))) - # If we ever resampled, then sum up the previous log p hat terms - if len(log_p_hats) > 0: - log_p_hat = tf.reduce_sum(log_p_hats, axis=0) + final_update - else: # otherwise, log_p_hat only comes from the final update - log_p_hat = final_update - - if use_resampling_grads and any(resampling_schedule): - # compute the rewards - # cumsum([a, b, c]) => [a, a+b, a+b+c] - # learning signal at timestep t is - # [sum from i=t+1 to T of log_p_hat_i for t=1:T] - # so we will compute (sum from i=1 to T of log_p_hat_i) - # and at timestep t will subtract off (sum from i=1 to t of log_p_hat_i) - # rewards is a [num_resampling_events, batch_size] Tensor - rewards = tf.stop_gradient( - tf.expand_dims(log_p_hat, 0) - tf.cumsum(log_p_hats, axis=0)) - batch_avg_rewards = tf.reduce_mean(rewards, axis=1) - # compute ema baseline. - # centered_rewards is [num_resampling_events, batch_size] - baseline_ema = tf.train.ExponentialMovingAverage(decay=0.94) - maintain_baseline_op = baseline_ema.apply([batch_avg_rewards]) - baseline = tf.expand_dims(baseline_ema.average(batch_avg_rewards), 1) - centered_rewards = rewards - baseline - if summarize: - summ.summarize_learning_signal(rewards, "rewards") - summ.summarize_learning_signal(centered_rewards, "centered_rewards") - # compute the loss tensor. - resampling_grads = tf.reduce_sum( - tf.stop_gradient(centered_rewards) * resampling_log_probs, axis=0) - losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat)/num_timesteps), - Loss("resampling_grads", -tf.reduce_mean(resampling_grads)/num_timesteps)] - else: - losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat)/num_timesteps)] - maintain_baseline_op = tf.no_op() - - log_p_hat /= num_timesteps - # we clip off the initial state before returning. 
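# --- Illustrative sketch (not from the original bounds.py; assumes only
# NumPy). The learning signal computed above gives the resampling decision at
# event t credit for all log_p_hat terms accrued strictly after it:
import numpy as np

lph = np.array([[0.5, 1.0], [0.2, 0.1], [0.3, 0.4]])  # [events, batch]
total = lph.sum(axis=0)  # final update omitted for brevity
rewards = total[None, :] - np.cumsum(lph, axis=0)
# rewards[t] == lph[t+1:].sum(axis=0); the last row is all zeros.
# --- End sketch.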
- return log_p_hat, losses, maintain_baseline_op, states[1:], log_weights_all - - -def fivo_aux_td( - model, - observation, - num_timesteps, - resampling_schedule, - num_samples=1, - summarize=False): - """Compute the FIVO_AUX evidence lower bound.""" - # Initialization - num_instances = tf.cast(tf.shape(observation)[0], tf.int32) - batch_size = tf.cast(num_instances / num_samples, tf.int32) - states = [model.zero_state(num_instances)] - prev_state = states[0] - log_weight_acc = tf.zeros(shape=[num_samples, batch_size], dtype=observation.dtype) - prev_log_r = tf.zeros([num_instances], dtype=observation.dtype) - # must be pre-resampling - log_rs = [] - # must be post-resampling - r_tilde_params = [model.r_tilde.r_zt(states[0], observation, 0)] - log_r_tildes = [] - log_p_xs = [] - # contains the weight at each timestep before resampling only on resampling timesteps - log_weights = [] - # contains weight at each timestep before resampling - log_weights_all = [] - log_p_hats = [] - for t in xrange(num_timesteps): - # run the model for one timestep - # zt is state, [num_instances, state_dim] - # log_q_zt, log_p_x_given_z is [num_instances] - # r_tilde_mu, r_tilde_sigma is [num_instances, state_dim] - # p_ztplus1 is a normal distribution on [num_instances, state_dim] - (zt, log_q_zt, log_p_zt, log_p_x_given_z, - r_tilde_mu, r_tilde_sigma_sq, p_ztplus1) = model(prev_state, observation, t) - - # Compute the log weight without log r. - log_weight = log_p_zt + log_p_x_given_z - log_q_zt - - # Compute log r. - if t == num_timesteps - 1: - log_r = tf.zeros_like(prev_log_r) - else: - p_mu = p_ztplus1.mean() - p_sigma_sq = p_ztplus1.variance() - log_r = (tf.log(r_tilde_sigma_sq) - - tf.log(r_tilde_sigma_sq + p_sigma_sq) - - tf.square(r_tilde_mu - p_mu)/(r_tilde_sigma_sq + p_sigma_sq)) - log_r = 0.5*tf.reduce_sum(log_r, axis=-1) - - #log_weight += tf.stop_gradient(log_r - prev_log_r) - log_weight += log_r - prev_log_r - log_weight_acc += tf.reshape(log_weight, [num_samples, batch_size]) - - # Update accumulators - states.append(zt) - log_weights_all.append(log_weight_acc) - log_p_xs.append(log_p_x_given_z) - log_rs.append(log_r) - - # Compute log_r_tilde as [num_instances] Tensor. - prev_r_tilde_mu, prev_r_tilde_sigma_sq = r_tilde_params[-1] - prev_log_r_tilde = -0.5*tf.reduce_sum( - tf.square(zt - prev_r_tilde_mu)/prev_r_tilde_sigma_sq, axis=-1) - #tf.square(tf.stop_gradient(zt) - r_tilde_mu)/r_tilde_sigma_sq, axis=-1) - #tf.square(zt - r_tilde_mu)/r_tilde_sigma_sq, axis=-1) - log_r_tildes.append(prev_log_r_tilde) - - # optionally resample - if resampling_schedule[t]: - # These objects will be resampled - if t < num_timesteps - 1: - to_resample = [zt, log_r, r_tilde_mu, r_tilde_sigma_sq] - else: - to_resample = [zt, log_r] - (resampled, - _, _, _, _) = multinomial_resampling(log_weight_acc, - to_resample, - num_samples, - batch_size) - prev_state = resampled[0] - # Squeeze out the extra dim potentially added by resampling. - # prev_log_r_zt and log_r_tilde should always be [num_instances] - prev_log_r = tf.squeeze(resampled[1]) - if t < num_timesteps -1: - r_tilde_params.append((resampled[2], resampled[3])) - # Update the log p hat estimate, taking a log sum exp over the sample - # dimension. The appended tensor is [batch_size]. 
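# --- Illustrative sketch (not from the original bounds.py; assumes only
# NumPy). A Monte Carlo check of the closed form used earlier in this function
# for log_r: with an unnormalized Gaussian r(z) = exp(-(z - r_mu)^2 / (2 r_sig2))
# and z ~ N(p_mu, p_sig2),
#   E[r(z)] = sqrt(r_sig2 / (r_sig2 + p_sig2))
#             * exp(-(r_mu - p_mu)^2 / (2 (r_sig2 + p_sig2))).
import numpy as np

rng = np.random.RandomState(0)
r_mu, r_sig2, p_mu, p_sig2 = 0.3, 0.8, -0.1, 0.5
z = rng.normal(p_mu, np.sqrt(p_sig2), size=1000000)
mc = np.log(np.mean(np.exp(-(z - r_mu) ** 2 / (2 * r_sig2))))
closed = 0.5 * (np.log(r_sig2) - np.log(r_sig2 + p_sig2)
                - (r_mu - p_mu) ** 2 / (r_sig2 + p_sig2))
# mc and closed agree to about three decimal places.
# --- End sketch.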
- log_p_hats.append( - tf.reduce_logsumexp(log_weight_acc, axis=0) - tf.log( - tf.cast(num_samples, dtype=observation.dtype))) - # reset the weights - log_weights.append(log_weight_acc) - log_weight_acc = tf.zeros_like(log_weight_acc) - else: - prev_state = zt - prev_log_r = log_r - if t < num_timesteps - 1: - r_tilde_params.append((r_tilde_mu, r_tilde_sigma_sq)) - - # Compute the final weight update. If we just resampled this will be zero. - final_update = (tf.reduce_logsumexp(log_weight_acc, axis=0) - - tf.log(tf.cast(num_samples, dtype=observation.dtype))) - # If we ever resampled, then sum up the previous log p hat terms - if len(log_p_hats) > 0: - log_p_hat = tf.reduce_sum(log_p_hats, axis=0) + final_update - else: # otherwise, log_p_hat only comes from the final update - log_p_hat = final_update - - # Compute the bellman loss. - # Will remove the first timestep as it is not used. - # log p(x_t|z_t) is in row t-1. - log_p_x = tf.reshape(tf.stack(log_p_xs), - [num_timesteps, num_samples, batch_size]) - # log r_t is contained in row t-1. - # last column is zeros (because at timestep T (num_timesteps) r is 1. - log_r = tf.reshape(tf.stack(log_rs), - [num_timesteps, num_samples, batch_size]) - # [num_timesteps, num_instances]. log r_tilde_t is in row t-1. - log_r_tilde = tf.reshape(tf.stack(log_r_tildes), - [num_timesteps, num_samples, batch_size]) - log_lambda = tf.reduce_mean(log_r_tilde - log_p_x - log_r, axis=1, - keepdims=True) - bellman_sos = tf.reduce_mean(tf.square( - log_r_tilde - tf.stop_gradient(log_lambda + log_p_x + log_r)), axis=[0, 1]) - bellman_loss = tf.reduce_mean(bellman_sos)/num_timesteps - tf.summary.scalar("bellman_loss", bellman_loss) - - if len(tf.get_collection("LOG_P_HAT_VARS")) == 0: - log_p_hat_collection = list(set(tf.trainable_variables()) - - set(tf.get_collection("R_TILDE_VARS"))) - for v in log_p_hat_collection: - tf.add_to_collection("LOG_P_HAT_VARS", v) - - log_p_hat /= num_timesteps - losses = [Loss("log_p_hat", -tf.reduce_mean(log_p_hat), "LOG_P_HAT_VARS"), - Loss("bellman_loss", bellman_loss, "R_TILDE_VARS")] - - return log_p_hat, losses, tf.no_op(), states[1:], log_weights_all diff --git a/research/fivo/experimental/data.py b/research/fivo/experimental/data.py deleted file mode 100644 index 0842f2129..000000000 --- a/research/fivo/experimental/data.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -import models - - -def make_long_chain_dataset( - state_size=1, - num_obs=5, - steps_per_obs=3, - variance=1., - observation_variance=1., - batch_size=4, - num_samples=1, - observation_type=models.STANDARD_OBSERVATION, - transition_type=models.STANDARD_TRANSITION, - fixed_observation=None, - dtype="float32"): - """Creates a long chain data generating process. - - Creates a tf.data.Dataset that provides batches of data from a long - chain. - - Args: - state_size: The dimension of the state space of the process. - num_obs: The number of observations in the chain. - steps_per_obs: The number of steps between each observation. - variance: The variance of the normal distributions used at each timestep. - batch_size: The number of trajectories to include in each batch. - num_samples: The number of replicas of each trajectory to include in each - batch. - dtype: The datatype of the states and observations. - Returns: - dataset: A tf.data.Dataset that can be iterated over. - """ - num_timesteps = num_obs * steps_per_obs - def data_generator(): - """An infinite generator of latents and observations from the model.""" - while True: - states = [] - observations = [] - # z0 ~ Normal(0, sqrt(variance)). - states.append( - np.random.normal(size=[state_size], - scale=np.sqrt(variance)).astype(dtype)) - # start at 1 because we've already generated z0 - # go to num_timesteps+1 because we want to include the num_timesteps-th step - for t in xrange(1, num_timesteps+1): - if transition_type == models.ROUND_TRANSITION: - loc = np.round(states[-1]) - elif transition_type == models.STANDARD_TRANSITION: - loc = states[-1] - new_state = np.random.normal(size=[state_size], - loc=loc, - scale=np.sqrt(variance)) - states.append(new_state.astype(dtype)) - if t % steps_per_obs == 0: - if fixed_observation is None: - if observation_type == models.SQUARED_OBSERVATION: - loc = np.square(states[-1]) - elif observation_type == models.ABS_OBSERVATION: - loc = np.abs(states[-1]) - elif observation_type == models.STANDARD_OBSERVATION: - loc = states[-1] - new_obs = np.random.normal(size=[state_size], - loc=loc, - scale=np.sqrt(observation_variance)).astype(dtype) - else: - new_obs = np.ones([state_size])* fixed_observation - - observations.append(new_obs) - yield states, observations - - dataset = tf.data.Dataset.from_generator( - data_generator, - output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)), - output_shapes=([num_timesteps+1, state_size], [num_obs, state_size])) - dataset = dataset.repeat().batch(batch_size) - - def tile_batch(state, observation): - state = tf.tile(state, [num_samples, 1, 1]) - observation = tf.tile(observation, [num_samples, 1, 1]) - return state, observation - - dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024) - return dataset - - -def make_dataset(bs=None, - state_size=1, - num_timesteps=10, - variance=1., - prior_type="unimodal", - bimodal_prior_weight=0.5, - bimodal_prior_mean=1, - transition_type=models.STANDARD_TRANSITION, - fixed_observation=None, - batch_size=4, - num_samples=1, - dtype='float32'): - """Creates a data generating process. - - Creates a tf.data.Dataset that provides batches of data. - - Args: - bs: The parameters of the data generating process. If None, new bs are - randomly generated. 
-    state_size: The dimension of the state space of the process.
-    num_timesteps: The length of the state sequences in the process.
-    variance: The variance of the normal distributions used at each timestep.
-    prior_type: The type of prior over the initial state, one of "unimodal",
-      "bimodal", or "nonlinear".
-    bimodal_prior_weight: The mixture weight of the negative mode when
-      prior_type is "bimodal".
-    bimodal_prior_mean: The magnitude of each mode's mean when prior_type is
-      "bimodal".
-    transition_type: One of models.ROUND_TRANSITION or
-      models.STANDARD_TRANSITION, selecting the transition mean.
-    fixed_observation: If not None, a constant emitted as the observation
-      instead of sampling one.
-    batch_size: The number of trajectories to include in each batch.
-    num_samples: The number of replicas of each trajectory to include in each
-      batch.
-    dtype: The datatype of the states and observations.
-  Returns:
-    bs: The true bs used to generate the data.
-    dataset: A tf.data.Dataset that can be iterated over.
-  """
-
-  if bs is None:
-    bs = [np.random.uniform(size=[state_size]).astype(dtype) for _ in xrange(num_timesteps)]
-  tf.logging.info("data generating process bs: %s",
-                  np.array(bs).reshape(num_timesteps))
-
-  def data_generator():
-    """An infinite generator of latents and observations from the model."""
-    while True:
-      states = []
-      if prior_type == "unimodal" or prior_type == "nonlinear":
-        # Prior is Normal(0, sqrt(variance)).
-        states.append(np.random.normal(size=[state_size], scale=np.sqrt(variance)).astype(dtype))
-      elif prior_type == "bimodal":
-        if np.random.uniform() > bimodal_prior_weight:
-          loc = bimodal_prior_mean
-        else:
-          loc = -bimodal_prior_mean
-        states.append(np.random.normal(size=[state_size],
-                                       loc=loc,
-                                       scale=np.sqrt(variance)
-                                       ).astype(dtype))
-
-      for t in xrange(num_timesteps):
-        if transition_type == models.ROUND_TRANSITION:
-          loc = np.round(states[-1])
-        elif transition_type == models.STANDARD_TRANSITION:
-          loc = states[-1]
-        loc += bs[t]
-        new_state = np.random.normal(size=[state_size],
-                                     loc=loc,
-                                     scale=np.sqrt(variance)).astype(dtype)
-        states.append(new_state)
-
-      if fixed_observation is None:
-        observation = states[-1]
-      else:
-        observation = np.ones_like(states[-1]) * fixed_observation
-      yield np.array(states[:-1]), observation
-
-  dataset = tf.data.Dataset.from_generator(
-      data_generator,
-      output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)),
-      output_shapes=([num_timesteps, state_size], [state_size]))
-  dataset = dataset.repeat().batch(batch_size)
-
-  def tile_batch(state, observation):
-    state = tf.tile(state, [num_samples, 1, 1])
-    observation = tf.tile(observation, [num_samples, 1])
-    return state, observation
-
-  dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024)
-  return np.array(bs), dataset
diff --git a/research/fivo/experimental/models.py b/research/fivo/experimental/models.py
deleted file mode 100644
index 62801ca1e..000000000
--- a/research/fivo/experimental/models.py
+++ /dev/null
@@ -1,1227 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import sonnet as snt -import tensorflow as tf -import numpy as np -import math - -SQUARED_OBSERVATION = "squared" -ABS_OBSERVATION = "abs" -STANDARD_OBSERVATION = "standard" -OBSERVATION_TYPES = [SQUARED_OBSERVATION, ABS_OBSERVATION, STANDARD_OBSERVATION] - -ROUND_TRANSITION = "round" -STANDARD_TRANSITION = "standard" -TRANSITION_TYPES = [ROUND_TRANSITION, STANDARD_TRANSITION] - - -class Q(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - dtype=tf.float32, - random_seed=None, - init_mu0_to_zero=False, - graph_collection_name="Q_VARS"): - self.sigma_min = sigma_min - self.dtype = dtype - self.graph_collection_name = graph_collection_name - initializers = [] - for t in xrange(num_timesteps): - if t == 0 and init_mu0_to_zero: - initializers.append( - {"w": tf.zeros_initializer, "b": tf.zeros_initializer}) - else: - initializers.append( - {"w": tf.random_uniform_initializer(seed=random_seed), - "b": tf.zeros_initializer}) - - def custom_getter(getter, *args, **kwargs): - out = getter(*args, **kwargs) - ref = tf.get_collection_ref(self.graph_collection_name) - if out not in ref: - ref.append(out) - return out - - self.mus = [ - snt.Linear(output_size=state_size, - initializers=initializers[t], - name="q_mu_%d" % t, - custom_getter=custom_getter - ) - for t in xrange(num_timesteps) - ] - self.sigmas = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="q_sigma_%d" % (t + 1), - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - initializer=tf.random_uniform_initializer(seed=random_seed)) - for t in xrange(num_timesteps) - ] - - def q_zt(self, observation, prev_state, t): - batch_size = tf.shape(prev_state)[0] - q_mu = self.mus[t](tf.concat([observation, prev_state], axis=1)) - q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) - q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) - q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) - return q_zt - - def summarize_weights(self): - for t, sigma in enumerate(self.sigmas): - tf.summary.scalar("q_sigma/%d" % t, sigma[0]) - for t, f in enumerate(self.mus): - tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) - tf.summary.scalar("q_mu/w_obs_%d" % t, f.w[0,0]) - if t != 0: - tf.summary.scalar("q_mu/w_prev_state_%d" % t, f.w[1,0]) - - -class PreviousStateQ(Q): - - def q_zt(self, unused_observation, prev_state, t): - batch_size = tf.shape(prev_state)[0] - q_mu = self.mus[t](prev_state) - q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) - q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) - q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) - return q_zt - - def summarize_weights(self): - for t, sigma in enumerate(self.sigmas): - tf.summary.scalar("q_sigma/%d" % t, sigma[0]) - for t, f in enumerate(self.mus): - tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) - tf.summary.scalar("q_mu/w_prev_state_%d" % t, f.w[0,0]) - - -class ObservationQ(Q): - - def q_zt(self, observation, prev_state, t): - batch_size = tf.shape(prev_state)[0] - q_mu = self.mus[t](observation) - q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) - q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) - q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) - return 
q_zt - - def summarize_weights(self): - for t, sigma in enumerate(self.sigmas): - tf.summary.scalar("q_sigma/%d" % t, sigma[0]) - for t, f in enumerate(self.mus): - tf.summary.scalar("q_mu/b_%d" % t, f.b[0]) - tf.summary.scalar("q_mu/w_obs_%d" % t, f.w[0,0]) - - -class SimpleMeanQ(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - dtype=tf.float32, - random_seed=None, - init_mu0_to_zero=False, - graph_collection_name="Q_VARS"): - self.sigma_min = sigma_min - self.dtype = dtype - self.graph_collection_name = graph_collection_name - initializers = [] - for t in xrange(num_timesteps): - if t == 0 and init_mu0_to_zero: - initializers.append(tf.zeros_initializer) - else: - initializers.append(tf.random_uniform_initializer(seed=random_seed)) - - self.mus = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="q_mu_%d" % (t + 1), - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - initializer=initializers[t]) - for t in xrange(num_timesteps) - ] - self.sigmas = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="q_sigma_%d" % (t + 1), - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - initializer=tf.random_uniform_initializer(seed=random_seed)) - for t in xrange(num_timesteps) - ] - - def q_zt(self, unused_observation, prev_state, t): - batch_size = tf.shape(prev_state)[0] - q_mu = tf.tile(self.mus[t][tf.newaxis, :], [batch_size, 1]) - q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) - q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) - q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) - return q_zt - - def summarize_weights(self): - for t, sigma in enumerate(self.sigmas): - tf.summary.scalar("q_sigma/%d" % t, sigma[0]) - for t, f in enumerate(self.mus): - tf.summary.scalar("q_mu/%d" % t, f[0]) - - -class R(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - dtype=tf.float32, - sigma_init=1., - random_seed=None, - graph_collection_name="R_VARS"): - self.dtype = dtype - self.sigma_min = sigma_min - initializers = {"w": tf.truncated_normal_initializer(seed=random_seed), - "b": tf.zeros_initializer} - self.graph_collection_name=graph_collection_name - - def custom_getter(getter, *args, **kwargs): - out = getter(*args, **kwargs) - ref = tf.get_collection_ref(self.graph_collection_name) - if out not in ref: - ref.append(out) - return out - - self.mus= [ - snt.Linear(output_size=state_size, - initializers=initializers, - name="r_mu_%d" % t, - custom_getter=custom_getter) - for t in xrange(num_timesteps) - ] - - self.sigmas = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="r_sigma_%d" % (t + 1), - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - #initializer=tf.random_uniform_initializer(seed=random_seed, maxval=100)) - initializer=tf.constant_initializer(sigma_init)) - for t in xrange(num_timesteps) - ] - - def r_xn(self, z_t, t): - batch_size = tf.shape(z_t)[0] - r_mu = self.mus[t](z_t) - r_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min) - r_sigma = tf.tile(r_sigma[tf.newaxis, :], [batch_size, 1]) - return tf.contrib.distributions.Normal( - loc=r_mu, scale=tf.sqrt(r_sigma)) - - def summarize_weights(self): - for t in range(len(self.mus) - 1): - tf.summary.scalar("r_mu/%d" % t, self.mus[t][0]) - tf.summary.scalar("r_sigma/%d" % t, self.sigmas[t][0]) - - -class P(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - 
variance=1.0, - dtype=tf.float32, - random_seed=None, - trainable=True, - init_bs_to_zero=False, - graph_collection_name="P_VARS"): - self.state_size = state_size - self.num_timesteps = num_timesteps - self.sigma_min = sigma_min - self.dtype = dtype - self.variance = variance - self.graph_collection_name = graph_collection_name - if init_bs_to_zero: - initializers = [tf.zeros_initializer for _ in xrange(num_timesteps)] - else: - initializers = [tf.random_uniform_initializer(seed=random_seed) for _ in xrange(num_timesteps)] - - self.bs = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="p_b_%d" % (t + 1), - initializer=initializers[t], - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - trainable=trainable) for t in xrange(num_timesteps) - ] - self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) - - def posterior(self, observation, prev_state, t): - """Computes the true posterior p(z_t|z_{t-1}, z_n).""" - # bs[0] is really b_1 - # Bs[i] is sum from k=i+1^n b_k - mu = observation - self.Bs[t] - if t > 0: - mu += (prev_state + self.bs[t - 1]) * float(self.num_timesteps - t) - mu /= float(self.num_timesteps - t + 1) - sigma = tf.ones_like(mu) * self.variance * ( - float(self.num_timesteps - t) / float(self.num_timesteps - t + 1)) - return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - - def lookahead(self, state, t): - """Computes the true lookahead distribution p(z_n|z_t).""" - mu = state + self.Bs[t] - sigma = tf.ones_like(state) * self.variance * float(self.num_timesteps - t) - return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - - def likelihood(self, observation): - batch_size = tf.shape(observation)[0] - mu = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) - sigma = tf.ones_like(mu) * self.variance * (self.num_timesteps + 1) - dist = tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - # Average over the batch and take the sum over the state size - return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observation), axis=1)) - - def p_zt(self, prev_state, t): - """Computes the model p(z_t| z_{t-1}).""" - batch_size = tf.shape(prev_state)[0] - if t > 0: - z_mu_p = prev_state + self.bs[t - 1] - else: # p(z_0) is Normal(0,1) - z_mu_p = tf.zeros([batch_size, self.state_size], dtype=self.dtype) - p_zt = tf.contrib.distributions.Normal( - loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance)) - return p_zt - - def generative(self, unused_observation, z_nm1): - """Computes the model's generative distribution p(z_n| z_{n-1}).""" - generative_p_mu = z_nm1 + self.bs[-1] - return tf.contrib.distributions.Normal( - loc=generative_p_mu, scale=tf.sqrt(tf.ones_like(generative_p_mu) * self.variance)) - - -class ShortChainNonlinearP(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - variance=1.0, - observation_variance=1.0, - transition_type=STANDARD_TRANSITION, - transition_dist=tf.contrib.distributions.Normal, - dtype=tf.float32, - random_seed=None): - self.state_size = state_size - self.num_timesteps = num_timesteps - self.sigma_min = sigma_min - self.dtype = dtype - self.variance = variance - self.observation_variance = observation_variance - self.transition_type = transition_type - self.transition_dist = transition_dist - - def p_zt(self, prev_state, t): - """Computes the model p(z_t| z_{t-1}).""" - batch_size = tf.shape(prev_state)[0] - if t > 0: - if self.transition_type == ROUND_TRANSITION: - loc = tf.round(prev_state) - tf.logging.info("p(z_%d | z_%d) 
~ N(round(z_%d), %0.1f)" % (t, t-1, t-1, self.variance)) - elif self.transition_type == STANDARD_TRANSITION: - loc = prev_state - tf.logging.info("p(z_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t-1, t-1, self.variance)) - else: # p(z_0) is Normal(0,1) - loc = tf.zeros([batch_size, self.state_size], dtype=self.dtype) - tf.logging.info("p(z_0) ~ N(0,%0.1f)" % self.variance) - - p_zt = self.transition_dist( - loc=loc, - scale=tf.sqrt(tf.ones_like(loc) * self.variance)) - return p_zt - - def generative(self, unused_obs, z_ni): - """Computes the model's generative distribution p(x_i| z_{ni}).""" - if self.transition_type == ROUND_TRANSITION: - loc = tf.round(z_ni) - elif self.transition_type == STANDARD_TRANSITION: - loc = z_ni - generative_sigma_sq = tf.ones_like(loc) * self.observation_variance - return self.transition_dist( - loc=loc, scale=tf.sqrt(generative_sigma_sq)) - - -class BimodalPriorP(object): - - def __init__(self, - state_size, - num_timesteps, - mixing_coeff=0.5, - prior_mode_mean=1, - sigma_min=1e-5, - variance=1.0, - dtype=tf.float32, - random_seed=None, - trainable=True, - init_bs_to_zero=False, - graph_collection_name="P_VARS"): - self.state_size = state_size - self.num_timesteps = num_timesteps - self.sigma_min = sigma_min - self.dtype = dtype - self.variance = variance - self.mixing_coeff = mixing_coeff - self.prior_mode_mean = prior_mode_mean - - if init_bs_to_zero: - initializers = [tf.zeros_initializer for _ in xrange(num_timesteps)] - else: - initializers = [tf.random_uniform_initializer(seed=random_seed) for _ in xrange(num_timesteps)] - - self.bs = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="b_%d" % (t + 1), - initializer=initializers[t], - collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name], - trainable=trainable) for t in xrange(num_timesteps) - ] - self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) - - def posterior(self, observation, prev_state, t): - # NOTE: This is currently wrong, but would require a refactoring of - # summarize_q to fix as kl is not defined for a mixture - """Computes the true posterior p(z_t|z_{t-1}, z_n).""" - # bs[0] is really b_1 - # Bs[i] is sum from k=i+1^n b_k - mu = observation - self.Bs[t] - if t > 0: - mu += (prev_state + self.bs[t - 1]) * float(self.num_timesteps - t) - mu /= float(self.num_timesteps - t + 1) - sigma = tf.ones_like(mu) * self.variance * ( - float(self.num_timesteps - t) / float(self.num_timesteps - t + 1)) - return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - - def lookahead(self, state, t): - """Computes the true lookahead distribution p(z_n|z_t).""" - mu = state + self.Bs[t] - sigma = tf.ones_like(state) * self.variance * float(self.num_timesteps - t) - return tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - - def likelihood(self, observation): - batch_size = tf.shape(observation)[0] - sum_of_bs = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) - sigma = tf.ones_like(sum_of_bs) * self.variance * (self.num_timesteps + 1) - mu_pos = (tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean) + sum_of_bs - mu_neg = (tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean) + sum_of_bs - zn_pos = tf.contrib.distributions.Normal( - loc=mu_pos, - scale=tf.sqrt(sigma)) - zn_neg = tf.contrib.distributions.Normal( - loc=mu_neg, - scale=tf.sqrt(sigma)) - mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1-self.mixing_coeff], dtype=tf.float64) - mode_probs = 
tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1]) - mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs) - zn_dist = tf.contrib.distributions.Mixture( - cat=mode_selection_dist, - components=[zn_pos, zn_neg], - validate_args=True) - # Average over the batch and take the sum over the state size - return tf.reduce_mean(tf.reduce_sum(zn_dist.log_prob(observation), axis=1)) - - def p_zt(self, prev_state, t): - """Computes the model p(z_t| z_{t-1}).""" - batch_size = tf.shape(prev_state)[0] - if t > 0: - z_mu_p = prev_state + self.bs[t - 1] - p_zt = tf.contrib.distributions.Normal( - loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance)) - return p_zt - else: # p(z_0) is mixture of two Normals - mu_pos = tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean - mu_neg = tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean - z0_pos = tf.contrib.distributions.Normal( - loc=mu_pos, - scale=tf.sqrt(tf.ones_like(mu_pos) * self.variance)) - z0_neg = tf.contrib.distributions.Normal( - loc=mu_neg, - scale=tf.sqrt(tf.ones_like(mu_neg) * self.variance)) - mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1-self.mixing_coeff], dtype=tf.float64) - mode_probs = tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1]) - mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs) - z0_dist = tf.contrib.distributions.Mixture( - cat=mode_selection_dist, - components=[z0_pos, z0_neg], - validate_args=False) - return z0_dist - - def generative(self, unused_observation, z_nm1): - """Computes the model's generative distribution p(z_n| z_{n-1}).""" - generative_p_mu = z_nm1 + self.bs[-1] - return tf.contrib.distributions.Normal( - loc=generative_p_mu, scale=tf.sqrt(tf.ones_like(generative_p_mu) * self.variance)) - -class Model(object): - - def __init__(self, - p, - q, - r, - state_size, - num_timesteps, - dtype=tf.float32): - self.p = p - self.q = q - self.r = r - self.state_size = state_size - self.num_timesteps = num_timesteps - self.dtype = dtype - - def zero_state(self, batch_size): - return tf.zeros([batch_size, self.state_size], dtype=self.dtype) - - def __call__(self, prev_state, observation, t): - # Compute the q distribution over z, q(z_t|z_n, z_{t-1}). - q_zt = self.q.q_zt(observation, prev_state, t) - # Compute the p distribution over z, p(z_t|z_{t-1}). - p_zt = self.p.p_zt(prev_state, t) - # sample from q - zt = q_zt.sample() - r_xn = self.r.r_xn(zt, t) - # Calculate the logprobs and sum over the state size. - log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) - log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) - log_r_xn = tf.reduce_sum(r_xn.log_prob(observation), axis=1) - # If we're at the last timestep, also calc the logprob of the observation. 
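# --- Illustrative sketch (not from the original models.py; names are for
# exposition). The bounds in bounds.py assemble the pieces returned below into
# the per-step weight
#   log w_t = log p(z_t | z_{t-1}) + log p(x | z_t) - log q(z_t | ...)
# plus, for the AUX variants, log r(x | z_t) - log r(x | z_{t-1}), which
# telescopes so only the p, q, and final observation terms remain:
def assemble_log_weight(log_p_zt, log_p_x_given_z, log_q_zt,
                        log_r_zt=None, prev_log_r_zt=None):
  log_w = log_p_zt + log_p_x_given_z - log_q_zt
  if log_r_zt is not None and prev_log_r_zt is not None:
    log_w += log_r_zt - prev_log_r_zt
  return log_w
# --- End sketch.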
- if t == self.num_timesteps - 1: - generative_dist = self.p.generative(observation, zt) - log_p_x_given_z = tf.reduce_sum(generative_dist.log_prob(observation), axis=1) - else: - log_p_x_given_z = tf.zeros_like(log_q_zt) - return (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_xn) - - @staticmethod - def create(state_size, - num_timesteps, - sigma_min=1e-5, - r_sigma_init=1, - variance=1.0, - mixing_coeff=0.5, - prior_mode_mean=1.0, - dtype=tf.float32, - random_seed=None, - train_p=True, - p_type="unimodal", - q_type="normal", - observation_variance=1.0, - transition_type=STANDARD_TRANSITION, - use_bs=True): - if p_type == "unimodal": - p = P(state_size, - num_timesteps, - sigma_min=sigma_min, - variance=variance, - dtype=dtype, - random_seed=random_seed, - trainable=train_p, - init_bs_to_zero=not use_bs) - elif p_type == "bimodal": - p = BimodalPriorP( - state_size, - num_timesteps, - mixing_coeff=mixing_coeff, - prior_mode_mean=prior_mode_mean, - sigma_min=sigma_min, - variance=variance, - dtype=dtype, - random_seed=random_seed, - trainable=train_p, - init_bs_to_zero=not use_bs) - elif "nonlinear" in p_type: - if "cauchy" in p_type: - trans_dist = tf.contrib.distributions.Cauchy - else: - trans_dist = tf.contrib.distributions.Normal - p = ShortChainNonlinearP( - state_size, - num_timesteps, - sigma_min=sigma_min, - variance=variance, - observation_variance=observation_variance, - transition_type=transition_type, - transition_dist=trans_dist, - dtype=dtype, - random_seed=random_seed - ) - - if q_type == "normal": - q_class = Q - elif q_type == "simple_mean": - q_class = SimpleMeanQ - elif q_type == "prev_state": - q_class = PreviousStateQ - elif q_type == "observation": - q_class = ObservationQ - - q = q_class(state_size, - num_timesteps, - sigma_min=sigma_min, - dtype=dtype, - random_seed=random_seed, - init_mu0_to_zero=not use_bs) - r = R(state_size, - num_timesteps, - sigma_min=sigma_min, - sigma_init=r_sigma_init, - dtype=dtype, - random_seed=random_seed) - model = Model(p, q, r, state_size, num_timesteps, dtype=dtype) - return model - - -class BackwardsModel(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - dtype=tf.float32): - self.state_size = state_size - self.num_timesteps = num_timesteps - self.sigma_min = sigma_min - self.dtype = dtype - self.bs = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="b_%d" % (t + 1), - initializer=tf.zeros_initializer) for t in xrange(num_timesteps) - ] - self.Bs = tf.cumsum(self.bs, reverse=True, axis=0) - self.q_mus = [ - snt.Linear(output_size=state_size) for _ in xrange(num_timesteps) - ] - self.q_sigmas = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="q_sigma_%d" % (t + 1), - initializer=tf.zeros_initializer) for t in xrange(num_timesteps) - ] - self.r_mus = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="r_mu_%d" % (t + 1), - initializer=tf.zeros_initializer) for t in xrange(num_timesteps) - ] - self.r_sigmas = [ - tf.get_variable( - shape=[state_size], - dtype=self.dtype, - name="r_sigma_%d" % (t + 1), - initializer=tf.zeros_initializer) for t in xrange(num_timesteps) - ] - - def zero_state(self, batch_size): - return tf.zeros([batch_size, self.state_size], dtype=self.dtype) - - def posterior(self, unused_observation, prev_state, unused_t): - # TODO(dieterichl): Correct this. 
- return tf.contrib.distributions.Normal( - loc=tf.zeros_like(prev_state), scale=tf.zeros_like(prev_state)) - - def lookahead(self, state, unused_t): - # TODO(dieterichl): Correct this. - return tf.contrib.distributions.Normal( - loc=tf.zeros_like(state), scale=tf.zeros_like(state)) - - def q_zt(self, observation, next_state, t): - """Computes the variational posterior q(z_{t}|z_{t+1}, z_n).""" - t_backwards = self.num_timesteps - t - 1 - batch_size = tf.shape(next_state)[0] - q_mu = self.q_mus[t_backwards](tf.concat([observation, next_state], axis=1)) - q_sigma = tf.maximum( - tf.nn.softplus(self.q_sigmas[t_backwards]), self.sigma_min) - q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1]) - q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma)) - return q_zt - - def p_zt(self, prev_state, t): - """Computes the model p(z_{t+1}| z_{t}).""" - t_backwards = self.num_timesteps - t - 1 - z_mu_p = prev_state + self.bs[t_backwards] - p_zt = tf.contrib.distributions.Normal( - loc=z_mu_p, scale=tf.ones_like(z_mu_p)) - return p_zt - - def generative(self, unused_observation, z_nm1): - """Computes the model's generative distribution p(z_n| z_{n-1}).""" - generative_p_mu = z_nm1 + self.bs[-1] - return tf.contrib.distributions.Normal( - loc=generative_p_mu, scale=tf.ones_like(generative_p_mu)) - - def r(self, z_t, t): - t_backwards = self.num_timesteps - t - 1 - batch_size = tf.shape(z_t)[0] - r_mu = tf.tile(self.r_mus[t_backwards][tf.newaxis, :], [batch_size, 1]) - r_sigma = tf.maximum( - tf.nn.softplus(self.r_sigmas[t_backwards]), self.sigma_min) - r_sigma = tf.tile(r_sigma[tf.newaxis, :], [batch_size, 1]) - return tf.contrib.distributions.Normal(loc=r_mu, scale=tf.sqrt(r_sigma)) - - def likelihood(self, observation): - batch_size = tf.shape(observation)[0] - mu = tf.tile(tf.reduce_sum(self.bs, axis=0)[tf.newaxis, :], [batch_size, 1]) - sigma = tf.ones_like(mu) * (self.num_timesteps + 1) - dist = tf.contrib.distributions.Normal(loc=mu, scale=tf.sqrt(sigma)) - # Average over the batch and take the sum over the state size - return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observation), axis=1)) - - def __call__(self, next_state, observation, t): - # next state = z_{t+1} - # Compute the q distribution over z, q(z_{t}|z_n, z_{t+1}). - q_zt = self.q_zt(observation, next_state, t) - # sample from q - zt = q_zt.sample() - # Compute the p distribution over z, p(z_{t+1}|z_{t}). - p_zt = self.p_zt(zt, t) - # Compute log p(z_{t+1} | z_t) - if t == 0: - log_p_zt = p_zt.log_prob(observation) - else: - log_p_zt = p_zt.log_prob(next_state) - - # Compute r prior over zt - r_zt = self.r(zt, t) - log_r_zt = r_zt.log_prob(zt) - # Compute proposal density at zt - log_q_zt = q_zt.log_prob(zt) - # If we're at the last timestep, also calc the logprob of the observation. 
- - if t == self.num_timesteps - 1: - p_z0_dist = tf.contrib.distributions.Normal( - loc=tf.zeros_like(zt), scale=tf.ones_like(zt)) - z0_log_prob = p_z0_dist.log_prob(zt) - else: - z0_log_prob = tf.zeros_like(log_q_zt) - return (zt, log_q_zt, log_p_zt, z0_log_prob, log_r_zt) - - -class LongChainP(object): - - def __init__(self, - state_size, - num_obs, - steps_per_obs, - sigma_min=1e-5, - variance=1.0, - observation_variance=1.0, - observation_type=STANDARD_OBSERVATION, - transition_type=STANDARD_TRANSITION, - dtype=tf.float32, - random_seed=None): - self.state_size = state_size - self.steps_per_obs = steps_per_obs - self.num_obs = num_obs - self.num_timesteps = steps_per_obs*num_obs + 1 - self.sigma_min = sigma_min - self.dtype = dtype - self.variance = variance - self.observation_variance = observation_variance - self.observation_type = observation_type - self.transition_type = transition_type - - def likelihood(self, observations): - """Computes the model's true likelihood of the observations. - - Args: - observations: A [batch_size, m, state_size] Tensor representing each of - the m observations. - Returns: - logprob: The true likelihood of the observations given the model. - """ - raise ValueError("Likelihood is not defined for long-chain models") - # batch_size = tf.shape(observations)[0] - # mu = tf.zeros([batch_size, self.state_size, self.num_obs], dtype=self.dtype) - # sigma = np.fromfunction( - # lambda i, j: 1 + self.steps_per_obs*np.minimum(i+1, j+1), - # [self.num_obs, self.num_obs]) - # sigma += np.eye(self.num_obs) - # sigma = tf.convert_to_tensor(sigma * self.variance, dtype=self.dtype) - # sigma = tf.tile(sigma[tf.newaxis, tf.newaxis, ...], - # [batch_size, self.state_size, 1, 1]) - # dist = tf.contrib.distributions.MultivariateNormalFullCovariance( - # loc=mu, - # covariance_matrix=sigma) - # Average over the batch and take the sum over the state size - #return tf.reduce_mean(tf.reduce_sum(dist.log_prob(observations), axis=1)) - - def p_zt(self, prev_state, t): - """Computes the model p(z_t| z_{t-1}).""" - batch_size = tf.shape(prev_state)[0] - if t > 0: - if self.transition_type == ROUND_TRANSITION: - loc = tf.round(prev_state) - tf.logging.info("p(z_%d | z_%d) ~ N(round(z_%d), %0.1f)" % (t, t-1, t-1, self.variance)) - elif self.transition_type == STANDARD_TRANSITION: - loc = prev_state - tf.logging.info("p(z_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t-1, t-1, self.variance)) - else: # p(z_0) is Normal(0,1) - loc = tf.zeros([batch_size, self.state_size], dtype=self.dtype) - tf.logging.info("p(z_0) ~ N(0,%0.1f)" % self.variance) - - p_zt = tf.contrib.distributions.Normal( - loc=loc, - scale=tf.sqrt(tf.ones_like(loc) * self.variance)) - return p_zt - - def generative(self, z_ni, t): - """Computes the model's generative distribution p(x_i| z_{ni}).""" - if self.observation_type == SQUARED_OBSERVATION: - generative_mu = tf.square(z_ni) - tf.logging.info("p(x_%d | z_%d) ~ N(z_%d^2, %0.1f)" % (t, t, t, self.variance)) - elif self.observation_type == ABS_OBSERVATION: - generative_mu = tf.abs(z_ni) - tf.logging.info("p(x_%d | z_%d) ~ N(|z_%d|, %0.1f)" % (t, t, t, self.variance)) - elif self.observation_type == STANDARD_OBSERVATION: - generative_mu = z_ni - tf.logging.info("p(x_%d | z_%d) ~ N(z_%d, %0.1f)" % (t, t, t, self.variance)) - generative_sigma_sq = tf.ones_like(generative_mu) * self.observation_variance - return tf.contrib.distributions.Normal( - loc=generative_mu, scale=tf.sqrt(generative_sigma_sq)) - - -class LongChainQ(object): - - def __init__(self, - state_size, - num_obs, 
-               steps_per_obs,
-               sigma_min=1e-5,
-               dtype=tf.float32,
-               random_seed=None):
-    self.state_size = state_size
-    self.sigma_min = sigma_min
-    self.dtype = dtype
-    self.steps_per_obs = steps_per_obs
-    self.num_obs = num_obs
-    self.num_timesteps = num_obs*steps_per_obs + 1
-
-    initializers = {
-        "w": tf.random_uniform_initializer(seed=random_seed),
-        "b": tf.zeros_initializer
-    }
-    self.mus = [
-        snt.Linear(output_size=state_size, initializers=initializers)
-        for t in xrange(self.num_timesteps)
-    ]
-    self.sigmas = [
-        tf.get_variable(
-            shape=[state_size],
-            dtype=self.dtype,
-            name="q_sigma_%d" % (t + 1),
-            initializer=tf.random_uniform_initializer(seed=random_seed))
-        for t in xrange(self.num_timesteps)
-    ]
-
-  def first_relevant_obs_index(self, t):
-    return int(max((t-1)/self.steps_per_obs, 0))
-
-  def q_zt(self, observations, prev_state, t):
-    """Computes a distribution over z_t.
-
-    Args:
-      observations: a [batch_size, num_observations, state_size] Tensor.
-      prev_state: a [batch_size, state_size] Tensor.
-      t: The current timestep, a Python int (it indexes the per-step layers).
-    """
-    # filter out unneeded past obs
-    first_relevant_obs_index = int(math.floor(max(t-1, 0) / self.steps_per_obs))
-    num_relevant_observations = self.num_obs - first_relevant_obs_index
-    observations = observations[:,first_relevant_obs_index:,:]
-    batch_size = tf.shape(prev_state)[0]
-    # Concatenate the prev state and observations along the second axis (the
-    # axis that is neither the batch nor the state size axis), then flatten to
-    # [batch_size, (num_relevant_observations + 1) * state_size] to feed it
-    # into the linear layer.
-    q_input = tf.concat([observations, prev_state[:,tf.newaxis, :]], axis=1)
-    q_input = tf.reshape(q_input,
-                         [batch_size, (num_relevant_observations + 1) * self.state_size])
-    q_mu = self.mus[t](q_input)
-    q_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min)
-    q_sigma = tf.tile(q_sigma[tf.newaxis, :], [batch_size, 1])
-    q_zt = tf.contrib.distributions.Normal(loc=q_mu, scale=tf.sqrt(q_sigma))
-    tf.logging.info(
-        "q(z_{t} | z_{tm1}, x_{obsf}:{obst}) ~ N(Linear([z_{tm1},x_{obsf}:{obst}]), sigma_{t})".format(
-            **{"t": t,
-               "tm1": t-1,
-               "obsf": (first_relevant_obs_index+1)*self.steps_per_obs,
-               "obst": self.steps_per_obs*self.num_obs}))
-    return q_zt
-
-  def summarize_weights(self):
-    pass
-
-class LongChainR(object):
-
-  def __init__(self,
-               state_size,
-               num_obs,
-               steps_per_obs,
-               sigma_min=1e-5,
-               dtype=tf.float32,
-               random_seed=None):
-    self.state_size = state_size
-    self.dtype = dtype
-    self.sigma_min = sigma_min
-    self.steps_per_obs = steps_per_obs
-    self.num_obs = num_obs
-    self.num_timesteps = num_obs*steps_per_obs + 1
-    self.sigmas = [
-        tf.get_variable(
-            shape=[self.num_future_obs(t)],
-            dtype=self.dtype,
-            name="r_sigma_%d" % (t + 1),
-            #initializer=tf.random_uniform_initializer(seed=random_seed, maxval=100))
-            initializer=tf.constant_initializer(1.0))
-        for t in range(self.num_timesteps)
-    ]
-
-  def first_future_obs_index(self, t):
-    return int(math.floor(t / self.steps_per_obs))
-
-  def num_future_obs(self, t):
-    return int(self.num_obs - self.first_future_obs_index(t))
-
-  def r_xn(self, z_t, t):
-    """Computes a distribution over the future observations given the current
-    latent state.
-
-    The indexing in these messages is 1-indexed and inclusive. This is
-    consistent with the LaTeX documents.
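For reference, the two indexing helpers just defined determine which observations r scores at each timestep. A standalone pure-Python copy (hypothetical names, no TF dependency) makes the indexing easy to sanity-check:

import math

def first_future_obs_index(t, steps_per_obs):
  return int(math.floor(t / steps_per_obs))

def num_future_obs(t, num_obs, steps_per_obs):
  return num_obs - first_future_obs_index(t, steps_per_obs)

# With num_obs=2 and steps_per_obs=3 (timesteps 0..6, observations at 3 and 6):
for t in range(7):
  print(t, first_future_obs_index(t, 3), num_future_obs(t, 2, 3))
# At t=0..2 both observations are in the future; at t=3..5 only the second;
# at t=6 no future observations remain (num_future_obs is 0).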
-
-    Args:
-      z_t: [batch_size, state_size] Tensor
-      t: Current timestep
-    """
-    tf.logging.info(
-        "r(x_{start}:{end} | z_{t}) ~ N(z_{t}, sigma_{t})".format(
-            **{"t": t,
-               "start": (self.first_future_obs_index(t)+1)*self.steps_per_obs,
-               "end": self.num_timesteps-1}))
-    batch_size = tf.shape(z_t)[0]
-    # The mean for all future observations is the same.
-    # This tiling results in a [batch_size, num_future_obs, state_size] Tensor.
-    r_mu = tf.tile(z_t[:,tf.newaxis,:], [1, self.num_future_obs(t), 1])
-    # compute the variance
-    r_sigma = tf.maximum(tf.nn.softplus(self.sigmas[t]), self.sigma_min)
-    # The variance is the same across all state dimensions, so we only have to
-    # tile the [num_future_obs] sigma out to
-    # [batch_size, num_future_obs, state_size].
-    r_sigma = tf.tile(r_sigma[tf.newaxis,:, tf.newaxis], [batch_size, 1, self.state_size])
-    return tf.contrib.distributions.Normal(
-        loc=r_mu, scale=tf.sqrt(r_sigma))
-
-  def summarize_weights(self):
-    pass
-
-
-class LongChainModel(object):
-
-  def __init__(self,
-               p,
-               q,
-               r,
-               state_size,
-               num_obs,
-               steps_per_obs,
-               dtype=tf.float32,
-               disable_r=False):
-    self.p = p
-    self.q = q
-    self.r = r
-    self.disable_r = disable_r
-    self.state_size = state_size
-    self.num_obs = num_obs
-    self.steps_per_obs = steps_per_obs
-    self.num_timesteps = steps_per_obs*num_obs + 1
-    self.dtype = dtype
-
-  def zero_state(self, batch_size):
-    return tf.zeros([batch_size, self.state_size], dtype=self.dtype)
-
-  def next_obs_ind(self, t):
-    return int(math.floor(max(t-1,0)/self.steps_per_obs))
-
-  def __call__(self, prev_state, observations, t):
-    """Computes the importance weight for the model system.
-
-    Args:
-      prev_state: [batch_size, state_size] Tensor
-      observations: [batch_size, num_observations, state_size] Tensor
-      t: The current timestep, a Python int.
-    """
-    # Compute the q distribution over z, q(z_t | z_{t-1}, remaining observations).
-    q_zt = self.q.q_zt(observations, prev_state, t)
-    # Compute the p distribution over z, p(z_t|z_{t-1}).
- p_zt = self.p.p_zt(prev_state, t) - # sample from q and evaluate the logprobs, summing over the state size - zt = q_zt.sample() - log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) - log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) - if not self.disable_r and t < self.num_timesteps-1: - # score the remaining observations using r - r_xn = self.r.r_xn(zt, t) - log_r_xn = r_xn.log_prob(observations[:, self.next_obs_ind(t+1):, :]) - # sum over state size and observation, leaving the batch index - log_r_xn = tf.reduce_sum(log_r_xn, axis=[1,2]) - else: - log_r_xn = tf.zeros_like(log_p_zt) - if t != 0 and t % self.steps_per_obs == 0: - generative_dist = self.p.generative(zt, t) - log_p_x_given_z = generative_dist.log_prob(observations[:,self.next_obs_ind(t),:]) - log_p_x_given_z = tf.reduce_sum(log_p_x_given_z, axis=1) - else: - log_p_x_given_z = tf.zeros_like(log_q_zt) - return (zt, log_q_zt, log_p_zt, log_p_x_given_z, log_r_xn) - - @staticmethod - def create(state_size, - num_obs, - steps_per_obs, - sigma_min=1e-5, - variance=1.0, - observation_variance=1.0, - observation_type=STANDARD_OBSERVATION, - transition_type=STANDARD_TRANSITION, - dtype=tf.float32, - random_seed=None, - disable_r=False): - p = LongChainP( - state_size, - num_obs, - steps_per_obs, - sigma_min=sigma_min, - variance=variance, - observation_variance=observation_variance, - observation_type=observation_type, - transition_type=transition_type, - dtype=dtype, - random_seed=random_seed) - q = LongChainQ( - state_size, - num_obs, - steps_per_obs, - sigma_min=sigma_min, - dtype=dtype, - random_seed=random_seed) - r = LongChainR( - state_size, - num_obs, - steps_per_obs, - sigma_min=sigma_min, - dtype=dtype, - random_seed=random_seed) - model = LongChainModel( - p, q, r, state_size, num_obs, steps_per_obs, - dtype=dtype, - disable_r=disable_r) - return model - - -class RTilde(object): - - def __init__(self, - state_size, - num_timesteps, - sigma_min=1e-5, - dtype=tf.float32, - random_seed=None, - graph_collection_name="R_TILDE_VARS"): - self.dtype = dtype - self.sigma_min = sigma_min - initializers = {"w": tf.truncated_normal_initializer(seed=random_seed), - "b": tf.zeros_initializer} - self.graph_collection_name=graph_collection_name - - def custom_getter(getter, *args, **kwargs): - out = getter(*args, **kwargs) - ref = tf.get_collection_ref(self.graph_collection_name) - if out not in ref: - ref.append(out) - return out - - self.fns = [ - snt.Linear(output_size=2*state_size, - initializers=initializers, - name="r_tilde_%d" % t, - custom_getter=custom_getter) - for t in xrange(num_timesteps) - ] - - def r_zt(self, z_t, observation, t): - #out = self.fns[t](tf.stop_gradient(tf.concat([z_t, observation], axis=1))) - out = self.fns[t](tf.concat([z_t, observation], axis=1)) - mu, raw_sigma_sq = tf.split(out, 2, axis=1) - sigma_sq = tf.maximum(tf.nn.softplus(raw_sigma_sq), self.sigma_min) - return mu, sigma_sq - -class TDModel(object): - - def __init__(self, - p, - q, - r_tilde, - state_size, - num_timesteps, - dtype=tf.float32, - disable_r=False): - self.p = p - self.q = q - self.r_tilde = r_tilde - self.disable_r = disable_r - self.state_size = state_size - self.num_timesteps = num_timesteps - self.dtype = dtype - - def zero_state(self, batch_size): - return tf.zeros([batch_size, self.state_size], dtype=self.dtype) - - def __call__(self, prev_state, observation, t): - """Computes the importance weight for the model system. 
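The five Tensors returned by `LongChainModel.__call__` above are combined into one incremental importance weight by the bound code (bounds.py appears later in this patch). A scalar SciPy sketch of that combination, assuming a 1-D state and an observation timestep, and ignoring the cross-timestep bookkeeping that subtracts the previous r term:

from scipy.stats import norm

z_prev, z_t, x = 0.0, 0.4, 0.3
variance = 1.0
log_q = norm(loc=0.1, scale=1.0).logpdf(z_t)                 # q(z_t | ...)
log_p = norm(loc=z_prev, scale=variance ** 0.5).logpdf(z_t)  # p(z_t | z_{t-1})
log_p_x = norm(loc=z_t, scale=variance ** 0.5).logpdf(x)     # p(x | z_t), obs steps only
log_r = norm(loc=z_t, scale=1.0).logpdf(x)                   # r(future obs | z_t)
log_weight = log_p + log_p_x + log_r - log_q
print(log_weight)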
- - Args: - prev_state: [batch_size, state_size] Tensor - observations: [batch_size, num_observations, state_size] Tensor - """ - # Compute the q distribution over z, q(z_t|z_n, z_{t-1}). - q_zt = self.q.q_zt(observation, prev_state, t) - # Compute the p distribution over z, p(z_t|z_{t-1}). - p_zt = self.p.p_zt(prev_state, t) - # sample from q and evaluate the logprobs, summing over the state size - zt = q_zt.sample() - # If it isn't the last timestep, compute the distribution over the next z. - if t < self.num_timesteps - 1: - p_ztplus1 = self.p.p_zt(zt, t+1) - else: - p_ztplus1 = None - log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=1) - log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=1) - - if not self.disable_r and t < self.num_timesteps-1: - # score the remaining observations using r - r_tilde_mu, r_tilde_sigma_sq = self.r_tilde.r_zt(zt, observation, t+1) - else: - r_tilde_mu = None - r_tilde_sigma_sq = None - if t == self.num_timesteps - 1: - generative_dist = self.p.generative(observation, zt) - log_p_x_given_z = tf.reduce_sum(generative_dist.log_prob(observation), axis=1) - else: - log_p_x_given_z = tf.zeros_like(log_q_zt) - return (zt, log_q_zt, log_p_zt, log_p_x_given_z, - r_tilde_mu, r_tilde_sigma_sq, p_ztplus1) - - @staticmethod - def create(state_size, - num_timesteps, - sigma_min=1e-5, - variance=1.0, - dtype=tf.float32, - random_seed=None, - train_p=True, - p_type="unimodal", - q_type="normal", - mixing_coeff=0.5, - prior_mode_mean=1.0, - observation_variance=1.0, - transition_type=STANDARD_TRANSITION, - use_bs=True): - if p_type == "unimodal": - p = P(state_size, - num_timesteps, - sigma_min=sigma_min, - variance=variance, - dtype=dtype, - random_seed=random_seed, - trainable=train_p, - init_bs_to_zero=not use_bs) - elif p_type == "bimodal": - p = BimodalPriorP( - state_size, - num_timesteps, - mixing_coeff=mixing_coeff, - prior_mode_mean=prior_mode_mean, - sigma_min=sigma_min, - variance=variance, - dtype=dtype, - random_seed=random_seed, - trainable=train_p, - init_bs_to_zero=not use_bs) - elif "nonlinear" in p_type: - if "cauchy" in p_type: - trans_dist = tf.contrib.distributions.Cauchy - else: - trans_dist = tf.contrib.distributions.Normal - - p = ShortChainNonlinearP( - state_size, - num_timesteps, - sigma_min=sigma_min, - variance=variance, - observation_variance=observation_variance, - transition_type=transition_type, - transition_dist=trans_dist, - dtype=dtype, - random_seed=random_seed - ) - - if q_type == "normal": - q_class = Q - elif q_type == "simple_mean": - q_class = SimpleMeanQ - elif q_type == "prev_state": - q_class = PreviousStateQ - elif q_type == "observation": - q_class = ObservationQ - - q = q_class(state_size, - num_timesteps, - sigma_min=sigma_min, - dtype=dtype, - random_seed=random_seed, - init_mu0_to_zero=not use_bs) - r_tilde = RTilde( - state_size, - num_timesteps, - sigma_min=sigma_min, - dtype=dtype, - random_seed=random_seed) - model = TDModel(p, q, r_tilde, state_size, num_timesteps, dtype=dtype) - return model diff --git a/research/fivo/experimental/run.sh b/research/fivo/experimental/run.sh deleted file mode 100644 index c650f636d..000000000 --- a/research/fivo/experimental/run.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -model="forward" -T=5 -num_obs=1 -var=0.1 -n=4 -lr=0.0001 -bound="fivo-aux" -q_type="normal" -resampling_method="multinomial" -rgrad="true" -p_type="unimodal" -use_bs=false - -LOGDIR=/tmp/fivo/model-$model-$bound-$resampling_method-resampling-rgrad-$rgrad-T-$T-var-$var-n-$n-lr-$lr-q-$q_type-p-$p_type - -python train.py \ - --logdir=$LOGDIR \ - --model=$model \ - --bound=$bound \ - --q_type=$q_type \ - --p_type=$p_type \ - --variance=$var \ - --use_resampling_grads=$rgrad \ - --resampling=always \ - --resampling_method=$resampling_method \ - --batch_size=4 \ - --num_samples=$n \ - --num_timesteps=$T \ - --num_eval_samples=256 \ - --summarize_every=100 \ - --learning_rate=$lr \ - --decay_steps=1000000 \ - --max_steps=1000000000 \ - --random_seed=1234 \ - --train_p=false \ - --use_bs=$use_bs \ - --alsologtostderr diff --git a/research/fivo/experimental/summary_utils.py b/research/fivo/experimental/summary_utils.py deleted file mode 100644 index 04e4aeea2..000000000 --- a/research/fivo/experimental/summary_utils.py +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utils for plotting and summarizing. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import matplotlib.gridspec as gridspec -import matplotlib.pyplot as plt -import numpy as np -import scipy - -import tensorflow as tf - -import models - - -def summarize_ess(weights, only_last_timestep=False): - """Plots the effective sample size. - - Args: - weights: List of length num_timesteps Tensors of shape - [num_samples, batch_size] - """ - num_timesteps = len(weights) - batch_size = tf.cast(tf.shape(weights[0])[1], dtype=tf.float64) - for i in range(num_timesteps): - if only_last_timestep and i < num_timesteps-1: continue - - w = tf.nn.softmax(weights[i], dim=0) - centered_weights = w - tf.reduce_mean(w, axis=0, keepdims=True) - variance = tf.reduce_sum(tf.square(centered_weights))/(batch_size-1) - ess = 1./tf.reduce_mean(tf.reduce_sum(tf.square(w), axis=0)) - tf.summary.scalar("ess/%d" % i, ess) - tf.summary.scalar("ese/%d" % i, ess / batch_size) - tf.summary.scalar("weight_variance/%d" % i, variance) - - -def summarize_particles(states, weights, observation, model): - """Plots particle locations and weights. 
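`summarize_ess` above implements the standard effective-sample-size diagnostic. The same computation in NumPy, for a single timestep, assuming weights are normalized over the sample axis as in the TF code:

import numpy as np

log_w = np.random.randn(4, 3)                # [num_samples, batch_size]
w = np.exp(log_w - np.max(log_w, axis=0))
w /= np.sum(w, axis=0, keepdims=True)        # softmax over the sample axis
ess = 1. / np.mean(np.sum(w ** 2, axis=0))   # averaged over the batch
print(ess)  # equals num_samples when the weights are uniform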
- - Args: - states: List of length num_timesteps Tensors of shape - [batch_size*num_particles, state_size]. - weights: List of length num_timesteps Tensors of shape [num_samples, - batch_size] - observation: Tensor of shape [batch_size*num_samples, state_size] - """ - num_timesteps = len(weights) - num_samples, batch_size = weights[0].get_shape().as_list() - # get q0 information for plotting - q0_dist = model.q.q_zt(observation, tf.zeros_like(states[0]), 0) - q0_loc = q0_dist.loc[0:batch_size, 0] - q0_scale = q0_dist.scale[0:batch_size, 0] - # get posterior information for plotting - post = (model.p.mixing_coeff, model.p.prior_mode_mean, model.p.variance, - tf.reduce_sum(model.p.bs), model.p.num_timesteps) - - # Reshape states and weights to be [time, num_samples, batch_size] - states = tf.stack(states) - weights = tf.stack(weights) - # normalize the weights over the sample dimension - weights = tf.nn.softmax(weights, dim=1) - states = tf.reshape(states, tf.shape(weights)) - - ess = 1./tf.reduce_sum(tf.square(weights), axis=1) - - def _plot_states(states_batch, weights_batch, observation_batch, ess_batch, q0, post): - """ - states: [time, num_samples, batch_size] - weights [time, num_samples, batch_size] - observation: [batch_size, 1] - q0: ([batch_size], [batch_size]) - post: ... - """ - num_timesteps, _, batch_size = states_batch.shape - plots = [] - for i in range(batch_size): - states = states_batch[:,:,i] - weights = weights_batch[:,:,i] - observation = observation_batch[i] - ess = ess_batch[:,i] - q0_loc = q0[0][i] - q0_scale = q0[1][i] - - fig = plt.figure(figsize=(7, (num_timesteps + 1) * 2)) - # Each timestep gets two plots -- a bar plot and a histogram of state locs. - # The bar plot will be bar_rows rows tall. - # The histogram will be 1 row tall. - # There is also 1 extra plot at the top showing the posterior and q. 
- bar_rows = 8 - num_rows = (num_timesteps + 1) * (bar_rows + 1) - gs = gridspec.GridSpec(num_rows, 1) - - # Figure out how wide to make the plot - prior_lims = (post[1] * -2, post[1] * 2) - q_lims = (scipy.stats.norm.ppf(0.01, loc=q0_loc, scale=q0_scale), - scipy.stats.norm.ppf(0.99, loc=q0_loc, scale=q0_scale)) - state_width = states.max() - states.min() - state_lims = (states.min() - state_width * 0.15, - states.max() + state_width * 0.15) - - lims = (min(prior_lims[0], q_lims[0], state_lims[0]), - max(prior_lims[1], q_lims[1], state_lims[1])) - # plot the posterior - z0 = np.arange(lims[0], lims[1], 0.1) - alpha, pos_mu, sigma_sq, B, T = post - neg_mu = -pos_mu - scale = np.sqrt((T + 1) * sigma_sq) - p_zn = ( - alpha * scipy.stats.norm.pdf( - observation, loc=pos_mu + B, scale=scale) + (1 - alpha) * - scipy.stats.norm.pdf(observation, loc=neg_mu + B, scale=scale)) - p_z0 = ( - alpha * scipy.stats.norm.pdf(z0, loc=pos_mu, scale=np.sqrt(sigma_sq)) - + (1 - alpha) * scipy.stats.norm.pdf( - z0, loc=neg_mu, scale=np.sqrt(sigma_sq))) - p_zn_given_z0 = scipy.stats.norm.pdf( - observation, loc=z0 + B, scale=np.sqrt(T * sigma_sq)) - post_z0 = (p_z0 * p_zn_given_z0) / p_zn - # plot q - q_z0 = scipy.stats.norm.pdf(z0, loc=q0_loc, scale=q0_scale) - ax = plt.subplot(gs[0:bar_rows, :]) - ax.plot(z0, q_z0, color="blue") - ax.plot(z0, post_z0, color="green") - ax.plot(z0, p_z0, color="red") - ax.legend(("q", "posterior", "prior"), loc="best", prop={"size": 10}) - - ax.set_xticks([]) - ax.set_xlim(*lims) - - # plot the states - for t in range(num_timesteps): - start = (t + 1) * (bar_rows + 1) - ax1 = plt.subplot(gs[start:start + bar_rows, :]) - ax2 = plt.subplot(gs[start + bar_rows:start + bar_rows + 1, :]) - # plot the states barplot - # ax1.hist( - # states[t, :], - # weights=weights[t, :], - # bins=50, - # edgecolor="none", - # alpha=0.2) - ax1.bar(states[t,:], weights[t,:], width=0.02, alpha=0.2, edgecolor = "none") - ax1.set_ylabel("t=%d" % t) - ax1.set_xticks([]) - ax1.grid(True, which="both") - ax1.set_xlim(*lims) - # plot the observation - ax1.axvline(x=observation, color="red", linestyle="dashed") - # add the ESS - ax1.text(0.1, 0.9, "ESS: %0.2f" % ess[t], - ha='center', va='center', transform=ax1.transAxes) - - # plot the state location histogram - ax2.hist2d( - states[t, :], np.zeros_like(states[t, :]), bins=[50, 1], cmap="Greys") - ax2.grid(False) - ax2.set_yticks([]) - ax2.set_xlim(*lims) - if t != num_timesteps - 1: - ax2.set_xticks([]) - - fig.canvas.draw() - p = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - plots.append(p.reshape(fig.canvas.get_width_height()[::-1] + (3,))) - plt.close(fig) - return np.stack(plots) - - plots = tf.py_func(_plot_states, - [states, weights, observation, ess, (q0_loc, q0_scale), post], - [tf.uint8])[0] - tf.summary.image("states", plots, 5, collections=["infrequent_summaries"]) - - -def plot_weights(weights, resampled=None): - """Plots the weights and effective sample size from an SMC rollout. 
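The posterior over z_0 plotted above has a closed form because the bimodal prior is a two-component Gaussian mixture and the chain of transitions is Gaussian. A standalone NumPy/SciPy restatement of exactly the quantities computed in `_plot_states` (argument names are illustrative):

import numpy as np
from scipy.stats import norm

def z0_posterior(z0, obs, alpha, mu, sigma_sq, B, T):
  # Mixture prior over z_0, modes at +/- mu.
  p_z0 = (alpha * norm.pdf(z0, loc=mu, scale=np.sqrt(sigma_sq)) +
          (1 - alpha) * norm.pdf(z0, loc=-mu, scale=np.sqrt(sigma_sq)))
  # T Gaussian transitions of variance sigma_sq, plus total bias B.
  p_obs_given_z0 = norm.pdf(obs, loc=z0 + B, scale=np.sqrt(T * sigma_sq))
  # Marginal of the observation: mixture with inflated variance.
  p_obs = (alpha * norm.pdf(obs, loc=mu + B, scale=np.sqrt((T + 1) * sigma_sq)) +
           (1 - alpha) * norm.pdf(obs, loc=-mu + B, scale=np.sqrt((T + 1) * sigma_sq)))
  return p_z0 * p_obs_given_z0 / p_obs

print(z0_posterior(z0=0.5, obs=1.0, alpha=0.5, mu=1.0, sigma_sq=1.0, B=0.0, T=4))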
-
-  Args:
-    weights: [num_timesteps, num_samples, batch_size] importance weights
-    resampled: [num_timesteps] 0/1 indicating if resampling occurred
-  """
-  weights = tf.convert_to_tensor(weights)
-
-  def _make_plots(weights, resampled):
-    num_timesteps, num_samples, batch_size = weights.shape
-    plots = []
-    for i in range(batch_size):
-      fig, axes = plt.subplots(nrows=1, sharex=True, figsize=(8, 4))
-      axes.stackplot(np.arange(num_timesteps), np.transpose(weights[:, :, i]))
-      axes.set_title("Weights")
-      axes.set_xlabel("Steps")
-      axes.set_ylim([0, 1])
-      axes.set_xlim([0, num_timesteps - 1])
-      for j in np.where(resampled > 0)[0]:
-        axes.axvline(x=j, color="red", linestyle="dashed", ymin=0.0, ymax=1.0)
-      fig.canvas.draw()
-      data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
-      data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-      plots.append(data)
-      plt.close(fig)
-    return np.stack(plots, axis=0)
-
-  if resampled is None:
-    num_timesteps, _, batch_size = weights.get_shape().as_list()
-    resampled = tf.zeros([num_timesteps], dtype=tf.float32)
-  plots = tf.py_func(_make_plots,
-                     [tf.nn.softmax(weights, dim=1),
-                      tf.to_float(resampled)], [tf.uint8])[0]
-  batch_size = weights.get_shape().as_list()[-1]
-  tf.summary.image(
-      "weights", plots, batch_size, collections=["infrequent_summaries"])
-
-
-def summarize_weights(weights, num_timesteps, num_samples):
-  # weights is [num_timesteps, num_samples, batch_size]
-  weights = tf.convert_to_tensor(weights)
-  mean = tf.reduce_mean(weights, axis=1, keepdims=True)
-  squared_diff = tf.square(weights - mean)
-  variances = tf.reduce_sum(squared_diff, axis=1) / (num_samples - 1)
-  # average the variance over the batch
-  variances = tf.reduce_mean(variances, axis=1)
-  avg_magnitude = tf.reduce_mean(tf.abs(weights), axis=[1, 2])
-  for t in xrange(num_timesteps):
-    tf.summary.scalar("weights/variance_%d" % t, variances[t])
-    tf.summary.scalar("weights/magnitude_%d" % t, avg_magnitude[t])
-    tf.summary.histogram("weights/step_%d" % t, weights[t])
-
-
-def summarize_learning_signal(rewards, tag):
-  num_resampling_events, _ = rewards.get_shape().as_list()
-  mean = tf.reduce_mean(rewards, axis=1)
-  avg_magnitude = tf.reduce_mean(tf.abs(rewards), axis=1)
-  reward_square = tf.reduce_mean(tf.square(rewards), axis=1)
-  for t in xrange(num_resampling_events):
-    tf.summary.scalar("%s/mean_%d" % (tag, t), mean[t])
-    tf.summary.scalar("%s/magnitude_%d" % (tag, t), avg_magnitude[t])
-    tf.summary.scalar("%s/squared_%d" % (tag, t), reward_square[t])
-    tf.summary.histogram("%s/step_%d" % (tag, t), rewards[t])
-
-
-def summarize_qs(model, observation, states):
-  model.q.summarize_weights()
-  if hasattr(model.p, "posterior") and callable(getattr(model.p, "posterior")):
-    states = [tf.zeros_like(states[0])] + states[:-1]
-    for t, prev_state in enumerate(states):
-      p = model.p.posterior(observation, prev_state, t)
-      q = model.q.q_zt(observation, prev_state, t)
-      kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(p, q))
-      tf.summary.scalar("kl_q/%d" % t, tf.reduce_mean(kl))
-      mean_diff = q.loc - p.loc
-      mean_abs_err = tf.abs(mean_diff)
-      mean_rel_err = tf.abs(mean_diff / p.loc)
-      tf.summary.scalar("q_mean_convergence/absolute_error_%d" % t,
-                        tf.reduce_mean(mean_abs_err))
-      tf.summary.scalar("q_mean_convergence/relative_error_%d" % t,
-                        tf.reduce_mean(mean_rel_err))
-      sigma_diff = tf.square(q.scale) - tf.square(p.scale)
-      sigma_abs_err = tf.abs(sigma_diff)
-      sigma_rel_err = tf.abs(sigma_diff / tf.square(p.scale))
-      tf.summary.scalar("q_variance_convergence/absolute_error_%d" % t,
-                        tf.reduce_mean(sigma_abs_err))
-      tf.summary.scalar("q_variance_convergence/relative_error_%d" % t,
-                        tf.reduce_mean(sigma_rel_err))
-
-
-def summarize_rs(model, states):
-  model.r.summarize_weights()
-  for t, state in enumerate(states):
-    true_r = model.p.lookahead(state, t)
-    r = model.r.r_xn(state, t)
-    kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(true_r, r))
-    tf.summary.scalar("kl_r/%d" % t, tf.reduce_mean(kl))
-    mean_diff = true_r.loc - r.loc
-    mean_abs_err = tf.abs(mean_diff)
-    mean_rel_err = tf.abs(mean_diff / true_r.loc)
-    tf.summary.scalar("r_mean_convergence/absolute_error_%d" % t,
-                      tf.reduce_mean(mean_abs_err))
-    tf.summary.scalar("r_mean_convergence/relative_error_%d" % t,
-                      tf.reduce_mean(mean_rel_err))
-    sigma_diff = tf.square(r.scale) - tf.square(true_r.scale)
-    sigma_abs_err = tf.abs(sigma_diff)
-    sigma_rel_err = tf.abs(sigma_diff / tf.square(true_r.scale))
-    tf.summary.scalar("r_variance_convergence/absolute_error_%d" % t,
-                      tf.reduce_mean(sigma_abs_err))
-    tf.summary.scalar("r_variance_convergence/relative_error_%d" % t,
-                      tf.reduce_mean(sigma_rel_err))
-
-
-def summarize_model(model, true_bs, observation, states, bound, summarize_r=True):
-  if hasattr(model.p, "bs"):
-    model_b = tf.reduce_sum(model.p.bs, axis=0)
-    true_b = tf.reduce_sum(true_bs, axis=0)
-    abs_err = tf.abs(model_b - true_b)
-    rel_err = abs_err / true_b
-    tf.summary.scalar("sum_of_bs/data_generating_process", tf.reduce_mean(true_b))
-    tf.summary.scalar("sum_of_bs/model", tf.reduce_mean(model_b))
-    tf.summary.scalar("sum_of_bs/absolute_error", tf.reduce_mean(abs_err))
-    tf.summary.scalar("sum_of_bs/relative_error", tf.reduce_mean(rel_err))
-  #summarize_qs(model, observation, states)
-  #if bound == "fivo-aux" and summarize_r:
-  #  summarize_rs(model, states)
-
-
-def summarize_grads(grads, loss_name):
-  grad_ema = tf.train.ExponentialMovingAverage(decay=0.99)
-  vectorized_grads = tf.concat(
-      [tf.reshape(g, [-1]) for g, _ in grads if g is not None], axis=0)
-  new_second_moments = tf.square(vectorized_grads)
-  new_first_moments = vectorized_grads
-  maintain_grad_ema_op = grad_ema.apply([new_first_moments, new_second_moments])
-  first_moments = grad_ema.average(new_first_moments)
-  second_moments = grad_ema.average(new_second_moments)
-  variances = second_moments - tf.square(first_moments)
-  tf.summary.scalar("grad_variance/%s" % loss_name, tf.reduce_mean(variances))
-  tf.summary.histogram("grad_variance/%s" % loss_name, variances)
-  tf.summary.histogram("grad_mean/%s" % loss_name, first_moments)
-  return maintain_grad_ema_op
diff --git a/research/fivo/experimental/train.py b/research/fivo/experimental/train.py
deleted file mode 100644
index 8abc9909b..000000000
--- a/research/fivo/experimental/train.py
+++ /dev/null
@@ -1,637 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Main script for running FIVO."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import defaultdict
-
-import numpy as np
-import tensorflow as tf
-
-import bounds
-import data
-import models
-import summary_utils as summ
-
-tf.logging.set_verbosity(tf.logging.INFO)
-
-tf.app.flags.DEFINE_integer("random_seed", None,
-                            "A random seed for the data generating process. Same seed "
-                            "-> same data generating process and initialization.")
-tf.app.flags.DEFINE_enum("bound", "fivo", ["iwae", "fivo", "fivo-aux", "fivo-aux-td"],
-                         "The bound to optimize.")
-tf.app.flags.DEFINE_enum("model", "forward", ["forward", "long_chain"],
-                         "The model to use.")
-tf.app.flags.DEFINE_enum("q_type", "normal",
-                         ["normal", "simple_mean", "prev_state", "observation"],
-                         "The parameterization to use for q.")
-tf.app.flags.DEFINE_enum("p_type", "unimodal", ["unimodal", "bimodal", "nonlinear"],
-                         "The type of prior.")
-tf.app.flags.DEFINE_boolean("train_p", True,
-                            "If false, do not train the model p.")
-
-tf.app.flags.DEFINE_integer("state_size", 1,
-                            "The dimensionality of the state space.")
-tf.app.flags.DEFINE_float("variance", 1.0,
-                          "The variance of the data generating process.")
-
-tf.app.flags.DEFINE_boolean("use_bs", True,
-                            "If False, initialize all bs to 0.")
-tf.app.flags.DEFINE_float("bimodal_prior_weight", 0.5,
                          "The weight assigned to the positive mode of the prior in "
-                          "both the data generating process and p.")
-tf.app.flags.DEFINE_float("bimodal_prior_mean", None,
-                          "If supplied, sets the mean of the 2 modes of the prior to "
-                          "be 1 and -1 times the supplied value. This is for both the "
-                          "data generating process and p.")
-tf.app.flags.DEFINE_float("fixed_observation", None,
-                          "If supplied, fix the observation to a constant value in the"
-                          " data generating process only.")
-tf.app.flags.DEFINE_float("r_sigma_init", 1.,
-                          "Value to initialize variance of r to.")
-tf.app.flags.DEFINE_enum("observation_type",
-                         models.STANDARD_OBSERVATION, models.OBSERVATION_TYPES,
-                         "The type of observation for the long chain model.")
-tf.app.flags.DEFINE_enum("transition_type",
-                         models.STANDARD_TRANSITION, models.TRANSITION_TYPES,
-                         "The type of transition for the long chain model.")
-tf.app.flags.DEFINE_float("observation_variance", None,
-                          "The variance of the observation. Defaults to 'variance'.")
-
-tf.app.flags.DEFINE_integer("num_timesteps", 5,
-                            "Number of timesteps in the sequence.")
-tf.app.flags.DEFINE_integer("num_observations", 1,
-                            "The number of observations.")
-tf.app.flags.DEFINE_integer("steps_per_observation", 5,
-                            "The number of timesteps between each observation.")
-
-tf.app.flags.DEFINE_integer("batch_size", 4,
-                            "The number of examples per batch.")
-tf.app.flags.DEFINE_integer("num_samples", 4,
-                            "The number of particles to use.")
-tf.app.flags.DEFINE_integer("num_eval_samples", 512,
-                            "The batch size and # of particles to use for eval.")
-
-tf.app.flags.DEFINE_string("resampling", "always",
-                           "How to resample. Accepts 'always', 'never', or a "
-                           "comma-separated list of booleans like 'true,true,false'.")
-tf.app.flags.DEFINE_enum("resampling_method", "multinomial", ["multinomial",
-                                                              "stratified",
-                                                              "systematic",
-                                                              "relaxed-logblend",
-                                                              "relaxed-stateblend",
-                                                              "relaxed-linearblend",
-                                                              "relaxed-stateblend-st",],
-                         "Type of resampling method to use.")
-tf.app.flags.DEFINE_boolean("use_resampling_grads", True,
-                            "Whether or not to use resampling grads to optimize FIVO. "
-                            "Disabled automatically if resampling_method=relaxed.")
-tf.app.flags.DEFINE_boolean("disable_r", False,
-                            "If true, r is not used for fivo-aux and is set to zeros.")
-
-tf.app.flags.DEFINE_float("learning_rate", 1e-4,
-                          "The learning rate to use for ADAM or SGD.")
-tf.app.flags.DEFINE_integer("decay_steps", 25000,
-                            "The number of steps before the learning rate is halved.")
-tf.app.flags.DEFINE_integer("max_steps", int(1e6),
-                            "The number of steps to run training for.")
-
-tf.app.flags.DEFINE_string("logdir", "/tmp/fivo-aux",
-                           "Directory for summaries and checkpoints.")
-
-tf.app.flags.DEFINE_integer("summarize_every", int(1e3),
-                            "The number of steps between each evaluation.")
-FLAGS = tf.app.flags.FLAGS
-
-
-def combine_grad_lists(grad_lists):
-  # grads is num_losses by num_variables.
-  # each list could have different variables.
-  # for each variable, sum the grads across all losses.
-  grads_dict = defaultdict(list)
-  var_dict = {}
-  for grad_list in grad_lists:
-    for grad, var in grad_list:
-      if grad is not None:
-        grads_dict[var.name].append(grad)
-      var_dict[var.name] = var
-
-  final_grads = []
-  for var_name, var in var_dict.iteritems():
-    grads = grads_dict[var_name]
-    if len(grads) > 0:
-      tf.logging.info("Var %s has combined grads from %s." %
-                      (var_name, [g.name for g in grads]))
-      grad = tf.reduce_sum(grads, axis=0)
-    else:
-      tf.logging.info("Var %s has no grads" % var_name)
-      grad = None
-    final_grads.append((grad, var))
-  return final_grads
-
-
-def make_apply_grads_op(losses, global_step, learning_rate, lr_decay_steps):
-  for l in losses:
-    assert isinstance(l, bounds.Loss)
-
-  lr = tf.train.exponential_decay(
-      learning_rate, global_step, lr_decay_steps, 0.5, staircase=False)
-  tf.summary.scalar("learning_rate", lr)
-  opt = tf.train.AdamOptimizer(lr)
-
-  ema_ops = []
-  grads = []
-  for loss_name, loss, loss_var_collection in losses:
-    tf.logging.info("Computing grads of %s w.r.t. vars in collection %s" %
-                    (loss_name, loss_var_collection))
-    g = opt.compute_gradients(loss,
-                              var_list=tf.get_collection(loss_var_collection))
-    ema_ops.append(summ.summarize_grads(g, loss_name))
-    grads.append(g)
-
-  all_grads = combine_grad_lists(grads)
-  apply_grads_op = opt.apply_gradients(all_grads, global_step=global_step)
-
-  # Update the emas after applying the grads.
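Before the train op is assembled below, a note on `combine_grad_lists` above: it merges per-loss gradient lists by summing gradients variable-by-variable. The merging rule, restated with plain-Python stand-ins for the (grad, var) pairs (values and names are hypothetical):

from collections import defaultdict

grad_lists = [[(1.0, "w"), (2.0, "b")],   # grads of loss 1
              [(3.0, "w"), (None, "b")]]  # grads of loss 2
grads = defaultdict(list)
for grad_list in grad_lists:
  for g, var in grad_list:
    if g is not None:
      grads[var].append(g)
# Per variable, sum the grads contributed by every loss.
combined = {var: sum(gs) for var, gs in grads.items()}
print(combined)  # {'w': 4.0, 'b': 2.0}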
- with tf.control_dependencies([apply_grads_op]): - train_op = tf.group(*ema_ops) - return train_op - - -def add_check_numerics_ops(): - check_op = [] - for op in tf.get_default_graph().get_operations(): - bad = ["logits/Log", "sample/Reshape", "log_prob/mul", - "log_prob/SparseSoftmaxCrossEntropyWithLogits/Reshape", - "entropy/Reshape", "entropy/LogSoftmax", "Categorical", "Mean"] - if all([x not in op.name for x in bad]): - for output in op.outputs: - if output.dtype in [tf.float16, tf.float32, tf.float64]: - if op._get_control_flow_context() is not None: # pylint: disable=protected-access - raise ValueError("`tf.add_check_numerics_ops() is not compatible " - "with TensorFlow control flow operations such as " - "`tf.cond()` or `tf.while_loop()`.") - - message = op.name + ":" + str(output.value_index) - with tf.control_dependencies(check_op): - check_op = [tf.check_numerics(output, message=message)] - return tf.group(*check_op) - - -def create_long_chain_graph(bound, state_size, num_obs, steps_per_obs, - batch_size, num_samples, num_eval_samples, - resampling_schedule, use_resampling_grads, - learning_rate, lr_decay_steps, dtype="float64"): - num_timesteps = num_obs * steps_per_obs + 1 - # Make the dataset. - dataset = data.make_long_chain_dataset( - state_size=state_size, - num_obs=num_obs, - steps_per_obs=steps_per_obs, - batch_size=batch_size, - num_samples=num_samples, - variance=FLAGS.variance, - observation_variance=FLAGS.observation_variance, - dtype=dtype, - observation_type=FLAGS.observation_type, - transition_type=FLAGS.transition_type, - fixed_observation=FLAGS.fixed_observation) - itr = dataset.make_one_shot_iterator() - _, observations = itr.get_next() - # Make the dataset for eval - eval_dataset = data.make_long_chain_dataset( - state_size=state_size, - num_obs=num_obs, - steps_per_obs=steps_per_obs, - batch_size=batch_size, - num_samples=num_eval_samples, - variance=FLAGS.variance, - observation_variance=FLAGS.observation_variance, - dtype=dtype, - observation_type=FLAGS.observation_type, - transition_type=FLAGS.transition_type, - fixed_observation=FLAGS.fixed_observation) - eval_itr = eval_dataset.make_one_shot_iterator() - _, eval_observations = eval_itr.get_next() - - # Make the model. 
-  model = models.LongChainModel.create(
-      state_size,
-      num_obs,
-      steps_per_obs,
-      observation_type=FLAGS.observation_type,
-      transition_type=FLAGS.transition_type,
-      variance=FLAGS.variance,
-      observation_variance=FLAGS.observation_variance,
-      dtype=tf.as_dtype(dtype),
-      disable_r=FLAGS.disable_r)
-
-  # Compute the bound and loss
-  if bound == "iwae":
-    (_, losses, ema_op, _, _) = bounds.iwae(
-        model,
-        observations,
-        num_timesteps,
-        num_samples=num_samples)
-    (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.iwae(
-        model,
-        eval_observations,
-        num_timesteps,
-        num_samples=num_eval_samples,
-        summarize=False)
-    eval_log_p_hat = tf.reduce_mean(eval_log_p_hat)
-  elif bound in ("fivo", "fivo-aux"):
-    (_, losses, ema_op, _, _) = bounds.fivo(
-        model,
-        observations,
-        num_timesteps,
-        resampling_schedule=resampling_schedule,
-        use_resampling_grads=use_resampling_grads,
-        resampling_type=FLAGS.resampling_method,
-        aux=("aux" in bound),
-        num_samples=num_samples)
-    (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.fivo(
-        model,
-        eval_observations,
-        num_timesteps,
-        resampling_schedule=resampling_schedule,
-        use_resampling_grads=False,
-        resampling_type="multinomial",
-        aux=("aux" in bound),
-        num_samples=num_eval_samples,
-        summarize=False)
-    eval_log_p_hat = tf.reduce_mean(eval_log_p_hat)
-
-  summ.summarize_ess(eval_log_weights, only_last_timestep=True)
-
-  tf.summary.scalar("log_p_hat", eval_log_p_hat)
-
-  # Compute and apply grads.
-  global_step = tf.train.get_or_create_global_step()
-
-  apply_grads = make_apply_grads_op(losses,
-                                    global_step,
-                                    learning_rate,
-                                    lr_decay_steps)
-
-  # Update the emas after applying the grads.
-  with tf.control_dependencies([apply_grads]):
-    train_op = tf.group(ema_op)
-
-  # We can't calculate the likelihood for most of these models
-  # so we just return zeros.
-  eval_likelihood = tf.zeros([], dtype=dtype)
-  return global_step, train_op, eval_log_p_hat, eval_likelihood
-
-
-def create_graph(bound, state_size, num_timesteps, batch_size,
-                 num_samples, num_eval_samples, resampling_schedule,
-                 use_resampling_grads, learning_rate, lr_decay_steps,
-                 train_p, dtype='float64'):
-  if FLAGS.use_bs:
-    true_bs = None
-  else:
-    true_bs = [np.zeros([state_size]).astype(dtype) for _ in xrange(num_timesteps)]
-
-  # Make the dataset.
-  true_bs, dataset = data.make_dataset(
-      bs=true_bs,
-      state_size=state_size,
-      num_timesteps=num_timesteps,
-      batch_size=batch_size,
-      num_samples=num_samples,
-      variance=FLAGS.variance,
-      prior_type=FLAGS.p_type,
-      bimodal_prior_weight=FLAGS.bimodal_prior_weight,
-      bimodal_prior_mean=FLAGS.bimodal_prior_mean,
-      transition_type=FLAGS.transition_type,
-      fixed_observation=FLAGS.fixed_observation,
-      dtype=dtype)
-  itr = dataset.make_one_shot_iterator()
-  _, observations = itr.get_next()
-  # Make the dataset for eval
-  _, eval_dataset = data.make_dataset(
-      bs=true_bs,
-      state_size=state_size,
-      num_timesteps=num_timesteps,
-      batch_size=num_eval_samples,
-      num_samples=num_eval_samples,
-      variance=FLAGS.variance,
-      prior_type=FLAGS.p_type,
-      bimodal_prior_weight=FLAGS.bimodal_prior_weight,
-      bimodal_prior_mean=FLAGS.bimodal_prior_mean,
-      transition_type=FLAGS.transition_type,
-      fixed_observation=FLAGS.fixed_observation,
-      dtype=dtype)
-  eval_itr = eval_dataset.make_one_shot_iterator()
-  _, eval_observations = eval_itr.get_next()
-
-  # Make the model.
- if bound == "fivo-aux-td": - model = models.TDModel.create( - state_size, - num_timesteps, - variance=FLAGS.variance, - train_p=train_p, - p_type=FLAGS.p_type, - q_type=FLAGS.q_type, - mixing_coeff=FLAGS.bimodal_prior_weight, - prior_mode_mean=FLAGS.bimodal_prior_mean, - observation_variance=FLAGS.observation_variance, - transition_type=FLAGS.transition_type, - use_bs=FLAGS.use_bs, - dtype=tf.as_dtype(dtype), - random_seed=FLAGS.random_seed) - else: - model = models.Model.create( - state_size, - num_timesteps, - variance=FLAGS.variance, - train_p=train_p, - p_type=FLAGS.p_type, - q_type=FLAGS.q_type, - mixing_coeff=FLAGS.bimodal_prior_weight, - prior_mode_mean=FLAGS.bimodal_prior_mean, - observation_variance=FLAGS.observation_variance, - transition_type=FLAGS.transition_type, - use_bs=FLAGS.use_bs, - r_sigma_init=FLAGS.r_sigma_init, - dtype=tf.as_dtype(dtype), - random_seed=FLAGS.random_seed) - - # Compute the bound and loss - if bound == "iwae": - (_, losses, ema_op, _, _) = bounds.iwae( - model, - observations, - num_timesteps, - num_samples=num_samples) - (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.iwae( - model, - eval_observations, - num_timesteps, - num_samples=num_eval_samples, - summarize=True) - - eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) - - elif "fivo" in bound: - if bound == "fivo-aux-td": - (_, losses, ema_op, _, _) = bounds.fivo_aux_td( - model, - observations, - num_timesteps, - resampling_schedule=resampling_schedule, - num_samples=num_samples) - (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo_aux_td( - model, - eval_observations, - num_timesteps, - resampling_schedule=resampling_schedule, - num_samples=num_eval_samples, - summarize=True) - else: - (_, losses, ema_op, _, _) = bounds.fivo( - model, - observations, - num_timesteps, - resampling_schedule=resampling_schedule, - use_resampling_grads=use_resampling_grads, - resampling_type=FLAGS.resampling_method, - aux=("aux" in bound), - num_samples=num_samples) - (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo( - model, - eval_observations, - num_timesteps, - resampling_schedule=resampling_schedule, - use_resampling_grads=False, - resampling_type="multinomial", - aux=("aux" in bound), - num_samples=num_eval_samples, - summarize=True) - eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) - - summ.summarize_ess(eval_log_weights, only_last_timestep=True) - - # if FLAGS.p_type == "bimodal": - # # create the observations that showcase the model. 
- # mode_odds_ratio = tf.convert_to_tensor([1., 3., 1./3., 512., 1./512.], - # dtype=tf.float64) - # mode_odds_ratio = tf.expand_dims(mode_odds_ratio, 1) - # k = ((num_timesteps+1) * FLAGS.variance) / (2*FLAGS.bimodal_prior_mean) - # explain_obs = tf.reduce_sum(model.p.bs) + tf.log(mode_odds_ratio) * k - # explain_obs = tf.tile(explain_obs, [num_eval_samples, 1]) - # # run the model on the explainable observations - # if bound == "iwae": - # (_, _, _, explain_states, explain_log_weights) = bounds.iwae( - # model, - # explain_obs, - # num_timesteps, - # num_samples=num_eval_samples) - # elif bound == "fivo" or "fivo-aux": - # (_, _, _, explain_states, explain_log_weights) = bounds.fivo( - # model, - # explain_obs, - # num_timesteps, - # resampling_schedule=resampling_schedule, - # use_resampling_grads=False, - # resampling_type="multinomial", - # aux=("aux" in bound), - # num_samples=num_eval_samples) - # summ.summarize_particles(explain_states, - # explain_log_weights, - # explain_obs, - # model) - - # Calculate the true likelihood. - if hasattr(model.p, 'likelihood') and callable(getattr(model.p, 'likelihood')): - eval_likelihood = model.p.likelihood(eval_observations)/ FLAGS.num_timesteps - else: - eval_likelihood = tf.zeros_like(eval_log_p_hat) - - tf.summary.scalar("log_p_hat", eval_log_p_hat) - tf.summary.scalar("likelihood", eval_likelihood) - tf.summary.scalar("bound_gap", eval_likelihood - eval_log_p_hat) - summ.summarize_model(model, true_bs, eval_observations, eval_states, bound, - summarize_r=not bound == "fivo-aux-td") - - # Compute and apply grads. - global_step = tf.train.get_or_create_global_step() - - apply_grads = make_apply_grads_op(losses, - global_step, - learning_rate, - lr_decay_steps) - - # Update the emas after applying the grads. - with tf.control_dependencies([apply_grads]): - train_op = tf.group(ema_op) - #train_op = tf.group(ema_op, add_check_numerics_ops()) - - return global_step, train_op, eval_log_p_hat, eval_likelihood - - -def parse_resampling_schedule(schedule, num_timesteps): - schedule = schedule.strip().lower() - if schedule == "always": - return [True] * (num_timesteps - 1) + [False] - elif schedule == "never": - return [False] * num_timesteps - elif "every" in schedule: - n = int(schedule.split("_")[1]) - return [(i+1) % n == 0 for i in xrange(num_timesteps)] - else: - sched = [x.strip() == "true" for x in schedule.split(",")] - assert len( - sched - ) == num_timesteps, "Wrong number of timesteps in resampling schedule." 
- return sched - - -def create_log_hook(step, eval_log_p_hat, eval_likelihood): - def summ_formatter(d): - return ("Step {step}, log p_hat: {log_p_hat:.5f} likelihood: {likelihood:.5f}".format(**d)) - hook = tf.train.LoggingTensorHook( - { - "step": step, - "log_p_hat": eval_log_p_hat, - "likelihood": eval_likelihood, - }, - every_n_iter=FLAGS.summarize_every, - formatter=summ_formatter) - return hook - - -def create_infrequent_summary_hook(): - infrequent_summary_hook = tf.train.SummarySaverHook( - save_steps=10000, - output_dir=FLAGS.logdir, - summary_op=tf.summary.merge_all(key="infrequent_summaries") - ) - return infrequent_summary_hook - - -def main(unused_argv): - if FLAGS.model == "long_chain": - resampling_schedule = parse_resampling_schedule(FLAGS.resampling, - FLAGS.num_timesteps + 1) - else: - resampling_schedule = parse_resampling_schedule(FLAGS.resampling, - FLAGS.num_timesteps) - if FLAGS.random_seed is None: - seed = np.random.randint(0, high=10000) - else: - seed = FLAGS.random_seed - tf.logging.info("Using random seed %d", seed) - - if FLAGS.model == "long_chain": - assert FLAGS.q_type == "normal", "Q type %s not supported for long chain models" % FLAGS.q_type - assert FLAGS.p_type == "unimodal", "Bimodal priors are not supported for long chain models" - assert not FLAGS.use_bs, "Bs are not supported with long chain models" - assert FLAGS.num_timesteps == FLAGS.num_observations * FLAGS.steps_per_observation, "Num timesteps does not match." - assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with long chain models." - - if FLAGS.model == "forward": - if "nonlinear" not in FLAGS.p_type: - assert FLAGS.transition_type == models.STANDARD_TRANSITION, "Non-standard transitions not supported by the forward model." - assert FLAGS.observation_type == models.STANDARD_OBSERVATION, "Non-standard observations not supported by the forward model." - assert FLAGS.observation_variance is None, "Forward model does not support observation variance." - assert FLAGS.num_observations == 1, "Forward model only supports 1 observation." - - if "relaxed" in FLAGS.resampling_method: - FLAGS.use_resampling_grads = False - assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with relaxed resampling." - - if FLAGS.observation_variance is None: - FLAGS.observation_variance = FLAGS.variance - - if FLAGS.p_type == "bimodal": - assert FLAGS.bimodal_prior_mean is not None, "Must specify prior mean if using bimodal p." - - if FLAGS.p_type == "nonlinear" or FLAGS.p_type == "nonlinear-cauchy": - assert not FLAGS.use_bs, "Using bs is not compatible with the nonlinear model." - - g = tf.Graph() - with g.as_default(): - # Set the seeds. 
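For a quick check of the schedule strings accepted above, here is a standalone copy of the parsing rules (minus the length assert; `range` substitutes for the py2 `xrange`):

def parse(schedule, num_timesteps):
  schedule = schedule.strip().lower()
  if schedule == "always":
    return [True] * (num_timesteps - 1) + [False]
  if schedule == "never":
    return [False] * num_timesteps
  if "every" in schedule:
    n = int(schedule.split("_")[1])
    return [(i + 1) % n == 0 for i in range(num_timesteps)]
  return [x.strip() == "true" for x in schedule.split(",")]

print(parse("always", 4))   # [True, True, True, False]: no resample at the end
print(parse("every_2", 4))  # [False, True, False, True]
print(parse("true,false,true,false", 4))  # [True, False, True, False]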
- tf.set_random_seed(seed) - np.random.seed(seed) - if FLAGS.model == "long_chain": - (global_step, train_op, eval_log_p_hat, - eval_likelihood) = create_long_chain_graph( - FLAGS.bound, - FLAGS.state_size, - FLAGS.num_observations, - FLAGS.steps_per_observation, - FLAGS.batch_size, - FLAGS.num_samples, - FLAGS.num_eval_samples, - resampling_schedule, - FLAGS.use_resampling_grads, - FLAGS.learning_rate, - FLAGS.decay_steps) - else: - (global_step, train_op, - eval_log_p_hat, eval_likelihood) = create_graph( - FLAGS.bound, - FLAGS.state_size, - FLAGS.num_timesteps, - FLAGS.batch_size, - FLAGS.num_samples, - FLAGS.num_eval_samples, - resampling_schedule, - FLAGS.use_resampling_grads, - FLAGS.learning_rate, - FLAGS.decay_steps, - FLAGS.train_p) - - log_hooks = [create_log_hook(global_step, eval_log_p_hat, eval_likelihood)] - if len(tf.get_collection("infrequent_summaries")) > 0: - log_hooks.append(create_infrequent_summary_hook()) - - tf.logging.info("trainable variables:") - tf.logging.info([v.name for v in tf.trainable_variables()]) - tf.logging.info("p vars:") - tf.logging.info([v.name for v in tf.get_collection("P_VARS")]) - tf.logging.info("q vars:") - tf.logging.info([v.name for v in tf.get_collection("Q_VARS")]) - tf.logging.info("r vars:") - tf.logging.info([v.name for v in tf.get_collection("R_VARS")]) - tf.logging.info("r tilde vars:") - tf.logging.info([v.name for v in tf.get_collection("R_TILDE_VARS")]) - - with tf.train.MonitoredTrainingSession( - master="", - is_chief=True, - hooks=log_hooks, - checkpoint_dir=FLAGS.logdir, - save_checkpoint_secs=120, - save_summaries_steps=FLAGS.summarize_every, - log_step_count_steps=FLAGS.summarize_every) as sess: - cur_step = -1 - while True: - if sess.should_stop() or cur_step > FLAGS.max_steps: - break - # run a step - _, cur_step = sess.run([train_op, global_step]) - - -if __name__ == "__main__": - tf.app.run(main) diff --git a/research/fivo/fivo/__init__.py b/research/fivo/fivo/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/fivo/fivo/bounds.py b/research/fivo/fivo/bounds.py deleted file mode 100644 index 088519033..000000000 --- a/research/fivo/fivo/bounds.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Implementation of objectives for training stochastic latent variable models. - -Contains implementations of the Importance Weighted Autoencoder objective (IWAE) -and the Filtering Variational objective (FIVO). -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import tensorflow as tf - -from fivo import nested_utils as nested -from fivo import smc - - -def iwae(model, - observations, - seq_lengths, - num_samples=1, - parallel_iterations=30, - swap_memory=True): - """Computes the IWAE lower bound on the log marginal probability. 
- - This method accepts a stochastic latent variable model and some observations - and computes a stochastic lower bound on the log marginal probability of the - observations. The IWAE estimator is defined by averaging multiple importance - weights. For more details see "Importance Weighted Autoencoders" by Burda - et al. https://arxiv.org/abs/1509.00519. - - When num_samples = 1, this bound becomes the evidence lower bound (ELBO). - - Args: - model: A subclass of ELBOTrainableSequenceModel that implements one - timestep of the model. See models/vrnn.py for an example. - observations: The inputs to the model. A potentially nested list or tuple of - Tensors each of shape [max_seq_len, batch_size, ...]. The Tensors must - have a rank at least two and have matching shapes in the first two - dimensions, which represent time and the batch respectively. The model - will be provided with the observations before computing the bound. - seq_lengths: A [batch_size] Tensor of ints encoding the length of each - sequence in the batch (sequences can be padded to a common length). - num_samples: The number of samples to use. - parallel_iterations: The number of parallel iterations to use for the - internal while loop. - swap_memory: Whether GPU-CPU memory swapping should be enabled for the - internal while loop. - - Returns: - log_p_hat: A Tensor of shape [batch_size] containing IWAE's estimate of the - log marginal probability of the observations. - log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples] - containing the log weights at each timestep. Will not be valid for - timesteps past the end of a sequence. - """ - log_p_hat, log_weights, _, final_state = fivo( - model, - observations, - seq_lengths, - num_samples=num_samples, - resampling_criterion=smc.never_resample_criterion, - parallel_iterations=parallel_iterations, - swap_memory=swap_memory) - return log_p_hat, log_weights, final_state - - -def fivo(model, - observations, - seq_lengths, - num_samples=1, - resampling_criterion=smc.ess_criterion, - resampling_type='multinomial', - relaxed_resampling_temperature=0.5, - parallel_iterations=30, - swap_memory=True, - random_seed=None): - """Computes the FIVO lower bound on the log marginal probability. - - This method accepts a stochastic latent variable model and some observations - and computes a stochastic lower bound on the log marginal probability of the - observations. The lower bound is defined by a particle filter's unbiased - estimate of the marginal probability of the observations. For more details see - "Filtering Variational Objectives" by Maddison et al. - https://arxiv.org/abs/1705.09279. - - When the resampling criterion is "never resample", this bound becomes IWAE. - - Args: - model: A subclass of ELBOTrainableSequenceModel that implements one - timestep of the model. See models/vrnn.py for an example. - observations: The inputs to the model. A potentially nested list or tuple of - Tensors each of shape [max_seq_len, batch_size, ...]. The Tensors must - have a rank at least two and have matching shapes in the first two - dimensions, which represent time and the batch respectively. The model - will be provided with the observations before computing the bound. - seq_lengths: A [batch_size] Tensor of ints encoding the length of each - sequence in the batch (sequences can be padded to a common length). - num_samples: The number of particles to use in each particle filter. - resampling_criterion: The resampling criterion to use for this particle - filter. 
Must accept the number of samples, the current log weights, - and the current timestep and return a boolean Tensor of shape [batch_size] - indicating whether each particle filter should resample. See - ess_criterion and related functions for examples. When - resampling_criterion is never_resample_criterion, resampling_fn is ignored - and never called. - resampling_type: The type of resampling, one of "multinomial" or "relaxed". - relaxed_resampling_temperature: A positive temperature only used for relaxed - resampling. - parallel_iterations: The number of parallel iterations to use for the - internal while loop. Note that values greater than 1 can introduce - non-determinism even when random_seed is provided. - swap_memory: Whether GPU-CPU memory swapping should be enabled for the - internal while loop. - random_seed: The random seed to pass to the resampling operations in - the particle filter. Mainly useful for testing. - - Returns: - log_p_hat: A Tensor of shape [batch_size] containing FIVO's estimate of the - log marginal probability of the observations. - log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples] - containing the log weights at each timestep of the particle filter. Note - that on timesteps when a resampling operation is performed the log weights - are reset to 0. Will not be valid for timesteps past the end of a - sequence. - resampled: A Tensor of shape [max_seq_len, batch_size] indicating when the - particle filters resampled. Will be 1.0 on timesteps when resampling - occurred and 0.0 on timesteps when it did not. - """ - # batch_size is the number of particle filters running in parallel. - batch_size = tf.shape(seq_lengths)[0] - - # Each sequence in the batch will be the input data for a different - # particle filter. The batch will be laid out as: - # particle 1 of particle filter 1 - # particle 1 of particle filter 2 - # ... - # particle 1 of particle filter batch_size - # particle 2 of particle filter 1 - # ... - # particle num_samples of particle filter batch_size - observations = nested.tile_tensors(observations, [1, num_samples]) - tiled_seq_lengths = tf.tile(seq_lengths, [num_samples]) - model.set_observations(observations, tiled_seq_lengths) - - if resampling_type == 'multinomial': - resampling_fn = smc.multinomial_resampling - elif resampling_type == 'relaxed': - resampling_fn = functools.partial( - smc.relaxed_resampling, temperature=relaxed_resampling_temperature) - resampling_fn = functools.partial(resampling_fn, random_seed=random_seed) - - def transition_fn(prev_state, t): - if prev_state is None: - return model.zero_state(batch_size * num_samples, tf.float32) - return model.propose_and_weight(prev_state, t) - - log_p_hat, log_weights, resampled, final_state, _ = smc.smc( - transition_fn, - seq_lengths, - num_particles=num_samples, - resampling_criterion=resampling_criterion, - resampling_fn=resampling_fn, - parallel_iterations=parallel_iterations, - swap_memory=swap_memory) - - return log_p_hat, log_weights, resampled, final_state - -def fivo_aux_td( - model, - observations, - seq_lengths, - num_samples=1, - resampling_criterion=smc.ess_criterion, - resampling_type='multinomial', - relaxed_resampling_temperature=0.5, - parallel_iterations=30, - swap_memory=True, - random_seed=None): - """Experimental.""" - # batch_size is the number of particle filters running in parallel. 
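The particle-major layout described in the comment above follows directly from `tf.tile` along the leading axis. A small NumPy demonstration of the same layout:

import numpy as np

batch_size, num_samples = 3, 2
seq_lengths = np.array([5, 7, 6])          # one entry per particle filter
tiled = np.tile(seq_lengths, num_samples)  # same op as tf.tile(seq_lengths, [num_samples])
print(tiled)  # [5 7 6 5 7 6]: rows 0..2 are particle 1, rows 3..5 particle 2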
- batch_size = tf.shape(seq_lengths)[0] - max_seq_len = tf.reduce_max(seq_lengths) - - # Each sequence in the batch will be the input data for a different - # particle filter. The batch will be laid out as: - # particle 1 of particle filter 1 - # particle 1 of particle filter 2 - # ... - # particle 1 of particle filter batch_size - # particle 2 of particle filter 1 - # ... - # particle num_samples of particle filter batch_size - observations = nested.tile_tensors(observations, [1, num_samples]) - tiled_seq_lengths = tf.tile(seq_lengths, [num_samples]) - model.set_observations(observations, tiled_seq_lengths) - - if resampling_type == 'multinomial': - resampling_fn = smc.multinomial_resampling - elif resampling_type == 'relaxed': - resampling_fn = functools.partial( - smc.relaxed_resampling, temperature=relaxed_resampling_temperature) - resampling_fn = functools.partial(resampling_fn, random_seed=random_seed) - - def transition_fn(prev_state, t): - if prev_state is None: - model_init_state = model.zero_state(batch_size * num_samples, tf.float32) - return (tf.zeros([num_samples*batch_size], dtype=tf.float32), - (tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32), - tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32)), - model_init_state) - - prev_log_r, prev_log_r_tilde, prev_model_state = prev_state - (new_model_state, zt, log_q_zt, log_p_zt, - log_p_x_given_z, log_r_tilde, p_ztplus1) = model(prev_model_state, t) - r_tilde_mu, r_tilde_sigma_sq = log_r_tilde - # Compute the weight without r. - log_weight = log_p_zt + log_p_x_given_z - log_q_zt - # Compute log_r and log_r_tilde. - p_mu = tf.stop_gradient(p_ztplus1.mean()) - p_sigma_sq = tf.stop_gradient(p_ztplus1.variance()) - log_r = (tf.log(r_tilde_sigma_sq) - - tf.log(r_tilde_sigma_sq + p_sigma_sq) - - tf.square(r_tilde_mu - p_mu)/(r_tilde_sigma_sq + p_sigma_sq)) - # log_r is [num_samples*batch_size, latent_size]. We sum it along the last - # dimension to compute log r. - log_r = 0.5*tf.reduce_sum(log_r, axis=-1) - # Compute prev log r tilde - prev_r_tilde_mu, prev_r_tilde_sigma_sq = prev_log_r_tilde - prev_log_r_tilde = -0.5*tf.reduce_sum( - tf.square(tf.stop_gradient(zt) - prev_r_tilde_mu)/prev_r_tilde_sigma_sq, axis=-1) - # If the sequence is on the last timestep, log_r and log_r_tilde are just zeros. - last_timestep = t >= (tiled_seq_lengths - 1) - log_r = tf.where(last_timestep, - tf.zeros_like(log_r), - log_r) - prev_log_r_tilde = tf.where(last_timestep, - tf.zeros_like(prev_log_r_tilde), - prev_log_r_tilde) - log_weight += tf.stop_gradient(log_r - prev_log_r) - new_state = (log_r, log_r_tilde, new_model_state) - loop_fn_args = (log_r, prev_log_r_tilde, log_p_x_given_z, log_r - prev_log_r) - return log_weight, new_state, loop_fn_args - - def loop_fn(loop_state, loop_args, unused_model_state, log_weights, resampled, mask, t): - if loop_state is None: - return (tf.zeros([batch_size], dtype=tf.float32), - tf.zeros([batch_size], dtype=tf.float32), - tf.zeros([num_samples, batch_size], dtype=tf.float32)) - log_p_hat_acc, bellman_loss_acc, log_r_diff_acc = loop_state - log_r, prev_log_r_tilde, log_p_x_given_z, log_r_diff = loop_args - # Compute the log_p_hat update - log_p_hat_update = tf.reduce_logsumexp( - log_weights, axis=0) - tf.log(tf.to_float(num_samples)) - # If it is the last timestep, we always add the update. - log_p_hat_acc += tf.cond(t >= max_seq_len-1, - lambda: log_p_hat_update, - lambda: log_p_hat_update * resampled) - # Compute the Bellman update. 
- log_r = tf.reshape(log_r, [num_samples, batch_size]) - prev_log_r_tilde = tf.reshape(prev_log_r_tilde, [num_samples, batch_size]) - log_p_x_given_z = tf.reshape(log_p_x_given_z, [num_samples, batch_size]) - mask = tf.reshape(mask, [num_samples, batch_size]) - # On the first timestep there is no bellman error because there is no - # prev_log_r_tilde. - mask = tf.cond(tf.equal(t, 0), - lambda: tf.zeros_like(mask), - lambda: mask) - # On the first timestep also fix up prev_log_r_tilde, which will be -inf. - prev_log_r_tilde = tf.where( - tf.is_inf(prev_log_r_tilde), - tf.zeros_like(prev_log_r_tilde), - prev_log_r_tilde) - # log_lambda is [num_samples, batch_size] - log_lambda = tf.reduce_mean(prev_log_r_tilde - log_p_x_given_z - log_r, - axis=0, keepdims=True) - bellman_error = mask * tf.square( - prev_log_r_tilde - - tf.stop_gradient(log_lambda + log_p_x_given_z + log_r) - ) - bellman_loss_acc += tf.reduce_mean(bellman_error, axis=0) - # Compute the log_r_diff update - log_r_diff_acc += mask * tf.reshape(log_r_diff, [num_samples, batch_size]) - return (log_p_hat_acc, bellman_loss_acc, log_r_diff_acc) - - log_weights, resampled, accs = smc.smc( - transition_fn, - seq_lengths, - num_particles=num_samples, - resampling_criterion=resampling_criterion, - resampling_fn=resampling_fn, - loop_fn=loop_fn, - parallel_iterations=parallel_iterations, - swap_memory=swap_memory) - - log_p_hat, bellman_loss, log_r_diff = accs - loss_per_seq = [- log_p_hat, bellman_loss] - tf.summary.scalar("bellman_loss", - tf.reduce_mean(bellman_loss / tf.to_float(seq_lengths))) - tf.summary.scalar("log_r_diff", - tf.reduce_mean(tf.reduce_mean(log_r_diff, axis=0) / tf.to_float(seq_lengths))) - return loss_per_seq, log_p_hat, log_weights, resampled diff --git a/research/fivo/fivo/bounds_test.py b/research/fivo/fivo/bounds_test.py deleted file mode 100644 index c970f74f4..000000000 --- a/research/fivo/fivo/bounds_test.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests for fivo.bounds""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from fivo.test_utils import create_vrnn -from fivo import bounds - - -class BoundsTest(tf.test.TestCase): - - def test_elbo(self): - """A golden-value test for the ELBO (the IWAE bound with num_samples=1).""" - tf.set_random_seed(1234) - with self.test_session() as sess: - model, inputs, targets, lengths = create_vrnn(random_seed=1234) - outs = bounds.iwae(model, (inputs, targets), lengths, num_samples=1, - parallel_iterations=1) - sess.run(tf.global_variables_initializer()) - log_p_hat, _, _ = sess.run(outs) - self.assertAllClose([-21.615765, -13.614225], log_p_hat) - - def test_iwae(self): - """A golden-value test for the IWAE bound.""" - tf.set_random_seed(1234) - with self.test_session() as sess: - model, inputs, targets, lengths = create_vrnn(random_seed=1234) - outs = bounds.iwae(model, (inputs, targets), lengths, num_samples=4, - parallel_iterations=1) - sess.run(tf.global_variables_initializer()) - log_p_hat, weights, _ = sess.run(outs) - self.assertAllClose([-23.301426, -13.64028], log_p_hat) - weights_gt = np.array( - [[[-3.66708851, -2.07074022, -4.91751671, -5.03293562], - [-2.99690723, -3.17782736, -4.50084877, -3.48536515]], - [[-6.2539978, -4.37615728, -7.43738699, -7.85044909], - [-8.27518654, -6.71545124, -8.96198845, -7.05567837]], - [[-9.19093227, -8.01637268, -11.64603615, -10.51128292], - [-12.34527206, -11.54284477, -11.8667469, -9.69417381]], - [[-12.20609856, -10.47217369, -13.66270638, -13.46115875], - [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], - [[-16.14766312, -15.57472229, -17.47755432, -17.98189926], - [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], - [[-20.07182884, -18.43191147, -20.1606636, -21.45263863], - [-17.17656708, -16.25190353, -15.28658581, -12.33067703]], - [[-24.10270691, -22.20865822, -24.14675522, -25.27248383], - [-17.17656708, -16.25190353, -15.28658581, -12.33067703]]]) - self.assertAllClose(weights_gt, weights) - - def test_fivo(self): - """A golden-value test for the FIVO bound.""" - tf.set_random_seed(1234) - with self.test_session() as sess: - model, inputs, targets, lengths = create_vrnn(random_seed=1234) - outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, - random_seed=1234, parallel_iterations=1) - sess.run(tf.global_variables_initializer()) - log_p_hat, weights, resampled, _ = sess.run(outs) - self.assertAllClose([-22.98902512, -14.21689224], log_p_hat) - weights_gt = np.array( - [[[-3.66708851, -2.07074022, -4.91751671, -5.03293562], - [-2.99690723, -3.17782736, -4.50084877, -3.48536515]], - [[-2.67100811, -2.30541706, -2.34178066, -2.81751347], - [-8.27518654, -6.71545124, -8.96198845, -7.05567837]], - [[-5.65190411, -5.94563246, -6.55041981, -5.4783473], - [-12.34527206, -11.54284477, -11.8667469, -9.69417381]], - [[-8.71947861, -8.40143299, -8.54593086, -8.42822266], - [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], - [[-12.7003831, -13.5039815, -12.3569726, -12.9489622], - [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], - [[-16.4520301, -16.3611698, -15.0314846, -16.4197006], - [-4.28782988, -4.50591278, -3.40847206, -2.63650274]], - [[-20.7010765, -20.1379165, -19.0020351, -20.2395458], - [-4.28782988, -4.50591278, -3.40847206, -2.63650274]]]) - self.assertAllClose(weights_gt, weights) - 
resampled_gt = np.array( - [[1., 0.], - [0., 0.], - [0., 1.], - [0., 0.], - [0., 0.], - [0., 0.], - [0., 0.]]) - self.assertAllClose(resampled_gt, resampled) - - def test_fivo_relaxed(self): - """A golden-value test for the FIVO bound with relaxed sampling.""" - tf.set_random_seed(1234) - with self.test_session() as sess: - model, inputs, targets, lengths = create_vrnn(random_seed=1234) - outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, - random_seed=1234, parallel_iterations=1, - resampling_type="relaxed") - sess.run(tf.global_variables_initializer()) - log_p_hat, weights, resampled, _ = sess.run(outs) - self.assertAllClose([-22.942394, -14.273882], log_p_hat) - weights_gt = np.array( - [[[-3.66708851, -2.07074118, -4.91751575, -5.03293514], - [-2.99690628, -3.17782831, -4.50084877, -3.48536515]], - [[-2.84939098, -2.30087185, -2.35649204, -2.48417377], - [-8.27518654, -6.71545172, -8.96199131, -7.05567837]], - [[-5.92327023, -5.9433074, -6.5826683, -5.04259014], - [-12.34527206, -11.54284668, -11.86675072, -9.69417477]], - [[-8.95323944, -8.40061855, -8.52760506, -7.99130583], - [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], - [[-12.87836456, -13.49628639, -12.31680107, -12.74228859], - [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], - [[-16.78347397, -16.35150909, -14.98797417, -16.35162735], - [-4.58102798, -4.56017351, -3.46283388, -2.65550804]], - [[-20.81165886, -20.1307621, -18.92229652, -20.17458153], - [-4.58102798, -4.56017351, -3.46283388, -2.65550804]]]) - self.assertAllClose(weights_gt, weights) - resampled_gt = np.array( - [[1., 0.], - [0., 0.], - [0., 1.], - [0., 0.], - [0., 0.], - [0., 0.], - [0., 0.]]) - self.assertAllClose(resampled_gt, resampled) - - def test_fivo_aux_relaxed(self): - """A golden-value test for the FIVO-AUX bound with relaxed sampling.""" - tf.set_random_seed(1234) - with self.test_session() as sess: - model, inputs, targets, lengths = create_vrnn(random_seed=1234, - use_tilt=True) - outs = bounds.fivo(model, (inputs, targets), lengths, num_samples=4, - random_seed=1234, parallel_iterations=1, - resampling_type="relaxed") - sess.run(tf.global_variables_initializer()) - log_p_hat, weights, resampled, _ = sess.run(outs) - self.assertAllClose([-23.1395, -14.271059], log_p_hat) - weights_gt = np.array( - [[[-5.19826221, -3.55476403, -5.98663855, -6.08058834], - [-6.31685925, -5.70243931, -7.07638931, -6.18138981]], - [[-3.97986865, -3.58831525, -3.85753584, -3.5010016], - [-11.38203049, -8.66213989, -11.23646641, -10.02024746]], - [[-6.62269831, -6.36680222, -6.78096485, -5.80072498], - [-3.55419445, -8.11326408, -3.48766923, -3.08593249]], - [[-10.56472301, -10.16084099, -9.96741676, -8.5270071], - [-6.04880285, -7.80853653, -4.72652149, -3.49711013]], - [[-13.36585426, -16.08720398, -13.33416367, -13.1017189], - [-0., -0., -0., -0.]], - [[-17.54233551, -17.35167503, -16.79163361, -16.51471138], - [0., -0., -0., -0.]], - [[-19.74024963, -18.69452858, -17.76246452, -18.76182365], - [0., -0., -0., -0.]]]) - self.assertAllClose(weights_gt, weights) - resampled_gt = np.array([[1., 0.], - [0., 1.], - [0., 0.], - [0., 1.], - [0., 0.], - [0., 0.], - [0., 0.]]) - self.assertAllClose(resampled_gt, resampled) - - -if __name__ == "__main__": - np.set_printoptions(threshold=np.nan) # Used to easily see the gold values. - # Use print(repr(numpy_array)) to print the values. 
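As a point of reference for the golden values above, the per-sequence log_p_hat that these bounds report is a log-mean-exp over the particle axis. A minimal NumPy sketch of that reduction, with invented weights:

    import numpy as np

    # log_p_hat = log(1/K * sum_k exp(log_w_k)) for K particles; the
    # weights here are made up purely to show the reduction.
    log_w = np.array([-3.67, -2.07, -4.92, -5.03])
    log_p_hat = np.logaddexp.reduce(log_w) - np.log(len(log_w))
    print(log_p_hat)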
- tf.test.main() diff --git a/research/fivo/fivo/data/__init__.py b/research/fivo/fivo/data/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/fivo/fivo/data/calculate_pianoroll_mean.py b/research/fivo/fivo/data/calculate_pianoroll_mean.py deleted file mode 100644 index 93f712bd3..000000000 --- a/research/fivo/fivo/data/calculate_pianoroll_mean.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Script to calculate the mean of a pianoroll dataset. - -Given a pianoroll pickle file, this script loads the dataset and -calculates the mean of the training set. Then it updates the pickle file -so that the key "train_mean" points to the mean vector. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pickle -import numpy as np - -import tensorflow as tf - - -from datasets import sparse_pianoroll_to_dense - -tf.app.flags.DEFINE_string('in_file', None, - 'Filename of the pickled pianoroll dataset to load.') -tf.app.flags.DEFINE_string('out_file', None, - 'Name of the output pickle file. Defaults to in_file, ' - 'updating the input pickle file.') -tf.app.flags.mark_flag_as_required('in_file') - -FLAGS = tf.app.flags.FLAGS - -MIN_NOTE = 21 -MAX_NOTE = 108 -NUM_NOTES = MAX_NOTE - MIN_NOTE + 1 - - -def main(unused_argv): - if FLAGS.out_file is None: - FLAGS.out_file = FLAGS.in_file - with tf.gfile.Open(FLAGS.in_file, 'r') as f: - pianorolls = pickle.load(f) - dense_pianorolls = [sparse_pianoroll_to_dense(p, MIN_NOTE, NUM_NOTES)[0] - for p in pianorolls['train']] - # Concatenate all elements along the time axis. - concatenated = np.concatenate(dense_pianorolls, axis=0) - mean = np.mean(concatenated, axis=0) - pianorolls['train_mean'] = mean - # Write out the whole pickle file, including the train mean. - pickle.dump(pianorolls, open(FLAGS.out_file, 'wb')) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/fivo/fivo/data/create_timit_dataset.py b/research/fivo/fivo/data/create_timit_dataset.py deleted file mode 100644 index ea1cd3b10..000000000 --- a/research/fivo/fivo/data/create_timit_dataset.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Preprocesses TIMIT from raw wavfiles to create a set of TFRecords. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import glob -import os -import random -import re - -import numpy as np -import tensorflow as tf - -tf.app.flags.DEFINE_string("raw_timit_dir", None, - "Directory containing TIMIT files.") -tf.app.flags.DEFINE_string("out_dir", None, - "Output directory for TFRecord files.") -tf.app.flags.DEFINE_float("valid_frac", 0.05, - "Fraction of train set to use as valid set. " - "Must be between 0.0 and 1.0.") - -tf.app.flags.mark_flag_as_required("raw_timit_dir") -tf.app.flags.mark_flag_as_required("out_dir") - -FLAGS = tf.app.flags.FLAGS - -NUM_TRAIN_FILES = 4620 -NUM_TEST_FILES = 1680 -SAMPLES_PER_TIMESTEP = 200 - -# Regexes for reading SPHERE header files. -SAMPLE_COUNT_REGEX = re.compile(r"sample_count -i (\d+)") -SAMPLE_MIN_REGEX = re.compile(r"sample_min -i (-?\d+)") -SAMPLE_MAX_REGEX = re.compile(r"sample_max -i (-?\d+)") - - -def get_filenames(split): - """Get all wav filenames from the TIMIT archive.""" - path = os.path.join(FLAGS.raw_timit_dir, "TIMIT", split, "*", "*", "*.WAV") - # Sort the output by name so the order is deterministic. - files = sorted(glob.glob(path)) - return files - - -def load_timit_wav(filename): - """Loads a TIMIT wavfile into a numpy array. - - TIMIT wavfiles include a SPHERE header, detailed in the TIMIT docs. The first - line is the header type and the second is the length of the header in bytes. - After the header, the remaining bytes are actual WAV data. - - The header includes information about the WAV data such as the number of - samples and minimum and maximum amplitude. This function asserts that the - loaded wav data matches the header. - - Args: - filename: The name of the TIMIT wavfile to load. - Returns: - wav: A numpy array containing the loaded wav data. - """ - wav_file = open(filename, "rb") - header_type = wav_file.readline() - header_length_str = wav_file.readline() - # The header length includes the length of the first two lines. - header_remaining_bytes = (int(header_length_str) - len(header_type) - - len(header_length_str)) - header = wav_file.read(header_remaining_bytes) - # Read the relevant header fields. - sample_count = int(SAMPLE_COUNT_REGEX.search(header).group(1)) - sample_min = int(SAMPLE_MIN_REGEX.search(header).group(1)) - sample_max = int(SAMPLE_MAX_REGEX.search(header).group(1)) - wav = np.fromstring(wav_file.read(), dtype="int16").astype("float32") - # Check that the loaded data conforms to the header description. 
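The header arithmetic above is easy to check by hand; a minimal sketch with an invented (but typically sized) SPHERE header:

    # The byte count on the second header line covers the whole header,
    # including the two lines already read. Values here are examples only.
    header_type = "NIST_1A\n"        # first line: header type
    header_length_str = "   1024\n"  # second line: total header bytes
    remaining = int(header_length_str) - len(header_type) - len(header_length_str)
    print(remaining)                 # 1008 bytes of header left to read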
- assert len(wav) == sample_count - assert wav.min() == sample_min - assert wav.max() == sample_max - return wav - - -def preprocess(wavs, block_size, mean, std): - """Normalize the wav data and reshape it into chunks.""" - processed_wavs = [] - for wav in wavs: - wav = (wav - mean) / std - wav_length = wav.shape[0] - if wav_length % block_size != 0: - pad_width = block_size - (wav_length % block_size) - wav = np.pad(wav, (0, pad_width), "constant") - assert wav.shape[0] % block_size == 0 - wav = wav.reshape((-1, block_size)) - processed_wavs.append(wav) - return processed_wavs - - -def create_tfrecord_from_wavs(wavs, output_file): - """Writes processed wav files to disk as sharded TFRecord files.""" - with tf.python_io.TFRecordWriter(output_file) as builder: - for wav in wavs: - builder.write(wav.astype(np.float32).tobytes()) - - -def main(unused_argv): - train_filenames = get_filenames("TRAIN") - test_filenames = get_filenames("TEST") - - num_train_files = len(train_filenames) - num_test_files = len(test_filenames) - num_valid_files = int(num_train_files * FLAGS.valid_frac) - num_train_files -= num_valid_files - - print("%d train / %d valid / %d test" % ( - num_train_files, num_valid_files, num_test_files)) - - random.seed(1234) - random.shuffle(train_filenames) - - valid_filenames = train_filenames[:num_valid_files] - train_filenames = train_filenames[num_valid_files:] - - # Make sure there is no overlap in the train, test, and valid sets. - train_s = set(train_filenames) - test_s = set(test_filenames) - valid_s = set(valid_filenames) - # Disable explicit length testing to make the assertions more readable. - # pylint: disable=g-explicit-length-test - assert len(train_s & test_s) == 0 - assert len(train_s & valid_s) == 0 - assert len(valid_s & test_s) == 0 - # pylint: enable=g-explicit-length-test - - train_wavs = [load_timit_wav(f) for f in train_filenames] - valid_wavs = [load_timit_wav(f) for f in valid_filenames] - test_wavs = [load_timit_wav(f) for f in test_filenames] - assert len(train_wavs) + len(valid_wavs) == NUM_TRAIN_FILES - assert len(test_wavs) == NUM_TEST_FILES - - # Calculate the mean and standard deviation of the train set. - train_stacked = np.hstack(train_wavs) - train_mean = np.mean(train_stacked) - train_std = np.std(train_stacked) - print("train mean: %f train std: %f" % (train_mean, train_std)) - - # Process all data, normalizing with the train set statistics. - processed_train_wavs = preprocess(train_wavs, SAMPLES_PER_TIMESTEP, - train_mean, train_std) - processed_valid_wavs = preprocess(valid_wavs, SAMPLES_PER_TIMESTEP, - train_mean, train_std) - processed_test_wavs = preprocess(test_wavs, SAMPLES_PER_TIMESTEP, train_mean, - train_std) - - # Write the datasets to disk. - create_tfrecord_from_wavs( - processed_train_wavs, - os.path.join(FLAGS.out_dir, "train")) - create_tfrecord_from_wavs( - processed_valid_wavs, - os.path.join(FLAGS.out_dir, "valid")) - create_tfrecord_from_wavs( - processed_test_wavs, - os.path.join(FLAGS.out_dir, "test")) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/fivo/fivo/data/datasets.py b/research/fivo/fivo/data/datasets.py deleted file mode 100644 index 6d5324623..000000000 --- a/research/fivo/fivo/data/datasets.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Code for creating sequence datasets. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pickle - -import numpy as np -from scipy.sparse import coo_matrix -import tensorflow as tf - -# The default number of threads used to process data in parallel. -DEFAULT_PARALLELISM = 12 - - -def sparse_pianoroll_to_dense(pianoroll, min_note, num_notes): - """Converts a sparse pianoroll to a dense numpy array. - - Given a sparse pianoroll, converts it to a dense numpy array of shape - [num_timesteps, num_notes] where entry i,j is 1.0 if note j is active on - timestep i and 0.0 otherwise. - - Args: - pianoroll: A sparse pianoroll object, a list of tuples where the i'th tuple - contains the indices of the notes active at timestep i. - min_note: The minimum note in the pianoroll, subtracted from all notes so - that the minimum note becomes 0. - num_notes: The number of possible different note indices, determines the - second dimension of the resulting dense array. - Returns: - dense_pianoroll: A [num_timesteps, num_notes] numpy array of floats. - num_timesteps: A python int, the number of timesteps in the pianoroll. - """ - num_timesteps = len(pianoroll) - inds = [] - for time, chord in enumerate(pianoroll): - # Re-index the notes to start from min_note. - inds.extend((time, note-min_note) for note in chord) - shape = [num_timesteps, num_notes] - values = [1.] * len(inds) - sparse_pianoroll = coo_matrix( - (values, ([x[0] for x in inds], [x[1] for x in inds])), - shape=shape) - return sparse_pianoroll.toarray(), num_timesteps - - -def create_pianoroll_dataset(path, - split, - batch_size, - num_parallel_calls=DEFAULT_PARALLELISM, - shuffle=False, - repeat=False, - min_note=21, - max_note=108): - """Creates a pianoroll dataset. - - Args: - path: The path of a pickle file containing the dataset to load. - split: The split to use, can be train, test, or valid. - batch_size: The batch size. If repeat is False then it is not guaranteed - that the true batch size will match for all batches since batch_size - may not necessarily evenly divide the number of elements. - num_parallel_calls: The number of threads to use for parallel processing of - the data. - shuffle: If true, shuffles the order of the dataset. - repeat: If true, repeats the dataset endlessly. - min_note: The minimum note number of the dataset. For all pianoroll datasets - the minimum note is number 21, and changing this affects the dimension of - the data. This is useful mostly for testing. - max_note: The maximum note number of the dataset. For all pianoroll datasets - the maximum note is number 108, and changing this affects the dimension of - the data. This is useful mostly for testing. - Returns: - inputs: A batch of input sequences represented as a dense Tensor of shape - [time, batch_size, data_dimension]. The sequences in inputs are the - sequences in targets shifted one timestep into the future, padded with - zeros. 
This tensor is mean-centered, with the mean taken from the pickle
- file key 'train_mean'.
- targets: A batch of target sequences represented as a dense Tensor of
- shape [time, batch_size, data_dimension].
- lens: An int Tensor of shape [batch_size] representing the lengths of each
- sequence in the batch.
- mean: A float Tensor of shape [data_dimension] containing the mean loaded
- from the pickle file.
- """
- # Load the data from disk.
- num_notes = max_note - min_note + 1
- with tf.gfile.Open(path, "r") as f:
- raw_data = pickle.load(f)
- pianorolls = raw_data[split]
- mean = raw_data["train_mean"]
- num_examples = len(pianorolls)
-
- def pianoroll_generator():
- for sparse_pianoroll in pianorolls:
- yield sparse_pianoroll_to_dense(sparse_pianoroll, min_note, num_notes)
-
- dataset = tf.data.Dataset.from_generator(
- pianoroll_generator,
- output_types=(tf.float64, tf.int64),
- output_shapes=([None, num_notes], []))
-
- if repeat: dataset = dataset.repeat()
- if shuffle: dataset = dataset.shuffle(num_examples)
-
- # Batch sequences together, padding them to a common length in time.
- dataset = dataset.padded_batch(batch_size,
- padded_shapes=([None, num_notes], []))
-
- def process_pianoroll_batch(data, lengths):
- """Create mean-centered and time-major next-step prediction Tensors."""
- data = tf.to_float(tf.transpose(data, perm=[1, 0, 2]))
- lengths = tf.to_int32(lengths)
- targets = data
- # Mean center the inputs.
- inputs = data - tf.constant(mean, dtype=tf.float32,
- shape=[1, 1, mean.shape[0]])
- # Shift the inputs one step forward in time. Also remove the last timestep
- # so that targets and inputs are the same length.
- inputs = tf.pad(inputs, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1]
- # Mask out unused timesteps.
- inputs *= tf.expand_dims(tf.transpose(
- tf.sequence_mask(lengths, dtype=inputs.dtype)), 2)
- return inputs, targets, lengths
-
- dataset = dataset.map(process_pianoroll_batch,
- num_parallel_calls=num_parallel_calls)
- dataset = dataset.prefetch(num_examples)
-
- itr = dataset.make_one_shot_iterator()
- inputs, targets, lengths = itr.get_next()
- return inputs, targets, lengths, tf.constant(mean, dtype=tf.float32)
-
-
-def create_human_pose_dataset(
- path,
- split,
- batch_size,
- num_parallel_calls=DEFAULT_PARALLELISM,
- shuffle=False,
- repeat=False,):
- """Creates a human pose dataset.
-
- Args:
- path: The path of a pickle file containing the dataset to load.
- split: The split to use, can be train, test, or valid.
- batch_size: The batch size. If repeat is False then it is not guaranteed
- that the true batch size will match for all batches since batch_size
- may not necessarily evenly divide the number of elements.
- num_parallel_calls: The number of threads to use for parallel processing of
- the data.
- shuffle: If true, shuffles the order of the dataset.
- repeat: If true, repeats the dataset endlessly.
- Returns:
- inputs: A batch of input sequences represented as a dense Tensor of shape
- [time, batch_size, data_dimension]. The sequences in inputs are the
- sequences in targets shifted one timestep into the future, padded with
- zeros. This tensor is mean-centered, with the mean taken from the pickle
- file key 'train_mean'.
- targets: A batch of target sequences represented as a dense Tensor of
- shape [time, batch_size, data_dimension].
- lens: An int Tensor of shape [batch_size] representing the lengths of each
- sequence in the batch.
- mean: A float Tensor of shape [data_dimension] containing the mean loaded
- from the pickle file.
- """
- # Load the data from disk.
- with tf.gfile.Open(path, "r") as f:
- raw_data = pickle.load(f)
-
- mean = raw_data["train_mean"]
- pose_sequences = raw_data[split]
- num_examples = len(pose_sequences)
- num_features = pose_sequences[0].shape[1]
-
- def pose_generator():
- """A generator that yields pose data sequences."""
- # Each timestep has 32 x values followed by 32 y values so is 64
- # dimensional.
- for pose_sequence in pose_sequences:
- yield pose_sequence, pose_sequence.shape[0]
-
- dataset = tf.data.Dataset.from_generator(
- pose_generator,
- output_types=(tf.float64, tf.int64),
- output_shapes=([None, num_features], []))
-
- if repeat:
- dataset = dataset.repeat()
- if shuffle:
- dataset = dataset.shuffle(num_examples)
-
- # Batch sequences together, padding them to a common length in time.
- dataset = dataset.padded_batch(
- batch_size, padded_shapes=([None, num_features], []))
-
- # Post-process each batch, ensuring that it is mean-centered and time-major.
- def process_pose_data(data, lengths):
- """Creates Tensors for next step prediction and mean-centers the input."""
- data = tf.to_float(tf.transpose(data, perm=[1, 0, 2]))
- lengths = tf.to_int32(lengths)
- targets = data
- # Mean center the inputs.
- inputs = data - tf.constant(
- mean, dtype=tf.float32, shape=[1, 1, mean.shape[0]])
- # Shift the inputs one step forward in time. Also remove the last timestep
- # so that targets and inputs are the same length.
- inputs = tf.pad(inputs, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1]
- # Mask out unused timesteps.
- inputs *= tf.expand_dims(
- tf.transpose(tf.sequence_mask(lengths, dtype=inputs.dtype)), 2)
- return inputs, targets, lengths
-
- dataset = dataset.map(
- process_pose_data,
- num_parallel_calls=num_parallel_calls)
- dataset = dataset.prefetch(num_examples)
-
- itr = dataset.make_one_shot_iterator()
- inputs, targets, lengths = itr.get_next()
- return inputs, targets, lengths, tf.constant(mean, dtype=tf.float32)
-
-
-def create_speech_dataset(path,
- batch_size,
- samples_per_timestep=200,
- num_parallel_calls=DEFAULT_PARALLELISM,
- prefetch_buffer_size=2048,
- shuffle=False,
- repeat=False):
- """Creates a speech dataset.
-
- Args:
- path: The path of a possibly sharded TFRecord file containing the data.
- batch_size: The batch size. If repeat is False then it is not guaranteed
- that the true batch size will match for all batches since batch_size
- may not necessarily evenly divide the number of elements.
- samples_per_timestep: The number of audio samples per timestep. Used to
- reshape the data into sequences of shape [time, samples_per_timestep].
- Should not change except for testing -- in all speech datasets 200 is the
- number of samples per timestep.
- num_parallel_calls: The number of threads to use for parallel processing of
- the data.
- prefetch_buffer_size: The size of the prefetch queues to use after reading
- and processing the raw data.
- shuffle: If true, shuffles the order of the dataset.
- repeat: If true, repeats the dataset endlessly.
- Returns:
- inputs: A batch of input sequences represented as a dense Tensor of shape
- [time, batch_size, samples_per_timestep]. The sequences in inputs are the
- sequences in targets shifted one timestep into the future, padded with
- zeros.
- targets: A batch of target sequences represented as a dense Tensor of
- shape [time, batch_size, samples_per_timestep].
- lens: An int Tensor of shape [batch_size] representing the lengths of each
- sequence in the batch.
- """
- filenames = [path]
-
- def read_speech_example(value):
- """Parses a single record of raw float32 audio from the TFRecord file."""
- decoded = tf.decode_raw(value, out_type=tf.float32)
- example = tf.reshape(decoded, [-1, samples_per_timestep])
- length = tf.shape(example)[0]
- return example, length
-
- # Create the dataset from the TFRecord files
- dataset = tf.data.TFRecordDataset(filenames).map(
- read_speech_example, num_parallel_calls=num_parallel_calls)
- dataset = dataset.prefetch(prefetch_buffer_size)
-
- if repeat: dataset = dataset.repeat()
- if shuffle: dataset = dataset.shuffle(prefetch_buffer_size)
-
- dataset = dataset.padded_batch(
- batch_size, padded_shapes=([None, samples_per_timestep], []))
-
- def process_speech_batch(data, lengths):
- """Creates Tensors for next step prediction."""
- data = tf.transpose(data, perm=[1, 0, 2])
- lengths = tf.to_int32(lengths)
- targets = data
- # Shift the inputs one step forward in time. Also remove the last timestep
- # so that targets and inputs are the same length.
- inputs = tf.pad(data, [[1, 0], [0, 0], [0, 0]], mode="CONSTANT")[:-1]
- # Mask out unused timesteps.
- inputs *= tf.expand_dims(
- tf.transpose(tf.sequence_mask(lengths, dtype=inputs.dtype)), 2)
- return inputs, targets, lengths
-
- dataset = dataset.map(process_speech_batch,
- num_parallel_calls=num_parallel_calls)
- dataset = dataset.prefetch(prefetch_buffer_size)
-
- itr = dataset.make_one_shot_iterator()
- inputs, targets, lengths = itr.get_next()
- return inputs, targets, lengths
-
-
-SQUARED_OBSERVATION = "squared"
-ABS_OBSERVATION = "abs"
-STANDARD_OBSERVATION = "standard"
-OBSERVATION_TYPES = [SQUARED_OBSERVATION, ABS_OBSERVATION, STANDARD_OBSERVATION]
-
-ROUND_TRANSITION = "round"
-STANDARD_TRANSITION = "standard"
-TRANSITION_TYPES = [ROUND_TRANSITION, STANDARD_TRANSITION]
-
-
-def create_chain_graph_dataset(
- batch_size,
- num_timesteps,
- steps_per_observation=None,
- state_size=1,
- transition_variance=1.,
- observation_variance=1.,
- transition_type=STANDARD_TRANSITION,
- observation_type=STANDARD_OBSERVATION,
- fixed_observation=None,
- prefetch_buffer_size=2048,
- dtype="float32"):
- """Creates a toy chain graph dataset.
-
- Creates a dataset where the data are sampled from a diffusion process. The
- 'latent' states of the process are sampled as a chain of Normals:
-
- z0 ~ N(0, transition_variance)
- z1 ~ N(transition_fn(z0), transition_variance)
- ...
-
- where transition_fn could round z0 or pass it through unchanged.
-
- The observations are produced every steps_per_observation timesteps as a
- function of the latent zs. For example if steps_per_observation is 3 then the
- first observation will be produced as a function of z3:
-
- x1 ~ N(observation_fn(z3), observation_variance)
-
- where observation_fn could square z3, take the absolute value, or pass
- it through unchanged.
-
- Only the observations are returned.
-
- Args:
- batch_size: The batch size. The number of trajectories to run in parallel.
- num_timesteps: The length of the chain of latent states (i.e. the
- number of z's excluding z0).
- steps_per_observation: The number of latent states between each observation,
- must evenly divide num_timesteps.
- state_size: The size of the latent state and observation, must be a
- python int.
- transition_variance: The variance of the transition density.
- observation_variance: The variance of the observation density.
- transition_type: Must be one of "round" or "standard". "round" means that
- the transition density is centered at the rounded previous latent state.
- "standard" centers the transition density at the previous latent state,
- unchanged.
- observation_type: Must be one of "squared", "abs" or "standard". "squared"
- centers the observation density at the squared latent state. "abs"
- centers the observation density at the absolute value of the current
- latent state. "standard" centers the observation density at the current
- latent state.
- fixed_observation: If not None, fixes all observations to be a constant.
- Must be a scalar.
- prefetch_buffer_size: The size of the prefetch queues to use after reading
- and processing the raw data.
- dtype: A string convertible to a tensorflow datatype. The datatype used
- to represent the states and observations.
- Returns:
- observations: A batch of observations represented as a dense Tensor of
- shape [num_observations, batch_size, state_size]. num_observations is
- num_timesteps/steps_per_observation.
- lens: An int Tensor of shape [batch_size] representing the lengths of each
- sequence in the batch. Will contain num_observations as each entry.
- Raises:
- ValueError: Raised if steps_per_observation does not evenly divide
- num_timesteps.
- """
- if steps_per_observation is None:
- steps_per_observation = num_timesteps
- if num_timesteps % steps_per_observation != 0:
- raise ValueError("steps_per_observation must evenly divide num_timesteps.")
- num_observations = int(num_timesteps / steps_per_observation)
- def data_generator():
- """An infinite generator of latents and observations from the model."""
- transition_std = np.sqrt(transition_variance)
- observation_std = np.sqrt(observation_variance)
- while True:
- states = []
- observations = []
- # Sample z0 ~ Normal(0, sqrt(variance)).
- states.append(
- np.random.normal(size=[state_size],
- scale=observation_std).astype(dtype))
- # Start the range at 1 because we've already generated z0.
- # The range ends at num_timesteps+1 because we want to include the
- # num_timesteps-th step.
- for t in xrange(1, num_timesteps+1):
- if transition_type == ROUND_TRANSITION:
- loc = np.round(states[-1])
- elif transition_type == STANDARD_TRANSITION:
- loc = states[-1]
- z_t = np.random.normal(size=[state_size], loc=loc, scale=transition_std)
- states.append(z_t.astype(dtype))
- if t % steps_per_observation == 0:
- if fixed_observation is None:
- if observation_type == SQUARED_OBSERVATION:
- loc = np.square(states[-1])
- elif observation_type == ABS_OBSERVATION:
- loc = np.abs(states[-1])
- elif observation_type == STANDARD_OBSERVATION:
- loc = states[-1]
- x_t = np.random.normal(size=[state_size],
- loc=loc,
- scale=observation_std).astype(dtype)
- else:
- x_t = np.ones([state_size]) * fixed_observation
-
- observations.append(x_t)
- yield states, observations
-
- dataset = tf.data.Dataset.from_generator(
- data_generator,
- output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)),
- output_shapes=([num_timesteps+1, state_size],
- [num_observations, state_size])
- )
- dataset = dataset.repeat().batch(batch_size)
- dataset = dataset.prefetch(prefetch_buffer_size)
- itr = dataset.make_one_shot_iterator()
- _, observations = itr.get_next()
- # Transpose observations from [batch, time, state_size] to
- observations = tf.transpose(observations, perm=[1, 0, 2]) - lengths = tf.ones([batch_size], dtype=tf.int32) * num_observations - return observations, lengths diff --git a/research/fivo/fivo/data/datasets_test.py b/research/fivo/fivo/data/datasets_test.py deleted file mode 100644 index e6bbfda67..000000000 --- a/research/fivo/fivo/data/datasets_test.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.data.datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pickle -import os - -import numpy as np -import tensorflow as tf - -from fivo.data import datasets - -FLAGS = tf.app.flags.FLAGS - - -class DatasetsTest(tf.test.TestCase): - - def test_sparse_pianoroll_to_dense_empty_at_end(self): - sparse_pianoroll = [(0, 1), (1, 0), (), (1,), (), ()] - dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( - sparse_pianoroll, min_note=0, num_notes=2) - self.assertEqual(num_timesteps, 6) - self.assertAllEqual([[1, 1], - [1, 1], - [0, 0], - [0, 1], - [0, 0], - [0, 0]], dense_pianoroll) - - def test_sparse_pianoroll_to_dense_with_chord(self): - sparse_pianoroll = [(0, 1), (1, 0), (), (1,)] - dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( - sparse_pianoroll, min_note=0, num_notes=2) - self.assertEqual(num_timesteps, 4) - self.assertAllEqual([[1, 1], - [1, 1], - [0, 0], - [0, 1]], dense_pianoroll) - - def test_sparse_pianoroll_to_dense_simple(self): - sparse_pianoroll = [(0,), (), (1,)] - dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( - sparse_pianoroll, min_note=0, num_notes=2) - self.assertEqual(num_timesteps, 3) - self.assertAllEqual([[1, 0], - [0, 0], - [0, 1]], dense_pianoroll) - - def test_sparse_pianoroll_to_dense_subtracts_min_note(self): - sparse_pianoroll = [(4, 5), (5, 4), (), (5,), (), ()] - dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( - sparse_pianoroll, min_note=4, num_notes=2) - self.assertEqual(num_timesteps, 6) - self.assertAllEqual([[1, 1], - [1, 1], - [0, 0], - [0, 1], - [0, 0], - [0, 0]], dense_pianoroll) - - def test_sparse_pianoroll_to_dense_uses_num_notes(self): - sparse_pianoroll = [(4, 5), (5, 4), (), (5,), (), ()] - dense_pianoroll, num_timesteps = datasets.sparse_pianoroll_to_dense( - sparse_pianoroll, min_note=4, num_notes=3) - self.assertEqual(num_timesteps, 6) - self.assertAllEqual([[1, 1, 0], - [1, 1, 0], - [0, 0, 0], - [0, 1, 0], - [0, 0, 0], - [0, 0, 0]], dense_pianoroll) - - def test_pianoroll_dataset(self): - pianoroll_data = [[(0,), (), (1,)], - [(0, 1), (1,)], - [(1,), (0,), (), (0, 1), (), ()]] - pianoroll_mean = np.zeros([3]) - pianoroll_mean[-1] = 1 - data = {"train": pianoroll_data, "train_mean": pianoroll_mean} - path = os.path.join(tf.test.get_temp_dir(), "test.pkl") - pickle.dump(data, open(path, "wb")) - with 
self.test_session() as sess: - inputs, targets, lens, mean = datasets.create_pianoroll_dataset( - path, "train", 2, num_parallel_calls=1, - shuffle=False, repeat=False, - min_note=0, max_note=2) - i1, t1, l1 = sess.run([inputs, targets, lens]) - i2, t2, l2 = sess.run([inputs, targets, lens]) - m = sess.run(mean) - # Check the lengths. - self.assertAllEqual([3, 2], l1) - self.assertAllEqual([6], l2) - # Check the mean. - self.assertAllEqual(pianoroll_mean, m) - # Check the targets. The targets should not be mean-centered and should - # be padded with zeros to a common length within a batch. - self.assertAllEqual([[1, 0, 0], - [0, 0, 0], - [0, 1, 0]], t1[:, 0, :]) - self.assertAllEqual([[1, 1, 0], - [0, 1, 0], - [0, 0, 0]], t1[:, 1, :]) - self.assertAllEqual([[0, 1, 0], - [1, 0, 0], - [0, 0, 0], - [1, 1, 0], - [0, 0, 0], - [0, 0, 0]], t2[:, 0, :]) - # Check the inputs. Each sequence should start with zeros on the first - # timestep. Each sequence should be padded with zeros to a common length - # within a batch. The mean should be subtracted from all timesteps except - # the first and the padding. - self.assertAllEqual([[0, 0, 0], - [1, 0, -1], - [0, 0, -1]], i1[:, 0, :]) - self.assertAllEqual([[0, 0, 0], - [1, 1, -1], - [0, 0, 0]], i1[:, 1, :]) - self.assertAllEqual([[0, 0, 0], - [0, 1, -1], - [1, 0, -1], - [0, 0, -1], - [1, 1, -1], - [0, 0, -1]], i2[:, 0, :]) - - def test_human_pose_dataset(self): - pose_data = [ - [[0, 0], [2, 2]], - [[2, 2]], - [[0, 0], [0, 0], [2, 2], [2, 2], [0, 0]], - ] - pose_data = [np.array(x, dtype=np.float64) for x in pose_data] - pose_data_mean = np.array([1, 1], dtype=np.float64) - data = { - "train": pose_data, - "train_mean": pose_data_mean, - } - path = os.path.join(tf.test.get_temp_dir(), "test_human_pose_dataset.pkl") - with open(path, "wb") as out: - pickle.dump(data, out) - with self.test_session() as sess: - inputs, targets, lens, mean = datasets.create_human_pose_dataset( - path, "train", 2, num_parallel_calls=1, shuffle=False, repeat=False) - i1, t1, l1 = sess.run([inputs, targets, lens]) - i2, t2, l2 = sess.run([inputs, targets, lens]) - m = sess.run(mean) - # Check the lengths. - self.assertAllEqual([2, 1], l1) - self.assertAllEqual([5], l2) - # Check the mean. - self.assertAllEqual(pose_data_mean, m) - # Check the targets. The targets should not be mean-centered and should - # be padded with zeros to a common length within a batch. - self.assertAllEqual([[0, 0], [2, 2]], t1[:, 0, :]) - self.assertAllEqual([[2, 2], [0, 0]], t1[:, 1, :]) - self.assertAllEqual([[0, 0], [0, 0], [2, 2], [2, 2], [0, 0]], t2[:, 0, :]) - # Check the inputs. Each sequence should start with zeros on the first - # timestep. Each sequence should be padded with zeros to a common length - # within a batch. The mean should be subtracted from all timesteps except - # the first and the padding. 
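The input construction that the comment above describes can be sketched in NumPy for a single sequence (the values mirror the first pose sequence in this test):

    import numpy as np

    targets = np.array([[0., 0.], [2., 2.]])   # two timesteps
    mean = np.array([1., 1.])
    # Mean-center, then shift one step forward in time so the input at step
    # t is the centered target from step t - 1; step 0 stays all zeros.
    inputs = np.vstack([np.zeros((1, 2)), (targets - mean)[:-1]])
    print(inputs)                              # [[ 0.  0.] [-1. -1.]]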
- self.assertAllEqual([[0, 0], [-1, -1]], i1[:, 0, :]) - self.assertAllEqual([[0, 0], [0, 0]], i1[:, 1, :]) - self.assertAllEqual([[0, 0], [-1, -1], [-1, -1], [1, 1], [1, 1]], - i2[:, 0, :]) - - def test_speech_dataset(self): - with self.test_session() as sess: - path = os.path.join( - os.path.dirname(os.path.dirname(os.path.realpath(__file__))), - "test_data", - "tiny_speech_dataset.tfrecord") - inputs, targets, lens = datasets.create_speech_dataset( - path, 3, samples_per_timestep=2, num_parallel_calls=1, - prefetch_buffer_size=3, shuffle=False, repeat=False) - inputs1, targets1, lengths1 = sess.run([inputs, targets, lens]) - inputs2, targets2, lengths2 = sess.run([inputs, targets, lens]) - # Check the lengths. - self.assertAllEqual([1, 2, 3], lengths1) - self.assertAllEqual([4], lengths2) - # Check the targets. The targets should be padded with zeros to a common - # length within a batch. - self.assertAllEqual([[[0., 1.], [0., 1.], [0., 1.]], - [[0., 0.], [2., 3.], [2., 3.]], - [[0., 0.], [0., 0.], [4., 5.]]], - targets1) - self.assertAllEqual([[[0., 1.]], - [[2., 3.]], - [[4., 5.]], - [[6., 7.]]], - targets2) - # Check the inputs. Each sequence should start with zeros on the first - # timestep. Each sequence should be padded with zeros to a common length - # within a batch. - self.assertAllEqual([[[0., 0.], [0., 0.], [0., 0.]], - [[0., 0.], [0., 1.], [0., 1.]], - [[0., 0.], [0., 0.], [2., 3.]]], - inputs1) - self.assertAllEqual([[[0., 0.]], - [[0., 1.]], - [[2., 3.]], - [[4., 5.]]], - inputs2) - - def test_chain_graph_raises_error_on_wrong_steps_per_observation(self): - with self.assertRaises(ValueError): - datasets.create_chain_graph_dataset( - batch_size=4, - num_timesteps=10, - steps_per_observation=9) - - def test_chain_graph_single_obs(self): - with self.test_session() as sess: - np.random.seed(1234) - num_observations = 1 - num_timesteps = 5 - batch_size = 2 - state_size = 1 - observations, lengths = datasets.create_chain_graph_dataset( - batch_size=batch_size, - num_timesteps=num_timesteps, - state_size=state_size) - out_observations, out_lengths = sess.run([observations, lengths]) - self.assertAllEqual([num_observations, num_observations], out_lengths) - self.assertAllClose( - [[[1.426677], [-1.789461]]], - out_observations) - - def test_chain_graph_multiple_obs(self): - with self.test_session() as sess: - np.random.seed(1234) - num_observations = 3 - num_timesteps = 6 - batch_size = 2 - state_size = 1 - observations, lengths = datasets.create_chain_graph_dataset( - batch_size=batch_size, - num_timesteps=num_timesteps, - steps_per_observation=num_timesteps/num_observations, - state_size=state_size) - out_observations, out_lengths = sess.run([observations, lengths]) - self.assertAllEqual([num_observations, num_observations], out_lengths) - self.assertAllClose( - [[[0.40051451], [1.07405114]], - [[1.73932898], [3.16880035]], - [[-1.98377144], [2.82669163]]], - out_observations) - - def test_chain_graph_state_dims(self): - with self.test_session() as sess: - np.random.seed(1234) - num_observations = 1 - num_timesteps = 5 - batch_size = 2 - state_size = 3 - observations, lengths = datasets.create_chain_graph_dataset( - batch_size=batch_size, - num_timesteps=num_timesteps, - state_size=state_size) - out_observations, out_lengths = sess.run([observations, lengths]) - self.assertAllEqual([num_observations, num_observations], out_lengths) - self.assertAllClose( - [[[1.052287, -4.560759, 3.07988], - [2.008926, 0.495567, 3.488678]]], - out_observations) - - def 
test_chain_graph_fixed_obs(self): - with self.test_session() as sess: - np.random.seed(1234) - num_observations = 3 - num_timesteps = 6 - batch_size = 2 - state_size = 1 - observations, lengths = datasets.create_chain_graph_dataset( - batch_size=batch_size, - num_timesteps=num_timesteps, - steps_per_observation=num_timesteps/num_observations, - state_size=state_size, - fixed_observation=4.) - out_observations, out_lengths = sess.run([observations, lengths]) - self.assertAllEqual([num_observations, num_observations], out_lengths) - self.assertAllClose( - np.ones([num_observations, batch_size, state_size]) * 4., - out_observations) - -if __name__ == "__main__": - tf.test.main() diff --git a/research/fivo/fivo/ghmm_runners.py b/research/fivo/fivo/ghmm_runners.py deleted file mode 100644 index 1f1ba6d4f..000000000 --- a/research/fivo/fivo/ghmm_runners.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Creates and runs Gaussian HMM-related graphs.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np -import tensorflow as tf - -from fivo import smc -from fivo import bounds -from fivo.data import datasets -from fivo.models import ghmm - - -def run_train(config): - """Runs training for a Gaussian HMM setup.""" - - def create_logging_hook(step, bound_value, likelihood, bound_gap): - """Creates a logging hook that prints the bound value periodically.""" - bound_label = config.bound + "/t" - def summary_formatter(log_dict): - string = ("Step {step}, %s: {value:.3f}, " - "likelihood: {ll:.3f}, gap: {gap:.3e}") % bound_label - return string.format(**log_dict) - logging_hook = tf.train.LoggingTensorHook( - {"step": step, "value": bound_value, - "ll": likelihood, "gap": bound_gap}, - every_n_iter=config.summarize_every, - formatter=summary_formatter) - return logging_hook - - def create_losses(model, observations, lengths): - """Creates the loss to be optimized. - - Args: - model: A Trainable GHMM model. - observations: A set of observations. - lengths: The lengths of each sequence in the observations. - Returns: - loss: A float Tensor that when differentiated yields the gradients - to apply to the model. Should be optimized via gradient descent. - bound: A float Tensor containing the value of the bound that is - being optimized. - true_ll: The true log-likelihood of the data under the model. - bound_gap: The gap between the bound and the true log-likelihood. - """ - # Compute lower bounds on the log likelihood. 
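Each bound below is reported per timestep by dividing each sequence's value by its length before averaging over the batch; a minimal NumPy sketch of that normalization with made-up numbers:

    import numpy as np

    # Mirrors tf.reduce_mean(ll_per_seq / tf.to_float(lengths)) below.
    ll_per_seq = np.array([-21.6, -13.6])   # invented per-sequence bounds
    lengths = np.array([7., 4.])
    ll_per_t = np.mean(ll_per_seq / lengths)
    print(ll_per_t)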
- if config.bound == "elbo": - ll_per_seq, _, _ = bounds.iwae( - model, observations, lengths, num_samples=1, - parallel_iterations=config.parallel_iterations - ) - elif config.bound == "iwae": - ll_per_seq, _, _ = bounds.iwae( - model, observations, lengths, num_samples=config.num_samples, - parallel_iterations=config.parallel_iterations - ) - elif config.bound == "fivo": - if config.resampling_type == "relaxed": - ll_per_seq, _, _, _ = bounds.fivo( - model, - observations, - lengths, - num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - resampling_type=config.resampling_type, - relaxed_resampling_temperature=config. - relaxed_resampling_temperature, - random_seed=config.random_seed, - parallel_iterations=config.parallel_iterations) - else: - ll_per_seq, _, _, _ = bounds.fivo( - model, observations, lengths, - num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - resampling_type=config.resampling_type, - random_seed=config.random_seed, - parallel_iterations=config.parallel_iterations - ) - ll_per_t = tf.reduce_mean(ll_per_seq / tf.to_float(lengths)) - # Compute the data's true likelihood under the model and the bound gap. - true_ll_per_seq = model.likelihood(tf.squeeze(observations)) - true_ll_per_t = tf.reduce_mean(true_ll_per_seq / tf.to_float(lengths)) - bound_gap = true_ll_per_seq - ll_per_seq - bound_gap = tf.reduce_mean(bound_gap/ tf.to_float(lengths)) - tf.summary.scalar("train_ll_bound", ll_per_t) - tf.summary.scalar("train_true_ll", true_ll_per_t) - tf.summary.scalar("bound_gap", bound_gap) - return -ll_per_t, ll_per_t, true_ll_per_t, bound_gap - - def create_graph(): - """Creates the training graph.""" - global_step = tf.train.get_or_create_global_step() - xs, lengths = datasets.create_chain_graph_dataset( - config.batch_size, - config.num_timesteps, - steps_per_observation=1, - state_size=1, - transition_variance=config.variance, - observation_variance=config.variance) - model = ghmm.TrainableGaussianHMM( - config.num_timesteps, - config.proposal_type, - transition_variances=config.variance, - emission_variances=config.variance, - random_seed=config.random_seed) - loss, bound, true_ll, gap = create_losses(model, xs, lengths) - opt = tf.train.AdamOptimizer(config.learning_rate) - grads = opt.compute_gradients(loss, var_list=tf.trainable_variables()) - train_op = opt.apply_gradients(grads, global_step=global_step) - return bound, true_ll, gap, train_op, global_step - - with tf.Graph().as_default(): - if config.random_seed: - tf.set_random_seed(config.random_seed) - np.random.seed(config.random_seed) - bound, true_ll, gap, train_op, global_step = create_graph() - log_hook = create_logging_hook(global_step, bound, true_ll, gap) - with tf.train.MonitoredTrainingSession( - master="", - hooks=[log_hook], - checkpoint_dir=config.logdir, - save_checkpoint_secs=120, - save_summaries_steps=config.summarize_every, - log_step_count_steps=config.summarize_every*20) as sess: - cur_step = -1 - while cur_step <= config.max_steps and not sess.should_stop(): - cur_step = sess.run(global_step) - _, cur_step = sess.run([train_op, global_step]) - - -def run_eval(config): - """Evaluates a Gaussian HMM using the given config.""" - - def create_bound(model, xs, lengths): - """Creates the bound to be evaluated.""" - if config.bound == "elbo": - ll_per_seq, log_weights, _ = bounds.iwae( - model, xs, lengths, num_samples=1, - parallel_iterations=config.parallel_iterations - ) - elif config.bound == "iwae": - ll_per_seq, log_weights, _ = bounds.iwae( - model, 
xs, lengths, num_samples=config.num_samples, - parallel_iterations=config.parallel_iterations - ) - elif config.bound == "fivo": - ll_per_seq, log_weights, resampled, _ = bounds.fivo( - model, xs, lengths, - num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - resampling_type=config.resampling_type, - random_seed=config.random_seed, - parallel_iterations=config.parallel_iterations - ) - # Compute bound scaled by number of timesteps. - bound_per_t = ll_per_seq / tf.to_float(lengths) - if config.bound == "fivo": - return bound_per_t, log_weights, resampled - else: - return bound_per_t, log_weights - - def create_graph(): - """Creates the dataset, model, and bound.""" - xs, lengths = datasets.create_chain_graph_dataset( - config.batch_size, - config.num_timesteps, - steps_per_observation=1, - state_size=1, - transition_variance=config.variance, - observation_variance=config.variance) - model = ghmm.TrainableGaussianHMM( - config.num_timesteps, - config.proposal_type, - transition_variances=config.variance, - emission_variances=config.variance, - random_seed=config.random_seed) - true_likelihood = tf.reduce_mean( - model.likelihood(tf.squeeze(xs)) / tf.to_float(lengths)) - outs = [true_likelihood] - outs.extend(list(create_bound(model, xs, lengths))) - return outs - - with tf.Graph().as_default(): - if config.random_seed: - tf.set_random_seed(config.random_seed) - np.random.seed(config.random_seed) - graph_outs = create_graph() - with tf.train.SingularMonitoredSession( - checkpoint_dir=config.logdir) as sess: - outs = sess.run(graph_outs) - likelihood = outs[0] - avg_bound = np.mean(outs[1]) - std = np.std(outs[1]) - log_weights = outs[2] - log_weight_variances = np.var(log_weights, axis=2) - avg_log_weight_variance = np.var(log_weight_variances, axis=1) - avg_log_weight = np.mean(log_weights, axis=(1, 2)) - data = {"mean": avg_bound, "std": std, "log_weights": log_weights, - "log_weight_means": avg_log_weight, - "log_weight_variances": avg_log_weight_variance} - if len(outs) == 4: - data["resampled"] = outs[3] - data["avg_resampled"] = np.mean(outs[3], axis=1) - # Log some useful statistics. - tf.logging.info("Evaled bound %s with batch_size: %d, num_samples: %d." - % (config.bound, config.batch_size, config.num_samples)) - tf.logging.info("mean: %f, std: %f" % (avg_bound, std)) - tf.logging.info("true likelihood: %s" % likelihood) - tf.logging.info("avg log weight: %s" % avg_log_weight) - tf.logging.info("log weight variance: %s" % avg_log_weight_variance) - if len(outs) == 4: - tf.logging.info("avg resamples per t: %s" % data["avg_resampled"]) - if not tf.gfile.Exists(config.logdir): - tf.gfile.MakeDirs(config.logdir) - with tf.gfile.Open(os.path.join(config.logdir, "out.npz"), "w") as fout: - np.save(fout, data) diff --git a/research/fivo/fivo/ghmm_runners_test.py b/research/fivo/fivo/ghmm_runners_test.py deleted file mode 100644 index 50044ad47..000000000 --- a/research/fivo/fivo/ghmm_runners_test.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.ghmm_runners.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import numpy as np -import tensorflow as tf - -from fivo import ghmm_runners - - -class GHMMRunnersTest(tf.test.TestCase): - - def default_config(self): - class Config(object): - pass - config = Config() - config.model = "ghmm" - config.bound = "fivo" - config.proposal_type = "prior" - config.batch_size = 4 - config.num_samples = 4 - config.num_timesteps = 10 - config.variance = 0.1 - config.resampling_type = "multinomial" - config.random_seed = 1234 - config.parallel_iterations = 1 - config.learning_rate = 1e-4 - config.summarize_every = 1 - config.max_steps = 1 - return config - - def test_eval_ghmm_notraining_fivo_prior(self): - self.eval_ghmm_notraining("fivo", "prior", -3.063864) - - def test_eval_ghmm_notraining_fivo_true_filtering(self): - self.eval_ghmm_notraining("fivo", "true-filtering", -1.1409812) - - def test_eval_ghmm_notraining_fivo_true_smoothing(self): - self.eval_ghmm_notraining("fivo", "true-smoothing", -0.85592091) - - def test_eval_ghmm_notraining_iwae_prior(self): - self.eval_ghmm_notraining("iwae", "prior", -5.9730167) - - def test_eval_ghmm_notraining_iwae_true_filtering(self): - self.eval_ghmm_notraining("iwae", "true-filtering", -1.1485999) - - def test_eval_ghmm_notraining_iwae_true_smoothing(self): - self.eval_ghmm_notraining("iwae", "true-smoothing", -0.85592091) - - def eval_ghmm_notraining(self, bound, proposal_type, expected_bound_avg): - config = self.default_config() - config.proposal_type = proposal_type - config.bound = bound - config.logdir = os.path.join( - tf.test.get_temp_dir(), "test-ghmm-%s-%s" % (proposal_type, bound)) - - ghmm_runners.run_eval(config) - - data = np.load(os.path.join(config.logdir, "out.npz")).item() - self.assertAlmostEqual(expected_bound_avg, data["mean"], places=3) - - def test_train_ghmm_for_one_step_and_eval_fivo_filtering(self): - self.train_ghmm_for_one_step_and_eval("fivo", "filtering", -16.727108) - - def test_train_ghmm_for_one_step_and_eval_fivo_smoothing(self): - self.train_ghmm_for_one_step_and_eval("fivo", "smoothing", -19.381277) - - def test_train_ghmm_for_one_step_and_eval_iwae_filtering(self): - self.train_ghmm_for_one_step_and_eval("iwae", "filtering", -33.31966) - - def test_train_ghmm_for_one_step_and_eval_iwae_smoothing(self): - self.train_ghmm_for_one_step_and_eval("iwae", "smoothing", -46.388447) - - def train_ghmm_for_one_step_and_eval(self, bound, proposal_type, expected_bound_avg): - config = self.default_config() - config.proposal_type = proposal_type - config.bound = bound - config.max_steps = 1 - config.logdir = os.path.join( - tf.test.get_temp_dir(), "test-ghmm-training-%s-%s" % (proposal_type, bound)) - ghmm_runners.run_train(config) - ghmm_runners.run_eval(config) - data = np.load(os.path.join(config.logdir, "out.npz")).item() - self.assertAlmostEqual(expected_bound_avg, data["mean"], places=2) - - -if __name__ == "__main__": - tf.test.main() diff --git a/research/fivo/fivo/models/__init__.py b/research/fivo/fivo/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/fivo/fivo/models/base.py b/research/fivo/fivo/models/base.py deleted file mode 100644 index 5ffcb7af2..000000000 --- 
a/research/fivo/fivo/models/base.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Reusable model classes for FIVO.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sonnet as snt -import tensorflow as tf - -from fivo import nested_utils as nested - -tfd = tf.contrib.distributions - - -class ELBOTrainableSequenceModel(object): - """An abstract class for ELBO-trainable sequence models to extend. - - Because the ELBO, IWAE, and FIVO bounds all accept the same arguments, - any model that is ELBO-trainable is also IWAE- and FIVO-trainable. - """ - - def zero_state(self, batch_size, dtype): - """Returns the initial state of the model as a Tensor or tuple of Tensors. - - Args: - batch_size: The batch size. - dtype: The datatype to use for the state. - """ - raise NotImplementedError("zero_state not yet implemented.") - - def set_observations(self, observations, seq_lengths): - """Sets the observations for the model. - - This method provides the model with all observed variables including both - inputs and targets. It will be called before running any computations with - the model that require the observations, e.g. training the model or - computing bounds, and should be used to run any necessary preprocessing - steps. - - Args: - observations: A potentially nested set of Tensors containing - all observations for the model, both inputs and targets. Typically - a set of Tensors with shape [max_seq_len, batch_size, data_size]. - seq_lengths: A [batch_size] Tensor of ints encoding the length of each - sequence in the batch (sequences can be padded to a common length). - """ - self.observations = observations - self.max_seq_len = tf.reduce_max(seq_lengths) - self.observations_ta = nested.tas_for_tensors( - observations, self.max_seq_len, clear_after_read=False) - self.seq_lengths = seq_lengths - - def propose_and_weight(self, state, t): - """Propagates model state one timestep and computes log weights. - - This method accepts the current state of the model and computes the state - for the next timestep as well as the incremental log weight of each - element in the batch. - - Args: - state: The current state of the model. - t: A scalar integer Tensor representing the current timestep. - Returns: - log_weights: A [batch_size] Tensor containing the incremental log weights. - next_state: The state of the model after one timestep. - """ - raise NotImplementedError("propose_and_weight not yet implemented.") - -DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), - "b": tf.zeros_initializer()}
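The interface above is the whole contract the bounds rely on. A minimal illustrative subclass (not from the original file): prior, proposal, and emission are all unit-variance normals, so log p(z) - log q(z) cancels and the incremental weight reduces to the emission log-density:

    class ToyModel(ELBOTrainableSequenceModel):

      def zero_state(self, batch_size, dtype):
        return tf.zeros([batch_size], dtype=dtype)

      def propose_and_weight(self, state, t):
        p_zt = tfd.Normal(loc=state, scale=tf.ones_like(state))  # prior == proposal
        zt = p_zt.sample()
        p_xt = tfd.Normal(loc=zt, scale=tf.ones_like(zt))
        xt = self.observations[t]  # stored by set_observations()
        log_weight = p_xt.log_prob(xt)
        return log_weight, zt  # (weight, state), matching the concrete models below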
- - -class ConditionalNormalDistribution(object): - """A Normal distribution conditioned on Tensor inputs via a fc network.""" - - def __init__(self, size, hidden_layer_sizes, sigma_min=0.0, - raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu, - initializers=None, name="conditional_normal_distribution"): - """Creates a conditional Normal distribution. - - Args: - size: The dimension of the random variable. - hidden_layer_sizes: The sizes of the hidden layers of the fully connected - network used to condition the distribution on the inputs. - sigma_min: The minimum standard deviation allowed, a scalar. - raw_sigma_bias: A scalar that is added to the raw standard deviation - output from the fully connected network. Set to 0.25 by default to - prevent standard deviations close to 0. - hidden_activation_fn: The activation function to use on the hidden layers - of the fully connected network. - initializers: The variable initializers to use for the fully connected - network. The network is implemented using snt.nets.MLP so it must - be a dictionary mapping the keys 'w' and 'b' to the initializers for - the weights and biases. Defaults to xavier for the weights and zeros - for the biases when initializers is None. - name: The name of this distribution, used for sonnet scoping. - """ - self.sigma_min = sigma_min - self.raw_sigma_bias = raw_sigma_bias - self.name = name - self.size = size - if initializers is None: - initializers = DEFAULT_INITIALIZERS - self.fcnet = snt.nets.MLP( - output_sizes=hidden_layer_sizes + [2*size], - activation=hidden_activation_fn, - initializers=initializers, - activate_final=False, - use_bias=True, - name=name + "_fcnet") - - def condition(self, tensor_list, **unused_kwargs): - """Computes the parameters of a normal distribution based on the inputs.""" - inputs = tf.concat(tensor_list, axis=1) - outs = self.fcnet(inputs) - mu, sigma = tf.split(outs, 2, axis=1) - sigma = tf.maximum(tf.nn.softplus(sigma + self.raw_sigma_bias), - self.sigma_min) - return mu, sigma - - def __call__(self, *args, **kwargs): - """Creates a normal distribution conditioned on the inputs.""" - mu, sigma = self.condition(args, **kwargs) - return tf.contrib.distributions.Normal(loc=mu, scale=sigma) - - -class ConditionalBernoulliDistribution(object): - """A Bernoulli distribution conditioned on Tensor inputs via a fc net.""" - - def __init__(self, size, hidden_layer_sizes, hidden_activation_fn=tf.nn.relu, - initializers=None, bias_init=0.0, - name="conditional_bernoulli_distribution"): - """Creates a conditional Bernoulli distribution. - - Args: - size: The dimension of the random variable. - hidden_layer_sizes: The sizes of the hidden layers of the fully connected - network used to condition the distribution on the inputs. - hidden_activation_fn: The activation function to use on the hidden layers - of the fully connected network. - initializers: The variable initializers to use for the fully connected - network. The network is implemented using snt.nets.MLP so it must - be a dictionary mapping the keys 'w' and 'b' to the initializers for - the weights and biases. Defaults to xavier for the weights and zeros - for the biases when initializers is None. - bias_init: A scalar or vector Tensor that is added to the output of the - fully-connected network that parameterizes the mean of this - distribution.
- name: The name of this distribution, used for sonnet scoping. - """ - self.bias_init = bias_init - self.size = size - if initializers is None: - initializers = DEFAULT_INITIALIZERS - self.fcnet = snt.nets.MLP( - output_sizes=hidden_layer_sizes + [size], - activation=hidden_activation_fn, - initializers=initializers, - activate_final=False, - use_bias=True, - name=name + "_fcnet") - - def condition(self, tensor_list): - """Computes the p parameter of the Bernoulli distribution.""" - inputs = tf.concat(tensor_list, axis=1) - return self.fcnet(inputs) + self.bias_init - - def __call__(self, *args): - p = self.condition(args) - return tf.contrib.distributions.Bernoulli(logits=p) - - -class NormalApproximatePosterior(ConditionalNormalDistribution): - """A Normally-distributed approx. posterior with res_q parameterization.""" - - def __init__(self, size, hidden_layer_sizes, sigma_min=0.0, - raw_sigma_bias=0.25, hidden_activation_fn=tf.nn.relu, - initializers=None, smoothing=False, - name="conditional_normal_distribution"): - super(NormalApproximatePosterior, self).__init__( - size, hidden_layer_sizes, sigma_min=sigma_min, - raw_sigma_bias=raw_sigma_bias, - hidden_activation_fn=hidden_activation_fn, initializers=initializers, - name=name) - self.smoothing = smoothing - - def condition(self, tensor_list, prior_mu, smoothing_tensors=None): - """Generates the mean and variance of the normal distribution. - - Args: - tensor_list: The list of Tensors to condition on. Will be concatenated and - fed through a fully connected network. - prior_mu: The mean of the prior distribution associated with this - approximate posterior. Will be added to the mean produced by - this approximate posterior, in res_q fashion. - smoothing_tensors: A list of Tensors. If smoothing is True, these Tensors - will be concatenated with the tensors in tensor_list. - Returns: - mu: The mean of the approximate posterior. - sigma: The standard deviation of the approximate posterior. - """ - if self.smoothing: - tensor_list.extend(smoothing_tensors) - mu, sigma = super(NormalApproximatePosterior, self).condition(tensor_list) - return mu + prior_mu, sigma
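In res_q form the network only has to model a residual from the prior's mean, which keeps the proposal close to the prior early in training. Schematically (informal notation, not code from the file):

    q(z_t | inputs) = Normal(loc=prior_mu + delta(inputs), scale=sigma(inputs))

where delta and sigma are the two outputs of the fcnet inherited from ConditionalNormalDistribution.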
- - -class NonstationaryLinearDistribution(object): - """A set of loc-scale distributions that are linear functions of inputs. - - This class defines a series of location-scale distributions such that - the means are learnable linear functions of the inputs and the log variances - are learnable constants. The functions and log variances are different across - timesteps, allowing the distributions to be nonstationary. - """ - - def __init__(self, - num_timesteps, - inputs_per_timestep=None, - outputs_per_timestep=None, - initializers=None, - variance_min=0.0, - output_distribution=tfd.Normal, - dtype=tf.float32): - """Creates a NonstationaryLinearDistribution. - - Args: - num_timesteps: The number of timesteps, i.e. the number of distributions. - inputs_per_timestep: A list of python ints, the dimension of inputs to the - linear function at each timestep. If not provided, the dimension at each - timestep is assumed to be 1. - outputs_per_timestep: A list of python ints, the dimension of the output - distribution at each timestep. If not provided, the dimension at each - timestep is assumed to be 1. - initializers: A dictionary containing initializers for the variables. The - initializer under the key 'w' is used for the weights in the linear - function and the initializer under the key 'b' is used for the biases. - Defaults to xavier initialization for the weights and zeros for the - biases. - variance_min: Python float, the minimum variance of each distribution. - output_distribution: A location-scale subclass of tfd.Distribution that - defines the output distribution, e.g. Normal. - dtype: The dtype of the weights and biases. - """ - if not initializers: - initializers = DEFAULT_INITIALIZERS - if not inputs_per_timestep: - inputs_per_timestep = [1] * num_timesteps - if not outputs_per_timestep: - outputs_per_timestep = [1] * num_timesteps - self.num_timesteps = num_timesteps - self.variance_min = variance_min - self.initializers = initializers - self.dtype = dtype - self.output_distribution = output_distribution - - def _get_variables_ta(shapes, name, initializer, trainable=True): - """Creates a sequence of variables and stores them in a TensorArray.""" - # Infer shape if all shapes are equal. - first_shape = shapes[0] - infer_shape = all(shape == first_shape for shape in shapes) - ta = tf.TensorArray( - dtype=dtype, size=len(shapes), dynamic_size=False, - clear_after_read=False, infer_shape=infer_shape) - for t, shape in enumerate(shapes): - var = tf.get_variable( - name % t, shape=shape, initializer=initializer, trainable=trainable) - ta = ta.write(t, var) - return ta - - bias_shapes = [[num_outputs] for num_outputs in outputs_per_timestep] - self.log_variances = _get_variables_ta( - bias_shapes, "proposal_log_variance_%d", initializers["b"]) - self.mean_biases = _get_variables_ta( - bias_shapes, "proposal_b_%d", initializers["b"]) - weight_shapes = zip(inputs_per_timestep, outputs_per_timestep) - self.mean_weights = _get_variables_ta( - weight_shapes, "proposal_w_%d", initializers["w"]) - self.shapes = tf.TensorArray( - dtype=tf.int32, size=num_timesteps, - dynamic_size=False, clear_after_read=False).unstack(weight_shapes) - - def __call__(self, t, inputs): - """Computes the distribution at timestep t. - - Args: - t: Scalar integer Tensor, the current timestep. Must be in - [0, num_timesteps). - inputs: The inputs to the linear function parameterizing the mean of - the current distribution. A Tensor of shape [batch_size, num_inputs_t]. - Returns: - A tfd.Distribution subclass representing the distribution at timestep t. - """ - b = self.mean_biases.read(t) - w = self.mean_weights.read(t) - shape = self.shapes.read(t) - w = tf.reshape(w, shape) - b = tf.reshape(b, [shape[1], 1]) - log_variance = self.log_variances.read(t) - scale = tf.sqrt(tf.maximum(tf.exp(log_variance), self.variance_min)) - loc = tf.matmul(w, inputs, transpose_a=True) + b - return self.output_distribution(loc=loc, scale=scale) - - -def encode_all(inputs, encoder): - """Encodes a timeseries of inputs with a time independent encoder. - - Args: - inputs: A [time, batch, feature_dimensions] tensor. - encoder: A network that takes a [batch, feature_dimensions] input and - encodes the input. - Returns: - A [time, batch, encoded_feature_dimensions] output tensor. - """ - input_shape = tf.shape(inputs) - num_timesteps, batch_size = input_shape[0], input_shape[1] - reshaped_inputs = tf.reshape(inputs, [-1, inputs.shape[-1]]) - inputs_encoded = encoder(reshaped_inputs) - inputs_encoded = tf.reshape(inputs_encoded, - [num_timesteps, batch_size, encoder.output_size]) - return inputs_encoded - - -def ta_for_tensor(x, **kwargs): - """Creates a TensorArray for the input tensor.""" - return tf.TensorArray( - x.dtype, tf.shape(x)[0], dynamic_size=False, **kwargs).unstack(x)
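encode_all folds time into the batch dimension so the encoder only ever sees rank-2 input. A quick shape sketch (all sizes arbitrary):

    inputs = tf.zeros([20, 8, 88])            # [time, batch, features]
    encoder = snt.nets.MLP([64], name="enc")  # applied independently per timestep
    encoded = encode_all(inputs, encoder)     # -> [20, 8, 64]
    ta = ta_for_tensor(encoded)               # TensorArray of 20 slices of [8, 64]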
diff --git a/research/fivo/fivo/models/ghmm.py b/research/fivo/fivo/models/ghmm.py deleted file mode 100644 index 07cf6c50e..000000000 --- a/research/fivo/fivo/models/ghmm.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A Gaussian hidden Markov model. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from fivo.models import base - -tfd = tf.contrib.distributions - - -class GaussianHMM(object): - """A hidden Markov model with 1-D Gaussian latent space and observations. - - This is a hidden Markov model where the state and observations are - one-dimensional Gaussians. The mean of each latent state is a linear - function of the previous latent state, and the mean of each observation - is a linear function of the current latent state. - - The description that follows is 0-indexed instead of 1-indexed to make - it easier to reason about the parameters passed to the model. - - The parameters of the model are: - T: The number of timesteps, latent states, and observations. - vz_t, t=0 to T-1: The variance of the latent state at timestep t. - vx_t, t=0 to T-1: The variance of the observation at timestep t. - wz_t, t=1 to T-1: The weight that defines the latent transition at t. - wx_t, t=0 to T-1: The weight that defines the observation function at t. - - There are T vz_t, vx_t, and wx_t but only T-1 wz_t because there are only - T-1 transitions in the model. - - Given these parameters, sampling from the model is defined as - - z_0 ~ N(0, vz_0) - x_0 | z_0 ~ N(wx_0 * z_0, vx_0) - z_1 | z_0 ~ N(wz_1 * z_0, vz_1) - x_1 | z_1 ~ N(wx_1 * z_1, vx_1) - ... - z_{T-1} | z_{T-2} ~ N(wz_{T-1} * z_{T-2}, vz_{T-1}) - x_{T-1} | z_{T-1} ~ N(wx_{T-1} * z_{T-1}, vx_{T-1}). - """
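  # Illustrative note, not in the original file: with all weights and
  # variances equal to one, the process above is a Gaussian random walk
  # observed under unit noise, e.g. in numpy:
  #   z = np.cumsum(np.random.randn(T))  # z_t = z_{t-1} + eps_t
  #   x = z + np.random.randn(T)         # x_t = z_t + nu_t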
- - def __init__(self, - num_timesteps, - transition_variances=1., - emission_variances=1., - transition_weights=1., - emission_weights=1., - dtype=tf.float32): - """Creates a Gaussian hidden Markov model. - - Args: - num_timesteps: A python int, the number of timesteps in the model. - transition_variances: The variance of p(z_t | z_t-1). Can be a scalar, - setting all variances to be the same, or a Tensor of shape - [num_timesteps]. - emission_variances: The variance of p(x_t | z_t). Can be a scalar, - setting all variances to be the same, or a Tensor of shape - [num_timesteps]. - transition_weights: The weight that defines the linear function that - produces the mean of z_t given z_{t-1}. Can be a scalar, setting - all weights to be the same, or a Tensor of shape [num_timesteps-1]. - emission_weights: The weight that defines the linear function that - produces the mean of x_t given z_t. Can be a scalar, setting - all weights to be the same, or a Tensor of shape [num_timesteps]. - dtype: The datatype of the state. - """ - self.num_timesteps = num_timesteps - self.dtype = dtype - - def _expand_param(param, size): - param = tf.convert_to_tensor(param, dtype=self.dtype) - if not param.get_shape().as_list(): - param = tf.tile(param[tf.newaxis], [size]) - - return param - - def _ta_for_param(param): - size = tf.shape(param)[0] - ta = tf.TensorArray(dtype=param.dtype, - size=size, - dynamic_size=False, - clear_after_read=False).unstack(param) - return ta - - self.transition_variances = _ta_for_param( - _expand_param(transition_variances, num_timesteps)) - self.transition_weights = _ta_for_param( - _expand_param(transition_weights, num_timesteps-1)) - em_var = _expand_param(emission_variances, num_timesteps) - self.emission_variances = _ta_for_param(em_var) - em_w = _expand_param(emission_weights, num_timesteps) - self.emission_weights = _ta_for_param(em_w) - self._compute_covariances(em_w, em_var) - - def _compute_covariances(self, emission_weights, emission_variances): - """Compute all covariance matrices. - - Computes the covariance matrix for the latent variables, the observations, - and the covariance between the latents and observations. - - Args: - emission_weights: A Tensor of shape [num_timesteps] containing - the emission distribution weights at each timestep. - emission_variances: A Tensor of shape [num_timesteps] containing - the emission distribution variances at each timestep. - """ - # Compute the marginal variance of each latent. - z_variances = [self.transition_variances.read(0)] - for i in range(1, self.num_timesteps): - z_variances.append( - z_variances[i-1] * tf.square(self.transition_weights.read(i-1)) + - self.transition_variances.read(i)) - # Compute the latent covariance matrix. - sigma_z = [] - for i in range(self.num_timesteps): - sigma_z_row = [] - for j in range(self.num_timesteps): - if i == j: - sigma_z_row.append(z_variances[i]) - continue - min_ind = min(i, j) - max_ind = max(i, j) - weight = tf.reduce_prod( - self.transition_weights.gather(tf.range(min_ind, max_ind))) - sigma_z_row.append(z_variances[min_ind] * weight) - sigma_z.append(tf.stack(sigma_z_row)) - self.sigma_z = tf.stack(sigma_z) - # Compute the observation covariance matrix. - x_weights_outer = tf.einsum("i,j->ij", emission_weights, emission_weights) - self.sigma_x = x_weights_outer * self.sigma_z + tf.diag(emission_variances) - # Compute the latent - observation covariance matrix. - # The first axis will index latents, the second axis will index observations. - self.sigma_zx = emission_weights[tf.newaxis, :] * self.sigma_z - self.obs_dist = tfd.MultivariateNormalFullCovariance( - loc=tf.zeros([self.num_timesteps], dtype=tf.float32), - covariance_matrix=self.sigma_x) - - def transition(self, t, z_prev): - """Compute the transition distribution p(z_t | z_t-1). - - Args: - t: The current timestep, a scalar integer Tensor. When t=0 z_prev is - mostly ignored and the distribution p(z_0) is returned. z_prev is - 'mostly' ignored because it is still used to derive batch_size. - z_prev: A [batch_size] set of states. - Returns: - p(z_t | z_t-1) as a univariate normal distribution. - """ - batch_size = tf.shape(z_prev)[0] - scale = tf.sqrt(self.transition_variances.read(t)) - scale = tf.tile(scale[tf.newaxis], [batch_size]) - loc = tf.cond(tf.greater(t, 0), - lambda: self.transition_weights.read(t-1)*z_prev, - lambda: tf.zeros_like(scale)) - return tfd.Normal(loc=loc, scale=scale) - - def emission(self, t, z): - """Compute the emission distribution p(x_t | z_t). - - Args: - t: The current timestep, a scalar integer Tensor. - z: A [batch_size] set of the current states. - Returns: - p(x_t | z_t) as a univariate normal distribution. - """ - batch_size = tf.shape(z)[0] - scale = tf.sqrt(self.emission_variances.read(t)) - scale = tf.tile(scale[tf.newaxis], [batch_size]) - loc = self.emission_weights.read(t)*z - return tfd.Normal(loc=loc, scale=scale) - - def filtering(self, t, z_prev, x_cur): - """Computes the filtering distribution p(z_t | z_{t-1}, x_t). - - Args: - t: A python int, the index for z_t. When t is 0, z_prev is ignored, - giving p(z_0 | x_0). - z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape - [batch_size]. - x_cur: x_t, the current x to condition on. A Tensor of shape [batch_size]. - Returns: - p(z_t | z_{t-1}, x_t) as a univariate normal distribution. - """ - z_prev = tf.convert_to_tensor(z_prev) - x_cur = tf.convert_to_tensor(x_cur) - batch_size = tf.shape(z_prev)[0] - z_var = self.transition_variances.read(t) - x_var = self.emission_variances.read(t) - x_weight = self.emission_weights.read(t) - prev_state_weight = x_var/(tf.square(x_weight)*z_var + x_var) - prev_state_weight *= tf.cond(tf.greater(t, 0), - lambda: self.transition_weights.read(t-1), - lambda: tf.zeros_like(prev_state_weight)) - cur_obs_weight = (x_weight*z_var)/(tf.square(x_weight)*z_var + x_var) - loc = prev_state_weight*z_prev + cur_obs_weight*x_cur - scale = tf.sqrt((z_var*x_var)/(tf.square(x_weight)*z_var + x_var)) - scale = tf.tile(scale[tf.newaxis], [batch_size]) - return tfd.Normal(loc=loc, scale=scale)
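  # Quick numeric check of the filtering formula above; it matches
  # test_filtering_no_weights in ghmm_test.py below. With unit weights,
  # vz_0 = 1, vx_0 = 4, and x_0 = 3:
  #   mean = (vz_0 / (vz_0 + vx_0)) * x_0 = 3/5
  #   var  = (vz_0 * vx_0) / (vz_0 + vx_0) = 4/5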
- def smoothing(self, t, z_prev, xs): - """Computes the smoothing distribution p(z_t | z_{t-1}, x_{t:num_timesteps}). - - Args: - t: A python int, the index for z_t. When t is 0, z_prev is ignored, - giving p(z_0 | x_{0:num_timesteps-1}). - z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape - [batch_size]. - xs: x_{t:num_timesteps}, the future xs to condition on. A Tensor of shape - [num_timesteps - t, batch_size]. - Returns: - p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution. - """ - xs = tf.convert_to_tensor(xs) - z_prev = tf.convert_to_tensor(z_prev) - batch_size = tf.shape(xs)[1] - mess_mean, mess_prec = tf.cond( - tf.less(t, self.num_timesteps-1), - lambda: tf.unstack(self._compute_backwards_messages(xs[1:]).read(0)), - lambda: [tf.zeros([batch_size]), tf.zeros([batch_size])]) - return self._smoothing_from_message(t, z_prev, xs[0], mess_mean, mess_prec) - - def _smoothing_from_message(self, t, z_prev, x_t, mess_mean, mess_prec): - """Computes the smoothing distribution given message incoming to z_t. - - Computes p(z_t | z_{t-1}, x_{t:num_timesteps}) given the message incoming - to the node for z_t. - - Args: - t: A python int, the index for z_t. When t is 0, z_prev is ignored. - z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape - [batch_size]. - x_t: The observation x at timestep t. - mess_mean: The mean of the message incoming to z_t, in information form.
- mess_prec: The precision of the message incoming to z_t. - Returns: - p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution. - """ - - batch_size = tf.shape(x_t)[0] - z_var = self.transition_variances.read(t) - x_var = self.emission_variances.read(t) - w_x = self.emission_weights.read(t) - - def transition_term(): - return (tf.square(self.transition_weights.read(t))/ - self.transition_variances.read(t+1)) - - prec = 1./z_var + tf.square(w_x)/x_var + mess_prec - prec += tf.cond(tf.less(t, self.num_timesteps-1), - transition_term, lambda: 0.) - mean = x_t*(w_x/x_var) + mess_mean - mean += tf.cond(tf.greater(t, 0), - lambda: z_prev*(self.transition_weights.read(t-1)/z_var), - lambda: 0.) - mean = tf.reshape(mean / prec, [batch_size]) - scale = tf.reshape(tf.sqrt(1./prec), [batch_size]) - return tfd.Normal(loc=mean, scale=scale) - - def _compute_backwards_messages(self, xs): - """Computes the backwards messages used in smoothing.""" - batch_size = tf.shape(xs)[1] - num_xs = tf.shape(xs)[0] - until_t = self.num_timesteps - num_xs - xs = tf.TensorArray(dtype=xs.dtype, - size=num_xs, - dynamic_size=False, - clear_after_read=True).unstack(xs) - messages_ta = tf.TensorArray(dtype=xs.dtype, - size=num_xs, - dynamic_size=False, - clear_after_read=False) - - def compute_message(t, prev_mean, prev_prec, messages_ta): - """Computes one step of the backwards messages.""" - z_var = self.transition_variances.read(t) - w_z = self.transition_weights.read(t-1) - x_var = self.emission_variances.read(t) - w_x = self.emission_weights.read(t) - cur_x = xs.read(t - until_t) - - # If it isn't the first message, add the terms from the transition. - def transition_term(): - return (tf.square(self.transition_weights.read(t))/ - self.transition_variances.read(t+1)) - - unary_prec = 1/z_var + tf.square(w_x)/x_var - unary_prec += tf.cond(tf.less(t, self.num_timesteps-1), - transition_term, lambda: 0.) - - unary_mean = (w_x / x_var) * cur_x - pairwise_prec = w_z / z_var - - next_prec = -tf.square(pairwise_prec)/(unary_prec + prev_prec) - next_mean = (pairwise_prec * (unary_mean + prev_mean) / - (unary_prec + prev_prec)) - next_prec = tf.reshape(next_prec, [batch_size]) - next_mean = tf.reshape(next_mean, [batch_size]) - messages_ta = messages_ta.write(t - until_t, - tf.stack([next_mean, next_prec])) - return t-1, next_mean, next_prec, messages_ta - - def pred(t, *unused_args): - return tf.greater_equal(t, until_t) - - init_prec = tf.zeros([batch_size], dtype=xs.dtype) - init_mean = tf.zeros([batch_size], dtype=xs.dtype) - t0 = tf.constant(self.num_timesteps - 1, dtype=tf.int32) - - outs = tf.while_loop(pred, compute_message, - (t0, init_mean, init_prec, messages_ta)) - messages = outs[-1] - return messages - - def lookahead(self, t, z_prev): - """Compute the 'lookahead' distribution, p(x_{t:T} | z_{t-1}). - - Args: - t: A scalar Tensor int, the current timestep. Must be at least 1. - z_prev: The latent state at time t-1. A Tensor of shape [batch_size]. - Returns: - p(x_{t:T} | z_{t-1}) as a multivariate normal distribution. - """ - z_prev = tf.convert_to_tensor(z_prev) - sigma_zx = self.sigma_zx[t-1, t:] - z_var = self.sigma_z[t-1, t-1] - mean = tf.einsum("i,j->ij", z_prev, sigma_zx) / z_var - variance = (self.sigma_x[t:, t:] - - tf.einsum("i,j->ij", sigma_zx, sigma_zx) / z_var) - return tfd.MultivariateNormalFullCovariance( - loc=mean, covariance_matrix=variance) - - def likelihood(self, xs): - """Compute the true marginal likelihood of the data. 
- - Args: - xs: The observations, a [num_timesteps, batch_size] float Tensor. - Returns: - likelihoods: A [batch_size] float Tensor representing the likelihood of - each sequence of observations in the batch. - """ - return self.obs_dist.log_prob(tf.transpose(xs)) - - -class TrainableGaussianHMM(GaussianHMM, base.ELBOTrainableSequenceModel): - """An interface between importance-sampling training methods and the GHMM.""" - - def __init__(self, - num_timesteps, - proposal_type, - transition_variances=1., - emission_variances=1., - transition_weights=1., - emission_weights=1., - random_seed=None, - dtype=tf.float32): - """Constructs a trainable Gaussian HMM. - - Args: - num_timesteps: A python int, the number of timesteps in the model. - proposal_type: The type of proposal to use in the importance sampling - setup. Could be "filtering", "smoothing", "prior", "true-filtering", - or "true-smoothing". If "true-filtering" or "true-smoothing" are - selected, then the true filtering or smoothing distributions are used to - propose new states. If "filtering" is selected then a - distribution with learnable parameters is used. Specifically at each - timestep the proposal is Gaussian with mean that is a learnable linear - function of the previous state and current observation. The log variance - is a per-timestep learnable constant. "smoothing" is similar, - but the mean is a learnable linear function of the previous state and - all future observations. Note that this proposal class includes the true - posterior. If "prior" is selected then states are proposed from the - model's prior. - transition_variances: The variance of p(z_t | z_t-1). Can be a scalar, - setting all variances to be the same, or a Tensor of shape - [num_timesteps]. - emission_variances: The variance of p(x_t | z_t). Can be a scalar, - setting all variances to be the same, or a Tensor of shape - [num_timesteps]. - transition_weights: The weight that defines the linear function that - produces the mean of z_t given z_{t-1}. Can be a scalar, setting - all weights to be the same, or a Tensor of shape [num_timesteps-1]. - emission_weights: The weight that defines the linear function that - produces the mean of x_t given z_t. Can be a scalar, setting - all weights to be the same, or a Tensor of shape [num_timesteps]. - random_seed: A seed for the proposal sampling, mainly useful for testing. - dtype: The datatype of the state.
- """ - super(TrainableGaussianHMM, self).__init__( - num_timesteps, transition_variances, emission_variances, - transition_weights, emission_weights, dtype=dtype) - self.random_seed = random_seed - assert proposal_type in ["filtering", "smoothing", "prior", - "true-filtering", "true-smoothing"] - if proposal_type == "true-filtering": - self.proposal = self._filtering_proposal - elif proposal_type == "true-smoothing": - self.proposal = self._smoothing_proposal - elif proposal_type == "prior": - self.proposal = self.transition - elif proposal_type == "filtering": - self._learned_proposal_fn = base.NonstationaryLinearDistribution( - num_timesteps, inputs_per_timestep=[1] + [2] * (num_timesteps-1)) - self.proposal = self._learned_filtering_proposal - elif proposal_type == "smoothing": - inputs_per_timestep = [num_timesteps] + [num_timesteps - t - for t in range(num_timesteps-1)] - self._learned_proposal_fn = base.NonstationaryLinearDistribution( - num_timesteps, inputs_per_timestep=inputs_per_timestep) - self.proposal = self._learned_smoothing_proposal - - def set_observations(self, xs, seq_lengths): - """Sets the observations and stores the backwards messages.""" - # Squeeze out data dimension since everything is 1-d. - xs = tf.squeeze(xs) - self.batch_size = tf.shape(xs)[1] - super(TrainableGaussianHMM, self).set_observations(xs, seq_lengths) - self.messages = self._compute_backwards_messages(xs[1:]) - - def zero_state(self, batch_size, dtype): - return tf.zeros([batch_size], dtype=dtype) - - def propose_and_weight(self, state, t): - """Computes the next state and log weights for the GHMM.""" - state_shape = tf.shape(state) - xt = self.observations[t] - p_zt = self.transition(t, state) - q_zt = self.proposal(t, state) - zt = q_zt.sample(seed=self.random_seed) - zt = tf.reshape(zt, state_shape) - p_xt_given_zt = self.emission(t, zt) - log_p_zt = p_zt.log_prob(zt) - log_q_zt = q_zt.log_prob(zt) - log_p_xt_given_zt = p_xt_given_zt.log_prob(xt) - weight = log_p_zt + log_p_xt_given_zt - log_q_zt - return weight, zt - - def _filtering_proposal(self, t, state): - """Uses the stored observations to compute the filtering distribution.""" - cur_x = self.observations[t] - return self.filtering(t, state, cur_x) - - def _smoothing_proposal(self, t, state): - """Uses the stored messages to compute the smoothing distribution.""" - mess_mean, mess_prec = tf.cond( - tf.less(t, self.num_timesteps-1), - lambda: tf.unstack(self.messages.read(t)), - lambda: [tf.zeros([self.batch_size]), tf.zeros([self.batch_size])]) - return self._smoothing_from_message(t, state, self.observations[t], - mess_mean, mess_prec) - - def _learned_filtering_proposal(self, t, state): - cur_x = self.observations[t] - inputs = tf.cond(tf.greater(t, 0), - lambda: tf.stack([state, cur_x], axis=0), - lambda: cur_x[tf.newaxis, :]) - return self._learned_proposal_fn(t, inputs) - - def _learned_smoothing_proposal(self, t, state): - xs = self.observations_ta.gather(tf.range(t, self.num_timesteps)) - inputs = tf.cond(tf.greater(t, 0), - lambda: tf.concat([state[tf.newaxis, :], xs], axis=0), - lambda: xs) - return self._learned_proposal_fn(t, inputs) diff --git a/research/fivo/fivo/models/ghmm_test.py b/research/fivo/fivo/models/ghmm_test.py deleted file mode 100644 index 15a03c0c7..000000000 --- a/research/fivo/fivo/models/ghmm_test.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.models.ghmm""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from fivo.models.ghmm import GaussianHMM -from fivo.models.ghmm import TrainableGaussianHMM - - -class GHMMTest(tf.test.TestCase): - - def test_transition_no_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - transition_variances=[1., 2., 3.]) - prev_z = tf.constant([1., 2.], dtype=tf.float32) - z0 = ghmm.transition(0, prev_z) - z1 = ghmm.transition(1, prev_z) - z2 = ghmm.transition(2, prev_z) - outs = sess.run([z0.mean(), z0.variance(), - z1.mean(), z1.variance(), - z2.mean(), z2.variance()]) - self.assertAllClose(outs, [[0., 0.], [1., 1.], - [1., 2.], [2., 2.], - [1., 2.], [3., 3.]]) - - def test_transition_with_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - transition_variances=[1., 2., 3.], - transition_weights=[2., 3.]) - prev_z = tf.constant([1., 2.], dtype=tf.float32) - z0 = ghmm.transition(0, prev_z) - z1 = ghmm.transition(1, prev_z) - z2 = ghmm.transition(2, prev_z) - outs = sess.run([z0.mean(), z0.variance(), - z1.mean(), z1.variance(), - z2.mean(), z2.variance()]) - self.assertAllClose(outs, [[0., 0.], [1., 1.], - [2., 4.], [2., 2.], - [3., 6.], [3., 3.]]) - - def test_emission_no_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, emission_variances=[1., 2., 3.]) - z = tf.constant([1., 2.], dtype=tf.float32) - x0 = ghmm.emission(0, z) - x1 = ghmm.emission(1, z) - x2 = ghmm.emission(2, z) - outs = sess.run([x0.mean(), x0.variance(), - x1.mean(), x1.variance(), - x2.mean(), x2.variance()]) - self.assertAllClose(outs, [[1., 2.], [1., 1.], - [1., 2.], [2., 2.], - [1., 2.], [3., 3.]]) - - def test_emission_with_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - emission_variances=[1., 2., 3.], - emission_weights=[1., 2., 3.]) - z = tf.constant([1., 2.], dtype=tf.float32) - x0 = ghmm.emission(0, z) - x1 = ghmm.emission(1, z) - x2 = ghmm.emission(2, z) - outs = sess.run([x0.mean(), x0.variance(), - x1.mean(), x1.variance(), - x2.mean(), x2.variance()]) - self.assertAllClose(outs, [[1., 2.], [1., 1.], - [2., 4.], [2., 2.], - [3., 6.], [3., 3.]]) - - def test_filtering_no_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - transition_variances=[1., 2., 3.], - emission_variances=[4., 5., 6.]) - z_prev = tf.constant([1., 2.], dtype=tf.float32) - x_cur = tf.constant([3., 4.], dtype=tf.float32) - expected_outs = [[[3./5., 4./5.], [4./5., 4./5.]], - [[11./7., 18./7.], [10./7., 10./7.]], - [[5./3., 8./3.], [2., 2.]]] - f_post_0 = ghmm.filtering(0, z_prev, x_cur) - f_post_1 = ghmm.filtering(1, z_prev, x_cur) - f_post_2 = ghmm.filtering(2, z_prev, x_cur) - outs = sess.run([[f_post_0.mean(), f_post_0.variance()], - [f_post_1.mean(), 
f_post_1.variance()], - [f_post_2.mean(), f_post_2.variance()]]) - self.assertAllClose(expected_outs, outs) - - def test_filtering_with_weights(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - transition_variances=[1., 2., 3.], - emission_variances=[4., 5., 6.], - transition_weights=[7., 8.], - emission_weights=[9., 10., 11]) - z_prev = tf.constant([1., 2.], dtype=tf.float32) - x_cur = tf.constant([3., 4.], dtype=tf.float32) - expected_outs = [[[27./85., 36./85.], [4./85., 4./85.]], - [[95./205., 150./205.], [10./205., 10./205.]], - [[147./369., 228./369.], [18./369., 18./369.]]] - f_post_0 = ghmm.filtering(0, z_prev, x_cur) - f_post_1 = ghmm.filtering(1, z_prev, x_cur) - f_post_2 = ghmm.filtering(2, z_prev, x_cur) - outs = sess.run([[f_post_0.mean(), f_post_0.variance()], - [f_post_1.mean(), f_post_1.variance()], - [f_post_2.mean(), f_post_2.variance()]]) - self.assertAllClose(expected_outs, outs) - - def test_smoothing(self): - with self.test_session() as sess: - ghmm = GaussianHMM(3, - transition_variances=[1., 2., 3.], - emission_variances=[4., 5., 6.]) - z_prev = tf.constant([1., 2.], dtype=tf.float32) - xs = tf.constant([[1., 2.], - [3., 4.], - [5., 6.]], dtype=tf.float32) - s_post1 = ghmm.smoothing(0, z_prev, xs) - outs = sess.run([s_post1.mean(), s_post1.variance()]) - expected_outs = [[281./421., 410./421.], [292./421., 292./421.]] - self.assertAllClose(expected_outs, outs) - - expected_outs = [[149./73., 222./73.], [90./73., 90./73.]] - s_post2 = ghmm.smoothing(1, z_prev, xs[1:]) - outs = sess.run([s_post2.mean(), s_post2.variance()]) - self.assertAllClose(expected_outs, outs) - - s_post3 = ghmm.smoothing(2, z_prev, xs[2:]) - outs = sess.run([s_post3.mean(), s_post3.variance()]) - expected_outs = [[7./3., 10./3.], [2., 2.]] - self.assertAllClose(expected_outs, outs) - - def test_smoothing_with_weights(self): - with self.test_session() as sess: - x_weight = np.array([4, 5, 6, 7], dtype=np.float32) - sigma_x = np.array([5, 6, 7, 8], dtype=np.float32) - z_weight = np.array([1, 2, 3], dtype=np.float32) - sigma_z = np.array([1, 2, 3, 4], dtype=np.float32) - z_prev = np.array([1, 2], dtype=np.float32) - batch_size = 2 - xs = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32) - - z_cov, x_cov, z_x_cov = self._compute_covariance_matrices( - x_weight, z_weight, sigma_x, sigma_z) - - expected_outs = [] - # Compute mean and variance for z_0 when we don't condition - # on previous zs. - sigma_12 = z_x_cov[0, :] - sigma_12_22 = np.dot(sigma_12, np.linalg.inv(x_cov)) - mean = np.dot(sigma_12_22, xs) - variance = np.squeeze(z_cov[0, 0] - np.dot(sigma_12_22, sigma_12)) - expected_outs.append([mean, np.tile(variance, [batch_size])]) - - # Compute mean and variance for remaining z_ts. 
- for t in xrange(1, 4): - sigma_12 = np.concatenate([[z_cov[t, t - 1]], z_x_cov[t, t:]]) - sigma_22 = np.vstack(( - np.hstack((z_cov[t-1, t-1], z_x_cov[t-1, t:])), - np.hstack((np.transpose([z_x_cov[t-1, t:]]), x_cov[t:, t:])) - )) - sigma_12_22 = np.dot(sigma_12, np.linalg.inv(sigma_22)) - mean = np.dot(sigma_12_22, np.vstack((z_prev, xs[t:]))) - variance = np.squeeze(z_cov[t, t] - np.dot(sigma_12_22, sigma_12)) - expected_outs.append([mean, np.tile(variance, [batch_size])]) - - ghmm = GaussianHMM(4, - transition_variances=sigma_z, - emission_variances=sigma_x, - transition_weights=z_weight, - emission_weights=x_weight) - out_dists = [ghmm.smoothing(t, z_prev, xs[t:]) for t in range(0, 4)] - outs = [[d.mean(), d.variance()] for d in out_dists] - run_outs = sess.run(outs) - self.assertAllClose(expected_outs, run_outs) - - def test_covariance_matrices(self): - with self.test_session() as sess: - x_weight = np.array([4, 5, 6, 7], dtype=np.float32) - sigma_x = np.array([5, 6, 7, 8], dtype=np.float32) - z_weight = np.array([1, 2, 3], dtype=np.float32) - sigma_z = np.array([1, 2, 3, 4], dtype=np.float32) - - z_cov, x_cov, z_x_cov = self._compute_covariance_matrices( - x_weight, z_weight, sigma_x, sigma_z) - - ghmm = GaussianHMM(4, - transition_variances=sigma_z, - emission_variances=sigma_x, - transition_weights=z_weight, - emission_weights=x_weight) - self.assertAllClose(z_cov, sess.run(ghmm.sigma_z)) - self.assertAllClose(x_cov, sess.run(ghmm.sigma_x)) - self.assertAllClose(z_x_cov, sess.run(ghmm.sigma_zx)) - - def _compute_covariance_matrices(self, x_weight, z_weight, sigma_x, sigma_z): - # Create z covariance matrix from the definitions. - z_cov = np.zeros([4, 4]) - z_cov[0, 0] = sigma_z[0] - for i in range(1, 4): - z_cov[i, i] = (z_cov[i - 1, i - 1] * np.square(z_weight[i - 1]) + - sigma_z[i]) - for i in range(4): - for j in range(4): - if i == j: continue - min_ind = min(i, j) - max_ind = max(i, j) - weights = np.prod(z_weight[min_ind:max_ind]) - z_cov[i, j] = z_cov[min_ind, min_ind] * weights - # Compute the x covariance matrix and the z-x covariance matrix. 
- x_weights_outer = np.outer(x_weight, x_weight) - x_cov = x_weights_outer * z_cov + np.diag(sigma_x) - z_x_cov = x_weight * z_cov - return z_cov, x_cov, z_x_cov - - def test_lookahead(self): - x_weight = np.array([4, 5, 6, 7], dtype=np.float32) - sigma_x = np.array([5, 6, 7, 8], dtype=np.float32) - z_weight = np.array([1, 2, 3], dtype=np.float32) - sigma_z = np.array([1, 2, 3, 4], dtype=np.float32) - z_prev = np.array([1, 2], dtype=np.float32) - - with self.test_session() as sess: - z_cov, x_cov, z_x_cov = self._compute_covariance_matrices( - x_weight, z_weight, sigma_x, sigma_z) - - expected_outs = [] - for t in range(1, 4): - sigma_12 = z_x_cov[t-1, t:] - z_var = z_cov[t-1, t-1] - mean = np.outer(z_prev, sigma_12/z_var) - variance = x_cov[t:, t:] - np.outer(sigma_12, sigma_12)/ z_var - expected_outs.append([mean, variance]) - - ghmm = GaussianHMM(4, - transition_variances=sigma_z, - emission_variances=sigma_x, - transition_weights=z_weight, - emission_weights=x_weight) - out_dists = [ghmm.lookahead(t, z_prev) for t in range(1, 4)] - outs = [[d.mean(), d.covariance()] for d in out_dists] - run_outs = sess.run(outs) - self.assertAllClose(expected_outs, run_outs) - - -class TrainableGHMMTest(tf.test.TestCase): - - def test_filtering_proposal(self): - """Check that stashing the xs doesn't change the filtering distributions.""" - with self.test_session() as sess: - ghmm = TrainableGaussianHMM( - 3, "filtering", - transition_variances=[1., 2., 3.], - emission_variances=[4., 5., 6.], - transition_weights=[7., 8.], - emission_weights=[9., 10., 11]) - observations = tf.constant([[3., 4.], - [3., 4.], - [3., 4.]], dtype=tf.float32) - ghmm.set_observations(observations, [3, 3]) - z_prev = tf.constant([1., 2.], dtype=tf.float32) - - proposals = [ghmm._filtering_proposal(t, z_prev) for t in range(3)] - dist_params = [[p.mean(), p.variance()] for p in proposals] - - expected_outs = [[[27./85., 36./85.], [4./85., 4./85.]], - [[95./205., 150./205.], [10./205., 10./205.]], - [[147./369., 228./369.], [18./369., 18./369.]]] - self.assertAllClose(expected_outs, sess.run(dist_params)) - - def test_smoothing_proposal(self): - with self.test_session() as sess: - ghmm = TrainableGaussianHMM( - 3, "smoothing", - transition_variances=[1., 2., 3.], - emission_variances=[4., 5., 6.]) - xs = tf.constant([[1., 2.], - [3., 4.], - [5., 6.]], dtype=tf.float32) - ghmm.set_observations(xs, [3, 3]) - z_prev = tf.constant([1., 2.], dtype=tf.float32) - - proposals = [ghmm._smoothing_proposal(t, z_prev) for t in range(3)] - dist_params = [[p.mean(), p.variance()] for p in proposals] - - expected_outs = [[[281./421., 410./421.], [292./421., 292./421.]], - [[149./73., 222./73.], [90./73., 90./73.]], - [[7./3., 10./3.], [2., 2.]]] - self.assertAllClose(expected_outs, sess.run(dist_params)) - -if __name__ == "__main__": - tf.test.main() diff --git a/research/fivo/fivo/models/srnn.py b/research/fivo/fivo/models/srnn.py deleted file mode 100644 index cdfb560ee..000000000 --- a/research/fivo/fivo/models/srnn.py +++ /dev/null @@ -1,587 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""SRNN classes.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple -import functools - -import sonnet as snt -import tensorflow as tf - -from fivo.models import base - - -SRNNState = namedtuple("SRNNState", "rnn_state latent_encoded") - - -class SRNN(object): - """Implementation of a Stochastic Recurrent Neural Network (SRNN). - - Introduced in "Sequential Neural Models with Stochastic Layers" - by Fraccaro et al. https://arxiv.org/pdf/1605.07571.pdf. - - The SRNN is a sequence model similar to an RNN that uses stochastic latent - variables to improve its representational power. It can be thought of as a - sequential analogue to the variational auto-encoder (VAE). - - The SRNN has a deterministic RNN as its backbone, represented by the - sequence of RNN hidden states h_t. The latent state is conditioned on - the deterministic RNN states and previous latent state. Unlike the VRNN, - the RNN state is not conditioned on the previous latent state. The latent - states have a Markov structure and it is assumed that - p(z_t | z_{1:t-1}) = p(z_t | z_{t-1}). - - In this implementation of the SRNN the latent state z_t is Gaussian. The - model's prior over z_t (also called the transition distribution) is - distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the - mean and standard deviation output from a fully connected network that accepts - the rnn hidden state h_t and previous latent state z_{t-1} as input. - - The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state - z_t as well as the current RNN hidden state h_t via a fully connected network. - - To increase the modeling power of the SRNN, two additional networks are - used to extract features from the data and the latent state. Those networks - are called data_encoder and latent_encoder respectively. - - For an example of how to call the SRNN's methods see sample_step. - - There are a few differences between this exposition and the paper. The main - goal was to be consistent with the VRNN code. A few components are renamed. - The backward RNN for approximating the posterior, g_phi_a in the paper, is the - rev_rnn_cell. The forward RNN that conditions the latent distribution, d in - the paper, is the rnn_cell. The paper doesn't name the NN's that serve as - feature extractors, and we name them here as the data_encoder and - latent_encoder. - """ - - def __init__(self, - rnn_cell, - data_encoder, - latent_encoder, - transition, - emission, - random_seed=None): - """Create an SRNN. - - Args: - rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the - deterministic backbone of the SRNN. The inputs to the RNN will be - the encoded input of the current timestep, a Tensor of shape - [batch_size, encoded_data_size]. - data_encoder: A callable that accepts a batch of data x_t and - 'encodes' it, e.g. runs it through a fully connected network.
Must - accept as argument the inputs x_t, a Tensor of the shape - [batch_size, data_size] and return a Tensor of shape - [batch_size, encoded_data_size]. This callable will be called multiple - times in the SRNN cell so if scoping is not handled correctly then - multiple copies of the variables in this network could be made. It is - recommended to use a snt.nets.MLP module, which takes care of this for - you. - latent_encoder: A callable that accepts a latent state z_t and - 'encodes' it, e.g. runs it through a fully connected network. Must - accept as argument a Tensor of shape [batch_size, latent_size] and - return a Tensor of shape [batch_size, encoded_latent_size]. - This callable must also have the property 'output_size' defined, - returning encoded_latent_size. - transition: A callable that implements the transition distribution - p(z_t|h_t, z_t-1). Must accept as argument the previous RNN hidden state - and previous encoded latent state then return a tf.distributions.Normal - distribution conditioned on the input. - emission: A callable that implements the emission distribution - p(x_t|z_t, h_t). Must accept as arguments the encoded latent state - and the RNN hidden state and return a subclass of - tf.distributions.Distribution that can be used to evaluate the logprob - of the targets. - random_seed: The seed for the random ops. Sets the seed for sample_step. - """ - self.random_seed = random_seed - self.rnn_cell = rnn_cell - self.data_encoder = data_encoder - self.latent_encoder = latent_encoder - self.encoded_z_size = latent_encoder.output_size - self.state_size = (self.rnn_cell.state_size) - self._transition = transition - self._emission = emission - - def zero_state(self, batch_size, dtype): - """The initial state of the SRNN. - - Contains the initial state of the RNN and the initial encoded latent. - - Args: - batch_size: The batch size. - dtype: The data type of the SRNN. - Returns: - zero_state: The initial state of the SRNN. - """ - return SRNNState( - rnn_state=self.rnn_cell.zero_state(batch_size, dtype), - latent_encoded=tf.zeros( - [batch_size, self.latent_encoder.output_size], dtype=dtype)) - - def run_rnn(self, prev_rnn_state, inputs): - """Runs the deterministic RNN for one step. - - Args: - prev_rnn_state: The state of the RNN from the previous timestep. - inputs: A Tensor of shape [batch_size, data_size], the current inputs to - the model. Most often this is x_{t-1}, the previous token in the - observation sequence. - Returns: - rnn_out: The output of the RNN. - rnn_state: The new state of the RNN. - """ - rnn_inputs = self.data_encoder(tf.to_float(inputs)) - rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state) - return rnn_out, rnn_state - - def transition(self, rnn_out, prev_latent_encoded): - """Computes the transition distribution p(z_t|h_t, z_{t-1}). - - Note that p(z_t | h_t, z_{t-1}) = p(z_t| z_{t-1}, x_{1:t-1}) - - Args: - rnn_out: The output of the rnn for the current timestep. - prev_latent_encoded: Float Tensor of shape - [batch_size, encoded_latent_size], the previous latent state z_{t-1} - run through latent_encoder. - Returns: - p(z_t | h_t): A normal distribution with event shape - [batch_size, latent_size]. - """ - return self._transition(rnn_out, prev_latent_encoded) - - def emission(self, latent, rnn_out): - """Computes the emission distribution p(x_t | z_t, h_t). - - Note that p(x_t | z_t, h_t) = p(x_t | z_t, x_{1:t-1}) - - Args: - latent: The stochastic latent state z_t. - rnn_out: The output of the rnn for the current timestep.
- Returns: - p(x_t | z_t, h_t): A distribution with event shape - [batch_size, data_size]. - latent_encoded: The latent state encoded with latent_encoder. Should be - passed to transition() on the next timestep. - """ - latent_encoded = self.latent_encoder(latent) - return self._emission(latent_encoded, rnn_out), latent_encoded - - def sample_step(self, prev_state, inputs, unused_t): - """Samples one output from the model. - - Args: - prev_state: The previous state of the model, a SRNNState containing the - previous rnn state and the previous encoded latent. - inputs: A Tensor of shape [batch_size, data_size], the current inputs to - the model. Most often this is x_{t-1}, the previous token in the - observation sequence. - unused_t: The current timestep. Not used currently. - Returns: - new_state: The next state of the model, a SRNNState. - xt: A float Tensor of shape [batch_size, data_size], an output sampled - from the emission distribution. - """ - rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state, - inputs) - p_zt = self.transition(rnn_out, prev_state.latent_encoded) - zt = p_zt.sample(seed=self.random_seed) - p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) - xt = p_xt_given_zt.sample(seed=self.random_seed) - new_state = SRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded) - return new_state, tf.to_float(xt) - -# pylint: disable=invalid-name -# pylint thinks this is a top-level constant. -TrainableSRNNState = namedtuple("TrainableSRNNState", - SRNNState._fields + ("rnn_out",)) -# pylint: enable=invalid-name - - -class TrainableSRNN(SRNN, base.ELBOTrainableSequenceModel): - """An SRNN subclass with proposals and methods for training and evaluation. - - This class adds proposals used for training with importance-sampling based - methods such as the ELBO. The model can be configured to propose from one - of three proposals: a learned filtering proposal, a learned smoothing - proposal, or the prior (i.e. the transition distribution). - - As described in the SRNN paper, the learned filtering proposal is - parameterized by a fully connected neural network that accepts as input the - current target x_t and the current rnn output h_t. The learned smoothing - proposal is also given the hidden state of an RNN run in reverse over the - inputs, so as to incorporate information about future observations. - - All learned proposals use the 'res_q' parameterization, meaning that instead - of directly producing the mean of z_t, the proposal network predicts the - 'residual' from the prior's mean. This is explored more in section 3.3 of - https://arxiv.org/pdf/1605.07571.pdf. - - During training, the latent state z_t is sampled from the proposal and the - reparameterization trick is used to provide low-variance gradients. - - Note that the SRNN paper refers to the proposals as the approximate posterior, - but we match the VRNN convention of referring to it as the encoder. - """ - - def __init__(self, - rnn_cell, - data_encoder, - latent_encoder, - transition, - emission, - proposal_type, - proposal=None, - rev_rnn_cell=None, - tilt=None, - random_seed=None): - """Create a trainable SRNN. - - Args: - rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the - deterministic backbone of the SRNN. The inputs to the RNN will be - the encoded input of the current timestep, a Tensor of shape - [batch_size, encoded_data_size]. - data_encoder: A callable that accepts a batch of data x_t and - 'encodes' it, e.g. runs it through a fully connected network.
Must
- accept as argument the inputs x_t, a Tensor of the shape
- [batch_size, data_size] and return a Tensor of shape
- [batch_size, encoded_data_size]. This callable will be called multiple
- times in the SRNN cell so if scoping is not handled correctly then
- multiple copies of the variables in this network could be made. It is
- recommended to use a snt.nets.MLP module, which takes care of this for
- you.
- latent_encoder: A callable that accepts a latent state z_t and
- 'encodes' it, e.g. runs it through a fully connected network. Must
- accept as argument a Tensor of shape [batch_size, latent_size] and
- return a Tensor of shape [batch_size, encoded_latent_size].
- This callable must also have the property 'output_size' defined,
- returning encoded_latent_size.
- transition: A callable that implements the transition distribution
- p(z_t|h_t, z_{t-1}). Must accept as argument the previous RNN hidden state
- and previous encoded latent state then return a tf.distributions.Normal
- distribution conditioned on the input.
- emission: A callable that implements the emission distribution
- p(x_t|z_t, h_t). Must accept as arguments the encoded latent state
- and the RNN hidden state and return a subclass of
- tf.distributions.Distribution that can be used to evaluate the logprob
- of the targets.
- proposal_type: A string indicating the type of proposal to use. Can
- be either "filtering", "smoothing", or "prior". When proposal_type is
- "filtering" or "smoothing", proposal must be provided. When
- proposal_type is "smoothing", rev_rnn_cell must also be provided.
- proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}).
- If proposal_type is "filtering" then proposal must accept as arguments
- the current rnn output, the encoded target of the current timestep,
- and the mean of the prior. If proposal_type is "smoothing" then
- in addition to the current rnn output and the mean of the prior
- proposal must accept as arguments the output of the reverse rnn.
- proposal should return a tf.distributions.Normal distribution
- conditioned on its inputs. If proposal_type is "prior" this argument is
- ignored.
- rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate
- forward rnn outputs in the reverse direction. The inputs to the RNN
- will be the encoded reverse input of the current timestep, a Tensor of
- shape [batch_size, encoded_data_size].
- tilt: A callable that implements the log of a positive tilting function
- (ideally approximating log p(x_{t+1}|z_t, h_t)). Must accept as arguments
- the encoded latent state and the RNN hidden state and return a subclass
- of tf.distributions.Distribution that can be used to evaluate the
- logprob of x_{t+1}. May be None, in which case no tilt is used.
- random_seed: The seed for the random ops. Sets the seed for sample_step
- and __call__.
- """
- super(TrainableSRNN, self).__init__(
- rnn_cell, data_encoder, latent_encoder,
- transition, emission, random_seed=random_seed)
- self.rev_rnn_cell = rev_rnn_cell
- self._tilt = tilt
- assert proposal_type in ["filtering", "smoothing", "prior"]
- self._proposal = proposal
- self.proposal_type = proposal_type
- if proposal_type != "prior":
- assert proposal, "If not proposing from the prior, must provide proposal."
- if proposal_type == "smoothing":
- assert rev_rnn_cell, "Must provide rev_rnn_cell for smoothing proposal."
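
The 'res_q' parameterization and the reparameterized sampling referred to in the class docstring above fit in a few lines of NumPy. The sketch below is an illustration only, not the library's base.NormalApproximatePosterior: prior_mu is the prior's mean, and net_mu / net_sigma are hypothetical stand-ins for the outputs of the proposal MLP.

    import numpy as np

    def res_q_sample(prior_mu, net_mu, net_sigma, rng):
        # 'res_q': the proposal network predicts a residual from the prior's
        # mean rather than the mean itself (section 3.3 of
        # https://arxiv.org/pdf/1605.07571.pdf).
        q_mu = prior_mu + net_mu
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # so gradients flow through q_mu and net_sigma with low variance.
        eps = rng.standard_normal(q_mu.shape)
        return q_mu + net_sigma * eps

    rng = np.random.default_rng(0)
    z_t = res_q_sample(prior_mu=np.zeros(4), net_mu=0.1 * np.ones(4),
                       net_sigma=np.ones(4), rng=rng)

With proposal_type="prior" no residual is needed, since the proposal and the prior coincide.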
- - def zero_state(self, batch_size, dtype): - super_state = super(TrainableSRNN, self).zero_state(batch_size, dtype) - return TrainableSRNNState( - rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype), - **super_state._asdict()) - - def set_observations(self, observations, seq_lengths): - """Stores the model's observations. - - Stores the observations (inputs and targets) in TensorArrays and precomputes - things for later like the reverse RNN output and encoded targets. - - Args: - observations: The observations of the model, a tuple containing two - Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors - should be the inputs and targets, respectively. - seq_lengths: An int Tensor of shape [batch_size] containing the length - of each sequence in observations. - """ - inputs, targets = observations - self.seq_lengths = seq_lengths - self.max_seq_len = tf.reduce_max(seq_lengths) - self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) - targets_encoded = base.encode_all(targets, self.data_encoder) - self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, - clear_after_read=False) - inputs_encoded = base.encode_all(inputs, self.data_encoder) - rnn_out, _ = tf.nn.dynamic_rnn(self.rnn_cell, - inputs_encoded, - time_major=True, - dtype=tf.float32, - scope="forward_rnn") - self.rnn_ta = base.ta_for_tensor(rnn_out, - clear_after_read=False) - if self.rev_rnn_cell: - targets_and_rnn_out = tf.concat([rnn_out, targets_encoded], 2) - reversed_targets_and_rnn_out = tf.reverse_sequence( - targets_and_rnn_out, seq_lengths, seq_axis=0, batch_axis=1) - # Compute the reverse rnn over the targets. - reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell, - reversed_targets_and_rnn_out, - time_major=True, - dtype=tf.float32, - scope="reverse_rnn") - reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths, - seq_axis=0, batch_axis=1) - self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out, - clear_after_read=False) - - def _filtering_proposal(self, rnn_out, prev_latent_encoded, prior, t): - """Computes the filtering proposal distribution.""" - return self._proposal(rnn_out, - prev_latent_encoded, - self.targets_encoded_ta.read(t), - prior_mu=prior.mean()) - - def _smoothing_proposal(self, rnn_out, prev_latent_encoded, prior, t): - """Computes the smoothing proposal distribution.""" - return self._proposal(rnn_out, - prev_latent_encoded, - smoothing_tensors=[self.reverse_rnn_ta.read(t)], - prior_mu=prior.mean()) - - def proposal(self, rnn_out, prev_latent_encoded, prior, t): - """Computes the proposal distribution specified by proposal_type. - - Args: - rnn_out: The output of the rnn for the current timestep. - prev_latent_encoded: Float Tensor of shape - [batch_size, encoded_latent_size], the previous latent state z_{t-1} - run through latent_encoder. - prior: A tf.distributions.Normal distribution representing the prior - over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). Used for 'res_q'. - t: A scalar int Tensor, the current timestep. 
- """ - if self.proposal_type == "filtering": - return self._filtering_proposal(rnn_out, prev_latent_encoded, prior, t) - elif self.proposal_type == "smoothing": - return self._smoothing_proposal(rnn_out, prev_latent_encoded, prior, t) - elif self.proposal_type == "prior": - return self.transition(rnn_out, prev_latent_encoded) - - def tilt(self, rnn_out, latent_encoded, targets): - r_func = self._tilt(rnn_out, latent_encoded) - return tf.reduce_sum(r_func.log_prob(targets), axis=-1) - - def propose_and_weight(self, state, t): - """Runs the model and computes importance weights for one timestep. - - Runs the model and computes importance weights, sampling from the proposal - instead of the transition/prior. - - Args: - state: The previous state of the model, a TrainableSRNNState containing - the previous rnn state, the previous rnn outs, and the previous encoded - latent. - t: A scalar integer Tensor, the current timestep. - Returns: - weights: A float Tensor of shape [batch_size]. - new_state: The new state of the model. - """ - targets = self.targets_ta.read(t) - rnn_out = self.rnn_ta.read(t) - p_zt = self.transition(rnn_out, state.latent_encoded) - q_zt = self.proposal(rnn_out, state.latent_encoded, p_zt, t) - zt = q_zt.sample(seed=self.random_seed) - p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) - log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1) - log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1) - log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1) - weights = log_p_zt + log_p_xt_given_zt - log_q_zt - if self._tilt: - prev_log_r = tf.cond( - tf.greater(t, 0), - lambda: self.tilt(state.rnn_out, state.latent_encoded, targets), - lambda: 0.) # On the first step, prev_log_r = 0. - log_r = tf.cond( - tf.less(t + 1, self.max_seq_len), - lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)), - lambda: 0.) - # On the last step, log_r = 0. - log_r *= tf.to_float(t < self.seq_lengths - 1) - weights += log_r - prev_log_r - - # This reshape is required because the TensorArray reports different shapes - # than the initial state provides (where the first dimension is unknown). - # The difference breaks the while_loop. Reshape prevents the error. - rnn_out = tf.reshape(rnn_out, tf.shape(state.rnn_out)) - - new_state = TrainableSRNNState(rnn_out=rnn_out, - rnn_state=state.rnn_state, # unmodified - latent_encoded=latent_encoded) - return weights, new_state - - -_DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), - "b": tf.zeros_initializer()} - - -def create_srnn( - data_size, - latent_size, - emission_class, - rnn_hidden_size=None, - fcnet_hidden_sizes=None, - encoded_data_size=None, - encoded_latent_size=None, - sigma_min=0.0, - raw_sigma_bias=0.25, - emission_bias_init=0.0, - use_tilt=False, - proposal_type="filtering", - initializers=None, - random_seed=None): - """A factory method for creating SRNN cells. - - Args: - data_size: The dimension of the vectors that make up the data sequences. - latent_size: The size of the stochastic latent state of the SRNN. - emission_class: The class of the emission distribution. Can be either - ConditionalNormalDistribution or ConditionalBernoulliDistribution. - rnn_hidden_size: The hidden state dimension of the RNN that forms the - deterministic part of this SRNN. If None, then it defaults - to latent_size. - fcnet_hidden_sizes: A list of python integers, the size of the hidden - layers of the fully connected networks that parameterize the conditional - distributions of the SRNN. 
If None, then it defaults to one hidden
- layer of size latent_size.
- encoded_data_size: The size of the output of the data encoding network. If
- None, defaults to latent_size.
- encoded_latent_size: The size of the output of the latent state encoding
- network. If None, defaults to latent_size.
- sigma_min: The minimum value that the standard deviation of the
- distribution over the latent state can take.
- raw_sigma_bias: A scalar that is added to the raw standard deviation
- output from the neural networks that parameterize the prior and
- approximate posterior. Useful for preventing standard deviations close
- to zero.
- emission_bias_init: A bias added to the raw output of the fully
- connected network that parameterizes the emission distribution. Useful
- for initializing the mean of the distribution to a sensible starting point
- such as the mean of the training data. Only used with Bernoulli generative
- distributions.
- use_tilt: If true, create a SRNN with a tilting function.
- proposal_type: The type of proposal to use. Can be "filtering", "smoothing",
- or "prior".
- initializers: The variable initializers to use for the fully connected
- networks and RNN cell. Must be a dictionary mapping the keys 'w' and 'b'
- to the initializers for the weights and biases. Defaults to xavier for
- the weights and zeros for the biases when initializers is None.
- random_seed: A random seed for the SRNN resampling operations.
- Returns:
- model: A TrainableSRNN object.
- """
- if rnn_hidden_size is None:
- rnn_hidden_size = latent_size
- if fcnet_hidden_sizes is None:
- fcnet_hidden_sizes = [latent_size]
- if encoded_data_size is None:
- encoded_data_size = latent_size
- if encoded_latent_size is None:
- encoded_latent_size = latent_size
- if initializers is None:
- initializers = _DEFAULT_INITIALIZERS
- data_encoder = snt.nets.MLP(
- output_sizes=fcnet_hidden_sizes + [encoded_data_size],
- initializers=initializers,
- name="data_encoder")
- latent_encoder = snt.nets.MLP(
- output_sizes=fcnet_hidden_sizes + [encoded_latent_size],
- initializers=initializers,
- name="latent_encoder")
- transition = base.ConditionalNormalDistribution(
- size=latent_size,
- hidden_layer_sizes=fcnet_hidden_sizes,
- sigma_min=sigma_min,
- raw_sigma_bias=raw_sigma_bias,
- initializers=initializers,
- name="prior")
- # Construct the emission distribution.
- if emission_class == base.ConditionalBernoulliDistribution:
- # For Bernoulli distributed outputs, we initialize the bias so that the
- # network generates on average the mean from the training set.
- emission_dist = functools.partial(base.ConditionalBernoulliDistribution,
- bias_init=emission_bias_init)
- else:
- emission_dist = base.ConditionalNormalDistribution
- emission = emission_dist(
- size=data_size,
- hidden_layer_sizes=fcnet_hidden_sizes,
- initializers=initializers,
- name="generative")
- # Construct the proposal distribution.
- if proposal_type in ["filtering", "smoothing"]: - proposal = base.NormalApproximatePosterior( - size=latent_size, - hidden_layer_sizes=fcnet_hidden_sizes, - sigma_min=sigma_min, - raw_sigma_bias=raw_sigma_bias, - initializers=initializers, - smoothing=(proposal_type == "smoothing"), - name="approximate_posterior") - else: - proposal = None - - if use_tilt: - tilt = emission_dist( - size=data_size, - hidden_layer_sizes=fcnet_hidden_sizes, - initializers=initializers, - name="tilt") - else: - tilt = None - - rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, - initializer=initializers["w"]) - rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, - initializer=initializers["w"]) - return TrainableSRNN( - rnn_cell, data_encoder, latent_encoder, transition, - emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell, - tilt=tilt, random_seed=random_seed) diff --git a/research/fivo/fivo/models/srnn_test.py b/research/fivo/fivo/models/srnn_test.py deleted file mode 100644 index 39e10da13..000000000 --- a/research/fivo/fivo/models/srnn_test.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.models.srnn.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from fivo.models import base -from fivo.test_utils import create_srnn - - -class SrnnTest(tf.test.TestCase): - - def test_srnn_normal_emission(self): - self.run_srnn(base.ConditionalNormalDistribution, [-5.947752, -1.182961]) - - def test_srnn_bernoulli_emission(self): - self.run_srnn(base.ConditionalBernoulliDistribution, [-2.566631, -2.479234]) - - def run_srnn(self, generative_class, gt_log_alpha): - """Tests the SRNN. - - All test values are 'golden values' derived by running the code and copying - the output. - - Args: - generative_class: The class of the generative distribution to use. - gt_log_alpha: The ground-truth value of log alpha. 
- """ - tf.set_random_seed(1234) - with self.test_session() as sess: - batch_size = 2 - model, inputs, targets, _ = create_srnn(generative_class=generative_class, - batch_size=batch_size, - data_lengths=(1, 1), - random_seed=1234) - zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) - model.set_observations([inputs, targets], tf.convert_to_tensor([1, 1])) - model_out = model.propose_and_weight(zero_state, 0) - sess.run(tf.global_variables_initializer()) - log_alpha, state = sess.run(model_out) - self.assertAllClose( - state.latent_encoded, - [[0.591787, 1.310583], [-1.523136, 0.953918]]) - self.assertAllClose(state.rnn_out, - [[0.041675, -0.056038, -0.001823, 0.005224], - [0.042925, -0.044619, 0.021401, 0.016998]]) - self.assertAllClose(log_alpha, gt_log_alpha) - - def test_srnn_with_tilt_normal_emission(self): - self.run_srnn_with_tilt(base.ConditionalNormalDistribution, [-9.13577, -4.56725]) - - - def test_srnn_with_tilt_bernoulli_emission(self): - self.run_srnn_with_tilt(base.ConditionalBernoulliDistribution, [-4.617461, -5.079248]) - - def run_srnn_with_tilt(self, generative_class, gt_log_alpha): - """Tests the SRNN with a tilting function. - - All test values are 'golden values' derived by running the code and copying - the output. - - Args: - generative_class: The class of the generative distribution to use. - gt_log_alpha: The ground-truth value of log alpha. - """ - tf.set_random_seed(1234) - with self.test_session() as sess: - batch_size = 2 - model, inputs, targets, _ = create_srnn(generative_class=generative_class, - batch_size=batch_size, - data_lengths=(3, 2), - random_seed=1234, - use_tilt=True) - zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) - model.set_observations([inputs, targets], tf.convert_to_tensor([3, 2])) - model_out = model.propose_and_weight(zero_state, 0) - sess.run(tf.global_variables_initializer()) - log_alpha, state = sess.run(model_out) - self.assertAllClose( - state.latent_encoded, - [[0.591787, 1.310583], [-1.523136, 0.953918]]) - self.assertAllClose(state.rnn_out, - [[0.041675, -0.056038, -0.001823, 0.005224], - [0.042925, -0.044619, 0.021401, 0.016998]]) - self.assertAllClose(log_alpha, gt_log_alpha) - -if __name__ == "__main__": - tf.test.main() diff --git a/research/fivo/fivo/models/vrnn.py b/research/fivo/fivo/models/vrnn.py deleted file mode 100644 index 4e2552088..000000000 --- a/research/fivo/fivo/models/vrnn.py +++ /dev/null @@ -1,572 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""VRNN classes.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple -import functools - -import sonnet as snt -import tensorflow as tf - -from fivo.models import base - - -VRNNState = namedtuple("VRNNState", "rnn_state latent_encoded") - - -class VRNN(object): - """Implementation of a Variational Recurrent Neural Network (VRNN). - - Introduced in "A Recurrent Latent Variable Model for Sequential data" - by Chung et al. https://arxiv.org/pdf/1506.02216.pdf. - - The VRNN is a sequence model similar to an RNN that uses stochastic latent - variables to improve its representational power. It can be thought of as a - sequential analogue to the variational auto-encoder (VAE). - - The VRNN has a deterministic RNN as its backbone, represented by the - sequence of RNN hidden states h_t. At each timestep, the RNN hidden state h_t - is conditioned on the previous sequence element, x_{t-1}, as well as the - latent state from the previous timestep, z_{t-1}. - - In this implementation of the VRNN the latent state z_t is Gaussian. The - model's prior over z_t (also called the transition distribution) is - distributed as Normal(mu_t, diag(sigma_t^2)) where mu_t and sigma_t are the - mean and standard deviation output from a fully connected network that accepts - the rnn hidden state h_t as input. - - The emission distribution p(x_t|z_t, h_t) is conditioned on the latent state - z_t as well as the current RNN hidden state h_t via a fully connected network. - - To increase the modeling power of the VRNN, two additional networks are - used to extract features from the data and the latent state. Those networks - are called data_encoder and latent_encoder respectively. - - For an example of how to call the VRNN's methods see sample_step. - - There are a few differences between this exposition and the paper. - First, the indexing scheme for h_t is different than the paper's -- what the - paper calls h_t we call h_{t+1}. This is the same notation used by Fraccaro - et al. to describe the VRNN in the paper linked above. Also, the VRNN paper - uses VAE terminology to refer to the different internal networks, so it - refers to the emission distribution as the decoder. This implementation also - renames the functions phi_x and phi_z in the paper to data_encoder and - latent_encoder. - """ - - def __init__(self, - rnn_cell, - data_encoder, - latent_encoder, - transition, - emission, - random_seed=None): - """Create a VRNN. - - Args: - rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the - deterministic backbone of the VRNN. The inputs to the RNN will be the - encoded latent state of the previous timestep with shape - [batch_size, encoded_latent_size] as well as the encoded input of the - current timestep, a Tensor of shape [batch_size, encoded_data_size]. - data_encoder: A callable that accepts a batch of data x_t and - 'encodes' it, e.g. runs it through a fully connected network. Must - accept as argument the inputs x_t, a Tensor of the shape - [batch_size, data_size] and return a Tensor of shape - [batch_size, encoded_data_size]. This callable will be called multiple - times in the VRNN cell so if scoping is not handled correctly then - multiple copies of the variables in this network could be made. It is - recommended to use a snt.nets.MLP module, which takes care of this for - you. 
- latent_encoder: A callable that accepts a latent state z_t and
- 'encodes' it, e.g. runs it through a fully connected network. Must
- accept as argument a Tensor of shape [batch_size, latent_size] and
- return a Tensor of shape [batch_size, encoded_latent_size].
- This callable must also have the property 'output_size' defined,
- returning encoded_latent_size.
- transition: A callable that implements the transition distribution
- p(z_t|h_t). Must accept as argument the previous RNN hidden state and
- return a tf.distributions.Normal distribution conditioned on the input.
- emission: A callable that implements the emission distribution
- p(x_t|z_t, h_t). Must accept as arguments the encoded latent state
- and the RNN hidden state and return a subclass of
- tf.distributions.Distribution that can be used to evaluate the logprob
- of the targets.
- random_seed: The seed for the random ops. Sets the seed for sample_step.
- """
- self.random_seed = random_seed
- self.rnn_cell = rnn_cell
- self.data_encoder = data_encoder
- self.latent_encoder = latent_encoder
- self.encoded_z_size = latent_encoder.output_size
- self.state_size = (self.rnn_cell.state_size)
- self._transition = transition
- self._emission = emission
-
- def zero_state(self, batch_size, dtype):
- """The initial state of the VRNN.
-
- Contains the initial state of the RNN and the initial encoded latent.
-
- Args:
- batch_size: The batch size.
- dtype: The data type of the VRNN.
- Returns:
- zero_state: The initial state of the VRNN.
- """
- return VRNNState(
- rnn_state=self.rnn_cell.zero_state(batch_size, dtype),
- latent_encoded=tf.zeros(
- [batch_size, self.latent_encoder.output_size], dtype=dtype))
-
- def run_rnn(self, prev_rnn_state, prev_latent_encoded, inputs):
- """Runs the deterministic RNN for one step.
-
- Args:
- prev_rnn_state: The state of the RNN from the previous timestep.
- prev_latent_encoded: Float Tensor of shape
- [batch_size, encoded_latent_size], the previous latent state z_{t-1}
- run through latent_encoder.
- inputs: A Tensor of shape [batch_size, data_size], the current inputs to
- the model. Most often this is x_{t-1}, the previous token in the
- observation sequence.
- Returns:
- rnn_out: The output of the RNN.
- rnn_state: The new state of the RNN.
- """
- inputs_encoded = self.data_encoder(tf.to_float(inputs))
- rnn_inputs = tf.concat([inputs_encoded, prev_latent_encoded], axis=1)
- rnn_out, rnn_state = self.rnn_cell(rnn_inputs, prev_rnn_state)
- return rnn_out, rnn_state
-
- def transition(self, rnn_out):
- """Computes the transition distribution p(z_t|h_t).
-
- Note that p(z_t | h_t) = p(z_t| z_{1:t-1}, x_{1:t-1})
-
- Args:
- rnn_out: The output of the rnn for the current timestep.
- Returns:
- p(z_t | h_t): A normal distribution with event shape
- [batch_size, latent_size].
- """
- return self._transition(rnn_out)
-
- def emission(self, latent, rnn_out):
- """Computes the emission distribution p(x_t | z_t, h_t).
-
- Note that p(x_t | z_t, h_t) = p(x_t | z_{1:t}, x_{1:t-1}).
-
- Args:
- latent: The stochastic latent state z_t.
- rnn_out: The output of the rnn for the current timestep.
- Returns:
- p(x_t | z_t, h_t): A distribution with event shape
- [batch_size, data_size].
- latent_encoded: The latent state encoded with latent_encoder. Should be
- passed to run_rnn on the next timestep.
- """
- latent_encoded = self.latent_encoder(latent)
- return self._emission(latent_encoded, rnn_out), latent_encoded
-
- def sample_step(self, prev_state, inputs, unused_t):
- """Samples one output from the model.
- - Args: - prev_state: The previous state of the model, a VRNNState containing the - previous rnn state and the previous encoded latent. - inputs: A Tensor of shape [batch_size, data_size], the current inputs to - the model. Most often this is x_{t-1}, the previous token in the - observation sequence. - unused_t: The current timestep. Not used currently. - Returns: - new_state: The next state of the model, a VRNNState. - xt: A float Tensor of shape [batch_size, data_size], an output sampled - from the emission distribution. - """ - rnn_out, rnn_state = self.run_rnn(prev_state.rnn_state, - prev_state.latent_encoded, - inputs) - p_zt = self.transition(rnn_out) - zt = p_zt.sample(seed=self.random_seed) - p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) - xt = p_xt_given_zt.sample(seed=self.random_seed) - new_state = VRNNState(rnn_state=rnn_state, latent_encoded=latent_encoded) - return new_state, tf.to_float(xt) - -# pylint: disable=invalid-name -# pylint thinks this is a top-level constant. -TrainableVRNNState = namedtuple("TrainableVRNNState", - VRNNState._fields + ("rnn_out",)) -# pylint: enable=g-invalid-name - - -class TrainableVRNN(VRNN, base.ELBOTrainableSequenceModel): - """A VRNN subclass with proposals and methods for training and evaluation. - - This class adds proposals used for training with importance-sampling based - methods such as the ELBO. The model can be configured to propose from one - of three proposals: a learned filtering proposal, a learned smoothing - proposal, or the prior (i.e. the transition distribution). - - As described in the VRNN paper, the learned filtering proposal is - parameterized by a fully connected neural network that accepts as input the - current target x_t and the current rnn output h_t. The learned smoothing - proposal is also given the hidden state of an RNN run in reverse over the - inputs, so as to incorporate information about future observations. This - smoothing proposal is not described in the VRNN paper. - - All learned proposals use the 'res_q' parameterization, meaning that instead - of directly producing the mean of z_t, the proposal network predicts the - 'residual' from the prior's mean. This is explored more in section 3.3 of - https://arxiv.org/pdf/1605.07571.pdf. - - During training, the latent state z_t is sampled from the proposal and the - reparameterization trick is used to provide low-variance gradients. - - Note that the VRNN paper uses VAE terminology to refer to the different - internal networks, so the proposal is referred to as the encoder. - """ - - def __init__(self, - rnn_cell, - data_encoder, - latent_encoder, - transition, - emission, - proposal_type, - proposal=None, - rev_rnn_cell=None, - tilt=None, - random_seed=None): - """Create a trainable RNN. - - Args: - rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will form the - deterministic backbone of the VRNN. The inputs to the RNN will be the - encoded latent state of the previous timestep with shape - [batch_size, encoded_latent_size] as well as the encoded input of the - current timestep, a Tensor of shape [batch_size, encoded_data_size]. - data_encoder: A callable that accepts a batch of data x_t and - 'encodes' it, e.g. runs it through a fully connected network. Must - accept as argument the inputs x_t, a Tensor of the shape - [batch_size, data_size] and return a Tensor of shape - [batch_size, encoded_data_size]. 
This callable will be called multiple
- times in the VRNN cell so if scoping is not handled correctly then
- multiple copies of the variables in this network could be made. It is
- recommended to use a snt.nets.MLP module, which takes care of this for
- you.
- latent_encoder: A callable that accepts a latent state z_t and
- 'encodes' it, e.g. runs it through a fully connected network. Must
- accept as argument a Tensor of shape [batch_size, latent_size] and
- return a Tensor of shape [batch_size, encoded_latent_size].
- This callable must also have the property 'output_size' defined,
- returning encoded_latent_size.
- transition: A callable that implements the transition distribution
- p(z_t|h_t). Must accept as argument the previous RNN hidden state and
- return a tf.distributions.Normal distribution conditioned on the input.
- emission: A callable that implements the emission distribution
- p(x_t|z_t, h_t). Must accept as arguments the encoded latent state
- and the RNN hidden state and return a subclass of
- tf.distributions.Distribution that can be used to evaluate the logprob
- of the targets.
- proposal_type: A string indicating the type of proposal to use. Can
- be either "filtering", "smoothing", or "prior". When proposal_type is
- "filtering" or "smoothing", proposal must be provided. When
- proposal_type is "smoothing", rev_rnn_cell must also be provided.
- proposal: A callable that implements the proposal q(z_t| h_t, x_{1:T}).
- If proposal_type is "filtering" then proposal must accept as arguments
- the current rnn output, the encoded target of the current timestep,
- and the mean of the prior. If proposal_type is "smoothing" then
- in addition to the current rnn output and the mean of the prior
- proposal must accept as arguments the output of the reverse rnn.
- proposal should return a tf.distributions.Normal distribution
- conditioned on its inputs. If proposal_type is "prior" this argument is
- ignored.
- rev_rnn_cell: A subclass of tf.nn.rnn_cell.RNNCell that will aggregate
- observation statistics in the reverse direction. The inputs to the RNN
- will be the encoded reverse input of the current timestep, a Tensor of
- shape [batch_size, encoded_data_size].
- tilt: A callable that implements the log of a positive tilting function
- (ideally approximating log p(x_{t+1}|z_t, h_t)). Must accept as arguments
- the encoded latent state and the RNN hidden state and return a subclass
- of tf.distributions.Distribution that can be used to evaluate the
- logprob of x_{t+1}. May be None, in which case no tilt is used.
- random_seed: The seed for the random ops. Sets the seed for sample_step
- and __call__.
- """
- super(TrainableVRNN, self).__init__(
- rnn_cell, data_encoder, latent_encoder,
- transition, emission, random_seed=random_seed)
- self.rev_rnn_cell = rev_rnn_cell
- self._tilt = tilt
- assert proposal_type in ["filtering", "smoothing", "prior"]
- self._proposal = proposal
- self.proposal_type = proposal_type
- if proposal_type != "prior":
- assert proposal, "If not proposing from the prior, must provide proposal."
- if proposal_type == "smoothing":
- assert rev_rnn_cell, "Must provide rev_rnn_cell for smoothing proposal."
-
- def zero_state(self, batch_size, dtype):
- super_state = super(TrainableVRNN, self).zero_state(batch_size, dtype)
- return TrainableVRNNState(
- rnn_out=tf.zeros([batch_size, self.rnn_cell.output_size], dtype=dtype),
- **super_state._asdict())
-
- def set_observations(self, observations, seq_lengths):
- """Stores the model's observations.
- - Stores the observations (inputs and targets) in TensorArrays and precomputes - things for later like the reverse RNN output and encoded targets. - - Args: - observations: The observations of the model, a tuple containing two - Tensors of shape [max_seq_len, batch_size, data_size]. The Tensors - should be the inputs and targets, respectively. - seq_lengths: An int Tensor of shape [batch_size] containing the length - of each sequence in observations. - """ - inputs, targets = observations - self.seq_lengths = seq_lengths - self.max_seq_len = tf.reduce_max(seq_lengths) - self.inputs_ta = base.ta_for_tensor(inputs, clear_after_read=False) - self.targets_ta = base.ta_for_tensor(targets, clear_after_read=False) - targets_encoded = base.encode_all(targets, self.data_encoder) - self.targets_encoded_ta = base.ta_for_tensor(targets_encoded, - clear_after_read=False) - if self.rev_rnn_cell: - reverse_targets_encoded = tf.reverse_sequence( - targets_encoded, seq_lengths, seq_axis=0, batch_axis=1) - # Compute the reverse rnn over the targets. - reverse_rnn_out, _ = tf.nn.dynamic_rnn(self.rev_rnn_cell, - reverse_targets_encoded, - time_major=True, - dtype=tf.float32) - reverse_rnn_out = tf.reverse_sequence(reverse_rnn_out, seq_lengths, - seq_axis=0, batch_axis=1) - self.reverse_rnn_ta = base.ta_for_tensor(reverse_rnn_out, - clear_after_read=False) - - def _filtering_proposal(self, rnn_out, prior, t): - """Computes the filtering proposal distribution.""" - return self._proposal(rnn_out, - self.targets_encoded_ta.read(t), - prior_mu=prior.mean()) - - def _smoothing_proposal(self, rnn_out, prior, t): - """Computes the smoothing proposal distribution.""" - return self._proposal(rnn_out, - smoothing_tensors=[self.reverse_rnn_ta.read(t)], - prior_mu=prior.mean()) - - def proposal(self, rnn_out, prior, t): - """Computes the proposal distribution specified by proposal_type. - - Args: - rnn_out: The output of the rnn for the current timestep. - prior: A tf.distributions.Normal distribution representing the prior - over z_t, p(z_t | z_{1:t-1}, x_{1:t-1}). Used for 'res_q'. - t: A scalar int Tensor, the current timestep. - """ - if self.proposal_type == "filtering": - return self._filtering_proposal(rnn_out, prior, t) - elif self.proposal_type == "smoothing": - return self._smoothing_proposal(rnn_out, prior, t) - elif self.proposal_type == "prior": - return self.transition(rnn_out) - - def tilt(self, rnn_out, latent_encoded, targets): - r_func = self._tilt(rnn_out, latent_encoded) - return tf.reduce_sum(r_func.log_prob(targets), axis=-1) - - def propose_and_weight(self, state, t): - """Runs the model and computes importance weights for one timestep. - - Runs the model and computes importance weights, sampling from the proposal - instead of the transition/prior. - - Args: - state: The previous state of the model, a TrainableVRNNState containing - the previous rnn state, the previous rnn outs, and the previous encoded - latent. - t: A scalar integer Tensor, the current timestep. - Returns: - weights: A float Tensor of shape [batch_size]. - new_state: The new state of the model. 
- """ - inputs = self.inputs_ta.read(t) - targets = self.targets_ta.read(t) - rnn_out, next_rnn_state = self.run_rnn(state.rnn_state, - state.latent_encoded, - inputs) - p_zt = self.transition(rnn_out) - q_zt = self.proposal(rnn_out, p_zt, t) - zt = q_zt.sample(seed=self.random_seed) - p_xt_given_zt, latent_encoded = self.emission(zt, rnn_out) - log_p_xt_given_zt = tf.reduce_sum(p_xt_given_zt.log_prob(targets), axis=-1) - log_p_zt = tf.reduce_sum(p_zt.log_prob(zt), axis=-1) - log_q_zt = tf.reduce_sum(q_zt.log_prob(zt), axis=-1) - weights = log_p_zt + log_p_xt_given_zt - log_q_zt - if self._tilt: - prev_log_r = tf.cond( - tf.greater(t, 0), - lambda: self.tilt(state.rnn_out, state.latent_encoded, targets), - lambda: 0.) # On the first step, prev_log_r = 0. - log_r = tf.cond( - tf.less(t + 1, self.max_seq_len), - lambda: self.tilt(rnn_out, latent_encoded, self.targets_ta.read(t+1)), - lambda: 0.) - # On the last step, log_r = 0. - log_r *= tf.to_float(t < self.seq_lengths - 1) - weights += log_r - prev_log_r - new_state = TrainableVRNNState(rnn_state=next_rnn_state, - rnn_out=rnn_out, - latent_encoded=latent_encoded) - return weights, new_state - - -_DEFAULT_INITIALIZERS = {"w": tf.contrib.layers.xavier_initializer(), - "b": tf.zeros_initializer()} - - -def create_vrnn( - data_size, - latent_size, - emission_class, - rnn_hidden_size=None, - fcnet_hidden_sizes=None, - encoded_data_size=None, - encoded_latent_size=None, - sigma_min=0.0, - raw_sigma_bias=0.25, - emission_bias_init=0.0, - use_tilt=False, - proposal_type="filtering", - initializers=None, - random_seed=None): - """A factory method for creating VRNN cells. - - Args: - data_size: The dimension of the vectors that make up the data sequences. - latent_size: The size of the stochastic latent state of the VRNN. - emission_class: The class of the emission distribution. Can be either - ConditionalNormalDistribution or ConditionalBernoulliDistribution. - rnn_hidden_size: The hidden state dimension of the RNN that forms the - deterministic part of this VRNN. If None, then it defaults - to latent_size. - fcnet_hidden_sizes: A list of python integers, the size of the hidden - layers of the fully connected networks that parameterize the conditional - distributions of the VRNN. If None, then it defaults to one hidden - layer of size latent_size. - encoded_data_size: The size of the output of the data encoding network. If - None, defaults to latent_size. - encoded_latent_size: The size of the output of the latent state encoding - network. If None, defaults to latent_size. - sigma_min: The minimum value that the standard deviation of the - distribution over the latent state can take. - raw_sigma_bias: A scalar that is added to the raw standard deviation - output from the neural networks that parameterize the prior and - approximate posterior. Useful for preventing standard deviations close - to zero. - emission_bias_init: A bias to added to the raw output of the fully - connected network that parameterizes the emission distribution. Useful - for initalizing the mean of the distribution to a sensible starting point - such as the mean of the training data. Only used with Bernoulli generative - distributions. - use_tilt: If true, create a VRNN with a tilting function. - proposal_type: The type of proposal to use. Can be "filtering", "smoothing", - or "prior". - initializers: The variable intitializers to use for the fully connected - networks and RNN cell. 
Must be a dictionary mapping the keys 'w' and 'b' - to the initializers for the weights and biases. Defaults to xavier for - the weights and zeros for the biases when initializers is None. - random_seed: A random seed for the VRNN resampling operations. - Returns: - model: A TrainableVRNN object. - """ - if rnn_hidden_size is None: - rnn_hidden_size = latent_size - if fcnet_hidden_sizes is None: - fcnet_hidden_sizes = [latent_size] - if encoded_data_size is None: - encoded_data_size = latent_size - if encoded_latent_size is None: - encoded_latent_size = latent_size - if initializers is None: - initializers = _DEFAULT_INITIALIZERS - data_encoder = snt.nets.MLP( - output_sizes=fcnet_hidden_sizes + [encoded_data_size], - initializers=initializers, - name="data_encoder") - latent_encoder = snt.nets.MLP( - output_sizes=fcnet_hidden_sizes + [encoded_latent_size], - initializers=initializers, - name="latent_encoder") - transition = base.ConditionalNormalDistribution( - size=latent_size, - hidden_layer_sizes=fcnet_hidden_sizes, - sigma_min=sigma_min, - raw_sigma_bias=raw_sigma_bias, - initializers=initializers, - name="prior") - # Construct the emission distribution. - if emission_class == base.ConditionalBernoulliDistribution: - # For Bernoulli distributed outputs, we initialize the bias so that the - # network generates on average the mean from the training set. - emission_dist = functools.partial(base.ConditionalBernoulliDistribution, - bias_init=emission_bias_init) - else: - emission_dist = base.ConditionalNormalDistribution - emission = emission_dist( - size=data_size, - hidden_layer_sizes=fcnet_hidden_sizes, - initializers=initializers, - name="generative") - # Construct the proposal distribution. - if proposal_type in ["filtering", "smoothing"]: - proposal = base.NormalApproximatePosterior( - size=latent_size, - hidden_layer_sizes=fcnet_hidden_sizes, - sigma_min=sigma_min, - raw_sigma_bias=raw_sigma_bias, - initializers=initializers, - smoothing=(proposal_type == "smoothing"), - name="approximate_posterior") - else: - proposal = None - - if use_tilt: - tilt = emission_dist( - size=data_size, - hidden_layer_sizes=fcnet_hidden_sizes, - initializers=initializers, - name="tilt") - else: - tilt = None - - rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, - initializer=initializers["w"]) - rev_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_hidden_size, - initializer=initializers["w"]) - return TrainableVRNN( - rnn_cell, data_encoder, latent_encoder, transition, - emission, proposal_type, proposal=proposal, rev_rnn_cell=rev_rnn_cell, - tilt=tilt, random_seed=random_seed) diff --git a/research/fivo/fivo/models/vrnn_test.py b/research/fivo/fivo/models/vrnn_test.py deleted file mode 100644 index 2d9bde3d5..000000000 --- a/research/fivo/fivo/models/vrnn_test.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Tests for fivo.models.vrnn.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np - -import tensorflow as tf - -from fivo.models import base -from fivo.test_utils import create_vrnn - - -class VrnnTest(tf.test.TestCase): - - def test_vrnn_normal_emission(self): - self.run_vrnn(base.ConditionalNormalDistribution, [-4.509767, -3.242221]) - - def test_vrnn_bernoulli_emission(self): - self.run_vrnn(base.ConditionalBernoulliDistribution, [-2.63812733, -2.02216434]), - - def run_vrnn(self, generative_class, gt_log_p_x_given_z): - """Tests the VRNN. - - All test values are 'golden values' derived by running the code and copying - the output. - - Args: - generative_class: The class of the generative distribution to use. - gt_log_p_x_given_z: The ground-truth value of log p(x|z). - """ - tf.set_random_seed(1234) - with self.test_session() as sess: - batch_size = 2 - model, inputs, targets, _ = create_vrnn(generative_class=generative_class, - batch_size=batch_size, - data_lengths=(1, 1), - random_seed=1234) - zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) - model.set_observations([inputs, targets], tf.convert_to_tensor([1, 1])) - model_out = model.propose_and_weight(zero_state, 0) - sess.run(tf.global_variables_initializer()) - log_alpha, state = sess.run(model_out) - rnn_state, latent_state, rnn_out = state - self.assertAllClose( - rnn_state.c, - [[-0.15014534, 0.0143046, 0.00160489, -0.12899463], - [-0.25015137, 0.09377634, -0.05000039, -0.17123522]]) - self.assertAllClose( - rnn_state.h, - [[-0.06842659, 0.00760155, 0.00096106, -0.05434214], - [-0.1109542, 0.0441804, -0.03121299, -0.07882939]] - ) - self.assertAllClose( - latent_state, - [[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215, - -1.501128, -0.440111, -0.40447, -0.156649, 1.206028], - [0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429, - -0.32687, -0.578763, -0.56965, 0.751886, 0.681606]] - ) - self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342], - [-0.110954, 0.04418, -0.031213, -0.078829]]) - gt_log_q_z = [-8.0895052, -6.75819111] - gt_log_p_z = [-7.246827, -6.512877] - gt_log_alpha = (np.array(gt_log_p_z) + - np.array(gt_log_p_x_given_z) - - np.array(gt_log_q_z)) - self.assertAllClose(log_alpha, gt_log_alpha) - - def test_vrnn_with_tilt_normal_emission(self): - self.run_vrnn_with_tilt(base.ConditionalNormalDistribution, [-5.198263, -6.31686]) - - def test_vrnn_with_tilt_bernoulli_emission(self): - self.run_vrnn_with_tilt(base.ConditionalBernoulliDistribution, [-4.66985, -3.802245]) - - def run_vrnn_with_tilt(self, generative_class, gt_log_alpha): - """Tests the VRNN with a tilting function. - - All test values are 'golden values' derived by running the code and copying - the output. - - Args: - generative_class: The class of the generative distribution to use. - gt_log_alpha: The ground-truth value of log alpha. 
- """ - tf.set_random_seed(1234) - with self.test_session() as sess: - batch_size = 2 - model, inputs, targets, _ = create_vrnn(generative_class=generative_class, - batch_size=batch_size, - data_lengths=(3, 2), - random_seed=1234, - use_tilt=True) - zero_state = model.zero_state(batch_size=batch_size, dtype=tf.float32) - model.set_observations([inputs, targets], tf.convert_to_tensor([3, 2])) - model_out = model.propose_and_weight(zero_state, 0) - sess.run(tf.global_variables_initializer()) - log_alpha, state = sess.run(model_out) - rnn_state, latent_state, rnn_out = state - self.assertAllClose( - rnn_state.c, - [[-0.15014534, 0.0143046, 0.00160489, -0.12899463], - [-0.25015137, 0.09377634, -0.05000039, -0.17123522]]) - self.assertAllClose( - rnn_state.h, - [[-0.06842659, 0.00760155, 0.00096106, -0.05434214], - [-0.1109542, 0.0441804, -0.03121299, -0.07882939]] - ) - self.assertAllClose( - latent_state, - [[0.025241, 0.122011, 1.066661, 0.316209, -0.25369, 0.108215, - -1.501128, -0.440111, -0.40447, -0.156649, 1.206028], - [0.066824, 0.519937, 0.610973, 0.977739, -0.121889, -0.223429, - -0.32687, -0.578763, -0.56965, 0.751886, 0.681606]] - ) - self.assertAllClose(rnn_out, [[-0.068427, 0.007602, 0.000961, -0.054342], - [-0.110954, 0.04418, -0.031213, -0.078829]]) - self.assertAllClose(log_alpha, gt_log_alpha) - -if __name__ == "__main__": - tf.test.main() diff --git a/research/fivo/fivo/nested_utils.py b/research/fivo/fivo/nested_utils.py deleted file mode 100644 index ef956a80c..000000000 --- a/research/fivo/fivo/nested_utils.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A set of utils for dealing with nested lists and tuples of Tensors.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools -import tensorflow as tf - -from tensorflow.python.util import nest - - -def map_nested(map_fn, nested): - """Executes map_fn on every element in a (potentially) nested structure. - - Args: - map_fn: A callable to execute on each element in 'nested'. - nested: A potentially nested combination of sequence objects. Sequence - objects include tuples, lists, namedtuples, and all subclasses of - collections.Sequence except strings. See nest.is_sequence for details. - For example [1, ('hello', 4.3)] is a nested structure containing elements - 1, 'hello', and 4.3. - Returns: - out_structure: A potentially nested combination of sequence objects with the - same structure as the 'nested' input argument. out_structure - contains the result of applying map_fn to each element in 'nested'. For - example map_nested(lambda x: x+1, [1, (3, 4.3)]) returns [2, (4, 5.3)]. - """ - out = map(map_fn, nest.flatten(nested)) - return nest.pack_sequence_as(nested, out) - - -def tile_tensors(tensors, multiples): - """Tiles a set of Tensors. 
- - Args: - tensors: A potentially nested tuple or list of Tensors with rank - greater than or equal to the length of 'multiples'. The Tensors do not - need to have the same rank, but their rank must not be dynamic. - multiples: A python list of ints indicating how to tile each Tensor - in 'tensors'. Similar to the 'multiples' argument to tf.tile. - Returns: - tiled_tensors: A potentially nested tuple or list of Tensors with the same - structure as the 'tensors' input argument. Contains the result of - applying tf.tile to each Tensor in 'tensors'. When the rank of a Tensor - in 'tensors' is greater than the length of multiples, multiples is padded - at the end with 1s. For example when tiling a 4-dimensional Tensor with - multiples [3, 4], multiples would be padded to [3, 4, 1, 1] before tiling. - """ - def tile_fn(x): - return tf.tile(x, multiples + [1] * (x.shape.ndims - len(multiples))) - - return map_nested(tile_fn, tensors) - - -def where_tensors(condition, x_tensors, y_tensors): - """Performs a tf.where operation on a two sets of Tensors. - - Args: - condition: The condition tensor to use for the where operation. - x_tensors: A potentially nested tuple or list of Tensors. - y_tensors: A potentially nested tuple or list of Tensors. Must have the - same structure as x_tensors. - Returns: - whered_tensors: A potentially nested tuple or list of Tensors with the - same structure as the 'tensors' input argument. Contains the result of - applying tf.where(condition, x, y) on each pair of elements in x_tensors - and y_tensors. - """ - flat_x = nest.flatten(x_tensors) - flat_y = nest.flatten(y_tensors) - result = [tf.where(condition, x, y) for x, y in - itertools.izip(flat_x, flat_y)] - - return nest.pack_sequence_as(x_tensors, result) - - -def gather_tensors(tensors, indices): - """Performs a tf.gather operation on a set of Tensors. - - Args: - tensors: A potentially nested tuple or list of Tensors. - indices: The indices to use for the gather operation. - Returns: - gathered_tensors: A potentially nested tuple or list of Tensors with the - same structure as the 'tensors' input argument. Contains the result of - applying tf.gather(x, indices) on each element x in 'tensors'. - """ - return map_nested(lambda x: tf.gather(x, indices), tensors) - - -def tas_for_tensors(tensors, length, **kwargs): - """Unstacks a set of Tensors into TensorArrays. - - Args: - tensors: A potentially nested tuple or list of Tensors with length in the - first dimension greater than or equal to the 'length' input argument. - length: The desired length of the TensorArrays. - **kwargs: Keyword args for TensorArray constructor. - Returns: - tensorarrays: A potentially nested tuple or list of TensorArrays with the - same structure as 'tensors'. Contains the result of unstacking each Tensor - in 'tensors'. - """ - def map_fn(x): - ta = tf.TensorArray(x.dtype, length, - name=x.name.split(':')[0] + '_ta', **kwargs) - return ta.unstack(x[:length, :]) - return map_nested(map_fn, tensors) - - -def read_tas(tas, index): - """Performs a read operation on a set of TensorArrays. - - Args: - tas: A potentially nested tuple or list of TensorArrays with length greater - than 'index'. - index: The location to read from. - Returns: - read_tensors: A potentially nested tuple or list of Tensors with the same - structure as the 'tas' input argument. Contains the result of - performing a read operation at 'index' on each TensorArray in 'tas'. 
- """ - return map_nested(lambda ta: ta.read(index), tas) diff --git a/research/fivo/fivo/nested_utils_test.py b/research/fivo/fivo/nested_utils_test.py deleted file mode 100644 index 87991dd79..000000000 --- a/research/fivo/fivo/nested_utils_test.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.nested_utils.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import tensorflow as tf -nest = tf.contrib.framework.nest - -from fivo import nested_utils - -# An example namedtuple for use in the following tests. -ExampleTuple = collections.namedtuple('ExampleTuple', ['a', 'b']) - - -class NestedUtilsTest(tf.test.TestCase): - - def test_map_nested_works_on_nested_structures(self): - """Check that map_nested works with nested structures.""" - original = [1, (2, 3.2, (4., ExampleTuple(5, 6)))] - expected = [2, (3, 4.2, (5., ExampleTuple(6, 7)))] - out = nested_utils.map_nested(lambda x: x+1, original) - self.assertEqual(expected, out) - - def test_map_nested_works_on_single_objects(self): - """Check that map_nested works with raw objects.""" - original = 1 - expected = 2 - out = nested_utils.map_nested(lambda x: x+1, original) - self.assertEqual(expected, out) - - def test_map_nested_works_on_flat_lists(self): - """Check that map_nested works with a flat list.""" - original = [1, 2, 3] - expected = [2, 3, 4] - out = nested_utils.map_nested(lambda x: x+1, original) - self.assertEqual(expected, out) - - def test_tile_tensors(self): - """Checks that tile_tensors correctly tiles tensors of different ranks.""" - a = tf.range(20) - b = tf.reshape(a, [2, 10]) - c = tf.reshape(a, [2, 2, 5]) - a_tiled = tf.tile(a, [3]) - b_tiled = tf.tile(b, [3, 1]) - c_tiled = tf.tile(c, [3, 1, 1]) - tensors = [a, (b, ExampleTuple(c, c))] - expected_tensors = [a_tiled, (b_tiled, ExampleTuple(c_tiled, c_tiled))] - tiled = nested_utils.tile_tensors(tensors, [3]) - nest.assert_same_structure(expected_tensors, tiled) - with self.test_session() as sess: - expected, out = sess.run([expected_tensors, tiled]) - expected = nest.flatten(expected) - out = nest.flatten(out) - # Check that the tiling is correct. - for x, y in zip(expected, out): - self.assertAllClose(x, y) - - def test_gather_tensors(self): - a = tf.reshape(tf.range(20), [5, 4]) - inds = [0, 0, 1, 4] - a_gathered = tf.gather(a, inds) - tensors = [a, (a, ExampleTuple(a, a))] - gt_gathered = [a_gathered, (a_gathered, - ExampleTuple(a_gathered, a_gathered))] - gathered = nested_utils.gather_tensors(tensors, inds) - nest.assert_same_structure(gt_gathered, gathered) - with self.test_session() as sess: - gt, out = sess.run([gt_gathered, gathered]) - gt = nest.flatten(gt) - out = nest.flatten(out) - # Check that the gathering is correct. 
- for x, y in zip(gt, out):
- self.assertAllClose(x, y)
-
- def test_tas_for_tensors(self):
- a = tf.reshape(tf.range(20), [5, 4])
- tensors = [a, (a, ExampleTuple(a, a))]
- tas = nested_utils.tas_for_tensors(tensors, 5)
- nest.assert_same_structure(tensors, tas)
- # We can't pass TensorArrays to sess.run so instead we turn them back into
- # tensors to check that they were created correctly.
- stacked = nested_utils.map_nested(lambda x: x.stack(), tas)
- with self.test_session() as sess:
- gt, out = sess.run([tensors, stacked])
- gt = nest.flatten(gt)
- out = nest.flatten(out)
- # Check that the tas were created correctly.
- for x, y in zip(gt, out):
- self.assertAllClose(x, y)
-
- def test_read_tas(self):
- a = tf.reshape(tf.range(20), [5, 4])
- a_read = a[3, :]
- tensors = [a, (a, ExampleTuple(a, a))]
- gt_read = [a_read, (a_read, ExampleTuple(a_read, a_read))]
- tas = nested_utils.tas_for_tensors(tensors, 5)
- tas_read = nested_utils.read_tas(tas, 3)
- nest.assert_same_structure(tas, tas_read)
- with self.test_session() as sess:
- gt, out = sess.run([gt_read, tas_read])
- gt = nest.flatten(gt)
- out = nest.flatten(out)
- # Check that the tas were read correctly.
- for x, y in zip(gt, out):
- self.assertAllClose(x, y)
-
-if __name__ == '__main__':
- tf.test.main()
diff --git a/research/fivo/fivo/runners.py b/research/fivo/fivo/runners.py
deleted file mode 100644
index ec6fb91bf..000000000
--- a/research/fivo/fivo/runners.py
+++ /dev/null
@@ -1,489 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""High-level code for creating and running FIVO-related TensorFlow graphs.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import os
-import time
-
-import numpy as np
-import tensorflow as tf
-
-from fivo import bounds
-from fivo import smc
-
-from fivo.data import datasets
-from fivo.models import base
-from fivo.models import srnn
-from fivo.models import vrnn
-
-
-def create_dataset_and_model(config, split, shuffle, repeat):
- """Creates the dataset and model for a given config.
-
- Args:
- config: A configuration object with config values accessible as properties.
- Most likely a FLAGS object. This function expects the properties
- batch_size, dataset_path, dataset_type, and latent_size to be defined.
- split: The dataset split to load.
- shuffle: If true, shuffle the dataset randomly.
- repeat: If true, repeat the dataset endlessly.
- Returns:
- inputs: A batch of input sequences represented as a dense Tensor of shape
- [time, batch_size, data_dimension].
- targets: A batch of target sequences represented as a dense Tensor of
- shape [time, batch_size, data_dimension].
- lens: An int Tensor of shape [batch_size] representing the lengths of each
- sequence in the batch.
- model: A vrnn.TrainableVRNN or srnn.TrainableSRNN model object.
- Raises:
- ValueError: if the config is invalid.
- """ - sigma_min = 0.0 - if config.dataset_type == "pianoroll": - inputs, targets, lengths, mean = datasets.create_pianoroll_dataset( - config.dataset_path, split, config.batch_size, shuffle=shuffle, - repeat=repeat) - # Convert the mean of the training set to logit space so it can be used to - # initialize the bias of the generative distribution. - emission_bias_init = -tf.log( - 1. / tf.clip_by_value(mean, 0.0001, 0.9999) - 1) - emission_distribution_class = base.ConditionalBernoulliDistribution - elif config.dataset_type == "speech": - inputs, targets, lengths = datasets.create_speech_dataset( - config.dataset_path, config.batch_size, - samples_per_timestep=config.data_dimension, prefetch_buffer_size=1, - shuffle=False, repeat=False) - # There is no bias for the generative distribution because the test set - # is assumed to be already standardized with the training set statistics. - mean = None - emission_bias_init = None - emission_distribution_class = base.ConditionalNormalDistribution - if config.model == "vrnn": - model = vrnn.create_vrnn(inputs.get_shape().as_list()[2], - config.latent_size, - emission_distribution_class, - emission_bias_init=emission_bias_init, - proposal_type=config.proposal_type, - sigma_min=sigma_min, - raw_sigma_bias=0.5, - use_tilt=(config.bound == "fivo-aux")) - elif config.model == "srnn": - model = srnn.create_srnn(inputs.get_shape().as_list()[2], - config.latent_size, - emission_distribution_class, - emission_bias_init=emission_bias_init, - proposal_type=config.proposal_type, - sigma_min=sigma_min, - raw_sigma_bias=0.5, - use_tilt=(config.bound == "fivo-aux")) - else: - raise ValueError("model flag: %s is unrecognized" % config.model) - return inputs, targets, lengths, model, mean - - -def restore_checkpoint_if_exists(saver, sess, logdir): - """Looks for a checkpoint and restores the session from it if found. - - Args: - saver: A tf.train.Saver for restoring the session. - sess: A TensorFlow session. - logdir: The directory to look for checkpoints in. - Returns: - True if a checkpoint was found and restored, False otherwise. - """ - checkpoint = tf.train.get_checkpoint_state(logdir) - if checkpoint: - checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path) - full_checkpoint_path = os.path.join(logdir, checkpoint_name) - saver.restore(sess, full_checkpoint_path) - return True - return False - - -def wait_for_checkpoint(saver, sess, logdir): - """Loops until the session is restored from a checkpoint in logdir. - - Args: - saver: A tf.train.Saver for restoring the session. - sess: A TensorFlow session. - logdir: The directory to look for checkpoints in. - """ - while not restore_checkpoint_if_exists(saver, sess, logdir): - tf.logging.info("Checkpoint not found in %s, sleeping for 60 seconds." - % logdir) - time.sleep(60) - - -def run_train(config, create_dataset_and_model_fn=create_dataset_and_model): - """Runs training for a sequential latent variable model. - - Args: - config: A configuration object with config values accessible as properties. - Most likely a FLAGS object. For a list of expected properties and their - meaning see the flags defined in fivo.py. - create_dataset_and_model_fn: If present, calls this function to create a - dataset and model instead of create_dataset_and_model() above. The - signature must be the same. 
- """ - - def create_logging_hook(step, bound_value): - """Creates a logging hook that prints the bound value periodically.""" - bound_label = config.bound + " bound" - if config.normalize_by_seq_len: - bound_label += " per timestep" - else: - bound_label += " per sequence" - def summary_formatter(log_dict): - return "Step %d, %s: %f" % ( - log_dict["step"], bound_label, log_dict["bound_value"]) - logging_hook = tf.train.LoggingTensorHook( - {"step": step, "bound_value": bound_value}, - every_n_iter=config.summarize_every, - formatter=summary_formatter) - return logging_hook - - def create_loss(): - """Creates the loss to be optimized. - - Returns: - bound: A float Tensor containing the value of the bound that is - being optimized. - loss: A float Tensor that when differentiated yields the gradients - to apply to the model. Should be optimized via gradient descent. - """ - inputs, targets, lengths, model, _ = create_dataset_and_model_fn( - config, split="train", shuffle=True, repeat=True) - # Compute lower bounds on the log likelihood. - if config.bound == "elbo": - ll_per_seq, _, _ = bounds.iwae( - model, (inputs, targets), lengths, num_samples=1, - parallel_iterations=config.parallel_iterations - ) - elif config.bound == "iwae": - ll_per_seq, _, _ = bounds.iwae( - model, (inputs, targets), lengths, num_samples=config.num_samples, - parallel_iterations=config.parallel_iterations - ) - elif config.bound in ("fivo", "fivo-aux"): - if config.resampling_type == "relaxed": - ll_per_seq, _, _, _ = bounds.fivo( - model, (inputs, targets), - lengths, - num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - resampling_type=config.resampling_type, - random_seed=config.random_seed, - relaxed_resampling_temperature=config. - relaxed_resampling_temperature, - parallel_iterations=config.parallel_iterations - ) - else: - ll_per_seq, _, _, _ = bounds.fivo( - model, (inputs, targets), lengths, num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - resampling_type=config.resampling_type, - random_seed=config.random_seed, - parallel_iterations=config.parallel_iterations - ) - # Compute loss scaled by number of timesteps. 
- ll_per_t = tf.reduce_mean(ll_per_seq / tf.to_float(lengths)) - ll_per_seq = tf.reduce_mean(ll_per_seq) - - tf.summary.scalar("train_ll_per_seq", ll_per_seq) - tf.summary.scalar("train_ll_per_t", ll_per_t) - - if config.normalize_by_seq_len: - return ll_per_t, -ll_per_t - else: - return ll_per_seq, -ll_per_seq - - def create_graph(): - """Creates the training graph.""" - global_step = tf.train.get_or_create_global_step() - bound, loss = create_loss() - opt = tf.train.AdamOptimizer(config.learning_rate) - grads = opt.compute_gradients(loss, var_list=tf.trainable_variables()) - train_op = opt.apply_gradients(grads, global_step=global_step) - return bound, train_op, global_step - - device = tf.train.replica_device_setter(ps_tasks=config.ps_tasks) - with tf.Graph().as_default(): - if config.random_seed: tf.set_random_seed(config.random_seed) - with tf.device(device): - bound, train_op, global_step = create_graph() - log_hook = create_logging_hook(global_step, bound) - start_training = not config.stagger_workers - with tf.train.MonitoredTrainingSession( - master=config.master, - is_chief=config.task == 0, - hooks=[log_hook], - checkpoint_dir=config.logdir, - save_checkpoint_secs=120, - save_summaries_steps=config.summarize_every, - log_step_count_steps=config.summarize_every) as sess: - cur_step = -1 - while not sess.should_stop() and cur_step <= config.max_steps: - if config.task > 0 and not start_training: - cur_step = sess.run(global_step) - tf.logging.info("task %d not active yet, sleeping at step %d" % - (config.task, cur_step)) - time.sleep(30) - if cur_step >= config.task * 1000: - start_training = True - else: - _, cur_step = sess.run([train_op, global_step]) - - -def run_eval(config, create_dataset_and_model_fn=create_dataset_and_model): - """Runs evaluation for a sequential latent variable model. - - This method runs only one evaluation over the dataset, writes summaries to - disk, and then terminates. It does not loop indefinitely. - - Args: - config: A configuration object with config values accessible as properties. - Most likely a FLAGS object. For a list of expected properties and their - meaning see the flags defined in fivo.py. - create_dataset_and_model_fn: If present, calls this function to create a - dataset and model instead of create_dataset_and_model() above. The - signature must be the same. - """ - - def create_graph(): - """Creates the evaluation graph. - - Returns: - lower_bounds: A tuple of float Tensors containing the values of the 3 - evidence lower bounds, summed across the batch. - total_batch_length: The total number of timesteps in the batch, summed - across batch examples. - batch_size: The batch size. - global_step: The global step the checkpoint was loaded from. - """ - global_step = tf.train.get_or_create_global_step() - inputs, targets, lengths, model, _ = create_dataset_and_model_fn( - config, split=config.split, shuffle=False, repeat=False) - # Compute lower bounds on the log likelihood. - elbo_ll_per_seq, _, _ = bounds.iwae( - model, (inputs, targets), lengths, num_samples=1, - parallel_iterations=config.parallel_iterations - ) - iwae_ll_per_seq, _, _ = bounds.iwae( - model, (inputs, targets), lengths, num_samples=config.num_samples, - parallel_iterations=config.parallel_iterations - ) - # The resampling type should only be used for training, so we ignore it. 
-    fivo_ll_per_seq, _, _, _ = bounds.fivo(
-        model, (inputs, targets), lengths, num_samples=config.num_samples,
-        resampling_criterion=smc.ess_criterion, random_seed=config.random_seed,
-        parallel_iterations=config.parallel_iterations
-    )
-    elbo_ll = tf.reduce_sum(elbo_ll_per_seq)
-    iwae_ll = tf.reduce_sum(iwae_ll_per_seq)
-    fivo_ll = tf.reduce_sum(fivo_ll_per_seq)
-    batch_size = tf.shape(lengths)[0]
-    total_batch_length = tf.reduce_sum(lengths)
-    return ((elbo_ll, iwae_ll, fivo_ll), total_batch_length, batch_size,
-            global_step)
-
-  def average_bounds_over_dataset(lower_bounds, total_batch_length, batch_size,
-                                  sess):
-    """Computes the values of the bounds, averaged over the dataset.
-
-    Args:
-      lower_bounds: Tuple of float Tensors containing the values of the bounds
-        evaluated on a single batch.
-      total_batch_length: Integer Tensor that represents the total number of
-        timesteps in the current batch.
-      batch_size: Integer Tensor containing the batch size. This can vary if
-        the requested batch_size does not evenly divide the size of the
-        dataset.
-      sess: A TensorFlow Session object.
-    Returns:
-      ll_per_t: A length 3 numpy array of floats containing each bound's
-        average value, normalized by the total number of timesteps in the
-        dataset. Can be interpreted as a lower bound on the average log
-        likelihood per timestep in the dataset.
-      ll_per_seq: A length 3 numpy array of floats containing each bound's
-        average value, normalized by the number of sequences in the dataset.
-        Can be interpreted as a lower bound on the average log likelihood per
-        sequence in the dataset.
-    """
-    total_ll = np.zeros(3, dtype=np.float64)
-    total_n_elems = 0.0
-    total_length = 0.0
-    while True:
-      try:
-        outs = sess.run([lower_bounds, batch_size, total_batch_length])
-      except tf.errors.OutOfRangeError:
-        break
-      total_ll += outs[0]
-      total_n_elems += outs[1]
-      total_length += outs[2]
-    ll_per_t = total_ll / total_length
-    ll_per_seq = total_ll / total_n_elems
-    return ll_per_t, ll_per_seq
-
-  def summarize_lls(lls_per_t, lls_per_seq, summary_writer, step):
-    """Creates log-likelihood lower bound summaries and writes them to disk.
-
-    Args:
-      lls_per_t: An array of 3 python floats, contains the values of the
-        evaluated bounds normalized by the number of timesteps.
-      lls_per_seq: An array of 3 python floats, contains the values of the
-        evaluated bounds normalized by the number of sequences.
-      summary_writer: A tf.summary.FileWriter.
-      step: The current global step.
-    """
-    def scalar_summary(name, value):
-      value = tf.Summary.Value(tag=name, simple_value=value)
-      return tf.Summary(value=[value])
-
-    for i, bound in enumerate(["elbo", "iwae", "fivo"]):
-      per_t_summary = scalar_summary("%s/%s_ll_per_t" % (config.split, bound),
-                                     lls_per_t[i])
-      per_seq_summary = scalar_summary("%s/%s_ll_per_seq" %
                                        (config.split, bound),
-                                       lls_per_seq[i])
-      summary_writer.add_summary(per_t_summary, global_step=step)
-      summary_writer.add_summary(per_seq_summary, global_step=step)
-    summary_writer.flush()
-
-  with tf.Graph().as_default():
-    if config.random_seed: tf.set_random_seed(config.random_seed)
-    lower_bounds, total_batch_length, batch_size, global_step = create_graph()
-    summary_dir = config.logdir + "/" + config.split
-    summary_writer = tf.summary.FileWriter(
-        summary_dir, flush_secs=15, max_queue=100)
-    saver = tf.train.Saver()
-    with tf.train.SingularMonitoredSession() as sess:
-      wait_for_checkpoint(saver, sess, config.logdir)
-      step = sess.run(global_step)
-      tf.logging.info("Model restored from step %d, evaluating."
-                      % step)
-      ll_per_t, ll_per_seq = average_bounds_over_dataset(
-          lower_bounds, total_batch_length, batch_size, sess)
-      summarize_lls(ll_per_t, ll_per_seq, summary_writer, step)
-      tf.logging.info("%s elbo ll/t: %f, iwae ll/t: %f fivo ll/t: %f",
-                      config.split, ll_per_t[0], ll_per_t[1], ll_per_t[2])
-      tf.logging.info("%s elbo ll/seq: %f, iwae ll/seq: %f fivo ll/seq: %f",
-                      config.split, ll_per_seq[0], ll_per_seq[1],
-                      ll_per_seq[2])
-
-
-def run_sample(config, create_dataset_and_model_fn=create_dataset_and_model):
-  """Sample from the model. Only pianorolls and pose datasets are supported."""
-
-  def sample_from_model(model, initial_state, initial_inputs, mean):
-    """Samples a sequence of outputs from the model.
-
-    The mean must be supplied -- if it isn't the results will be incorrect.
-
-    Args:
-      model: A model with sample_step implemented. See models/vrnn.py for an
-        example.
-      initial_state: The initial state of the model.
-      initial_inputs: The initial inputs to feed into the model.
-      mean: The mean of the training set, a Tensor of shape [data_dimension].
-    Returns:
-      samples: A Tensor of shape [sample_length, batch_size, num_samples,
-        data_dimension] containing the samples from the model.
-    """
-    initial_state, initial_output = model.sample_step(initial_state,
-                                                      initial_inputs, 0)
-    output_ta = tf.TensorArray(size=config.sample_length,
-                               dtype=tf.float32,
-                               dynamic_size=False,
-                               clear_after_read=True)
-    output_ta = output_ta.write(0, initial_output)
-    t0 = tf.constant(1, dtype=tf.int32)
-
-    def sample_step(t, state, prev_outputs, output_ta):
-      state, output = model.sample_step(state, prev_outputs, t)
-      output_ta = output_ta.write(t, output)
-      centered_output = output - mean[tf.newaxis, :]
-      return t+1, state, centered_output, output_ta
-
-    def sample_predicate(t, *unused_args):
-      return t < config.sample_length
-
-    _, _, _, output_ta = tf.while_loop(
-        sample_predicate,
-        sample_step,
-        loop_vars=(t0, initial_state, initial_output, output_ta),
-        parallel_iterations=config.parallel_iterations
-    )
-    samples = output_ta.stack()
-    samples = tf.reshape(samples, [config.sample_length, config.batch_size,
-                                   config.num_samples, config.data_dimension])
-    return samples
-
-  def create_graph():
-    """Creates the graph to sample from the model.
-
-    First, the model is conditioned on a prefix by sampling a batch of data
-    and trimming it to prefix_length. The configured bound is used to do the
-    conditioning. Then the final state from the conditioning is used to sample
-    from the model.
-
-    Returns:
-      samples: A Tensor of shape [sample_length, batch_size,
-        num_samples, data_dimension] representing samples from the model.
-      prefixes: A Tensor of shape [prefix_length, batch_size, data_dimension]
-        representing the prefixes the model was conditioned on.
- """ - inputs, targets, lengths, model, mean = create_dataset_and_model_fn( - config, split=config.split, shuffle=True, repeat=True) - input_prefixes = inputs[:config.prefix_length] - target_prefixes = targets[:config.prefix_length] - prefix_lengths = tf.ones_like(lengths) * config.prefix_length - if config.bound == "elbo": - _, _, state = bounds.iwae( - model, (input_prefixes, target_prefixes), - prefix_lengths, num_samples=1) - elif config.bound == "iwae": - _, _, state = bounds.iwae( - model, (input_prefixes, target_prefixes), - prefix_lengths, num_samples=config.num_samples) - elif config.bound == "fivo": - _, _, _, state = bounds.fivo( - model, (input_prefixes, target_prefixes), prefix_lengths, - num_samples=config.num_samples, - resampling_criterion=smc.ess_criterion, - random_seed=config.random_seed) - sample_inputs = tf.tile(inputs[config.prefix_length], - [config.num_samples, 1]) - samples = sample_from_model(model, state, sample_inputs, mean) - return samples, target_prefixes - - with tf.Graph().as_default(): - if config.random_seed: - tf.set_random_seed(config.random_seed) - samples, prefixes = create_graph() - if config.sample_out_dir: - out_dir = config.sample_our_dir - else: - out_dir = config.logdir - if not tf.gfile.Exists(out_dir): - tf.gfile.MakeDirs(out_dir) - with tf.train.SingularMonitoredSession( - checkpoint_dir=config.logdir) as sess: - samples_out, prefixes_out = sess.run([samples, prefixes]) - with tf.gfile.Open(os.path.join(out_dir, "samples.npz"), "w") as fout: - np.save(fout, {"prefixes": prefixes_out, "samples": samples_out}) diff --git a/research/fivo/fivo/runners_test.py b/research/fivo/fivo/runners_test.py deleted file mode 100644 index eb050c0a0..000000000 --- a/research/fivo/fivo/runners_test.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.runners""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import numpy as np -import tensorflow as tf - -from fivo import runners -from fivo.models import base -from fivo.models import vrnn - -FLAGS = tf.app.flags.FLAGS - - -class RunnersTest(tf.test.TestCase): - - def default_config(self): - class Config(object): - pass - config = Config() - config.model = "vrnn" - config.latent_size = 64 - config.batch_size = 4 - config.num_samples = 4 - config.resampling_type = "multinomial" - config.normalize_by_seq_len = True - config.learning_rate = 0.0001 - config.max_steps = int(1e6) - config.summarize_every = 50 - # Master must be "" to prevent state from persisting between sessions. 
- config.master = "" - config.task = 0 - config.ps_tasks = 0 - config.stagger_workers = True - config.random_seed = 1234 - config.parallel_iterations = 1 - config.dataset_type = "pianoroll" - config.data_dimension = None - config.dataset_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "test_data", "tiny_pianoroll.pkl") - config.proposal_type = "filtering" - return config - - def run_training_one_step(self, bound, dataset_type, data_dimension, - dataset_filename, dir_prefix, resampling_type, - model, batch_size=2, num_samples=3, - create_dataset_and_model_fn=(runners.create_dataset_and_model)): - config = self.default_config() - config.model = model - config.resampling_type = resampling_type - config.relaxed_resampling_temperature = 0.5 - config.bound = bound - config.split = "train" - config.dataset_type = dataset_type - config.dataset_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "test_data", - dataset_filename) - config.max_steps = 1 - config.batch_size = batch_size - config.num_samples = num_samples - config.latent_size = 4 - config.data_dimension = data_dimension - config.logdir = os.path.join(tf.test.get_temp_dir(), "%s-%s-%s-%s" % - (dir_prefix, bound, dataset_type, model)) - runners.run_train(config, - create_dataset_and_model_fn=create_dataset_and_model_fn) - return config - - def dummmy_dataset_and_model_fn(self, *unused_args, **unused_kwargs): - # We ignore the arguments in the dummy but need to preserve prototype. - batch_elements = 5 - sequence_length = 4 - data_dimensions = 3 - dataset = tf.data.Dataset.from_tensors( - tf.zeros((sequence_length, batch_elements, data_dimensions), - dtype=tf.float32)) - inputs = dataset.make_one_shot_iterator().get_next() - targets = tf.zeros_like(inputs) - lengths = tf.constant([sequence_length] * batch_elements) - mean = tf.constant((0.0, 0.0, 0.0)) - model = vrnn.create_vrnn(data_dimensions, 1, - base.ConditionalNormalDistribution) - return inputs, targets, lengths, model, mean - - def test_training_one_step_fivo_pianoroll_vrnn(self): - self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_iwae_pianoroll_vrnn(self): - self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_elbo_pianoroll_vrnn(self): - self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_fivo_speech_vrnn(self): - self.run_training_one_step("fivo", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_iwae_speech_vrnn(self): - self.run_training_one_step("iwae", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_elbo_speech_vrnn(self): - self.run_training_one_step("elbo", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "vrnn") - - def test_training_one_step_fivo_pianoroll_srnn(self): - self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "multinomial", "srnn") - - def test_training_one_step_iwae_pianoroll_srnn(self): - self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "multinomial", "srnn") - - def test_training_one_step_elbo_pianoroll_srnn(self): - self.run_training_one_step("elbo", "pianoroll", 88, 
"tiny_pianoroll.pkl", - "test-training", "multinomial", "srnn") - - def test_training_one_step_fivo_speech_srnn(self): - self.run_training_one_step("fivo", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "srnn") - - def test_training_one_step_iwae_speech_srnn(self): - self.run_training_one_step("iwae", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "srnn") - - def test_training_one_step_elbo_speech_srnn(self): - self.run_training_one_step("elbo", "speech", 2, "tiny_speech_dataset.tfrecord", - "test-training", "multinomial", "srnn") - - def test_training_one_step_fivo_pianoroll_vrnn_relaxed(self): - self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "vrnn") - - def test_training_one_step_iwae_pianoroll_vrnn_relaxed(self): - self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "vrnn") - - def test_training_one_step_elbo_pianoroll_vrnn_relaxed(self): - self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "vrnn") - - def test_training_one_step_fivo_pianoroll_srnn_relaxed(self): - self.run_training_one_step("fivo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "srnn") - - def test_training_one_step_iwae_pianoroll_srnn_relaxed(self): - self.run_training_one_step("iwae", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "srnn") - - def test_training_one_step_elbo_pianoroll_srnn_relaxed(self): - self.run_training_one_step("elbo", "pianoroll", 88, "tiny_pianoroll.pkl", - "test-training", "relaxed", "srnn") - - def test_eval_vrnn(self): - self.run_eval("vrnn") - - def test_eval_srnn(self): - self.run_eval("srnn") - - def run_eval(self, model): - config = self.run_training_one_step( - "fivo", "pianoroll", 88, "tiny_pianoroll.pkl", "test-eval-" + model, - "multinomial", model) - config.split = "train" - runners.run_eval(config) - - def test_sampling_vrnn(self): - self.run_sampling("vrnn") - - def test_sampling_srnn(self): - self.run_sampling("srnn") - - def run_sampling(self, model): - """Test sampling from the model.""" - config = self.run_training_one_step( - "fivo", "pianoroll", 88, "tiny_pianoroll.pkl", "test-sampling", "multinomial", - model) - config.prefix_length = 3 - config.sample_length = 6 - config.split = "train" - config.sample_out_dir = None - - runners.run_sample(config) - unused_samples = np.load(os.path.join(config.logdir, "samples.npz")) - - def test_training_with_custom_fn(self): - self.run_training_one_step( - "fivo", "pianoroll", 3, "tiny_pianoroll.pkl", - "test-training-custom-fn", "multinomial", "vrnn", batch_size=5, - create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) - - def test_eval_with_custom_fn(self): - config = self.run_training_one_step( - "fivo", "pianoroll", 1, "tiny_pianoroll.pkl", - "test-eval-custom-fn", "multinomial", "vrnn", batch_size=1, - create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) - config.split = "train" - runners.run_eval( - config, - create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) - - def test_sampling_with_custom_fn(self): - config = self.run_training_one_step( - "fivo", "pianoroll", 3, "tiny_pianoroll.pkl", - "test-sample-custom-fn", "multinomial", "vrnn", batch_size=5, - create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn) - config.prefix_length = 2 - config.sample_length = 3 - config.split = "train" - config.sample_out_dir = None - - 
runners.run_sample(
-        config,
-        create_dataset_and_model_fn=self.dummmy_dataset_and_model_fn)
-    unused_samples = np.load(os.path.join(config.logdir, "samples.npz"))
-
-
-if __name__ == "__main__":
-  tf.test.main()
diff --git a/research/fivo/fivo/smc.py b/research/fivo/fivo/smc.py
deleted file mode 100644
index 25d496904..000000000
--- a/research/fivo/fivo/smc.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Implementation of sequential Monte Carlo algorithms.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-import fivo.nested_utils as nested
-
-
-def ess_criterion(log_weights, unused_t):
-  """A criterion that resamples based on effective sample size."""
-  num_particles = tf.shape(log_weights)[0]
-  # Calculate the effective sample size in log space:
-  # ESS = (sum_i w_i)^2 / sum_i w_i^2, so
-  # log ESS = 2 * logsumexp(log w) - logsumexp(2 * log w).
-  ess_num = 2 * tf.reduce_logsumexp(log_weights, axis=0)
-  ess_denom = tf.reduce_logsumexp(2 * log_weights, axis=0)
-  log_ess = ess_num - ess_denom
-  # Resample when the ESS drops to half the number of particles or below.
-  return log_ess <= tf.log(tf.to_float(num_particles) / 2.0)
-
-
-def never_resample_criterion(log_weights, unused_t):
-  """A criterion that never resamples."""
-  batch_size = tf.shape(log_weights)[1]
-  return tf.cast(tf.zeros([batch_size]), tf.bool)
-
-
-def always_resample_criterion(log_weights, unused_t):
-  """A criterion that resamples at every timestep."""
-  batch_size = tf.shape(log_weights)[1]
-  return tf.cast(tf.ones([batch_size]), tf.bool)
-
-
-def multinomial_resampling(log_weights, states, num_particles, batch_size,
-                           random_seed=None):
-  """Resample states with multinomial resampling.
-
-  Args:
-    log_weights: A [num_particles, batch_size] Tensor representing a batch
-      of batch_size logits for num_particles-ary Categorical distribution.
-    states: A nested list of [batch_size*num_particles, data_size] Tensors that
-      will be resampled from the groups of every num_particles-th row.
-    num_particles: The number of particles/samples.
-    batch_size: The batch size.
-    random_seed: The random seed to pass to the resampling operations in
-      the particle filter. Mainly useful for testing.
-
-  Returns:
-    resampled_states: A nested list of [batch_size*num_particles, data_size]
-      Tensors resampled via multinomial sampling.
-  """
-  # Calculate the ancestor indices via resampling. Because we maintain the
-  # log unnormalized weights, we pass the weights in as logits, allowing
-  # the distribution object to apply a softmax and normalize them.
-  resampling_parameters = tf.transpose(log_weights, perm=[1, 0])
-  resampling_dist = tf.contrib.distributions.Categorical(
-      logits=resampling_parameters)
-  ancestors = tf.stop_gradient(
-      resampling_dist.sample(sample_shape=num_particles, seed=random_seed))
-
-  # Because the batch is flattened, we must modify ancestor_inds to index the
-  # proper samples. The particles in the ith filter are distributed every
-  # batch_size rows in the batch, and offset i rows from the top. So, to
-  # correct the indices we multiply by the batch_size and add the proper
-  # offset. Crucially, when ancestor_inds is flattened the layout of the
-  # batch is maintained.
-  offset = tf.expand_dims(tf.range(batch_size), 0)
-  ancestor_inds = tf.reshape(ancestors * batch_size + offset, [-1])
-
-  resampled_states = nested.gather_tensors(states, ancestor_inds)
-  return resampled_states
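A quick aside on the index arithmetic above, with a minimal numpy sketch (editorial illustration, not part of the deleted file): particle i of batch element j lives at flattened row i * batch_size + j, so an ancestor matrix converts to flat gather indices exactly as in multinomial_resampling:

import numpy as np

num_particles, batch_size = 3, 2
# Row i * batch_size + j of the flattened batch holds particle i of batch
# element j.
states = np.arange(num_particles * batch_size)
# ancestors[i, j] = which particle row (i, j) should copy its state from.
ancestors = np.array([[2, 0], [0, 1], [1, 1]])
offset = np.arange(batch_size)[np.newaxis, :]
ancestor_inds = (ancestors * batch_size + offset).reshape([-1])
resampled = states[ancestor_inds]
# Row (i, j) of the result is particle ancestors[i, j] of batch element j.
assert resampled.tolist() == [4, 1, 0, 3, 2, 3]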
-def _blend_tensor(blending_weights, tensor, num_particles, batch_size):
-  """Blend tensor according to the weights.
-
-  The first dimension of tensor is actually a 2d index compacted to a 1d
-  index and similarly for blended_tensor. So if we index these Tensors
-  by [(i, j), k], then
-
-    blended_tensor[(i, j), k] =
-      sum_l tensor[(l, j), k] * blending_weights[i, j, l].
-
-  Args:
-    blending_weights: [num_particles, batch_size, num_particles] weights where
-      the indices represent [sample index, batch index, blending weight index].
-    tensor: [num_particles * batch_size, state_dim] Tensor to be blended.
-    num_particles: The number of particles/samples.
-    batch_size: The batch size.
-
-  Returns:
-    blended_tensor: [num_particles*batch_size, state_dim] blended Tensor.
-  """
-  # tensor is currently [num_particles * batch_size, state_dim], so we reshape
-  # it to [num_particles, batch_size, state_dim]. Then, transpose it to
-  # [batch_size, state_size, num_particles].
-  tensor = tf.transpose(
-      tf.reshape(tensor, [num_particles, batch_size, -1]), perm=[1, 2, 0])
-  blending_weights = tf.transpose(blending_weights, perm=[1, 2, 0])
-  # blending_weights is now [batch index, blending weight index, sample index].
-  # Multiplying these gives a matrix of size [batch_size, state_size,
-  # num_particles].
-  tensor = tf.matmul(tensor, blending_weights)
-  # Transpose the tensor to be [num_particles, batch_size, state_size]
-  # and then reshape it to match the original format.
-  tensor = tf.reshape(tf.transpose(tensor, perm=[2, 0, 1]),
-                      [num_particles*batch_size, -1])
-  return tensor
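The blending formula above can be checked by hand with the same numbers used in smc_test.test_blend_tensor later in this patch. A small numpy sketch (editorial, independent of TensorFlow) that implements the documented sum directly:

import numpy as np

num_particles, batch_size = 2, 2
weights = np.array([[[0.5, 0.5], [0.25, 0.75]],
                    [[0.75, 0.25], [0.5, 0.5]]])  # indexed [i, j, l]
tensor = np.array([[4.], [8.], [12.], [16.]])     # row (i, j) = i * 2 + j

blended = np.zeros_like(tensor)
for i in range(num_particles):
  for j in range(batch_size):
    for l in range(num_particles):
      blended[i * batch_size + j] += (tensor[l * batch_size + j] *
                                      weights[i, j, l])

# Matches the values asserted in test_blend_tensor below.
assert blended[:, 0].tolist() == [8., 14., 6., 12.]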
-def relaxed_resampling(log_weights, states, num_particles, batch_size,
-                       temperature=0.5, random_seed=None):
-  """Resample states with relaxed resampling.
-
-  Draw soft "ancestors" using the Gumbel-Softmax distribution.
-
-  Args:
-    log_weights: A [num_particles, batch_size] Tensor representing a batch
-      of batch_size logits for num_particles-ary Categorical distribution.
-    states: A nested list of [batch_size * num_particles, d] Tensors that will
-      be resampled from the groups of every num_particles-th row.
-    num_particles: The number of particles/samples.
-    batch_size: The batch size.
-    temperature: The temperature used for the relaxed one hot distribution.
-    random_seed: The random seed to pass to the resampling operations in
-      the particle filter. Mainly useful for testing.
-
-  Returns:
-    resampled_states: A nested list of [batch_size * num_particles, d]
-      Tensors resampled via relaxed (Gumbel-Softmax) resampling.
-  """
-  # log_weights are [num_particles, batch_size], so we transpose to get a
-  # set of batch_size distributions over [0, num_particles).
-  resampling_parameters = tf.transpose(log_weights, perm=[1, 0])
-  resampling_dist = tf.contrib.distributions.RelaxedOneHotCategorical(
-      temperature,
-      logits=resampling_parameters)
-
-  # Sample num_particles samples from the distribution, resulting in a
-  # [num_particles, batch_size, num_particles] Tensor that represents a set of
-  # [num_particles, batch_size] blending weights. The dimensions represent
-  # [particle index, batch index, blending weight index].
-  ancestors = resampling_dist.sample(sample_shape=num_particles,
-                                     seed=random_seed)
-
-  def map_fn(tensor):
-    return _blend_tensor(ancestors, tensor, num_particles, batch_size)
-
-  resampled_states = nested.map_nested(map_fn, states)
-  return resampled_states
-
-
-def smc(
-    transition_fn,
-    num_steps,
-    num_particles=1,
-    resampling_criterion=ess_criterion,
-    resampling_fn=multinomial_resampling,
-    loop_fn=None,
-    parallel_iterations=30,
-    swap_memory=True):
-  """Run a sequential Monte Carlo (SMC) algorithm.
-
-  This method runs an SMC algorithm that evolves systems of particles
-  using the supplied transition function for the specified number of steps. The
-  particles are optionally resampled using resampling_fn when indicated by
-  resampling_criterion.
-
-  Args:
-    transition_fn: A callable that propagates a batch of particles one step.
-      Must accept as arguments a batch of particle states and the current
-      timestep. Must return the incremental weights of each particle as a
-      [num_samples*batch_size] float Tensor, the particle states one timestep
-      in the future, and optionally a set of arguments to pass to the loop_fn.
-      If the loop args are not provided, they will be set to None. Before the
-      first timestep transition_fn will be called with the arguments None, -1
-      and should return the initial particle states.
-    num_steps: A [batch_size] Tensor of ints representing the number of steps
-      to run each filter for.
-    num_particles: A scalar int, the number of particles to use in each filter.
-    resampling_criterion: The resampling criterion to use for this particle
-      filter. Must accept the current log weights and timestep and
-      return a boolean Tensor of shape [batch_size] indicating whether each
-      particle filter should resample. See ess_criterion and related functions
-      for examples. When resampling_criterion is never_resample_criterion,
-      resampling_fn is ignored and never called.
-    resampling_fn: A callable that performs the resampling operation. Must
-      accept as arguments the log weights, particle states, num_particles,
-      and batch_size and return the resampled particle states. See
-      multinomial_resampling and relaxed_resampling for examples.
-    loop_fn: A callable that performs operations on the weights and
-      particle states, useful for accumulating and processing state that
-      shouldn't be resampled. At each timestep after (possibly) resampling
-      loop_fn will be called with the previous loop_state, a set of arguments
-      produced by transition_fn called loop_args, the resampled particle
-      states, the current log weights as [num_particles, batch_size] float
-      Tensor, a [batch_size] float Tensor representing whether or not each
-      filter resampled, the current mask indicating which filters are active,
-      and the current timestep. It must return the next loop state. Before the
-      first timestep loop_fn will be called with the arguments None, None,
-      None, None, None, None, -1 and must return the initial loop state. The
-      loop state can be a possibly nested structure of Tensors and
-      TensorArrays.
-    parallel_iterations: The number of parallel iterations to use for the
-      internal while loop. Note that values greater than 1 can introduce
-      non-determinism even when resampling is deterministic.
-    swap_memory: Whether GPU-CPU memory swapping should be enabled for the
-      internal while loop.
-
-  Returns:
-    log_z_hat: A Tensor of shape [batch_size] containing an estimate of the log
-      normalizing constant that converts between the unnormalized target
-      distribution (as defined by the weights) and the true target
-      distribution.
-    log_weights: A Tensor of shape [max_num_steps, batch_size, num_particles]
-      containing the log weights at each timestep of the particle filter.
-      Will not be valid for timesteps past the supplied num_steps.
-    resampled: A float Tensor of shape [max_num_steps, batch_size] indicating
-      when the particle filters resampled. Will be 1.0 on timesteps when
-      resampling occurred and 0.0 on timesteps when it did not.
-    final_particle_state: The final particle states.
-    final_loop_state: The final state returned by loop_fn. If loop_fn is None
-      then 0 will be returned.
-  """
-  # batch_size represents the number of particle filters running in parallel.
-  batch_size = tf.shape(num_steps)[0]
-  # Create a TensorArray where element t is the [num_particles*batch_size]
-  # sequence mask for timestep t.
-  max_num_steps = tf.reduce_max(num_steps)
-  seq_mask = tf.transpose(
-      tf.sequence_mask(num_steps, maxlen=max_num_steps, dtype=tf.float32),
-      perm=[1, 0])
-  seq_mask = tf.tile(seq_mask, [1, num_particles])
-  mask_ta = tf.TensorArray(seq_mask.dtype,
-                           max_num_steps,
-                           name='mask_ta')
-  mask_ta = mask_ta.unstack(seq_mask)
-  # Initialize the state.
-  t0 = tf.constant(0, tf.int32)
-  init_particle_state = transition_fn(None, -1)
-
-  def transition(*args):
-    transition_outs = transition_fn(*args)
-    if len(transition_outs) == 2:
-      return transition_outs + (None,)
-    else:
-      return transition_outs
-
-  if loop_fn is None:
-    loop_fn = lambda *args: 0
-
-  init_loop_state = loop_fn(None, None, None, None, None, None, -1)
-  init_states = (init_particle_state, init_loop_state)
-  ta_names = ['log_weights', 'resampled']
-  tas = [tf.TensorArray(tf.float32, max_num_steps, name='%s_ta' % n)
-         for n in ta_names]
-  log_weights_acc = tf.zeros([num_particles, batch_size], dtype=tf.float32)
-  log_z_hat_acc = tf.zeros([batch_size], dtype=tf.float32)
-
-  def while_predicate(t, *unused_args):
-    return t < max_num_steps
-
-  def while_step(t, state, tas, log_weights_acc, log_z_hat_acc):
-    """Implements one timestep of the particle filter."""
-    particle_state, loop_state = state
-    cur_mask = nested.read_tas(mask_ta, t)
-    # Propagate the particles one step.
-    log_alpha, new_particle_state, loop_args = transition(particle_state, t)
-    # Update the current weights with the incremental weights.
-    log_alpha *= cur_mask
-    log_alpha = tf.reshape(log_alpha, [num_particles, batch_size])
-    log_weights_acc += log_alpha
-
-    should_resample = resampling_criterion(log_weights_acc, t)
-
-    if resampling_criterion == never_resample_criterion:
-      resampled = tf.to_float(should_resample)
-    else:
-      # Compute the states as if we did resample.
-      resampled_states = resampling_fn(
-          log_weights_acc,
-          new_particle_state,
-          num_particles,
-          batch_size)
-      # Decide whether or not we should resample; don't resample if we are past
-      # the end of a sequence.
-      should_resample = tf.logical_and(should_resample,
-                                       cur_mask[:batch_size] > 0.)
-      float_should_resample = tf.to_float(should_resample)
-      new_particle_state = nested.where_tensors(
-          tf.tile(should_resample, [num_particles]),
-          resampled_states,
-          new_particle_state)
-      resampled = float_should_resample
-
-    new_loop_state = loop_fn(loop_state, loop_args, new_particle_state,
-                             log_weights_acc, resampled, cur_mask, t)
-    # Update log Z hat.
- log_z_hat_update = tf.reduce_logsumexp( - log_weights_acc, axis=0) - tf.log(tf.to_float(num_particles)) - # If it is the last timestep, always add the update. - log_z_hat_acc += tf.cond(t < max_num_steps - 1, - lambda: log_z_hat_update * resampled, - lambda: log_z_hat_update) - # Update the TensorArrays before we reset the weights so that we capture - # the incremental weights and not zeros. - ta_updates = [log_weights_acc, resampled] - new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)] - # For the particle filters that resampled, reset weights to zero. - log_weights_acc *= (1. - tf.tile(resampled[tf.newaxis, :], - [num_particles, 1])) - new_state = (new_particle_state, new_loop_state) - return t + 1, new_state, new_tas, log_weights_acc, log_z_hat_acc - - _, final_state, tas, _, log_z_hat = tf.while_loop( - while_predicate, - while_step, - loop_vars=(t0, init_states, tas, log_weights_acc, log_z_hat_acc), - parallel_iterations=parallel_iterations, - swap_memory=swap_memory) - - log_weights, resampled = [x.stack() for x in tas] - log_weights = tf.transpose(log_weights, perm=[0, 2, 1]) - final_particle_state, final_loop_state = final_state - return (log_z_hat, log_weights, resampled, - final_particle_state, final_loop_state) diff --git a/research/fivo/fivo/smc_test.py b/research/fivo/fivo/smc_test.py deleted file mode 100644 index ae32a62f2..000000000 --- a/research/fivo/fivo/smc_test.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for fivo.smc.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import scipy -import tensorflow as tf - -from fivo import smc - -lse = scipy.special.logsumexp - - -def _simple_transition_fn(state, unused_t): - if state is None: - return tf.zeros([4], dtype=tf.float32) - return tf.constant([5., 4., 1., 0.5]), tf.zeros([4], dtype=tf.float32) - - -def _resample_at_step_criterion(step): - """A criterion that resamples once at a specific timestep.""" - def criterion(log_weights, t): - batch_size = tf.shape(log_weights)[1] - return tf.fill([batch_size], tf.equal(t, step)) - return criterion - - -class SMCTest(tf.test.TestCase): - - def test_never_resampling(self): - """Test that never_resample_criterion makes smc not resample. - - Also test that the weights and log_z_hat are computed correctly when never - resampling. 
- """ - tf.set_random_seed(1234) - with self.test_session() as sess: - outs = smc.smc( - _simple_transition_fn, - num_steps=tf.convert_to_tensor([5, 3]), - num_particles=2, - resampling_criterion=smc.never_resample_criterion) - log_z_hat, weights, resampled = sess.run(outs[0:3]) - gt_weights = np.array( - [[[5, 1], [4, .5]], - [[10, 2], [8, 1]], - [[15, 3], [12, 1.5]], - [[20, 4], [12, 1.5]], - [[25, 5], [12, 1.5]]], - dtype=np.float32) - gt_log_z_hat = np.array( - [lse([25, 5]) - np.log(2), - lse([12, 1.5]) - np.log(2)], - dtype=np.float32) - self.assertAllClose(gt_log_z_hat, log_z_hat) - self.assertAllClose(gt_weights, weights) - self.assertAllEqual(np.zeros_like(resampled), resampled) - - def test_always_resampling(self): - """Test always_resample_criterion makes smc always resample. - - Past a sequence end the filter should not resample, however. - Also check that weights and log_z_hat estimate are correct. - """ - tf.set_random_seed(1234) - with self.test_session() as sess: - outs = smc.smc( - _simple_transition_fn, - num_steps=tf.convert_to_tensor([5, 3]), - num_particles=2, - resampling_criterion=smc.always_resample_criterion) - log_z_hat, weights, resampled = sess.run(outs[0:3]) - gt_weights = np.array( - [[[5, 1], [4, .5]], - [[5, 1], [4, .5]], - [[5, 1], [4, .5]], - [[5, 1], [0., 0.]], - [[5, 1], [0., 0.]]], - dtype=np.float32) - gt_log_z_hat = np.array( - [5*lse([5, 1]) - 5*np.log(2), - 3*lse([4, .5]) - 3*np.log(2)], - dtype=np.float32) - gt_resampled = np.array( - [[1, 1], [1, 1], [1, 1], [1, 0], [1, 0]], - dtype=np.float32) - self.assertAllClose(gt_log_z_hat, log_z_hat) - self.assertAllClose(gt_weights, weights) - self.assertAllEqual(gt_resampled, resampled) - - def test_weights_reset_when_resampling_at_sequence_end(self): - """Test that the weights are reset when resampling at the sequence end. - - When resampling happens on the last timestep of a sequence the weights - should be set to zero on the next timestep and remain zero afterwards. - """ - tf.set_random_seed(1234) - with self.test_session() as sess: - outs = smc.smc( - _simple_transition_fn, - num_steps=tf.convert_to_tensor([5, 3]), - num_particles=2, - resampling_criterion=_resample_at_step_criterion(2)) - log_z_hat, weights, resampled = sess.run(outs[0:3]) - gt_log_z = np.array( - [lse([15, 3]) + lse([10, 2]) - 2*np.log(2), - lse([12, 1.5]) - np.log(2)], - dtype=np.float32) - gt_resampled = np.array( - [[0, 0], [0, 0], [1, 1], [0, 0], [0, 0]], - dtype=np.float32) - gt_weights = np.array( - [[[5, 1], [4, .5]], - [[10, 2], [8, 1]], - [[15, 3], [12, 1.5]], - [[5, 1], [0, 0]], - [[10, 2], [0, 0]]], - dtype=np.float32) - self.assertAllClose(gt_log_z, log_z_hat) - self.assertAllEqual(gt_resampled, resampled) - self.assertAllEqual(gt_weights, weights) - - def test_weights_not_updated_past_sequence_end(self): - """Test that non-zero weights are not updated past the end of a sequence.""" - tf.set_random_seed(1234) - with self.test_session() as sess: - outs = smc.smc( - _simple_transition_fn, - num_steps=tf.convert_to_tensor([6, 4]), - num_particles=2, - resampling_criterion=_resample_at_step_criterion(1)) - log_z_hat, weights, resampled = sess.run(outs[0:3]) - gt_log_z_hat = np.array( - [lse([10, 2]) + lse([20, 4]) - 2*np.log(2), - lse([8, 1]) + lse([8, 1]) - 2*np.log(2)], - dtype=np.float32) - # Ensure that we only resample on the 2nd timestep. - gt_resampled = np.array( - [[0, 0], [1, 1], [0, 0], [0, 0], [0, 0], [0, 0]], - dtype=np.float32) - # Ensure that the weights after the end of the sequence don't change. 
-      # Ensure that the weights after the end of the sequence don't change.
-      # Ensure that the weights after resampling before the end of the sequence
-      # do change.
-      gt_weights = np.array(
-          [[[5, 1], [4, .5]],
-           [[10, 2], [8, 1]],
-           [[5, 1], [4, .5]],
-           [[10, 2], [8, 1]],
-           [[15, 3], [8, 1]],
-           [[20, 4], [8, 1]]],
-          dtype=np.float32)
-      self.assertAllClose(gt_log_z_hat, log_z_hat)
-      self.assertAllEqual(gt_resampled, resampled)
-      self.assertAllEqual(gt_weights, weights)
-
-  def test_resampling_on_max_num_steps(self):
-    """Test that everything is correct when resampling on step max_num_steps.
-
-    When resampling on step max_num_steps (i.e. the last step of the longest
-    sequence), ensure that there are no off-by-one errors preventing resampling
-    and also that the weights are not updated.
-    """
-    tf.set_random_seed(1234)
-    with self.test_session() as sess:
-      outs = smc.smc(
-          _simple_transition_fn,
-          num_steps=tf.convert_to_tensor([4, 2]),
-          num_particles=2,
-          resampling_criterion=_resample_at_step_criterion(3))
-      log_z_hat, weights, resampled = sess.run(outs[0:3])
-      gt_log_z_hat = np.array(
-          [lse([20, 4]) - np.log(2),
-           lse([8, 1]) - np.log(2)],
-          dtype=np.float32)
-      # Ensure that we only resample on the 3rd timestep and that the second
-      # filter doesn't resample at all because it is only run for 2 steps.
-      gt_resampled = np.array(
-          [[0, 0], [0, 0], [0, 0], [1, 0]],
-          dtype=np.float32)
-      gt_weights = np.array(
-          [[[5, 1], [4, .5]],
-           [[10, 2], [8, 1]],
-           [[15, 3], [8, 1]],
-           [[20, 4], [8, 1]]],
-          dtype=np.float32)
-      self.assertAllClose(gt_log_z_hat, log_z_hat)
-      self.assertAllEqual(gt_resampled, resampled)
-      self.assertAllEqual(gt_weights, weights)
-
-  def test_multinomial_resampling(self):
-    """Test that multinomial resampling selects the correct states."""
-    tf.set_random_seed(1234)
-    with self.test_session() as sess:
-      # Setup input.
-      inf = 1000.0  # Very large value in log space.
-      num_samples = 2
-      batch_size = 2
-      log_weights = tf.convert_to_tensor([[inf, 0], [0, inf]])
-      states = tf.convert_to_tensor([1, 2, 3, 4])
-      # Run test.
-      resampled_states = smc.multinomial_resampling(
-          log_weights, states, num_samples, batch_size, random_seed=0)
-      resampled_states_values = sess.run(resampled_states)
-      self.assertAllEqual(resampled_states_values, [1, 4, 1, 4])
-
-  def test_blend_tensor(self):
-    """Test that relaxed resampling blends the correct states."""
-    tf.set_random_seed(1234)
-    with self.test_session() as sess:
-      # Setup input.
-      num_samples = 2
-      batch_size = 2
-      blending_weights = tf.convert_to_tensor(
-          [[[0.5, 0.5], [0.25, 0.75]], [[0.75, 0.25], [0.5, 0.5]]])
-      states = tf.convert_to_tensor([4., 8., 12., 16.])
-      # Run test.
-      blended_states = smc._blend_tensor(blending_weights, states,
-                                         num_samples, batch_size)
-      blended_states_values = sess.run(blended_states)
-      self.assertAllClose(blended_states_values[:, 0], [8., 14., 6., 12.])
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/fivo/fivo/test_data/tiny_pianoroll.pkl b/research/fivo/fivo/test_data/tiny_pianoroll.pkl
deleted file mode 100644
index c5501c6ce..000000000
--- a/research/fivo/fivo/test_data/tiny_pianoroll.pkl
+++ /dev/null
@@ -1,10979 +0,0 @@
[10,979 deleted lines of serialized test data omitted: this hunk removes the
protocol-0 pickle text of the tiny_pianoroll.pkl fixture (train_mean and
related numpy arrays), a binary-like payload rather than reviewable source.]
-S'M\x00\x00\x00\x00\x00\x00\x00' -tRp469 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp470 -tp471 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp472 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp473 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp474 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp475 -tp476 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp477 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp478 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp479 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp480 -tp481 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp482 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp483 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp484 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp485 -tp486 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp487 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp488 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp489 -tp490 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp491 -g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp492 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp493 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp494 -tp495 -a(g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp496 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp497 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp498 -tp499 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp500 -g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp501 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp502 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp503 -tp504 -a(g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp505 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp506 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp507 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp508 -tp509 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp510 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp511 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp512 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp513 -tp514 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp515 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp516 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp517 -tp518 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp519 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp520 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp521 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp522 -tp523 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp524 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp525 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp526 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp527 -tp528 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp529 -g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp530 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp531 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp532 -tp533 -a(g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp534 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp535 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp536 -tp537 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp538 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp539 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp540 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp541 -tp542 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp543 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp544 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp545 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp546 -tp547 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp548 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp549 -g11 -(g12 
-S'M\x00\x00\x00\x00\x00\x00\x00' -tRp550 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp551 -tp552 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp553 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp554 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp555 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp556 -tp557 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp558 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp559 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp560 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp561 -tp562 -a(g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp563 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp564 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp565 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp566 -tp567 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp568 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp569 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp570 -tp571 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp572 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp573 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp574 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp575 -tp576 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp577 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp578 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp579 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp580 -tp581 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp582 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp583 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp584 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp585 -tp586 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp587 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp588 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp589 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp590 -tp591 -a(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp592 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp593 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp594 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp595 -tp596 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp597 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp598 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp599 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp600 -tp601 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp602 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp603 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp604 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp605 -tp606 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp607 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp608 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp609 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp610 -tp611 -a(g11 -(g12 -S'6\x00\x00\x00\x00\x00\x00\x00' -tRp612 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp613 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp614 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp615 -tp616 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp617 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp618 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp619 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp620 -tp621 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' -tRp622 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp623 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp624 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp625 -tp626 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' -tRp627 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp628 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp629 -g11 -(g12 
-S'T\x00\x00\x00\x00\x00\x00\x00' -tRp630 -tp631 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' -tRp632 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp633 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp634 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp635 -tp636 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' -tRp637 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp638 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp639 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp640 -tp641 -aa(lp642 -(g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp643 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp644 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp645 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp646 -tp647 -a(g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp648 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp649 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp650 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp651 -tp652 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp653 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp654 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp655 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp656 -tp657 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp658 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp659 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp660 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp661 -tp662 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp663 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp664 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp665 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp666 -tp667 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp668 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp669 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp670 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp671 -tp672 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp673 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp674 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp675 -tp676 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp677 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp678 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp679 -tp680 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp681 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp682 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp683 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp684 -tp685 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp686 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp687 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp688 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp689 -tp690 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp691 -g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp692 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp693 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp694 -tp695 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp696 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp697 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp698 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp699 -tp700 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp701 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp702 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp703 -tp704 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp705 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp706 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp707 -tp708 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp709 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp710 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp711 -g11 -(g12 
-S'R\x00\x00\x00\x00\x00\x00\x00' -tRp712 -tp713 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp714 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp715 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp716 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp717 -tp718 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp719 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp720 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp721 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp722 -tp723 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp724 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp725 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp726 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp727 -tp728 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp729 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp730 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp731 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp732 -tp733 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp734 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp735 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp736 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp737 -tp738 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp739 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp740 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp741 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp742 -tp743 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp744 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp745 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp746 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp747 -tp748 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp749 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp750 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp751 -tp752 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp753 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp754 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp755 -tp756 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp757 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp758 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp759 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp760 -tp761 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp762 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp763 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp764 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp765 -tp766 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp767 -g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp768 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp769 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp770 -tp771 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp772 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp773 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp774 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp775 -tp776 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp777 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp778 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp779 -tp780 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp781 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp782 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp783 -tp784 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp785 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp786 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp787 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp788 -tp789 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp790 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp791 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp792 -g11 -(g12 
-S'P\x00\x00\x00\x00\x00\x00\x00' -tRp793 -tp794 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp795 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp796 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp797 -g11 -(g12 -S'\\\x00\x00\x00\x00\x00\x00\x00' -tRp798 -tp799 -a(g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp800 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp801 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp802 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp803 -tp804 -a(g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp805 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp806 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp807 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp808 -tp809 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp810 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp811 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp812 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp813 -tp814 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp815 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp816 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp817 -tp818 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp819 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp820 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp821 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp822 -tp823 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp824 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp825 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp826 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp827 -tp828 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp829 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp830 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp831 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp832 -tp833 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp834 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp835 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp836 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp837 -tp838 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp839 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp840 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp841 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp842 -tp843 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp844 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp845 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp846 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp847 -tp848 -a(g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp849 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp850 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp851 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp852 -tp853 -a(g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp854 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp855 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp856 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp857 -tp858 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp859 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp860 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp861 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp862 -tp863 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp864 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp865 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp866 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp867 -tp868 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp869 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp870 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp871 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp872 -tp873 -a(g11 -(g12 
-S'B\x00\x00\x00\x00\x00\x00\x00' -tRp874 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp875 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp876 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp877 -tp878 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp879 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp880 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp881 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp882 -tp883 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp884 -g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp885 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp886 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp887 -tp888 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp889 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp890 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp891 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp892 -tp893 -a(g11 -(g12 -S'?\x00\x00\x00\x00\x00\x00\x00' -tRp894 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp895 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp896 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp897 -tp898 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp899 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp900 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp901 -g11 -(g12 -S'\\\x00\x00\x00\x00\x00\x00\x00' -tRp902 -tp903 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp904 -g11 -(g12 -S'K\x00\x00\x00\x00\x00\x00\x00' -tRp905 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp906 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp907 -tp908 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp909 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp910 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp911 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp912 -tp913 -a(g11 -(g12 -S'F\x00\x00\x00\x00\x00\x00\x00' -tRp914 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp915 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp916 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp917 -tp918 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp919 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp920 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp921 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp922 -tp923 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp924 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp925 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp926 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp927 -tp928 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp929 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp930 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp931 -g11 -(g12 -S'\\\x00\x00\x00\x00\x00\x00\x00' -tRp932 -tp933 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp934 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp935 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp936 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp937 -tp938 -a(g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp939 -g11 -(g12 -S'I\x00\x00\x00\x00\x00\x00\x00' -tRp940 -g11 -(g12 -S'P\x00\x00\x00\x00\x00\x00\x00' -tRp941 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp942 -tp943 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp944 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp945 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp946 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp947 -tp948 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp949 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp950 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp951 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp952 -tp953 -a(g11 -(g12 
-S'=\x00\x00\x00\x00\x00\x00\x00' -tRp954 -g11 -(g12 -S'D\x00\x00\x00\x00\x00\x00\x00' -tRp955 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp956 -g11 -(g12 -S'U\x00\x00\x00\x00\x00\x00\x00' -tRp957 -tp958 -aa(lp959 -(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp960 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp961 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp962 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp963 -tp964 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp965 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp966 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp967 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp968 -tp969 -a(g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp970 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp971 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp972 -tp973 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp974 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp975 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp976 -g11 -(g12 -S']\x00\x00\x00\x00\x00\x00\x00' -tRp977 -tp978 -a(g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp979 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp980 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp981 -g11 -(g12 -S']\x00\x00\x00\x00\x00\x00\x00' -tRp982 -tp983 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp984 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp985 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp986 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp987 -tp988 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp989 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp990 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp991 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp992 -tp993 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp994 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp995 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp996 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp997 -tp998 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp999 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1000 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1001 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1002 -tp1003 -a(g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1004 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1005 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1006 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp1007 -tp1008 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1009 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1010 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1011 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1012 -tp1013 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1014 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1015 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1016 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1017 -tp1018 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1019 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1020 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1021 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1022 -tp1023 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1024 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1025 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1026 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1027 -tp1028 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1029 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1030 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1031 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1032 -tp1033 -a(g11 -(g12 
-S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1034 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1035 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1036 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1037 -tp1038 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1039 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1040 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1041 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1042 -tp1043 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1044 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1045 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1046 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1047 -tp1048 -a(g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1049 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1050 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1051 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1052 -tp1053 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1054 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1055 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1056 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1057 -tp1058 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1059 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1060 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1061 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1062 -tp1063 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1064 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1065 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1066 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1067 -tp1068 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1069 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1070 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1071 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1072 -tp1073 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1074 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1075 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1076 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1077 -tp1078 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1079 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1080 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1081 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1082 -tp1083 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1084 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1085 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1086 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1087 -tp1088 -a(g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1089 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1090 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1091 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1092 -tp1093 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1094 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1095 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1096 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1097 -tp1098 -a(g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1099 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1100 -g11 -(g12 -S'Z\x00\x00\x00\x00\x00\x00\x00' -tRp1101 -tp1102 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1103 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1104 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1105 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1106 -tp1107 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1108 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1109 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1110 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1111 -tp1112 
-a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1113 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1114 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1115 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1116 -tp1117 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1118 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1119 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1120 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1121 -tp1122 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1123 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1124 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1125 -g11 -(g12 -S']\x00\x00\x00\x00\x00\x00\x00' -tRp1126 -tp1127 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1128 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1129 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1130 -g11 -(g12 -S'[\x00\x00\x00\x00\x00\x00\x00' -tRp1131 -tp1132 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1133 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1134 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1135 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp1136 -tp1137 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1138 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp1139 -g11 -(g12 -S'W\x00\x00\x00\x00\x00\x00\x00' -tRp1140 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1141 -tp1142 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1143 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1144 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1145 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp1146 -tp1147 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1148 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1149 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1150 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp1151 -tp1152 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1153 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1154 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1155 -g11 -(g12 -S'Y\x00\x00\x00\x00\x00\x00\x00' -tRp1156 -tp1157 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1158 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1159 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1160 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1161 -tp1162 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1163 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1164 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1165 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1166 -tp1167 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1168 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1169 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1170 -g11 -(g12 -S'X\x00\x00\x00\x00\x00\x00\x00' -tRp1171 -tp1172 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1173 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1174 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1175 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1176 -tp1177 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1178 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1179 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1180 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1181 -tp1182 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1183 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1184 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1185 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1186 -tp1187 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1188 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1189 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1190 
-g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1191 -tp1192 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1193 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1194 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1195 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1196 -tp1197 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1198 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1199 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1200 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1201 -tp1202 -aa(lp1203 -(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp1204 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1205 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1206 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1207 -tp1208 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1209 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1210 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1211 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1212 -tp1213 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1214 -g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1215 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1216 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1217 -tp1218 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1219 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1220 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1221 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1222 -tp1223 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1224 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1225 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1226 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1227 -tp1228 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1229 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1230 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1231 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1232 -tp1233 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1234 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1235 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1236 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1237 -tp1238 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1239 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1240 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1241 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1242 -tp1243 -a(g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp1244 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1245 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1246 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1247 -tp1248 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1249 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1250 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1251 -tp1252 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1253 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1254 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1255 -tp1256 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1257 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1258 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1259 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1260 -tp1261 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1262 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1263 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1264 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1265 -tp1266 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1267 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1268 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1269 -g11 -(g12 
-S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1270 -tp1271 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1272 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1273 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1274 -tp1275 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1276 -g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1277 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1278 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1279 -tp1280 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1281 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1282 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1283 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1284 -tp1285 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1286 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1287 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1288 -tp1289 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1290 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1291 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1292 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1293 -tp1294 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1295 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1296 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1297 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1298 -tp1299 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1300 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1301 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1302 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1303 -tp1304 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1305 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1306 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1307 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1308 -tp1309 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1310 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1311 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1312 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1313 -tp1314 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1315 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1316 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1317 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1318 -tp1319 -a(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp1320 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1321 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1322 -tp1323 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1324 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1325 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1326 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1327 -tp1328 -a(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp1329 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1330 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1331 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp1332 -tp1333 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1334 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1335 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1336 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1337 -tp1338 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1339 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1340 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1341 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1342 -tp1343 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1344 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1345 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1346 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1347 -tp1348 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' 
-tRp1349 -g11 -(g12 -S'B\x00\x00\x00\x00\x00\x00\x00' -tRp1350 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1351 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1352 -tp1353 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1354 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1355 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1356 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1357 -tp1358 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1359 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1360 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1361 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1362 -tp1363 -a(g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1364 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1365 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1366 -tp1367 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1368 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1369 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1370 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1371 -tp1372 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1373 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1374 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1375 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1376 -tp1377 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1378 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1379 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1380 -g11 -(g12 -S'R\x00\x00\x00\x00\x00\x00\x00' -tRp1381 -tp1382 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1383 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1384 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1385 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1386 -tp1387 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1388 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1389 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1390 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1391 -tp1392 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1393 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1394 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1395 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1396 -tp1397 -a(g11 -(g12 -S'=\x00\x00\x00\x00\x00\x00\x00' -tRp1398 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1399 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1400 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1401 -tp1402 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1403 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1404 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1405 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1406 -tp1407 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1408 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1409 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp1410 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1411 -tp1412 -a(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp1413 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1414 -g11 -(g12 -S'N\x00\x00\x00\x00\x00\x00\x00' -tRp1415 -g11 -(g12 -S'V\x00\x00\x00\x00\x00\x00\x00' -tRp1416 -tp1417 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1418 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1419 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1420 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1421 -tp1422 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1423 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1424 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1425 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1426 -tp1427 -a(g11 -(g12 
-S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1428 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1429 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1430 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1431 -tp1432 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1433 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1434 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1435 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1436 -tp1437 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1438 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1439 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1440 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1441 -tp1442 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp1443 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1444 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1445 -tp1446 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1447 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1448 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1449 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1450 -tp1451 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp1452 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1453 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1454 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp1455 -tp1456 -a(g11 -(g12 -S'8\x00\x00\x00\x00\x00\x00\x00' -tRp1457 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1458 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1459 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp1460 -tp1461 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp1462 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1463 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1464 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1465 -tp1466 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp1467 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1468 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1469 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1470 -tp1471 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp1472 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1473 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1474 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp1475 -tp1476 -a(g11 -(g12 -S';\x00\x00\x00\x00\x00\x00\x00' -tRp1477 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1478 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1479 -tp1480 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1481 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1482 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp1483 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp1484 -tp1485 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp1486 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1487 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1488 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp1489 -tp1490 -a(g11 -(g12 -S'5\x00\x00\x00\x00\x00\x00\x00' -tRp1491 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp1492 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1493 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1494 -tp1495 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp1496 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1497 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp1498 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp1499 -tp1500 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1501 -g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp1502 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp1503 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp1504 -tp1505 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp1506 
-g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp3164 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3165 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp3166 -tp3167 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp3168 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3169 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3170 -g11 -(g12 -S'T\x00\x00\x00\x00\x00\x00\x00' -tRp3171 -tp3172 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp3173 -g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp3174 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3175 -g11 -(g12 -S'S\x00\x00\x00\x00\x00\x00\x00' -tRp3176 -tp3177 -a(g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp3178 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp3179 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3180 -g11 -(g12 -S'Q\x00\x00\x00\x00\x00\x00\x00' -tRp3181 -tp3182 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp3183 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp3184 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3185 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp3186 -tp3187 -a(g11 -(g12 -S'>\x00\x00\x00\x00\x00\x00\x00' -tRp3188 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp3189 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3190 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp3191 -tp3192 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp3193 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3194 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3195 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3196 -tp3197 -a(g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3198 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp3199 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3200 -tp3201 -a(g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp3202 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp3203 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3204 -g11 -(g12 -S'O\x00\x00\x00\x00\x00\x00\x00' -tRp3205 -tp3206 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp3207 -g11 -(g12 -S'E\x00\x00\x00\x00\x00\x00\x00' -tRp3208 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3209 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp3210 -tp3211 -a(g11 -(g12 -S'<\x00\x00\x00\x00\x00\x00\x00' -tRp3212 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3213 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3214 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3215 -tp3216 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp3217 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3218 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3219 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3220 -tp3221 -a(g11 -(g12 -S'9\x00\x00\x00\x00\x00\x00\x00' -tRp3222 -g11 -(g12 -S'A\x00\x00\x00\x00\x00\x00\x00' -tRp3223 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3224 -g11 -(g12 -S'M\x00\x00\x00\x00\x00\x00\x00' -tRp3225 -tp3226 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp3227 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3228 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3229 -g11 -(g12 -S'L\x00\x00\x00\x00\x00\x00\x00' -tRp3230 -tp3231 -a(g11 -(g12 -S'7\x00\x00\x00\x00\x00\x00\x00' -tRp3232 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3233 -g11 -(g12 -S'G\x00\x00\x00\x00\x00\x00\x00' -tRp3234 -g11 -(g12 -S'J\x00\x00\x00\x00\x00\x00\x00' -tRp3235 -tp3236 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' -tRp3237 -g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp3238 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3239 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3240 -tp3241 -a(g11 -(g12 -S'0\x00\x00\x00\x00\x00\x00\x00' 
-tRp3242 -g11 -(g12 -S'@\x00\x00\x00\x00\x00\x00\x00' -tRp3243 -g11 -(g12 -S'C\x00\x00\x00\x00\x00\x00\x00' -tRp3244 -g11 -(g12 -S'H\x00\x00\x00\x00\x00\x00\x00' -tRp3245 -tp3246 -aas. \ No newline at end of file diff --git a/research/fivo/fivo/test_data/tiny_speech_dataset.tfrecord b/research/fivo/fivo/test_data/tiny_speech_dataset.tfrecord deleted file mode 100644 index 93fe8791b631da35b9d03d37e6494cc7c50cb55d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 144 zcmd;JfPnvPD*O;ugFT

zew6*UsPKfQHt z$4k<8<)*!ERYyS6tlXNlc#xM#$M@`aYZ;Romog%i{B8(Kp$mHFs;;hAv9$E$oXxoL z*~<}&@Pj&^dthSQlXHFVsHyM#eD~*DmbFp06mR*jA>JS5KN(tiw!6x4ujqZ-(eCEG zkz)ea*9mmHJ?*?*G6{G5s<+J~4Ki4ZK174wo-$Hzy)20|NOq(Caq$VA*%KC)>drW` zH6i(K$+V(Dl&)oEvB(^hBjfxC%cwrey)0irrwTJ&nO#5%R>*!7Eu>NXU%vXLxx=HH zxx>wP1GrS>%#gJWJ4*e*A-%louYmb7DlE^t`yHSd#8=HL*>p!FJ&k zqGzzO{Zo-ri0`|_^>0*BrtkKfkWU`2eqOn7bZLC%o4ThrtoQz=rU^FTfb{g<%VU`Y z$b~AL8%qc{cNTr1&4P|MJD1#%*>4JVX!qbo( zRrtBmLeXeBLF7o9S}b}?pCoS&@odi zU;1ajN!7P&C)*C9+&ZZY_3N*27!-RwzjjIBEB@IFmHm!a#fh8^tK8S?U$qoJ3B@}& z=vH>gPJ@l$2n8+~OkUe{i5^TvuS=u1_v%KYkJ>4D*Ng>8qTZ9UV>d5#u6VdOG?ZW>97$~vN%VHg*D%V z>!X404KB(d8-EBd%|_kIcRDzK>(Y~_HC7{qK%C-u!A|sVS&(hWl@yR#xx+t_L8j?c zN<>-pnesiblZskNazk_=EhH!y9v}^S=QhrKRd&!4LI-nKoVUTJg8tgJ53iGLHqX$Ch|DGrc+9nS7A8gz& z_Bg%u$_Tr?w}aaIh$mi_UcA}6+jspfx2ng2gV5ceh`Vr~yMcK@cXTDi8hsCJN6RWV zirll~FM4FN&1MUgpr_;mZY>EEZH%O3=F{7g2xn@Eoz`2c%T1d}Io&_s z&o>MH%_v186PmdgbJ)pc?tT5_6Fc_S&z)jy0yP+8vrlDM1vxJ;;^llw#k2>D?8fv1 ztg}*%VlV}o>msyhsK63zol}tGBTdkn$93bl7@IgpLtRn>hfQofQe2trInFU@Ez{p| zNlx(Xl0B(B8T)1>)w!9c@;CIa?MYhvGY;a;35;kC+m?f?i*;75cBw8cRY4k=fFUH- zq?uBmflq>|jEOSOELP9jh7)zp?XC(xQ~7MI!JP=A{Jux~*KgaaHHur<=~p{<)ZVHP z8`t{!pW)mWM~TmQ(@FU0&X1(Gypu|+(#Oc^bC8wOPBX}f(G)d~5{Ze6Y5dpFIy6C+ zX}r^_Hlp!z38|h!PaO+d%OH0=PnoKWW~r2I-p2BPMmf>(^8Gaa)ysB8b8SkqmJ(^H z^M)sLEmTH>{Ax609yMA@Ar18i!M7b%c4sDGv~cisp@N9b_tD4LKQl7Ceor%$y!5SH z^t10+&1FeSg4;&b?}AYaD+6&Z;wqHU=ggAk_-zYPE$_r>>y>K7kW)^RTVaZ*|bj8WD1jaPbM-0n`%pCG=RjZkcfZQ`{D9XBFB75|OlA$RC#D5O<3 zAvKtMe;B3bIgOhuYEf9jB{tEcAeqBxIW2k1X3Q?k_hhGYN(IXfj z4I{IJvzj~ofhd$7B5NT;*Pokuv58ZqhCD#u!-#*P=ox*fHCp>Aj>{%j5<0s44`!Vs zzXdu*&3U&Qu&>%47OZZ6qDzr&lsYf=T8y!rhSfUGY^KPfOBfBQ)e;qzgk)z4TGj2L zN}-S#$(#-CXV+niMmJj?1lqBE!PYAk-_o`xJ-3u7k#rSXY#$GKb*bz0pCHb8&0B+A zq(f;Q&37wb3Tvi6I37GY7Wv-cG}J3rQI5rzkR0qPaYZa|lscvDih|CVa8Fuyk}~p{ zp)lFWq+kkms7Kn!2y0p}3E9gdQtMmQ(pY#g)V}ED2)oWz<%_rFPQG&}SO3Gkdy9cnO53jQ;ZuoCXFm$7CvB+OdEp{BS;eiqfnJX zYTkm3px!|HdhHy_Oc-~!PNL05Mxr$n?A1N#?PIrDH+G$U-k~YDDBpmo%G@FU8DS*& z(J>}zOMkm2Bg2#vL1C}vTq2RYzF`q_hw~66Wh#pO1Y+Q);}}o32+ghwzao{2^ma!Y z2aIAy8;a_O*(-{8n?-Wigr0=VW);>2AU?dQ?(-pdKIU%6VMg1~vz^!oWoo`42UHw2 zZCg1H-5fn3EczJuXNLXO1$!m>Dlmqo6?_7Wq)eTYNTaqX?eGrou#M0*sIHnDgHGYP z=CMYVNRSrl4y1*Nmd`3qq&bs~k%rnShe=$2unwLbXQ(&(mbAM6=(uzeGFDCW$>eJv zL1t6cQOGFA%tWnI>cxEJSNC{BHJW`Z$x_rmMmS~8r7|Wdz0M+cWpaT!&loY=%j=Y2 zs0-y1>n6-c=-)wIjf3yYUINRyGna8k?o-zn^3|V(b^FzzPHWAl#x%v2<$_%dVF6n; z2(`a#FF&cEQ!4D_!p&DQ1*1kW650&Z4O$GfnX$S~$*Hx`+Bi30-mwj(## zpd828#Hf;a2QfEtif--A-8+?W<{}SesV7Xj5k}*d0G&-aXQlkg4TgtuwNw_qdPIlr zEvP^iQ*I4qy5RN+C*UX^&o_x0Dx*<$9Po>(0OOT*s2WKX6$*LBg7Sfww{m^Qg8h~0 zN8EP~57A1~)~^@IVdxAL2`-+P+Rw4&#I zu8t+nat6V#Wk+t^y0X~VDRt$$^q2e-)}32^yW$(IH_ItNU^P;h`mj*B1)~!*Fxo0FIskL zgiWhMel|cJ?tLEDJWp>G9BXMUdKNYT*J)s-tIL$1)luOXHPV8@s8em;%s%`Fhffag zJNK?u3hR4OSy_ZIP`b$1KB*EDK+<5WpQ2bfTxrRil8_)nHanRGFO85X^_}Io~|V_V~D5B&l{Lj#|vb`#`bqY#To8Z%=Som|fMTt9YbCny8{irqaN1TVFNiF1PPJ zxr6~1M^ChB?zjjKWvQ@d* z!aa6mWOgW%t5ILnjpYA7ntLEd|;S^Vd#m^25^2dZj zGK+ExIt9gZsTjrbsFN5^v^c@2*z}}pVKnhf&0q*ST*^|6+(p|(kQ`jVG8RXjqCGK| zN8{cSF4B+&ygVUuWk{xp{Yi$O{u*AC2T6MG%9g_6zyY5gPUWr9dqA9^)G@FeQg2obEuyH|#GJ{`h9H@eCQxzO zVpOQo9S@@@;?@la57wb8IgtWRJUr097AvXRg6oV?TOA8fJJ%G74F_)uPc zSy(|tc&CcWk3B4(Tvf~4soymG-l|`*QmNaqY{k^Qo_#(%$y~t-m>vR1OZ%V?GFI=0 zCDcT{gVu_8H`JwW)Z=;m)sI3drihd9P++++O;cB_wwk_xIIo9h&L0;tf;dP+iN5l& zaGb{s^WZ5DWrI6~OYv%qOj%J8;1>0JaMw28oZq-0krIV72E5LiC=TY~8XJXK&Op~^ zHJJ2L2X4=)3^*37;fFy%%yP)ff+N)+BId_NYPtjplzdHG7X+(;*dcNJKevU0a)+2qR2 zlm8F~i2dAURTa@;lM(Nl6g-3nI?nC3WV|^|kMa^$RV@S7cB~CqLlsLCJoKefZHChd zHuYlt^sshZhvi&kX4Ulk4%i!-(Hs`Y!L-~-4tORHXf=&)f^+m*_PXx5WG=>> 
zzoM!sbYv5ze1?GN{`?~Q4kevPRcEVpff^?TODxB%YWSo5tXW(Vdqqa!RxfwzPV~kw zyM*zZ7se|ylPEZx<+9Rk2KR}J-P}%3O)zwU!>5zBFYzdnAc_=V*U2P!TPcRm)9|{x z+%;9cNn9m~dMcgf#y>(Kez4D8kVicqa#oy`?(c!bLWgJ`><9Rc|XLk7bv<-dR#N5GclAN z;WVG_>x8%CPFhQ>NH6^dFd06b*ZWen5yfPpmn?y0(Ku=M)!T{|UJGT!I}&aPd|=*5 zF`<_fkvVHZ63ONZxc2R(m<%861=UwmCIp1UC~v!Zsw>_j z>mM-xv&&SXI5`tiQOuEZS(RghdDG?<#q=d*<(^d>=u;gZu)Hofh&hv|Xy)-CUbkp& zG40LkQ}r3Lye~AvvDNC|{fHqd91nRuZ=A|9X^b%YP_|tWhiy@p3O(l}tT2nOsl#6= z(fKE^a?22Xqo**R20bF_NIvqbV#SKbZ!mUIgz`J5aC^sCK_=^c$vsRyp za2n>ZpZwzjzQSXYv;|f_8S#PCGIvJgk4qVa9@7=Xy60)iPk7_iU4Ur`X8#2HwJ&LU zKF>ry<0{upQBuJXlLx&XVt=a|6>N#(omHTCGFaImw%*Eb}~rr$omT^oJh4DDjp8vvL^ofbeu%22b3-b4gDJueirn?fZnI zb>P({J2mQlnMEU5Ac~=i0zHV=fMme1> zA|$MbtT7|7VpbS?Mh_dL!TFyc|A#$pmgPz8|Ka@q-%kY@DL-$^Jda+={k2OH3}!e*XWZIxU6F7T1%areexwagRyxFzbCw65YQa zrit=>r}F%Muh$XL!f;`JzyP_h%X7Hol}8vj$d!*ZK^^5s9#wyEKxmJR+Nt*VI1^UY zqtg2Ow^o#%jE`><9p5QB;w2@G&o#(y1>vHRDWDY#`_bM0UmabJcYj^z3kGH23(-e2hZRx4p0c#`>k`FsJ?oT%E`XBD_rTK9y zr51g{E4D^m^Ly}yv}fA%s;jjarAX-Z5ysmj&wb-AXkCC~-=5_|#^3p9>xkA=-M`d@ zT6^~o2~wo_bDLT(R=>@8NVs;r%H2C>@xsP~EJ!!`vO0Q<}T&&)bD`@z?nD*r6I6ToLOZ%Q~R86OOor=JlQL#TnxPA)WKoMH52azpZKM(N1Z#3 zi5u?|dOtW~EeRVky`Lm~9t0XmG$jt2Rv-QenlfL#+pyw)^)w`IKJt!{tr%ftEsUc} z#-*o8G4xLm{Sn?F@O{~M@sY}#$dIIcI6iQdppY@Q5V7`$;Ce@kH);O9B4^e@{ef)w zV&zSnEysV_S=a1_IkqOTl3tCg^*b|LIkrtIQ};crTl>PQ+ZHC#)g6Ui_?s6xh4evM zt&ZlCr0z}52j^?ldHuOJdeyHRJ`eVsIZ&Edp`1_H*fp}`DzqT7bnNY4cjWG(NxZg$ zP56J(5 zohK{@qHh(O%q0__I1Ax)mp7H{?7VR~Ef>-^JAN%yy%P3vNW;>zqs%);(GClDaOH%b z4Yn47`Yj;JDdB{i>8|EYO3F=!TZ$}`AVL6|?C%oUH zm~R^hb1a+XCRh_R*MX`}n@L&r8(jBoB-O#zyVPABmllUyQrdAe$4x@H9we{;|7Gi+ zq5loSbY^=Q-cv2ZEMm$sxW3M}y;dn4JGTQbT3QHcvA%*MHB{WFm&^mao?M)sy*xR0 zlti*EV+U$0+M)K0j?^h=+1rEYaS6Fzv*E+o;3f@3Gf9hl^Vml@#klCSwGQU_9COH&w?&eo`;l0nNv9g;Kbd$<97?7NQ1qJx}!Z z$IRF1UN=pat(HCK=4#n9WN_;Oat=Q6Y--qSjc&&BX=kM+DZwjCnLDHVUeC7;-ySEI zZQ-|jhdYBus#Jp_Yu1rBNWdZc`USF7yX`${*^ZG;^NOjwI7(i;LJ6LgHdBh8I!0Qi zWliBuIGIzx-$AJj@+Yy68sM$R9rC7Uzo8J=(X^*@$Ipc?T^?OnaB^6wZg%k^-(}dB zuv}|c?|;z3-}mzs%>GTskA1Zj$5BmxWWI3h3?k(z^@?}QyTG7TmISwv2W+6j@A(pw z%pJw9M^2_Rk(DFmGhndU*;)bP3bSMMtd4WU=^BQPcx{DYtr>WLp0{W(cF^)|@%giX ze)8$$l=3_UFp;!)Jxz6{3^rIt^~@afk5wnrSH7+PGrRW~D*o}!#E<7%Y(T%2rp}HAs|{-U=L(Nkw$4FMt~+nIY7vU~2J!n^I`p%LmTt;axS=;J zh#!~(4ybv3DBhg-X2d`2(PCG9&yrGn-poXAgYQ$%LJu=GqG^Y~P1DZv%xe;fd<3cZY=gCbokM)APrnU-Y<%R~?;r<}1 zB3r>eH2A9j!E_3)CW0pbYF%31-K78#i7yO=V4#)-=!t+T2*}X^X^^l#HR^kK&$V z4mZQ7^rNlH8LaB4Ri#7vQqescV4pEz&Ews;gB1`A*T@oP0|yBVoNPYwYAdA^YYvH& zt~W+Wr9L}fSdcxh+3dU&2WMqys2s}e$<6=Ve6(!~w)3{= zTqS~(USeRLnrEu_ZuyN37Gp)Z4aC4T5Xi$hJ6`hV`a6tRK7@OHNSA?GW^$=>!{)ob zkPMXFqs}^d$hG+n8)RIvQ6-yZ_JV|9Y5d>d2`+)eAlgEoG6iP9zC@Spusc;p$_@b;obu9aoiyixUb-N5;}oBk}5Uxp`{gRX+gu zpZm1IJ*?vXt~$D%;wz-rty)EwsU8=Po?3*gFH6`E{xIPbNwM;`w+%K{1h*Chvvjau zscZrM9zKWZYpK$77Ukeg9}Gx4NiTk(&g`O`!PP2PKdt18y=%Ifm5s!@kGQxw$55)u$G^s&TQPVSh*YgElMLEqB#QFw%{20-17ncI>X@=H)Nqv`KqXg$8T&)yG zNr>uQwe@z+dOQ{D)o`jz*$hs#A12w<)C|qj?L1~>Z7Z7K-OYoqSa+~B0sX6p#sEw; zwMAukwrhF5 z|K7Z?JXoIj&J+pVU>L1`2VE+b$D~svH^0g53sa^{nG>sNz->?V;VY}7{r;21$@(ATrA5%jDg;)Q$;)Sslym+y*D7&Hf+sa3= zp9m6IVsvcNK$EM?QL%BeZd4pRK}65zThMfH(}H@Nsxm2Q4<3f-b}TJ%Ts~kMZtw=0 zHVe?L9MSM>WVz$YC?=#cvGZB!#Z#w@(~}=aokHe#o@n}kPSD*@1;bYA9Qvk}`&9+; z_mdyYo%lw2)bPJ|1P4QUr4dYbc zs0B8@IPPPcgP^B+u~78#^zg@##eLmM@%G*!N~t*|EVAh`qEjxl*mA=jf9V57;H2ub z1`59~ZP>C(&S-d+n(I0{>?N>jS@Qrny^y)QI)WdS-I|IGQIOQ$$a=fd_D#N(_EGWikSyEQSjgZ^CAY2;va z*f80I-ua?-un0cJrN|>?SYDT*d{piCL48r}=WskD+-DRCH(3 zD`^&IA?@Otejj$U%ebM-&Ax|??J);0Z>qC;OpBVQM$`QtW4J4%V2J4`>@)t;PBD#C zRejMtSwG^1UVQ<-mB#s&P#fzR&z_koo3o=-izw@41k~=TyT`EY5Yl`>sp?wp-5l!5 
z(oApRNhfeNbT}(5HuEct5HN&_agSD1NeaHXY|r&AA?tAnN2K z?gj)yJOC;p0QzfCuF_|nR_`J9{$dLvpXu$r`hpW=Ao$mYTlOA-;9l3x%DNQ#il59C@etfC~;n+1>EXnYw!p zU;DD*SLseuv~~unicWP_#I853Wc!vbP~CEvUY6?iY71d}b56mnmySAuHEMD%&whfc zDbHG_HVI0!4Iqo@NJJ%mrgnd+LrTK{lu4`IPvt&4?es{Cz)gaH+(Wx+2n0>Ta9j+_gb3M6_KpyVzmK(^2RG-b#YrCxW-(vr`#U$;SSe|AT?=nb#4@|PLB0zNhggg zrB!xJu0Wtqo-&h`H^c)~Mm8~Fd9z9Cy_=KP@DW51v)Rwy!IR>pY&JaeI#AVH_@;`N zpgoS-Z%4Yc0vE1e`nDP!>vqVsF;nuX>vQQElUl#!9ORe7!1W)(3rpU2Nqy7<(v6Nk>||I!@0C3Kq_{X0m# z8=Rv;-Slr+uVuL^Eiz$aG53GL)Ng`#$F+!O5)wcrBHjdvR8B4s2e*heH;`^@5AS9D zcKljLVO(KG*QQf9glG!T(hFT406QQNZNT-&m0YCjK!pg)|I_9PYTG2@C4KcS?{S_Mzn1SHfw1-=WDuzW%CyPUW>uFd=NU#X*gkZ)Lnv(Xgm>bCJ>*%l!ZzfUD>1F3>W|@Eho<-R zW7qsHqnIOzifmjrN`rKZ@U3*~V}6QisnDtI{D0iYCek z!_LxpF+;*q^G@w_2-i-)mr$~NJ!8gyY5}toFXZ3*@fSRSjx0svupxD}~r&IV!w0sIC?d((`oC54Eptor4Dr4?C2 zvs^g0{_R;g^XvzPgou~czFEp-tG8}}v`BoTAS9Mf$Xmj#rPXiBjkx?hW}o6sV}D~pmw z9bn|fL*3u@Hk&p0R&FnmO5XZW`@i`$|Hj-=J`UMzY#%S={H#bJS@akCkgyD$@;@p^3r8~hz~ zb7~6f;cB@8kzX@dxrgbG*G#5Vz{#dlZnJ$#iy4`tNo2~C%}V{MEJ1R8G3=-yIO`aG zAfOnN^D*`5#KZ!Pku={zmqkR6F!mM+p*KR*8K1toev8lkUu1I@+65T368&41d#6*dPI8f+;|PVKs4faPI&LMzQ7yg70W68h{bT4(0O`r+RnJgZ zQ)|I7Od0BQc>^uNsaSgoe3=;p)g1HqQ+;4x{VXb1#ckQI+k5^EYvxY+A+NaMbEi@m zV2i%K9gVL|&1thMtV12Eb~oln6>~}T3g(1+{j(6S;iXydw|l?h#|abT_f_OYB=0Q{ z`-@#&1_)jekk96F16XjKWIe4E*r6C8p|5aoA zRKEt+3#&lsa)_*&SUaM*o=!bHOQ(8fsN=)9uz!0?F^LuG`GWd*YW%_0cJrY&(pKUS zycCf`qmN65=pU!7DUYaD2KmJal5)-3nl`X~Q_>d2nh5VKxz!5wo<&?txX_W%VLuX< z6317y|0F$N<6CTTc!4boKG3YO6atT=Js#-q+?<*TEN^l;9s~!D;CEpe(;q{OvhY5Q z!bvMfD7h_EVm*Df2b292k^ELM`ZD}FBQ}j(nJ_~e|K?v{jd3My#09~UR3N0@RSjf( z9`#pb{jMI+;kBF1iC1hFTf7f%X@fwbAbW46885A_A=RyWCsloSZs6?HNZ&M#Y$Le= zX5)P*!UlT4X7gCj1Pz>eWV^&r8&dWCr`=imL-u*IbhxSe4R|2N+eN!ncGaBPdvc0J zOADq`I)H!ye=Tr_cs%&7DqW%W!@$x2#Y`~Pb>=0h0uG%h!PFo(mlZ5e_!m=swS-9Icetj>)}m4@3@S2{E;Far#DMv%tfcaTD^)hh8(cu?uwK+xpzOiHLRS`o7p z==y=)b0J868$}RyjT$m6rGItJ>Ta>@r6~4BhkOSmc+-}d^y~)MWdT5WYJEPn^Ri}i_D*n1br?7_ofWE{=QRtYpahAFi|T;1d7rWG4}4(oEIIjsai=Rpzio@@8ABu4R z8N;}NJaeGGxJ}^f*O3sI40(R0_yDvU>ou{wt^XR(9^j44ww zT^~XGts45HujThix@>6PvQ`n8Luj_9YsW?=d2Eu)s3KiPscW}!~6|t zs#(RhhDu)Lw$$_R0lLP5NA*JacaTtH?#+8m%moyTg}_T1@vC{soE33z-5IT6xUmwv$@=y$Dv4hA5-0MkGl5cE2Lz0G%^<;R|H7#$1V4s&%8@S zJNp61mgr&o?@HU7oJ*VDEQgR1J5Z~#l?P&aJHC)+ePHV!kvKvQvo$Qf*q7R+nCXMo zeZP3CakY(cG?u83TE?jP&5-0gydLgo{r)= zDAfUfO3%U#A8i53G!vPlMfFd{_lL{2YHnUD0MdmD7uMceaoxH{MuLw~gU#OZjibT= z*>EVA;hMJAdklXIMLbIG8!WuXNwN1ZCV+Bg6EK{+*4xzOWHDtSu1*u< zF3WyBYQ(!uS&bPVMm{mra_}9)Gv&?~T#JK04t)qp`NJCL=h9DtVW~Vl{R0&)a)>+4 zRI<8$c9rH!!ugTWBjvtTgz@j^jkmWuM=m>+j%^QSf8tF5u*SyDC4MYD`>|Q+J$4X!2v=oS$X-n2+7H)D`GG8|X>Z zFMiNbZ*XS4OYOa3(%?k<%p~<%VWT@W)r~b$cpWR@5r}rV*@S6Ikh& zU5gy7h!Vn;e`a8-6pDYNpd<&Ur^YR*caY4gqXr79;}^Z@m0qpKSJo@G2b(*JAuB$g z8aLtguvD8FXtz_3=Qn#SXYZl8Wv?}Li5Cf_i-?_jW19DWK=b$g$j+27p5qxiAOV2| z5XUpXscW2lpPfU0QcHmW+X>^+Jk;9Y`_(5WphnYyVtO0CKW<@R+wwMzdER`fCs50* zTlFOfP`b&<`MO5BsZVrEw3GKnabASyc%1mx*W~RWT#)&dUXSTfCBey+{+@6F2wVeJ zOwlOoB}g8TPh^<2ZmgufDXi;RJmazqE8ly^OZ|_J%0&cMHClEH!t!|SfWL6gOa+-2 zlS0H`3wy0?eN!Df^!9@xi=#KXA}QXFuIZgqp7)*IKg}Jz(29VTUyI zwnJ$+Yh$CTcgZc3oTYZvJtc*&Z(OCnU{mG&PN<&7%>3ZOU5wQz18gurnPe?&7d=>< ziUK?#m1l?AUE^;Qd}tY@QyaE6e#PEz!g!~(2W)_qwwqA{z`_IGdk1fXKFy7H4Q>?` z6cS;6OB8bGH>PCq4}aEg&3);;bEtkNEViC<===cP?mLiQ1JKdNrc!-NAae=!=Mf?t zYr@qcIdj-3-G(MC<1oF+~*XS6!RDUy(A(KS* zNDbkeZ+8_QPQ6B+Z-*W?il-E9HhP4q4?Egy1|X+c4qRI)1uZ_wU+M;2(#qF^R=jdo zE+`CaAPv>)7YHX2Zr*zHV?+XV)9;{tju~b8B|S#cUU>^Rapejrj-`dU1S(Yh8m9{S zpOq8GeUJoT(IuFG9^L|Sy9jz|!o%|g1lwj4P$c^`tx`xRm)w`;Z(8L_9lj9&f&uX+ z_uj&)L^~Z=Z1J#ja_~z}sgalGeh~B?l7?oEC3#q)@f#RHEMeXq^(XD1ZoCb@5uRpv 
zZE@LGL9)PjWurGFD6UEJ4|{7ZtHr9iP5N%>%AEBM@Cd5i3JErPl< z4M#5~yNF}>DaSj6ugbLMqk`PWP|7yWSYOgIPT*)uh~ntE6EOS0LULt2Z7OX!5L-O3 zks~v?1B0)8v)fAv(S!(ngX%rBb2b0c)K&(eh7Tu9lzB4y!KeTiqNf2Yt|*K^8w?)3 zGt#v)7VK)zzXHwA?5HZFB&{zqesdW!?BpF8{ za$GllGK#+m8x~R*lsrx}!RIKolsy#Gp%$f<5jD%)=J=CyEez$?QAt# zDoEADwD=#eX@YyXCKV#Vb~s*M3mYFYzbP+Wjm*D#kDjgWB@{BK@0KRJGNs-UaPY(z zTLq+V89BDfb6T-{ap%Yl{|c9D@%Bz%CbFk`8vNrK9X`yJPG@NOY%mIcsDSK6z^H9% z%I7S#WGm(THa^qa5>C#w{hV6QDK+SwqV~9+GZRZHfvUCy)=_(dzivtoo1FzDq?MOi zM}gXRP!*w9qcTn9Y0VAjwPkwCjxey!`0ttb4#ts%&%0&1#W5)F*bX5W%0!5DKu6 zxPVAhtbP%wIvw1+C^uAum{NpYR>?V661aqe(|V8}u8Y5S87btgv!LZF&-?ZdtuM%( z>BBj8Am36``Da^mlt}n;#93HYvGC2|x6MV&u#RHkwXb7%A)-S+_2b6Yi0$;bdt+5V zY)Mjo>E?w6O<`B1O(8I$jn>@~9&Nj_UTIxY$JupNwq8a@K&MI`8V<$l1H~Y7>XBdz z&kk$j9Vx($1#z?Q zImI7&LE<7OLSLiV_SHjJtqs1xQavXykrbJAicRb@ZzA6_5Vsin3*BcWI`vY>9FOdO z?OnK+V1XrbtyuiZ3@YxVBzfEt;}V^Cx6pC~k$)4r&~cObN#8dO+0UD4Uek%yWX7|W z2AAArQ=Q@EE-FtO2f!!J*nY5CpxQ17tIy2@f6EG)V#8u9yfCsI2ZuVUh`NKpsMw(X zw@TA1P|a55Yi(bqTV1CwQBRAl@$wd4DYMXsMTgO72C5nnOlU{ zQS1*p4W&{qC1iP^gL)VAp5bMD5F`y!2RN*GfkLGs9EphuB0vTKZ8nD5r=KO0y@Fvb zbpi1kfBD1$5pL%`?Oaonap_~LP&TfPygFkun?vL(Fs^wZdUJxZcRYt^kaQGgr+5V8 zBG@Wtmn8>pvn49n0?~4MRN`Dyj6i+d3KFQ?V;IiTIL;t#@_OaO)rOG^TM#k<3q4;zw%X)70aQt$EZ}UH~(N z`h_TJShQ7VOyIZTHgL$Qm*1w(U)8;70&8@qAMf3<-5;KG z2!2|f96eaG9@3|zT7`QG4Tr=*C?TPN>Db?LgSOSlT=uTeq)mL$A=XVB0Swt*_@hcI5P_JGt0YTcS3-NGmo=EE)=>R#lu*@47}> zI2&9cq@pyjHqk^y1>uLi4yJqR{SF8R2Uy@fT*pw0R!`gLE!@(;4$5FHt)KKQmq5OK z7E-HI*?rd5Wpr{OiCTumNj>saZftk6Mvx|PF2i;*l~rG+A{HXdub;<^)EczPSh!}c zxZ1A5{>@(V7bJfzrhz34J0}nXWO^yvKsxUfw}B3@p3~)H6MF#?(M~*NrgK+MPdieT zSC2=Rz_NK7oOm0M|dpZCYznsywZHUuVhb@jr7P_8B}og7C6r+LzN&MEw09 zQ~KBaZv&eb1k%&h)#oXVe1Bh0=fMRL4v?;ab~>;v&nCt$d*<=g(msxa)BG2ucoTTt zbuRLP%sIH_nHAALnbAhmj0)tIv!drr{%xG&mlqlGfdguaxVOmslwef!A%~MSf8rO4 zLw%BDxjv}9G55^&@6y!jdNb9MHb+fkZYm6-J*?YCsZNBt>acM#yf^!tw~C+VsP9Y~ zpjY%8E`+L4(r&EP*0O8H2?=J%x5 zRRC}-m(xQl;V)8OJ+T?-cdxhcI>*FhU}1qPf(76-dHjUWTzdvtQ1(7M*)FuS{ z@Xb2Q)K7#do3*qeg+an;v+I#+(#$=36{Njaf#C0_XIIECOwuCT|Kw_aaHQiQTGC((4}W28e> zQbk|Rwh{*~cz1rKrBOZ4W7{hj>g6yy>)AW{a(hZyZ&=i_An!*n4QcO-|qL zN=i{#Y7p^oX;$(~mBlLg?#O=-0(gZPJR*9)Wnjq385!|VBpE1zWtEt3E-sxQq7BmJ z5=%J`$dv?faY;$BUH38j9ySn28t^y)8aMb~jq5WmTzr#>c<<7W&ake(XYFo3Wf9P* z(#7;Vc4w*Vi+iOUvU~)(l0ZeT9QD8`Yz#M{?Uwr2qWfd)Z_nW@avfU-)8Y+`_D0X^X6 z)P2Sd5-;V9cqR|8Nc)LVJSSQ1nxXyTt*Ku+1YQUz`fxqf?98YaPaWE@X2i)O2(yHn z0uJpk#hS6%w=*I8QQK!0N0-Oy5nc^JsWk^CYjHcv1?4u&bg{Kjj~~F=jxC-jvrO1RWhUmGQ9^vj*bL3i)GYNqjz8=eZRBWYvC-C6AQlzF)u4O z$VE|?hWCu!R+|I@DbFutR=SkkFvX4qK)XYYuBCnpKXx`Syh@YspJR2J>8W>Y6hKbGo4Q}pks1`v@)ba12{ZPAo<5OLs{;edS%C0hlO3(Krn=luT zkUC0Aa58a|9_ ziv>bt_^Znom4agJsg^#j+c`=?nk9bJ*qNTGlS=W1YDOrakk!>dqj;;6!!gi@Azl&+ zl@=Q;l(G6<2a8AkbtMZnpl^aWM?akq28y2(8pC|{B zn-xoVMn!+h(6p2PvqIi7Ze9r;$b7=`ev3%W%KF_uu9l~KIzO{NHF1lV)capGavbmE zA947#IUD<{-I|Wt09Sy3-CE$(!hKO-p9I*P1s2s0xc5lul|GYB0%V4WEfsR@CxW29 zp6|o&Bbnd!si0$vnym_-GR5V8NR9-;{&sBCB`+)+^k|R z<)-A7EGKJPXXn)Za$A>=ZDUHPCHk$I5@#KEqS~PE)OGKz_TPry`4ZL7K=_9xSfi6~ z2=%D5>0@eS2L>2t{Hs(r(mZt(ufvqsF2mb8d~oq&{gzND(*T>BZRfSv5`prX~5fXQ4hDoa>%Qz7#! 
z8RoOoIO?*drMXR@wUbj@@m+LMFETc{@ZsQx*v8^?bfP6>%qCNJ(%MTveRNSNhMpCo z=l3zW^47?Htj_>l*`#Km_DH{Y_Np>);6eMJ!R|i%Pbka-MFSvMfsOsv)go!2$eM`u z14R(vIRd8r0k5%kHUK%`^VBwFe{qqEmsb#rQ~AlF9N%@9?^JTQTeih&=b9>Bg4E}y z+?0~7DNnr-?PU@1f*wNFKVxxsaMPO#%g_vLOs8P*B&dq8OW1+1_`ydDvG4}pFxONE z%PfVrNFngK57EP=>p0{T4Y?`&hcgo%FnZKgZ?CFHtZOmYtSaj?sy0peM(pLZ{V406 zo$L}8Ep{PW!ItdN#+DO#i@0S*w}Qwsy*Hg}aq8!=ftVLxY~KF@Ge|M0T_FHa`XZdV zJfa4`1-Nx}0SUNUXba%;XOS1gcOB&*aS7lXjyX$TDPRp;yV5}H#@EY%*)1pky|^0 z9gu854pSXKf<ZHLl$qycn3*W+=~Wl`i0St^3`6Y2AHTpqt4Ij zQ@1D3JWm)g*9@$xZ@}Q?Rs7)=L_tC9Ia9zS>glW;Y;?Hk1@`~1*>^6WjllyHnG?|l z>=zC;V7n0r7SN8AV)GGu!ST=-s1Y2oYukuOii>bQ;MUdC0bP2or`<#x-1v!kTMpgK znq9b!Z-+dIK6k^WfAdnAV{c!9EM3Fml?5~ zH$#=V4ATXyldiUrTZD6uhuSKRdVU8fr9q#t;#A!0D2mv1L!57Qn?6FOai#r zGI&iL*)IZ0OT1)PUJa0c3I&n+8ps+~aStNaKa)ud=tsTi)*FdQh5~@1vmx`YiscBs zG=VXf$(5|_dGo_epA%lH^MmeEVe*ouD5k!3+i*~6-#Nl#Xko6wSo>?T<e5lgtTy7ckhn5C?9hOqpSWlA5- z=3KzS2@-g5B9;aJ#HWOOYVy3F(H&HhT z0&B`!$6DKfnCk(^u~ciH3d?F)jOp~b(hmuD&R?l8E)`3)IXmABoVjY1Q;F6c@NfTP zoE8%kW*;!&{ITA-HIsmBqGJ6RE#u0R|8*$!DBelsK9GnW5L5Ser7#GnZVPZVV5k2z zx$J;@l?U()vGH>N0hpZnBcoYAMTi}uE<9Y?tPTFOFykx(t7%(=3}&_$$YL=*UD&c5 zLnBLGN;E`9AoHpOF>~msEl?-p`tvwH(Z@;=hfZex*H`yh(~|$ z&VtL0$u*5?M+Whk)BFV>>9GW`z9*Hx{L?(dOWE=~8fE5;%+VJ}HH#TTcoK59T3%D@ z1HHK0{d1at%0>8E%`l_8n?g6r=8(W7{dDb#9#af@r+XhH0?a3{EygDHJo!FQV$VWS z0uW^&fT%%#hvu?#B=Ad}5f{;ZR%AE3VEb=e_`O^$PZi?u?U^PUVtF4RQD@%Sx=dG> zhZzjC*Q)lbiZwKt61)nh?bD|Vu~6plwi$*ZQ%VMZQKh}2@UE%8Zb^jNu^}ec%7H6fzZSy zGdpL=hu361FlAtuDD$3CQGH&0uz|Ur_eB`jl{W<5w!8zwaX|y>%y5J0s8X&*iHpZl zOJ}6at}nlz9U)q#zAzW7v39(f+TeLC#vr-gxl`~wtJWa2NKPIo8MM%$68GsFewLVN zYe%xNmSY(e} z2tzRm3Gvd&OE#(EQTMT_F}Ab4qNY*S_0>*T$gz64>X?E`81!Y(_As^J*$*WDec#=2 zMBW$C=V7mljJyaqkofq3I13O+)Kv7kg{ZN)nCPBA0p2s_NPJfMSX$(PloZ?EvpT7R z`X5INep(#wrP7JWA;_I`xAZNQK+ zN{93Pk@n)K-}eV}5)x|i?8JLUp@oh;*J-kwNiOX+8&|yG&_tJUf^OCMPE)M1XWvU{ zf?SGdifvnEk$gpxWR8=HVk42a1>=MdROo(tZ)N#cnEo&RcTj4I#T+}H0#j0OE-Ynl zZZB~+F8km{u<6{pfA6vElrza_&mUq1P5U#ydOrq!UgY=f?Jg4{EaD40B-j(=v@>oQCh_o}QdG8JmOazwlb!ysx9|>Ew6rduK{!9$HW8CJ#a$Yy zdl)Un={-ccd@QlnPBGy>Hpeer|NsB_|A2w8??WJtJ#3&odqLkp8{$W{lt&Z>ryOeA zTU}#A$;Wn3N?RJx)d0$urK;W6?^<0W{|~SKU%m!j|DV1F8u`0z@huJgU)ugpmkNWE zyCb;^yyo3O0bl<+lDezAue*=!&JS8y@N1n$tBrQ&fbCLkRkC1V6p@ZSAYk zAItp~mWs+QqY$;G$^_EcKI)9(~7kAj<@i3z(JH_p;=;c{;tzxjr2 z{0`$YN_e`oCn_o`BC0IwCTYOQ_V9(P=R5eUIIb9r^zGH;k`5NxF0=@|6UGQ*{|eag zSL{=CECllA9;`$Al5HPoqw~MwUfU6{g7VXLIxhLV4IOCDolEo<-@i1 zMq#h&get#<#R#rD;V4gnVCBjPF3o4r?tO}dg?$we-^RWq9BI@B%%8U*a{t(wk0yy_ zSi$^hBfaO-CmvA(8Fq76LFs&xD~C;_fsfGWR?tP0>nq46AY0~1qf1o%<)>|hF6-sD4{I-wc%_ z?fap}LQr&C4=rwCVd4LGCqO^jz@RN?77mZuk3B}%cV9X`Ad`1(ceu@aXhS$l_umTn zwGJfH&g7i+O}R4H>@Pnvf<~7u*O{A}&#~?Ky{KL>&+j=~2^VX?=rih1`MzV}Q)wng zwT-fq?e!L~yieBK?ap#C|CejQuB_fq{@v+I;tK$Ui1e|c8TJ!)om60-^kG24Kx?># zxjmbLQ6M})2Fur#&%+fJ6(n8OKq-ZyUkPrj~2kQT^VLFU+#*q#T5&4E;JgT zDXc9J>;Bb9;BF*GRI!pVI+^pLWnm!yNO;xfXVE4cXii6j8plJMI-MKRsY9`zzfqrOF0CB+d`BnDs5}B-B z)E85LeiW!@N^r)WYW-$;;`>CU0Zc5|?IKj`U{o&W8LWTVTWhU{0j(eI;zDojQkGp9 zfCj;hC7pbhedP^@ZN9c|!DbKTuAo1g?2gHmT5ni;}`2R*O+4q6OjU-}OEi-y7k(5~_11F?%P zb?%Z?Zya43#q@fBm`187yz) z6#{l&fjE!1L7@0yX!qN78RvBi3+-*g^A{(Aq5*PR5$iKV0@XOG!g{3kC+x5Py|OQa zM+VbP6u)rFV#GK>Fqu;M)gnYXv!t}^iMbx+}}8!=+JY!XEmC9hK_o zRI}{K!)I+;(VsIk+bC%BW zIeNqWt;{OlqA_e9r5>I;w%h&jhqCC^?z`sZ=r1?i@6s%EE=d-<%V|r?&r4#JO;JG! 
z&gK@JJx_}WBaadS<@19-pYrW<;V|G^w0TE(Hhsn7OqI4htpN2Imz~$_>#o11`>E3I zSO_69rR|N>{)>T(#T$tIod@N9H@)cZUgg8e^S0|0^jBGzbp6^7NaCt13;jVdFM*vP z^Ws+=Iu?FaI@)&Vk}kA-S7G|q*aaRu{Ot?x8|hE}pt*Zaxp~7!#L`p7j2pqNKDe#u zLRphL4?NOxh`z~J-aY5m=~kK$>^FbjGd;Un%F5Aq!( z`PGp8)g72&6Kzq;fQ#?3+);aZdh7D%0d;~}W)H`{+k5uK(G838XNRbK`cYpzS6Gk{Awk& z8Jvgylk47|J;npi_TP}cgi&(+YS{brl4eBX^EZ~xdz_6UkMug_vqeRro2A)MU-rm# z`}jUCaTfU)6e;xd&+M*@qds>|p|ANpoUfej)j2VJ^2&&9foqhe zEc29#IO)`ArHu**YLp7Xj=3$h<2^_-mJaWwXStt*ZO`JD+31t6n7~u z5N?W{NvN0aWfP+joMrkMLMzY0)*9k}@reZ3xAx`xJk4xTxx-u3qa#3Th`!6c>`svb%7t1LwwjPDCj7$|ulMjY79@oo8AiKR)ouc4BGRyC2{=6HuTG=P`?q((O^QyD%%8%@z zeR{iqfk_fj*yD`mHsTNtJDuJg!D6&gj7KFXT)8>l^KN|K8Qm4R5mpCenLsgQBIC0| zQRO*L#@=B4wbVAEQ=4XezDEc0;c!*ISavMcV@*MK?mha`;hG!E_$yQ+YQ`|Xzl7pB zXWK?sZxP$sq6t;cHtrLz!xgHFjfBZ)_X-CYXAl zyJJoC5%$$2MmxoONLH;sjV2Q!@P}9jnq4F*vJdM?Ol1#yWfnA%>V82{ zJRZ;GTkIlwY0U0LzqE-9X73Vzf%>fTQ(e{(R0ISFD3Yd*1r$iY?Ir=EFzsFhZKqkQ-V$&(@Y;MA8KMQ-|a15 z>E7R@#lQ`+y~x-$s#J~;OA5=Im@sr$y?$}?Pd~ewd^Pdv^ZHb>+yhIx)hv!MW|+QS z7&L^yUTZP5MLXNAwEYk5stwunW#v_Z|v(gE{n!t2-afuZG-5&Y9A z5$a$c67oCRA~v-Me?1XlOM(2`7~L+ky`dP-i*z;YNcgQJ_$vX+&~e zqQqeJoxle2*SCj?FBiTWb~T*RjO?`Tz%;FCz97y%V*n#AOio8~HV%sj^+@xXQq42^ zdkW*lt;^!u!_ED0^#+!MiPDQMLO9jwmZM%re2>vch_scLfN_#NPGQ@*2=X9I+!#b| z@XM8S;}gh#Yk%(WwI^himE9aP9e^Iu-&YbE-57ISS2rNzuX@XBUYyUr9o;`>d$iz! zfAgd7 z!#|eapqsa|Oo%+*Y}N0cAY0P`+vKnWMeo57^%yL7mi zX^A|nU92|K9J-;P zMb0kDV$TO>C(DQdywRqsQ8{W(OS-*vj+&Ly8mY>ZNdieZvraWuUcdJ(B7hxTD22Mv0RwN% zDMZwC-#&;o>}tW<9{ZEU(iz%yGv%7$8KibW8~d=7+Z9qu=>+mA3T!6 zoz*EOXV?Vbdh-sk5*bf@4J*+)ILLXjVxg$qS-xUYGY8xOAc5ZkU-*da9F|Nbr^sYY zBkIHDOQU1Ln6*7JnKOH_n$3|=poM0?d%w#v9sife5X#*HudylWb396}ib2zilKbdk z;>Ngvla(=7;NYgo$c(}+{Cgl85eH+c5BC>!Ugo_08w%hwsSitrQc8fDJ7{w1g53qX zHt_c;wSnXR?`h#l|4sk%_`m1=ANM%he-N|Jud>Ld%eY|*K?92dA(TY2SZ3-%3V|3Q z{9^lUqy&0+oH&=S#6yt=%N328woD^%8uQ_mrW=^!i`rbWgk<4#c_0pPR@F%Z-PZfR zJ24&CnGKBXe-Mc%2{_+f93%)U6%G0vy7MS`W}EVsPtu*?g@Zw7uTz&_Ok7Gk8~1AA z!2iDopU(XH;?2UDzfOSz+uh$qoOt1X&*D+CPpMBwA}!3>3*M6$_e#9xS644p#qCzN zRN~`Ns@ElR@8{Tq;@BI&;~MTf@qOfv-Iw*0y360287cod=p0#|yE56@^MXySYu3!R z+_SW3WlvGz8r%2V2;WxaKCfFHx+`)f+GEDL4*?rUI6NPl>&l##(G2mwDQFH5m*Uyg zfGS2g`TPHlZI8|BWpm3(IealPwar@(Ps^_N;}GN&_o7SD3MtmGGA~kt`UjD`h}sL3 zV7lW0SOIZmNdLavvzo0@m1kb{*IGguSm0$=lMBXToJLrrb507@v=hoZqWMIa4nEDP zDTweRVhdqKuY{L8vEX_&l@cBfe~qy=4qcYj6#JUdOoEOZ%hBP6SiR!*W68P3LX@50 zM0MXVE1gC3QcH*wmLxv~0iS7P8} zPJuL11xdOW>2RgQq27@vlM;`DWJ=kpC2sE(ue-g3b*?r zv|uQ&(_u}ex(E^jITb2*rO1m|oJiWw-yh5%eRIDZ!px(b{SYjJOO~&XOr9(yVZDbM z3sY8SX9A2dhGLJvTFR#^E*CcI3MPPRRMXYSHhokNl!0*kIzk8qMMWiE;ZUD6e|dMbiTD^VJD{?!dBYSsHff0lN~%*;ICml#Z*ieW&C0GsM9Qt^nNo9C z{gr}iAn;kib+#(!%JyT3o)TGSIJG=-dunuo%6@8s+Rn8-03%8Yo<#k4=2iUFR%YES z5ejQyCwp&BY+hc}9bJkHy1-n5zMZtYCE43F=!;HytUiZd;2fxRDl4+2IupkB>DR6v zi_DjfF&K2(Yxsz>9qu&7rIpip|Iy3y{gjuf<_yL|`tls5wW=cK?Bh`!n#*m(TK6r3 z9?}B?{BJmNzb5D^`GR7kCB^7Ba#sIgiEXHIqwL^Sifu5{V(h-$-q1?pX@q2~HRBQ8 zfulLcwTH9%YZ0xqgvs@4f34$}!CKzry>4tP)8Hy4H|F)$ZuuZt%3a3b-Z%F3@RCex z+0Ki8q5p8se{ngvRyTT@yz9IA;|80wf|{%nh0K{F#fssI=&ooV&zo}JazujZOt|1> zrA5Lf&cDaKVXH*<+|Kt~S%nGgUokVP+B|SfWN2{(#PoR)99JQknzcBUL~>-7WLH~cMy-?xl-C`Gn{`25 zUw-K|$s1|C3Qg&SS_txAU%~VcT9uY^!t2D&R}{C}D!1?{Xl+DNgxNVG*>u!1ao~wt z$Z6FgWNulAreziNT6??62pC;(RYR@(*4?dp2hO#ATaaCkqrfFv5uutJo>&m&)mhmoY+UR=A+K|SN-)M zCcoj-c8J10TtTrugrf{fW7{ApQW;2-b9J$tn3^fQ9L~SHS#Li$DhrId0Y&Ed-_hP~ z!<6GK+E;gbl;IAs4iSd;M)i;IM{)#p{LNEPH158#q1)c(_sKrhmKKMG-zWBNHSRh& z&_WDgbEGnP>>Xv`<4l#w82V>%VNiDjVW1dei!00*Q4r>S(knUVNPXU)vF%IxNH7Lb zF5AR<#X1Rs{LBYgejyE3DKx+n#P z*HgPpa%lWWUru1^3ca(v^>{q%FS@2Ml*}AhPvo2V}yIk&&~lOo28Gr%6RTO(_%8=7Gia!IE^FQQm$Wh|6-) 
zyS@z89O1r-YP;%j0=P)o9h`i)_wq^Ht^<1k$F zBt|9Th+RUc4z#q+JRO7Ss?nS(h~JqFyUS%GgvX?Tp+YP4`E0fbKOb%q^%D6= zQQsdnV{eOrtmy07&&q%CJMh9XOlwwoG(ROso=N04$K?vGTY_x#9-=4;TqUwk%Mmox zos1pIzZrq^)6T--PJCnFUv@#(f0dB3|-Im2zz6Yt_%KFpJiQKQ; z8-pv@+;Nc0v&;wya|YUIge^#t1WAnYVf&q>9Ub=Vnn*wA8>_`Dp8tcOFiri!!tB0O zeW=(!*HzBVKv2^t)CXPRGKE(Nl4Swac>1b=hvH%y>qOt1e2m zGBdsS5O1f)pc+>JY{7Sx}1~@`@xghkMM{a(wf=>lImrTP~uQc~IUW(!v2+AlsnW2VuZE8CU;eM@@MhgwCtT%@$-499Fbu@V+? zB#z4-s$AsxVJUpY?6wzERE_QCV2d+clJ=oapNl30Deq`nDR5=50~io$^+AOGF61jZ zvw`Icxk0#QmC@-+VZjZ1?Qz-e+@`kjKW$(B+H^fGjbGK>t=hGQyZBuB*NB4lFWT7o z8mFp}?m_efJD@(wv(CeB4Hmzpmu$j6<1F~46W z>E$w5mc(F(QdX+`M$hiyAa@qe_fbQ(bU4zk(udS%iN{#5x8AdQIEIvm-31M=^Z}VS zzqBu?oV`QHBW`51oj)-QYi$|0L!2{_+0H!e9j!tsx80QxYSzdtoN-@wzA=Q?QGN@e z34%%HP03OZeH9@lN3=1L7}8wKY$3!&J-Z_Jf%ghU?C&s@E`rWJvo50fZS97ajm8g1 zM?x{qR2Ik}^EEWS%-NBSCX`S_;anNX249L?N-?51i+*aEuyna_lXu{!;&HC&O};YW zF{eb)d=f_zdUkz%gznnE(mZj`Ik(r-Dh1JPElqQ42CF@+#g$M(?Ejo;ypYwbs}wXI zI~f*e&RYLNDRw{gmk;rM_MPU}8Jw+ASue#YmawJCa|=P`5Ls6BPUtJdWv{69ebe;Y zy{Xx~Z#m7UlHCiWi`82c#c;VsQs^nQRY$+&2HNhIu3WuN{nfN}$I4znN!0vchv@S- zcWDXyqb~6G=Wn=MfyH>sz1>tqsKowYf40Xq^k~KEU$bYg#@*iOJtc-h-+Vn!iR40& z$NbF@N=ZZ@t#xA#&7?ipP>AvzYPlQs!g4Nbo)TM2o`DeZN>UYBOtP*7X4p4i(wh1< zi%Uzh2q8_U?QP40rb5)XDh4*mH%M&4Jo_R>f-C8ug_Q6UO#K#TuIl^lHB}Q^ef5Nz zy%FoXJ<`urLmOceb)yI$_<){8`-AWkse0eEESwd-Tr4}0-*H*IuK$5nNy?-w_;#?5 zrP#XG8aFKT9l)_CzjuqOhvD2gED>&#{$Fm zHQmm-i9&(m%B?I}6y_;I8K)q5mrmK57@CGi!q&^5%lL{TyD?{?r%}rTu|5-#sdX#D ziz}@8UhN2(*UCkki#7X|EC2zd72HkO^{oZHEY)wIbj7=8pVjDEPvqSw z%?#mt(XnZ8i{;k$A?e=<$tkYyu!fIKIcM?=8^)r47`zxcGQhPRk-Y%3Z;>t)vJbnX zWGKaBag%T3W!U5JMPBM+1ihv}QM}5EYC~GiByL_=dXI-irJcMb@r&Rm7c0-MLORl! z@ER9kbly{rx;u+N9za#h7Zf}BSNGDjOJ2vvs0&00tto?lJ~F&NQ}S|(8=^LO ztJE;MYgDlg9Z3f_;HM~RN6pUWjbO@KhnGYJl?1{Kyi2UZmGV{BDsCfEp)L6m2SvZV zwIZlEuh=y-5lNNAzz{yld85I-VFGEFYPatEmD`qi+Ak}A4BSLv zBkq@Q!(!aq*?87VkDDVSlgG+G6MSy?-1%!T5%t3Iy%BCzAR%+MpZ5KqqM(0o-?*Zh zT-bUw#$~jo!IIx#`+3Tae-Mhp=pjuA=j$o~OffMiA2;ciFNZ^F=`+H%^*?kzwE8|7 z6j*KNQbouP$`o3Sau}37>=h^1Hk8!GOpLzO_3;0&>(JZ|(F$rNRqt7Q>!b0@s{d8) z?R{LOIKzDl+4DoBvDZd0mkYy;X-N)4rM;5|naa!ax3>^P{# zSi}2MSB?kX@YlqR#OdF74ZEfK2c-HcU);WDc>M1)JNrx8$nEIk=-7+du4BaZZOGjADVufb8b$x~dKZ4xv6i!n#Ld~S$v7JBdc0hBuJp6A);r0o*#-lBQ=U8k*lIA;q|v|)*M~h30)s$>|6fA zSQ+ z9JwK=(58MrYZd0#+!c#fMeZls#z}u0e@{-gxFUG=N?)mYLyxeNwJIV7<{hk?b)~C$ zm#%78sN?#kGh-zwRi%r(tuyq8cDdapJ8c_P5x$P2GjcC0-Ho^`*hOw{!!MX}hvnp4 z9HWgsLty@j$dVn}h{$8%PibG1|9u$aW>|&D*ihV6zs9&JTG+d zRpDvn`_X3F#pS(X??d<#cNfP4GcSFgaVfADEWM*j+Dwc>5exmr;yLrNzrtQN?ns)} zDcdQVs8N0)Q{=;?g!=hase4_}s1b_6rBF$d%d_Ybey4KD5LkF#kHy(jBUdQpraZM( z1+}@YoJcN+fAfg3vY~IS&A<0p#8AszOjeleIoWZ|jeDGrNUiM!}}(nVlrS$?`VIymF6GBi!^@CC^w=x)&YNLE7CVrdO#mt!k8M1%aKNs z)@MyfMt-D^(cD-SlqwMl;i@l)ZL=i0e;L0|bFt{6JZIxFx-KWy39syupPIfhN_~iX zv+gs*>KBTMgCM3bj`YiP!dcAUmXY5DjSLK@b%tH3JH+;lFqP1p{dL&J%*+s-B|Fk! 
zw+el+r4HztySI#6?~5~!m50VOrq{bf@aachEsStlYc2_mNyr#Gm?qxtTp{@JhN9SX z_04tu+AxD6BHcuVm55dhVacvKS!%Lb^L(uVjVR*7`LM(i8C_r2|FQ#V5qH1OzH+(2 z8QzulXuS+~O1|gymo#WtnJMzIO0@=s;uzS07p6AVVzw=}NiAVr%)7clyUx&- z%^>CEKypXTB*tE7~I26a>WRX=U?RxK_@w=uCpj94+o8^S%<|HyuCLD(ql zsg^GFY?1N9)ZrgggIeahYF*clU)ab5Ck!Ak;}<`^?yrBX{{akzm8gaZt3Xsz+@tWO z9;^}OyaYqykGEQBWbrERx-59cqEvwPo6lGMaL#uqKZ^_gbN6zW%uM=Z=oc!)>%%>iSwS z>?0$KUATgHO_jb>?}6*&F!xa9*f85bBLBzeZK8c$C*K{NQ~AzjQ6SDJdc(_VP^J&c zJ>Mj*Bru(ZTDk1~kdjc$xnVxgG&tgU1@^$j8#mhOI6=AUuehP)s%u+%I8MkVsB*uC z^UxPFuIu!C1nu`5Nz3>THk`42k#%eho@h(IAAW7%>GtGBX>SVf~D*V*GkxT6# zKcB&#@}e%7SiQGof#w>OBOL>fRQU@%xs@lf=*)>=8_)K$cEtS=ee1z#>z%ohNqRbd!Y8fe!O$k0_3CT~i#n?sncd;`>2A!D!M5|= zH7rotJ>;fU6!$!dvkr!E3-cO;um&=HYIz0si6ZiV+<26@JXW3;SxeRAIS#x#`e9Ri4Y1C4^F7+-N1INB4=<8% zBMIrZhe+L7E_+4d?3q zL^PKF`fK@6$J)$-4Xlp1NvBAA%V!1q)h3q}{cU!!s(@7g^&NHiMHfeMcAMP_3;lPy$yKGL8U5gL&JJB9v;d{~x%ZFkBBKYzS!1NroIdt#7@>Ud_#0Z=ml zm$%T(Yp6b?GI^1m-s}90G(qUJlZpol$PJaWYoo-0^f$+0IE3H zL&i7kCw#qP@Y;BODC%I~?w~TJMT_)}ok=@=!Y+jL>!F?z)SC#1-gJ*1$27|{ zx=_$;6S{~R&(;O37hWJsNcq>TnXc;EDA1J{!hx3#pfJlgLnuS+)zrMlb{%#d`0s!Z z*9kztQ3=W4$lSg<+<9p!YKFEZcTe0{Xf!V=I{IjEk_y?NqUYrUaoNwC(DAv!oQT#9MK!A5Xw2kq&NeiK}gPP~%`5MuZ z9?mY4x*F6>n%G<(A|ci3D)o)&(VtpvoyO3_loaDV@4~yYmZH{}?q(_dX%Y`|C{zSgjT$*68#N-XUQvQ+{R=PYpXi2+`)Bf-%p>VxPzZbOMs^KMs3_? z_bVGyDXG#drH9oyDfWh>+fT_OOVJaiSOa za)w>&BN2~Krr#J$Bh0r8d>FlD3i?plQ|^Q6E#1n}PP8x;aqKSUcl;&V&SUkGQC7=V z$K-w64;iKX6(K5A3CwSLvFaZLwR7yRFXx5UTuW;$OM-<1X|OF_`buNKkf+Lm-df9S z1Y`7J0O@KnM+n=ejj@_Vm}VV zSO|1CL0Me)t9;@F5u#gisF72?hCHAF?s&9=)e*1G!0U*avz&ws& zE3(sOT%vLs(ouqz+unQm3NEEiDns9mkW#NN@*2{Kt&pz^9_8hzr;<+8H--%dW>#a6 ze_p!yO#RyObJNx&STfZVjgrJl1DsyGzSy`pFZ?PpZ-q*Ezp+#}I6`tFLTr)~C*wa& z-|=C4;R@xV)^66TxOVOsGbM2>vYz!7j^^a6X0s1BVKbL&3Ck^Zx_x0UGN~8L%y=*TmQAw0+oP7@UwJg;#lL16)R(7q!S7ytvyCYckU+01AU4LdMfshX3() zmraF`2*kOK0H@B8P}@y(cpdNW^P0dt2Fi@}zH*o3>AqdoDK*DGpNP9nLn$SlUw2XB zp4nZ-oo?(x%%$OfKtjE*e#r&lTaRUm??YhhN*ZKpc|>gmJ^uVn#K$pw!ap)e(N?Dm3?hmB3sdh-zvH!1ou_NwLkp zNJFHvm?<=oG=UfX(!7=H>@^hkU^73#uSJj*dt>SH&c|KP@vj$;L6hrrS_! zGLJQ+HQ3oS_$~h3TfYtUrC-`rkaWNCIz~JD-gtGuWd+<>uSwh$dF~&}30dEN=JhZs z?skM&;bJP*n^Ii$&GZ%6H<)m|bCAG$RJ{kx*?iJ$eS}+7RUQ-eSdkB$zX#HrZ?^-peTd(zy} zFf_Z1c6_cbJa)t<#hFCvQ)&Q(CpoPUQEo19vmIoI2kzcMzMfv&x9p=3>WlVRcY77IC(p2W&@x?(BJOan4&MC)%ljsMdig?zef2A$&oa*8>{PZZGQ1 z_PD=jGwSlzq~5?g9W|L1+&7uFZ|+zAgZS4{x@JdGglz;rDJ=xD+Ytf3hBE;k?wS;JbRAt6K`b4kQvC$T=cb%4x{J<;7*P0J&f+>e_2`+76JYAJg8C ze3Le1!)vL{v{F>>64$@FX^lb!nHiUq@-5oj+t5rTW7swW7#AO3akmpWTaE(|rwU`m za`}s)7emioy)G1;KkVI|`6{wD>S*<*h?pUdBPIY6ml9k6v5I=$SPWx&7tD}jH8o?H zUeDnoUFEwh_@G<@jJbXQW&3}>0d(B|y>UvKe(N2-=K8ZewPDZ$bn_)6GX;mRxuv1gyy-W`|;K%?RA8E>gob9F0XTmOG0Hp1c zkQnBZw&#fBdEHpe2Sios%L7r7{cX;5SJ#EKfF!j8U z6YM_S^56eI-HCqP{xS18?Q6d@%eZSiaPaBOh5zH=e_s0Z6(93N#0{|XbD zc^xYTXnhS$wwHB)R+SkdrPtlBbxrvAJ*D~}${`p&ck@f<<+Gc_Y;ik6p6v$NGMzb1 z$*_lnYRc9T0~|}+{z3dQN9g^qbZ+aiTSFUC#ZQae&e-P74pU+c}-Z!UC?z ziLt26BLx6Z>I(p=EnI3lwWxiu?8bu}kVCa|JXZW)E*9X8aToBqQLb+XM*}?p(de3> z^2SgOS$Xal6{D-RUrih@Uq-jjZ@R`9>SQgj=ss>u*B0=vrL%h{YbsvF-{4c5&tp_O zWtLp79)NxUd2eAm0$78@fRb$3=LKl03{*8O&}-HiSOoIbesG}}O-BLD=cmWT96^eR zLdQ&ETrMnM0Hhuv7ePcjfip+p06;mD2&z0ZH+(e$fWjlo0SI(~Nm}bbBnH{bUH~Lu z@<$fPlr3#RdwDbnt@1}4%>|#qc*%_g;fOl@QLDK@knJc8^AuaUZqE_AAi6A_%#+XN z2yzHJsac0%C&g^VM@=?M{+J*BNb3C9eh>s+pC`xx@a*pilQp0SfVw8w%d(vX{K)%o z*V2#{NLbSjfoFa!k2m!dAi)g-R%Tj}a%<(!vmRws$J`VUh{B{`Qoz#zT=>%l=vV(a z`d_=Mg;UuigA90Q!cPKo*)$zFnM=dU{ms2!On4hsEReoVB`@64dgGJ!9qt@Vego)& z&;n7k5LD|&x+4gr%0aT+jsej9!`#8HW)NLXY! 
z5sIq-Px9(-zGMU5z?lefmv>e`Ld1f&a$m(~7g_^Jcno{^t*BTiv@QU31qeb(w%iGi zIfN#19aOMH2(HEU$yek8338q^cLHPtuqnw|Aq`#cGECnA04DEkv+58A2Ttb+R1)*h5h_MfgjHzm~a8X#VB>zzD$K+FP}D$4EsBo)yKovRB5il)-BIQ|D1 zje=XcPJ%~6bFQinI~YK&HywDsAo8ENz86!Z_aDAr$oxAWkm45F{$pE&z4g+h^F9`H z4Ek?hAA9c*6a&NeK0E zd{TPHj-vxyGCllMo@r}1aQ$&H=16gZ26cgbsU}^+}VKhU3h6S{J}2> z`95$VclDUehoP&z-ZaAbGhx=_>Cc~Zo2`)d*WaI%u@U>JR3Ebca}9g8M?4nQoAB4g zPmc~)+qy{;n#U> zytZTf^M~OhDb!r)B4C9(+o$YUa%u-{4J)^?M=3djAiGch0<_J0tfBSR5Ob9?`*69d z+gjTP-LX>P5nFO$bvV3$QNFJ0kA865Kvi#JkuD5l-v~ z8>ruLSfZulbWAXX4FSkHEBTlP2%{qrcdWdZu#+G9MGk6pw(?w6cg^)BIVe=294E(0 z#tacZ6ui@>l4ZX?cU^Q>COaSl?QaE8bqk%e^D>xvaa|gBgI!qA0tnWLvd>6<{$gJl z*7GO$v(NyK)fj!{V>K9K0LlT$_VYtEPeL{|L=vEoTeZeW`(ZJ7TUiUC2VYim%aq)C zGLW_Nb87QBDV}tlX7quNuOa*^+L=VF-iKW*&3lbrE%g^8X`LIXfH?Do?X>Aln@SZS zkMoc|K#$1C{?Ym<+=e9|J)F*);e&x>&> z4&&>wR-w`cJYqN|yrz*`x3}(V-M{v+V%qHd&~(}FBfZy{N2;41Q)WC}WeS0}slBX{ zlirH9q3zlGh|aX~Eo<<**>!c(KZtGz6k>s7f{&GJoC@eESU>@8yv23Jgc80cpaUG~ zYHnC)$Z&B-+#?GH5s&dh6kWEd_GsQ671+B*T7fC6>I1amB9=hljgeHc7Tq;H?1cY- zji2+KMaj@2o&eu?&PBy}2HcA5=^j8sMnB}*pII8Y-*%(arU;8MHWs>HvU949&Nh*f zuH=3NJywiMk&Xc5svgdvaN+45%PE;R{$^s>>%5J5OQD2DZAX8HdTRiS|*!~ zpsfIk&p;ELHSn`!&Y1ZXb^^<=k55izr=ETBcp*v{3uyR_!o~}V^X}JyGZUhS zQ)HFaa2jn_!r)lH;-=ige{s_Vv27sx5aD0L>DAWv#TAA~5&lq=+M263@K;`uIL|G< zGVA$vpdkPWJ>czN6rE={)-6#m9`zTNbKzY$+t}N^rVJJXxa^1`d6oW@J$c3jL2qnN zWz95J8YU>7vGjPLRiCRAdNeNjG8nx@xDVL4IbdOc4cG=qp%blzh$S!iTP$d?)0$~F zs8`p^h&T&f;KumKtZ4c13f*72ZCRV=H@0CzUKwI_#BsoR^e5;YDgPj}1MO`3kn2>E z?3gzE_XWT%JG%DjCN(s~+{ZtMrZ2$)f6aCmHjCw?Vjr{Jg$dz4OvG;|SHwLav9N0J zn@+TGY6Vqd?#Kp=MC(kal8}DI1mM>;Vm&0}Ld)3l&BCCfO9UG{lx)zht)Xns~I}xJtM@ES&hVB0TDn%NrXo(Y-$z&H# zuW#Sn{VXw!QwZ;scfv5?3ZTlh$5qVgTW;8+S+-!!L<-rX(5kQTCQFQF0`im4s$B=X zI0ykZPuf%os)iO@Grhs`03cw5y`#R%m^yS-agY{>a5M9m2L;;?17nDM2wUitZ0J3~ z7kt<=?!1If)}|ez0@hDSH&|i8`Wii;v~tL$8yIDZ`zm!e6y9I&UJrcyfUZ|_gi9F$ zAMQlt0YNKM{>S4T?VGct(FMQDoGyiW0%Ni^C$1vwY3ozSrbtPJJe6Mel{+)`9;Hgo z{LpcQ@}>a%^vB4Lx#4HN9l~Tr{j&GeA74m!cjtAY@a-Zz2;J{6%T)AteMK9eTSts> z_c!Ox`7Epg^2h8UXu*~QgCGO}JEr*(M1Z)!%m7%vQqEiJt5KQd z%b6Gh*7sS6hdg@Qg)dFVPcVmo&S{;D46bgC`LHS|z$xIUHo7pF&Z$EDK)HG8#f0Vf z+Fw@s2ZP!vc?O$IHoprQb?^cUPcVIA`$SMZX|7XN&SQLH%P z?)&Mn*USomi4sKq;E-WeP0H_iPujJ&YT{;v^QxdH!W48`3T@5VRU)nB&%4IBpYFH; z_*n2%Ah75g0P%Fey0kTadiZ#QM5419hbfF1L z09Py>E6X1l7;Wtq^4{KhHaG%m&BL+RsvYXj$@C4XR z*c!uYup5wgU6ycbWk?%1i{g=9PKK7_-E3%zI51il>0wZh|JitlAnb%}f+nvdmlYLG zc-Gfe?hT^BU9UG$d15 zq6`#MnhRJ4#I;|MNgTqzVhqVEyD51ML2GlS$LpSgc*x>RF($JTL`Jd}1NB5#VW+cK ztIa%CFPV>fJn%lG0u!rfkINID;4XuhWVK_{0cI({VYfh=So{bB`bM+H;78Y{+QzW= z+i6Vtb6cC@r>$h)1F{xEenox>vMDsHEr>zw9cUvP=g}J~Xt3$v%Uwp`S2I7L)9uMA ztvyVVM6?n1j!5=)6G41=zBcE+7?$l-)hOFGn=~ve;KF4nT0EQDgSEd+?Yp|R!Ilzf z>y%<7YY$H~V3L#I9c~#NK^N?6`<7=??G00;+<_2EjwoG%H5pKy9daTpg53PttQ<&e znI8%3QX*>-NGN3rHq!}kvwfPFMia&q7q*lk@mvUxj23@DguJ526~lm_cwcM} zfT?oKX80e(;ZSX&3jDU`luk?)vD|R%Lqhvs7s-ylGBD)i zLRQMkaxet}XB&NFGIvQOnQhO?15P<-OD}nBk+XKQcqz}h#D6JytZE_+up-i2qz}ruSEp;N!wyfCO$Nj3x z-t;XeSK{~MT4|m9jWEW&V#+Zx6CGoH9Ih|aq+oz>u4Zo9iWWw%@pyiNEx_qyUa}LqaF~m3O|2V?3h5690fRWHCT?`bE_nh|%EFK{ zt58M>k=bG=;w4LUSXoPkS8|XzKwR0%Fpz2)AMOU7HO{z~M2W2yoS5-3!yJVpVvv+X zR0G~N_C_=qrRob)T&go3&MSc<4v{28B5n~};j^OlOCl)(3Zg0b^&*x7sVMxj?UEp6 zb32}nBd@73%A%;RRHh81z)D6kHkNELdWgbqM#Yewv_muo06{Z~NA)flM znq~S-p9v?%N?e#phF4r>E5%Om8J0@@YqcmCRoInhh)hie>pCggb|xv{V4f+{9TsE3 zssyr?vV0k7gQiO(DNqE5O6j6aA_iI6_H`JDxWq{_&IOhEy5ccL0Na^Kbn|6HfCq?= zR$+q*uhf=zsM%Y@sDh~=c^SrN%;RFeBe&1~p~I6Zz@d&2{Fyx<*s4wFf%pUm>o68v zXM`wY(FC?{JcBw(q~QE?8tg7En3yga)68)}h+pmhIJ<3JX6c5fqp29NW zI$Uyd8J7`|L${Xr0fstyN@t0mCk4oqCTsGoZEQ{SaEt=lE-D+YHXk`& 
z-R-fyPM;auTM3i3Y=3!PNVKQQiC_6N)CkLX-(%ys>9s5|j6?%kYIj>ZqEZo;tM##WVlGw&WOqgsyxTu=r`yO8@R3a3?=pY1HL4P`F zj_d3Lp3uig(HteEHLMhh2y)r0a`EbiyPUaCDvudcWK$uNz4A1Q?ta-OzSvxWaW|l< zQGSue_M=(1pZkYD);!@-v10os;ff)TFEvIS;S2*O0VFo4Q$;;Hk+P%P1iw4$IoiO% z{dw6}M8pAk5`xtrP~;6!OjwO81Qiz;HSq3~DT>r8L%0a`>QH922wRi7O7mwItAoUs zl_BIzK)c3A{U|$dH_R|={?v4goka5UlS+hAU1eYEew;!l=c)ZgxHRT&>dTlW$C%v7 zArnRnT(*J9hh4II#6~!8I6Kh#|03i`ithv@)Q(k%A=Y^YRg{r*H zC7E!>%cJAF?Tik|z5*Ofp$%dXwm@V>mFwf8!B8Lo-5wKzdShP}Sc(cSK4;sP{^=N_ zg>z*yHPLG`z=nf)2_mn)K-GNAy+#d`TiM`Yw{Ud#)A#03NsIy6hrPf2fsr zhwAjm)kr}yn)oEz4ksP%w?gRCT_x7mQ9n%FurU|Ef)IsVHC4z{G}ZKlVEDt}MtHoe zTBnH2km9B+KCuH+6CN0d8y$j56W`>{2a5+byLuH^V*=0yw(NfzSBkrNfoPUX>y%fe zV8bN!W2$9l3n0vQAjKBK&<%o@ek9mcdWJiuQOCFXe){Vb9O}xj1uwbECjx@06bhUl z1QLmX0K5Pzm3HG=5egHRjFIKDB11T!*zm-uoWS8MY&oMIfZ@k@V%nTK7>IuK`LN&l z`GWd5XD$26%Vv!1&@4ecRo{jWegk<%Jc%HQrm2E{ zAbiadg58OTk$n7`lSw#FVM~*3+%I(+4uNXqs$k@a@&ZQNF|s=Vuh|qQvObVqOvHEN zpN4fmgMAn&QKr<*EnyoWaBGr#I*Ij6=Lfo<;o0zFHHa(IRfcrx@ED_K*U1JTGj#kY zJp;gF(hJ0Lf4*j@nA3jvf&C4Wja_Y&3{7E-P$@&d_^Nwf|CG}5hx2P*xppv=ro5sy zy0HLG2Uwas)s}*eg1im^UrC{IXHcKqiNbQeHd9TdSYG5QW~G$>S9;}h$6&cJMZRZw zd67#qj`CHXpW-mlNs6_222gLLieY8%0fC|n!8wxbm1K8q#DhYwHY8NU1N#dejNCS7 zTb$ad(Z$M{HtfXOWbe-tKgygUM{-Fjk z!wu}Cz>NPAx_|nx@GgQ09Rlln(cds;fBb>c9&5}B%D1WG+Wdp?*1j>{w4k9l`0r_y1UI~>QgOXBxm!d8# zK8=|xb|Ap@EnOwJ91I$nA}N!U4vh&CeIZ3+q#XDQ55Y({K$skS<&2vFI#moOd&{Je zgL)==R1*+d)Y#TaIzc?=|p zCXh(~ABhh>pve+_E`6Qj7;bl#-${{k`;k{R_r&S$>nquoP9j84~jqbhL6tRGbE7Df*Ed;r$H5 zqr+S|+f5E*{7R^bIz1x1&V{a6-r`%oqVVh_^1Ayq z=qcU1Lz&v31SHj8P!YY=l%eB0<-RCDso0vg#%{3#u==G|Tz`lRu|_r!O%PY}huKUNLAmaOpLJG@OY}Wogppf-+Aq#`qc|PL$+u zzfA{9FU>ftjPvt7^CQoAW4Nc%KAF#nTWp6<%2(tKxq3KSs}d#mfz+^Q3qg{Do7Ph^ z+KI-+BAy)&MRR3Uz8?-{!>Yk}Bt$%Dknc3rV1j@~ktAqC9JdTL?_0b?d21WlFcgMK zlJtbe+Pz3X1H?fwC1*}^4I|_{RSdelsZ!@o_N8WI_MiaiCOlq_jD4CSc~Di7&Yz@U z+#d*PttEOY3$d286fa-{RK(xGkHZE4ju1erNz;$GN*sIP;o_fSwUmOV+0b2(fiert zEoI+$)Kc*RLlNX)PuJn8=uNT33_NM3K_^QK_FBg0#7q(4gUD^EK@YAIEeuwD_%0vxFYU=o!Wk{6aw4^?BR?=}8QkV1%o)lb5N~%_0PoG(6@U2itx9=mye_GBV$^qYT|XsW`2r>k z-7nF+dfn-nIai6J!=Xi6!O!`bh4_c??1=C!omz_BJz|y;{(=;0YI%s-ojD)qYU{s| z9D?#b{&5P;w39i>Va|pHJ{V&8reUoLP{pAE^ty}Qs(y?|iC%-e@9iAzztoGn=&(~Z zyFQB`K{liwmZp_QHfnut!V?F#X&J;gUGuystA=ETan z@XJP{Ef&)?kIDw{Rr4wfZ%&v;ytFW2;h3f)!lt0tpvM8d zEc#U-uK|lT!NYwFe)#w-V_1Z3(umLf5s_$ZjM^YYi9}*;ey4J)90@{&lz@!s1u;#g zD}%Wopn#aq$m+?iKv_q*hL;YcZnEZiNizygi%A_C;G$lU9BqE3NoV`9ENT_vs>eCo zfF=gc#f!knMlV8C;$U;Muo?D6fe0h+kfK z4#6PN-FgUsQs3S)>mtVa6X}RQ%6}#zBjSZZ3Ihp&=~(TPe@Xx{DyDq$yTQ6Fg_#3T zgR><>#VrBj4j=-iZG;qH4aTYqhcuVs?^!rnO7dMql}dUlONLnt7J5U89YCZya;GYl zB&TXwX2az0TGlVI!R9cQU;=!)ApVfpA>MO-?H8@Adc}s4K{5JDCLh~sC5K;l(aMkv#H74WHS9O5Pc#2 zZkANPG@^aDw(gi#zBqM^7b3m$Nn z8Km`#jDsrV=%5hZx(|YBGSs%{B2)vx*F>OAR3lul&y*DCk0Wk4nrfyPHo#8Xd0B9m z<`aIo>_{UO z6S%rV*WI$##uE162{IwtT;CgZFs4yV#+E#=QSK_x)? 
z{e$7{-Ucov$riQLTv&|kDJE7zA-?h|!A*3aHk%g)lvi~PtM5fT3aZ-x!K;i{TZHbG zv`>afTchoP>U|#Iif4dMWXEXLCjb^Ovfs}z&=h?wouqd0cWi5E(q^8x@8i2#1fZ9g zgPpZ(wn9E-d9zS&hyz>*5Nr>ocvu&|)Ju5#TZWQdAe1O%d>Z_W9< zp>Io@^&{Ani}WPdZaxlCq-Rxe_|aqI3`PU-LFOziOAXlp7MZ;MVV>b%YW=e@bF0^X zzmq=R5i9&tCH*7jM{n~JTYJphUbYMzHP*(@kNx!1aC3>DyOO6btZtEn!r_>NYCd2< zr8s$VnJ4_EPAyYRe^vi8F;p7yc9(@vBMKPpH$-fj&GNX+A2P|NQ17A7gnmS>?(jaq zdqX_*@G+(JmgYEly_%AK_?{xd*(9ZoUnOXFx|bpgJlQ-MO2E9P90ny&OdTP~DPMMo9~XRe88YY3_| zs)+X)4#J?2v~W}vQ_IWysERUjq}n8TZvW^R55J0J3nfR~OETt7r}r46)dDz}d|QCB@IaqCa(bFkXp&`!@KB&N3{9Z&Nt{aFX7ubFmqKu!RJO5w{AUxVr#A zcKFNNdq7Y5gXvU21qWN`1!8yKGjE`0N*Dej&(IvrTC2Cww_R5mBr*Z&rQI<{tGU|ls;_2s;t@A+>_v0QElh2z(M@svF z^qakh4*IotW_y~oNy`G=Sbph1uKlX1js?X+2z#4G{!{6K&h4pe=ccR zfKt+$B8`qEUO$TzY?g0nr$ifvxP2&YDUshM@i3U6$-uwBg8@(38;A7aM^$fy>?~|V z?L_9SoS%9XXteGG$Ph3p@2O@qY^#ACqMyl&K`E;W2fG%^27p)sG@MnnQ_YJ8PcX5D zT;Vs2MRFc!DD$XQE+Ez+DmiLH0z2zqxS$a*tuvg1-SwCDpL@hV-Nm@eONL)tGvw5& z*7H4(db*EwX)&#P_4umrndE7-%1jrEiMD71n|%XEOb>Bi2|hT?klvFjAJ;a_=t;l8=FGAPT}f6GySm zi7;gliSF!GXxk1;!^Uu?U=Qb*fH23a+`Eqj$ww>b^k{Nw+UPj_c)!EyT)jxu0DK6? zX(r&v1+qIBjAD4isgNr10mEkFB_LllW{^R}xefkgi^=r70p!=bZmOc`#^7%ceBXww zlxAMBS7L&#HLRh+Gqq@}w>IJpoxB+4aP(VxS3bcp!7+$>8pU);cc}$MAnJ>GqkjRw zuEJKdJ?JE+pnCbPY0w&Q7Z^xFg)!g4y_~aYzmmr77DL1e{YGCm7R1S^oH}|FNfa(b z`xKwZ4Aucq?JRmo=+nDmJ_267A8;?MVu<9BYvxhNbB~iOwF|UOgJ5nr>-_ywp9fUM ziwC)|@rE}SXcrbA`u*;$48=s=5IdHpp*GA@MGO5|Alxs6Tm z!--Z~b@7!-z1lgH98*%9yp};sKLxNOY3HkCulebQN3xdg$^jg4YJih%`*c$&euE5h z0%E>jm?!~)C-86@>)?&-6&*ZC`xG|=V_vD0jP}7T4&CRA=e&Sj37zL2>Rf6Uufb^G z2O%OuCm`ig7$Y8aG6!skCqQh-Z8xyA-jB%A-u4rjh3tIDp+>Qf5T@idMfNjbL-BV% z`E)`60F{9gaV<4L)Esu$%gKjS07$ZYpn8Blar{%v-A)L1eHP(uHB17-g#u`0b>m$F;`yG6h$>Pr`RVB>>nDeHSM;S3i zRZE9xyY=qA(DD%uHxRx(!7999zaYCn7tb$buje|QOZD`l76Y(2fs+}k~e+qaQ=fu1JV@Bn9N-ircO*>R`~58ws_Oj`1Wel=@nFIa@S>4gx&=g{9T1R+ zIR~}_ngF2n0KqL`{-SEura?PF_C7m+U2SkJIRzkJz(2s>eNq7cUP>r=m8gb669TZv zqr>5rh30M|T3Sh5K-VEu?aR0a0r00%Rn)ca8(_!F%LD2Skpq6XH7dmc`ev+*>>}*ux2% zAdpZN9nOV!-yuLJ)FLMA_1e`+q0{$t%PwR9WMyRG>O_JXIA^dt0tk$ysl1eu_Vkt! 
zV}mhIT;Nhk1FRwJ^`&+tNB5yL^}r!?4-gjNB}>3R?6vn_pX2`5`@X`zzyDA1Kehhr z%X_KzpZ7iG>OV}Y_mryu_bSzYO8%?Hzm7<5yVC!>OO6^^5TW;tF*V)k{41*xkgxha ztI|!JkM2>&JCc~gnPf-)Yugpnb?oost>OVr-TMY^$dQiQi#PI{*qA9v?vVoF7w zVt$(vq+#;oW>3w?q^16={cCa~m(8_@eer*m>Sd;Q+;_0ryAmC>w&wm-bfcK8(HgqI zB|GJiQq$g``^MVuuqV1&wEA?UyQlcuJmpeDf2%lQhG;^{>xSj}_zqQ_8~m-McUD-D zx`E}ZHKk$cqN$1RoBmxy%js_WDzw`k`Q%96&Bwso6F>YZWlH}C)MespS6b^-gSraG z31O>r$j*gcjpT@@$H<&*LMboXb~wY?-)dh~J_!zzwqqst~z?HjcBHTk@~F$0?zFGCU#bzxnN3T=ulqMJrt33G~$G@em5 zSP3cW@4#lO%=Dj11=l^Q=Imhh(>qlhN;=)%=f;g@vAQv(dWUB8Ge-|G-Dz*XwATN) z3s4zXIv=8ZhZ_J9DS(y4)1>?SUbBZd)xYMh}I2*#u_4U8XBK$TzsQ zi-y>C3evb9Ic-j4T!XvrkZA5NnbzIc8J2CFd$*ET^W(gN0veVhwkAB9@1*)ukSC`3 z%~Tq9ay|A+Qv$TseRg)bW4DRzI_D?kjxSdXSY$j>CKiG(n{>Do52+ju z#kHC)a}=&(M;r6vowPX7+0ym4>H|l$kkFZLdIUsO#q5IY6(L zqPX{dAYfnoSM;SjHZ)Vu6GS=yEdSAnbMxG**y3tx?q~tglI7ULneWv}4o!)dM)khR zmOete!h88;mm-}W=~tp0bBJe~T?bFk6^*pY$IBbvY>g++ZTERP_f3#o-1a=-5~y9@ zv!AHrrd=dA)he&gr~H9(zI}in*zO*Yz4EDVg9f_uJ1vp+sD^weNrLOuuO;3L0^Tr= zIkOnvFwPOYKbMYZl$-oQ^+-nZGo2Ff z=T{@21Z)K**4#doPD_=zBZ9v^3JOx@8XlhwXiwv*<`c&VXnmR@M=ftl$ zmo?Ze>b<|N-82#kRJx7T{{^XAY$rGUy0IO%cA=j6*sN`~BRIWfCBC?}en;C9wcD?0 zMr?U0Hs1^Ls)~vj;FzzU>gnO~ZHus+pw2zwj{3d!7Zf*jO68M<{C(EtU4l9YtR^_N zfyqb@EOYQAygX_mv9DH6x$$#l*1k}fy;hF-3z|oXlum!Fl3;jAWGK+6*WG8*dPlcS z?w(eY$QNdvsD9XbeVxzq$#f71>f){`s5saDj###y>A}xj7kKaUgO7%PfzxB+(`=pc z(}xPsL;E%da7xC7FRnz5Xvy8(Qq=B)l(6Tiwa0jn+R9~)WxJU~WyZgtrrHc>#W-IE z=c&khf|+(LcOEZ^Kia1ytq6y7o=1^IOUuD4-29HQZ&D7yZ^}Ei8VxqpweZbatWL^T zZ-BVQ9dVYs!AYC-+3H(Y-LeS|QTQ-Nm30O3(aBUXnR|I^`up|Z-_S?_wPUPz<&ER@ z&-zdT&sQhT2LQC4k&nTu(&6o!noWJG8j-Jw&DS=4?4J z7e4y4$_&V^;94=QzgA{x+fh`-s`J(*WhA z2T+eHXJxMu8vHX*J#Xt)34twhZ~Eu80_nwZR|3lS*D-fYa$j}ba?ts?GCv>J#tDb)bzaX} zdihyW3Gtu4`hBm!+q$K5;AcnRv-Pm~+n|3>rUCZPl>696{WS@#>t&^b)ipL$0@n&; z4r>k7X2QT8ciYJngg}+o9ys3Y&~# zuG=U4WhBZm%*DBMls!MaLhyJs!j>~=HNx~*7R6`FQi-a5m2wHbNvyQnOJhodbMsYB z&2onWbN=hZ!JV?>IIT&?h0YIM9hv`(QayIvHGx(t*+gq|bDjN=3-21f?5%ZZE0?Uf z%PHc*BD81DULjml@$sH!(klT8{1@)9G{+Vz@A^qEu{??qktKd^?quDhw(sJyRfq$( zr-Jisx4xH^^f!rq+>DJ*30s#yNORiMV1Fvz^M!7O5GT z`rqqn^E1Ew-|GfeZPnhEQu;33aMg;_z#b5{qJ#*a%GTV9bN1jAyG4{3E+jT@C=O5@ z+R#RE2zY}@5JpEngnuq;Tie445?!k-CvS_WqYr$*C_+NMgvATbuUX41ml|IVL?fFQ z_HXss7H+_l6-k2f194zd`?Mx%L!sCJt4k?+g{L!zwwoqYSNg z{u0Sk#1~lEH)ZO@P?@kyyEs>69kRHGQ?pE&_7~JHYO|WsY?u;^Jdz00oLv_2Zxbrduh_GEvE2UCCMxZ}w~){_ZOZAAfqMw9 zu+%4CJUpJ9Z2pABbFmw~Qg{*DUcI5!D8CdArq|JT6GgsjPv_gdB168fW*T`3t8{|~ z*C&2BKoCaNmY4gt5I_!dXgh_{nAM}yT76)Z7b%q{)dmX+92Zr;_9(?4cb$>b{skG| zocHuE>+(f?(@l!ah`-#n8?_Rob?UV7C^dL~ng4NlC+NXm68-@621dy2ic=k#{_a?* z897CObuTFu^@yu{;;p&!OgnPKq48ZhVvgoXW~y1{ez{(UyS#E{okLkPd8i?(;~))d^`9CUN*}Et|^;ru5~Z8g*pFpPmFA=SR8oX)b+F^wP@9bTdDC&A1cM5O?N0{ z1)={vW53a0?U5^mQEh1#M+U>HXvoS9SDNpaHA)Tx%H5>-tw%`gOSC(c`qla82UnOK z5`Ei40@MyljJ`-8i%xyRWxK=#A6sKe<|jvy>@oH|-!CO@Yy&5%m8XvDh5CF~!fk5S zaucM_N!FbA-m&+J9tSVWcdhNIGzHhEx7#MxU)C<4iuoeN-?iGP9_`0AE%&a?tjWKk zgcV#C;kVD$AEM+-Z9kTMaXfxUYa-sTw(fM*I21Yk@&>W8e{n2SeHj~$)ZU`8Y;|(J6 zu@FfwhYjx0K;O<6hBspt$B;O3<1+umUC7zeC>fJPw{-Tj=tdM=xJXz zL|fJC^tkyblpk$huS-ssUzJiV)*})9^V7dI*^b#)li7%)0Hk{{%-)>*MYxdAUwhoKIPw z2UhKvkt*)m_!#{KnOsQx1-d%Rzt!1>TDjA-sv!>r$mM#ccFjc6 z59bz-H())*GGwbq5tjZ zAP!uZhaK%1f12N2R_}@+p>kcTUHf7s>dIZCz_3a$1xX+@@fVbLFKaH8Qk{lZ7uMUE z(wa@2ikgue>go=a!Gw0ssjVL`)CL7sPM*&;xgH*?4}5Za*Rg#k+~e+idtB#WqhN}3 zYv>S+@O^Oi<@n1TZD7VU4KUk9A$QHk3U97;BiFmYI{cK#KJKe`O8;0_$~;OT~3o}Rj=Ih=5edznc)^c zz+O&KCcAuhRq+52M~vEOcW87XhW04Oet0q=kb*a2cg@vqJ;>y0_J9}lILU zzqUo>Ma;`@kl$Ux(sWc88<=R`Oy7K6x@C_%^~D+ho?c$8D|Py2OkJps{)4+q-Saww zof(&gWu-4am)yE`e74sl&J4c%1s#k2zPntI*s*>27t|m~c>UD9&M#R<0^c2rmMkc? 
zKJZiNRwFgc(O@@S&)4gK_DlfC4l$RCbsJtkG|T-n^RUA2^mZM*@|9VyW#>-N_hTs+ zqhqB3&Yoe|t2L*eDT)Ukz0ECQ_x}DK86X@hI5w5l99DdOK{@xT>ydcfm#IpxXG#6w!(-d3 z(R0pqFXWb{IPH((!eYIf@U7gf?Nc96TIut2^u5CB5@+aj5NnF8 zhs7?Vu)a~zrmg7Q;R6W{A`Cij6cZHW7mK;CdMweyx(EpSj{wo&;l}xue5vS;Wo-C@JQmR+h)Ut=l^UtRaCdnv?BeLU3yQJ@nuvS(rkZfX@Wt&1b+dh z^eX6oUdd%7(ywaLero=)00-x3R4{_ZbOsqoH0`!7FcRzaW|F`FNkcoy`;LI~(9?39 zAKEX@KDQ&J=i6R<%r%qom_J_70^t9m*L8tuOI2Ft@ZKB(!u>vfCRza(T(m8P=}YfCqMT zeV5IMO$C?c4gXB<2m-k|gRdhg?poWdqh47r4`A(zI;I@qA!oyUCu?i8e~8#ePPqM^koA| zLa7?!6cTv~02T(3#9vvy-+}vT2F~)&K|9h=xBt~z-G1kzd72%rV^v$czaVE<%)g-D z2>)dQuN4B>#Jx*B$`TFSZT4=L-s-y5l;G<+3s;BXz17^=o_C0ZL|0C(J#9TyWzm28 zbW;ibudfmR_u9bMy1GZbkABs!hSiGvuXV5-hFzZ@$(VMV)Tur-A+4`I`@c2)zqj%K zRr|;K(TI%cYS6zf|MO~Y3t~>guIwf!`Rbmb4X%+Z92uv!p+b#I&E^NhBcE)jSTYu~ zkH@R&hkjPg6{h)-2@eMFy=qb*VOPczrlKkck)N*hU(n~v62qk#n>Q<59~as81Z%T9 zK9xHBx(J#l_!Rp%bt^nYWT6F}1Bm2g&9zeOHmw*jUaoR?mLLd|pq#f2FP*-76uE1o z3ahNEn>Bg-^;QDID}p4Ms&t3a`6282j6VNgP%Che{@K}ua%4bU7?2q-&Af1wpi&KR zyenw4IpG31wdLft0Co&X1|r-vAQCl=#SdUOI>}zPy&>$=6_!7mPXd1s8i#mRT%2~R z>$)c#7J{#2IL70579c4c9@_J>+Xg+4uj-JpZ#1dMJm{Q!*8a(=oQM$V)f{Bn zErs_oBBXkr3|u*Ad!1v;U}B>a(zjw-FKR|Ts)JMYyf^^#gz~7(V`BNR=8kriYM6aw zyuq>{cpD)8$=D$i{?K6VCrQ_xMM13geFMRkL;s`hhd+yUnB41&f?z2ingwvn0GFF@ znF|~s8o6Y|c##qZ2E6C0iJ~LM4zpS`Q$60TOEaul;A2<0t>{e}{WkFAB)WxC z*%%56<$#pz1;J`+r@8l(5&!g9Cbbkj7TK1XU1G+^0FJK5Et-Xc0^=eM$7he#bp&1r za(_s&xkRgSd;cq@TgR-U5Fo>%A1;8$jE9DXZ50j@q}l@vN0&vLB0^z3J74K1zB6I1 zC{&srdiM&G%9VyvijZsRO%{?}b3L4X{o_c;3u%=>&$08}`M z3=-I<7s*<^&xQvUy&Hwu1{RtUXT2h#$ZK~M*%jmZ$%brM`@Z{bEI~Ljw41+>!AP!0 z6WW6wI;+)1qP<8N@Dm1YYkp19Kt#qg{dqSV$SW#5vY6lewnPX>mF_iezh}#HDMB+~ zoq+tE$VrWCAC|}rE`Z^wTP!AAip+$0dhXHn?-90aK^7)vI-%`po5>w6^JO!2oimS< z%w(;#D9%1QG-gnm&RmOP&hA!K6+OFoKQi4+T;z-8WMmN7V!xYv_bycPsrRu?j_$@s zbC@{cKjXi*fpz$w+bk9y^2lr~pkV5gHHEbv4KKAPYzC!OM@71{+6*UQ<5c3+74}bd?W*h=dPGd6my)upO@LNP=PdWA~BB z_lpoP*@2`b*~_Ijw0%}1!Iwqd-u-(W2ahMDmk7fEShr+jG4&oE{~v4X6wROZk18X^ zIoJlMte)BjRMpvVt9C3E|2z#h33t(+jYw(i#^ve99<^&5y*AIK8DWzE`;>R;cH+K-atW3H30`wwV897EVs6-j8Mty_~Lrs#A@k5L8Rk486BA@*#!kPiiz z_sa?zF)*q_!yraI*F_*u?fny~6(f(#`L zsuX1u*2t5*1Y`&)bi*yOp8%w;PbQV7E*M?OrvO63iHT$CV%c=c0NE&J=yWJv8kJ1& zX@+UoZnCT72zm0fI~2U{?ZhZT54YWD|#NK-L7 zP(6MR;u(;Aq~WdloKtIuSE!Rm)HgkhIcT{j^!m6% z!RY%MsUeE4)BS&OgvvewvnS6lroqT=owul`ZLJx#)qw(G_6w^?7 z>=ncEtfY!+1ModGmR3_}o5n9@B3~v@J`A)cYoF*%2<#o%tR zdS%jjjtTwOY841^VVI^125@aKX*UbKVl~nYIS8<;%E`Tj12Ybr{?y6J&XL7$Lk#S$ z_oJjGT*<;omdS&J*yG#dG1)Pjt-LC@2(8_;;RB)yw0DNMv5(ny-atNS6L;`6*Q&GF zw-*?FHuzOpP>yf;-b@_@%K_TI#hV+X}TWm4ohakrqcroZAM0LN_W# zh?|)|>Fajws>@6gM?mv;W}(`CTht5n@wqqq!<4p60==S*jK=fv6HP3uYZJK;NhkQJ z!UT~2mK+YSY_U0!0(^07dG!J7P~t&MYQztI%?b2v{#u0)9&u4w8?bg851tiWV4^1f z>k_^x3MF#+0;?6vnsPh-K68j)@^netCpbCueVHP1>=g~Vb+k0l75-=78SW~@nrti9 za?IVpNx%Ge+}8$t1IJ+i%%Fx?RL326gxHot|CKhiYeDv|Oniz!e;m(kC)Nk{kh zABHd4@oM?pIqfc-BWYR^bq`r2p3>AhGR{e$4d81%>MIeZj_S6yr2>?pSRHYd^|POi zT0UE63S>K_k7qj-W?yS>`^&}>ZKPfOIwo&kZo*u&|E@2!;0@U2SFQJ~@(!A=-yKEj z@yY|sRuu$Pcz}+GIaQMd=$Qa^5{+CU!L*jj;ShCi!V%LM6(--ib@%eJ6LhGa5T7rgj~k-^#pL$e?m zOMzK9Y!;6OM%d@hwb!q5G`>4*vy%5-u_=u7&OaPG**8&wyUHknBVjD2mU4Rwn6A~V zI!>DybQEkp?5*%leWi?Ioh1hAD$?kxUFRGs{*3_l7#P`m4mw`6UH+_0r{st4d*&Ez z__a=$ZO|w9$Z2NMUFNR2=wdmq|AHP@tgk&J%lGt>x;fpt#48=u88gh~H}-e#c> zq$fKw9(pPX&=>+BLtyMV11#V7`wiqF9D)YL^ERrC>7EBOR8`XR5AjbH6suIDdE!m# z1#k@~w=v3-Uf}3A8(67w#&WihFFrwk0ScG$v?x}@X$%Fp@^ujt&QJ|!urtH~2p!)8 zX}X?I-%P}P2zF=hiIylHJMT3bWHg+Txz>s`zvyZOS)}G5v72Xle0I&?>>s? 
z8!NwY_H^Jkpxyt3m=J&bxnN?&>E~aNs zD9_IA2RqJryUKK=DKi3_>#psnzt_YRcT11n{CHjiwin{x;<0Muj%3pN#N1`=J{yV` zOlO?_)YsAn(*43S>x`+iW>>KW1CQVDT3cYZN0I<1fb5xh;5|zrC(yh1FRsza>o{_` z?ned=UvQ=~%N*6zPauCNlAcJ@rBD!$0R*Y?m^)b%kMur!N?}U_^jQ8t8b39;f{upD z$w5Pa^Qwv{HIz&4@R9=(wBSFJp5BeBq*KdJMU$3#UHZ|2Rl8;Ri7`$_eUx% zy?b)dG(5R){~hKw%g?YeyUQ=sT>0Xhu=!}666R_9$A!43_CVLhhFkzFu$U>8m2-}b zI`7q<;@`24WlV9hs!q7u99*L5nADv5IJIJmvZE-!+8)B;Y|sAIb7HEkSeDh1;t)4*J!YQ-AXa-!t6@w~`5 zIY2=ISd4^&)f>kY#gX8>ma_l@cvdR@lw(dve7bjGtbmp-=P?`wuW zus()|%d6w))AQocrfr%RFNZc>-c_ClCWSPa{GM7{t!i1@aSczgv(wYw7pnJp(QMEA zOKVv|qsE-DQ=TT?JGdAoXzyv0?46nJ#Q~C=sqYYehp3#XqG(}EL*ZVsC zva~uOgKn%3>CGW6Q3>Z^9WyJgPl|V5Aolk!-_=vc)o|CJ(Rb_qa@8^xp&QtJrn_D- z{C=OM8oUy9z3l?%74njnKz10VPqP!B?KtnGIV?twp5A)CAAF%+tut5b`Al6{wDxS) zJ~~me+gwpVxc}R-w|%qA@TVC#50ZKD#wJSpCMPy+T6`(=UF6ET+9<$qsS+$pKMZf6 z|2)i6UK%~U@O`+4sWR-NDjh_Y)Sjs$05*dLknEBPkGXOk0GxEvYE#nZY=|vHq6fE9 z%u|i}h7~4h=W{ggN%;$m9Y35)^_&C?W;zyInYh@;xpDEk=k(?i^oDKL(__Vc*)nlIiIE=}^!Y}4f+STFZcK+JhAeXa+a`a8Cs;us44JdD_i(0saYb3RW@7P@@v7GfoM@@)^7N#}*=1uclC*t|&gr;|0}&I;_wah?C?R z3Rp4_I{lo37NPU4FK)#YKk?VnFizm^YQ5Dl`*V(OZht{ya~kTy&WIcJAx}>&uIDd* ztsTWo_pU2^YjV1#PLS(QN$2UUOXd9ZV=@?#;W9X`CxMDLcLfj&yd}P>{$5tQ$w{D+eg>p_xL+3- zw*dZFX=YM-HW2|ap3yHRW>T27xGd6V$;p4n&_AIm&?&|Mz}3&5S>CNG-^u1ismP?C zF$)RC=|bG4KM*DHs{wR?^J?e$Vo!;a!}idx1I79<5?4sPUdPf;XLuNY&=Y-fq=Yub zfah3(cMi{6rFh4!vA0-YtbJG{(L{Z$Z|UpiF5kt)%+{?+a`UlZ&@bB@&s2O|B4+Q` zgeI5hvXwX<_#ybQP5GICTX5AQeb^&T8>&loS=wzA5o6!Q-U*>3%6HYvykYLvhg2y^ zg4SjsWxcI1voRZYRI7mcjWbkLmxt+gd4)Q*PMAT>ZN^l5$&`nSI332yOJEd8N;h*WYqck+?tizyyMVyv%|LaihI(d(L z6e#>p{_F6(Y@>HYC4<{d8cIjuxm}H+G9S)7>Z0^#5-X)S=oMZ~Cb6>ia!|;Gl}qrg zCFL!y7%P5575Fs1RK0ps_4*EJh3|BxJ659yv!8U>vtc>iZGGNFqPxO0Mw!zd1GC|J zLNNk~O__#SKU6|_Fce(~iN-f_P1Djjp+*6m=+*V{9CU#n#TB?(1%NXY)r&;K4n9 zI+`=fd-f?9zZ4BGsitgg9Kq>_P80PSmGc75%a^r(LEsQ~>n5RSlo^qhjC-ZA<4aW3 z`8eQ)I!%a(F7EVhq`LJbY$*lAwr-(M5Qqmbhyj5<|1BM;CEA=dfhO_*BrCg%r zZGVCBh=aLti;X1d%%QrTf54*f$AI+41#Cew$%0WyC2yis3^RG-nJ*geR5r~cy72ZB z=e{)_ls2g9sqt_Axg7?B5>dGCAH*7pwM^7uv*dgpQ2s?PJmp=Q^^0}9jS<8$c}{O6 z`-xqNwEYHHPv)q$E=|kXa6mLKG_KHoa`Es7@h{A9=`xrej$d8lp~NNVqwC60B|pj+ zypt`8GbqihqML{&f0av+8NrC$PjI__@sp}-30tR73C}URu8X8i$h9hyf+N1UtxoVM ztXa=5&)&5P&>RN*PMx$Cp$w|?MnMn>)gs|~ja)5M&ZR?Ym@AE?@ty5Ufq}DmDNGLS zk!7{sr}gCDWT)mBA9u2$Vgt(8!n>ztcRT0!hiT-et^2!x6=|2;ZBFX=Nc zf@|54^_sl3l48pdl?I;a5u%<(7FJv_TowN1NBbqFGnL@2`2H>rMrKvU4YUr!!Rc=N z(SK&Z?`$jC(i(gD$2f`~*=NuMF`fhi7S86aVNgO?ot%)N^ST`m6lX9s4>iQIASAo= zCHVb!{~7<}Hw*=}d41CnUGl446H3^qI`2iC#Lu0myk1Dg(8!qZB93Yacou^9O}Pxh{M?(`dr4G%$B6v#g}A9aKM^j$;B2HompYzo4tu zSX(X6(D&$k%b4>16Np-hp9sxs8ndQfU$XKhE8SthA_rP|MqHkm}m9NG0pEilqXViIk{@zs$Wv}BN&j@4%19ah#<*ZPL4ug!!p z@u{~9XMorQmPC40`>c_z{c%L>$1ixvqprkp^{ek@wnc+p%zw~*-UhMT$E_T&_-Qh! 
z*QyonU7ghV^hW+26dA%*jv^%Z@Xmm>cYU!SM)va z>@et1lTXHRDFaQw7 ziEyGg-zUjES7mAVgJ&!+^>*xa>Jgp|NGVPT3}m6-Vp80C4(z_FbC#W*ALLLbM#&8= zMA3ODq7(;}zk_N{uyRwYhTeP&w&_E>TRMSU+yVX0fvl#V$@cW+|>FFn2QM zC_kdN%dmbtbW@E=9v+<1CU0di3HoLLqmh74SCnbm6KGCF`~~Sp2}Dddi@Z;ujds~y zH+kC0pD>}Lbi`-Q&$0aieI(AL&(Llr=Xoqn*>WyYQN)mFU|u9m@|WQL?1P1KT2~^s z?ggu(1zl||iIM!FkHtCv4^Lkn5B2x`uT2Zugt8`C%D!(=5h_uVeUN>b8Dk8CY^`Ju z8M}%oJHw0_V@VjxSVD|3V;xInW`qn9@83&(zQ6m22e0w)@VfWhbI*OA=Q+<$bmSi= zOeRGhzki5=-!*Sa^g`O9!}u>OO~ny+g~rx&k@{b}wyqw2;UyO!GGEj>CLIrsx-_TW z{A5j*QhkmoaW_0LGQyKeci28 z4D25U`{>Q6rDiTlH?>9#k2?VM`sXwGi@UDHIn4<^=zsU|A`TPQ5%#|F=K~ch&Jlf` zLf8XWhUB|0WBcq`6D%23(`!K+3~TMtzGnXFjPP_*&*9sjvwurmIE^YceLy}eaSv1I za3XunX1m27$LdJu4NFeAx)UMG^;{`Nc%lY}Sq%)XcNp`))P8-FQX6@c5tKBD>GnFr zHe?l2et|H>nxe{?Ja&Mxm7^h)fh)rr*#N!rc!Q2u_|8e$Ib#Nmp3=MMB@Syh_N#fHeu!jq-GZUZLc5 z_EoY%|41gS<>biJQ@3?X@L#&>^|4*r6|-SsAJulf@{N5cQ&Ey0pU>`# z8@W=AX+UD$jnBb;<__q3AumW=2p(yUHzKIGNd(|3`-l|#HhF?~qrnN?f0H~~H?ohMA4)&SrT&`nL`IWqb zJnJxDJDvg4P7N=rmc3V2UG{|LfKn>z_# zek_Z9*<O^e{-vPmg;41O`VQJS7sje4 z%_IR+L)@u@=Yobzp@#rN@nKYV^qs0nT9gCzBJ&SI4)v zx*xd=;RjjLI@CYYoAXH}sOV`*bDy zihKN6%)|SR{KxTr!r$?@Fp_pRnXOQZ^@S2ATHMo;YK zY%nw_4qN=4CKW64@6VZ=FEWfOiYc=o6}$Vbr;$Ntxc-wcQ%jo%2XZG?fH}(L)eP3Set=A1c9S+`zGp;4CMpQ;iuzvPbT{(Sl zpYt+T7FUI3)V-D0$dzU21C!w^vL1DCT2?#-=d7CKu^XTHVp~_C66@uU9W%=iVN?Xw z39u0}Z*;L&Ri*rU^?!siL#o^xB4N2=dX;3gB4X10Q-R}`SS-noC7v-gd_4|n5Gp9s;^*ytzT{Us4 zY4iO(SsRRX*V>3?{nV-ie~KK=9Ahdle(LJg1AqM6t?O`x(OB&G1t`FAuiS8{Zy!(u z8@5~z%i@EU zgL^&?Sk8Y5bbxAcf$!vzJq*FZ0uW;PyL5ok!ryVP^vtjk_g4!VM8hhnrSVE|-*S9;q+yy-*R4<+-WGob6EbVYJz^rrLF~85XYWK6Q*&@@uelh@k zRit?mL|fGxNo+)Egy=@k$_4d8CrvDdH@FxrN4)`!p0_jQ0cRZzuXQfnsjKtGk$&zZ z-J@%K(}^5gxG-0uxFXoU*?TBG38+i)N)OeQ#!`IkLRQF@9_1^KI?o13vP#0xs}cQE zNke#*jeTAOzo_O37w!D;psMrGQ;RAJ^Wg2X>&+Pte=0l_N&PMdg1vGT? ze_Wj*nz9d@U3%pg$-t2tYmzjovgb5wMf^z)6jrWu&y5XG2zY8ocFRP1ue%czC zli5qwzr1{b@6<6M0ST3%8S8sr5@e_#br$gR1(r)w6 zRQuFM<#~-f+}!f>vX0;Gx<{F^qpFkd^;Uj}t#w(uPKsVd&-n+}#c>zCuRoTbJz5k5 zO`E$CL4b-yqhGC>3&<$tuvMU7lVOcpl~B&mdf3>SO^RDJE*kj|LzD92OC1inDU|gU z965O17Q56)er9;=Pv3Jr{_5j0jYANLp+r9E8 z;=hfQ1$Z$=QxRr$i5VH?v`TFQAG6V4TM0RLDiK%$K-_Y28f=W~)EIcwd1JdN`LKi; zTMWOU)%9S*U*pHknvD}bzo_}Pybka#1v07y1#bcQrM+DOIKI7c8vx)Qu`9=c{i87Q z_sRdh?ETU|(EdC?zR51&x#M4UQ%3Ogc!l@_1V7#Mz2iJV0qM)JUmj7l}C{Lp<`zy%}8Ww~QA==CI zKB?{Ws&}9-Oo!$8V75ynvL`h+>Z&#-LXQ1vF<{EdjFl%tmU&De1752h9 z&i{miZe6Ca4pD1EO+bFykgN^pa=bLfFY-G@vu4=2L-eMm@b*4=a@ZUmkyfmigY}(~ zew}K-_qxt_kt(pNVLMKz+0MG?m9xO@aHL)eKGnt1ftm{n31*p>n>#>=OebSyw9l9_u8K$zbQicWC> zSSDcec{hPczHy!p;4YPnzI^l#=nUKkq&Lq4kM`t;{!VY;TgQ*ax7l&0-u6i0o>Oib zOurMCwNvl>L(bR=>os1M1s&rlU9lbZ1>ZeUBZQ&vg-9ewGlelddZfPpn(N`6*u1LAa z7?h{OfuPqdPc7=%7L`wNN zX4lLPyTXU%AArq;JMl_NE6|ru#0o$f|4#Mey_6D=viLVO?cnuyt`TS+`$19b;$WODUt-LXYARq?kJ3;8lcjV+r zfh!DGEWHY8oSs0-bR6YAhi}S}Z5mp=axhEQ*f4=EJhHP})4HX5rN5~Y*P9?lcl0Rt zEKG=lY@A)f(-UVSOi#Zbs=v}se;%Y8Jif%QMS{lPAD4IN&Fz3!PX~&uf)+@#o^(6KD@rOCy@rpdu;@)d6EnP=N^gb~oKFMeMvGu19>s-s86$ZrvZu=#t(~R2V z7%_)n&My1y=ySY9@;v_5`n6P|uG>S-MRhM8>^1A*8LrMGJ0dhwWW&GnhgQWKJ%y-C zKfklJxq2&lbDt9#g4TW@R8h3w{(%ldkmkH_@=!eHUu|T`p0a#KDcYv-4M#yhWF2o&S*3ynW|!1DA8?) 
GIT binary patch (base85-encoded payload elided; binary content not representable as text)
ziFJp((!2&vGT;hiW4h26U!ljW1wAQzf9XM-pya(d@M#~{U$mrfpn_DLhUoJ|nd{b( zr$}Dh$Y#D$&y)Gto7$YH(-Mgat>@~A^0CZx_7)>{H)%Y!Rt+2T4Q}40&+=tjvgbdh zl%;TG%$3>GwH#R8Oj?;3nBfhfXj+^n^N__#HZ>=A?IAL@=~F1*?9hY@c-zkG#}0%88S@m|Az9QB^Lc>NOF{&sGDGlJ3g7`E4}zBj*0Uq)TA@uOzDLK|x*=%-5|R9fr+b(kr4v zfeC{GQ6qsF0iSL|F+1f*l)F>Hc?fy>G8A1=&WDr)rvaMDo$~@Wfla&yIm4b7*6{LL zS=mZn!m`u%t8-D_dc_%glWM(0&mVqgH71&%!*er&uAS_Qlc6zig=P%~%g(%?wiy@C z+$6~W_gAZk^-pjV01`Up81wKRsw&Vup26TF{yZQc6 zp)dMaAwEhQpoL{@5k^qdh(Fp47e>ChZ z4p!o(y>DCPl`(@xy+iXN7Xl)lc)!}AG^(~jt8L-FpZBPIZ?iQ z$9obCY0OucuPI}_UkrSW(-D+1bs{vB##_2DgWwjdd3dHZ2=;v`8QU4*9MjW`R_y0~ z{b6tfFgVsxB=-JfTxP4y1wQMx8HZOC^23g0pkq=(@+r%_$MAcL%8N$kv*i=o@AU&E zfr9Rb7P!Qm&(%l}H z(Q)j{UOBst`bBF9$~v{crhp>D+3B_fhNQBC-670;gnx8Osd$NBFlBxAJi$-IOrc(8zb3qVX%5aSyfyDx)7+yQ!N)~y z9VO67}Pl%4_jz~0-05gN~|BP+2x7_is?B7+4ByA5lzd-qlPn0w&L zYDGjrA>+Uw9j8*%{dwSFIOz%@joM%`nJ}q(UzdY!$;zE+}4sjs%fHm;p?pUry_U?OGz{0F%)1YmVNq+9V<&A)-2T_dd zR>2-ZYq$<;1o-Cs$JhshwBpMAR(A$~l&ek+?mK1Y!XG`h*8AG)wcIBYzZ;Jzc^B`G zx4QC@aT!f;SG!)B2=kP0n!MUbv=a&BF)p;2=9E~?IT_~zruY8|ParhaHob^kVr8}} z@6lH-pmxK9;Y4bV8O4#IT?5{!3)y2<{w$zAVCwf)zhR$WcCK-D78WN>v@u3NsvmtinN)^(IuW9!TVFY{cg3yj2E}sHg$!%T1NwCrdAZ-g?p-w zpxT5N$Hv-r+2HRx6yoUfZd43XIPFl{_+7#q4rOO1;HmpW8y?v zDuj~#V+C;C<*~s+U%NlN2^FH;TR5mB52l*rC58GFY$eCg8g zYrJOn+qxit2}3Q%J7tq;3cm9c)UDxOcnNy}c|dp>5%k3TEjt03J#R=u)zMm!J5|_U zG#-J!XfNjMX05%k9ktJK+b%&krJ<|e3BfzS2USh9Y;%h zmqBWAhu|j@KWqu?Gq{GbYSvHz7xsMki^?!OCqe=g-y)8!v$kIa7GTJwnWO8aBnlfQ z<5~G?=_6}4^`!7|g86DW4xPFBL!W$anQERKnhZ!U{?a*f7~DuH-`dHaKIq2PldAL8 zG~DbD?@fw?vrJTyf`h3WG2oR&l2&2Zo+%Evf1fLuSBI=LcB(M5M{Ct64d;ZV0He)7 zBmK>u;tvR+;iWrkMWYiJZ$yC2!G1W%+`Raxrb$E&@KGrP=F9DlDZ&*p@f(z>UOIsP zRu=Qn#nx?M=@*QZfD^31sEG^y^0D&Raso}q=5}x-hU2XQPn#TiteLaPXm^LjS%0O= zYv07VmXy=MhVrJJSR1KCbNo*8R7p%exM=NODj#s=#|W1m#e^s}^(k)z2MeSSbja{Z zc2QZi$$R9(FHCF{xc$m&w)m;69msE45G!gf>LkzozJBn|pX1(>iR`;t&OjjBvy2w- zFW#prbq+mK_w_&DJUOF`G^4YVwJ0q7{>&eJ7OA| z?KVK!wNR8-%m45nn<-M=)L~ZZ${-7B*WD47707UgyivGZ3Uu3zPWshxtW;==$|M9Q z#(dTwLwf}1vmb5(NuBGhtOnU?m_W#wmDm(%_sIsY0s!Y}tFqJ8T?wwPK5WoTjATN#Xp^9zD5u)g7!mr$VQb-6VXn zj45-f2lpElM|g2N3s*dw&r>0SZiH*(rfFDK4^E)0)T1?Q(A)4Fu5SGf>gNzrylxB0 zM8OO$$z{2pY(#b17}cVz`oe=~ks8f`twzyncVY(AiB=goZsG*v5^Uy7AyN~Sf=qRm zr+VbCM@VQCMwD@7NPm{3e^6Fd z)j>m~*Udl1S^SEn&>w?(ew6=|V~Oi_r9@C^sf68ZelAks>bszW@H1i?v$pU<1+tR1 zds4nLnAO1>|6OrPMC@Rj3QHHxZrE8*DsUl&-v7Yb>s~xz+1kJ zA*`4-?0l9y_^%Q*_jgD$t2w>Uj=FtfK+-41`;laJ?m}SHZd$K({WBOovl7Fc8UcE8 z#Z!L5(jsWpde;_AsKkon({%WNX3VZizI|-l?az!R`h)sbRs*g?GBPzLv1-!V(z88* zJAkJD*{AN(yonEzU8e9&`{F9OgHwYI|EIUY>GU}oC6oOHr4kK51B6$S9650fXHl!- zRMHm8tN2W%UM+nqVaPO6s4<)w*xLb))ZJd{Zsek?w4w0>TMO6#!xkRD_lmp>Kl&{C z`jh5oTL#OsmD+y0xeYWo4=38pZ_pEK;kEH$q9^-#XYu#3uH zo}6viGSpl25sn8&vu)4yOrMIw0;PKbnL4ps`0@^w;#auzZFDJ`pd0^&;u|ex@Lp!6 zz|%PD5GYGjDafA3q^81^TB4(h;M2!WOe8IeUCg2O-0a+c z=pQt#sC6!kn=2tao1nC!>lLJFuH355?&R#~p2+WZrnnDf->h5LdI^bf{51KhwSj`W zKmWQ88RWMW@Y7-)SGCj3l$+KeB{d73GmCWS-Yf5jm068lCZp$e>2OiXR$!0dVv4O2 zYL@8$S}irgc|%!rTa?~@Jw*ac=hfh#9}p_~+)A=pmVKKlD1eOB$TF5bN@!6Oe~u&f zvMW9*MMqv2YWZC)^>QNm717~#(`{WH`c{;TKF4#6?q{3)QF_3P!(#nfFC6Q8 z<=1b7#^Ux}JC4W{qKyYK9ww1qUo+f}y9DhJ6n5zSoKePHr)BD%$;2z|q7;%RKf#gO zmw1{yjnXwfWJkqeITJS3r%`i$315r5^T;a+cFJ){s{^IqKm?k*w-3 z*Syx<`r$fPb#^`YegbmzveldU)WN8*P%kKoLaV+wECv88Xu@dM&CCwevnrT7Rgu=j z3d#){J4G=E5)mf>m8S;OdST_)&z(pblAoqP=GsW~S}$o~4MaM%TD_eNYxi$zUX_?$ z%VA*@p5bktQSsjLF)=AHp^^C}CufcGp56myiPR`RcNDl|B3bYR`3rZAvAQAL>?4BW zE&u`q_A1;($2Dxp{*j-6d<|^>i6&aGMnaxsuVil3yXRCS)CZvC-XQV=G+_;jxp@8a z=~{JfV>rQUU=w$P&-JDw3!qjPwa{@tYF%cX&9=2pnMx09NLMQ!z7=+l*LFRl^_@a@l$9RSBgfjr(2Yr`9KW)=sN`efzz-eU3nwnN 
za5mHN?l|O;7Ahd7bgH9}4fwnLJe;6YzDh#g+1fN(g=MsTO}Zdjdl2P{N=sEA-y?PX z4Yi2X*(C61BZ3lHBtFs(fE(=GJRi&tO>r3`Zg_N9z`ozuQ!nvhu z&|86@*pffSgHGz^Tpv>QX5CkDauez9jNY2>9!;=~_f)#TK?2F{qcTD76*_Z{rR)#s zL03_%2JDiPvOmF{j=~=eQt#$=)7a;9*fukrk!uZcmujxvj z$!-uDbUR}s(@n=TUI`JawN~O-j(;e4%%Vfwg`&fC85b4N0V%wdQNb63Ey}U3ae8K{ z^Maz$&J3ObED280k@UZ$<~CbRn~`mwt1zETcRqmuPnlkwwA)9+pPDG}GDiu22-FO* zuoGj{((|R4sXq&?Mw(T0F3UX$0$DRR*39h+x5b12!p##53dLzu#)%R;ejgi4C5Ukp zHb`PWi6Q1&m}??Wf;e%Ma6!(hz)+`~!X(NwpPD>1P-^`e9xF0(b)usTRJ-Tl^kt-x6H$d}|0iVFbv-XtK6P5(jZ&~D;$qT2VE0?%dMYeaL zMl9YM;uHm9HMvzkIm^Q{GRjA?YJy|!jSHM3tQR*v3)a@^Idw$qN#^DM{0*UW0c%+N zN#l7dT{=w^D4b0QjMVC1B#8R6iwvik*vl}DSD7$YeP{RHSXM9aHTuguOqf5W$suq~ zO_h<+Utg17H33v;5YlP|q@1GtO+S~SI-xSta>F+xe>XBjZ+e#Oinv0^2sNWj9%Q2k z053Y?DhNTrHwO@%-H?`-9bY1(t7D`AG-dyLe=2w@C;u!CGxGW|wPxkcX z9q&%i6cSrxq~k4Rsf>*x0~+kJ9r!-^%up0mR{s@GjBX0|w!6!l=hHm1VY3qRr743w zaI-m09UM2BSmglca>{JzJgXHCA~i{dI_}ZJz@1CEm@ZVMk|+gu58C_adj2=Nw8DPtU@L~qNohhyAb%av?ud@Gy#|-=a)Tz zzJ0KOzX|~a0qEfr9~i@bi}RQep+VBz7+|@^)x;9}SPl@z%QTRRo_B+=Rlhbsi-m&i zunP`p$vo}+bH22%p4cHfa$u^7FxRMvj$%IRI`1CE`J=jDnqSpU05=@$6wGwE(s65| zhyFVn*duIDz#w-n z-L=NeinL39U}$QA=dygloE&{JcBU8Y>8AU2HF3*3w2%Pn2ZLFRAPc<3dDG@>g)-Dq z;%+Nh5wodf&mA3_dMsUxSI#vlna}i@k_DEF-LiEoLhY9M=@x=Tt3CZCr&x|LV*Z)a z9-iIFcJQ^nBGYxEi43@kLjJZ~kNLB}bJQbuaNrNFni(!G6AI>RGG-ZBbrj z6C24^t|w9*xjcDeq@n555W7E*_FyidXgaqy#TX{`s{87q?w9oOfxVUJ`sarlKOUAm z+7a}L$*Q5fP^0*@7GM!#y$qBPG8LBfi@EA7z-rp~j}hbZzl&lR z@T38k31d|~9eviztJ_{Ti$5vE_vk8Ul`&6AH&qJUWPS9#{V7wsZtIT|@?zzgL!NOx ze}=x6J8JLizJ)wCwHRdSCz$t0PR|Z5axPwsyktpBM1;?F_JR^$#8iP*RpMdfr72ns z2`;Sf?$R3{Ov9>gU7;YC(SpEjhtzsj9ZCg~9S5sK==2};?X0T| z5Pz6Q1*9~+@Dht6an9!(C!y8W(3`9(-nb|_PQ&hRI11H4KrN+EeD<@B>oMGT*HtHf zL~%ozPMR<5hu?l2a_y1JopM|WPAIP!sHwCzIJ7at1#DbsR(Zm%==l%E#c4&Ewg7Zd z{IXR=;Uo}pT$WhrJRS2Wh()vl$bUSM&Cjsyx%#A+)N+V{5_n1y5k-J6e85Cd_sVeS zPS*&Vb1RPi@@>2`d&7RB&@ADOa3d+9D4YKA*XA z>#dRYC=JT4Naqv94v;M9YDo5+R^*OCgE-)d8e|H3U&H721JW#Pb@C@bTJK~BIMKde zYtV|YnF?&8^6RBp)2s}D@t%iOiFA3cUj$MvIDu*}SM%j16yD2xrZ7k_YD1+d_(t)d z#Tx0&jFVT0;=d*Xyu5f;#9W<#ZHHy`ho&3>cvNJN$-$Fk?(Qu%2xK2yoDwvcDvN1y zNLv}loA?zOow!IzUCsQ4aRy%yN$S3?i6jUsU;T0J^?bPz6_LHAin^Bk8|VcJf_g@Mc4ah(SdExahRhbZApsK z?YbAWD2)Q&`2Ne>`}egHIX9T_<;E~`ws*da=CjO|e}$)W+$Z~E8=dt zi{s;1csizT?!oTwk~2BV^s`+mt>1(tc>R@${o58C6`Y@YQ+}D1wP+3q_ihO z5eiZn3mUvPR!2#ym(VsF+Myp)J!>5dtluETU;d(5mBqSFLf4@Setgbl8Jp1xoe3%n z%FDOS5v&^(L*d5u2F0A#n|7xAlr(_;)NqiM;f+zakYkq#^D;|1#nefOW}rlgxz)!! 
zY;Vm)*>pR_QiP2r`ER(6xLjM0^>Mi4%e#5taEJRWoMhT2A!O^9{fH7Kn zgftuF=nxR3ainzDMt7IeT^k`Of^?@S%J<&y{{LV1>-YHc?DOEfuIoIG<9#$&i8XKU zWTDLZQ&Nc@Ca6?+OdfIc^~H#8s2T1!IL=iXH*BU(98dA3DHW*M$fRv2y<6Ps7~D{4 zf%b};&fLlw8vl!Sjel%Zwk=a_#fKBlMky!B#M=6~p}72f{YZ5y;c0#Lf5x4}+tsGn z(lvN;au$!hR``MT#)syUfEtI6SQ3)XkMO?_m|L`qOaB#3c=|uloM)ve$%HXGFoU&d zM>d3xTv}sQOX8%gqF?hX7f`-DEc7<|v2Z~qoC#^{gA_%Z(1;4RXP`3^OK|V7((G-A z4QI6#mC-*s3|{b1s;@*wh|qhx8clp;(q>toF0Zu~*yfrDDF=IPq&`zeG2QcOD@>Mo zfA~#0e5jIQeShGSwx!BsZ1CqF2IVkOo|EZyqNER1`FZj1zxF*uy<4=3eS1DEwSXoN z+kjSl8X2cvV?L`Ma)ecy1}N&mxW!W+llb8~7zS8OV5BTkG2|2i7+Zr40$>7N-Kow_ z>(f1g?SXqH`Ql3J5gM)y!A!kPt2Lo{$?+}dF(>b7G8onS{e@x*EZ zWO@C?rv=o} zje?tZ^6|~CDw3nxgU9;so!d>EuZtz9zEWg3{`a>vN97`Yp{SVdF=qSjpaQRYLI1Dn z1;MLcdEfN1klLXbCWSE!i};Nr((Fym`f?0@%Zj%cdv4;d(D_n(15T(fcItmYLj$Kq z>!mjm*?5m3Ie!pKdg%Z~PwhtQa#Uo=|Eyt??_l)5mMYyD^+`6Tql`nJoc8LP_@pj( zn5W)QUN2zAqGUSFwSPs@K6d5zM~WqDp#BY2mmSbBaI?e8obx@BTM;H z{qt|lMFRg37>?ey&iB(4_VpNWeU5B1RZbF*Gf^MhUGz=p`PY0cuA@+9`FXD>N!~jZ zE+VSDkB!Facj%>oyD%BcUnd@bdkxYC-$tERI(lct&ge>z9SSDa-B)Q+s!-_<@#>Tg zb2t1t)zJ|%9oN+*RgkjyfnMb&Rq@g8cgvhp%AdFQj~$DOv+Ze?|1!Yaa=1{Y=L|aD zr6saakzUm&_JdiP7_34w8Ax=9rml?AbX>%7D+wGq*O<1GQq^_-J$gZZdUs>?W#T<* zhyX$1s3i4ACcD+OAJ>+6EBYI!6_@HlJ-RP*dp9K7;x)g&iJq^5Wb{F$5#s98rX8FH zRTUvXpv?1!&#mB(tBef~-D^B;!x}rMWf}FSl?%`449WW8TycXdKhVy%trk|irXAy- zUpqHT+l7<0h;6IQWlavy^WHR5tPgdFK1HmZ?wWR)DCgk@zsIjvA;Hp_#YfBaTh&vl zk9I<(o1PAaA?Y?%L=ZcRSd=SwH0C+x=rKx}yCVMq6x`{FNi~1rtGd2!^J(>4N1OBF{=TSSD+WwVvcoU%_ zOI~PiwM7We337FIyaFu#F#D61Fvd?}(j*ftB&RZZy}i=bdFU~GX^N7=iU`lv8UIvy z>uVn|ccw@#t&*WHeo_=g`0WWC_dZ^!!&^E#(-8MHe!|OnpvVt`$mh=$M(KfA4)Az> zgQ$YJ5z;z@7qYBg;!|k(d0CNWX_rOLmhbH`DvGJh^X%#h`TE536Wq1xzH067LGL?! zm3QXm%XUp0Ph63<$jD`YCHW_S&etwk83o0z7-BMvLt1r*D5;VI)!QIb0nxuTt6squ z|5F}xce$swk5}jxH{TA3=Ftv-MRk>8*g_-Mmr)&}2AnjCR+jb4F@Y##1AZRG*DjwP z;RlbMBAF`QldmGdth0&6+a9x>2Uql&bvC;2YLVA_@qW*XFMQqq??q#Rn9<5Y=WuT@ zNm{C$e1#As@$!pe*eYlLBk)D)A(dLFTM{YO2g83pUS?iqAtU(oL@LAIv|i4na~T!@ zxHq!1E%TA~kM~9b2pMtjas6S^s!GDXA^wzc_Tz9&c9*!8E=ty}^HtI_;~#3{*js$F z==7tqQ|f0+q)jE9tR~tS&Y39zDRe|J+J-@a@1OfKE_2qnn69pT!#Xzg$}9iT4{uE@ zCE`ZOr!XzP*A#%3M0)cj`EJ^-!o)PkF3GsRq_{6$UR)UB9#Zl1*}0l+L}d<#M+?`) z(*IVw$u!}r-x`d~1-}^mb0k~{`6+^k0-;?TD&mghGF$cB?A5BT{KGPD=>fsMbfuZa zICMqoXt*8k<~-c9a%Q+ahep%+kK=KS8lH2%Cd^IPp$m}6eU$jq^_daM<|=83I*z%( zsv~NlxcLSWO#L8hx5&lp&xGEVU{-?fRu{mqm@ zg>?Ma>?s+R*J*mYff=oTa$|qO#YOj+=1wC$B3zthq?{*ro3nNkry2pPB{9OAg z=&z?L4pURQ9c5J#H^o(zkB} zt=#J9?aJKW%Ziq4EoBsSXp-u_(B}mT<&Z9r9N`Jutd}|Z^-j_XI_>UNO!^}TSxF^< zGydTlb2edY{4vR6Jq80&&zaX4`PhLUh>5eZ zge@rpMa17}k{K*&^=YN;r4Y zy|0}&K9;M7Yj{csMZtn<+1%tbJU^V+enU)Ykfn?>=2u2#&}z50)I0TQ7X2lC5$Y5U zVY+6hZ(=CE^y|@z{H+iaiQZ774*}WBm@V6TUq`dXv&Hs?6vd+aPxIzBl_otQG!0Qj zU`T9O?aK>@16g8gF1gmE{g;V(gt*MG?c7KSZgd1A@YXw%zKL!R*c52m(CxZ*mA=qq z8HvHcjY8N3%TI*c;jDA6Nu4xb##<(aI7AuxM`}KYMH4&=kXtbD?nYeKjW+?x!r0rI zOFX1Z6EZz@)2rOkC%4~zxoQd^pYn)Lq<_3Kr`r-1N)%XvZeG+)0{vEARk*xAB3dHp zDT{^Dq?R{kai&`CZmtefaU@DgLL^Yd#d>e>k&XF4`}YsUOShJq#TT1?h6MSjNNZ(s zolb$&{_OLW!UH;7eeU9i%M?alUh zP-{)M55hqXh`-NQr7`an>jaNLUVHe_*D%LvzG||`QhFgSbAQy<-Yt=yM1+-wdfzK4 z0n6VEFV?LMM}Xk0ZX<(vP(t(*ucrsPnNlT2&)#iWd|0^mOM&1`iY|S1Zc9Eh{Ml>b znFFo*&rxin{l~>IaIXheoHi;m*XqCivHt2StT7{2F#qC%{Ueb;_WonhF}|G|AoX;s zs7c{m@_)}w=u@V4RRqpX<3i+LUe8ZfvseG@F}JvotsY^2Q9-E2sLJk_nOE9zH3$&M8i$Wq6`*z5qe<*PzUj$Gv2}SV@2d~IC-whhDr@$Q)LCW$Y#Pkox#{xXO_61VjJ=MSp*+JA`80{ zETTcolG~tJo@t2WXH>b`W+iDXH83N^1Kj1~XL_hkaGyoKK$C&ak3QxIrw-Hxl(zWg z&Gci7M|}}Z#J$r*?Y{(;odS-2Y&QjHSbM0NMyNc%G}frF#_>Yr4di)f2J=*feA|nj z#eHmzP_3EhpV11{w|iDZ94gQD5jsWs35b0qcTZN3+tIA0f9!BLU>D}ZsZvs4U__&(4b 
z3=n$S=wqC5G`##RD|zpVC{*ZuLjMYoOV_MJ`0cjOva^%3a89*^G~ZTot!K+(-#dBMjaF7*W>gAK{ef8YK+X1MZiWpxZKIN7m7Mf)jSoKV zdVHPB&VX*v>1U_O$F-=VCtC1$k^sPjGm%JEhEuu}EdqJp*=|CcqO5ADql61W3ZH;} zemhG@lvHISsdaMOI(K2q8{IQs_Q0twIYXO+iE77{**|d00=qrbf#vn$gb9XTwO9`{ zRXHCV$f~tUWQI>m{S_M&J>FDyflC*rzEr)N;ZwEBdam4jupokGBIc+wEG0TwyfP2j z$}k{~8_*LRAM>`@uVz=O1+(^8oC?n#&mXQO+kCEkKghr?qyLr_X#c3%8=C2hO_Mkw ziQbWru5-0*kM_N%fdnFm#FVSNvi&53P19#~8|&m6#Na8qG!0aAX5~>Nbjv92Sj({; z>J$&Lk)ndOTqP%DVOzavTm7cWj>=LX$bTkdO%z%bVDynmyk&d!*uH^6IF*mzJQVu6 zE}|>%?o{;&tcCU3u8tlc%_*o8T_y7RI+ZkA-MJf=MV+`9s-pN+F>SpXRC^PO??)z)QV2HOS@PA*q{J(eXC71)n!@Dtu#lwN1!8}5j^!!1~h|ksh3S_ON4Eao= z8v2(TxLWHUai8=zUB%+@aujV(>EP%_@q7I2-bf!s;)n3wjW-+K#{sax&-@5%j0veP$ZPD!BPjr!P%gJBDoX7U;HG5u)Yt6zXDhiyDc8j%}3B^wa((gHY zuOi{8b3bNIy6Ryz{z8$NqI5;+tTaU$;HQ=K>gXG|AQ>j5vWY8Rlc(IvH*G*u5tK^6 zW&DYZyhhE0fxqMBl7p&w|0bwee5Qu&ZzHy^KTWe7H+Z%v5E0!|Ufbm+xMdHWPkyH%q^2fRav7BgDz?xG}n*cfu!>g|pz2YdhTj z&uCArk^9v-e|-xx&`FS%jZmW`DZL48CV%Nn4WP&aIkB|~!W>^_ zt~d*tGS1H2Hg0o9h{;v08b>^*sG7B{_jqOb{@D6y@fD@-m!cs-+<`UK)Zw2t&_RO} zSRqSqMQpYitY*?+n<#?X^mKNg^2z5-3=xsvexNhy3Snj_g2xm$xv>5l?v$5Lo!|yJ zXj2-eEE(@4`7`>ugN3s@Nd|-Mv$TOvB_r=(1|-O;@3uIFl*vW}yUC+%wWr1~O&jo6 zhuLaWQS$t524H21RI7&PNHvs&%5;T{H^48eyi}P z-U~HXKsU%iS@Y-{lW%}fG-orOzuiA-(`KPYBzSMBxEhwWsd8kaQI{{H8mE$R?fjN= zy>!`Z0lIi-Z+G;FJ1q0fX1~mywc#vWIh2n={Y(fVbUEvCiLA!Gwln7kKQgBYC%xF) z@%~h<3Jk$;7%=;mTfa74EDS<0l}u-Y|}45la%ho%B?PE zCYQR>tw?oXb*9{ZN}l0))Eg&D)6ms0GeqY~y?3JQoTM5MXbnuCfsPluXq9WZm0C|e z0jar`pr#Ycw+yZZG`RZ0)_HM~*_f7s!fS zMi#Fp<96@Xmke49%xB8HJ%{JTS|;!Y&>x{>u%Y}Ju~(&G!_^a|^S~^_c|$d>Ot7{Q zr_lU&Dw{RO#^Ktqyp&qX4-)U@MlP#|_)Rt_CGz3LF>zeNB}KN0sW~Ec^Y2$E>B|lC z6)xe1xU$>7y;by5{moH4;c@@t8oBLcu%uLB!sAydjmAWLm?6jh~}Q(wZw-* z()$}kQ}icfVWlXmRJCHG%r%zEu8P~gk%J~cNRzhOMxy>bsyA|*vS!`dMWOg~+U#}U z?=;r!s5#{%1bfAO5{HB-ryBnn{}!zIiiaO39iw~_w|G^ri+2HI+3aYYi38ljiY2pT zb5XOHBuFuy+WiP5<7!^KRjULbQwYBzUm%38hQ*Y-p^7J%EU?~HLoaYsliYrvL=#Jr z7(9R7)Cs^CL%hhdrq;*rJYZ=6&0vAgeU)KcNxZbIOoNy0cPYW6{mjd2npzm$GWWx9 zara03j0uFeJs@s?n(K@4zwabnqoA|bQhFDGV!521E%yQCD-L!|w@_DhTm``tDC8b3 zEm#1+AM0Y^@J0A29eWz$#JKSGCNg=`aIh#{yEs!GSohAi`ct$Car+~md*(TM2S#;S z>u6A*L6jZ_;}qQS3EjBQxp~{Y)gPr$EtPiWiqDn&uPsI}o+jFGtLnChJG}lYt0mzw zpsgY>6N8Uq-{3FXk6EOaQ6HI*mMeH%!}pJl{Q^I~oG%}B@%(KTIcYN9LU+`SHlF*| ziwCoZGUeUufOQIB9%c^Z^Fi2IsjJIyd zu~3NPlsxxwgj0mt6;;dJ_oP6DJj&fR?S>pd;Mckd=|n_I(r>O$2&>c0rsH-m@t)8KT6@*J)-IY*~m+t+B(&zg4!~=Agt9+MBRS;g z$+ILABPh`CIQ2VRi~3#3mrtqiF;xMnOdVYfE3Ql}K{9Url*G-rJ&|jN(14qR3KSjd zTacJzRbvH|x-^cc|3oAnfduBHF0w;FZBjB0@zjo2brko#UK@v6`;ERX(al|GyS#1? 
z%9d@I>R(q9dA2=`c?wT6?IAy)<9lLg)}_(nx>6MfKcSZj*#3;)Ut4eR&eL5@pYaI{ z4Jw-}WPPN5mn8pl)fGIV@vNt0e(ztN4&e0(@fU=&OLBygZTHan5WY8jXh<&5zF>W; z-4NG)U;9yU&Rff;$C$erV5)yQ+m&zD+Px}@$6|93b_z9@+0}s@nI#ONn+S7cwFqdM1gfGg(i|w1a!|#)7X-^IRe$g@*=X8 zkLz^(|IYU#&ot^Y!mMl_m2Q4&3{ZhtYRbHj*et1WRfNCF)9O>Jprp&YpaQdOH8kxf zt=gYBObyB45IY@8OkWdU#?aClncNrf&M9_Xh`OEUh9YZZAgp4Zl~4Hz1$Q*doF47Z z+re6MU8T{5Fj~7UvvDEf)TSnYKIv%2f(<_>bR&?|$@6rQ!)u3dUY;P=vNZg-#Vs0e zdR7o0#aq9pKcs}bmkUNL2sKw*YCrA80%K)*ZwIIEPYH0&Sw=W17Ntz_wy_MV>G_4?OcVc#Zst^35{+2^H-%ukf+_ z%wIMnowGdWu8BOKZAe!u=KX5d+?U2F;v=(l7Lm`wTk*X;t)?TERU}=aFYcPxM?8?r@FH6~#pxFn4>D)Kc}M() z%JJA1PNgaTSGLQr@W8bnVp>XqPlXr6NKDv<5g%Q}eUA-Rk!JrK{>j6UA@3PWa36}h zM#Z?mq(pB6r;;4z^d_-W4vwB4KSOLFif8InXob=9W}}NPPB0(hY@A8y9DheE)#y3F zc%!|Az;i<A*%kb;pbqVnJ13&!Os6Sjsc#_yi8!jSblT` z?Oe6TpJ+Wonr3VDl~J78q&xwtg(aApPYz5%92#F3J?+j*$uqo zwR=aCisaIK-$Mdx!h1Ia>N$ICd-Z)QQLf$=oV6P&Q+z58@mzalkjk;;fU}}suDov` z`E%!i-d~J}c|q9~85(-yz7^eLPPTP=<5`{Z4|uVNXO#P%;~#)A58N)Jx$L~xhw*lc z21K=Z`wbv0N}n_Ib;DzoM2~uqm`Tqp^1UT(ef@X*q4Famk|7LNU_?r#1)PRZMlWJg z1)O7?ID<{dDgbzV<9%@N>=fX+PTYM)tM>8cj^}18K$y>F^SC=aDJ9%XxP<`Rt6w0{ z{yR}j!s6OY74t;PgdzNj^6Nb={`v{d_IC3D`!?$?UqjVYImfF(iFi%)yee;EGzgUr zG6nnl=Kb}10#I3$)31TK8vO|oEx=EwDqlYSzRF_t!GDlKJDSx zFL3|DSp?z9VL3w4>^FLcl4Mn;gwihmZWW84sr$US7baF?AloO*j%)oVLg?W_$*2?x{^i6B8d_AR6l-!sSx7F=Xl* zTcqs{VksdxzH%@FyD#&bCU|lsNDr8qLoFxcle(9tb*6bA@5fM}8m<_;3Yc$7Q;;4K z-ip-0xmTN)MRybw6&XqpRM7S{-n7i@DE47<}Bz~ zSbV{>7+*-{v-Xs^wfX?trL>$M7!p+f*oG?JrL0vCeVFXjlw2F_{Hk%a4<&~0-LcGx zcizg32DO)DKCgeh#iE4L9AzP{EkCj5Yoe0-6D_XS(TRyNTgI@8(A^2Ik8ypQ7AR%s zOe%8R%QOqT-EiDnMS2M4m`K_jKA*o zuyto1j|bX#^5^}LJ{xw3GrjAetgB2b2K0J4?iwoWjCf#&fBGa!;C(phMx){FRmZMB zZe|4QPPw^^&09iT9DBnXmNSqe9N0Z9!Hm<00Zc$ST4{B7fAqvrw(^EWNd9q+2CQ0W|JLC*XaT|Su$4;#Q1?%uQyF@pjj~Smx8PO_gL*> zeJXV0a3Kg5URq`lF96H>cSv*@Ri^@4e+1571bsXMSH%0U$2o3k0?***RRD=_(99T# z*ddZRW`1=;Wkqv!1v!(>*!E3T3r+IK$p2`HAK!~*(m#SIaPFmjt9XhL3KX7{E}c7| zSHnWkB>itomwAq$&PMhvFKo<|DvCzX1;#P6jI;x`I(6ReCsxAy7}XIJuM(W#&@>6* z`amckqi#$uEZ$kS-};FSgQ*#&wW@RV5Wc+^t4lIQS+Hz-zSi}xPW~E(QY1a(HU!YT2bH^$B|dA4iIv zIE(CAwFLZlch^&1QI_xsh|Zbu`Dy;a7B}9sTj{KrOygyz$iVrF%<}K3<+10NE*Z-s zMSL_i_Y;dv2pep-r!&c%UWl6JKDP_jW1LYA|*!&nbpX zk-MlJN?I?(=?3@XPSc9xh>u)yoB5&6YQkC<%P^SgXJbSB=yP?(b~I-@?SBL>`lII| zQWsG9{is&l9>-^BvH2_b$SnN+&0Cf2J2`h2vj#am1Z7%ZJ7{BMc{<}`u65wj?9Dv^ zfeyi#;f5ja?2+KYv}6A{4MDbg@d#_vedT2$Ch#aLm-#VCnijX6fs+wQ`TV)dyQurY z#}}|4H%c*eHMGo{7%k?H~!An6t!~ zl7VW2L-RDBhB(**Vi+c)r^esmF4AZ3ZU2*Z#}jbV<|SGAul8er?Zca#)FHkt87C_o znVFDy@y&SU04p@B5GZ6lSX$vRJbEr`V>n|=)v3Y2Jg)qFX_m2)oGE-~HH-U!7=e~m zZRSGtQqx+peZNPc*-{{<8covUNKE*Vo7%tCRiOW6DA(+H`jra}q7Re|p)>UHlTzU_ z&pBMMy7c0*iIF4-sg)h2b^qWFRL|&W^}F)#Qewx6^4q@U2fhCTc4hWPI~HPB=5Mk= zKKL@tx?+yLiY;I&XHid?b&#N-Zs^=~@K>*PyWeebJmEA@;L39Ls1m>vUS#h-=fS_S zw<9cnI!liVtJBDTthXrM9C|LOrn*3&eSnd8+mW-o zCbP>^M=!I<$BlF5DObdN&&+;R!kIN)osvB$*+ms3I_oJt3>P;G;!#p4 zkW-EOTjRbQ_L+J^(s;R%``3@5PQiTGEkkJ;y02^|NWx{s-MFz#a15Pk7eG@-kR$mL zx;$~XZHBa(>9lM5^NyuJAJlEsWKxw88@T*!)(L}~;QpW_c1HCC2y2BZpDK}buDBb_ z^bhy4XjFL$XYpqXB|ma#M4#BSr*HdD;-OkmJVaJVngP@L7x*LDXwk_sOW3*N8*YHc zy|?y!c{w(4Tw`+IDCQjEYA|t$oUqh^rKy%(+ncb8x4$Ia`RcbE+StBxiguY;TM&%C zDl!nx0#~Na|5!<3Bw-QbUUbIWPbA;{L4_uAY*h&$JzSPu9#swTFSx&sd;`ped~jTO z%&frTMLhAsyDDBURuI-A0ZMutU0@!dWmKpvsg2p~I_auB1S76(*ZR&6I|*X^CFZQ3 z^R+h}*%)xws6|4@AHGoj>(!KK_;Sv8@%#&;OSi(c)E^hZJWYur17i9R6F4|Aoyqg` z)uO@Lr6>i@jA_HDS*W23qv7EgCSF(4As}Kcu%d%CD|5JR-v(w<5Nd+x=AJTOb+S9& zOhiZ9&Wfh`jY1wK7KrkT#f&72qxxIMy_=8^+Vf|30es9tM2%~I(gzluEqMM}0Q_~8 zM@!}A$vD1D-IB>0wgpR{X(dtUoac1}P^t8ODo-f|73|k&Um_v;HIxt#Eh2{hFAUYS 
z{R8m`^1#9JvKA_`_S-20E=xFpq1l^wW%&o`Sy$wI#JhX2fA||lYNH8?D=g+kMPX9E zcXY+egWGsx~xL0E2qVO@uP%68#%)E(U)+;yr8=(~=svYhr63PV#(|By5SdX!FyEuUA8*TvyQsh|0B3`w)~E$ zt-IpmImP|%en3rn+ta*IG*JyY6^yYn*BBlqY84FIs-~oIxvJ3MRr9 z8&|%pnW|z!EzHZd$}=%QAf)(TVLQtCypZHO%K7eL!l|5{UrKGmu9v7W&H0g_2k0TS z!|&#u@`RVzU38G?-AJ{nQ=o0uJijU?XX_u{GP8bponh5(7!a0tVMFr)okU^1vFld} zO}hMk=BCLk^D9-_#)XE8J~o;?P@uoLMB2!)?hUkY- z631F%{eVl});Ta+C4*IgX;{bo$RefS4SVuZ=!*|}lG-eo6ScATNP8$SiOV^&>V8HX zYAKLGOv3w9@pF9IEhf6}!ra`%6=q0Tk4bq4fxb7{ zxFGO_Q&~X7mI+VFm1(o>I`e5+N|Nlc#qC=}0oZRNMq*Z-F_IB90z~%pC3cbnL7Q@Z zC$J;?)hx|hdEoT<5n)ad_C@yyyG)g{v+Xi4seXS=?$?Rg!m*8BW%Zv{BUo7|M=33n z-g(J+S{q+U>>EzAPq}4WZS=z8W~HylTVCDY!)eo%(Zm*p(`fQM4&RtoUkr3;ylH7R zL;TLgD2-idmd`eqiML8E13&)#P zlGa9?f|4hP__1ImW=;0{p9GL))mX>x$!q=D#i~?}+jv^q(q|hMtzf-l1c_dhgIzJ7Q7&SfPIWbhA|CO(EzeeqGE(1iXCdjcUL#3sew;E6RR5~Ia> zl^eKVqg}MnE{1Mxlmj)E!vr7M?g`(y*Z&b&^!=U&|6Tm&Lj_TG8>KA&@Hr4!U-ABW z#LdoauqYV?$n~oH!Cj<>HNc8K3$`=Xw+*;FL%Ida40JFHu^U_JGGUb%?7`Mp`OcRI z85?njSsj(F@!*LSwM38yb{Lz)Kb^Wh_-SlY(Fu3F?&w+1@FTG}JFM2scsVDBWhuJ1 zMZB(@V?)@clIx1{0fECaJxQ`syVE53q021FT~G_~9eh@(y1eAWT1$B9W=4m`f~@#N z__jO61qDI$>Nh~R8}W23Re5PgLs#bCR!xO0T@El6VY322h=xC&BGScl&|4t)HxreU zIBFo1mz4P2o*z)qVi43IS+Z)@8e?uN0OF-x<0-FLM%I0Dj^A@=KXep7-y_U=RoJCl zs8R<$yH&0c@R7>2^R_p}UY^*q9>UqK$gFOhxIdv>5dDW`>zF4-86W7Ox?jHXhsGeABc{l=o%lFc$zJqO|#zVZd1YCr{ zIE*Zr(UsD0It=|H)nYHTZsM#?p}*r_=guxJSs&L`uXmBG&|6-%w-2Z`8q9_-4Qu77 zo~(qqJ+vh~J|{0q{BO-az4i=ANxZyae$nu|m83T56@mWI zT2I~Tl^*R6U?8fi-22PgU`LD-&k>~hx33JW9nY}-vJ|}dFJh`$L9yxJ7V~Ndk04`+ zikHQoeck3Iso-#~>8~ix<9rvDVtO}mWZ~A`(LQ1Mi;`}*as?Nbkqeelc#7-OKV4{W zEB>}E(~Ts7mS$;$5*n0oX=Ix#rnkoywEcQ72XWyvdFIXA(MU?xc+Ta%QGB`4wAAYKvc)FqUdaS;MHhuWao7^#93xNBeo-Tq05aU6C=PGFy(-&SEH{jiZq@E}Y7HtkbcRZF zZO_yfvc5e>X`sd~QGH3mMndf=fG+Opf7;}}?~OfLBKd<;@I;q}U3K`v5^Wdy&DbRY zhC27Q9%+7?o?woq zN#fNw*iZ?r4bs}pn$~03 z@GuGB;oVfloCx163_Z8UMutw3wSG7A)I1@)hHy~uuWqke!OUP%3#%q{il&AFv~_qf z@WiydkJRx#6)$@jc%$q(P81C7u0PV_iM<2YwnJ^HOlMZ=Xr3esVIc!3SkTTH$9lEn z4LvfR!Qky2g5y)No-R=0g(OwbtWYsRS*9$@DU;#CV)XA>-q35c^<BTNKXn;@TJGbKCk6j6v<1 zN@hfwIs1NeS>PlLJnT*dCaZ-&X4^>`Y;|8staRl;yP|C}iyw7)CkQjiwA3-bbBO_2 z-VMavH z_IlEX=QQ7j91r&X82h|?Y1?As6+|kiTh1yEO{GQn(Z*M>#+-Q98dHHkxL9lZz^x)4 zYPTJc}RL{3CwX3|15ZU1zJ`I z7*X=sWGY+4^D!i=R#3lUI=CYf zk17Q>W>{dFviG3rXU}~Oni$f{JA&t8Pw(R}H}6cKyr6GnG71XWRB>P5+qagK;cD?( zE%eFaPxSY`4`WYaI_CLf{&=qS396M>gTI2IrB0c1w=FHTtdvwO`Ksq{ib$7is8)ae zR!#GK3t{4;-IslCm+>Jn4i~5(A}rp+=eV7$mqW9@2mk-yE5Cjfv9J+N{TN&UYyZ!{ z?t0Agvkr-emG!@Ed34Xv{Eh63uJQ`}4HSo<8?m;I!<2nq8f%!maeeCn|H-6apg0l} z_sL9)Cmce$3sxOMDR-_C7D30nD68W&+S~Fy6LSgx!&bP*ff|WIoiN*w1+MrF@m?Xr zy4JOa**H9B@J2;#+ZdnvVQw+4`RaHufCAid*jZ99+BX1vi?*->73?-x_a4lG;X@N3 z79ri8k@tqwXBqfLk-C4St*~XO7$t3k?pC9aXuF~WISCg?dnfS))(e>>`5&o3PEw_c zkA?4m%5f5rFX3E!`S8H1kq;{W5&Xg|xHA|T5KA~*`)KOBqI0^tvESeo6O{I5US;&5!e7;NPYDt+E zUbck~D*EKmY0Bx|XdEynp5l=kP=MMbU-3rpjRdO*HCV9S!U~Vplc#tN-Px+gJl3hf z1c;7##@NqFG^h>N4Tkq|j!9>5s$D%*JE~5!!XYlP@Zj;?&BROWt+IM6)n`9f%KBIf zU3QPJk#b9_fZ}ccu=O5+Dc6RS-Q~0c#YxqYG*YE#Ui}htk}iWqd#W)^j7j2WXob^^ z{p@OqiLcz45)X?lK?)zy5(?R>+uJp|_^%N5##;)5Jq2EC>g&Pv70O2nbYI9@tQ0QV zHXZ6~3Dy5}x|Mi&&f$oj-D3c0kBd*G_I5aU!vIXyw)KPU;>yPy)r_&GVV-$H-elco zgy+*6d4w?%pql41Z?>o+|BjOa?hApIi2l;9S;|rtwC~LKKOnEHZ+4SdY260!LCwp4 zdUV=eKA=*=R>`zh;9E2o+if!77;zlg3$6Jo29@bvX3p)k2r$NL0brq54ugn@#xSu| zSE?XPB(1^gcoXl{VaK;X*W!7r%Gn_uUZMCfPO$l0y@bBOars+=y=D%rwG{T02ET7x zOYXbTlA~1-Sy_tY0H-y`D{9ww%B1n(G$l zES;%xHlHV-Y5e*`*XSvyw0QwwFAbawSiv<3uN zbPr*MKQ3xtUp+i1-D8`&w$Uz#&{Qs_q5?V~{D~NAi!$~cu&XnJ(M2^Q{Unwv~{ zYy{;i@o$_setekaOXJ_EhMJz1qtU>MhQsFfl<)s9<;AocCfDxr`h2)|gyb{Qq)C0& 
zv&i18bqW@@`h~>#@!y8aKM+*p)>n)Qumz3NaFXA`?H5P57-AMZ+oP@=pkI@aoaW^g zq}k?T^;LH=L3M&>iETjzg)*QGNHwx@9sx4z(?6>ioQZ;I+ZUkY=M@U)0%}q=LdUi0 z;J)XY7z$b8NS$-{R#UOR@@#B+-H#)^bHQydWCt9|1GRxTIe^fM-7A@N+we41YI96% zNZA~Bk{>9+Gy!R=5=;)^hIB;Z8&MCqY@-3>Fpen!&BE5^tT*@+A!l)j*AiR`@^yUd z>kl`XvHQyHl!7{+6Lx163ZJ)I1vg$C8}W#4%NXMfN;fdSC1DxCM(>juEqGbDnsvXL z^ADd529FG(!Lb=E^ip%9Y5SLYlD|t^6PK@IC2Ix$M6Wp&<$=*D)G;GH^N35=Oa-&@ zOvkiEFSBaS_Z9Gs&HYnAG zJ;V(#g11e1FDK7IwT_pl@{|Xd23n!~>Al7^LE@;tWO~ZDGjUxLiM<85XXjzM+r5%d zA!b*Wv9weRcvVJxq-_vdJcv|J5%oTvcWxg9uqKF^(3XId=*TQ*A$IC}}wB-osKg&nM6u5pMsJ}L`xL8tn9$~DVdnZF9R z>(*3m*T*$xKD?$?4V6O2Nd16wv@e?9$^;peWLVE0yiuxJmo@$S1gQB+w8_O!>fH$7 z-OtmVhR*cq3kFlTP42@)CLJ1e{ZE}1^pX879bdXa)l*(W==nJJ$kGk?Q99bjvSc#V z`$NJTD*AVGGw8v`#lu&ZsuVpOmoRx3 z!WWry6EJ(#)fHzCEf3#UV}AOlaxXn^mBmg=OeAmrg6;k>=NuxgsOB(PN~{@UfEb4L zr7oMOv7BS)panQ zXTpsedwUw3bQ}^Egw9){zJuR+x&!OS$VUW8z;0YqjkVQwhreoihubxEp^?gtuP}y# z7L(Fw^z+fA3l#K2@23Z<=(h|$9pAhDr!VBiuM>Ri_s077_s`#5&S}|Ad}U-J2twt{ zk<-XRUIL!90uargBTa#eAad8hO)Pi4i%NJAmhk0e!b@%wQSuh$mn!HApIQ=9Q!?wop zl_j-j^`LHnpCNOtaHb0MAwJ1JR=j=y^4KHHQ!cNkO%kzdeN_^{Kv{WJg@#nQ9A=zV z;VyW|t9hGh2S^T{Wa<)=3EoZIIm%@^(y6A%xPO%qMP8tVmLmzIMVy4Z^i?(FM zF;Ecyk+znrfK{61lu@et@^T1z>s@(U9411m{AD%>_Vgv$ZO4Kn1%BlTW`8{8T2$tiTT51? z(!|O_zN{Y{YkpWmm-lMKZf1zo+2X_wJ3P|7f|ZNGa#UK-Y2He;ZkyGZa};1}8o!!< ztBV`jSY|qWy0<3K$<@4PHenpBG_k_p2y=uYM~(*hK_rC7X9wS2>W@E33ia0a}NnxUZ7T!1-MQ~tITjo1I+ zEZXTlOCaeha#3GR@(O4-XOR|R@!3ZtAZ20=F!RcA*8#z zVE}0chVE9Rdn9D&?(XgerE6di5TskAedoT<=RD_m|AzU^-q*GETHjSD)APZvUfu;h zA%AYIYsncYuk+d=$Uwc1N)kR=ULI?;4grj^gVn`&?Gp-1Dsx2YH-MbZVk_>z3*Yt} zzYtD*x->+Irn*njy6Za&1iFK+9$KP|B>L_xDpsW6N1t^sUw1FMi7FrE(y8y$n+fng z?e@l41r%@e{Cnz>u*)Z5Bz2W@k%VZ)Ufzb-4pMXn5(Yu956Y$Qg)6-*TLO6NPmV}y z+?(?C{vk25Eh?2={yaGx`-+bAqQ`b42N=!afK1x3%Gs8Q|GM?Ea+Bm6se4N=B6PA3*S_)Cf z`&*{|T(%!_uZsY^VzVqfwLZEgzvcLOa2vYQLOPE;D-sks#GU^{MKpLFSHXj?)O(@F zaFIT`+0xHYMS#*A=FVOB%_E1p)9DO{Bi-E_xUxpZtgNqbY}(^th5oTWQb|Xb(t%K# zR3BW=i8qws%b1zvyL5K3cr9+DTD0eGZ`{I2EGpHlkhX*9)*|Z2c>-Mefpf()5edD2 zNSZ}pv^a!yW4UEes@R0wapp(aPF?w&V;{n%K7p5ng{xicv#303Y?nDgZj93=-UZgG zuyOKA=K*ISz2-8|n@n^t{Y7SduH2bZ{eC;eu|!=ym^_}0iBO(Mpgpg!%6DgbFt+}+ z?*rbL{%DgFKBY+00fq@UDr<$G|7E2UT6T_2Y5V;jUdyR-Z{2WHl;fiezM+U z8{CokOU8klnQpi~*-Ieb4sVJ%*vS#b{!&3@{SjdrG6zlz@*m-IiRs8Orxjy5LPSud zpH2GkIjc6zJT{3l8N8zO1c$|anq`EG;#a4(1s5#f=iDMm91esb3yWjrz6vD;Z<^%F zCs)vVKY0E!s`=rgjTC7U@APFm?v+LkxKWhnrNMjp$5HplFjl_kkzjy#nfySBSr%)^ zc}{|vsC0RZuAwM0MLN=}=+BVq6{Gg|idB|sqqL0i?;1SebKQ2^n+T{0B*ros&3sO` znRvR9srBi+xZNT1)i~6~js#2RPeRRkfH3P^YB@Ne&50|AHR$X-=M#m&`xT(c&GSE` zLn}q}kLt~jofn#$3ki|m;2%WaONe>)t+V|aMZ`4+SdgCjMiOMb zF*Gk4lOn%i7wf&J@1YVdR357F1%v?p?pob%{*o|Y++{`iH5{D}nUpGp*D(V|hbB^Y zzPt^QY{32!Ng+)ojAn07t7WFVcHKKpf6kka0C&B^h(9tjP{zUhAZ~I4BY*ku`;4C5 zkuw3AF@gfm;gsq_pb=1`pmOnqoE5g9Vqqafg6YKMLn|mQSeNzJcC^zT9^%MPlR8vt z{kVWAbi9^WsXI55lth@?du1F=WD&hAGa#|{&kI@tyg0epW0dREHlNQZwPkr9%D%?z zysp#Mb4fC&ZC|`{vFjE$$eDIQSNQ(9tcD8gb?x^3Se??dT>{U zTt7Sp?_PP&;*Iubk}4oeyTa7s-F1{U%5{%Zu{&Bj<3useUHCSyns8JdgGhsa?1#q1q;Z&E%Qj^F@VB|0_8N zPWYpm7MZ~o6pQL39AoXjZILA0;ao8<1IDQjzct zWxbi`SymSDLS5$K;WEG{r_L072jllY5!U*<+b?#+WBpmj1gQ2^p5HLg*?ro^pZgqk zXhm(14aXF?A2;py*u`itPZbg{5LXFbb8(MqW^4@w2Ar__L)VcP6J9@adhyYA5x_@(;>I z<7RrX{H9#T?&yJ(Z0-1;To&HS${+lp}C+;fUlqo<(cKuFq={-agZW!VMM_0RLs94~u zouZE=l4z_$uEt~6yG5vV3Zm|2dLQR-P-BUDd9}7i`*3(Q`aXyK)LKDG_Mu^xcnZr=pgaG$*{t%q3{N|Q8}PpWC+nw@ zdpc~p?C5VM8ebc&$6e@+i(MUdmzZe?AQHdmyA6vJb;lwYf(^llRb2fyPdwj@Dt=Wd zyO>;^y(Tn~s8X5y-E9?s*NZPdn)W&F3pDZorp=ExF|v3x;8GML4?v!a7IrG-*3MMh zUn5Ed1T$d1WwV}kVGZ3iDg-H z+^DVPv8$3CW{uS4$jEidMc(m!)}1*G^2@BjC+}@wIh~wtiSiA>kmbUU*Ws}bT4e(c 
zTcH|t92c2_SQ}LRdnyLfX%*V?$0Z0{5Vywn-iA9g!I0Cyni8=*b#G{e5{hxTIHMDb z<~tI>?lW{~z}3LlpFuQ@$ft%`L*tN zmvQUPT{HcK$vAh^%_QRBdrxEKd0u1Uo!|EbvUPdKUv>R;)kSD0@tEFbj zM-X3u0b`z5`)#34haX6Fj;*szs3ghpTLug~4GP6utGK&t_F;P>r4jlG)4s9Re!-&GMSK{LnooCVMQ`5cFBt%7#eiP4no5kGj&qgF};l?e+qn`Jc@_8J&nQH zWjs>3WiTDPVvH!L4Sj>YMzn{4(w($D|8s<1&vVBteZpfuXxVm{@?1KzP{40uiV@ss zWOEG+315GXm!9dFYtubjgnn5(V5Yt7FfXPk+q*Z3p;-F*NwgV{u$5`eCKylHAO@ya z#h8u88Am5O5)I7pb4@D>x;(Sn0tN9OF>EE00YMxs+S#GR^!vFN(E?(3%~tN^_CVYg zJx=xEbV-VIkl%;K-wW_KSdimQ_Ob>gZjH?kGNlX<4j{)~SrGBWS~nCSyz(O>BIkXR z;`4^kHSSHd=&l)A|4YC4eAAngg zS%c1V+c%w7aiz26??{b^xae-9puc=|IruC2&;3|yIugCl?!Pz+2Eo#$2MpZ?S_1eP ztE~50%>P`syLAC$n$7kVW+zaP#NM8c;xn3IF55?9Y-_$Nbrr|FRL;6p}+w`+dDAR2ejQTIGOqTf{xYh>K zaQIzWNoggHI6|7h4Y%&VA_ca+Op!d@2Y-(l!$TU_fA}Vb3IrbGHF91q!=YEU$ zyr}c%$Z6*hq{9)gWH^QN^Sa!*QeHpB$^U~0{X;?sukg)0(9nO`KOTi;h$?jwTgQJ$ zwYke1tjSuniO4;`w3N<}7o;UsmWJYj6AF2aKUG)UB6gzduuwJ1G{o|{hu+zJS55ybj_V8MP&R`qGwO5RZ7jyP>F&(IlF(gA%LlcLtn~}0p zE#<{XSVx7EE~c9Vt0UP;Lw<%z(W^cS9E^ehj^t^Qra)GC-*dsZgIcEfYYc_16sJY` znt37FvO|XTS4Z>@rT1+t)fyHgv?(QfvBihKg5$^a*VHm~g*!*byV?&Cnlkyym7~4n zXO?~soSIq3`h~zZWeVhQBMcU05hi(@rG;L*1@X0RN!+J%f|mB%E#GEQ_j5`|4#kp1 z{=t7~# zCbfnlYwejECh43?16EYt@Fq*syV~V|`y7&(@|pIj=#~dIaw;`k#)QS$Dz?h{dtAJp zkh8H@k1M5elnWUGpySE}tQEu3g$dAipvOd+b=DGa^wwXH;Hvjy(cVLZSBkl#S1e0= zMI-=kSk>;6^S0`7!RB;_G3Ai+V7JyyOZH}7Q)vd74UpEy3&3MzLUsYB;p$M$Ja85yzwXlwU zF!QFxrqeCq*yhp&iutEle^J2CHl-KW&G--lkAq^e$ZKsr3A6gWZ8Q{cCsryEJXPwrum&&4CMu3bghe`LL>u)^5V0iqtb`l= zGNnD+)z~ct6y{yR2;Bz5KME6)Rm7xv!J;Q`s5mV~?iJ!xm!lw9E=vbf z#CoA8ILZ)yQ4(|dkPV1}N^@uE>kf`be|!@E`TI&tEE^8X`7noVv)gNK^ zb0|1z*mwIbaCP4#Vy~)Z@A}Z2&#?tE5>loqm_(gpSIE%{u{YY%tU!E)Q(bp|Wm%e8 zhU!xuTjBBeVG&2DD<^{gLoqvC3f&K)2$?TC_pqv`LV-u(>s)TjYDzqM-ijYlC4l=*fy$K;oULUfzamB1{L!NT~1x|ac4!%9g_1x{H$ zSJ%wBp)P(LN+j&ECNVAA!DkSmba1#%-qAa|^gkr-*NB(b>Bm5aXLw$xE=8zY+@JCk`!-ZTIFz^GVhOJ<6+GBab$JARaZn@EK&w9EJoG?be;0 zdul0eo9xB}XB~=^^224e-^ml{ziJ^0aiN1WW1b4!b^G?CT5$Hjn--o(_4I21&i{~# z$P>s|A0*2%)KrfXGbwnT@SX-o3I9V<+Pz4% z*c%SoO;vV|X_$o;$C%Ad_7uAoCpgFC7~-8HdRNt4fl{3Q87%FKVxK2$BUxkJb@V$L zA2tu1GifxwDm?Xqxm&Dg1sXc6{M4~|M!hZx=w(N8!R$@GF3yg7iyq~BxkkF`)k<`H zT9psa)D^L%kz-CRE>M4{<67Iw$gro!DRC!iYGb1&d9upFa9kJ(W0nW$*ZqKQzq!r; z6~)UZlr&GGxGayfRRQSU4n3CXi-}2Xkx1|OUmQkwekWFJGS$XF(2-a zSeND)#!j=)Wfz3#QoU#FY3 z2`Ji}Zj-pDLLG~<6jmlY&dzqI?0Jm*^EekpjQ&80cNCOA;9EB|FTeC?>-xrI;NZQF z9-Ra;zMUlhbh4vh^5MkRiMu0Yrl{P;k}wkK#GspfvxVQ6>&clw#c<-Wbpw~oiWRdR zxvkonE93jAui=S!sf2aeE<3sd6|-Rt_jbY0X6_Z+KUH_DW$O?Bg97`9v>sx=wsP82 z^jAC&H1gkpCrY%5hI=a{BOkt%!}}@uaYB!vDQy7pNfTZ9OHhUK&nnR`9nO(6(^A!NYNuG+ z+S>SdejS<_=))APhT>Yktz3?}A^v6A5{6svNVfm;PKE+v{i0GQ))l;20v z{sz(yRq_hAiWIeqE4IiNZ8KeI4g?d%{p@xcbZ!I*kQ)c5SS!n>(Y|)~j>yT1lPWT|BhrDYkd3oCgVTC>KChmnUfb`8OHkT+Dd*wz-Sa#`&DQ8ZmFa zJINS0^#6=&vtDFZJ`6@ENOG6Q%?)93#H|=D?1WetOD((zL;A@^tlVg;Ie2p3QBbZ~ zQIp?s8BNGJk;%Bwq1M)gfX8YnFw{Ncc8g5|L{-GQDOSG*UP(d;w(Sz!+)%GTR7W}# z``AQavd5|$2-|V7K(fi|;H67Hb}=vG9Ss}D zoqYQ~ew|u&sU1zI#u&Nao50pc^f!Auohp*VW`RZuayU&_KECzM&cj$M6SM9DVY6#X zpR{GXE?zWOJ=TatnBQI12;vLcTY=*W=&_%(IkR`j@_o9E8Od{Ls}lxyI$7R06L0ht z4@v!?=Q}A#(&9L{^RJ&DD#w$z`xuf>^|@lmoWh+n_q$*~h*K>uqz2U8sBkh#4g@F@ zFZarsbzC^p)&xl9KaoDieYeyAPjFb+W+z*VGu?+_df5Yq~C zE=0LR#CzQeN7ilkA5ssklk+0Ni}JIiF8bd0ACk7fgP+LYC(59i!o)QxVz-uVIq&$F zHpH)7_Aq{L_tWr96k+x*ITA0dh$=(YJfa5+{($ zQ-0${k@B4-8J~1idO< z!KdD$cFDKi<~6{Z4AQ4qM((zVH2Jv&)hnJe95gXSh9RLJF&8zT&@dUiQsWKn0YF$s z=bYWD<@c?xO5zUv+qb_ju{bc^%S$~uqni-y4Ob54*KQ4j4+)y13X~OHgpI~(EU27} zLNG`tupBJ)Jg_$MSrewgndI`Oi1<1DI~N6qcKrhJtDg;>Z4IEW-Ybh)Pba8V?^Ij~ z7WQCS1kz+NK#e6+KUi1f!qMx-0)#elr`Py|Su?A00iqXTOldIC-7_?s`h?fV z8%V4c 
z%Blv6SJOBcv9npcE)r)_UyA8cb#=OSqe%nA$#T!058!y@6o8HLn*c5=JG*+&9y9V* zccp}s@|ai{L|k2^$Q4od?a;KbVmDgC5IxMSnTe>E7-4uS?LjMiCk?t7?q$w}|7cQuV-35ft};H05)kJi08}ab#DdX> zZZRlk)F0Nx%?us)obT}-+94N_g4+Th-D`bu5+mc(#RfshZpWVMTnC2;C3P{SoDQj);*y zs_^_r7=x!l5fc-!d@skJhunBQPR%6+-TT&U>X_8XiMfSD&9g3*s`5n;=r#T_1-$0QB(R^q-L(Dc8)a6$S_M969Yfogw{G*@5u_3SEL+Ve@V zPBaS{`iFE>3o!@IysgZ&EsG#_QZy(fqt6wg3tez53WUH3gS!={tyBIyU=F;WDNUV@ zN7E!(^9PiC3|`j%<1qfhY=#!4E~qYbQL?U78Tu3F`ONKxS2fgQ^xSqxAc6E7F}P$& zr7p#sD6o$o6}0261;%r1e!!K&xkzwjUsNq#tQ}zF)-De)_(fm2S>oKTuGqc)T69E^ zaw@&8;aG3QxlQ&(SDQ&v^;O%kTW7EwRMY?ARcR?4IJ1koUV&Tm@_SlXH7{y3gvAM6 z{j9gXv=8wr8MLJE)daI9h6I0;yYbkimqdPG_BvRgQ(Xz^Vo#?=c6RV|?>ghux-8t{ zgX1{IKwA5pc3U@&CNU*El3Lu5bGEd+9J71@u||~s5)=*QA1e%sokJk93_~7k@9=S$ zk~lN8v17}4{#3`gpKz@;4DAOP-^}QU8Le^1sz*d_ZNQW#yv{2Z>B+F&=Z~ceEQ>c5 z>+Fg4JE=61y$t-!c`efA^+$iy|G%XQ2kyJS>pIYMIvH{~_P>$e2|YC204uWOH0!d6 z2;5nj#PF+Htl>TB!_k6f?F%bw3~iIM3d09*7I}yQRMr36qQC-ZhM%qs?LBO{(5CIT zPZqZQA}g~H+?BRY)57V@4D{QM$15^k9VFWN#P&uhIZB{cWxdzasCMzbUD4ciDp#2r zP6{!po#plgi;mcX&RBMB$I!n`l>X`eq=HWLbg?8_r>%06YH zirG~g1iP%6#8E1*NS+6{rmu};XFSj$axNYdRhH1-qXuFm%!t23B_8diKPQKK^tEw1k zfE`1B#58xr9C7AqI2^Nd7ewOE{S|ZKzS6@_q}ZlYqS_HlKn}Z+^MQC#lO152;~igx z$Udq*%J*zQ_KBlQ_A0=y8_~!UC}A^BcRoHD0K)WN6<{==CfDj~pE<9~Q?aYLmg;&D(8PbwOj{M%roH!~fe=&)2 zA+;-CEZGM!G2)d&s5%q3x*pM2jpHSHjn7L4&#pL+6SpHG23^`Ez!swtTU}r^8FIr? zmkL5rrCd-hUi>x%RQiIZ+NZKkW5+qQ8vOES6$0c8A|;zpI_n)_VRy$}Y4bVZRMO+G z*8+a#()9nu9^LEN(oQ)DWQ31O(|gOzX;CQeDtej1L)R9);_)o^JDcSX6w-1ef6ffY zUA{NW^QhIIt}U5YE-@;}KRXg&Q9HhaCrW<5stIKm5NQ@o%Te`l!tEfc{)aSd2{UzS z>{a98*>guk<7t_!P0W_#ptHUSJ=sn->t7u9 zb0mF}{(qRHpLNEgiQtarqpqN3n_&}61WDq3gKAj^N6v!_tW!cd?5dv?-SFz3mt%;m zN8ZVz@9CSE@Z|8Eg7n=fav#N1LhNWBG5;$Gizr?8&2%h%WBE3|n)8tPn0>o3-Z(wA zhGq`(P1V$S9Fm4u?^QYN3ODAIo93LAy1S%HAxbi2=qJJ0KIyf` zsx!@}tR1FqL5p2DB_T$iVhw)n1&AU!&2a`@!LIi8U04=I9)R4fuT@T>q)CB2Bwe0h z1@Vl`0cI!&XcMb9-&})NbWz%tG5ruMr%=J@te<*c-Q`C6_gq6Ddtd~y0y0sy0EFJp z)kRgBKc%?swK$vMaCa=gB|Vrf?$#ZR)9{NpO~$c~1o)W{jO=z_o2cHJ?>)oI>GLC0 zSBET%*Cb0-SWAqtxYlhkYYqI*s=YKiag~PA__-wF@lD8X(aOvfMG3hu7gtrx$~^7r zdD{5&FG3$oF__BNzub;LNx)S(U2`I1_4bv9>tmHg5~~@I9Dw*XpABT6vwTkn!oMA9 zG*D@c*H!H&_7KTTJ#uKP4YaEdpGdsmW@+G)8ix}Sc+z8{j6bEmaKAZZiM2gSK+Es_ zp7;1YNpXFuE3IBfNT%Wr_2oXDx;EvlN{e4!@3@| zB=1ufVyN?vtW)ttoxdoSG*AR=>h?MroIa82aN(v)C z)qAN5d)4bKMjk~IzJNztl`-vI?AaKh5H(Q~NN9j1qQEutaiBhk+RQ76aP&ub*)N+uk)mj#Xv9nhL@$!9H!=9>u4@g>G%KKnz{3)~b3)o+cHN(|L1s3Hdb58> zISG8;n@xL2KN(I04of7%vGQZzlcrUeaDm>-BHbhc3b>Ygosve{h+82n*7Ia#WjjC2 zSc3@_CKp39+Y?u!I|~qLJN{?_b(ilbkFp!MyD?u9z%^@AJ+Mt3`^aKQ{>X?9o^E5$ zu~SHbC%0fiN!!aA7C(wx(f+$C>iIqJhGreg5X3e{6okdczo`q3+y=LqteI~$7;SSu zO#lrMWjjicm~So1N-JikJ}0hdg?|te4(8~d=p3znjrdahX{qzeV#fZd@wK5Z9{S1m zk5otU6K9PTL}!go+`0$x?%Q+MEDNkmdy5WC3lUcayX>boF9R)Nxt^rSI!51sgjK4} zxo8)ACj8u|Q$DJ0P`IY#KO~IDaUA`WH7`EiXUd9Q4_NKykJ1)*8V1Bfw;!cVT+GxN z;#3GwBRuU;V_EaQu$vj$d?rxu%%0i|aT8hTnw_69NAvKF$x61Zk% z%t|UxXIO}m;_Uh3_s)-BDM3WQoIbxq3$dVDO)co%5rqWj)AZiK{+L~cH!G~qSQ-jfL%D>1@O+@ zQn=R#2?)a)zqM5n<*y~Q*8(hcX^1&);jtnh!mIJy#5~ig(Ohw1cSiTu$qlIddi#l+ zOV@#`?enRJsMZ;vFNNl!KLiQiS++TYL#}P$`fWl^iX7DcUStY)Sd?^YNXG3+c=3mY80A z$e>0kdN&zK#ebY(U70mc&PsM^CI?~VjN$IkIPENdm$SbO#E}55{>Aa-&mM5?)Fz)Z z{rf!Yrn0KN!5`(HsX&E|-@K*QFqzxh6P1d0SOR1A&vw`t=RGE}8|d~ICz1M;4*{J;##n~d zR)XtlALEKViNGFTz1{pVvN!4*mru2Q8aJDMH7!m5k(^RI$pnJtUe8~3?CF2g<>$^d zEH-%-{VRysnacoQ-cd*I4FLT0grf=so9E)w`TRpVeKGALkaW^5{*%P=uBZ*OYLOiz z;x(Fl?Dd^#Vd6k7E2*KI?Tcqf9iimw2I#oY>*FWF9|xtOLg*G#5OHVKd4VgA_G@!0 zNYP8hLfk{QfG(S&u7UyTTSaa3{O8z1FZb?+afcZ4bUT&`ZT6YS1yD*MlwU!S&{lj| zw{we_#CU4|t7f^ZK7^Tl=$Gx-NgqNQ5}T6yiOyTe2D0l^XN-LiG3hH(7b**TIlQJ> 
zqny-|oKj*fY0C`G(wh8(z$I4Z5Ht}}J2saY7O8xdNN;4NSb~VUZ@2iiH?ZGtSvPHS zOQD{wvPvR7k8e2MM*d%F<9{~sm)}hk@)9_GmPn+{R!52P`mG4~V4dMen9Xs&zP6g) zN!eF27|+)_q*13Q;e3SQl7)~IX3_Vb^?{3`Xd)IjFT9EZjYGY?C2~VsVcHD$2QyYu z21$_M9jm*R1Wp&Ck@2+x+sbFr%QnKJ0j}9DcS@<1SoUHsg3W#dTH+eOy{Mb$z#Y*_ z{&QPla5H5mchYf-k#V{s$<1K-7(sX*&IzmJm}lL|*de2?Pz?v=5-kkIl! zq^BU0~*6&a{c5!XAh zLFM3+>U+8{zK$A`t*rfL`ngnxyQ;bRNV-O-P3zpf(ZV&Arr5mWvX?Qr5NmCvf-8_y zrhLs!yee;mj!KNrDD=MDByaM5m74Pbss(8nclui#%+0Xv(c%5hT&-j`Mp>0iMh+rT zir^-)5asdt8Sqq(CST1E*C~^K$A)9STCK5sVxURaAlBP%tQ1UG9q!(ju&$aTlZm2f zS{fok_Dh_qih(Mgw&4K?(jMz(o~zPS5iDW~>9<~t7r(X@Yj^FoYo>M?U14niY8T__ zKMy53#0pWHGnA?2l~v6So4L0bh{4DkrO?9b=ETQeLpw&qUbr^K6)PK88&#+ML!zbP z{B)$oFEBGDqh~Xg5*{PYJArcspp$=_RUUG^cEBgW7piYSjdX=rhZ*hJG+TzwsW!}s z(1)Dj+dDl|n)lJKmt@VmR+dMFG6RF#e-CH>$01${{f7h#3fC%2z$*9QXeRx$(JFO$ z#HDLHMMK0dghfdbqe#CW?^9xY!>tT+bYZT_pb>noQ-7G@G&1s!SK!9wV}2&;=HJ#sIqWDB#78LVmsY%_CKJOML8Ru+fa2P$vb+`bOXAld zNy?QEaYvF%DT4Y4GwCyS!mCs~XP9CSjvXmmqB2MkrCt0=|7kPk%=qWasGfz;Yw@$a z-Lz{q0F|CMk=ts@yogH_Qs^-Q3R*#hWb~Nly=)n*>bbmaA{ij%9nS=1obU*95 zvH&UGJM3cT0m020cMtrL=zOF_t^KvXwm-Tea#nE*L;fKtKb`owddq6*7FRTjw(c^I zjf5=%`nv$(y7!YcckPXydRQhc2PkFVue+B9a4upV~Hd(`%>I0E_>xw-rK=b}T@UdPD@pIFatQH%)?lfDz?l&aZ zbB04$E3W1>S(H06gSDOTgw)D09Nr?BBag~GE_d=>nC^w;hf@Z-X*HzI)4ZFXVytH} ztgBKK=yqrEz>@hRCSd-z5hvM9goG|zPv9D`*L%D8nXLmcaUi(GXGq^9YPGmu@m>E9es+Ng>B6T+B z**SCitSOu6%YDU}p-e8>wq5f{&#NJLBiS{a10C-?d@0kqh}@ToUDr z^6g3jls%QIs%0~G=mk-0hW$hG_KFlKHQz$lCPthI&1dj$WHz~glypU~s1y%sENPM5 z_6KgeM7>D!r%Q+l|7(#>dV4mO3^|AKwGW@FZspnbPY{Js ze2I~>cqVZr93t4x1f6GVOe;SwB|Vhol$vv;g?ic9Vzo zk3CT>U>5*D;MzqSevP03tnzmnkrcRP!J*+4KH&+yYGi0bdjmaf8XK>^kU#lN^3H|f zAd4IEHWVd0D8Li*`rB1;$1MApLcd~iyl;USFCz~3#mdz}FZuDnF=?|8`@7E2^p7RM6FVHvSFc(r2P(HGd|BB& zJ8K0l_op7hSqw^AIA@ygRw`_5+k%~Y;b%mw6k3=}pNfz?Zg}smU^i~U@)&UT9gVc?HUp2mxB(F%ERGFI57NkBXy6)V%$zvx|RSrj(2D7`mx@<;7 zQprGYi079#RrbBj##d}-yzgSExNG#Hg8~x5P;`fCjP^~Le&BC{SVEEggSvwmh*DG- z%nJJ3BH;}w#-UZq&XDN5OSpiYTup248wgk1|Nmr@iSQ)&+6<=8fzbQc$!sg zkIwuq6=+m!re>aPjlE>-jFE2!-oi8D+vxUEP+GtEJ!DT^?o_&1@LVTWdmQNGt5Rp} zuFnh9B*^v}EH+2>5OGy)*UcMUme>k>q%EM~Ccf73A^&3?jqxgNI9ik}a&GD6?Cm#e z=n+~sc^KNu^k~lNwWYcqyX(djl%RD%RYl6mz%wruG$Ew&BxszLM0lGFL@t7J)U) zKetP_5E4e_=TsU)QMP>iD_mP>dO`khZ!m{OG(7vr#)t2MXk2@qss-Cb#6%X+XAwAb*0^ z%m?P!wgFbbtBYJmj1^GJ1y6H-8Nt*^1fA;KgN=ldo^LeVrLd4(yw7{V1c6D}$HKaGQuv zbF{0bFT%oTP~$>Qr>${J#`mC_g)?0+ceS`Qo-P(wg8VI{A$*g=)@&ZWng}*=v|5rI z$k0Z8B?3lDe4~OFgW{79xm({*WNwn?(*!evB}%LnL8ob!!JLESFNs3iAql$35Gt92 z)AdLjK7$-F^H*b~gR?e+bV^0rHy^7PnimGhAlIflan*lrd`M0cltQPRLoft zw;wYutmG4gT3;E*y=xHibcT0z^+YD(x4pJAtK5>1dyDIs@v)5IiC`O*QcSH=H5v&}~We4=PNW#G@6WrMxZX@sA4 zAE5?#mOLLuA>LU2+s z_5l2p$z)V`L1{eUm>mA|6&N*@g-K3P+zjan49%jR&H@@!B7vl;IE*A*=lg9ICfLiF zW1Tcb7c{%Lh3AVas*;l#u%k+I8+=oO9$#G0%2aK58EkRrz-g-h)^gK!(2Cvm*>?Vt zQno3Fl;eW_=Gl(9LVTov4M8U~e28uh#kNpi-}sqH|J3TmbSPZII9An>OU27{E%x?h zu6tU(djBV5f6df8K+efD*CZKq7gYJQ!P9Wc4Mrjlmr`$Ny`_E|X*99@-nm4H>I~7g zwG8iS=Tn$r1Ju(lI1m&3vR-pJg(qS^5VJR0$TJd^ysnN#SlFJc@mzN>;G1dsBw7_e zH#SrPsCtfdkw3b#E5(~+8I6|lipO`H>t8pVE|{T$xUkscm76#?YQhR_Ta3xK3gt8y zQp)ujnACA?Kk(hRK23}GU=4g~PWzy6ReS33AX5Ke^s9mV*rftnkDu#tb(e8(gB4;^h{^z zh5(rC>w8-rWc1o?)3o}Q22us(>B1$K__Y+qi8X^DGGyLrOSU#ktf_j_zRX8oH&|QT z96Fg29Y{5ezJoYE&gZHDk&?+EAKR~Qep1o{GZZZUY0xQ@#Mp@O*|D2qeyf(aZ9~n0 zU0qnwjN$;{2sX=jww*|Lm%4G2jE@t*_|X`EG#2rnS3_ z_WD?DFD7k2CepV|T)G5+d|oXdQRe#JOCHh8r)(DJ@~U9NC;tFY`Jy~|N}V30Fe70N<6Uq&)x|YF{{ZtTz*9th9;JcvX!?sjmIS<` zn;m6jPOrG z?6mxi(SoE9M^XL6G7_X3f(-~bURlpXJnxsT!4ADRrE9XV>P5P^UftG?Y`xrn_&ZTI zyV}+}A=VQrtL5s;m|t{&~UVWo0nybxVL6&sG~OQQYRu{-lMZ9^4d5LXDPA^O`@ 
z8sypPqv3HsNtY2p%cGSPiZM`RYS7$l%hBn1*hhy_Wst^8rKL)cc1Kpq_V4BSxZr5g z23Ltunmfrf6_J`+$0IbYR*`o*DQaMZb_mP~3P=a=U~6=aj~u3eButi&HlsTdjO%xa z^_{C#aLJ~YH{(LGTsAT(7u%lJy_05Y9&-OAmynx_8pE~Y2<|P*whFig>I12?@3jAU7Q#iPh-;(%v2f^-AA|jSVk|! zo|OgaDPFr?BqthMmW)|QBF$C~B3t^wW-_dB|;q?ua+IJm-4fY94UTAOA|{Oqo~t(|Vk zgCLlz5^p_5xz>or)kJ%jDz5f4_Mz2736VfsNj8*KW8G#B>%{HvVohj=*@(W=RD^B) z9(!%eWxc-E16p<=a9b+bQ*cMzejDBD{Qm9&NRE*PBww&srJ9@Ba<$vOb}j*=XKIob zf}>#rdA@&Z=HgvkP|3q8sUw<#6cFJ@LpJXAe5;o_Vly<25hRnt1hE3S*53DTbMvtt zhfOroJ8jya3fJpdYO1!Ia-;8V;Kh`~B5@+(4WVmnkvJRXDLgjwUkJ#CBuLo{MlKaK zwcpLBuFdE*&4VVS%cz`-Nw1JKnX}s2Rb%h|HY3!`NSc2Qqic<)G2y*;UPT@qxw{w; zNh>m|(5a=YuWAnrix(}a^B))K<)R5ha#c%`#4@7pzQsj#?W9>dI7tx|nHu6;h6!5j zrtOuvmhZ(}X*DF&xtgYcbW1AqCSI?$^wP@&dWQX%6@pu>R`!iUUQro1n+3c#@bhOz z!pp2>HZ7{Ci5jTxq;V=D{Ty=6Q9NpcK+6d$+7YdhwL3XCyzJQ1={0jav1#rQt1%%I zQEg8(S1R{cE;QhG#um9zz&{ZC{{Vjs6w);mr;9 za|qv@Yj}&M(*g{u8kUDc9Eg<>+jN@A^KS6_c$#S(sSHBx=_D>@U9>B1Nj){hQ6}q1 z?YF0(4$fP5yzE7;&`QzjVPNR#P{_)5F!;tBTdtOEThs=UXJcSKTTP6$NLvW+aT?{z+VqnXaxtXklIr5LA=FH; zmL%J3b0n$9k-LiD(Zl;5gO)vvV`C362FFutqmC<4)3&XT^7r60$r!A*Qi4VXk+J`lAsthM3zC(T{ftA&^oT>{l8ac z4zvFNr&`V|^ep_VSiv`7qM9K*FRVqoW?r8%;3wdDs37pRB;r`_rTmGG!XgAU2>K97yTydR0;SRMlyF zkAcj6+0@0%w2e}>x`!jWnYbE;^J9`Ze^G~rQ5#1jM%PvZ6+TB+%%3~=E-xn5OQ+dF zs>GuX>%`TKhordw0RFR-tXhL#^SrK*OJ4MvMy`%(_S(o>7yFgW^!(X~4eYXimeq=j zt@e4_gE>LH?D~m43>_CVf=wo&PSlxM7+0>=7gNr`)7mN;;N1tHh94br;iCgH(=|;% z>snUa+b|UngAH0^P%c(wSna&_Ix?qf>_+-HrEN+QW|X)wfM{Yt9g68Bd$M?rjppF$ zH1bPh-W6e-DytAsr17wCSryr1wWNu+vBoeY(LD>HEmcrPZX$)@tgV$ZGkz{TuR|Ph z>I{q)2+`ew75!n5Hug-HcCkLHDItba3#A+=EUdgIiWOv%Byy(h)wk;1aR!@5D@PWA zOp`o;iISi!F+`y7{i4Ku^os9Cd36r*Hmpe_M)K6`ANPCKJFIr|IF{j;TMVtK26g}dt-83D_LMApT+D$TnwQAS z610>|%e?n@mk4Jn(M=+kM{t6OMs6uq*xSAqdwV;$FUq0Q>3B^V!K8=Bjz*?=T4~xy z^LCxR6-BX|6LXycPp4T`>!^+s8rbK>3aq{yTu%*u7kd&fGKlNWrU_M-Q7snd40zGI zZc4k#*LvQ|?qVaQmN#~cB=SufyE#itaj;dRq*kc8;>~6EK0UeEBqt&cim(?t#}Y14 zyLMYKr-U=G3W-pwd0+yCR;uM)*xB*SVq2)3+I1I@TVz^cnUfHu~GoQ*@~}0#V!^2us}69%4%^+W24~BOm9$pX8@xEQ%qDI zqE6m`Fv}BX3%W)KoEKfY0jAGOJ)_RI0h|+A5U)Y6nBO}Mr(ltunx3i~pCf!-mBc!& z3QA*?(U7%i(pI+gU3OmWn3qd8gz(fqNnmHpN z4VN9XS+r1o<{M4SyR3T~#Y+cB_C`kxHR{j_sZNaJfZHP`ZRk7+eKzULtpk zZm*clyN1+i*6me{NpFj5Eq^h4TlktDVHs8pVp6u!gJQ_L7Dvs7MpoKD1d4&=QFqPU z?%?viS9H=!O)M4_17wsZE85wzb};ZD97&BCq*Wnf>)~y7y?0Liy{t5RG%>?s?C7Hc zR_^MYw&Aq8B2qAF;1G$>Ek?_Dj_%Uop${2QF1-U;vX3HXtKHe&w(Z;{=zIo840;u- zdK9+oyiLYl?XI|z$cjQi7hSfsy_$>~=#`e&{H4e2{Y`>RJdCm0p$HoV2WhYMjrzuB z@8UQ#?-v=-*a1>NO0$==HJNuiIWuB0snoeL5lxsk>*^Rjp+}|~u1}LsE>^COTz*jYtx4oL4ccf+2 znW1@t77W?IrL9$x)GfN{V%s9OXA2N!kg^Ais9W5ruMO$iUDS2#J`|v2mNh9LbsHG1`2O4;FF}bmxSly~F3~zfjZrz!gaIypJx=CYy^AgpT=!)@; zzO}dhufyy+U5#Y?%0`KFCE|)$)G%oU%corIs;KsH5g#)}pRw`tL;AO;f>gH1xuv9ElB$Ra)qiQTI!^_#; z;o?~$o=rFok%kH2v_U*q-s3ZIm7di}F7EeuZcAG7XNiSP1vLA63B=DUTKRomY(_bY zX~c^ORN}&|Td1mL;qEdnXyuW3>D}GLbIjU!G|U;|B5)Wo+LA`f*GjE*lJH#?OB(3% zVmP2+#zkGMNCl5cEhtU)&D>u0A4evV?ROI8)d@~eI%42cgy?f--@CVNOa{=2osmFT z-4$+PGrZCxoQm73o8Dr!HQILNsPpQy!Z_xg_=YJhq>rbL$dNd*W=rprPPtZR%+43y zIQgtos+*>!UW3eK({yrdEI zS$29?`3y8<75v`YLiLThx_9vIu5kbj@ir=a?sCEjiaGxujwn*9TlOCCNYJjMky zvp4Gh0Ln?`-HsIcnbp>}#`;o{$MH=Y$Sw4h**mR|^0PG6^2cZAKf^(Vl)}0VJ8ciE zB?%pDkcD~5`TRAH^_U#LmUBVPWJ-yiD6NTDb|RR{{C_#s6+Gs}DsAdC{{W{OIz9lv z(2l$gUCuZ>>z4sRr{)MI17$oZkNJq3yJVQVPIVscYS`2ygzEH%XX#WfyH8gAUaJkS zjT2kT)YIDZZYPJMw$bK?(bPv1VWWhWm8hT~xVNBpzK%GWQc^}1vGQ%NLHWDbRFYfO z&EzN`RAuPh`Z#Fx(wVGK($ivj#WvNYO)KDbRk#tSJFqtG)O;v5*%V*ba;z&yLIdo=I#2{q~ zcaqb-_=`!W(rL7|)g&NYcEQ7&#Aznt?_DD0?jqtISEJR>%yqV`QKC%Y1WImxBh0AK zK?v-sN%Jz}U(0lIm)1s<_K8)DbPL3F(rO0d@5OYhyV{Oim`G)k4IZe#jS95SD!QmE 
zsFr!GTV1WX7&P!N;=VN+%|wZ%jT(_W^D42KL30SwpB_usJ4(@~2^Q?xu_roxc9uS5 zWn(6prVxfo3j`q*%QW49-p~3gFU^f~Y%MFzCIlX;L#5fb_og48`MKFJs1?nO>tUm!D2ujTs%=QFHbHClEIW3kBbITwuo4KRLyVA z!B*X^DP6lZ{xxu@987u$ro&#FYl6`sjmV|`LDoYZ&pWk+jnEjX8>Y70ZB=?^dbaOw z+*45mEvqOaR~trKn_{rJ6sxFy*PVpXNU@2;V7S|25_;%auI_#|C5aL^)?zASu4-k> z$20x>8Ul8KPP%pd>@z^OT|#baLF(L0)~NiPEYOBA#HxU8pkjKuyE(B2lUx;>iNuj> zQ^TueVds*zdDuxCP8LnOq|_Sq*5zhgJL#l0Py|6mRRDIW+Gp`E?R?xkODmNvL1Daw zIn=tFcULdtVR=6-w225T<#vTTyf$uylkV)`>)s!UL^G3HR$EjFBwqcudDvm7X49gE;c6BoA=Z}# zQrVT2az`8YF)pV@D*BfQ?H~n-8;cUHkyz`R#9i+88ee!dj*b`=i6bEkR)}k{cdfgJ znJ%*fL(4SNCYk^dW+B}afo|h{QiE;RZeevSpidoDLI79-M-sPXR&CXlcU7Bu>~QsL zBne3ge8ufGg}gAnruK5zaPQyjCLfqWGb4C6gUcAltoLQwdUvD-oyUUm9)tjSpW!F)|pIP zJ0dx}GWWcAl~MSuv@C^)s&(3)lIFWMRqo~uKAup)M#7D8R;gCEwwv3y8uVzTVxd|$ zuT2@%W0N#eapSgRWN#ldYvLui&`jtNj~oyI0-|}8oY}Kd@3!=x^4Pz=)nYv+l^RLr zbwouKW@a{`D)qBA%xTkEAd_9Gtg~8fRJSh;JJxQq?#TPC>hPTIA$FHdqmgbsPT^x! z`aE0@d_yDh-eEjxQCp*Bj3tr|?5hhjZms6Ey{P4kkfZ}bcAvYwfFb}5l^-rO4^k=! z+tpo|@mEj*cJUVLDR*6}>vk+Uolttxk*um~)7ghN-F7%odu*$-Y>_c$EwiVFyE`9p zt~EaRd&{O0=+$^y7~Q1t5gE8liQwkuI-8$9RMEw|+QZJlRLIQqK}7_csEgTN>fyMn zwWVN5Wd~rnLR>d?=I-xe1c;k1k$`TH4YaN!(&?3-Q6mWBMd)J2mLE$I^B#8$EB;;< zu{Cx?ELFQt6Eex-NSoHpH;tJ*ENZ#*Wg&tXTEvQ2RpLe*L|EmGl2_GmyE*kO8?~_8 zBDNsab9PyDl1#eS(#;N89?xf4BaP9b@NKyz?cCBbNXdw&(diX*%+hdTj$-Out0%Io znw8PC@dIdo3P1# z;ySXjWow%nzF?Xpj(s8tWa2}@h6g2MTlCSRaNV7}2<&FR#Nj{HvF3LlX_cZVQdeTg zBbSK`(xWdHRU9rRc@r|pYBm}zQDZO$f@T2G7OlSB)wicJWLujqG;qPD(V6swlO(!) z^46fF`Q8a)OZWUKw|v}B&0Z*x=AdOn@YK@9;UT`-B4oP|PwxJgMXlu^$cql8G_DO? z-0qVv7-y>1NiMZg)!|!%qKlGpjW(SLZV-b=5sAxkp?MbUQhrCv)a_!>CYm9)`lpZb za1&F@bo>a`m5bRn4~>!dmwC;b96n_Ug{oI+1XK#rU4yQ{jkjzDRkx^N?eQa($n^pa^d`A5?7f zmJc@EG~Vs6F6Mf7xW>AeqB1G#vs{8W=Vkp4w}k%yt-B4P9I_Fr+Bho;71_)>`XGBLQDM22xeA7oeDt|`^U?JXdnP8aSpFq zoB^s%Gy&Oa>NYX4JV@_vReHyFX6y|_i60VUcv4Mlt+cT>6uS1^WbGpTW;Hy;q>W&R zi{TAP8m8(;czmnhW@~$|gRPRUAnpR_2aQC8)&g5%89r9qjTv zlI4!uR{W2u(ZpoaLI9D;XGAO*XOa6|q{W<9k&alzKrLUST_ntis=LkfaPUr(Nj#NU zk}4C?b)Qia%k&N<_Gu!Tle4keXSMNWTcF5OGdt(My>1&tuXW(j zZgED5$jyA&JD0XOg`frft33b|1H#jE4ZpoG{O`^E$8IZJQ>z^~s5t4a zHk?o#bl2dnD&~}?!+{o-k0)5Dz>VxfS~WJ4QB}9)^Ke5jiAvf)PyqtC_Jy{gflzuj z*Devj+6R`k9(T)zLzL9818?1%9?v6}l^uaZHB#i!LjCM8H54>d=_2m!JnyB2)pHUe zMMku*K3S^ke_3am>_ zA?Y-!Vt1X-pN%|%)?E}tG>E$y$P|u)^_$J-Vo7F|)?JDf1wkgc9lYM}X}uh9RRpaP ztpOCezbLbAZ=HsmTMSl;DCnW@c6XFzU!{lD>RPeg(LfZcgv~Ls&9Ww207g+Edf$!sw=nklRY<5fkb;3 zxVd9ok;a%hJsY#g363!tstV9sPpy*O=Il0Az?x<+sF$P@&`(=8dh8uut}s$^mdHxc zMr7c~%Ifh4Wwu3s4&KeyVo2ao1=EwRfVoVjU)(T}sAotx&1iwuhxyGcEX% z2bHxkvQ*bI8f;qiZuUNNVkYIusNhGIYvbX7idC$I$*FULq|^Ya;a(;+Ry}R!Cr0vb zr_sadVZTsV7{aR>=G%3-Ln^B8+LpE*!^F}?NS#WHkgfKMR@Yl4?Y2xJYhrhn^2;QM z6frp@w5w!KOSHY5KCe*BAPq(&kgiu{oz}H|E7s*k#8YXOGQ~KhUFf#aSx_Z1x^}K? 
z>6aPmZxzZyfp%73BCngf+QqiY?yaz)Ann?vc$-`9yAzp^q$7myHtw!}{bpwW0J}rP-AJc- zBIbO})%l*G*w1F_Rf8szPnqeb(kFu*p0hOj(zZE)iEdPqC8aD$%W!kddbb=JKY)JI*FG$Zcb!ERBf(Mt_lji{GWjauibY0nPZ zyxVZWxo0jSlmu#ZBBTm7cTx9cz~zD+29fp7kV4UFBr>f?;z*P?IE@wFP1_>6<=w|F z1Zf}r-WIYf_qPg6jNTM|m1l2(5IvPFdI=jVLqXU1xQ!$vqV(Ie z(2;je1iZ6VCaz?SUS(KFSnEstTJA*dk#QMP`8?KK7ws;HBopEEO)YBo9dhAVCtrMbvkGWgi3E@UgT=hZVu9;Tj=9Oq2=>N z+Ko%BMmjKf*_(&rG=U9o-# zJT`V#dDBwH0X**@Ynfuv#SdZaT+wq5WO6jkg`N0Nn9Xm2Wbx$#ql1=IDXTA z8qW&Vi!KQj!)hW{c;mFYg*L5w%zoy_1_|fW>0@H*q0D>zHd*maoFLvA+ zbwKi7aNt)_5^QEYdYvL=lls`RH`Ypuxc158U0*D1sjSl;BqViS!!$qIWxH|T9bkYz z4{HI7k<+%7AID#$F_o$ZMl2GPZmXJ~MX=h9G>;CWpv?mWb55y6I2xoq-9&l6Ot^S` z;pM*Hrdok4Awz5@#4udaX`8ji)5?kX*pFAp2Dee9B7*HqOjK8PJxp@u{;N|R$-8zF z-QI8BC|j2ST>cJs$7P#X)G>qlqhoH`Xm96sWU8Q6e}2Qw_NDT?cLtD6KYazQqQX> z%wdrg9+x9=;D}PR+_`T0c+%?3@y|B7R%S@rFcHs&(MamC-#x?BLl1_#(lfRfL8y@< zGDc}U&b%PoXuXAxi@1$5_)VFysng4*Fvlb|tkAS_PKviHvZRjCO&pIH;%c6EW(xr=WgY}W&Zdq5BD*>~mF-~4g{viKw;GY;z?O7j zn=wDY{{Sy{05Pm|01>v@c79B7%`H0t!rRxh8g=c!X&f!o+g-K)0HYF5qLSTSMGR^J zn(*YbdH1ZIZMa^Rl^CsPcF@pBwY96-AB?z3>;hoL%aKxQXsfp>jqTje-ob-O#C=Xy zE*oNs&<44!*^xgd2Su!E4kBEjf~4%C$Mux&c5lOIn&6TEb9T1fckt)LTHRc7JO)c+ zL9jCp+cx(JJ63%>G%mp+fEt+Ak%ujC3ay>m=FI+Tt|NtsBml+tT#_i&8*6`O4pdz@ zQL?*8P;4Gzu52Xot?UVRTgg$~>$I<%+U()4uL&NDl;)uB&}3cPa3_(Gvf3&MU^;E7 z-fOq5!$58&Ug|leMR%{veHQvSl6f2*MxBEj0efEmkz$>%1@0N?+ z{9Eve0j!D$QVFe3@wc6eCGu@G>7e_q#IB!Pv4|T++U4#!x9H=6Ff4U&i3?N!q`Hki z7r(?vxSy+OA$)2f*GytbN-YTO0@gld-pj;QR=&aTa>o~-Kk0JmqghSkdK zy|(PHdlG)m0xlY(P>Yxzg^`=J-Me{si|FB-Q^0K&#SC*1)Wmk0zgN40bf{v0AtFRO zE-gwZ^X+S{>|v(YR$C=3DYqjvvB$(T%AK{unj;~PPRLvhgP?3k-kTcktG#AyO|O$z zBTDrm_Nu5mtcZhedhbgU>SW$IxMX0$2?dS--c~sp zUFT+vzc(JoK2?%nz#6iGubuiWP`O^t8%1x=#5%n?)&fdOjhVkw!e`u$*5Ryc>h12~ z*fP3Itjg=K(T8aW-t78VT`rr#j<;S;-@}^0Tr`@;5$WVHlNj2qQ&Huuw&jm58c5?v zW7#6N4B86znR-^9o?k~2X(KU710-y!DW_VRxj@n*|y$g-r;h7*j6{wHD;< zrU499M)Ob_eBIxh=VOgZ%vP+|Y}hiVr}{_DcJG~zU8t!yarU;tX*|nQPm}GvE9(Gn zVC2#2qE-N@k#$nV8IT^#d~PECu@keAhd-H+!2-N`D2)gay|?R~n%ZjhXusHKnIhkk zd}#CM_8sp_&oJXWm z4ABs!yVU9Xzc&Uc91y_vdJmGApN7Y2B-ilbIMje{d#mLvcGADm#R0gVB}g^++sTbb zG?-hVBrHE@<)=tj%4b|+ob8#85B|#|@*yMBqYfAjKHYII#t}PyhC6ZV< zaSUCx1r;qbW&Z#z#c1U9i!8{ll6tNyF2CLFU^<1o7l~+~ve@kp5qBgs+h*LQ=4DT{PTJXw-!QMQI8oiNdU! z+tNMQ?p4l-y!avwKyP<+DCnAz5>Ys*{{VN=MQy37TK==x)mB~hhW$?y-gq?ng1`Z& zF+7o-rd5%p)TQ#WTDMyroz};+dxg&Ve>a()k-#+&iR~jd*6?M^mQBWZ+jlj%)y?l_ zfax_7#%_~S5ok6txRMd|j?Etp5+`Wd>vE;okUZ}k0gJoNp&U?FN#<@gPix2nBAU>Y)2muMi+jDM8WF@?9|U>zKJ zCWW%e=Q8l-{cbNpzU=e1rAmU@85_=At)jd8IFt45f%{ehurVAxOlJ6G9V%m;a^~LQ zy^>q?cm`Dp{p0{w(oNiKHxDSlrqBp9K2gz#q72Y#T~EcC6=jq4cp-s9-ST`i{lerWPn2S^ zs3*#_2l_ag4IF_|aYs@BVmrCJK8s+&35+uqWl?A~6#Kh4_GpqeY^}vxhe+E5TSRRd zSAC`?@KSwRnx-^I(SQ0ghHA(MbM(B z6&b0XPS)!A%gVI6VKEnGulOgl|aQtiE;!F85!TY7aY=$#v7Tft1F+EX$f!w!_}Z+Ew1e>iKsek}Ya0 z@!M4Ni4!jLb2@QrC)Q5e>)Un5GM`-6YNY1+hLr5u<}ERhS zt<1PsuNLnPgS75}`B*Urut=qLos~O6@WyQHt9R(I<6F!YSvCTSy#N}ZHk*yT{{Z)~ zAk(Ud!P20eee=yq5(n?Xh<8Vz0rvS zqD7}wwkQbLC^Cr~cJ};FO93JRY2hkP+if-IYj)LB>3&PLmLVlUs;w!P)@|Kx?c29y zy9Eh&9jK_yXku-+w`m1D<;jGPN@ZsoS&*qF(giq;k~Ne5ESbFQC$%|}SX$*RO|F_?_=6tMT!oVsI5-_TxujuR}sYw zQXnB%sct3L3X8lyK0MaliC``Q1T3#9=($6NW=+|ZM*+GPmfxX<(Q^UB;zWuV zlT>tDmwK6Ed2G#_&f#KFqefjC(Mdf4Smmn^m3;g($`27{DCBHBI~9YD(q~s+orTvy zGBGSN-A$*e-!2a+o?!ZbO>O9BBXi<4yU=SFcLm&*gf*-irI;I7t+!lJ)9Efaxm{p;8po*-jf8M`QQ? 
zek6xeAxBy&8w!dhRTbXK>%)%h*0%l;Ihkv=1)3nW*al}=EaTjqKR)gz^|;mObsCuE zfT1y~p}A$=R*7U=qD$7vJGZM@ZPF(i7cl9~1*Ss92^qJuSj}@M9V1J2W!+X> z85)MfBBN&3Be6`oxaK;oGKms7XDlm40J_Gl(cb*z#*LU8B<2w#0pzk){MZ53+M~*2K6OA`Ai&8z6W0gD|RFfwTCG{RN-H0Z5nvHCxDI+MPTyn>L zd0>%OER2cUOLAP&%zQAigpD|&(xfY-Vi;nHWRe2R&osAX@YNT48jVLZZC;`dR+%7z zHf4xN*|joD>X8w*Vs>6Eou!Fn!h5Hvf@t(ys-smlsEG9dN*$n%9Y2X~b;|Nwut(U! z`>pP5X$gUBW<#e)6GN$wNN!%k!;yD$Z(~)=x!lR6)<6V$K$ira zf%US-Bu)}(qq&z)9}m9rWoB!2!qXU09B`_l8wHVjOo~0DRU~Xu&Ek$GHZ zI3A&60_L<1(`IIF(|@wK_KC~Puz(3_vaNP4dWG22jSy#MgG&_@Z6!4vCtF8OX#W5^ zAGdwt(&@ER%4eEcCJQ3R94zr$6RF`@Y%MEDt(yJptAeTSlw{W^0vgl2ZA-56e;XOS z&{7Y^p#BRm9xrrgG?!@XCrGQ&Pe#y;ZLRTQ1tZt6{f@R!zQG-gh;$j9WJt?h!VMOvg=n3Ke-yHz2Wz6 zb-}VU;iJCYmJ2 zu=Cw4npw8naH_Z+sHIz@m4B;d?&6x=4xgB5nAM}1NF(EpXv)Z*j%b^=8)5Y)m+D~E zKTE8@^-yJOltGnkjaJOI;Z`{gt4cc(^nvNE$MToW#gNJ%)SifDAZ-IL3QflT^B&Rz zX*q=jH4q*gf}QCUQe?ZaI!d@Pa~Kz#XwLMIBCN!Ize4vxss8}OJ>RLj4KE@h_)m)o z50}lFCgE!|-;w5E>L%sxR-tN>7L3%=_~$#n)oKzq^P38XPdsRj43S8{F9sP>X)3vm!3AxeY4a*en)i6u&fm>}s^xF5cUT!(U2s(nifP4zQQ*dr z&$X5|v(u97apa9;<*6-Y%%DihR5W+yHw&#-P21U*9LJbIV~B{Xg&9dxLIQtB;&8FOY_b5$`I z2cuSCii8-r zi%9YuQK_?|WpP#q@MB|9hf&#z5D!qO!>K8h3Ikfy_I^yi(!|#icA>4?1#*v!E!(#C zu)19;n_8;uX|lO24G80#z*nXGI{*DwZ#Yq{61L$r0ySP_&%m@le z=-Ik(7>Y=%YXQ=B{{Yc~jB?b2U8B+bI59_f29$8t+qbV}Jlqu2DGZ{F+Y|tMC(4d9 zVI$%3NNbQgWoqRayMb=q!^tcPuPPQ%3Y&o-v0VG>+1~S;E^Io*ArUiI#{6#X3+g;qMyz32r~$2_wHnr5(_e!VM$z7& zf;IzPjho`__OmVqo-Hd#9A#7$uFwT7yJVZobF6+I4xd{YRgjR1YSjQ{GEN5yuiR7J zz#4Z*wDl{}6{+4|N64#*I!La`iV|P6U2eIy=Jc`cI$L2VDEB3xt!*+<>Am$$fvC{H zBy^~`&DE!EyXv1q3j*76RG9^8EAmlowX9XQ610UzxQ+5E%v*Ynxs{5l`JbfonivBK zLQooRh8DJFu3Xsma&KZwS7@|=R+nYEmw!tQHMiIu~E6yoKYdDasXNY@sF0rIoxtLHtaavBcN4 z=`3b2pxr`R<#*wWb1qtUw>Pm7=0%BAP+U+mP%^^2P`pkdD|0`2eC$4<<6#08%;J%k zYZhBOQsq>X@9^49>~Qq<4Ke``A#G~ieeL>3>6678wcB6T(u!^$V3;8nU+cn~c>{ zLPzl*OEwF_!emklv`h%tXC_3360}in!*O%GZ|h&3Za9)TCC$QzWkJoi z&HSHB2^OKAi$JKEYg3oR`dgy>9&@D^vH!y7gdLLAV~m z^o*Hza5dQ&EHvxHERrT zw1;9U+^e$O%S&kg09nJge+vWPMUq#hv^g2dXHsw~;Y9pW4_0Enn5DjKewJBL3q`cd!gZUK@-6)pVh8t@yt0S zu{@0g##rYB(7^O~S=QTn9d3KQZPDNWgt%&wPFr?mvJG7dht9)M38alfz_uY+M zUc<+eXhT6(Wn7tq?cTuA&hHfLwM$m2LaoyAH~4;+Z_>gM{NEER`G4m0`JVc?TjC5ulH;7%FYypP80e9RpNkKQJeNg{27Ri+x4 zE%Rg4$s(@b(YpF9xJk77%?^pyMQOd32~waAm}8b{qlx=gCxw1|i{ap&OU$&vScec}trmj_|~Aq>@Wz;y4+2d^pl2+-Yu+uzlL* zyva3uw3lH^&Qc6kWZ>jb^8qN}7#+rBM;%Q$&mlnl!vC-z2`Xvi3)Z(nrf88U{4d>Si*@ z#T{n&WR5;3dK|AGYrMr9mucy+*~8QWPQR}F++^i3aA`mUT9dcZ<9;p&v~s8fRRG>~zqtC7rsf1)`zu^+bN+``-X5iUw=;L7nK0bHn`!0J>QkmU6sf0pS9yGn zxm&$~q1Hj9)G};ICWTJU?1nVB`HzV7n#eR-C3d0IqB7VzZpzDd&iq_Hi$$oLoN~ob zBN~MfFg|Rji6W=!^Le=9b4e!UlY_P?5vD_5(aWb!k2XkR`PhzW^-t|6*>x~MG*d_M zq~R7qzp8%^&A`*i%(Xh^6$VK(v}OmhC94`g(anUOT|T0f&a0=5qCfb3L$a^-JSN5d z$*fg7ZCx}#ed1pY4hB-j zn->jdE4QS7pNDVBn-@i><*sN+RpuIcs0X!K6@MEHHB#teMFidL+Ggs?0<8D`E)?^A zYTA84Bou>AH48!4#g2HsjkqUI&fZ6+z}hEY;o`mr6Phsc+op5;Y%zE3mIugmvRCm- zVDR~}x01SPMhZnJmOiIYMMt9`TxB;8FGe7)QSmKH`zD7;dyBfIk z8nyNqO}b$mEQV@j^=$9Kk9paqmx<;(HeEi0-K{%8%s~!ybu%2^AfFC+PC}S1#^2uPQFVO|%4gyFQiJxK)<6kLh7#D$Nt^ zR;>j^H@e34thiY?WCb8T(sG@d@UrV?RZ!Ji4Os2yBf8@QRiTKEBDEW3Xbq-Nt+96+ z+p)))Z`L$K5(^a%e#u=)ULj=g-?Ns~RqY8Qwnso}tSvw;Usj!2dwaKH*rh6^#7u)@ z9w05#+YGCl&B=}ykvud4r)741{p>80Ad2rPt2d&cd3)FiAsjfmE~y}zTwBsUZY;kc zBE%Vd?v3ZK(fn*UOGiSZ0A7IYlB!zW?%lncae1VV7`DA8qbkO|InPq>VNsAvBM{s& zHi8cG9)(2nyLb|mXHWwKsOcR{-3)ykm`zNwvMaD_hQ)gBM;~v7s4iP~-SBndM`t87 zYqTP&^bXEW(_M15Bm!_s5GqDis1}sC*=knbO9|}eIRb1hP?Je3|8KiK417$|ic5PABhl)34tr^Q2tFYb6QRn9RI9(+9q?EN)*L@;(3!a+F+8TZcPOgEf(O;71-aK z)xEe$Iic7ZgtJr$3G=RdZDz^2Wv_(gFverjX#ofr3bA8zGe1XfExvAixM?J<*Bqju 
zMMbv?9qsJcGeTN1OthgF3Q#q672db;yVz-%ij%Yf<@m5fBnkmtwQBhY9jakXKCr){K;B39d$joHMS zeMFZdL{*|Iu+SB#e?hRTQQjLXv(ou;+lA5TWpIr3AR2`yWx#fDG}AFF6;W5suUmBI z-t)V+X2sD8B5fv>QQfHPB=#`cleRd;=8HjS6f!#WDD2(za3ev)m9~$+z0uMA406;R zUeW&mn4j&w4B&yN<$5%}%h4^@wCyCzQB<-fTYGJn7DvoEYRu}z9h8)+?9Qq#RjKt% z$nKX}yNzcs?*;Q{_Zf^D^ z<{Z93E|NEn7@Tx4i9$0W>sFhH(YJ3d2DNuoP?~l8crx)kU4Q|%XvBq$NNbqc zFxF_I*Ex|&Jcz7Fc4of2S0-zwvhCHv^0jr=>K*t&bm)ac6K{#B)V7;Ssdv_Ko-Wn) zXU30T3_5)i%Q|T!MtwWaBvRb4dpmOZwJONE*&&w8u1_88#*f}DBRqUS+Br(b=n9V> z2<4H_4qV)Oa_*}6xFC~8+DDfHO+<|&Dyjm+nktGSdMnw*Sl6I_3{)B%X>^4OG|w9j zBxQw+LE|)Meie@C8?j>JNv756q0vJak4*rM`O4|zFMN@#Ym&>j)-!*YX=LE~wp@EB zEY!+*OQegWj&?%p8EQO|MH2X2NiTSzy4brdENgs)oRY-UleVW3zJf^NozD#vTq<=4<3%JtUVji~6}6(6bS1J=y0vm}I3<Av2uH@u`|Y1)Hg$0>Dh~C=1(q_iCE?k#c8Kyl*D*)eyzPEX%jXRL(Y~M^_!wz8VKa> z+%l{(f7QEwj}NZonxt(tD%yW$guG&#Sv(@sAnj)2Cu-!+gZolQ^-(#gW{CqQp~4X@ zzHiaO>$ybQSluee!3?qunI!3Gn|pWBYa1LjmN?q&sTcuRDZbZNxZB^O?qi1}dLCuN zlunHBu~c+7L}DCERpRg3&)K=L%D-hSQ|kGam349nT0#*V4i%g!B5d+;cbn?*?L4=Z zEb#46(7~#8sNT-AzelBpdAUrxt!YgjsDMD&tY%=R~Ph%PaxiY`e$0yav z1ag8Ti(^Ff7M>&$T@#yoMJ~eW`G68>!L5!e0qnGWxSP&K#C)6HJv@Bpm$hw+RB5P; zcArViGMnl8MoBw7b;O$g0REk7#<@^mCoj|y9<8CSTjYbHZvQ%s+0o(DdSNJ{(DWE+gtMFpw7B=0hvbFPYG@BYVC6<5! z3EtV;{NBzY(`aT39C9;-Wds7k)-Ahz7jpLS{;#!-EILgkd1#g9hlwKWzkhw}!m9c2 zb|%vE4N2w8qPv+mx+q4o-v8+r!mA2%y}_Qa$&QLt?{ zG3e^UGu2$sCGjPBGTZZ(rQ_P`NeP}X9lF@unGatE;--Q!nBnvyH=e9 zk16iplR$-u3*;XiH?dgM8599{7T3LZly-A1zG~plEo4))5z5$ZChhqnV-E(-%AUtMQGAGwE?tH+{NVSXYTiVDTmbZ`0hy>h2LokL91=ex_5l_ zwJ{u;r!!0iNG=CJ6=t&Unz?aFBZerW6i|RE0QPQey;rvG7F{wILZZSAI(7ebvb^wK1O+!+ROGaWYzP$?F&Hn&L4*KYp zU04}PBqr6NHpyVOYXtRKaGb+UBSw)#V?>aGs+G+4gPG)5*52x2iMh~Jv1K&K zFWv69bY;cb{bOnL0@hWqDO-mX+DRKWTH1)cz3t35?EO2ix{E%c&ePF0h~V?UF(f5!O2qj}1jUNlQ6fjaPRn z_j}k$A&Rmpwr2xrCndWsMYVf*{+1cFXv-FB2F%?TSewafW{t1jeckMEWzn>Sn1Hg- zZKh4-?&QC_go{S7FB!~58H_}Cf)1oxcUIhRc}AG(H1McZv4Dkqyh(1&KhJw^-E2ns z*tlS*4`~B1-8WHPtn;qI>4;Z}!qmG|xu`8DX34}`taRN)zT8$;7Mj}K0EOu5V7`xM z1XPL&8g!qFb+w0Sr8ckwJx>ZdYK9jhS-Le_@m zKsp-U_U~&8s?^4lXzOPhEzwi6hz@A(`aPTQY+SXVKNWUhxf!+sN0jtq*|%Xz)OBnb zkEF#_S+HS++DDKt7dA?bwy39LE@tlT;pOUFss_%l!_kggPuKqde>dBF!Z;%mG(PV^ zs5LtjL$BgU?7g+&aV|_~^-rQ#)F}q|^u{=eAXZ;xds0Z`9<64(!+X0p<`crAF!9Ps z5uzH}v8}=-RW^*cV0^fODGjPyq1!!2{kAn5sj zn-KUFHxYJGT+C|C(%y?s=Py4H%iAxD^i!Q7eH= zybD2&Cx8{;%3_J$p)zG&%HmU^4!UCP!x9i&C2|QFXt&$FZUQdW zBmwfAN1@bcr;k%Cj}pdWd&x_9o*Jt=D7z770c$5zUn ztZ8}5@-1xU0?P5291|OZ9JV#N8js=0xqr^Yy2Rw#xYBoLQ1%0ym&_U0htqCvosL}> zIpx#B&wOQ_a<35@F#JsLuUSJle0!!H{WTRwkq! zgQu&bXPmxoqlsbCgQr;(Hb!I>2AM)NeH;G(+QN=tf_oW)mXWHUowjBVYd2==ce_|! zPJ%uJ@sP^vBZgXoa;|-~UiZ?(`Z=|ewwgi0bSy}9Y{6L@v-vN$*JCQMp#rzJ=V2Bd zY@uQSP>Bg3F4d)QW}4xsm5Vh9+I#G%s;ghd^swKvXLNZ59Mn*boz_}lJS?J3hTy6W z-zlaRZDcWOl(7twrBs&aY?kJ`HueK^A*ya=qkw5N%$zZ%REoS2!CPikOPlLt@7mt4 z8c%jPkN*II^-#xTFL1JIlICT*WOGREon_yA(Z6RBw<6{tHKYy;%VdZc8rdXq=gumZ zE&M(H9GGa%iaAu0K?VXkdp{{UkD0Q)rTEj#lZuBAp#MZA=o)Ak|(dwu=f=c)iz$yR*Z#-rv{)K>@lx zKa&&cSe7D4#Ym?}Z|1;c#A8~_(m-yGP{k!r>UAk=r=t0OmJ>(shLw4=YM?HrAR^bm zK5eP0RNnBdUi>q*CFdUE>l2=6BRQc^S63MBOQxk+*U^es>_LMZfUBt>f+#^9Adaei zp>;=1DcA5U#cyO_z&6oPYqVgw>0(a7PM@%&fa~OMS$5uMAu>2JGHjjoll2?jZ z90j!7S{&6a+5D_AmBR!z7f_o>A?vrj`OTNLByb0ffV3Tq3Hd?#FMwxjR1S@%l-slJ z{9FQeh^;Ubjk_t=f){NG!L(J#roms2_y#U*fDMHCa3O(>&S_n=_{Pj#A>qrgsuC(d z@_JpIBs0Tuh8BkuJElVE^i}tFn*sz=4vw`2hy;eFP`%50_T8G*$Fms2*eP={Pz_B? 
ziEM$Hyfw^^fe$oeroJ;2cQD~ShUaCOZTiHr*)8nu;7hn@5IriS02_OCB)OZ@jWGOW zwNaGaT9u(IyWM6td8yvS@Gv!Nqa&yjlP=Puq>~U(vDYdo`Y1eSx^suc2j|nPOt!e|Z!{+X`BS{q`RUwv!g+kKk!%>t*_O13U z@4JYg(lF9GveD&5W>!+Hthaa!ZZB=^-SDiA#$9l@>TVZRfyzvm7k!NR`+r$|*y0V+ z>Z5H1#^Sv_Sz8|2BoV-jOKXF zip&Iiw8)HeJ+gYn^=lqpHx}Fknb^XpR!R~g5)z@9d=rXkggLmnyJa zjXgrEZrY3DGPjw2H0UJY!5zeLh!0~f#%OyyPdh8NMTSg6CYMsd?YWyaR=I++c6WOg z<+r_+R!gmUOnE~yvn>@`?6i_ht-Wkf@ocsC*%D=GP)In4rhqE;;_oH4+Og-d=3VS+ z;*G?DJ$#65B+!Vo;HAXvx6RAh{psYoopH+u)v^K7TDnyqz*`fIB(k(~Jr3JQ3$-|% zUXB$cWwY5CEzG1LXK6*J>-mg-bO$>Dl?+7L;gK95$ zH%0#da^g`XDi|BuyGd>IuW7qjRf>wS?74=;uG^2!#d&_7T|%`>%Al(J!u#Dgl5KmK zG^9{MSQ72ok(G8vSG>8grT30Cw^s%6%23@9;8SwxlgQ({{XD-c5y#7(?kA8kKh;_ zyGD(hI*iJxyV~sIQ71FhOD2)U*GRRr1(I|7?Q+t6x9mjRds zD%A(4@6eJlE}KuJ)Rl?tG6>d&oi-ZVw%?;`wrq<@mE$#`4?=!XX=J$k-qrvIX5W*} z8NVwO=%6*~{yb5IE*opmKa&BNPy_e?z^2h%z1sBd_4)m)akZ*zPKpkkFxqN*4`#R` zV!M2mucMdlw(JLDJfv+SXW;(;YXJiS%qmC6T6!nE{{S@;4-^uGc~r#B+6wy}qLQm* zo_*`u{U$h4JBAwtb}dl;Qf_8!yPQVCu*#vfz2`MfOOl?Ah^?8`vbFeFw4^%6MUZ(G{wA9II=1fK_a`pbE{>@y|uDTk1iLAigOC2nD%s9#=-4Vm?@)X8Li{* zUFYU*GbT9Na@4Fj)v{NJ6jZ&MV8cg7RSO%uPqLrPgb@%Nh7LD&m6VN{{U6y$m@tX+*}bQLxKuar8;z?h`*ZE!k$ zCYqu~jMC-=-$bJ{vTpF#d9ybDBGbU)K`}06aa)zGWr-3XvF5ArQ?qXqHfuEe1+kLn z73(KsB^P!yw#fIc&3CNB@~I^U*`hK8aj-($WPx|Mp33dJlf#pDg%e^(FBG+2g=^DG z(YG(TZINE=o0wK-cbObAC=|AoxuopoMO=ujm3Nl*!|C#e zVjUWzYcve(%9m$}E%4%v-1xy)u`hDi=ET$Kwyg>oE$If`lx*m)n#Z+S`x!98N&q|} z)XkI*x}DYVmoH-LuUss&0063XfT}1LZ>4SDm75U6#bwf>fLKt*7y{0AdezFY_jj!Q zCOxx=f!b$76^%%MUMyMjHgS{jZ)U)cT^mI0T3iVEdP39D{9FS2P?{@r5%FV&>ZK{t zHh%^k^pv)Ql_YH)CeeT*@afmq2gT8j6m59`jg=<{mIuBoXtBK*B~3+@-D|tH<4YE? zR%T%?p_PShE&g9lmM->KnXFIJUxd_|A2VMRY@piNR_+?x>a4!mcbj!z1)zwCz>u*W z1S##lRc;z~QTzUOE>#Q|8UvIci#Ofb#!XA03>ao1S+;ck4%c@Lr?x5HLa40ZsHJJ% z2^!Pp@iXu5LNx2fiQ)tsgO{PVQ9D)srMPPk7kbyX4b0+1GPFw=U>;J3F2Jhmdv3OO zSkY~JaqJB{#F6gE%%lTQmu9h2TbhzcjoGq3))z-Mp`%eDWh+h*b5>iOs}o!Gwl-Gn zcCmi7l*>Ghss_r`Q-^ch(nN^YJIdL3nO6*9sF*l%#?_D%9urib4E8Lwy#%`C)FC9l zw@6>oF~E9{cHhm*`_szga*G;`Rb7QhBtWaf^SvxCk`e_4YzJ*S@-V>Ea*Zt0Mp;Wp z1*_B8GF$Vl#-o^Xcw!o-Dj+qvqe^EL-BxGqFK4xehftzO-=sF${{Xe*z&cGM>YPGC zgJ6v<=r#3>{%30vwy1|~Qh@1AwCjz#%ZJxXA#%~AQOD4cMw_vquTOdKyt+A2buOil z9Mz`MlvCaK@urMKi>WRermbrYp;dspRP=s!t@o|>xy!&w;qgSfpMF;?) z8CZE<(V5%7fveEBM*0((4|V?lH|%PpgYjY7Z6qb7t>h&OR9MYb?c0{@EK!Og0HAzB zU>|5~#X9k~5sxK*i2bybJur=w*}VI35%v%O>iKuNZN)&Z^M4&NStw7Cnt|lYPsPQq zNjpI31L9kT?gLRm_Kw|(HY8{^$`1e4>dLrj zl04!c{}D; z+k>VIvARYSs0Q$TWlWp=M#;sMtl?$eNYe3g(m>ZWPsA_!AB*PVMwB z+Opg%ig;HNLsFp7*J_h?ULCSqy}N5&#Kx9hG;ce^Ahi=~lxE>&tA~lW*25-p4f2Q#yHGSB%z|X1`orLL^m(h$#00)T8y#f&Ap~j+)gBk5gauJ(9a9E34A+Q zQW;g&+>tA?=Vio&5J{zRs%I3aSrS<%)3oLu)WudVFNUs@zX|1*H(1kNguKral!o75 zM%x6m+b;6l*w8;K+8o-~u|H0_0&a_p;c zg(CvWDlD>@Un|pG}`?{ zY8j2ltwuJ}#LE$DmduhTXCUIArAR2p4C{@MZ8W3&mLvst=YJ1#a;Xqlf+3_O(_X#kf3muF`w2tncp)> z+Iu%zT{g8t@m7Qlj0Ja)_FC-MW%IJ)PG6}PF2;?(*vi|NcVzF!^fKVk)`Fy0@<(12 zf&{Iw06KcEZ;5+Ys#T3?LFKPrC+qrSY(4&zIamOCo z5FH~WjNOm+UWjef;?Yiyjuyh*5iW^dfU93qP z66oYwyoq;*Y`i4jShrsTL#{MgTH~_QwEEX7tDC&OZPLAsgpZY^X|Mv|F9ahm3^zgP z-A}8#HxoV@EpLp_i=INv0bcZ(o7H!%UK;LQg_TJXnAL1MTcY${7*@RfYi|8pNxh3$ zDO{jY3tV;*BMPyo>CgWDHL@kvzc7@Gi9c5dMZq4Pn*J@ny)nnAjyX`>vPBKu^5QK+ zXf*t24N-#7*QalL(QE}>iaG!k1Llv0fP8r3K4$pv!O+T7607l_lNyd=s*cj&uA5F+ zGc8SCLdVs!cW>`tJb-aX+wpn72+?VYI=x^jE$LLZGK&$BNC_A>5mu#ML%oWeKDS!-LY_{Ndnkjgjtp}#gGEzFlu!{-XB~&E zZ#M;#WA7TSVPA-TP9o*H*5buHZG3eEqDV=OKQN+ZPU~V`PpG3q7*i9msc4wD22VKU zXyo&-;uZob5`3Y6+5XNGnMTE7h}$xB2@|b6^WAJmsgArkkz`x85*S>S``--)K0ma! 
z<7VH1KK}smAGVRU)Iuro8=nT70S1)muihB)>_3O%#pkbRr{&;-upN{e#;2yfcJJii zkapK)M#Zk^wS8j+{)P+Ht~)w*)4Dq#dOmI$UV^(Q2F|X(y4+NbnhF81V22wC1dgiK zv_FfAx!5aBofPZ*+-%rwr<71&m#Y!h1$zGMz3eeBYX&DV^7$L9{nf;Y8QCL(%xF3Y zr7o^#Y)d5dl2VljI*CI|Te90*HxsqLi?wDr+7>ib6g2GdRgbJncGZ#d_wc%~)eN;l z!9xYE&n{-m-CS_w24bR$p!Zm{^~-Acsha-Y5w!u#l2j112}|x>s;z3=j+S~^t-B7I z1_p-q0A-8{Sa*bu-PTOG_(^Kv+E!a6p(6Sj4$QtC&BDs7a_?sKxp0!|0NPj!$55ux zb@!BSqut_L!-Z6VF)m6T$smW4<=o4>xvjMkH||!AiH;cZNatY$_5q_&^IztC>=ICSx{;ZO>>vDd6w?(-`VVzm{ll%>{@i2PYrGQ z9){{>!G`i!in9Y+k+iMA=gQVHTbUMCy9a?aULdN+ zri`G_F7n#9e`wvV)*DdlZHkqkVVa*U&mdjJWzD^pwI4gxrM9qQTcHQl#I@d?*_Q6_ zr-?O^YGcy?$v`&0>O5$)D#c64SqhC)DC(`&#k}_VD(6e@8xL!aeL#>FME0#aLmebi zv=W4pNVM%Gv9fqq=UjVcx0>qpIxU4F)G(N!Ct@tGdu+LL+^os#9#&hM*>nQ!RBF

    EEik;XAmH{gHt(K#hQ8`5fTT*FiN|u0RQ>&aPn;KIdh0 zd9Ot!hfCieP954nXHcs5TnGe9#F557D)~c4wGQh z-^ahCUT!Gy+YRLZ3J=V;!E67<8FV@Yfav@Rm|zWPdvM2qgLgk8qwAVMffoHV4%Y3I zieMmV*M*Ek9f**@1;h`KxiP&A44ff>^!~9q@u!1dfgQ@DX6xx(?O$76)pbDho3SGM z9dV&w2f4?U9MsnAAoitR$@w{^uHl+VbH750b7{KWQC*VO+|w6=3#Ik z(Y*;7vX8M4FE2@c)nfK2;tHm=7AWL7WxoJr+()Ms(2am*WB(dvUV_F5XUmYvFXKJ{ z#{IR1R_X!3-F~`m}s)m!Y-J| zHY+f)_dX$vDH^BJCMbU)$9{+CjQs<8{2yQ+ykFk!ju;|%DISQge9{7!=ZWDE@8>y* zsY}nEh%dnl8kv|#ig<+qY6yW%Uhx*|(5zr1Fv9#YlG8r54|SxrxtzgV7xY+P=Ai^T z_8!FLQ^p0(u=gXadhP8Hp1MDRR)$HKc9{7V|0 zZ5-brK6$9~5$9im$RTLNjqQ_G|JsIQ>j>vg zO|;(J`#=5s4`OMT3GZUMf-AtTBj|bjgo)5W9A~}e1ejvHIVXZS#kW4A$}bh zYLG82<+vPQv7Qll%ekLEX#D<`cL9n`LlQpfSDrppPo?bvZJ?FN<157cWlc=Dpo~SkC--{@ zsnP^~tnGCmn@}Hs`lJ76KvbL<5%CASU6smN&VWM%DfE!22iA z1{_UN9#NlQ+>i&;^Fs67D5_C(gP#{v1&F?#R7hx%v<76!UNlGq5M`d=OEqB3KIR=nE( zZ@v~*L$BG+vjK)=E1Fy>ICVJ~-Fy9lw*oSB&uhVJ^izO?g`kLo(S#Ge3aF5`27{Wr zji$)I6fVaA1$&;$c+Dvq4g{_Us9-_BI%v9T0Zbb6SZlBOh-O{2kPgu_`5_JO-{0Gd zefvGOYhpqdN#cOtggQWR_H7K6w0lU${tv@>eBXyqp|C%+_-lEsdL=M$rgHn=_v-_e z5&WXtM0-rVIjGE4Q)jeL$gkM~fv3+L{~S-UW*%WoclG#ozU{XUbjNs^SNDsVyoYA~ z{YSHR5u^jMzD56MwtVHD% zrb2uE=zF9$&+>3-PM0MZc&LE{9>>31CHjRy-c2gsA6f%w!EO6R_^&S-eXntB+peGK zS9Qb-qM7R4jOD4<0;1Ugs84&mc1K@c%vA-QrBZ_qx~^H2Hy&e44G>c(Qp`OQuGHW$ zzL{`r-n;0?tTFR#al-ynM~5#Yb6lmMzjHDZ36r8jf`W?szQ>G7l`zQAw<pbg_wzxcDLoJn2gm9Pm0v@mBf&&JCl`NJhFEM#+9-_^B3K?s6H0w|zi zehC7k4zhbMbblD+JO7?MpkM$0JbF3`r00N0C{X+|u|Ead6IUiP9xY}Z_czDrY+J6H ze2pn#QM|=@sva${1|_*BmnQiy+?NA|6asa*llKBK5mhiKq_~a9#?)s+R@OZ85M_m- z0Icb?1!IbSVACjJOg)ed;OXo&5FJPGDy3g<`zMGr(oup-3|Q(S zDWD_|w1o9%bkFQ7YZmpoz-)AbTHUt@TRDfU9X^j$w%&M^bHO765f9 zwlgJtw#Kgu^iZ~v)z$XZL9!j~T;s))5du7yFxSOBxs2G=vdSy(L9G(rDa(XnMYBH3 z^_IsI2!C#}kH35O&KAsrJPQpCg)|gURtJL{H=w0GlTcqxEpQ!i+v|RmX}>wiNuZ$T zg$w}|i@z2Os>ciO*L;U`E2@L^j}h)r*8B1doD&yXUf2k= z4PDdL+y|>0TPpYENu%l=*gHouO zYod=&Nzd4r7HSSpLc2whFM>5+9MFHGKZP;c3- zd5-V&Ciww*D5d`yY)>8bmnm4>_`(-o`l=5H-T`E8i=!SE#@dX~0%u<^%a+x(ERBFc z1Y{$a3^;C5P}x@015R|l(Is#GJD|+GbLaW#egmo)dsAj!UfaL8&yz;?>jkT0o}F08 z^+^##cPxWsITO0YOKfoI+qLXJwsn6xO==(s->aRj632o{cq1C*FywU>;+>Ev-9dbg z=k)h02Go*ZSzoJ!OEuenPdTSD3%nn5PtIi9SQB@i6Cg$F>gm~$LX@mmU4YtDc74Pk z<_LtpL`W0HRhI(6pMl`u#QUP^5`=2+wCsT#?b+E`)sd#}zkAAwTjoJirfs{JarHmW z6VJ3B-RVhN^9AiTJ^wo}ddUOLr&k_PrL8wR0dH4B|9T0}YsEqr6ZBcN^&h@E`Cm76 zg4+KXY>GJ(D$~C!dH#Gz-uu9Iku0}5B{(4z^Wm59kZWU*(G)PeB7G<+RO!@ z5?Jrc5afU)F+K!H708`V{$g7pUMK-bp+Y^;h&bma9-s>R>Cq&}cjA`;Gd~S<2Cw1) zv$c2dHQ?k^o&0?eeZPJeq%-?2m*Js%$9UWe!cQ)p?G@)x!HgMs!cIws4-`LqOdJ4W z_EoE0RDm=g;2u>OZ5OjL^0)d#JCvg5ak1r3{(6BKirb?P9dABc4X@bipSqJ1zn+AN z1|2@iGo#n%Hlfn0Cb+Hq+HKnLe3XOTH5_P(W25E|#X~%hx*!&@oXN)ypoerQh%w@o zE?nX|35B3;ccgrs>taPD{NgDOM|KZ>x%Exvkhz06cJhgZl66@Qb`%Q`$4Xj~s%Q!l zQ1I~ZCV{yd?*T-vyvGKq7aAe8;r_t^CZvM|>PZPIo2YpJATF#*W z9(BYQtkWnPTs`Ld^URfk$y@&I0{~{on{ebBD5t0Y5x(Ae*6MHRO z4GWjbxyzG96yYCKQsO6^ zfsxBCvz6~s%d|XJU9Lp_>4mV`MHzW{zqbIwu4ePZG6U!jIG>AndH6Z?Zcu8jTVzqJ z8U+*)hkgpmYRpO0talSN0N5u>x8~?sufK`8zwBi70bpN{ED05=(76SbUzcfFQ4e(g zF-tL!{_wB)TCP4)-ohk{?!Q^y<9c^={O@)Z+r+_wfUf$pyNZnWygJZJCY{)mXdQ{7 zIvFxFff_RNPhrZ*|K*SC8@u)8AkNwwElLQUiV66eKlu*uPJiqL|8&i#0BF=^?mwE; z;}nw{|5&%^0mQnT@Q?q%3&?^8MC*5XmXfpOfq1y9SEHBpct}7vi5!*UXx(p)B>k^| zHXZp*w7rGIqlJ)1-M73hM+|(l;-aJ^CRXpYn}2!IF-}JaJu^6H=GX$C&)@TiYjNQP zv!Kg8v)!fW#T$#o%cp7_i-B4)9u+le)_jly}n~VMN zHcp)BAqZ?b=*NFipAJeiD~#$FC++d{W&h28!Lbsg{>ZmyWD=3rX8F2a6!f>WB98#u z9rho{{&!Y!;yM1b^k-BuU;nh`0O4_#Zm3R?z53?gJrxk!f~ff){r#}Qm%BLb3i*nd zcrylnwpIT5exTnt{9y&|Wa-P3LyZP;gh z$B9zgk?blh_^g|n;qKk;HMq-u6rU(mBAZpIpY>=AV2A>jwYrc22Zw$TWeKol$k0La zQ%1h?*+RwJ@lMao_Ri+X*20X*j5>2n@|GZJGd_>ul8*DQTDqca3~LMMNNzf-bi}kK 
zh#-3KC?obROht6{_TouMN=|Ru&o3=urqUQDm*1ucZr$H^7uEIF<>r2)88*|<)q~qZ z`I76%v?jd8z2GjV@K12hdiUX-p0jr*LA!qKRqL|lCCKU_^2rv=I^>%R7Z(>#f<0Q; zVB^P+>|vfizSjfZ3w!_%sjc|>_2JT`PtVaEvydP7fz~nIimx>Txp4($?a^*^BUJWV zo3MDJ+@x7|5ZZNsq}o=|8^ott3cPozyy(K}Vhh1DQC0MwzD9$+=v2igk)_5RD-2^4CvdP77<`-B90=$Jk(R5ZgY_fUk1< zeBnC(63t9VkL%Ri!Ui#Ud0ikd4Gta$1QF$`%UklHo%ieaMg;}2u(6Ear6f-#v#{~j z*n+J9`YD-uKtLP_sY9zJ1Qv`1z=%;wzpsCfYaK|67n9fChix2qp|qJs7fPNij>Q;v?AcB5Dx?}93Ynn0$~o0hO_OazPkuv%V+QNJ$< zf%|N>nfFbwg)j8*ow#OaM~6{_e|G=S@(=b-d#sBH#=CYjs+0lmk|gvp6eH*p@GLGi zdr#3km~zudr@IjVi^`12>rHldLilVcxQ`QCj)IRREG@fdu_Sm|lGtnX##qSjN>N{K0Vp1Wfl;m%yFleG*-`;t6poD|u^#JL1F!A%Kk~XqIEZDs- z^)FqizX^eV1}|i^-~zsW(_^>z`lp8O#xkN!cE2)f!IdtWjiIS`qA^@P?l4XhK~-ES z>1<{&?H1wl1a`$$H0i$MXCIzhWmF ztU1f8q?pqWT8rnVr>FHTEv3uG@Os709SRig%aciB&2KeHiAC7U6XM0Z;7P0euIw}a zzOa>EAv1#HLJw~{O>!-oZ8s+ho~EC^7V=%;}57I<{l z+R62@vJ>D*Y6wvWWnF=WChyZ7s?P@#yeP9AdW9C+Qw20s@ON-aYzFm*P(p`yTzOAQ zyAK+igBrNTzbnR0na3Zu@@KjLb0*LHInmv3L}vBR7UL>n5Xj0Ry1E_`Hblq8y*$V; zL^t8|_|)ERq#x&~BXurs%2e>Yom43d>-?o>a5!99oqlxfh;OPwN<+c)p)2=;j4$n9>h)A-=&W!qVH?e5 ztc1T^Sh;^U&i%0or|Kmsl^_Ec{duSJhPEbD6w>`ZgtM`-ysAu$?M4Svs#+NVVwa*_ zMF=sn{X%99jZ#ZKHM2<;7qb$x<8=o12kh_fH#W+Xztm6BjQi;Lx!gvVUocEXPim>; zjShnT*^nuVgC6IyP=S!A&ERhF(fJD(8iUCB8qUdaQ^!t!{%lHomHdN<#L&efq>Ypj z-urnq(d061TWb2T*z&((+TUUf92Yo{8^I^88_#=;dwYqOHPX8$4`NAg_FhMO+VhHW z7n)22!#=N>ZSR0I-YDh)vXXpmm&5Xo)#M&|s|h>?4nHc`Jj*AlJDNyALqjuEh2JPp zTPUR{FE6k9j!xyQ6g%jO=O*c)R+?k!UvCq`7p@9o|;PEn=;`qWKTCF{_!ds#PYt8+>7v7?DyaC zsbv*o=OAwst2T_HS6y2OzMH5VN{40!Q=nHl7-Hjnz0%nrmRiWBh(&;SuXKafug7hu z=lvmjLby^yGVukeg+y2D)N2X@w^VOS5xj(PXT%|bizJ3NE9g96I?wU?MI{(y!gpFV z#S7osb8KH7oFNu#6Omzful%uvkBu!3I9I><{L-2>I$pa`YugO0b{~dy4Z#BAHE}nm zJ)h<5H)eMP(2oAZxR*w0I{O=QERmb%V+dPJk$p5PCR^}%nK_c`ztlDV9G3yU-?5BjYXEQYGDPF&)`N13~Of#2kY5%Fdrvb`Qc}uHnDU1RH}3L&!!}(A7;E# zp$wQzTJ+>*5xAO8M?3Azp?VaJr4**5;jQ76OuTh}X;F)V?0LlFowda0YPXt`dRAF@ zt8Vyu>=z};-Sl zat?;9adc|vS*kp=*ZbFGd7rk%t`66K@n>Y)ej!6LJbP1VBNJuck?3QZ_ zVLe^8wdOF4%k9m2iX9b)!GH zS-*9sP)Y$yc)L~VKAqqmwLVMG9iJE2WKkG+tTN#Z;viE_?oGS+hHBo}A!@w8K~ifM zKfJxY+gy3ccx0h!Bv^p6`Uq$B=q}dfz-J|^3o93C8dLS{8GTcx#50tV=o9!7d(`aw z@8|~f1C=3IqKgMtcCrR@R{Et8zSv~kA&0vv+2bass@i`p_4H;y*~wI{8F@#EHhtsy z)o9}MEH&E2wWUDnIP%HQhEOiXuey5mY6l28R0voqd*@UYVI${-oi(sDw4a1_ic(`@ zKhMU;#(QqWZUW9MhVO1jPw(SzaG=S2hE+GP-*1Z2&SvNe6JB6oYZ6R8>O}-K4A+d) zHbaj|4GDmVi~D->Em^9V%OnUfot_gS-Ib! 
z5V^w-LgI#|8N=>&(^B6#VS&Oc;<~v~ed+)Y#8{;Vi0%GZPvEY1cS@5|-Jn;Y4d{=I zVZuXL!^*#jT@r!X2_*>O1Qx}Zc_7Nhb6y=CdhNhz`d?X?zhDF?XU2lj=n{=@(mLfl z#Nl#Lo!L8SjDib=uWqQg-lI;~PrU-u%b@dpB7(IZ4pKDcx2BccbopFD{HMGOSbc+o zkv{(J22l(Wt|IarzWxSADRA|TnQxwixQwlKhyV$MJxw>> zL6G6SyJ=Enk~}&>DzZ}gX8O2y0IF`deSKXroRBTe(Hmx^V5IlF1?_LdWrjY1%~C0{upB_)3m+#N+3 z;I$hrS}CN85wIkMwTFd;<&*qwl37N1;p)qS^NWMQ$;r$$;n&1BG!I2t@$p5)HwzaP zd3>H4ymeqzV5bZd0PT+gC2w8t(FxKAO$D$TRr%a(cH(jmk7-`Ch$$xzLn;PN0q&#y z&p2KQ4I*auy)xP)`-v^vW=TidTlrWL4a7H$)4ue)Tkm1Z-%j{}9AR6bDgb+UwHki_ zm45iYCP=C@KsO`0dLmBIY+P_^O9F+KoO`N?#+j_U<}!Bn8$&%>-{*_L#Mh;g<6CHa zEet-<>0H~nD$2z=qNF?3yUe(eC=q4qXvvPb_R{Jq=7W~9hyDIA+)R_q+nA`^EsSC# zEx{K@f_XSG@!CHPG<>pLX7PzIn93-BqoYW#ovE7LveZ|kr-VTrVOU`5?97>S1;1F( zj6Ru5l~zjSxxqKOBX2DBW?MyXU7j3gb~ZNra*t`}Y-{{h;-_~55v;o{o9B%*Tj>=u z2MnU<+pPr!Q=}_lT=%M{jF4KaE@~8UFbahZH#>nQ3ozix$;s5B|9qJP9^}SqHx(IsJJ|W`=dZ zm~d->xd>^BVz5A~ooEm27HG6xDIK&v5`LMq0wB5O{OjhIGFT%`7$O^?BD}?S?&zDF zGv-|H8{{H4p4xO|zPT`3k|rXKkRq(~?=-MVfLjpeWqCbHTqX5GXyj>rBvVgN0#Kti(6fcug zqg`{kw0WCy<@sqK$K6goA{JSEK@iHQkwibB830eOTka+!i3uinjeTpwJ!`Eb?Tlh} zQ}{q{uaaH9gNA9=q@r#{os!;y9u6mjCPDG`&DVbw>;}M&s%1ENxtj2;@BPz2G1$-P zAPCRD^8Vm$X*I@&hag85NT6gSxM%OuT0G2(?ZAij8t^$77@t)0fSwpga9iIciN@JnN~hJQp5Pc2-eNu?=7m(OjxBSZu( zZMe#`zhxZpsLXQg3RuiS*vL~miptxRM*D~zMT{G=Kc;dU{cecwN2R@aRLtR|# zxXyy>XEo>e#+Fr-P2{xLHt0czAR_*17NoC+4+%$nFv3C;F1RPB9 z?g;R3TuwKkovz$Iqrjnp6(EhV|FWr$F(M&Xo6uJ3@ZsRqm(;1mf&m#F&6xcot>2r! zbbbA*>?!kbBx|@k!NbL5Zh73xW^bq5=8~BJ?m2}8p)WxKI~A6EwbW*)*+nl7kXs9e zb@V(v`3K2%mWTd5`}xbcb%Q@JJF)m$OKr2XUzRM^QdIhw-cHjYU?3^r8J3NPQ0Q{Z zz2~@ORc_q>+}$7|$!dC}p>QO){Rz+0b5AGXK==Ms!iyE39k+;INwXDS-WzKID=^Jm zjM|dV_(4=`bRdD>lk(GHVEEhfe`~&a4oC}DRitt`fCA8$d z=hPW}YQqV^bLrPa7kC${YO~(@td!hd{p6n0l%#0m?HS#?nq6AS!owQf&Y+?(Tx=tc zg|*U=ATKNDyEeWtW_eH#_eV;yODM?6+eMVk;-82e7NWqi@R_)bh(6p3%Qr?=N1TMv zdXfC@f6vb*eFXf1=2tY_VlEAOI+fjpg@ubstm$_yH?n8h%V)3m;_YTo$ztHPw`L21 zb}Z`$B}@U<+&h$_f~Ca50XIBE5Tkhe`v*ngvb;jl2Yo;N4+yVtGYsLK<4IQ5i1G?J z%TY$E5n{|b{1 zg8?Vgp@V+16f;4y%ER`#+(&#ljQFC$e9mN)EiDqoEbV%pf&@(X2*Y!QQ(a%aC=jOK zhsS`Ac=&y&$oI2LFYc1f=Iiq=Qiw!X(6+CA3JPce;5lL z+6%xOhRbI+3wLT^(f8+ERdF|rWnKuE^Ct*|Ii*%y_c3Rb4ZmsUy~t;sAvim`sGr;? 
zSxS<_EGutx*xu3c%-`$6)W#6A4ExnakvpvT5e?!Si~3-#CFb2!^IVA%jwKT?ma#nQ z_STfG3^!eB^D?dG<8q*-6qW?d2&X11_KW`pCc!x`130lG(ah4`0DEyaa?9I{hrfkw zqGoRWDdx&jPV-UGy4>_UmZ<*01VX?gdu7Dzi-FW>6>s0x2|5zmok1iJ z<1?DviHna99OMnrzO&86t%bv4bT{R*54ia+&q%kps; z+70z@%_x7!ypnUF-{83^=kjxS^?ut1~a=UfQCB0PDK4|6v-V7{lW`l+|zi zasjSI^OPFmhL#?H_)3&co6*qpRcx;#Zz71`xv_yMeIQfmZKJap-J#tq6gig>R0aF1 ziht|^P}H^2C8nH+QBfzkdey)GxwCIc8FXObIn1<02e;SwC8a0w;y>5YAV2Z~4y)hI z>dO!!7exW%s!@c8L6@nXE&X{LT-d|wo!6opR;v7 z7}%Pf%h6Z|m_2Y&I`>w%Rnt<_kwFG)1$&}T1dQ0!z<>?&vWms#_8G++zM=aE2J;zQ z36!Gu3hhc`8t!F9?yD=gEVvin!Hjq{RhWQ(kA$M*3CxV*u#gvqfrmKL4SEBF#8M*+ z>*3kh2ub%xx)INMoQ^>j4@zokY9%erzot>(w;;6s0h7wu*g%VxuUIjWULeb6&|^B_L3XsRCy#H1G8E91l}rlp^pe3$*Gi zQ!+~V0LUQmBy%h**-jas4_W@)WyJIWXOn6R%@xhA`FM%U`|Br^$VoFZhpP~^C8V(^ z=5n$VZ-gk|@UvS7bD;%ica6#fydm z8^)4F_@;kwf>O@C>uFJv62>)zp5kH-mmuu1sjDeHsmU-GSOHnD zJLZg-8*{lG1oh;Eb>LeQm)r>TQ~1q?|CNn6Rs$Er&^a4Or8wP39QJ{e^m1fNm0eAt zb<;C!0Y36P4g+q?jDb#>IgWkCW?Ok1-P|s`-DZ9{0IJ?MaB4htg*kB^;r$fy@lU6v zE8dpLm2+H6@My$&wAH8{NN+uHkXm5Uxw@?WoWM7m^;!5p)|jVOC)X=!27Uae5?OCN zT-LC4Yrp4Au2TM>%E2u$oC{LFY(_OAI2*MBqOjrObbwV`;i5uORx^ILCl(Ik2=+Kj zC_#vYB}(Ng2{9R1GPVBL(u2j<4lrt_DcGpEeB(eoi0aCw6S`Pwmne@hDRc&5qxdufOrNzY!n`MCvm3xMo-G z!DF`^scVW0%;Rib5h*Q4Pv>rzSh8E>5_LWpZM#*xS7O)P9MaKH=uU|#fKb6&O}pXa z;eOyDil961;0~f}0*siL;{6k0upt*S47dnT$z@;gQ5T>`d(ks$Ug)*sZRcTaeT#3d zzDaBkbO=f7>^|Nc+LYK%sv9RpB_YGW;hlto#Um7iphH`1F*sG)t56iGT#{_)U*zIS5jIh}+FFg7w=>up;tJbz8~SieYC97V zQ&3=oZ1|v`9ne7q*_-jd??n6uhY@DBJ*R&aF5!HGuZCDJtJP%D?Ddun##iPDtNRDo zW8U+{Eh75C9A=u{(!gF^w z^=}Cl=@>{NG)Hyj3(9QczkfF7Ri%!JPQdnC%9E zbB$_3V&Q)mJpEm;F(m*<NZ9+}+rl6Q}JRfD*#b3hSXW%n#bQ3j- zG&ZJgw}};A>302bZl$}sn}xVhYyqeC;|%k~QY{ndRiQLC#ZC2K?rPRm$1k?UG;(Fg zj|lPJ1J9QElvu{+Phe=3Fm5Lu0}P()FFeq{;M zYa?`ED7BnRd8!bRgJCuQTpOvr;#JVo)1w;9rKO|$i;C^<&JRrb&j$h%IGHjfqw#M$ zF?e4D8k{$=GrN^rT;Zah!%*Jusly-;E#&I#;G$ud;6tPFF8Qg6kHU+egX&U<<~nHv z{+=1x$5$eOy^GC{2opFu?#U}X&MfaE-Oo9sK4f*t7ApZC>$da&&1a~&S9Y*}P%KC? 
zD9EnfH#$AV+{450%+c20k8p%?G}bD-O?Hk${wSo^U@2Xie9HU1o1dUrevspQ1&%-` za-{q-8N4RKA0#VIlluQl0e0eae;hMO7F4|hOD!B|I*XC&Sf9zDR($nAWWoJ@Run54{#j}{9H z`&r)fgJKp%Z>%fZtx1YmFhTAh*O{kdTqq)eIo1TD2D%k#pM0CucnKy5Oxc}=aoz1$E+R+Lep*L=p?X5Ap zTs9dFs`fe#q$5yy)3pfN40(_IUjAjV<=F?y zl7zs**r5u!k%jF3y?fHHS2ff#O#Ffj3a`nJCSG!$bS>lE8qKXQ%n~HAF*+N0<-j5^p+`o43QG zqAWCchwV+LhxG{@uyM}ZOc+DQZf||06k^IN*i+|TVM&zo!GR8=n z%%}|&(IX^~O^ZllY&rcg# zC-3@7ZMskL4ly3$1snPG3KbS9metzIVKMk<8m?~$BUo3L)7(dVm^j`LwvLm!WJrc7 zZTicOAVih!$x2!x5jPX0pl|=gj*_3pf@yfty-!=s-{n@Iy#yPq+=F46v;wdsS~SNV zXG3kAi6(Np`y+vpWz&L3_u+AUos-grx`y#VTq1mCe5KMVp;PMSc32CSb1oZAe@MuP zl;Te)!|`>A8Q}`Pt(|HY-?-ZKxK$@H+@ix$jcR)R67qJ;T!qrg-EjEQa z;{nmzM~;X3DO#4NC%69TsT~VJq2N3ALt_3${v4R>m3VA=ZB`${&p2e*Cj#E@7Z*f%a#bg$q=pt8uQsQC0xqp2cWI1 z9BRtB)ifutxmhUH?MUCb+v+e?*?;6-Jd1I!-O!>=hWAb&!%edYDM5}~vDsHR81WEo zceB=>N>8WxkZb(3`I@Dzg4vY72g{4)} z-UZtiKJ}3bEi&Ci#BFw!MPY=%w_&$Ycpj(L;c2pqGyT@^pb>>8K1mAo3JDdEk0Vh`(z$gUFr^5oHG40tWPb_Q%i{j1me%TD_ zB|>vkmX;#(L3oe0UK2IZt_?uGLP zt`rH&VN5iwXu4l*I>LSSm-yA3z%2qLF%=v9@CFuI=r!4nit{5(bG{6F-5lOJ; ztt6dFj~T5pI0I|shdU@R>5NofkzANA6bpTwtG#bBS;O9ePa@@TyVb65mayej{0b$6 z#7vc-+yx@imMb>i>R|?KdqU(oqogYu~4`-zjf9gkccJku&s|B=p`2IBF`o_Z3aH7Yk zxAmsZ89Xl3w=4+m%&T51x^0oi-|3xKx|_Y2E!f{*KHNXiuTyIMb7X({b+t-H!V&f3 zGbrd(|NIYrwzCalIDvIIjof{ywL3cErEz8xgOyLjG4PBk8^ohW_L6RUl~3}LrDGGKtj&Z80fjx64Yus|+=B05YWv64$l-~5k{q`--vhRl zxv0l@yQ&^$-bjRsD|`knix2$8Kas>j;>=m-FaE_p!01|o7@e27*ZH}!Iazk>gEU$A zYr(kbCDC`p6Z7Zol4uczjvvpaRA%G$V_zN%c>3T?%e&RLFS3VKT>Sj47cw8ej3ESt zF}0t?f}hM?Dh{dSr0wotrLa2rZ2Paqn&ktNhMJlC426~gS zTMg<{waDEm9XGn{@rKfL#YRk#P%~0>pfgV~Wk-EjOP_vhkX40Ls@N%*J%Oy`)sC^I zw0mFRV1BEX*$}V$61Lf>*4>mE)2GTa&P{v6k1zxf&I}ddRcW_}Y`vC7KIq%J&2Oa% z`jObdp?7UNf!0Ant9gsS-AMG`T+y%lgezf!H9j2u1^SdW8{Kw^J5`5^785HYds1@@ z^C_u}S2#c;fmwTI&Un=QkZGeONB4R257}*v7JBO}uLuJ{n(d`)yes%#zV~kqSA@1w zJah5XjeKfs-DO>s$k)|(>kVmT?R$2MeBn^aPL*iW9PZ~o>=EHVX4pXVkL<4n-gbJWkGPU~`b6P)rD`N}I+8?_V)9;C) z9@s{YPiud$fhW#iSWMZhYBA=E7xaF-w9ZGKpF+l%6U z@RlcT%t8b2%3)bz=g3VbxeV>P3<0&D?7kvAm2=WdiUXNC63IovsST49lFzdHMPk~P zdfNN@4nGGGFbIFWP!%yuuh6GPYft22zV9|lzcNPc)JvSuWw=xrz=lZ0>A1piOXX|W zr(`-GfBSf~?5Ky*oJc%1UkApKyse*6kPE$o@3el((LKMI9Q>ZlK_%h%F~4{a2~=x2Az z>sA@NCt1$q=-5vqABzwURm+uj$|hLcV9opZ5jKK$%`AsR#lT)X%@MJC zZw~uz|2Kul?{#=*zcG-Y74+Gsa0EGVxvVpcC!1W*j2T(bTsKy71pB%sXfz9xATBX%G>fB5B5x&LZ}EE_8~o6_>()>bKJtI z6~Rx~u$$-QW)y-)EL@yY@$=1y7N`_s4PjL8&t5|dg^h~0w&-6APJJ`(HO^kyAT8*G zd#|0K-tKeeN*@K;fZe?eX8iK*J&Lk{ANc{{14(lf4J&MU8QsGp91FZg;iDaN*bHv9 z31b4oD%?$B#;O;e zT!Qk9-cvLTAI;7pmF$jIFAzQZMjhJqI|MjKlWX^ z)rx(2?Ir(j+<_u@7^UzlAE{Sml99mLDFrBPDqn>-@HzFp(BMr^3dz~3h=0O`jXA@B zp2mdmS~Q3Ya!*SpnA@lywioz_&uM;6$}PP2ezx-0!s4Qr!_#JQDP0MC_rB7HJe5z# zh-ZpW{ASDWb$UJI5xrJKNYAxmGv2BydD8H256Y<#!~Qqkh`(*C$%B18qo~-voxOi{ zLz9|eO&ZOwZ`Kx072JqCyX|FQ3);`*LtXw=5=Q=?KcgZuv6P<@(~>F!^BwfUxIVde zL>Iit!p+KWIfgMF(q9LGrBsA1ps&QLkCQTz)oouY+%Ur*|C(m@8;XpVL9R#_aux&mr~HM0#7LS{c=hJ>W-A zS&tM3MWub$D*W|uP<(@cd7mnIPpzf&ISV+(4US?BUS64Sqa!PXfKt@^2->n+7CP-S zJcJIVn|v1LDnjPUXbgGH5wGKfz8RH1H!d8caMD>bn|+Y2dA)d)AO^LYIMsS^;$0f+ z_v54!4Pq(hrh{Ss{{p~H2AVS{v2rU1cHr|bVXY6He+i9)fCwHW}O74=zmtIY_ zX1?%vv;&CI1IcIYCbmAeI;I>v-DH;(3TfmspB{_N(44V%ysdJmI3apCR^TykD?L`e zyuaiUDmlq}a%>wjZHh+n5=82Oy<9$3!P5xrlE%h2dRn0NWg4aB)p2@Z1Q;# zUqL`rvg-7^`JdkAdK3nLQQH?L(A0YL$qg0^SyosYbByCVdDtX-(cEHkRY}Wx3pFHW zpYR=&gT(OsJ;Af0n=8D#ufK6%d`=S%3W~_y-ZREH$}9>A9tU-BzP-HeK0Wz%?#J~s zAC5;CRs{DhRHzUzv`>~v2hJUsS$1Y^>=*V{Uih}^wttu0^Bc~SjI-6L*yH<2N)v;v zC2s9Yy(P{rZ#aI!sI%HUxLiooaALKjH{t{b2e&B3h)o2yUaX(d5w13>)%AW#10~1$ zb9meqQJLQo@htzRGynF^ZOwtFXQbHIKO|?nnn1BCh=thme{)c~wr1$vDftt(MEWv= 
z@bn^vNA+dIaCq4Bg$m5?^TVBF1^1Jww>4ICf>+%tw5IpwIWbG4EtoqMgZgxIZ7MKM3yrLw@wSjlOvv4+~tUinBm%yK}!yY8pJImwL*zPG4 z5nlDEbgoaJb1grDu9xbD$a}eudM;JO+;&=S>D4SbN~dh>kH)8HgW*aY!qkE@z!K`1l9B5- zsbwRwhV&91)s+$HUWURjr}~%N2!9}gU-#?iLH^CZs;4OY!RdO7_=u(fI4L2Ld`sc= z`GRiH_}Vv~#c`aaFLDUn&OB0&P(IVJ;FPO5BJ%0M^dD&)6zI{+M@8WYni2javNL*23S?kM*XpgrZ1!Mcs2KCkUx2+EF7Jk+#2)W?dQmRLBe~ zIMq$biDuVwaM^f)qwhw025D*QPWz*eF1^tn4>7KPkK&Y1tzF96l{L$9)aJ&W+0m>% z2lH`|(9u|>3Jo&bgWY=S#*cq&G_ z(~`~nXKMzrzV<{BWu3%y%S~Z9OgF29PTeKPEqtHzTQcK4RsHYse&h(0b?yZqtG`MV zlRO!*C{qU^2h||xZ+?qZ|M$PHn*y{mCb=uPK1E%vF307bShAs=VWRb_RnkxY8~$@M z8uonQubg9>ePxuN;4z$*VJKX;+I!W0E(9e^)Q%KZVXCqBnDHqe^hH`bRcMobGQ+3H zevN4qvZdTE?Gx4m>!)dU8HR~eby+g}2$PVX-d&rZs6H6e5=`Us^6pIBALO+F=Y+9! zk4n)Y&_gMBRm0PjiZwYS8dVn5E@#X31&($OQuZc!-i7M@&$w`>knL5%QSNtb|9?o8 z5Fphn5llZEAMEqYC_Plh{>fkb^e#ox0rgxNl>*V3p3i1KMBp)Oi=OzUL2{oSVqbAf z!bh?Hf26&4RFhfPKCCFBQ9%SmAb^6>oAlm0NDsXTh&1WFi;74G0Vz_1P(u^xH7Y`o z-cca*jsXIM&;s8L&b%}8Jl`LW^II#+71s*MefHVq+SlG^pUZ_v69Xp5y{*{jAwkJ` zkgP`uYg9f=sz_Ok*?8VL_Lb5MIMZ7($R)^uIceT!2fm(?Uy{n=xW10?Z|>XlF`i73 zHz18GiYs#qjTpE?c){n=#^lIS&($i2vBQr0RYsIh5k0>t^(Uw#nCs4VVOY;#pUL42ROI?+p0m98kQD$`$G+T@zpDX& zelT7E!nx97Bv!93We6UVw1XVxbs`s z&4qB{+;UQuRRuMYoatdyMa>_2@0@`T))(uPx5}<4=qLCMw(J+GpLPKhVW&?#Ak?^C zEOFh%BU=kUT*o9oO*XQ6$DLSi&N8>!H{)<_C9u}eCyQTYzWpJTMamKSP@n=9u>34) z^=_e|I(fkg3u*3?`b1X+Bt02#co)UJs$Ni;jGJxR6N(4#1j-xWs$^l+)gJ2Rxd19< zup)-`3qqK*1RRi*l^MJVPSj!8JwwdP0g}+TE#pz)gHX5|L<+|8aJhSKB~fqOR}!K* zAhTholK_*Q$E;d$*IB&)X%#i6o!R))G)Yi#hWYTzu%2h~XC&fpNg)_v2srpFv_WR1 zjAFX@8OQZem3#ILRp&F0?Yy5B;bFW!PAF06jTX|=n)I*eOC@txXMWe#P?+QNBug}C zyXROv^N#egYivP!r$^8MWKXShsZa_2VLdsV2(y&Y7_>eJ=_e@Z{+3-y6ZQ7XLj#z6 zC3aYEE(X4ngl?4xy3a7Dd33k|M(pn1%NrCu9up}=#P^D4!pL&X*H}WHH`174748SQ zFOVH(H2anyr4LpW3*qazx8Ef7#mtbH-Pgysda*YueP$~8exB9fAhf3f<)xs?vDO$+ zJ~WK{aTfCV)qo|o1w3ZE{ad0r{qQ3he1HrGna2C-MxZHao?n|ayR|xJyoBKplSjOiO;v{!Fr_k#}b8RT`DJBdDATG z8O8&OX;#^YX_-Z`-v>bJMF0Rg?C=?Qq*k}1L>}r5ex`XtK`OZ`I9oa^dB~!0&5)gJ z+C{%DI5pvergqY@K}eWRV*WB>JFTcPdaO2GVXgYJEonf5<3z%iEy7kbrmeU_3EV}$ zOG>xJnVCe-3Gs-hOpnZqiyGt7d(v&`m35)f~TXUDul?3t!hu4n0y;`vvvXPoWvz@tx8is_ji~5Sk%TXNjv7CiNO?5x*kt?1JbwV&CBYci-6aSk9P>P=rzsIr3C z03=cW4!AmSaMxgb(rn!2NY!s$QhT%q60IXv(BIsJwd8--8W>3L$lrkOn7r5Q54}!e z;v-|8Ek}`*G*@dNK|Q=z1PxjPy4RufjCnBK}s;5_w*b;~IiI&OlT?`MxpqZe%MyyL<*-Sqord!v>rIDX)c7tbY! 
z7-AbJ4{DTuU@*|(-7eYlOT*z{E@j{zX=zQn(FG^nma9Bib(Dd(h?tC0(J}i0hVbSV{1r-sOm4St1RSty=^yoY)%}%`ap?GC*(5EW#j0!t`=h~c|Y*BZ>L7?5#p229@N)b_A`C-{1-~K_$ ztfb?Iv5zwtyDJ8~D~;4}C3_oQZ4!*Pg4h#HHqO@FVU~Z~*9<$XFdf;-o$HGuTVp&DD~t{?`>Td3s&9*OOU;lqmt~tAe)BxoX-KkS2UsghfE;W}^PAfJH2?l)IqWkzU(m#QjZAaF) z*;lMo%!9rgQ+qJM1k?kIIQX9YIpnW z;w|>uruu)XG5!3CM#}O_qDs&E?}5qm(r%US)uuR%u(oRpmDe}GaF@={&p_M7)rOFW zOGzzHy5B2nx)o@C%`y}-5h3F7kEt((+K%}s*l9;A#0|&SHdayVW!8e#O&ap-WCy)h zR|j2`fUYf=R;3s+`b%(jVBOlN345KG%ZujzIE9u@`65bws^+!ni4B3Lp7S1|^n^Xs zvBECFFFYNRSNpDW{H_c7zjd7pzRChPqw@hD9Bw^{eJ#`}s?b2IJOq|_|1K@tV09ui zj2l1M*n+q*_=zbE7YE(4@*%GKB4eP- zVw2;BD&1-6ed;t#(DmkD_^?0xcCgyB4q3({UGG{C!UJECj4FvygC=na0E(NLO<8#V z?8VvPe5RTu_?7X2J6?T?8eaBE$7Ng1k0CT$Wf4kVY4UMBD#%XvQt>fX6b+oVLW0GW zw>C*`kW<=$F9n{*o?gN^8i5&{!Bq?WB8@mcNSd*4;KAw2{zYE-3Am+7)%^>CtLx*n zUhVC1$26{XTSr<-``CJ0(l0K?3J6a($ou07uUAVZB-El`9wiK4m~z{*uJS2%Y;10N zA)A;jT?Hc^{-LieqQk2;lg%wJe5+h1Q`!WH2+nSHf>YAIBW19=Ah!|-bP&LY>*L3E zsP(Ses}thKs@b^ZTunOL)|jkMGZIv^GUrSw{?exZ^<8iS;B(xQ9=#iyFwZpiw65J1 zpBm&-+Rw;!)CF-u9@2Pd%R-VsB(_dtVj)pF1_i5Il7NK;Od=4*X2wmg&p7gP!Gj>c zC@Zxp|3!Izdh(QucsKQ8O-yyk`|a^TGKEPiV&;8ik%`%Z0xf2`jUsE42Huxr7)bEZ z#9Z&NXWoZC}LK6BY$Rg^EhvN3K zuCQ+juF=bvkU7pU&MmF4|SxNs`d=~w_I%YfG+*yLR6?ICNn&+&4C-X5GT~ZCL;OrK2 zx~LR>YLhso1t{-s`;u?pMj-XlNB_saHt7+Q^9XS3%I1o*YR|v%DAS$*dZ>B#bb>n6 zO-n;TK@Wvqx05BPu==^qZdv}+qVN_01c<9$(D&-=uG00+$?Y@Qt^UM*hgU;@0f)>n z%7>~~pxOF4HwH?YTso4O*m zL*|7w3uyws9);E-&?Nz;Gh~{nXrO&yq=RM*+D+W6G24UT$!#YKK)*++tbE_y3D^0k zh`ZWkme*9s8WOsQ9rH z>aw`m5XEwRWx2!_MoQz##?(u8ePCTfqssW}XX)XWsdTHtF~}DoFc1VpWmPUvWT69V z(|pS+o3k9NLYb==YautBaiI5rdg(!r$Ib>Dds|{SOKuw2U3tx&YnkSzB(@Ji@~d)Y z%nM{&CY9<5*fTQnx15G|-|0dsH)Uw145&xJTg?Tl$-O2HHib7dDF{GUyNwSN$({{x z3JmB1JrF`%IStH_^9;;fIjb_t*K?mssiE1ZL9SwS5_*d5WbXz-*6W~N$kc8sh$76g zaJN&LXNBgB_dbW`b@M%Uf4aN(zX@YF5+DR*FZ5Fiw#asE9(_Hp=Ko$;@`ZIu>Bwjc zZC(#}WJMqeLt(s|FHSRtRP`%qa-C+}(w8TW$cw~10W#teM_I`20d2krZtaL{-S-8$ zG>T$*H1A6!d0A?*+d);XA$J$Tz>)(mqe2g40DC>!gO#2J&gUN&`SGf zecW0`Wkpl|t7GywPgjpHk2}l|0p+4~KNs24tj;?PU+uVJ&P8_y`7abo&;ZE!mH5@> zcgDGf2IVad7=;PPa;;m|q$)nIaH$R*-`J^Fhia%bFk2*)ZRA(*E5*}{7TpHAgS88g zS|1dnn*4`>!BvQv4C&x(&8}1>5J^?ZcyinYm4~J_98m?UvdR}$L}R4MUb{jVDPuTG zzQ~rk>abTfT#jQ%9&WWG3u#eEwH;z=Oo$ri(k%U;nI{?dLe?a?-D$LJxarWX?6{BU zB@u0o-kQ@jsNPz+Ry@7gh_3>TbJ(emc3!{u%6!bNM)20l|IKCpC;m-fd(Vrj zBAy-u>W@LstuvgN~fEO-j|M9a)6IL#7vNdA9b4Fzn+UGqZ<&DP-=Fu8+r?qW{5dYQVU};Gd6Ash}l7K|# zk9%bESHUjw85CMW3qESHS*EQ5|H)mIm5bzUB~ed8S1&~*t3jjyy3ekZ3_?g|Ri0KP zm9CJx<^%qE?4NgqrF-boYhy&absRh9R^s9&q|l8Z$+)h)VX7SIar^0d!{kvkj{j)J zxn>2#yz+BdF_iak{TD&>oACZm)+Cx3(3EQ(NTjl6Qp1|7(VBVA6YoHymYT+X1&0Gm zqZP%wS){AXT78Uea&$P?O~o2Zdq1#V1{l#({amu+3WT78Xk{7q2rkA!9!?PDPOoR9 z_>E?xRDS-PcKAi+4+(3=7h#OLxkB^)GD`Hd}5qu=CpHxKbIentM|U2chzJ<^A^V-C=QNZqxYWw%b{`^nXRk z|M9tCh10Ujy%W{K)Zr0?g}+R$|0^Swjls#=)yiI%1jZ=E0}zniNeN6N67fk<;W&Dr-oz?+jShSUv#eD}HaL(*;=VqgW z&oTVKq!AlT9U4!%+fS{>klH(TPK)Ov0pMLe1g)6gQmzAz>m&}d!o!#di(EC&Dj#`SU6-!Gv4`TM6$ zgr^Ry2+buo$oOQ^wJTnvBz)WE8X4BWsfD9{+`ZSofoM4g+B}a8EI9GaaUWq~)+xLf zCnH_s$Yv>21l6!~%^c6rrm^Vxll`y%0t1}GJ8R~94vDuz zRZ6`7*&z~I?RQ+a@cEqWGG>nu3xv&U3BK3r<*J*P=>^406;>4tXn8O$*YC!x(KeTo zF>RG0jUv8JnFz}b;m3V+-_fseEM6*v(XBG*nV~Y6Suw*UBZ*;+M>_EK`_Wo@5@t>g zYZ%NX8!a%21P9$+>8B;7y%auDL>2w@l+}IqDeua)6L`SjNRRKo2>0Lb0pS1Lj%`z` z;oboicY8@7r+F-xG0Dg4Lk6Ls#=rH#G3>#Oyc39R4c|;HEjxp#7u0| zAPk0+0IjW*f$%P;UicX8FBC2fOu}slg8J+SMhzRS$WuyV#~a*g6!q-gf^M1GVw-(` z;wj=hzZbzfiBG-Wvql=w--s|)3nMQOJRXbfLZJdqCKp%z z@+~D2qa7f0AX-L#WV`u4diFo#J|GbAX)}NVZJQXUT-`fT z(t&b~<0qmVmi2sLR>MrHyy0?UVOk1^(4$7@!(C;H3jgiw zs1#y8CPzX%6fK{CDC(AAzOgN`P+Qrm$Py(QqrvjAXsvOyB5<7#4Loy6#D_{8)f?~= 
zxLN%h$tfBO?gk3;l|M3RixmEPsQ>ZZcQ9b=SagMOU0_c--hRrm&Ldl7xmE2QXQb-s{&h z^SK|mRCX*lxmN^#-S2%fDQ_d3dul{*(0g<|Gn#zzIPHtHb~kN#a{e`~aIyuM-TVWE z)RS-NbfvbL(G{c|EM&@By3E~6xp_WKmr6`)8oK-M!9zE8s$yQTAA0(ZO_@&3UOU^f z7QhcU>=CqY{R5rc&=V5dS11r&1GRgtqbA7_K};Ro0l5NA?G7z7`m@pDXBR83QWr4-YmvGH%2n}{l}yh8T55ZI63BSmb9f1 zSjTRwb5PX*@=X!mpp&CGlgSTG7+K=8!;a7rnPI%B*?{7uX;AksW6B0}r`{%$Y2(Fj zXD804r><~ReKB&)^(VtW1}Gf!+fGA2vKp8{V;nNv7t30Ha*(MY zmeKaP%67gYoyp77qNt9hcb#o6tSCbgN6KUfVHjX{3Ry&IJyZ07y03G^jSlH@XkF-i zXE=GBtHhV+2+y4Kv~el;rUa$7`-*=SKZQ=4^Lr%jybb;RO)&=;S?WVgVTU=cxEMls z@dZ|K3Gh6HsQtTq@tOdkyQ)Juv0e3+W`gZ*0?fTbWuLDa^z!KstPZIozXCx%QN-#g z>+l?L)RfY#aZjADs>H7Rh7L;b> z>MW`|JBe!Y0b-?yZyvKyuh7r~jrh@73x(#OaRw5slR$cd?7*tBQtoiwAyJD-OaH5- z0HR+tt9J|lU8s4uw4B$(z9#oSdXB$QIn(`s68pOE5_lu}nwatRMc#RtmdeEBuUZLtkcs4otPzIb2~QUF(T zyH0iRXI89m@uT^p-S!F*lMxKpQ{VizqQ3~v-+ac?BWyKrA~4nklpk{mgYdY~uCifN zye+&|uY{^-!5!?buQ5(u4T%PeQj!^{D##iC_z`ZO1PQYdj$$k`iB3;faWNQ8s%8L@ zE-*9AyNtN`rg=_!R%6oacF$GMbP+xBe)WrJS;mu_xxI>Xf>f10_QE!;oE^dY3-JCg z?>^0)Hv50=J(`#7`lhZ1E`EptZ`YQ59o?3a%Rvx@37j>{dTX4rwmOhC79S!zhscQEb{)??$(<6B<(YZU z`7l?6Sjk(2?rIF~{kSqdqky?mqC;I^S9Dk_RjQ#5=c`9GO(m=uICN)$WM3$Ynf5)> z{4t$4ppWF?&(Y_AwSNLQO(tzw)V$W?ieqUtWDTTn3a^!T$w zx3GS!j6CiQx87AsQ4yUlHjkEe18Cu0A0pmXOdcuY{H}@Q+^Rw9W9NRwT+53cTTg@T z8*n61ZAY__{x(uN7rcGPxjJ(~jEFU8+Wvc5Y{yJu9+P^$+upaZ$Aox3>SV7wm>8`h z8Ru@>(k4M~7YfID=`q*|AdsDIrfEAh?+7jrUDrvEjyWh-|m9!+KqnS*E3>Y#O=v@{?IGHK6#ZZf1lBo zoSr-l*&~}@{kzb2Q)iH#;h1!Dxy&a4B7APi)>YRttQ^0a9X6Kgc8?IbtHkJ776iN< z!(KR8rfRWvsBl-Qsepw)t$XcS{<`1)7JY7LUMEZSs&Ly_r(NNaN`AxuSCNlsmt6o& zQ0>}B&liqsdFpzBKaN8NT{ipNuGI`fvrI(>KTs^4;r4gT8gH!+S-%Ygag_glxdq5F z3-Nn);~Vm6j%Nj)>RzFRud$%tn2K8Y2tPM(;Af}^tS2NbJc#K$0rKCM&_!n83YV4O z=VEI&GzH!grN}!SJaAWifgQztQqWrs93#8e!Rz=y@4e!oqE%QP=(Yp11~13j?UiJR zTa~tDNYIw5Rm4Q^w(!Id{PjjrbHCl%+fh?5a$6<4!O$>X4c)ybBA!FY{&Zc3vBCq} z0s0{GU#XgRaKDXvo|it;xz2x|+WQtMjB6zZe`%<3FQv^;D@+wHoP**CGAj;+xauwy zj(#(nm>8Ene;-u`6U{MMDwR!OeUzXFqGWzb>Q?3Q=-c^B3k_=9!m5(zD)k%u%(gi) z@UP}p_j;Fnw7r!X3!7S$#IfHA@BP{aQ4XNo=xZ!)J4y1}1~#n;ggW1$Ov%)#!qmXE zHv;0kgv)yc_3{xuW7>}tuobPvev_3Ueh02%QPQHJEF_q@Rqrm|mBJR0p5v~?I2200 z8cw_aS!&!rh%=RYR10R~PUA-ck+Un3O#>-+UjMZhKW1-gFhqYR{8Q-5@!}phxIzPC$=l8935A1XG zznpq)#kr{qbrnSFz>fsaUEJgRhA3Rl;1NPNF(ciMpxTqD7B0v2FQSEl>QgnGyYVn0 zTMpVJ$WVg-wBAKqiiWQM!rcBjBTxBmhA~zv8Bv}}dT(nsU|p)A;Gx$t=eRX8hxrrq zl3Pev^KRAcrme)=@Pn$`>WuwI!-)lsjtYGJ`?GhX*Dm88J+DxK(B9D+m4>HXQJx#| zU9KBELV|I3*BoIbbefvEb!7J_qbzoI1}PQc_xS=JCyhZ5hLwdepYM-j44BouW)VZZ zh_egy5WAb{!@k1ID_AzW8h0o=<=i<1kBVQ-GUxHiHFl{91=~DPwmk* z2{f-G&1g9ro!P5<5L^0Z=oW}eq8jIqO}8X{DE_;}tS1Z*p%5eO#ZEwk*MZ{JPP+t` zf<>dB$eT}^eD5{5@(bVgcRkVE04h5+6e+vD8&4hwu1m(04))I#18umLyTKWOsdITO z#)#?s#teO#rBb1PY&^Acf*8Ul`m-RXDHUgrOGY~|IJ z$fJ`J>+c1s?w5G~aW?PM1#*ptXt#={qphVcht@OruT+9c}trnBjS9lJAPDQQDRp=D7zp$MQSrTJPKo!nP_U`VqOj=KTunB zz+#Cp`eC}}EhqSmx-a4>U*zY$-IJeF zBDJEh(rg=MEM+0k$@wsNydI3F;4zj2qK6nNKrwp(@>gWC7`q=(ENCtl)M|p4_nTof zR)#0@VT`i|7}lB^OMi@^@sF`^bR`bjWMa)Ke6%WHc@|J=Z{t}51DJH`V*aBnFl~x2 zPM&#uWrH@x8%>WUI$4it4ca#I=PE@b=lnu)B~CL-!1gAp&INJS*>em3z0P=s6+c}9 z0iwKq)ThslN3Qnl1ni$7u9Y-_o^%3@*G_0O6snMCb9O75F5pLWklxuM3W(te2`6RS5^Zpr49^`|0m&2c`e1#*#}v83vch)J z>>**wy~814p41GKF%~MLdKky5#c%oJwq1!KNyxm{jYq>P)4KJ&zoxTvleLo>QZpOZ z_hr(0RS+}OQ?ZZzbI?Zq4e$Njy{}$&G30ZMLFk~Ulj8=7+ezyEQ4D_)i=P6>PZcjc z;r!pi#vd#)_*7bMds!duU8Es8->Ue%C-K<}o=NI^h7N?J?9kM1sFpqxs@aqcI4zrf zTTUoLKC6ih0a_y!w$t0pXMx@)DY65s$$)u3-5A3mcA~R79+g!uqMB} zs)E`msoJtWcWzhrsl!7{?)0{TA~jyhI%&-sa!PHt7||dvaEyCNK(S|rQ^Mj?R;EPd z^Mx64ll>dK_Sk(@se2F|7;8wTjvDj*_r5QVYw>0-o<{U(Jdo;+br;f+fPlbjJY@IJ z#r}~nOl{)y)))J%+8B^k&4pC3woXN*03DKqZbl6SAY)Y*u9kgV{N0NQ=uEhXYW{E{E 
zMdfy(vMiA1e`C_yYqw1Fp%@?6qtmm-&X(9yH6&}&{r#yZc4#h>&@R-2m+&u+}t!*?M*R&Dk<(O`M`LAezm3szHnjn|I z^k1Mo*p1^8R;m|9kSf9;xDqG8<<-u#G(oOWir=(o!2JUm}W~1TimWYu3g6aK;HLK?=+JA<wk8_ZQee$M#QGfQX;W; z`)S_}_a1e=iY>TG(Di2pPY-pKUtYE>>|RdEZL$Xx>Xsj?a?ka0zlD(1u@bHj6h)9S ztQLWQG4dP0kl_WG)IpUF-yL=SAC#>&emT27NhYoybltf^4C6`JOgrNMPiEI$sG+UA z&D5{0M|jIeJEy=MAG&Lh$Cqx4=KM% zR4H+ew+3m)>f>#>ug!0S31B^3MojbC9*0^fH0kzb?qyo-HiaOmpd+h za!PMO81hzZQjX-1*nha_9hjE@-sB`(nJ(~8` z15qEUi#-wNPQ2&CGGE$n@4TDi3;2=`_v$LAcw8~Y+Xd_YfaRi`14%hNDsV`36%qHS zXMmtIFXwA6%lpk}L_e1b!Q&T6nU#fsE5$}Jqo#Vpv=FKRoE2Y;51$J32~0Ji7enl4 zkoZM1;>B)(%P!WbwZuqZ&>n)3)qa=_E($PcbN9<;k%RH-FctGsD(FTId?#>frZ{w( zh;02#hyTj-Wb7UDGaXVnQI~}ZhL6E-s+KG+Y_ zF2Y?PZ(s$5q^3&WWe$nhB4x8{k)y1=ez$HcZlwQG>JAN|;PvyNA3T3y2Y^2V*f*Y% z0ClMm`_a<$LeBKRtnchy55wk`wfL>xqjzO(DJaoiv|A)1CYFT7X0um1pPyB+#McLuE1wx+@3um1x#mDOieFa%&uhAx4^k+#(l4Y++JNwqk3;HUVmPHqK6KIDijE-vphE?R)@ZZ4O{2^@bM;i0KFN{R zCUN!4?M(kuJi%_9n{~8NUbyskM9hS4b5rm!fn-}ly4Oaa-JW@qlPRB>y@ z7kNuD?fXzw%c1SK@biUQnoub0`f#d!puNCbKhO>8u3<;wD%cf15ce<$#Qvm$6l@1p zc9o-xGAeM@8eyZ)tmBSro0y7eJ(l7%+}1h)=|S(jAzxLNvDGU(9<4jj*S~hQ6*53q z(~d@R${BF^zi3el$Kwp3o7}y?`E{~4Kp`!LHn@obzM0u9Anp>LIm7ogLSo+ty{{g) zd7R4i=kaP$%HYr=>>JLL&t(YClP~HBMseH#628%y8PV%|vZJ2U%o#YZFpCJ<^P8ie zI%ux-$3pur{5?ROnFtMIFu|T!ti+ze6v@;c8Yrb9Vvfv$ZcAXOr#_%pIZx&`!ifB5 z-SWKwSXtxfFD~9L;HYLU7yt0ImG4Xb@bIo}rtr$gF|e1*!}pFNM+NXAXwiNpS-?QI zR0ErvVgd4?RwKw{hmqN8u0yJH6ABXV@Lr6<6U0b;eM(#b_oe5C92hWts5lAX%$ttZ z64i7{>VH}!9GmC(LJkDBWwY`pJ*PDZbWQV7Ms?hdp$3P44{H5<59O;aLB%SXfG1@@ z11$l-bR|nkP_=+IA02*}jzd8|#yA4oKOFI-EbxAl#FmdpP6^Php)_4YL zagt|hn}VlEuxe!UDB>+hh-}s9F^Fp=dEua>kW1T$Oo_g*M4)%cYfWI`?XP}nj@Lk! z*VQelEy};UWakY$&gR>kEZ5vDXcpx{9+?ITiRlgYtw53D+v(_l6ZA$KCvN(X7A0K& zgA=a6n9u9eJF;wCoFezjAeos`2*eldXO}!XB)xpHc4tB*p&@`rz1Z&dU$WfsW}j~yHwS0 zbM%EdX_sc0!9r$w_O!OX%Y%Bu8t{BU58>BgTf4jnaAPkL}9w4SFA!}2y8#_ zaYXJHKA7VCvsi6@AW@7c#}geStrgM;$xhi-?lo?;5z=nDL+yqiP@O->RIxh+GU?HI zV3QU5rp?paItzI|OLZm(?Ok)7g?W=Flhd-)PP%GoU|ZNpM#Z#rR?2obFeJCR3pwT0 zSZe~56u;+wQHSocfsljo1@fS)QB__>r3o4*MerK&$sjD|d)gUUl}2z{@R-qlbp7x0 z-)zShKjJ};!H+)IdWMJ8u6v(cpUgd($!ZM*gTY^;Mopv2nzoevSEg`2{xb``h>fF3 zr&epTg9&DNQNQ@!z@rQ3?x`Kaqv=ESimi%X8TCIvocC+Iqo(;AyWJAF?!BRtO?Gr) z%`w7<2-DCIku+-=q;lwmYuSs>#LajMoQ$E6=zhxoFt6Z>%K(h4C}?GDUT8so$vkrO zM>LFQDo708ahE<9tiaNDQnRQ~f0+SZA=zdWjw_-NH zMs~}tQV+B=5oqPxXf5xECUjq_{V_`$qM(lQ}! 
zJ=*v0M?ocHXJwcRhL~)IR7s7sZ10q1nb!Y|`=UrG)nZTB`rKH$-JD^{>shVd+}P}P7}82EuQ2X8~4i)Wyd($aHqrm-pR1(WAVeEjI&~yILuz6#1TL`32Y`6 z9Y%E8{9O&s$N&zmqscau`l#0{6vrf1a%97&z9MN(WNJGAZB&NS@{bL&Uw408DLtKb zg=zP*Cl)VYx)tGvJTMfEMte&<(XK_*y^Cv(+zCqr;YdN7wO@J*v_2^u=Ga-VZh2eS zi)8p7tkfQNVZ4v)mgZ1SK?;p?)Ej;15bqfSq~D?vO1;&sSDZ{;;?R98Gw9%(_jo+& z_<3}%S;MW~mA#+CMdd?6Nj0pjLA`sBFRL@{j^`n-t1-LflMCjACAa+kGGCH|4i6uSc6^H3>yyvrodB& z-!j8{IUT=loQ!d{Vd38JE87afwV3>4Vsp1ob^JrRN>xy{hHKt+l9grWxaYEy5q1>x z_z`WeW1eALr;(A1iPU@R3}4}lpkpEYu*p0e3LRa;thzIS@s^UJBM8rcBrm5dWky?P z__2fpf$4l_Qt{-*sPFlj9=d_Z#HyO8NAm_NYs|A5wc3||a`Qc z4=Xi|wk`$E7y(;xT#qM5d2&!yoA)3iSJ|_qTo=d$hh2aB^Ixs(AXhd=X$Wh_4hp?>}tD zG|VlD!(UIGBxW|2MR@OPPu)6MqD4)OKA<=&90I_B}bpjx66* zGR>JA!qx}m_|drO*cjK)7EQ_G$Ou!_!~z9I^aJXVxO{Ovks8in0Tisb@*uaWp0c0C zXW^o*@{OwVEO7c?#Pi&JNvmwc+V-Nu-yeNi^l}2aImVy+mKhGYSAM9Uy9~`dV*D&| z=u9JSHu483Ww?@715awCIq6l$eXTox)bHe{ceLXKcXb?Tkv_vYfB$E?7;g#XcgH#) zh8|~n;C&QD8^p|{;r=y502UdtCa#Y=1EHcB($3qShLUut0$A+2KB z^(HdeDMD*fq*vyr#j%j`ZRhR+gPCz=ThdlR?p?sqCq1SDjXMPzl1u@cFgwx+Ih`@` z3WKhtZT%i=%{ur&4sy=6xw1jKkQ17n?)+mHd%KQ-Q?&Vlx@j?tAp|k{U~qv&mREFb z1cmWKweUOH%3V0slHjY1fPAiEGB7Q&I^g#b;iR008TzavmcS@zB)_=jc|0|>TP5}Dd5ea;0O~#z>?_YK*%{0yFBY>EXz&{Mdd8`Rq`0R{fmA+`DKF)U$lBNVp8bi0C(WWRWz^wYyJvc3MnK)p11^>LF> z%;JN3B<-_op`rlHr|Y^X@M0m^Jy?B{?qMDZdP6O%QZ4JFtQ_LL0z+Bc7?ozzf#FXefHxj@=*?9DaBgyMY}DeUTfj1K zTer&#OUj}3NzptjS6V9Sg<_It_V*vr(utw7-BiS7O*ARdB)P%W##II2*7oz+NZjjp zesL)3%U>U4iK2YRjyu{ObSOi_IxLC<=DD=;xU@#OwO=c7j;|9kUauzWoTkY?6VE$N zK=j=w*L==);?E%e{c-Rb8nBabbOe@JW2Zc5vv+TFiGRQ5g8i?~!Ra{ACiczTzFD}_ zewG;sr>FlP*+1}2wKwp{H~j8o;`Rsttd5BwpB zW8DWMA?5Ao)2!eP!m(Z^4NXOt$imlZNn>ja6JO|<6pWa%L$a9CS$XHfWePJ7gzE;K z*7H|ointX)0(C(^gD$_V7{`No@r=O+7UtYT+_sED3Bq2vp+%mi1>RU?HppytW+XIM zPbYfZqUGT{JMi<%!cJ4%X;*)2FD_#58_`s#T;L+(>}YfG2KC4PD|TDo1n4Zp$ADcZ zdJvB5D37WA#~1vPB$G3`nP98%)INW1Xqx(v1POdxM4Wc<)*W`m#7qiiLT!3r1929j zD`vYpLD6MHtqr}_p!2Ar)V|}VBkPC4>$G2wCj)=_=v8I8PfU&C41N|`^aryLbr1I* z78|xTmtsXuHku5ZF_EL+ZL4A_lc|24qwDyQ>8M)0t6R0G&d5EuFwn* z=qVI7(SeLMVU~?*nj5}X_5O&NI!5&}e;qD4m@MPnT!}p}kGmU)6%O3ZX~c|qErjIL znry8VD+HeSP&C~j-B5gX@xNaqyuno%nsy6~si1DBPJx(Jv(Spzb5|;P&gZAcH@MeH zwOe%j)D?+uwrL~SfCi@X`91k)F~e_~7~FG}Lz<=F3boPdTXt;=DfbHmnUU7%Aek>ydlwrRDYWHzeM><^QY z#pi^~i6TdcVUDiE%Ko+zMvEJTb2J=)XuHh(zzX$;msF8eSmCrv|HIz-nRn}PFWk!X zYq6X3TN5=KaR^7SzBi46(*N&X5C-#Y(kQxd5q)D9IngksJ_$EQA}5r47|!3+7sJzd z`l0KgB!opNU5tL^n%K55g_@{2X&uY2vbB2XsCwf2Rng6tV7C2c3MEoQ zJot=RVRmwDMcR8`i#|q5we#=`F)uMPJO)cc%eO7L^H5(O^Iihxk5aG8(M*I|=w-VI zUXA)(w=8P>iovC1ZcOdd4@JwU81-b&Y~!~|{;-jY7Lk^jF31aWV`XlP<;}%va<5I~ zeo}r_^LU*?d4qn~g}0k_D|-djqhdIm*sd7bm-?o%pBf&2COvni^WkB_JD<1o|3m)v zZ$3rOoJeI*m|Coco*0`_FXZ>I$I&JI`m@+i^^AR`Wh$>E4xfW5h!zS7UM-Q|?B3r2 zeLQSySjcT7_F0@1bL*+qUz$5vo%&W6@o-k~uMPXZos0dHO4nDq5nM@#HmF-1>;2Y0 za>4ojmDo=plK06rA?Itrlgl0NwozWEP#9hV$tpuU zXrC7__EqUa(Rg1$l~IS;-rq-E9-5uA&Xq*Qxqer$7q@mD66KMbkHB#Y%D88wh1pId zTM$D!f7ZsH8SSHoH-QIyD9+FD@SkeiSAOweKK^9zs){Jme7~oNUM2GL!e6rOb1YL? 
zk4-X_VC59^%>Sw1!MG+YGT&U*$if-0`)ZLEokg%f<)o8ph9LXvkN##E4yx~wlNSsw z>4efIrF#WFWO#d*R0a?yo5Xo075znq8x6BvVuIFAz9Dt|yTWb{(^Dv>c>tL_}PAYhm!XI0r{b za(LM|xRS4?%|j_FhU*eKx;kS?<$9^K^)xT9Zjtn;rHYwXQtge4KW<$2pbs@JXW}U3 zaVGB=t0%nsLgVRtcFC>I@-9OXqu$U5>=cnAQJ-?Mv>g~<&>LDvbtRzI;;+rBTv>Xd z4|;bemPVR9+wRy9xSBdzcrX1-#_zQ+Yp4z}tG`*gbN=SvU(2L;Ato(bN7Qs!unkT& z2$Megm1^>?KYhK&D`TZU_v2Zw+|#7d`(C2#3e5-4e(G0~3*b`({-oLhQp5ROTfJ$Y|yWsV06H(n^CgDjRWLBAGOc8PMZSbd$ zP2F|Z>M9|Z=|9x&eDHnJC4Srzamz_UN}OYrFQ{J$B9KbGs8N|x+5J)n-u7VAn@PR^#d zE32|fmk>;%>AYK<<`X0vE%RX>c7b@e_j3tT#9HZfs>t`u1n8JHxBri>uMUg4d)`(= zDQN`hrMtVk8|fD55|)t0rMtURx;q7=k?!sk>CSh3%&$M+f4I)FANJzRnYqu*J+Vct zd+A(Hkzjl>7T+M^GIzH=t%*m+T)|?@79aB_cp;llPT@6eC0A=8PgG07PAgDm@Unxe zBcJr0#drMU41e$S@Rd$LNyE3SIstfpdu8;&miGVHZEGfsUH{v~3-yR6VRX)&djWnA zyWK!VUqh4gM+d)=G@Sw>?%uvP)lVl{HMKz|R$r9)A_5LLhx#D9zu=L>&2{0;;eyz5 z`;O6OY4Jsm@!;3#Ux)XH$`hASLek#30H|y}3xzEagKGv^uKH4L^`c{z`{Rj+DM4!W zqr1QoO%4e8xn+FiO#=XRg)N88%KVzcJE@vnv3dCB)pJYw2~dw%;ZD#TbysDxhRfbHJFI}*?h~W|Nh)2=K z2c?~cyeDr=!OoMO7n)cib5LT|OhGjhz$@*~Yp>tede&1c-Imi6k*FSzjA-ksWKUbM zf*x^j5GRN{C_rA5;FW#})tE`vpyHPeCuz`RaPAPExA{I{RZnhB36cyJgF(APbtF24 zYWcHsrc(O&#?x{qq5n*ISON0-XenW2Wb*;>Qv#E*fwgs6mf9ix-y*G4?5NX%NT6_vJsRVJ#pS*hwoWh;R@xc+ zunrxcc1mV}4CRup6Y3KG=(q{cc-ml%?!N z0D;M)5&2I+k>38@?ah5o(qhjT3C0spJ0$p5qK<5eCGn$z3?YR{;Y%NPhLc6Dp(=XZ zSmxjQsct|*N)n(gb{RR%QEU(#dXY5$+c=0tOmD3#gl5C_ zd`~?vK!dPbe9r=@A#Zm*Es_NXhjp(em#jnWDTPew|7;9 zbG5tRxfELBYU9wmFf>nabHG=&-E0<=+8gle;XfB(4JL6!%%)(cB%UR@Sahl9zS(R0 zpt8H(SW+idlj;hCFFbXsNDBsQH6sW#R2rsXLpb9Orb{D@I*1MGa&2grs=V;-y1Tq| zD3;cc8cZcz@t}YzxyAt`Pt(HA(dlxeTpbKkX5uebNVFRy@6)@5xO-xjbs*ai_l4M> z&b;h*c$amqC%h);oqfdIO&^*wrBO}nLNZJGRj!K92q9aI6ly-tpF9X(jEGiJjyd8o zf+TO8sS?PETK?P7`RS_`JqKkpPpzp6%5|kN)H1rzQ~rimYTwq~X4<3mbxpRO zem}e@+`>!jGu?(2w)d}(^fNy`_#sC$H)42&+Wz0L0sNauB+!HlVzx3lejK0kb^mdi zMA-bjMI{45pG*=TO{p9YNu%m4Y{Serdm7{RVLJRi|i4s-r5<{PZWVWYK8jvmxMh*zo zSW7tkodfG=&4P40;wtjM*U5-rraI+HgH-nWM86>U7R`3SuuE&SV`t~A*##z`qWkiwzOFm7KdH!)!%S{~Y0K>N77x;oC_}o>#V-UpF@owugAEBv->o*k#U;jj-|2?)|eT_b&T^XH=6F;%cEf31c zTjlj~2bn8Wum+fGbTlvX&Um!AoCck(5 z-Q`omg}8h(OCvRHT1C2u5^GEG+d4lZZJ$n&gf6pH{P?6lp$feIlG=*pjF^36b~W15 z4-}#rvUvD<%%$s^6BDBazRAj-L#8k5sh>b9xiD$x(hVn|Z*M1`@@$f<;`#?L=Am~8 zvv%-t`1`|JvyCSD#gm7zs(RP;4Cn{k_jI@re+juHYd$FY(m5z(fc_nu&t%e-8Af`e z$fnrXQ&ayo36*Gm0p5m#P?E3j!_f5!emT$}LqO{kN&lk=g5Rd|N9;SB+B)lu*n!!g zj?i-LDy4+TTPMeRcUG&Z(fn9yF-e(CW(<_J2 zcT1~XYHEageW8DDOsPWZLc)nZhP8lrE;%_^F7)gTB>=d>84Yfm9aoNo$7btojivcu zg*#8J302z@Tbz_rm$e=(qUI#YuKQ;qiCj#Wn^=Wf|>wc>w%JP>;(}uM%)uumV zieKw(b_nx5p78y`gIFRukD8qBvWN5jjiAzGGgBIj2rv^P`(8h@v%)KHL%$(Sm_Mot z!&esKvlU%(_1?SuuaQkgwUkJGo0Zmtl;Y)uQmhE4{hP&_s|_y%388~n5ydgOEV@%i zL~2VZRTEbsP z_^*Z9CrGQ7-3pduo>yf5dViZ4g3HQXTE}fOG&b~XxdnTDc`hb)5{L{F&TkG$PHGmS zDORlbjLDVY;J5w!LLM-AKxlpDrW+H7zcALGfY@Ig_W)`FmiXjofpyknxtOh7Yo-el&G%8*EOu6AWE)6q67W zOhVzMO7|IKLat{D=18bc{-M~`HuX}~NOP!gXRskPj6BcY4pa*>gdEPe=rGdKl-bH` z9OpH0S(d*_GUR^bU6#Ky+ZdDWBDvoVK5V5ssVH$mAfeW!8>z~8nod+|!Uxya2!uQ3 z>+20FZ*tahx<}1b7#H_&MvXOurc8s)$t^!fg3mw)GO#lJYON?P`{DAFzw%QPUv?M>@7DwLRnRs7b!#F9^&~|N~K=jBd&+4;I)BnHc->9J{*49)c|#~5SKZA^HNhM3Wk(+ zLRIi5;8xJlrW$ItCw4O?txMf1&52I4L!miK@rLAPMd$H512)G$stx|T8mq*#g1^sR>Lw&K zw5BRF=r33VzoZpiE^2Z*)VJNl$8WJJwHxC5jQsm(O+5L@KGG(HtF|2NWvb8Hj7=!2 zom$8e|CGz0I-sTr$+^xG-eSH{U#k<%L(|H4yH0c5OZQOb6Z6fY%hVuli!!go4L7mX z7Vm?XshBDg@O2B~bIq~^k*e>sx(nG>eYRGGA!9fl@f7SUAL#i&aqJMRrn(xk|VDsgHOpbpWq15 zu{7S8VY^Kg=84`pOMdP1H*CmqfooRDTPnx9b7=ZpZ9&oSbVMb{M=ML+X8!}*wb82V zQa6muQZC&lb`YD4TuMvo+Ys6Kw?#3Il~eL-YZ6d^s);S39 
zl??)R#KQ*K$uzZ~3zlWGIN(%#Zm~x7{1gF@#VzK96X6Q9;bwGM)kI^O66+2NDHxMGt771G`;3eaH#&|fO7OhLVlN$O4{RsO%)8H3FF;SKlHqH#awOP*i zOZ{fwx7OF{Mt!mjhkEtLwq@P5fu~#^v_WiMu1fbaR@W#;OVIxi(6$mp$~j(7Yc*nz zCS40r+)~TO9kKIE8m+9 zm58IhXd42>4R||tq-k$@Y{Aav8jav^u@~%lv4H{0`&s&gSzQ7+W2-tFJOLeKC=dSQ z=Jp(1ECi_T${zrf#{{>S=1SQkJ>rAM9mUe(>t|-a zd>j$FyAGIDF15rVv>_FYwdM{4Qr21X0kz6$pqdq$611YUBe#EYZz168HRW8`3GaV* z462r04*7Vq85wH5#&_2fIcdxA8+(W$IWO#c6|TlKF&?^^Kgm5{R1ksMV}HW$pZ9b6 z8noT23!u@;MY84Qly3bR3gf|sve&D5D{(2dE#hNrk{`L7czhWNXai7|`5>C_Ap8aR zB1=`hL=PjNQhsrVd$TbWmzU>2kQ_r%l9*ljBkh~&3Z>!LFUlP5`&GfoyaafVqQh(A zpL~UoG_-NmB5=e%irsX~Xx1_93n`SofC~-g?#}awd{v-ChBkDP!_7-@pfc1GAKeMF zixP|*Wx+xgx+X67waG5TKFNHcFe0HGC9p$SfmgK2fUpbA(*%Ej5Ec>Cl68=;92ek47>8WqUD)k2GTx0%)# z6{MdYb>4HZC@l4?tQMt#HSyTvL5tZxGq8cBc-h<|Er=)3M5jZ{o4UsnN_X;_X^Q9@YJ)7dY z%z(alr6WUv!>e-N=^D$;by8sDz-C7UO;kQcP{3z|E((b(f$gZhEQ#0j(1Zce4s$P= zKW*2RH-F8@B!k1q46%x1$CRo;eBS%ven4?=UqHuNr2S&upZ%%x>P!Dz7jy0(G07l7 zndMwfEP@b&g4A@K|KECuPAu=V#&zsi`Ef`>Z2lyzkM>@XOvZcaS0qJ)vOPaVBDUet zVtAxTh<9k*B3xy6w^vh0_J#+$#?nxd+E2ZpXYCv7;g_jwUJ5 zmXosQERE48;0tk+xb+?8RtIp06SWi{u{~8|S>lDP@T*-#rIg4q)HjOe<0Re{9}<0D zidS<|t_BoRniC43WG-`NNhVE)kb~ji^BAkJE;NGz690e~hp2QmEI{kVnylcG<%p42 znDg-_Gg3Fx7s^$E+gBm_dKF10gSOnwu{aY6==%7dvTjBCPlO*^KEkLHMhx8_y z2fb|~JwPjszGF%WYUDiix40R1z0l3nL?;FZex8mh@8ols4zU?MF?$?? z{kz1zBTv-~x4|l-u3(=J12m+wY%|@2 z@?07Hi`!FzInfQZiC7s4i9a6&_lFOLbO>X$i@O74N7bBh+qK2b6&X2wGc^sD&9X^Q zo@L-aK=z0<`<-UHn`AwGTG>%FtZW4T5gd$xZLJ6qLP)6C&FWD~z-0Fjl-yq_gt9t6gYSUXCPtef^~Gr`04xzWAFrb7Km1Nb+6+!+6| zX#QNVyl#5Y&--4HeI(&qbdADE;&q!$re?^xat;cG${2M`rE9c0?IJEcw>WPh70I>YAkG&!o3y5JqRWh+Z1lS}9S`3{4&=q?Tg{8KqBI}?{i5F(r^hx%fb`y$e`ytJ> zv#GA_@+QV%_)kE8)?p!5Evr~HvdDLYH`T+*iZ5m)<1uzsnIF=P%S_TW5GBSvBu1DJsdLJ}_|p=FHuR44#A zz6E_k@}}P@eWb#J2Pw#FmM%!3ylzrWXYqs`4wH?D9s25ViCu{4iW%e-;nJ!3^ANmV zOC{c{o1IzyPi;Is;5gYofVvw5EupC?xVQBtrT4Lw5Y}AB6uQlmy)~#*8#+a(tY`Z? 
z(8-_UHbL5-Z!Zn%KE;KKezl{H($s*i!dkC~`flf3$R?dB;tQ9dxLlL6-FmdYfbhqn z%mF6(x`z9_CnIO$|&iiBSNM<)?DKx7+;^X}c5!b-*{=@uWov@80qrB#qk2 z{x6Pn5*@7Bhz6kC;pQ7z%&W;QjCh8J;mUW~1iF`t+qbCfU}xQ00peC>Uj(`M95T;I zT4Hvc6^K?r4ehoJ?pXqF-JVb#!H$g*9+p~n&lM0F#^vwEc$sCUh9>eD0I3)e4jX>j z5A6#;AJw*Cf0j=xfg4n%VNKI#P=05?vRVM`p{0AAGDN5Aa`o2BCdQg68oKr&~rlt-(g9&d&Yd` zSnWMM{#p(*hBBpV5P#z&0L1)Q4uSWm6-7Hhc>i)u(*S@Q`}?jT9%lY#`kuCK`z;@I z-oo>pltxj$yif|4k?VO+90V7>6*IlY)p<-=r6Tb2aY`a>=J*)FrB{ zGnwa7xRF)(#wLKRXTT}*G4p6iB-GckyoLJY5gIv^V6uZW>!67a1ci*&f>VbvLrE%0v zy4-N(dpJom?R>TR?3)@}BSTP{}wwPYekjT?#J+55$kFW+!`DP{=D=4@N+qXzF~C5bZ*tfK3#9^ zqRT5Q=i6cZxQ-Kqer?rxlDHICiIO$4d6wSLF$w;3wb!QD5D_L4IgA>F8GeB-!L3ji z;VUn<08_r{dDA4F`U0*?7^Mm<#qbRBk7WgOT=6Oi1U=Pyywb%x1(4SgRllGp+*~`0& z&lSuR)$7y|aYj2OdY`4Asr4+AFrt8qB&OX7I-B~42?^P>YeWDU)m*+xqF|@B);sVg zhn*s&XJV3<^C_PR`MbmL2Za*eiUOVNBI5pb$x8|viOy|%?6~kYXJC|r5#k4E6d6O{ z-ZZS|ai8ffER9FGZHqLZusjWp`o(|!HUK>uUKc0vZpiX8}DqDi>6zBrdAlo5Z>!9DAP8 zy3m|`;XEYfdjAX-IDzn>JDjS}{UE0<0vOxMo;G{!I+C45H9wn>tRuSv19KcA-Z$gB zJl|!~dQHM_oOyP$=LGz%O5dVQkd(OID%tVQz1w9>|7ico!$9d#R8eeb1oDrK>)oMj zFM}_kt7GH!BO__RTSpCE+qsgno66RxU!lP7Ylxo=b$b~sBo7NW;KfvEkMRZbM@55= zT1Q;E(XWcB!3hwxZg3K&%HIz$EfvlvzsKlGk`PmpP}HRj0~)uXvgYw;pK%&-8DGJp zW`4>EiaaX_m?yzb?&E^#uM|`X<_o35VMCPkyrt^kR|-H^c**aVjg*R&aUTytEIvIG@_+ zyGgonT);?cR5iJ*qkWKB&iKEI$C<(p^$}|HI%|#~togX$wrC5>bRQRO&+$;8er1en zVR*aZ@-^ze8v!2s!v`t6;FvVdN<}tn3H*7$`En>BRy=#`j`{d z{`_QP=eObghiJ3Gt@*0!uaYi$EBm7-nLL0=@{(23^q4=h9Fy3fPmKYDWITR+z(*KB-%t2sU8>*rMYS^vX^{l7=crB$?oN05g;mgWhZQp_GCDifg zlPOvT7+NyWnE;qNVRavja3zmG>ZE}b-W63URL!Uu!5rS_4~{~^&cu~^DnPl|+sXn> zE5HiLvr9AHN^Gett>hD)tDo!x5cf<%_I%kE=2IV<0`~6X(_pkLvKSN?$oQpSoaX_0 zq&kHCcb%;1N4e?cs)f4HVK6A=D?+4@y=9SbSZ*ng!#bqx*E8Hjd*hJN1d?9}s&#;! z6YyR4c-mVB~JFqmz!inoOBdZN8NxgeMhq;4L#nf+2DQ?UA@+pXgwTk0I zfwlNY2>$@A7BbXVhWV^|6yBMN1h44XfQJDceD%ImRtWlp{y_lDK%Do(*VZozR0s@E5mGPzw( ziHB7nRg|RB0t}R>8CBJ-ZW6A}6Tn{pc+oazJSef7jrwvVc%(_#Gl9dXD=~h&=QF;L zvA!4quCzCQZyrsHnd#w9*6t&;)@3q!nmw;&SV-fvGTJjUnZf(j+u%ddS1gH%?66ob z2Po!KrCe_rY7PV*d8<6oa=pJ4^N}bRWgx_pc_z}$I9p7!Z|O=xe7b1CMDsPE zDb!Az8EI&EfPipR)>~lF2wO;&MC>{TTP>vLg@pQ6`G@uC)%Jm&N*lAgC%{TPLh_D7 z1(sOJq+mUmC7F@IY?ING7XzlqBHC$@kqQ&zIpjf7FQAPF3q7od{xgUaVg3i~8mAK@ z4x3fsSKW#6@kqrB3%6){A6jh~e?|%4-2;AXWJeXNhFkR++wQF6b&rdi<1bVD8b&sQ z-nZ*>x^W&(V?03X1i8CmsiEGGC;d3lE587Vmf&#WwSuo~ivt(Zp-pxE14dxOe+HeIK->{sG^_!w_H? 
z8ilA*Rs){74fRTe?VG;N{RI3n$QQ0USX~wkc570Qmb)>l57UjmXc^tUHtOIT@STX4 z(6S9lXT8R$Lih}%F0Pvl^cY6$#*k=*laS+LgBv1h@hH6-stHbtpDeblAd@fq5w8te2$GMmg}?7-7E(>x z%uiq%arfP;h`3Fo$l*#8#@+;Gq&_>m5t<*x=0#5+=4vdX@b9+8WYX;H?35f1Ht95y zo`F*t|Hb~lep*T{m+g71bp*naO2TU0;QOXWP zyvO8okzRB=**ea8IIKPPxV>`cuyZ5{dso_Yewxz1mtGZl#rVAz32X)6T=!)|wz}0@ zk2{9?LJpD$*Ngg_?9!PKa81-NUH`q%!~BLphTiqhGhi8SI|!UMbCWjiy#{=_05`lx z3>?R^Zv;`djb?1_oxz;BKH-E{-n3x}BavW&c~$fE>GOy^(Wi_^&HT@!qUjH#r0fcB z$tXwijR#YUrbZIfkJgp4R!^x=9q18TKk>^2!gCJNc*C$W_WE=>{d!y@8 zK0mfDb}b%eY6nQ2i1@*VGrw~q7UlPjZ%E+8Ws$dU4zcOC>8N2wmG<-R1<%F8ZgEU~ zdtPm+37kn*i_h)6DA)0k^3NcH4grXFih;%{?TG||NNo5CO((aFKb{3^vC$Vs)&BK)-(SW>e;cEieYrLOwchg4 zQG3z8G@L9+X6taR;v@<`vQoD(34eK%nrraD5rPReB$RLW7>6FY)+8xfTrg^$M$Y_P zK+hhfqFQ9SBcaEWyaxn6EwS0$c5G0*nE7^F9C-<7@us{CipB^F3V_O0!J=3lmwOk!%^wRR9HsCe?|S83;*d?+cp{R55%}Dl^be8WvT`=6Fgem>61ooUv}+ zmC?hFvZG!l7#=-N9G5M#)$yY&VUQ$!P`DV)p7f6rou7OexE4vy<05>%CP*@KB4a@N zmtLY14(pUkh4#JSdK+W(nc#x^y25p{E5j$W2h4Q+{Odm#;pfhX1~2gaD>YNuue33( z4Evv4^nhoK$Q)^?_p=ZWw?V3mC**n67x#=Pt!HHt#Ej(0lfz}m4g!gjY>55v*vir| zDwes|rLq`F!na>y5b`;YDqax4g^E0dDqIo;0^+DwsG$ zmldc%D=IRufR3hT+gk4scrF9UD~bfOf)ey$ms0i^%=|uOP;Vq~?E3!)55&~UaBj6dh{XJz!ziCbRVuMwv{pfs&r0zh;^Jqv8|B|0zf z*c1FJr$o#oe6K-?LAaIB2-E=fSSoGVk$IOAIvy1X=k_Lc>#Kx`p13# zhyr0XGZre&Y-bV=o4nv}B8AP#Tt9Jw4z@5I-(_UsHJlaAA~>^srO4|xNp8n#HAC(8 zJ7{Q6W78}(QAx)An`vx5! zaY{+W$BOWtBo|F&c*5dmy@Ao)gYh`0JvC9Cyd0H_G-o{@!oEi9w*Zo0n@0^do~pmZ zq9qnd^owK^HZBunW(mRff)V>FWRyR5I)t#UlrLiQIh*V(T`{gwpDarC1N_7S1x0>; zws}MIvg=s$CZXG+c&5XOqg}21Vf|+1e>FEMao#yXSP_0# zy;oljdrx|PV8yUIwlBU*F?is;AC9^nf$bU z63+GhSjE*fRFdpUE01bl_p`8`H*6;<@gG)f1jJC(6c|Q!Q8Z;=d&4Ryj6cVn_=Sl( z0U-*A#M)hZt8(^fF+W>|r*N@KA`oU0?w{nK2y?M|3r7KWZ($M&E)^3B`lrh)q+4up zx&&kQ$e;HZdby1*OnB9opf$oG5xv$WHW&<5c`80r?(4mn4ffoWN?jP;ubG7fTcQI+ zX6nv9RkVGL;f9pB!oi6E(&7XITq)&NxKS0oh(@M6ypvtl+!AoO5N#C6Wy^`J%CQNv zJ?-EjE3Q0YxlrB71cROMOr?CYr=>lX54)bZmy2`R(;Qla$kc1*GPl-wre@E6aS)4s73oxAZaS zL+xHx=S)!?h{wS-ySu2ZK0V{lAVrZ{r(aewIvC9a{^U@$YAT;epvjdO1^4_c%FX84&H2rPwwo(Ke7^I}IO!=bvJRDaKt(?bIL3 zxp{S2arc~Z(5opN@06DvbugiAMrf#ehUIlxD;7?>*2(v}i`MJ^FA54S>s;;k#H}|F zv=ld~q`o})QOyGyBpD7Zl@i^6dsrpq7>7IsPXvkOZj)=b^nCe8<>W7pvDKMqI27Eo z*UdETHuvgyVdz1y#A97=3}(T0qfE>FoQpBq+?fI+oL>xi`K{ZQTQWr{hQAgqb$&S< z)Mm4kSp3`S27>&tu)$)XY$g^4iWuh?C4#7<@dW{S%g-QipmcMzV5SJw>mbgaTd-Q@ z7ZS%=E|Si%TKO;`;dn|u?L|KZ_e>=mud5M7UmJ*EO%xzQ2+%?jL=y4%L_W9Jj%&Gh zFSmp$5#@M^SHDTUE1fOM0grff8XCl=?qfnhhKl;;*u9#-J%2xWa>^ol++ zj6xKAxe@`K*at107#UxgzB*&{17<^9$tYRfTySZf(U7`{#pGnu_i{2d2MBC7t7r>W z^Ly-3>BF_|Fl@iJ&lVT;MxNIHXtH_=c|L#BbbA)JkG|_q+uDx(^Hu{%@uzF+#-9*hy-w`Tz8jz69_$f&&NHq2&fY-EfS>{fNd?OV&am;N4pfjnqZ9 znLWWlGEgXCS%ZEiQhRjv=SfTAI}^G){D*di?n%fj;wOA)#TtxAJ^V|Nl=;|oyV4ky z+#03L?oB$~ETYVrY$LRk#K#201+j%FtqEJVGfcqs;2PN;X}$6pI;7a`YApcFdJVbqqr&+^B=zn9iV_)iVIp}?0 zJ4EJiJC2U-H%y?E>s>d%ylI_)mKA>^7AiHvK@)NhL*W#ge85NI)I$Y4^utAIce8LgR$8X zT$j;S%AU(gV(0zW8~v>ulnrpRD>#Qu0degNru4-_mp^h0pxqs;N!(tT_Q%o;P_O+} zuL{2XKpZf85kntuF^&%h8n8#X zS?B)-S>Ilk=|ES3rzCv6fNa$MFZK5qCpQ1Gy2<2llzJyb- z9HVTfNAJpeqxiO5%uM?gN-Ik~+`do(r^9!{LG{+0l&v;-*n<+367oXN{hg@Uv7Xu^B8am$rp}EQd;p~Sz0#8rX;M{kCb1nhy4*n zzXL{I@$_QdIB=r&HYf@2N^cbIwCV6VZo{mlOXEF~-!HfQ_d?GN#pKHc|Ll3h^WINO zjxvk1IutV4Xw_d6;$OlPA%F{@H&>FORX>+8ga&L-^{4AAyha#j=nBZwDU~QzVOq`L zgcLR&O7?*Zr$gP4FjXpu6d>fNJ3qZRlX#ck?;paA7%KcEDfrVcX_c4eGs87s_g*7g zB#ak;aRm>82+mCwK!?JS{QK#WH&P5~e%4T)8H@1oX)!4J2&Zo&o1%RD#c`tmu4?tEw0DtsucoDjjLTLU;cOx$L6TRQ z&#myHJvGB6s|jO(B7IR&C|F`-_O9TTGx5g=QoP_>^Kf zn01qrU5@u4agv+tgr?W^gYimX(12W5l*5V%&JXa^fdJ<*Br!2DBMM6z3Sx4;^;GDcrOlf*Bi9mzN9{V{3}VLr2+Bv0i5%4-S>z#z*SrLkoHilRL>x 
zq!7`iiX6WARw#WO>LRnr@{JX;j!@0*E}6SbS65dsI|W?t@OV8;0yJ*ERiStut>jol zW-{BERkXSL2A1ivEAQY^-jobH{SnyQ#Y18GZ2err9$6HOO^Wha`?soX&?XidA+d<0*t#gE5L zAC6qhRl-tucameTLpM`tA1d2{8awsBCqk$sfGG!c!)2<g)(vr-f$C$FUxOQN^yU$lMc1!Z+ph8+`(+{_h}2k0 zAXo{Dus@u(^_t8rJNO3#j?OJmaqJ;qK?tzNI9pJvD`JMg+E;`Ih8%a%jo9?*~^bf#cM>Y(3Ss*`rGVQx2-B8S(<1IeTv<0O9|m?q8Eb3HuXZtGr;CBrBso;?|sI-Yk^^`J*``3Fkjix6m1e=Rp9i&t;?4c>S&v;@mY3ofro zyb#Rn?Pc_4XE_2Cz@)b+;&?vY(703=Ov=1+&EbU_y+oeD~W6( zRd{?(b+p~>LN%Cukj|okPcdIk8%(+&QKhi&OAVA_MubB}D8ZMAE3*^iPg8`|$M!0x zL>alSgIkU{k$4DHryOz?b8xsTRcwpe>M0%xy}+7S-ptk~-(Y4@W`H1qla=2Z48atL zfXzGRC=)28#$jhyuVEBC{?WT;9{VGtHb4I?o&5VM`zB7k6x5g9E(Q#q`yg%LLfK^F zMgX1i`go6m)7n{Dzntz(`R$#YJGsZrnitTq+~Z?|aoBT@+qAxkhhE+*GS#m!R1O|W z4$p@{CMge9N0&cPx{My2K}d*`+xS}AKd70O{O=L{(g9Kx_nL1$17~Y&)?%1=`Igj?%A3OH9&DD4A!3TD$LBE z&?a=>cK1@^s*}#0VB<|i#?xq6n-K{Y?wFD=_9cO%<>rg;e zk|r}fdrqmS$e@I7EMJ=?u9i&*km{T3M}l?g`AE37X2|4=D#&lA5)!ul8B)VKqw1Q51&67=G8JGN#&mWA>E8(*uYDk#`anz%&P?Cq^(4zrif%17ek7**ctxs}gIFBm zoj?MkMF?Se#k;D1h9YJ7!8xdEQ@_P+?8HviS?8J)88e?sK|95789KX!6I`iyJURC^ zNq2W@S@85E?IR3GdWIe8?Bb%g>)st&H*oaH(&{YoWyGEU`tl5)trV zB*rSkLIc3L#Iy|47!&`?oK7Dvg%Y-FoB2m8<0?f8emT!@QU+7-gzQhm)3-=Y{q}6d!3hA*w%(bNM!G%m`fM~F41}g{E#KU(d?r<7moeC zZufsbuYs_P7d%qoOW1fsQre}I)4j>Zd0V|=a5Gs{YagkP~*|BPQXsj0NTcC1; z;uAsA1*uNya6SnFT|cVG29kUzWIOS4#~x~Fk5B12`Xp#~B~GRa$jZFBOh;!rw(qo{ z)3Iv2@m4d3{IH|^3J~P0Ee8Bc3oe;eiYfOL1tQ>6!BaCHD-%*P;@*C*H5jh>)}UPz z<{R)2x%`rOYXi|?Gu>_KyhqtWn491Sm3fH#1x$LrpPaz|lA_yDjUTULn=aP+H;z7K zgOQaU17uG}Dk;oinh~sH?$Pj;){&FUTV7OC7uSiDj9?Yy*nMGG!5zT)^hkc8ZkyT6 ze(hzp&3HgwZmz1*=dq<)f$JN#*3MjVZ7?q>tWK28(2k+pw47EC9B0>v-`CAAEM1wQYRF_l$Gq0EnabW;+XS2UE0m67ln+Ej2;tLzzaRk|Bj zqYQ)B@NNq7sN%dDQ72z4-)3)3;z%MXkq{PpTUNZezSrN9ouqy@hrmI7k?`_A3R#0B zolwob9vd6moU&tQEH@-w)u;a7ecZocr%VY#?Bf+j{e47@DoVnek`IWZiuFLA|22i5 zM^yCkv2h&t`5k`G(Delje4t7&}>=*54Pc4jlfk;C}NVzRDNrh3K@WU+&BY)Pa)7e) zG6o-<5FpsV$}kfSmHBKt$?_XIkgF{A?JG;tolEu}R^a@u4MWh&spiv=&KQ{u|OQ zrPagMMDyNux0Ag$=+#J8uMe|if6f9}8DRg9=YT_%f0t?0DU5a05r7JuF^r?ZNzVFx zu>Z%=`1TgKPBn(Dg_i5B(=A~o`!7iQ>)Bxk$m^1rQamRK_ofq-3;~5X9$Zqp0GOh@ z_-&Ll%g$QF5EQXa@|CLeJZ@$1$R=e{Qc}a)+j!^SuJigM0?m)}cG0TlNuDSu3gCuS z@FvG|?-{@G7^)-A&+GMi6QR;w4o~&s;4|lb6Ggri82Gf5XsjGIZL74PeRi$E!(L(o zqg?%iDOuRjSN2%v!x=G)KDpXUCKf0WL-~=?JjO{zAMJ~m_=!iXkk6o?LiB29yunrJ zq6`X^!~af>|MIt&1tGR{BKYc7&*5$y#s1;OeUomP5i~oO@5T+Eqv-X;!bc;ZQ6@3? 
z40~AiJ$TEiU?{!Z`=Lthi(c(p>DH=rna;e9Rtn{Nr%*B+X>D!Wv6`d1z4e)rzh5* zs!eB%vPk)!buAJ3fO2b8pYZbvf)`m*tFkJOE81cKiZ3s*{M;%bbut;*FT3eJfR?T$ z{ii(2p6Tr1f(4%Q)k`+{c3~7kW3$w3skR~ZH@|T^6 zifzGxBzT3TrK)|IU5!J=oMNQ?6vf>+ATiM|K`14|UBc$sV5b!JMqyp1%1ntxM%=5*+?6uavNpYksx*x)03L4#js&G82Cm| zN8X#PIE6M*Q&0`?q5lhR&mp{&_3k>#Ztw@;CB_?%S*JHB68jR=dpP=KX@a||+L8>r z2DvJ8n=ru%>^SYJx0hCj7I~k9?VpMam^jB*Mx1se$uLLJ8#d%!(Zfod+LLVAiRZ0M zj#8R!ncq8T^BPm6D_`EWiL`X950x1|xu9HL>mF;Y4AfEv8rYpJ93j=N(%T2T-Rh*M zx5?)#Nx39F678q|IpePSbYDSstA%v$qTV?eKd;#E`F91cLE&d(-X+Irs|LwJw!aN@P{7B1T))SbTYqeXG?h0 zBfS3(|6Wz7QT;P2alEwe)#Kb05Y~tVNUj$g>~Oe>bhT=YYa;=q0vsYzF$)Fx$Yx>*!_0l zmF?%4nKZ($f!M&KfQ09Ligdm!{MX)gx@$Q~CM+Q3{#_=nX_{bi)Xb zB)b7<6#-2Jni}KMy@;{3p6-}w&^4e&gnMDNH^~8YERmsnJX`&Q;LxJ5zl&StGEQLF zrs*3SWz0{P1n(~k75IliAgOD?#$7M1^N-dcrS}9DwUq!7|=z>qX6KETh`3v0mO~*>s%@zMC`hkO@hw5lgocra2Jn+ z{!Z;z<7HbVM~+iy&A?Z1zwjK#;)|EaPDKKef)&-R?^Jqr%4xqod0Tq=^-}t$u2R+I zjlt2%;P0`s9&hqpr!?VJ&MB@jnSUZ`syzJoeCxHilJJ*)+EVsgKaFbRzWMYIx7Q!I z>iHh+P9&45ym=4qN-RN0C1RX)ISjJ4Vw?^7g%ITyEk{Qk_$ot1Z8lrv%}5)!f^Y0k zU&^I-#8U_8fy~%HQ84BVSwBshV_D``N>vVZ$nTO0Mge@<5U9Tm;hWI-Ln_a?8DwHw z;M+q4^`2UrAte-cjSUsBi6xjKldVQN@R;ez73@QQx}Y+KMY&Y9x5`($S2a&aVE}JU zjc$}@Is((?wO1w;3L6<+K^ zq**UJBDv`Z^JLt|S-bhCL%nx|3hdR~Z*>wnYe%D~3EAzXm_W@4b%D5z>s;`={N48y*+r6Xk`A}745zX%+yVhoqBakFOfvg z62&4H5NIp#rqQqM(dLm=8Uw2LuthknGY!rhpB0ga4+GPDbZPq```v5)?7bDHqCTtv8 z^yuB?kZU=*)3k@wde#uYUz{2fln++Gv7N)U%`>=)LjFK8A_WV~%iM8DS6gJVL$)il zeEI=(8Z#dpeSrMD?~WUFV|Ol+P26qB7=R{C1|SSH0ru%nxM8*w3VHY7XQ5lCxs)x& zU;sDtwruVgf2{ynx>h^OTfxUDrh(2Ibk`YND+=ZeF7kt^4fSj`*=|QMga+u$`uG;C zndlcbBQpKMcIin~jKlSCT`*Ra|B}WeWlop2MjfEnCoQZ&v)zO0S*F#2M{|teH_aZk zLTV7E>KP-ptlLawRXdZPAE*yPwj0+<_g}J5u3s7ld^%+tRU6m@uNnaEY|q>R%#hc( zEWUYOncL*)Yc6{Q-_15r@>t4TfsBV0hp1DwHB#CR`A5COA@_dHa zCJh_iMMd_7;-03!Q8TKBfDr#{>`4Iz(Gk5W?X~H1)r4P~I!DvHiBgk%uX$hABnm|c zvfi8-=x5Sn9h6pxKeVJ$9K)28J#SFHrGe%1a%`TjJ{GOihaWa-$M5uTfZ~Jdt0oD zZo+7h`p<*w;VSZ^YKP~>RmllTH&+dk%GI}gbRQ@$&%06_XPlC`*--CZ)aH^DvdL|* zT3m{%eobqb(M?R5J+_F(gRd*VJTZ`7YYGtBUTDrbU3ew*a?@?93Mp4Vpj7=&4tsS~ z)g<_9Nnil2fGuQcEZ{36Kvnd#$wH-c3l;h)fjhXR3DqDQazsE^!`LJ z+OK=~Htjc$fRrHvLxM<&??2CP972UWM}ljAJ%i@l>Yq?Kk!zz&i@*C3-6SFAT>X75_!+*N9jRv`G8YEhjn*NX&8ayJ2y=hCDW^UXz5N(V_geF5Z& z++V=6niR5ipRCZr(fDoY44-4#TlsDTV-a3j=a(V*z-s1k84DV42T)^UB+j zq35~J7Lhkj5H;{hh=vO_ompe5%=b^tI1#_Q(7gdDA_&rm_j0ju3IH=3v3fWwk^{*%~}QT zTEX0%ucqfMGwSuU)im=t z>-v;$FZ78#mHCMxKA{f*3DG6;YOz?s&*Y6xC#Y+AJLKs?Lb@%~x`2WmLijQP&|ZY; z!Mgr|&F56al4I~_(J zzOE^^7i;D()UBu$8Z%ma|DLKZC*&&c-O(-wgx9WHbWvfYk7ILz&!U5_Bm-0NV#4P#`rie2P1M zQ{Fq^bv%dsBMQchqyR2ApxZ6(G!XK(^S|D2Z!&1atuVduN zSGr7X{!f^ETuesy044*JUTE>Nw@~~(rS2X+eksUMJK;p9(0qi@riRV#NF?|wq5Ffs zALwzMRvYID0@xiGPP>75%cBAkMc^$U`HViu`VJU5@_uf{_mR#w=G4U!%etIz&!l*% zu~{={L-GsArzi&@g&ym{ngXUwiU0V~R}VwI2o`3JA&FcDHn%oPOpt#tV29av01}wj ziwt~U^5;@>58+W83HDiCvxW`1R6h(9z9YSx@m)&_udrAs&%_fqHAz$^xsDQkU|fQ-5B^>{M=6a6?3bfvkv4iO!gTfQ^alYf4@Z>ctqh zX@Hw4vw6uTW<71P;N2CHY$q4r4bKB2?axt6QR|R58RUX6Xi&RI03q{Zlmw=T={f@> z09**qjAvQ{!@`feoSBcOkh!R7a4+9jQ9+I*{T<`6Bc5g|nF**8f6ieHK z+xe8)=-uAkND)(>|I|Q!DuPr(wA8>dw2dZfv)?F9Rdv<;MW?anVZ4rETb7 zq-#1h#vkAo#bzYykv#wRPcYkn`%Z{-6sAcC`0f`J=W-XD(M#xN0{15Cwo7~aAT3icZNbI1ywLi%P25gT*_hk*6DBGq1i~L2FNsnT6cwC$>z@r^V1byk=LNqssX-tc%@@Kzn z&cxET72rM`4xKTr_%+o=QvsVW^WjAYYe&b$$(w<9g6;1*n2zOEPsxR{7{lbTbl()t zNaj~)$;l0Lp6zXuv6VagOb0)F4s1!@LmMhM`JZgTGH;43&4vr zg&o^n{@eXF9U;EkRxJ@Y$q?pNi6ySHX4cWBv`wMzs6Lxzf}h+K;K>LpU(?r>yDf5= zqK5Vs9qg09mxBTo%Hkb!v0*34;Uzi+s?9vZ?bfjZm{H~wWWifXKJQ@2(oS4?xG3+F zre;J7)VRJq8x;ezVjL~z0Y1-TpCMf@z4AQR?ik+2?zbb?e(UbH82j)eEW6#|;}?&; 
z1T|S0GOmVY0a;^OM&$pj#7d^k<2W-e-2#jR1(-A_b~$Qn{+=hU`+M>@lqGNjY!#2BCP|o*MaVbk|AH zO3&!MdmLkw$M?CAXsYSHAA0Qi@%Bgt@O@L>WQi%)_fT5%hmzm;8Y6s98+t2406~IG z9;vk8_wC*hB+_ORDZ%oWE7(cSd9sq`FdWxti*T@FQowFhF@?BDofYZ^P)mmD7A5{g z8q0k4q>XK1=vC5}NzZ+u>?rZzP;m?z&`q5Y0aXl1{>U+?*Pu&CNz{a0OXLT8(;1Et z3O?4%YaG(jA)xKooNt68d7FB`BTE6n%gudi!2m2jBq^?hWFW&^C~M+HP+8n;Yrrq&!APT8?6?G z_mmA?Z4%;}*IXR{~OpL z^7YFoRagCvPe4_ub6c3)GRDrddVFL{s#EapG-F$-$Q)x_Y*Uvl&A^e`$ma4?`J+7513z-SHCXt#J@>vV4jR(T4!sIFxtRs z#(3YIXDZ@UP5PCZZ3H9kC#nmY8|jUsr@ttB5iwK~{L;rjTZdaN8tq#$&n9A#(`3~2 z>yV!mcxM-RWnm2-_o924SLBq1#^EXh_vG^gKWLIRO65i>7VN{xU5sWMc)Pb=_G=b5 z_C62UvMJFtl><5m0!oSxu*SV5!|2a;QoAex<);}rB6)`B1qpq ztoa}ZK{$)-X-bJ$r>B23d-Co=#V%YLy_j+gjSv*SOz;hBI|0uer4Z(0f@X4rZ7z?7 zTbfEYs|q9yG)FC*_Ozt6MZ=MR5X>a!k@HDHiwK64gSuRxx%r#}`%KaE{riePawL9D zbKss71qN$Kx*W(X9>}mna?b#05eCUXn)MW5*`x>j0lUpc{#X%ePdSu=o>gzo^d)`^ys9OVmHZ^`o+U_-CB|W3K+Aq# z3e7-Kc*TINY?e(-886>ZqD7CG3AU(w>a-^{2NJV=_-KqX&fxogD9^Z_l?4-Qry9MQ z*}8cD>)viFvDJku-dSjn>V3e)?talecDxLuv}INXN{&r@f%gYIHC4yKyw^cAP%bR6 z944Jo6%meN%d35XF{mQyL8PYrv}$N8p#xbTK_Q1G#%2YM3;%%d*-48E=tV>d!7+o= zXex;^Hf~f8o^OLge<1}&{hLJoGU<2hKjP8mNsm9Z=?l5~J5d4xm`oE)v7~^~q_Iy9 zR9^di5bqeiy%b+fr1t6_}*ggYN)&7pteFfAUJsaW&+4$edhe0oM7&7)Ws# z!f(WAZqnkm3`R6PWk!=|ft^~bn^8L?cDi5+LeLaO)&%?ROQQz{m{z~)oN2J9Tr~HW zIq}T*fB1ZB$N}FR_&N|!i1{j!5noO~8}A>u+7jOL;So{Y=E&qGsIuYO{po*VEJ5;z zihyyqeKp5e2McpD{y(mT!*UBdsQLScWDcEbIh7Ud0W|;;~m)njjB8ES4Ax z;=`5_jvSPWRDd=?6SYmC3~`mjkji&85b}LlNG+*!elAEr5g6XDOi60IxZ|{05%^Kn z3|Ij)S;^YF$dH9idXf>)Z8m6x;UThmEx11_i1BI*4PC^_ z{!?g_)9AgW&mnHRMdbJUtdRbe#rLr$9~&-*YH3H9%Q)L`=&mera+k!FTj^!Ikb}0R z`1z1wQ!7}X*Xk+Qp2Ts2MD3`w#>}7;Pk5v}Q7w+xT%J2Ti%yJc{dm)C(iiQ!Xc-*T z>kdi25W{1v&nd@FP{VrZ6&YvV<>y~|Vf^AsXA$=;JInsDNOGI%8@Xxb0PQQ%_82b` z-kdt%_-nB$4f2`|O*SeQj$`!}dFjGv;lcaWA05NjUHjqJ7Dwa1_q;YUM-imT7W|hgEoxX8O4X(ceu1OKV z%w>QoLm6Dqj6NRp!v^kr>WXiCc6N?DR)J4~6u;Co?PqkzXcCwLY8Kk3iTs^fS@czc zG8pW!RbKHV>4g_>Zb=wKrW1L_d0OI##voQcTx!`qeK|7WHLRFZ1LHv^o?4Xis6T`h zW*VGIi;nA7HpuxNYfoye7k`X94-H|mWw3u{dmr3}{>t=K_<;0=)zWJRK}o88R+(IK zkuMkKY>|zIv}5>_qz+-O8z)UZfr>RL(Q3iAa+YC|65##q4zytXLqxnzGUOv6;a*?< z$<|u+?JN{JT4%?*e%Hw`7wy=fgpAEPtdcE@9&hsw!dSu~!-5Tl<*nzYDz4Eep14rv zj}Dx*pcp{vTA0I-AD&y5^kX?uI6+8+u2nmGbYt*6DpORW!9uh7Hf>TVz6`3J{|%%f zxeRX2slzJ3Np`I!TPny!XiyMx8t+y_R}cw*wu4gRyiS=HY7AzeJm#Y(FkzDOH(-*s zU~;tA!_adGx=n{S5(Q6u8~5T|QNy-4&`-XqLlHEtB&*QO2{aCFH9K7$HzceCo^kU* z8czv7c7#hv8J(dMuRTDHcV^^Nnz~CpL&QRY)R*jIFp-mTT5j}vZH~#Fx*gVZ(({ks zj25UG;;Y*xsdVUdXy_Px9~u;Ujis(ng=t#)J*~CHQ^0)4(>1^Q$&sFbjMHW;h(mPi zvIw=c*w?Ku+=A_^Q9oz!blaQyR$RGx{+Ukx z^=zEm!{5y`bEaUN+irjpkSXo|%zu^?L8!~8qr3Zc;Rcvn`W15lwrUA}>Kipi{7kjI zyFiz(452dy_NH`6Xnh--A@4M)OcqO@_MRPY}A7kT!!sm)LhBbEnHf=`DSy>he;ZI6Z@Do1WQ z*f^GS-fCopUs_Ny5}wv!Sy9Cn`Zo;(InK`HMGtIZG|O-x_kSax`##{&*Wol+A~ITx zGjH|a5cd^5&SKnGCG(&SqH`hpfwZtLFyTBHvIxIdl^vt|y%ilLxEDzwR{H=7MGS zV&h=rb@gNldS~r3$d=<~$XPZ7-x)Ka`f2?s&kdn#O;pr#-VTeqB*#k@`bkIEkkUF@ zDp%~Lh$k-~!*5Wh)Tum)^XTI+KRHeEZ_e;HPcD(pmE<~gP8==sn}7MzSS(EI)P%|@ zJkww^#m+Z^uP~tWug$oBONfZ!2q(kbT!LM~Ze79j@Ap&bs9V3gbm$oT1%-b_#~;LF z7jJ3zmOgUb$MrVInh_zDfjWJk+?+tvTTBjH`YLU(C$v95N|rU8?g!%4-5p6FAaGNX zmUMzRQ zC!}4}&@qcrR$x^OukLY9Vko@RdB(1tNynll_W_wPM7~d|i0m^#1s-k$bvMi$7-WN$ zX%o@G#_Ho42N62P@lflZmKCQ{qqfJ%^cwiyB02Cqk4nzvLP0ouK&yW}jf0CnoL`7> z&`a0${q;!jjQP?Jye$KB%HwtyJLCjoK~PF>mL6m{YZm8I9aeFE(xvnZQ5!dg$(U1Q z#IJXJfo(eBKN=*n84Ad%IJ+awvFC}n_ohNnF}>!qo=uu1kx|-CmRzQPf+#z&nZ{mm zPSaqDEz;LsG$}Ja^WR3T!Pi_=$5{|Gqi&5Q4SDLH5H{aF<>-c zBYH&o=-5quTe6pT^S}55tH52l`Ng5g*k;dKe(|rPw&$85uRG50Yt!6_?BApCuiw|% 
z0v!F+3#J^_kDrwB;kHbI=<%}~yb1hVp!8#=8_`2_5Bn(CC#4b(T+rm=&y9s4Xb7ghyt{CPzRYOTN$5S#?;+TxYmSjtV6AnfLFUGgg`mkW#3D;;z0dpV7OaJn zlakJ(oJ~w85Yh@r{;0ehZV`}Zfh-UtG|U$kEK?r-e%@024L?ECvovy{Sv!}Sh-CfL zYo{*hmlqIcY(Juv7t6U_-&edz^soZTBuETi3)T|gUmkjz#RfjpF4`2%&n3JWsW60* zXPf!rHTJuX$+9|`=Irp)@Z_p-c?_Iiw6VomQ_Ce!;hdldy?sK*LUaozRm&k?aX{Er z23sSYUPv-a2&PR2luW87SuB>njm=yWUPU|#zDA|VtkwQNuDuf);FcU+C-|oJBq&f< z06p3|)IP10Rex1yKFYA>V)e^_k3htf^yUalbc9#SN3Y$plIJ*6+6I<62#9Ka5kK!v zMztRfKhW(NQui&biO^M=yb2OuuX&(5+VIVW{haVsr(rKy!fy}XI}Uj0r<1=}{mdKv zR8Z@Az96H4($gvG73WiuO!>qowDNAWBhZvHi*rPWZ81^gZe+5SAz!fEA}r1#X>07k z$C*g`n9iErh|821SCbkYHu9WU z_kc)4#AyTN+vHsTUP}pYzYUH}yf##U2GY{f%W*f{uWq4}crN>kgk~@bW`)EYve)2c zk%$T+98*rBgSgGRiUy3#QcL4v@n;cQd{5kET*6<#W*&@ z)qd@_T!zhD4HW#&_t}DUXrMkn^b=dd7XI7EtMW^wnUC_5y_bZvE8{K-iX&d{x&;7l zon*B?6-FfEhbtM4rds@7eRMu%o01sP>R@8J*|s$AzVDyE-3K5n4Tldc^sk(pU{rO#`x@DSZGvJk z{6GuUl-4aFLX?+sW{pN(yNFpeBZ}0N>*G1~{nh*Y#b~taH?Z~+{wjQaz5Nf;K#J&m zx=3vg6qy|oj~O=0f(&llajsmMJDy}-};rLvBSkl z98=2z$Jx=*2>44+dF+4Fp`I#gxJ0=gX|+iIFQkSk{Xw9z;@kO1WJaAZ!J}yI66gEOI6SAOr@b0T=`*->kOXEhGb;R<4*n8K3Tp&SjTcl^|XEDNr^Ir5l_TH!6c&%`#E-SusW%XraPls~S#Uo_7mgtb% z3P+851R8DLZ>wmiW%BHuQf#ChQrEQ{&AkG_F&$5 z4O3}+zMFW&J$eX4Hoxi^^sKQ3R-kfs!N{sd$MCYN}1GN4D75 z!016=jIR-?W*)x?`}G2QuFk2-sgM0m+L#^l!UMhy`$7m0{5OpPE5wN2C=4-N$elkUlGlyZLzvR8nO(DNQgn%WZ-*#7 zx%;WtJFXo`$rZvCb5BBqJEHd4D9~o=U4De*{s;~G)k8Qt!W+|fufLvt?S8Et4h@l_9n&)*qz+Z z)xkVm+EWZ?aVRL&F9EJ4UWSPWYTx6X^HTq?AB=fy!s#$S=8FHFV;{j@?q5Qo#mv!=B-_VJ9d^4fc% zpEPxPwmW2hW=6tF*^$;%>l1vtv{7glKz;-^>8E3JP7^F0=gFQrKDgZD_q`vF&7oMa z$a0M)|3-QL>5Gz3Fn9HvI6UkM;{Bk0=RJ^r8c%{APKqa@ya=Os6l-{dDHdcT)G5d8 z>%a&!{Nexj`s%1C*Y0geX@+hXdI;(67(zh0JETJdC8T@k2I+2)l#*^yNsxD<>sbT%$9?a6-`92RYwsuMRO}pw7LWdi99>pra(G>X1b|F7U5Xi4 zxJR=p+xZG<;HOl4({^+y*S@`QgT}?GjQ4;(5u+I9IH-?~81E~&qSrDfl<~F5rkG(M zSwBssCp;(Y|3MktT$GwUBk-~wTk$?+t;EjBRCpOC#Wcd7otrLa+e7REk!AtIrLzKz z(8P!flt)s$-3$5bx+*$fU3m~m0YiBAuu;sF92m|I^NHlZ6Gift+zi&Z z=1s&;O2KAgnqRnrUJhZ(y)KoYV;lmL6c$&;j;oV^X)9SOSw&(GbDT;RGcIt(l3)Sa zCKTXkNh8ak{=av;K#o5RT;`aJ``<8kks`pcbBm_sYN0Cr-T(dpEM7hM5zksiKCM(5 zgO`hOVPfLSa47qkHRQgo{*!&dF>!)wh(9RgZh3+hOq&i}ImNpvF}a_m6c=nDH}7w! 
zx|iFYLYp&gD()+@IHpKG-DF8-N#xr0zuWuHK7 zYRFh5pKKNuC+}#mIi@9sGTN_kSv5qs_0LTvbJf}RKN!MPI6Fva@J1Z?vQ|CEkY}WC zZYPXtLLCCG5_BMYQkY2OAf;nMUy+r}65stM;C*vjPYJ+ar-gp%n`M5`Lbho0T)hus zh?y!Wwq1~SqapOw{pHh}R}YsL=j#>K`hT1w;!sgFcqN##=0SipCh{rwx*j2=a~AQluoj|Rav1^R7`4P9tdU&x-u6|& z^9Z?SNwQ0-i-<$Ql#t2Er~ycW1`s+DLqJShfS)%p{#qF8{GcPXiPCs>XLx zC$kU)Y9&g}e$3Dd)E)(#q>*|?w{Ie~es}3NewM`)rosC&?RKRb< zef$TEvC4hEfk|L{pp)S**3>e)TQCvcrNs!7 zAPBS}opI*~Tw^u@T(UyB9-JA^99wi}k0;^@f`-qDTp@(g0@!8UIRU1@AU+Zt+Cc1= zfj~Sp-29haua2cL13-YxgrjHoUkh%6HzmbvzvLY@L`MYK*$^ebyU0$P`4en7XF11g z$s^?-B>`{&ch+J#&E$zRC!fvvjzNk@(&dvZvE$#Q>GCP_;RyuZL3$Dk7BDbfd3(GkTa;hjks5OO{=Loo*&JB1w&pH4p{xjcv7vY%jb#uJpOZ^%ZGS^>0E02~ z6v!VFS;dy3D4eFWk77c8fe>X?U)I!OZep6ip^ul(PY|aX(ftI+cAQO#MOMP)xMW0) z95d|v4u5km@q(C%xr)Pm-8jbe9B+ey1Zk7al2h&zJrUSo^b2C}}L!i*6jN-q&?F(NTn9N}e3t zsy4vUKTVE)H3z%R=}Q>gvi@^eRqzD>s?Crt^+xM|(kk5iJn>RkHDLM?9lX zb05?f{ojJ)W6jYK{#x|&lrdGbGU5jzINwC0Z7tL;^daCRK0x4Idk=C@$3BhANcX){ z93=f)Ij)IBC~@L1Ie>nsBS+%ob!o3W&jC=iSuzLJMF{11nPcm!Iq1n)USLhniE#|2 z^e$O$#1{_Z7-i{x7s!T29u)*IZtQ@(V&Sd{^-zVP;e=hs*{0h$RUieUHv^dlBIfw^ z==p47#V|mW)bPsVWTk>N;<8NUWNTu_&v8CFz)t@HMno(bQ#@su~@ZNkPsl2 zKJ|5&exN4PERepiV)pO?r`x(D7D3WN!1{;L=Dr}8J9nF!F_71Ba1mR`HVm$bu0LKf z!43#A8v!OYuLY!7pSC2&Jty`w4@G(YrLKVhOF=j(1SIuKE^gppv9>)3A$9QjsXOb^ zTyN{S&;9#|i~iT7SKM-|A_eljr(4-{Y(82Z-~c34`)$kD!y*w9_~Ao2f|mLC6H%aN zo&?UyubTxBKbIXy8y0Nw3>ZhowXbLR9zXvWeUeFr6aEcThB#I~nA6;0*IKzSi{;A) zozTsAZ4q%VpGBWV;SAkaW|AATvvP82e=zcM#(rM~S8)7^>$)v=lox0)h)c5+Z=mP3 zE1LuRXtJlfVe^@1y@U4vpLtpeYUI_a$4H$+hTx%$XQ}1p#U)H|5V;LChLLsS#4Z%S zlvj!PX*Ohb<+Q))?FJ}Y(q(gX-_lF|kk&uB+ z@|ZMry!bLk_i}GxuVyfr`+kVb7a(zZ`3d&O=&E?QP(KlSecm6GBN58H*X!lu&WJq)mIcOU6YX#OpT#=l2k^G-(nWJrGq5E0GQWpl9iM#*JpfH z-JU-=nXPUbX!Vh8pm>^TG>qRINnEn*4H5s0Sd2F`yB_d?I?NLZ<*gxw%AWZ48qawb zmr|)~x#Rqf>@)Mif){YwP`V*0CCqCB28c_4=IZzsH`&5Xyd{K~FBHttd=r8R^#64( zR^RZz*;ayGi^EB{Il>1kO9UJ2tJnF+@}AHxo>@BvjOO65N7W0*z7_mY@U>4g<`h{l zxM`f)ty<0-%~69iGMsBgdpqB5;b!5qN=S_!r+F3;=5?^Ej+JHnXAC-vqRKP0X73iO z{YjjjeHCs!pM*U2=Bdx8`njfeaGv?bK(2|tlzLh)V<6AuSDcSS08~(Imf40} zby%r%Y4$38IR?h<+8xcm8us^O-sSpCG3ht? 
z-xAEQ_&)J{WhN^aHIh|Cqd_ME#p=`mvD5=k5?!i>ky# zT47%|o3OE*8Jxm=)V>Om^yZaVC&X}z%_E#^YfV!N$i94|qPfaVnt_c#(htMnzLPeNWgC|Ike$rCK zbvmnN1&`EII&L=R)KL|aiq9j&6@plc5WbwmTN3=JAl6&#^~9b)%tVR*YZgEuP?Vmj zpV8r+v|y@_IoY0Y_F6-R;ZAsv`DopVjE?Jbxi zVta&Z#*zLscBAp`sBH(S(@VAw284asXQ~#yr^q2{qgHRl9 zTB3bbT!Q!lifQ1hC4s)59(dE6jkNHH9D7plU4vUmFvHl0IA_Q2N4ruYsbA&~Uz4nd z+F_M}YF!qgx9j5xXz?0HU-nl_mtD|kdzTT#B=J%BSUpQB2vy&1ZK3W4F1X|+YqOTS z3PZ`}p1NOD`3p>hA-)mui2;f7A{I~&OzR2;L4#gvf9Xk2pU{U&HT6f&c41w4T}{fQ za-@6^o6zFy5l}@&x@7U>IfT5Rdw;PvbUE?l z#xw^%z13%ptCSoCVe+U8Sgy4npH{AA=^FezU8$HaYs?{hOI;LPCD`n4itt{`2Yl}7 ze36ZA@znhQdTF|JEiC*ah;>5>1+HeT8IZk(99?nQmDfPKy038>PB4Q!riaxs-CdSb zgRSjgJ^cN>p}(YAXBd&}$T9kQNneWYisSlTqQzQnnm~6m{KAweiGm#Kpor{x_%COB z(PNmO3idJsl?z{oAi;e1JYD!~TfUln@d5n3VkGGr@fPXlEm!i-H*vmV)eFP`tSm<>;EgR(9p!I0f;jbY-qH zcT!kV(7D*vQNXi@9d@H^PhW2bD)H7=U%iNXy9JuW?++BC+GDq#=gd73HhYTWaY8;3 z78;ioQNv40q$@Rt@46!t)pPS4B-=2y`aI)QQ1P4l43jE0(GA4^>}V7kb9hLtuI3o) zd(`E7IYY}4e0H|t6D8QID}_*1KiT->Ge@XI=@h4F0+_89Yf#XH)j=%BKre+aOKGp7 z`rcts4l*3<_j>PWZV(dHAW=ZvTS7HgpyKQQn&G`LV&wt?7#MgI@@uBueT)kNej$m9 zgom1l6%?MuRq}GuIIQ-gp=yWKIz{y$!z>u-FEq$cxC%rD{jE<3yJvf4;RY8Y!LReKXdFs2szXstVKAex!X|a4c1x|C;AK2_b4Q|=u;l$?_xx;TWXBb$p9cKA&3>4IScs9kTqX+IO(nFjEk^P`fb-SC~b; znuI}3P()cBZ1OX9p@#LwA4VMa9-%z<@IN|Y*+6kpNp)!l_nGY z?4OPNtb3!2ScsvsG6it!#f8b?t+aMWR+geI;=jzGuC$T~06E4;O1CQ%C{{J%t!`H9VGam5HT$IhWG~@wl_6n)!{sI=*&s zsGs?oE$U0@DE|HdbMVU$c)xi&wQrS}WB_;gNSGwB)62T`5dcAk<#SJX7oo$ z;OSdUrXPr@x>T_{P82^3uv2K~-#FKH^>AglloSCYG+v*O8;Hmq06j24m_=zYq5f=s zvMh2F!Zek#DzJvqqeqNs%(+!5u?|Yw!X}#&_1NOV5yzO7)GUh?!{VCg?hg0@PLaT} z@0EFb;pRT?Egohc_a@$kcH|?%32RKF7klREXRE9q_8n}*dM>xe-giegDy*O6UQK$uv;`y(LUYL%)dinXDs$^KR~vu@O5vWaqQ6z4_p2Nrn|S9v)A z29Q|^*m6d_NBn_1_+VR7*A|cvwkmbme}Nv_RV4R$h{r;fSZajSf)w)x5XGD^D$9mH zQo`75qOVu&fVC1j;e`hFSke80tClDrMkj053TpobqW1Dwr^6qm{*G}Ca0B#*bB!&I zjvtWkHBZ(@o?>8FWN>ztzFM><* zR+~VvenhF+ySclI(oE!v+aTeNW}TVu3Wz2PT?m&L`}&L|#>}snD>wvSOe3+29TFa& zERZdnLJvL-S#*A78tJn?Ur1wT7d;l_S8p=(I9auC5>1KS=ZxK~{D_kXuZ^PViEY04 zWTH{_UW^=LfR8vDhu6XXFkFBoJKR+x(X_=ru9cQ1(1J%8TT(Y^mG4GGh@q z+uz!Pj)lefqyS=>!D`!*&MQ<~us3zjqOKy%c~Q*x@7IiwCMaVkjH8$%(UZo+s=1$E zTx|2rlFIJd)fc=q!yY*!Ut&7K4G{^a5E5~>Y|xa- zZ9$g_@}*k+7I50^ewck~wVuAPASylv#8R_DTa_>AMBn(>K@pifqUPVU>X-uijq8)L zIdg}72Vdg~vXt60&m!Z8MVTs-HxF9xM5@^iv)msXyv$yyuFiqUgbX&?c6DrT+TI;g zddh7$>{bE^zA!!R%-kblaKzL8{j4;_ck!9b(=)<+n0yC>BYJ-Vghk2##|{u}%hi5W znEvnZ^AFL+LmXBdr$Ch}UrKDt`K4Y><2;1|BR4iO05zK|V8sz59g6hsQ%z+3<0pEr zCQn^_cIQm?@4aqd^D?49;ze0Nn$b@0tDY;LmBz(36!gDrgv$k14)^ z$L)HFxa3O$S+&mq3P_+j3g+Y#p3uULEu;}wE{xpb`O$Ydo_u3*)?=r73-`mNkEE_< z>xEtyhjYEy;%CRV2ksA655FERt1l%w8=qny%sswVezrg^mZJmX=Yd2cYJ9L1T``~* z($0&4K3V+HqR*bDTQa<$GWC4#h-p?9vGA&Iz)Sj5lSs{q^Gm&BF?au3 zhS4mSajSu(C}U-W}X z9223YHj}n)3@+srPV|ja?dK@6dHrt5{3Gt8f#j1dDxG^T3j4cWkrL~Z*8(LTj$E+(k!!0T>k24UGpPyF(nAsqD zGK65L_v$aFCgo6xx~JK+an~(;j*;8RgZ6A%BZWGKy>AZ2v(xY%(r*cpLbS}Duvu4^ z!3>A=;Dw;eD&{-jM}G&ZtI9wjsMSK0LNt{s@sRx~hB@$!>eJ+?&3&V`ROC#*4vG#*$t>1WfUR~U8D?SOIN`DPwZ#~}GRcWKphmn$ ztfkC#vJIBY*l4;QMjaCw5YP6Ob0Vz$@Q?UUi7igWJSQ1cAf)1A$;*9P<=k{WgkR9DJkUq;+%Rkcnc&m z;rb~NUWM}+KyMR(6SV<(uOZP8@G?nSLCQxKw0e~xPmWiB^;!wvR@MUL1tg*m#1r)^;dyxl`<3}5>XngJ)mD>ltvgOMN z_7LnUNH2Rzf)U6Aj*pBFmo3PP1ZPswvlL~>0fo%SNMhAOoEAlRX<@Zr9Y&Zn zyn0lD{`bVcto2B7y|mPs|JG2B_)r~AWk`HShk|n2{{<;Ly4Sj10BiO)`4YmNdUouw zXpXKC7o)OHuGgB)pLI#tfGQF30^l+3H^bcnhjBlAst!+$J%^`RDCl=oa41!5QV*4jPe~$+4#ahn%o63h?*Y3j^L1cyq1@x~MpTEBnot>EY z9FML5dtEzaeCg7mSq&xG&=+?(G@Q*DaB^Zki-B&7zO|K0mpw=~s67rab<|FQPF%B;glLpS(0$xxfALT@qQy zBtD-Kg z%f;U|Fl3aQM#&UtoOe+{ToYAG@bJG0y+?$w)X{H*B~7K!9@4Lx_$MZGjzp~#uWqA5 
zkY!9zdh^_dneAWmjwR}&NJ1lEGDAPr1zyg2tngAF)SE2F!mJ10J~`@0NckErGIPeD z5FJem{A^;1JgKB?8&yct}JP%QpZ|WBbWDMsf|H4l2JX<$7a^n zW2cwcmi-fk0=KCmxvCREs!5w6axy{!(E6WtwlbYHi1G0;)Vm_Sw@|Cdw2@*w1F6tq z)e=(Ey^{T~)HC=yxH{yAx7G9;4HB@qiXii)DCZ>WsODh3HlRWu!&n;i>{==oaep}O z;h_HvnaB07a(rMPmu5?wfIUU;%$nbDlm5MVG{-4pERy1RPE{a&duj;sd+b#-n?!OlFTH=+ z`Gyx9On#q`uDv-r3U#}1GZo?SU4Tv^kt+c3Bh|h0TDtp}w}wIstSK!+aFzWg<@{3- zpXQfKXq%6&Pv{s!ZckP@W_&U3zYjtOL~p;5<9I7T_$_#N1Dm;Xn@`WuUI3qK5}!V~ zAe~iW`Tszk z<|93kyU}7mHV8D5BhFu;E#`N>YjOKc;GsZZ3)`@dR1!oQ6PmKB$A0KJ8t`MmmydRO zh5!giy=%*Rlk;}W0svI-!ITQ%eSUCAGKRV73&bVJ=KcUk&GB>y`n4vgtde!AKNo_Z z1YUlJ3u|TFZE+&~_!sg1)A2RMa3rMm6qNuUR&7mTdV$f*zT)7Tq)F!wF4ypJqnef@achf#c z>g}@+);An{pHM7}qh$7ds_ayi`-jvb({zj1u@B+w8?W-=ZS;dSU|q;fM$BrqVd|1d ztFr*>O`Vglf$svVN^8_;UDVt6i8^f`r23tsZ^(_ke-QMndZg%1yJ})hi%t z7t=!A5DZq|2O%u^2^uI6XE8Ty?9Mliv3+8roU`gwS|=ErBg?+3C+2eSv37MSAKidC zKoUp4kJ>ir3dCZk$)T0lHEEf*5E+caB$sGJ5Z>{a_r0E98c&)Mn+ELN0KZ9v%i1{O zMDDMqT^kqKm!QX= zYa?Y{!Ix=D#cX){2YXNzzY~pD*6!}pVu66Z`2t)_Ikq1T!D(*6x0D~zg#_|6Vf|Bf z<2|^;V1W10TYE6YaAhH+zR6lKDGiuD{&0u=U=tSSiz~16^kG|kHte(v6C2CaP-1{s zomr^LpgC8PEa-uSnod->VU@qLmUJQJDS7hyRwat~h~wgi##$HX6c`dNKFx*i0C zu!;CM-tu5(4YkN!^|5=T8j47IdWPeFBYa0K{A{=F$it_Ah43S{k@(!sdNSGeFqr7# ztbC-sa=w3z%@ZI;rPwGWsGdUW6y zM-u;qvgJ-!J*e_gX1Nj8hj~WnCV}tsz65fexK(z^@Sai)}3Do)mnpJeS35j19} zz+UrA8{nT37vK?#i&;QQ08vkpJj%o9#Xhr`OOrG0ro%}#dp5D$mRA|%Ys#q?-Dn4q z!Nf91qO2fz^G2YC zgrQ3TB5-99q>}R9HP+w;Y7X1TD^&&e)xKM&Tc;n;lmhEh=0M?34fDJ)<4XrT1CNOS z@yJ1Wu3Hy30~U87$VW4DcGL>fAy0JdwUF*2H|KF*Bi*H6I5&J6J$0FaqESa`!*+zS zIWpHkdsWxGBGu+qIF}%4F7hu4QSTA@$o_s*~DuLy~p;A-Nzg5mjQRQ?mt&@ z*$!_!OkKyz=kG+`{0#9&&QoKT9s)74fAL!UVX@4RlIWyuVPYxsgb^uf7?HYj_RG^r z^#g9GVy#l`txZ4=7Cxb?6~{i2LeI#Iau6W)Y!@3=1t|dGUI2GRyw1PU{!(Qi#dt~? z65tpGWWc5+MlAMD7`bp9o3`3yXWR!M2kG0r$yEX~=^c1fXmz zqsO;LM}mbIXD`x9`9m7;d7|F-5jLo31*v(I+gY9#0Q79beL>Pi-+{se1Yr8J8H^jq zyFNz<2t2Xnza7taDI<1*>ni^4dNPKi827Xa1Tb^LB*u8>!_n_Ck)@WP`0YB+=jTQlV3enQ#c;{h1H$LZ)t3?qwhnwCc= z57aAe(*-=8&Thn}a59;#0$iCQSTq4GHTFg@v3YPsU}IFyP4K&35@0V2+e}^q4$Q|g?&HS^1b3IU z9?Di(O#iPalnMTt;&YL6Sq+TQkib~o=o7)ilVq8|m|SOLVfIf%1sI~efch0v!9RqVq#(pOY0h&R8=@x2{7 z7r*H%Vu;Wr31+gRTIAmVsM{QhPz28>qDzg{M8EH0U7M=dBx5#PtTsLkO;%dfSLH&O z9wNK}=6wtT+m^mOdbWOQ>DYPIgu0%aEQhDh)e6X-g6ejNyMUW}urCw?MF)7Yl=oEp z-p{UCLmEOp9*&K>J(NXSAq*idk^lU$i|zf;+Azt+-v6;d&(prw*U9~yx|I5GMP*R< zQ_I5K7!W^X!PQR1dF_#c8hXMfg*QI`zCG!7yz~NgaR2-MgFOd)M{K+cBf>{o=8-e{ z+uGDd3Fo7>@$0gz@t-j5Pp~%f2Zz1&ZA^IlF-%mG^71!VyzS>FD)#T|?Gvy09!#oi zfezj&1FEQ|n64uZg9ir(eRJYHuOUCUw=P6`a5_=iSyQ!>TC}1z)+T%byeK$<=>%YE z5re$;{?C)eW3djeVT}^)XTbZKhlro1MuiagAnwCgAvV&KGN|f<<#|f@bdea-lKcKg z0eF<%Q~nHnzIs^v%*3=`x6LX66in7a+}1qZ&gF|%a6;1Gk*?3o42 zg9QgPBp9|gLn3L~^EWYFGR=4v3ynW;!X11<`kH^0U)NvGISdwfy8@Z0K?9TOSM#2; zxX*d3&6$V$M4a%R>qT4) ziDp>lwvO8sU%H2-{FrDfvj1Dh-9{}1&GlG~33Tsj=#&&*{c0~QS*7E22pqYM`Z6+E zj@Bg&!i7C+J3i3fUOR;!Pb^w2>aB@Fr7>n!q>d$3&18f@hyA06gAJ0{{Dx$WM0bvPQ=%?5Vz}64GDavxZbOyM(Jl5 zxiW+(jUBa-K4^xNf zSKX_n=zkugW*NjY_XL9mXnOg-o|6#7GvNgEiLC%wmSh3Ih&mkIFv20NTDj>7xfRkW zqM9Gtmd?HQDl^Y!)curcxZWoRl=EQz1 z5)w5jt(hx^L{#xxbfNlhU9a-AHb<~SBt#ScF)Z)4+sv3Y(gEU#2*U{?979PBepfjMmM zEew%OlHBg9N5hE}30p_(?7hZMjXRV=B%w8bfqZ3AQ>cn)hPoPdBlSjs_#8qqkj{zt zLJ35qu@>(z%knkM(gN+{TD`|b=B;M8Yxb^o*pmy+03@5V(?t$zNwGZn;QcL{i8tDF zhn;EAHEZq+HV|Hs09TTkXeDe^7aA{Kthd(MT}_=!_8iTp-Dhk7>k5%kBQXVT*lT9F z73=CWI<(p=w(YxxfaJG!H=m-TpO#^U#UzVpF%615@$^Wsh41eq^ zuXUl?-zn~|eZ8U21wA=T?R*C-Li{H=q9C5hknAJ=PpbUm=P2EbjVq7SB2+bCoW%37 z3_QadeT{^HE`W}2;Q~f6-;tc$UNg-H_m!f5^*lT2AqDT773MSk#1r;*vpKyYPf)es z*0j!+lqc=JBcc7O7+T8KJ~Qv4OU|@oxy2e*qb89B8egI;EV{q{P}B||Z$1L>={9JCsi1}|a#slxzAWbU`_5VhF%x5?UQc&Fzb 
zCc8gH@s9~nhIB-=vHV$KyR4Mi8*NTmjp-Giu zmZ2CRsD;7+0hd9k%)=zy^BO4=0Q zbxI1Y3bj66+&ki{r^zd-n$2OQQf;xcL7MUE(C$5Sc?nSzNs^Z^6KXWEmK9Q2SpKN9 zI|eF~{0mrbb2%|>K2@Gb2;;CyRB_+yo9;<+*^nz}sN1VSrB&OYCu=YA&T8!w&z$AV1+BFHE)9Tzz? z8^t)pMh1eV8B$=5&rJ701aDeA%qHbsA5gZjM-<$$>eFHEveciH#ATp0KBn3k0ND^` zbRsmQU5{(hUdF6+ zM_W4m7mxnuViN(z59QgO6wJZ4qWaPH$71?!%~zpBQV4%xlA}Q(0~aF939ooW*;*t` zdwY~U;Rm`?Wga%uz_>vusI0HAw~wV1t;3D?tu%Z;5({e;?Y22S8z)@y-8@Gw%EKUi zDpQbY1mwY3MVRQS!BAIMjxWWwMOjrx&MWo*s38F;_d7z7QN?;onl2*mEW{eNo|wnh zPeb|3d^-VeDmLqr`#Ac+iYLr9|H2jpv#@C_Z8=I%u?|J-Z_`OGIrmcDD- zoAo*g=rJo+ucTS51a5hKkKLv8+br5|fO7#FD^I&=lDP?F+9A8R@fy*p)DHl)ik|FM zr!`V_RxX32a0;ptHU`cUoF!|5UZ7d~m6$#0zZ2n8k}C+lNj%Kk>NkO44?#cztki}cq;1cyi`u;ec@iT~#o{!RQ< zJ}Z@u6K~G6*{06BDj(NN@(B~5i9~uXIEkASr@R>O1~)kDIlvJVqLg$Fxaz0H`x(b- z>M%a*Z^PZ{x8!;#l1>QlO8ERW>S+OXUe(M?b4-jF2cE|Er`XTdJk&bNQb9pM$m;R> zt&#gEX*O3Q78N<(_Mazet2n;HuQ88fS?1GJMJYUwQB@$BeZ?_AFc^96f}-_xnkmNC zXYD5=E0+GQ6j`#pk0PT19<=gHsPi1L(~4@GvaZe4v4eMmn#xco_b9TLkPrPW*XmEE;X}7ooy> zBvc!fMOObhgRDjr?k?t~ms>fkC2`p#l_+#qIhSReV;BvIR)s=~^%bDonxi2pJ{b7( z%nKn_AW5RnjE7dUpkPH?ZWYJCImUXbE}0Fm(*J@sHJrE>NzFe&nGOgA8dJR`euhFA zk50xLob#?$;tW_yESUebQbArJjvq=r@U4a&g%&lRWQt`zh+e%OGwA|h>?+Cy&oMD6 z{$;LzZ~+AecM+Lf7ZR<4^}KEeFJA>Eq3g&o7{qG6`pAo*L#w-F*yGwN)P_h%&eWLhcbX0{Tu zrkeQ!As9)l*KEcOq8=rvJEWcdq8ZW*fnCA!GN;>pmiZ2~4Fo&IfV3z$D-a*=zF*Y% z8*XJIF6N2N)vdl^5;{6?Y#Bf%bTuAbCN5GvDy%yymm*K)Oqu%SvycYYB`$Q=nYz02 z_Ta9cp9d_B*q=!g9Scn1(`cFkEC>MC8AkTlurQ8cmYQj%JGf zU~B)ap#SH29v^-R%t*|u%ayE ziC&#k^;p1)#EdE8)g>|d09Rw*HH+eg#uyXd45IpkgUkcU)KS#wFw7yrr5=%@Ha2Hp zz~C8VZ7dM;UiYC5tS(|M)Tw#XV8Eg6k=4fCp%LVf#aFFl)HA!ctFh(Wz2CKM!L(iO z=wkTN(0*RAMu$|okD2|m3t&u@ag2-%n+)f6y^@62vn;>_jxUTv^RO#9`)EB@_`FZhM{%1qM-IS zUu#3<9ZAqc066p z!N5@GfWAchWC9q7DJl)T)WB~+7wZ1t&kT3dQ-IgqRi;u5U>ye*(V>uI1_P1pQ4;{P z#I@RirE)ewjv5!aPx=r-E;Xc?(U8={GwL~C1>uG(N(|}xzrB-aZwTcHo~NbAdnSIE zgpL{d&ewpQKhR&;3To29FT}kn7I_oXr1sKBm2Y(gC!$7&q-dy#WjJZx!YuK_1<|WZ zY%)E@LpYs3=Bzp4J}$=din5m9qRFoaBW2=sQU|>A$khXC1UtwQU zz@vo@v!YTE#(f|C7yGq7;?F)=PUz#7ybS-|`d!?C(uIWteKb6S^1Q}yo0xO4E$-3iMY}ToiiD{n(_9W2TRBx! 
zpGuMSM)<#2Ma;vW>izU8Fr?FFwYAIIo5pFmb(-6xxIlpjG`GRy&wa(;Vd!|^-CYl* zn756mng?C}j6*0$4x*_kJ*&&+_>r}}1G@wTCtf}2*#NB~K!^cW30zMr62QiZ2B?K{ zgG1hZtef&a5FFH@q?$=6#u-R;eTV!V)zch5CGIp~ntTg6?c(rsKv75PH7b*sK%%u; zjF9U?w29lm)xcawAZGS#L{`i9-EQsm2vToOw%Vs3&0{Ebp6wD?)ZTJGSw0L~+>zmf!FEuAD9TByGY1|@sa$`h$x1`I za#MF!D|7>imArYi!nEj6>D^!Nv}qIKBxTLP`F6!R(^IVc(O`^Gbhu-Gk*eci8DFP{`QNy zOJ&>3#>K%P$`51FzCcXaCM{&`^KGO~=yT zXNUYLHolhlB8b0^$-90#%Vb5%wh&%nNDJ$e3sVwUHPTQ7+^T zIXK}SFCoEwQn#NIYbX-QaS}m%k_-x|XR$uUv9XH7k2u2c&z6s9x+QU?bN#&AI6;)Fq@;4zy|T^sVkFnI*-1)JI#g zV@8p+nAH~8J>2z{p~mqgjqLot1EQDtzvH3K#Qz^2g7L8ri|ICC8s{unP|aF>Y^>2{ z)5aYYo8h4}e-Z-cbP&2r937@pl9SbeRy5{?36EU>-*Vh*Ct_nNaarrTlt8W)a&VwO z8#ELoxAuARWLpF_gW)=X5Mh=yMS^rE&)A5X6lc^POkXa-h0}ns{?i4Ev7pu)1WG!nlo8Dn_Lf}rBshA0e4`F+6pL^_aKs2 z8GI!H+HR%F9A&5t;(MzsXl?C0TEJ%UO8keMZ|nXjLRPlQ6Pj6}*JB^UOLSjshHITA z{?gFy?k*Z=?73f^mFBX@YQ(e}DQ`qmtGLvdK@+d)3$;!jQ4_OVW@XUAH)J2Y4KoF#jfouaDHl2+yJxBNx&dB=LyCR9LwN zxj5_ZB(wgiXM4i-gk{M#-;3x)EOd~-@swDpJ|01X7xXc%Ssbdjhi&-pP*kMi(b2MX z^e&X~wzxKZBT-9^6K;m417lxby7llmPd~lM4^Udap(IGCbVLZzo){n9N5i_L<$7@)~j!S`mo@&kO$weeiR#*hz_!}IQS)n*Ie;t0oB>73R{UG z+tqlUWo6k@$?*2PE4#dwDuV$`Vh*NBcZ?F5G7VxJfXY>p*t_pEItG{E$3bOUX92V7%?)$=Yi(|248n)=3(>OM z@}8l*mel()K{aQwloSUdP<#H(&BfszPJsP$)43rqOuG2#Vn6=dY8?9e=Ravf7~1)} zsK4y>==)fTlt=pa0}J=)*GDJR)4&gCl82pEgWwyx7aF=-g?z2COL?3V3=x7N<_e-a z3T>^xi&aO8AC?pmL__uvfuz^k0)%>2uJVM6XF6<(6;yuqL5JD;(vfS&(?9SlQ-S(w z78S(dCew?y+Z{Sc0f`MFMCLN}0~X z7d4CUtALO^%AKfpA)CB~MYvMmU4(v#xeUqbWt)+{mrj`l)W+7ezA-DD&b>pnh=B^- z{URG=wQkAz&5N>*uv)vCDOJz01=x?s6&m?I<#bM;s^ZNuBYQ(H|CG^wNUC1*Mx zxZ6DVaucpc)}v*|aA22+tqlf8U*)QJ0V& zh5}2@uo?{Z#l@`s8|&6UB}|f_Y~)o0$zjvUTr?biKq|d7lRN=yq;Mz{Ty+&NN3VrN zml}Q)=?)P~3Qz!Z=}Yb9dPv$rzNtv1A!-?Q`On9%QufoKDYT85%#T@f56O>vsxl6~ z?P-MsYo18VvAHxD)nyHj9S2_fUYee{XrOT90tyffz9KFcIyyeABz*5Y|ZM5 zJK#ly@#{mgSJbVlK|{K%IXqAm=$r#fH;$k8TEn!+#r^xHuveUJ<&WZ?BdP-n-pJ;u zy|6W=;Xjv20&$TK&e!vz&gB1*_Lfm`ZEMzWLU0Ha?oMzgxI-bqAqnp8?(S6KBm{SY z1$TE39^4%goInbPuQ<2Q>AvTt`|Ypq81P{;b0A!>U% z*mK_Zuf2Y&7kMP0c@&KHycy`AV- zz+sOJE6d>p`SZzr#79m+G5W6rOmp4N1605ZOL9u)FZmfGYHDJ*U&&n$bMZ~l{f5Y; zO_CEZA~Xm*GwQVg<9wM3KQS|BK*GI6Z}8)1KsM7+VfdtyanpGuhP;UmBa+7$3A}3P zve*ODXh*J`Sz1eox9=8XIwgrcMM!N3hL5|z@NK=Mhv@57`Hv#r_CKlBKV;lyG)j!{ z8Nz>Q{xYv?6S18K{F=E~8@DherG0!A4xP#6sKi;`m`~8OLzWH4lMtojkVl&$ZBFyk8Z3Zyyg&5D@;tanC`CA&u1u6aFVl2uFa+P_6tFGSz?*sp?hUJ z!)QN`(1?n+u1p2*0HVA%7}JNU`Q|Dm;L{o(d@&TuxT1lKzmP>`H5p7EGqp(*H&Li!OK7?+Cq>8Vh+$~MF_-nyi#}J zVS@xS_yv=z7nkjXO#q{Cql;+{2QK`|X4cRK2~L{s6$c;jC=F!P#_Iffx%r;7n&3za ztxpWf41VbjK(E8+xY!c&C>G&Q;Mk!@YCcqc}Y z({(4|`C!7T4nO0IHHt4#4yk3bjywBy*fa3MSaUJV(0@3G$EH@OwlIBMd#Pu}O8s3H z^RU%rOgkTqUXC9gA^H4{GS2<6lG~9B7vY3#IMSR%Tad3> zzx@iX+IW=t0KJqaQv)xksfFic>G48aq&|)3l4(G|Uy+=wd*M7-FL+NcbAFbbb5<9W z0Ff%*X|$ehbVPAv9q<- zuOcT!{RO+V2M0XNteiai{QKY=JtzP=l{uK#%vKeaB`Tm7h!+O=z!!7#vVsC^U`BbD z?~jn;`;C;NQPe{$P!fSa|3me~qWeVPAdGYOYvf2RrwF>)E(7r{^nvxf0EX?)3^oym zY{48gp#lNq!IU(MVyN3GV4cB9haip5A33)$Na+OWsI9gsPt%+O0cb|1+^-^n`3JdG zBwSSAh6dFM$-Eo>2I)BTIfpNl#?Lg^zmk#6nqNu6MM?Nr0L))-S^w=rFO`;FwbW=79S+ zz>b|myP?|Ba_gTxX3;LT&VBbium;VTzs0XeyLnY-R zv9mms4~Rtqa=-*JkoQ#`&ko+S2qaQQD#Of3X|c2xh*I@ z&0#rJUgr!Yx2#UiQ((ui6&RFH(BxjMUNv}Th@mcZ7CTSZ8f=tHPKy+ri2*O?cyHHW zu8a5q6Y*np8ME9OoFg8)Bm7n#<0r;9&(sGbrApuW09BO05<5*l>#JhWQ#0Rvp<@er zyaD97euNQkEOz}$kO@mrZ`L?G=rXO@sXx#k-0S6Q!W)DoJlrj*(rAa?!B}nffdM%p zHOC-Z32I{*-vlgxSJmWSOBM*>{L6X-P9df54Y%CL6;Ek?`7@~txb6cxb}LDMls-P7 zUGsO%^oOEip$$C0%XsOYD?-(jzE@lXT=&I_OaAVQHQ;6I2vhUC4keU*_#5F*CZV#+ zdd(%{@3F#cj5h;1)aQXWFa!Z~`vjGGT|rd; z9G;iYOlDSM1U|7*Z!G!vkzlN?Ix|B!P%kuwM3iebF32F{j8SR*9pdHkpN+oGs 
zW-f$<*|1N`qCA^dVz9EnvQzH-*o)k+(t}4h+45C5x%unL(liNbl_dl7rTtAR22KO1 z1~~^0Jt#(zIVy3rHO8)A@7_#P;v|zo z(kPrUYVi6rmzSzbiSw*2H+l~2N5xCP!?S7J+(Nyc2M4{Y2b$q+@bUQW8XHl%#NphE zmI@a*bpoxp+JJi|rI#nRiB-Dd7#B|5HInZelZ%%4q!LRd7;X18>waPyQKsK#bJiUB zS$b0~Uy7X)`ASn@_;RTA1B|4i$2`^qgJ$H0krkjq5-6t-0NslEQ++)H?mS|G(|7+W z1hYu#v{+A!lxvvRUq--#=P7TvJj&B@SZW$C z^&oW@KLl|ii7>JVchv|d)h}Fhg`f|}vp%8Qxt8_QLeaKz@~?vdMKZyxK4;BAk5{5I zTh{DUs*48{jF=(vgP0fzgoXtAEvRRWAA+r7ioR#VFtqmq2ngpivEaGd-|2e7_}yKt z53vsa6fP~N4i#-}9??qZ;vLW(b{zl0)IhpV8sgk0C_x76qD;qb{C>JZvcoT*jqI^Z zh**CLtqn>hHE2hT(3x(M{tnxAKWZEo)#7`n*(-I*V!0({6$I1Ad}N1DQ%6}PwwHP69ewA$ zWb8Slx$Q;CGkf{x=GgsKG<4$T=+0ZrXM64mqr>W3YOHG>miq_&>KYo%g%IcK>-#H* z9YliE{a8Z9%R;qA8+)bN76}nIeAfkDJL+xbPl)^hn$>!k|6xTR}Ov* zlU{il0Eb2@>%GBch$5)WH<{3^@>5v`n+)w0V4gZ7yVr3|Djpq$xJ9E#GvE^n<2t8= zS#BfCoQZc6HwRXqxOZ43DwX_~;jahiHtRIQKPy+GdTr9>HeNz~RYoNo<2+kU#TWn=|607fYrxKE3f(rDF9$D+5NO6YLlxjgcAU zZ;YclQtPn3e1`jRiAvFbf=&`Nr!1Sh=qYKC-&fxK-3PyD2kRI7BFu>jpSF`A#-7H( z_E%<`95l7qB8++Y91cq1n{kEUxkenajy(IgGdt8WD^{QUdFdtk4O6}-f}+mp^p)u| zmp3^RVb(t_~SL~blXqu53%_b(V(9N z!8zrT`0B=%(&Xamh-e+1w)(4XrWy|G0 zzN0GG+e$WiGO?q!?;CS&?2g%1@B5Y1_I(?zl8;D^twyJTF7N#C1E&`?l>X+9zG{_SgVMp4NAHChnnam9Q!VzJ?3NV>dAl}myh+oooVU;cw7)QZRB3lsU#yfi z_u^tTI}jMWG4V}M-;_qnIWj3f6|tPVLjz#^^JSTvWc>6(zEFMr{KDH!esgV$Z5>z- z+%v&NbBx>X3HLtsu{BwyQ!E#mkT=8YuVjvMPg!P%`3jk9h;4W_7wrXegfwlRybV!N zqg*)$L}amleyw#ajH__}t8nW>XmWyP#PuPfKx8aje%?qt^R3i1Qzp=2#P1@#&*d zRM5*c1?Z2Tm{CXaP)vXv1cNa-h+M0CKQR5G4W9r#8u$5A9J+QF@@PtNtABbH%6rz- zDP(}8C)p800t)aMD$v8C)-^zNf)1;5_<;z;6Mn=1pa1sg{|AfNDe#mNxQdiT^Y_f? zKWI7~ESRsWJ_=~6OMpN!5Y!_z4=9A+=$4gt34#lP-%FTjMIjrVC_pT-^p|)xuAO30}rZB+ECg~Ozo%~c>CrKYLQir9kfVHF%$Gl z<=E83L9S~heqoci)3BO~h~;jt`mOrvNw$gFaiw*OR6b2$o)wpd#1%QQW57ywy+3bi zw~HF`olqL)*7DLn{}u?hrdc!1Y>080;n6Bw3z>u*lmPc?wJbjtiEE}6CY@t;TDoWj zrE^)%A#*C#^!sYA>aU9&Jk(QxA=qEe22~@%$Zs5v;tmoWHvJMxKAKzeRk$hz^;y9- z)t*KwKKan)9D(N%;DV_i?^o-M{z|0&jfDxcmV5FR>!nMge|y?m;hsZh5rN=_@2aI1irUHz9ibNibWAN_{eH0ZCtr-oX$_kvt~f8h&% zsipt&#T-l7EVpLC0{G7&{145v-b0ZsLUrvhrX_$A82Mx^C6#^gvTR@VTxN%Xk8?rh zx`|jXbWQcZZj4RnwnpTWM7L_VVuupNVGA(#D6f&Jb2ZoL*PG?7rbl}m1xHMJF`aQj zal}apV}KjsL>tWIvt#eDwkb=#2iabkkfyuUiYa0;cUzs=PC3m~%-+5NjK7*bqE@Mh zq{?&-B+{4#t;yZYSojEDW6Wt27!jR!kyz8)4ft+}`J}Omcs&g7QzD`LCe= zY`zd795mJBzr5|=zVA_H`@3H1%)kCu(hj-+>!}zrAA0#oeB_$#{JVa&@MvQ9xf2aR zNtpG;KR^SLv$Q+XmuX+Dw$T6#)BsVSY0vHj`3l%5oUU%etjGb80Rh?7YLJ$FvEe2XtwJgwf9R>ntM=_2fTOelj-K}iD|wKx;2fb-Hgo*Nt8Q4tF>2`(g; zr@@o8!Bew*+eChxszl@};_7(7zjge14?yp*-Y0o7SfpHFe*UZ^*+Pk>Mp=c$8ThiH!>w z=!gm;X9?z5i{}Nq0`^Cmo0P;YNl0VAdcE9Kqf3hnbfb?x<7=k;VlIJqdzXJDBG+i% zigG2Qc!uWSB&Y`|DD&-fNl|HADp;U zRwHi>e&4r1#5~>aJh)27-&(0h(|dYe+}>;#3suC}sw$N+#2iNE(Q?zp+OP@KCJjnX zA3&JjB%>pVQ<#Vu8J6cPzanNY=x1kGlw`*^Qu73p-&l0#T;+vp2=!oa3@_uy*x;qm z{iL>Ttv4_884w!CoH^!}&JEpj2}cQs#mA zXPUJdh4^GUBuo3DEj_oZ@@M$7`r=8#Pq8fR(`ndiqGR}b2>ToL{pXSV`=dazuqT`E zXkUUeR^1fv@VSO%6)ZO$Jx|f@PM&OtWEj>eZi4{TBK+@75v0T292^WHjSOSWSdSh( zuNf8t-lx?DjjQPzv+2$NFTUPRS z-Y-n+3E$xjJ0re6MZJ+QCTvsuw(Vz0{{fR9i74pVS1ONJ@DL8PhCBs0y`s0hob@9I zezSEV(1u+QZ!%xAC0~dkaaDgtZ)!VtJ>sOS4lhW{qNn@5elK+Ov?;X;nK4F!vyQ23 zxB4X5^7X*ct!;Ty0}|Y`A;1lpKuQ|3h7#lL~lG9@qh` z(EVA;FB|Tf!Zo+U$)UuVMp4hnx}hGpUWvM2UUjx2LuG01x`Vs!gC|6OYF5a{$lt*X zMCdJUkGu7ALWtmFyTZhkK_-d2yU}lR5@k<(Qr(k(RyQU$`TGO^Ti@beZt5^SZFAzu zl6RrjhJdVgtN7H^Dt%%$$buuo!!0-Iqp!Ei>2!Otpk*t9X2>ubU9BY;9OQsx#-@4g zMjt-|FaU~4eh}@R0tFg{W%v2mv6|nyX(&*P(A&VPymk8dO%)OJ?R6dT0$Af#aKyB2 z*@^1dAs!T22!QEVXZ-dd57fulDq#|otplu$UX~h>cs=3`*%Cmnbo;SMUcSB=KO_4x zSR)iMXm1{G-_+({#jE8RjNn`3h~x~5_M)S{hwe<05nF!gL}pNb0RZb$W(pl5*h8>SmRay==N$7K1#!V3_EwLIO6@{5fzyN|bt~pwy-<1C 
z8gr$hS+LzeJp$6APTsZVL}+8c$0Rw7Mh}=ay%U{C<3M(g^?K+?~GY|s-?=Q<}b);_sNIlce>*-h1Vy^-Z4O9T{3#% zjh)B+BG#pF9m?!y^`A9uX#pH!N+yk~qyw)83QnQQdNrWavk&;zWa%w* zc-P-`#sB_J;DHo0#v!`{2A3M*gvnXox!U%&EEzNM!A9pXTO)^DX^N9qiPk6b!3t0H zEkh`1yx2v4PSQ;{#sah}rw4DkuM@NyEneiJL^v)Bpa#QI`G}*yva3+R1`UMlQ;o4! z=@nK=k}c@-@+sJS5dHc*2cYn!uY1lwHYGt=a8FL-MK#)$0qiicnb_g<57C7{@tn8; z#Fmh25W_B|>_U)Caab}))0`g=&(P$ zIgXAZ?fq)_vB%KfaH%LP&y9UR_h7`Gjl_XTqI4!I537LD3#8!0u#Mau@krrB9Q|G+ z1<7stRknY;4(E<{ABnbO3cxb7VGmDc)^Q~(ndBcQfPxUthL-Y z&@49AD8*{Z=-%K@j+&%F*$W#t5C-xqx>5@u5Fj5C#VY3VR$N2u>?IDNYoSY(G-7Bs zikFldrB7HCo1K)z$(A-U26Q6J$w(YNd=MHaL|K-JqlEmYn8K)H5MQlnPIlf~35#A= z?a=NlSn=EhdQ)X&6K!#z-GC2pr5PbZ8W;8zh-;u785E(&U}6%a3+79IgY>Q0+^gm4 zn;z%k#pYb=1*1&>c^(~Qglz;k#>@;s2H7dfJSg(D7%s!Af*yuh26DbJ!8=P9{;ij# zVNiTd_}zxDA`U>o5w1jXs-N+sVa<^Y^)mjWs=QFDdub$Xppjiyr-spp!{in~`R7aHxSMqG#|nNh#n72`s_%q%Ql!i3V@?TegxQd=O=x!H5qB zdjk}y5E(<{P7ifW4Y!Sm)o}<(wN_o)?gotMn=yCCBpG3isWvgdrdng$A?jt5pr9{# z&~a>%$0jM54Q`qqW_OuIcuSvYv`Qm=L#|Q~;Uab*y;r6J<`6`-7$6V|&(2>FT5O5@ zaoiN2mZ9Kn5y2BC^nd?A%i2P=;9V&EE@7W5WNrhPGeeGmnkCj{h$?)Kr+-uL`6|-18ro32Ndv)>C*iR zGXpYLBfOt0#rMG~_&H;6b0&;1tqjK7=4z@d7M6!j==>&Tl)ZcH>*$&m zK|vwiA7Vyv2Zcl6Fz0DO0q8qQgJmH@hUkHI_=bYm>Y;;I^?GCb`VGyX27u}5T{GRH z+YzrGPob^*(9&}MPqE;=V{@`>FkW-wd81wfU#G12&AF8D-Cp*7bE>;UJI@onsA0nX<&mQiG=Q~g zY|m+tkHRG0JJNp)Q8Z{hHGVaYgp#wDuXmN2%gY%MbyE|EK)(AX6 z?Sc^$Mcznz04(kE@??i8;S#W$D-+I?$Je<;S`4J-rbd3e2uG^vHAJIz~D~R zBM4uQMe%U%qq!dKvt?$yL`p1$UB|>zY=0Fd@pweC2(TIqy{et^jmEyUcg>q%ti=h= zzC1({4cf%f8q1Cf-A_B2>{vEE!av|%48O5xt;V<2U@P=h7dJnDgD{D-{|lwT>{^O- z5+`cO(AlolI?XS6<_njtZ{v=MD?}mr=uNMz_Us)2Fp*KDerFtRHbBTOg9uVRE!-|p zZ74F(c0BLaP`{#AnL}z9VJ&~O*frGM?!{fRrTM%{$FzpB%1A7bz|T=A7QF%evZ+_M z*a5!0qQ^*QNPWw&oaFmHS|{)+y8`nGt&sEKMOOHC-Y&43UK7`V7@q|3ohjlO z1?X1RQ)`ihmr3h;s2hZ<8Q8H#OIZLSpN%Kr^V7qxf)9K0$pH1Ix;tM1oxW$?{K5)z zH=_7vWGC7@5(Ui54Ssl+FT`^v*`nUc19_kp${}mY1>QSiz@Z_oYmoL9C+B^m{f2No zp1JnZa(#JPt`fhX2>pL&xo)W)-t+>GSP##a?$e-ces?T7H2vpVOZXJUZByi1+UoOo z*SR<=m9>C53UDguKS^CVhpSPIh)oE^3vkT>-cVo?y49EIq3T3^eCN!h1{V#-iAkX? 
z|B_uQ8X|b&-SUDRTF0RRO;$m6m9EIrB15mi{shNCmD90{A0oZA`ixz4wY~Sfo#z9_c z|Elxein@Hux$3sy6Km{f?_<8~V?Qwdz^JpSrol758-F>X9 zKq+4Ma>EYJ?v9}Q$qim2eznWyqJ| z_-IZCV_8&r-$v*x_D2@TNPh-y4-GX}_98jpdUR2HkhlCw>k(o_33+S%N>S@T=X&uw zT7|9u(e=^8TUzc6dnYo<}ZYTk+u%q9g4GPadO<$qJk($21>{U4pu+pdLi` zn0u1(fcMa((-AtoE!c=9+1JQAd2k%>Mjeu9CpxbAy3ywN5H}_e%2HHpY6F~ntp zYD8Fl;Xa6a(jxMGkqMOzS5f6@>@MchX37$O4Ek+@-~ZWl{OhTQ5-f|F{DoQ???Kx2 zk^M=>m?8Fy*0Ei$sCbz)S*UpQK*a-2AOTlQ4fY+$xO{>}B2^w>1{-OIkVK9X1g`Jr z9`t6RT>RRSD3;QtKd7{w8w1mw_3^qplkz|rP9+{;>a+V09V{VQsmMM8hBP<(0lcQt z;>qGwf6OlgDibMjeI#~_T}}owmfU1eZZ5HSydN0>NV%swR0bQXYE)kO)t&+Cic`d) z?4UfX0J7%3;zo`Gt>J+fO|=nbAnbz7cYUags5`H!AC%s9G(Ds(+K#oroSe~V@F*?Y z5^u72;L%&<84T8#Y>2;I-$j>{^x%~mRb2#ubvj{*B2uqBz+8Hp{8e^L@1U4BlquQ6 z!BQv$G(Fk&|NEzti^>5EjWVMSy%!a+MJfxpDcf&ndDGnttpzEb0ze^F7cX=&T8kW==jT8BeFZLCHO-& zQ#H0bL(elfC+y^%(|#}hV7iy?)Dhs;7!;dn$2JXn)B`9ZxQ>hoZDO|<;ls)saVS|t@?gCkVO&=Mx61M#xT{$6|NWWt~0yg^S;s6 z7jKlgYRtn~nf%k64QLtp6REnCgMM)ovB3{2CeRLQz+y~7Do=b_Xu9DUL0qzeoF)if zE-of;)hLc-&EQXzF~S@eFPkZ}0e!O42)BTj6k8J;a^5aJAvQ7;x>4pdK>$!_N|-Dt zqzEFj@i{W;O=(J95d#XM7_K1+a$FwG#ij*WcR|WPrrMxn@Ti&x9WOXE#l|Bu zfv<`e-M;Oni#IWUrXV(PxX#TQzM#CL&ly=*ogEJ0EaNV*M61p1TJ7FCk}}z$Fjc5- zbl#46*=aN|f9Ffxp;w-iK&va+1`8~$hASUP!;?KVrmn3!%6tT0;?e0t$5x}zmU3u| z1t5u!I9YjKO3sXepI2+1n$ba@e^$!lf&6?KRpf=~pi%E_;Ifns zPLZ;N!QPozdhJbLrh~hn7doa{72C5vLJTqTVU9c84_;<14M-YBxeYn5wErDop+tP$ zs1KNiePwz#72!|+SZcqUHRGR}H+4Iv1#vmx`5`1!pim}u0X%SJ$B-V2V7S_)3&oy{ zXv3E2{bpr%6%&Lu@;N(%0Z|YKh%2c&FQ_Y;uA=371)CCzwvH8qf=HL$5kQVm?=cT4 z1_apHN9#EBk|Yf3y%WdoMHPb!^5)a^NTUQGbJHPh2)_j-s`4I8AU#^cN{}Fmf7q78 z=cI!mIAY3+TXOd$s|uE`l$cW@6T~eW8aD(4BUR&8ja3fm33c9x$%jNdUQ6s@V^VZr zrd7~m|6xj=L7U%80~BrbD5r1_b+6PmSU56pP(mG8!d^^j@YwvoBE1_J^yFc8N`2y+ zW+*41d8%gH-f5KVlS>mH%KjbA_nzX^uMemm`V}Yo{B=f8v9Gdy!XOhY0aR-g{dzMg z?HDF9xcaTj8-`s)SQ3&Or5VPrh8>-Z2!W#+#RI%jK?>Sj2x^^y>gOUcM+`}l@;yS)lt9}d`N}~cQRyO3#9D}q1m75oVvF1!cXR7I#y9pk9SpIMo$!mvH^HcSjRHdKC zMH@32d7;@()||9Di`fgR}-Cg_ySD2$Ss#=;-ox>joQ?_hy)qAcq2z zM{WH)m542$08FNZP=MQy$qM11O1w|3Q(H5=$J19asD-0eJO*h`f(ks8%j+IN& z#UoQ5%G~tzPvYLl?-tKzqsK@!8@#6)>O0=2rGOI8>K1D?G~p)U)V=Dma>+qXr__VJ zAHK7kEHYuD+EMm=DK1XOe3nF%mJhu$@iqsJGT))-7M0 zq@L9=wyf7Vy?NcpL!6R@W^FcrY{Zss&NV=iu_-1pCUS=QUd*K58xuuo4&?-?#_m0K zsUaE(w>WQyBCq>pH2WBY=;YEGt&8wPaJJJ|Ue=_M!2m@?S2e%Vc(r7gSZz30`Y9}6 zz{pPiZX3yg)yB-e@NdQ3a4>j`G@E!QOZx5hjMS?MW1hRvIv{t%ve3D^jeM?Yt9CbO zYnIi{Z%1d7nR9xP{9xtW??{x0E^Kedt6y&-6$O!x1eC)lp;f^a!dEF$(d;Wbgeank zx^5VeOk<-m7ofOs3kwV?1F7a~3Z+Khh!tVM~nG*M<(T zfjlgfjnmMxYxd(RGj+8xUjfVC{BF3~tN{{~uO7yQ!(7D_m>2vO@My|VCZqrIptNhw z#?V%FIA-a1-OnjKm&@o3p)@GjB8<=$)blOKd`JKxtpJkbkt#w6W=-=&a+qCvl zf9-^DFRYG(+h>dzhI-j~mA+hGxiiEYG{68tx4Ps0fo9-8G=_xnWc^zyp7K?b`(lmM z%=Z6XfcXzQFc*y(T#8&VIpSLcYYs$Q?Ve6DgY4wa8Y#X-`VGmlY8&^xz=({+}M1Vky+WmSBWr)|j5hsxw-iA;o+e%LLHK7dgugXZDcMc-8=%a?T~Hwir5Gd=6!Cm-2>?;%>NVJ}*o}X9?dmD{@=>r3vpzihR*lUVFk9de43XcC zGb@fpknpddU^3dJp1lz1;jg(y-6V&6;uhf**VWE6ojq`8mQc|5%+#JGGs<7w9qQdp zek<|m-3TuCibyrzKDKObkB=FF5kuALN6CZ2^g4r?w7at#8B?#&K0p%S$vS8HTcwjd_nDp*Z%pK}%ejr3ohM{V9- zqz&h#LM1<}%Vs%_qmcX(;b77=7B%9-;lwFHz6MK5&P1lZ!>3L)4aQk^YvF8#vE*IJ z+MiT>P zYoMUm^$q*1{=CKO+OE;GS?LUS=cP9GdLjWH9Ss*B4+~niLxPVGeZd;mhhN%Dj}QV_ z%3_jBa$+$d(wM_ZNFl@o??6$ONE}q_i?SbQ`0-QjOH`s~F{YZv%lq!ZxQ`Ia`n{?0 zdp&kW#kc%r!_spH$2~+MZ@6O0);w;suC15-%5PQGh(3qO?KelBAzb?S z#!z2Le?D@rBS_<=h;gHUr`wdIWzX@T-O)-{+o3pWIO5fLPw=LZJg}DxzcrRZubC?> z&Qf^+z+Y>q*HQ_VPMW~K^nSmor!Hjm6j|!QoKVsXFQW&Vhi81GeSFQGV~@l991gr#X|Hd zF2_qlG*~hg1~H+~XT{4|ix}12F1w*PZ%t^t{4CCYj5NPB;n}wr>z%A&@rf<7=BZ?< zA#vR^sE<#I5xP>pgiIIx(JO|ZW*idbj5!%d>AZ|E9C9${Zb8RZ(c3e^

    u3f31co#g6W|WI8&TqEivUC7c7~3?CeLWP36X8LKzPUnI&C!l%+! zp8F-kX~Ok|f{uZFw#XcXQE45$;{z!NK~6=MYy3h!I>x~K&AlHLjW`>???Rx7x&Lk2 zk-W`p1|`+Y5OIJ{0w7ZK)&vJu>Xpx|Wtc{sEuv<)DxCmk?|c~rrTA`&m$y}en|&&Y z7-FbrB!`LYJ|)CSWq#Y^ssC+esM({zn&dl@sGSv34t%j{m@Z@j6&uPvkp4%qi{M-m zotLDM<}U~#Qqb&xQDFqwkYCU%cC5UdbKd_m`VIq z-S7aUXn0=xWqM@rl{26h2%yeSsKp)7I;?0&7J12K-D)5*FG^&1_ykDP=S} z@%_I?WLpE*g(AVpNZS`q5Os*QGrvs($4KL|br*kgNc~Di8=YE?AzQg_b>+`S?juxx zYwnWbwS_M!d`0xw?Y)~1TGnO7`hq>Om8eFj8pg*SY$2fB7a7WyD@G=LYIDcfSZQ04 zldPFS4dw%DS}>D}UaDHI0Z}Xi&f_$-Ep6s}KpAaqE*)C2aw2Qd`X+ zc{XQ|YbM#T#Pr(*IS_9|;jP=}7j#p9FGNT`*`cZpmoB--bvJ#!c7!R5P4`ff|nF z%p>9Yw8(ZL@6&m+LE^JC|HrR1Sco6VaZ^Z$mwH&+tA7kj^a@u7o8NMaqY^+|_aM4k zNnhzBjC2mo-H2FiA2O*5e@OKK<*P~+&5o#3iLlC)CAMq-$k0iUf?=I}y}fElR2QV7 z0 zRl3ylO5vMl7{`}k?7m%@PM8@=kM+37drCRG$Ktg&qlDH8KWbV^{u`nk82QS<5=so$ zEBI}Q`21Mw_}Xz3^H?&Kx3D{Suv=@hG_~gNwP~BU?A4@+UREPVO=;yyDQU}Ah?_Fw zeSD-R9l2~a!y;i!JlOO*srx~9{ZXQV3K<#MlK+UmX=Fu~1Qcuaieh59eOnSYR472Y z6nnf@zp_HIw6aw5_*3Mu!tLb2*My#sQd=GkLwa94bI8R?-{+hQDCvA)?Dy*`h2(KS zon`jNnxDaC^O75aQBIn7!(pu7g?ByI>QCOd;X!xNz$ur&5wUYD0e=44wY&YbqmGBH zN1kdb5^Wem={nweOa7{(W^0})3fMPU3l^{I^=51AQjN2~Z62r7!X(QzcuJXz8yh+I zTSJegEJGhPc22^e!m4 zTPE6)$hawescPQaUY|;rIYjSzo2*D6oXy(+IAaa1@l(1 z*d+M?#fxf|>MB&Gz6lSuYFiVsMN)fjAwuQ2*FO)1Q}UC8nb=O>ntpPn=q&y?;xvuK zaq5zNCzGrT&yeo7`pdkN@Zp*bdmH;_^A|ZhIaj)5aY3D=lmwbVOrRk`pK|FBMpHMk ztAa;Cr4{=KSM&RvBl20R=F`D?3VeoL6c8AfJHUc&*a;6w#iKV$=~V))sV2GgXm_$@ z&;qESi1s5mR%PUaC||d0F}7i1IvP*!w=p(suZwFTEOPf%j9HfbH=ax4Styi9LG=0LDO5<{YU|t@8?o}Vlrr+ ziosH;&NKejeCPis0&C{(={D7Do0A{n5@@&i(CH zMOy*B1^jb`hJY(dvIVmHG5_kd`)%6Uu5@9J>*XeQ1kHf3L8;f3w@W-WC<_751RNM@ zRr81R92Brsn!(*HL_wh45s$#s3vyO=3;A($>$W$myR@)cMFFG`8X$Qceb~JUn+{GOu zg5fivX?8as7(d-zoreeHnzp)E+TE@bI+8$Y+KBgZN4rTzbThDS{)m*Z16kf?W#lRQ zeEB!C{J@=|5#X!PyPc$;Z;s@5I#Y2_wJ9mo)qn(Ir7N0pfsswrhl=0ECY9kV{8G)J-OYpdCVon*#m4FYGwXz3r zZVzf|ZZH&+=jKZBU~{|JEaAGW$GZ;i462rdW4oeCT%^@yVFEFg|yUA?FWYy!5$RjlPEpYSjFhK9P>$hH;7|PF0qn+X6@f!{XS-v;yK*_<+uoT|_MhT8W5O+TWp}=*J8`FD~>qT(r zXwyp(Ei;hw0tw-$x5GlU;Fe!gjRINpa7}?tX(Q?QuE9YYjzUR;WxLzyduK@Ly}aFs z2(w$N$rWhHbTsQZt{O2qSGA!lvLMc}rKNW8h}}uF!x$f|BUSByU!u1*ploB+{kiu! 
zk5DEV*^P~ehwfuju!^96%oE(C!wj=GDo@QmIm5pf83VC@gxDXEJ)V)VdJ$YdRk{S8 zP@k&%|5AT=bnci^hJT8xPT^snT4DM_%>5zoi5yR^Bh-|i4*1^<^#1`c_E4X?`(Hvw zI{QCflI`x+Sw9tvNv^X#Y_i(e+n=cReg1nv9WVg);YZfP(8I3%gB06Sj@3E>?P;O^ zO(EpAz139r1Ic2XS-N|BFXyhw{GYCFY&G}*!D~w=lKDMIHu5-^_8z=*oOSx>jib?&)U-|>; zGk@$=Fvitz$DJG)?8kPar2$(`7MR4UzyG+U(_uUl16-HIyt>+oeMQs!fwmA@e{f52 zI{tV!4sFh`hA!&~-B}N582^Voe-`#h0gh>xe|Gi)BPmWJ_3y;}{hZh>cAl6_W~95J ztUD;OWH2+lRND=9q=E-S ziR?m0lN&`B!AM8B`7M*YI7r!CWrH3N;0d2MG@8bZkGg&mb`({=@sS|r27n^Tjwwk* z!KpG2bDtCH%26t?k6t#20s>j3tu#emQ)#cuEwfU*HZmjXf6bRV&!g{WdONPycx8Dq zOH%PBvYm%>z(vO)VaY@AXVr#*9lEfW0hS4)ky`H7M=XKBi7!AJS~Hv6PO#wh&RO`H zAZ*88W{SnLocCnjazpSbsmXPNzEs*Oc+0A0%tin@XitEo`}0zZ@qPb}Td>M$p>REc z*qv?wWhL>Ur!x1{^i8JJCuhy5Q|Ju*Js6W4kuQiPIEl@R|9=j&} zz0UeKR8@nC`p z)v4OG_b#pEeeeANwaI5XM;kQ{LaqU8gs_sX-#;85khjj%}3R=?1+t|R2UN5PLJ zQv3@Tg$G0Yq{gucaUr2~<=;}LuP^QE+1qaQ4O zK3pxQx19STdlH>JyNZKItZ#jl-z4s{QHSFr@n|J+Kv#FRREn$4O3TVkUJ3=x+MDnP z()RnOv3}1OzfG6*wOVz_Hi{E*IJn^5MbDr9Ys40NitF}#xlgUjk_9w@W=dbbwWo!% zq5lZAHxO#LTL!|7y#6Kd{D_bYd45b#`1(&U^bUo;v!8oY!5KZejwtZfIw{Vp&sHSI z=o{+~Ws%KiLDBYOsyalaSTNHLUwbopk4aBanSne)7=(#4*J7_A@7TFL_>Uy-*^am6 zX~cSH<`62lr){r|af>C(yMk47Y}bnR-dwNvEiNNIluGQ4*bpEkU?86Y;BrKZHM3wx zN!gr>R#{Z?E?X(tP}0ssVa!%UM6yW)D#2ACUe9`cajGFp_D+_nmZ*?1xiNXAjZ#{4r@qd=|F??aA&YrZ*$AOWPu~dG~D7SCMp~L6< zp`i$mBoCN)bCAF@X5#^Kv*{9ivL=7W*EfCl&UIi;rjq8K5?f&?J=G}K&8}01hOH<* zCNREsAMt!3s$7@m1Sgpynm!q&hQpd)uPHFYGyP!X*+WjTPJ-lcRxB$Hh+xjwSY*bb z)CgG`b-taNui}_FnT_bmYyquOXI1i7DYJMElXhWFN+OT-YKhO zx(;0W{49x}D;(YnqM<4esFP?yhmBx5Ib4ni@ay5-D#*Klr|v+5g>Pe}?g8(_FAwx> zCb9IL6h=BFJ>vQtHapk z1mOJa&cX8oeocc$bS>tQ9(?Fg*Voro_eXp=lPsyB8ffR*V<`}`?z=6OfGfnh>D2qM zKdF%JRnGJu-mFd|p^0cgWR~pi<0!1Q{s29?yg09iRw=1FaVt@_k=~Q_?(Jgwru`wE zh8}GOEJC5HUqWUj#Z#=!U%G%%pbJTt}{)p0Z^XnCing=TfiH#}{ICb16;b_mRGrknb(k)wCa)U4NuQ za4xO?3dF4Al|%lKK#V&CiUX*;033WL;Ku?PiK7E!Xi`?X6tgvnoP zJDIWuBH=dI&KHd&W)zZAX_=uCF%ZwmVG}hP@kuhNMuJCZ{bq!{| zn)(*dE?pj?DjqK3e^q@V~!@S`Z=9C&NLpv8qE6*`_@zS*Kseq zQ%LcfX6Ks$HPUNy2Di*546rI1KH?~CTEvO> zaQ&axbh{Qj9uOv2plHr7m|HFMXSw$+pPmr zR^x2Qmt;^eLZxTB<8F~VyEWIMx907+Wc()BxoO+^BHD*}rSfkA5dDiGiQ1Mb{u2y| zGrb3onW=5*U1#kL3(Gf@CZU2Q8dUT^*Np2&Z>d%H*k3>QnvYkI@uIyAKuu``$Wh44 zFcQGU7#%SCo=>42n#~w3T*vPb5^Kv0x<*)iU$BGE>Iv#vRvT{xEn%n5MRIFs;re6u zyl;qxpgP}hG-Y>YWk`ykSwSqfU*m<|!EX#AS$dftAX0|4R9Sw4^fo|{8QyY28vrW} zaA!5t3s+T$DF*Eo5{TP)^JayhhJ#zJ*H^Ys6S}@ZI?ZV@%`)kjmGpayUq%y0M`hxd zqZMg1uhxs)lFHdDMffV`lfYjYt?h4xc4Uy^uUDWQqdsj+3{hcxz2r)?o6o2b~X91d5h3JtmC7vI# zQly)E#nhhH&z_h;i~5r_YdMuy}mAt7lx6JZ~B zBjla=k}}@;t>b0P*9YH)>=0gs}Zm3KE&ZlEt zI9DOExrk%*(p90$W$q}?xwvF6!mB-X>8I~$nZ$by+b)Bpv;Ho%ANkf5e5$t@X74Q5 zx$;FL)uo>1B<~NpZ0(BOm?yPWG7LlXOy8M(THGxT%BexKk*UKb&*-_JzMV*_wi7g4 zGC$3aiQXHU^7Q5JH%yiBgX#PROm%#IvH=(wG2bSz;wB2jT_wEbl z+;*p7MFk+1_;w6QUZFkuo>S3M%AcI{6&rSMpq%R*3-)Vo)bfmuY#CH-M^R1B z1#DP*6Jpe2;5ODSs8Cb;DR;sS!%J8uAl$AF~ z>2=?4IigpW0Z6Q0v?f@GNvv+jM-*dUMqr@Gx$j%3vC~|S2~X`+{EQTi&3JvO#ZZIv zh~3m|!VNB3oTx@VUSWE6ezlAQmd6qkLF~I6++0~q=x!n)j}7!drM0Mkvz@fw9X$F| z5-N9dvQdY-HRf)^V4RMUm6}NxC0r)V!j2DdaERC`W`7YK=>tEdbMWTLi(hnV#v1TD zIj`AP5B(Es0h&46-w`elXg7RV-!tmCWf}G8GZ*HwF$YmtPC0S}3$FPdCb?qM6WL%C zyGnY%aj5hFtJ82&oS(Oo+Oc2AL5(Y7W*Tnuony4Px#_HM5AqYrez3?!5jES}(HQI- zlI6m^A)|247nH#T-%n8kTbdg~eOsJQ5*Q-}cX>N@+@{!%ZTIEJ6~>4L7BlZEC4!6| z#64bLmPjHJ;OyU)N|f~1KdDdxeNi|KR6Scmw8XyABhL{D$#I0l7@tx1zZhd{7-J_Q zM$PPF>PM;l#Gz<@O@AL_nM>1;d6W=-T#*6$Ev(m*35pTXVbtcwW!4Vxb3@^oCJG|a zD=KS)hNdIcAstIwrGTRN*Iur=iAN-&8)k5KBc#PF*!F`xjkh)2pUJdi7YCQ3`eAQKM(=e0nLD^M8CB`HioeIaYoT4Sfd8wE>Zt98;Cy!B#`B%iGIP(*3oi zdn)H~1}qZ2SUTMEG05uRU1+4P=ke3yZ9KsfKj))Me*wh+dGPfC({(-7e(k#H>?)bn 
z@s_}X@3w4+g>J|ftkoW>(<>!g<4BL&k*;f_g9`%7D5>qESdUx`=747|pYi_Un+TOC zq<9(o&bn`Eu`QJR>UgEdZL5i<^Znu5z4ZnOq(h89>m{un{xZdpMc`J$ug3|kI&t>#-eA6{Ch3Y z!va4c4F;UcNHHGS04Br(cob~PyV5XF_`EhvDfRWNvBXS2_!88FpP>~~&se0B@t!@= ztpgl=BV@%j2D#I*I|llCnj*15Z)9N+*{Yy14Owl;$Vd;XXQ2SJ{TqUqBXun?)_cM5 zxArcUeb;m^m5z3!mKW6%j?L2zNLf9a21}cEkUeP#6*f#`O#Up``&Y<0_PZb821)42fEu8r+V|5|BLjumi z++-(_kB--IxaNc}y9+is9c>&fH7MD!Xd<`=QqkyHeBmUuRd&hEwtWY{^7QsrUq= zC|p4|e1E$VhK{sCuwuGy14$L$Ai;7Q3G0k-5$vJYxBg9>Q?4zSGL>;_93 zw$x!y2v>fmbQPZ!L-umuTgdD-EwHUqqG|KbGJ^QiV*4BhbJ-*dkdR}_ADmQ%*}Znc z#h?B?jZGd$GW1?nQy%lX`l4f9`51F4cVC9_2&=Jqk^l(nhnNi(y2aGDd`!}fLisiZ z%v{_N=bUnhqgKamf1M@sKV(^9B=a4c$-mlVTDZgKSN96oDmIM2P6)nYArB=xH>}^^rkEW3s4#KF2_s zOWq}}0?(tnEfF}w;KK3V&{~7_ljH>H&#kGBy?hdYBe8D>1$X8=%fM2X8B%KB#d+mG z0`3tj?5ig{i;fVXcW$#a!R$;-t1wWlR_t!Vg|ZVvH6sB86V zjXtt92^gn%B75l;dRa`YUUtNdMHq%DtEmc35HLugYBXFef&Y||YD_rA<@g!@h;8DD zwr?E9xE4I1wOYT8dO{q8dmeax3#XRMYUzRbG<>5vwh{-S#Cd$>lHd9MJxpqu=<-+5 zfzncxu!B_)L_+R5(Y3q!_|9on4x%8OvfL*!(E!h@a(V>X^S+V!*57$ig!Nd}2E$N! zyZ%y{ZDA+XoCSCk}buFJd1MY6#LFstSivT4~Cc*5KUZg(J zf`_!ATh?)BG6OFXBCli#C@CG{X~8(La?e)eAjN9_x8FqJ-~isWIrLX-yoC0@izL)H zAHJ*4MBYru!s?_MO4#MzJQ{j^x>_#bgp=bHIL#2&HF~yF=Q#gRk}c75X+U?#7R3@s zbfcZ=>4MY11(P(w*uD-C5UX3Ad^cP+^{%kJp8L$eH*FCit)yR}mhj!L{#dJ@8nax6 z2$+<*15ECOmswqCdIRSuI%Wf;4`euL?t%o>?L&MOi|Osbk~VlidOC#d9CSw@$@=j7 zJkz`TWv{lbWqRK%I!QV#oa6K(g&fcPwk22f0amz4yKXkEOZIr#VK%!!O!$x%e#FOR z2GH_b8>%3MJyt=Nm%Q}-O0R;{Cijb+B;Kk9M}Kli)r_jGNFmb>ev@Kh7o;>mlj$?6 zLx=n+o8uBN2{@%j8aD-%?5eG7h?^%+VOyy{1Wvw@yRReM*Gj!ZOW{(jinGQ0XXyJ| zW5OOL#Xmj8uTTlI7pQn2?VN#RX-eo9i)PLeE>Yu z0%*CYu0{+PsSAa_W71-449F9AV)7?7&4_uDd>xDwt^o*Z&||=ht-*-)CWaDyU5~}q zgakcD-(GTa0z%!m6Hb0VNhkqUUsZ}U`}^9gIyr$A;kGT1o4TPaZB9H_ehfxtkC^N? zFNE%*JUGT!_1jPU+n?RPe_k;MUoe|Z!t`_AV{~MoAjZDG>7TYpyM4JVJv5likx!+N zo;HQ{|=v*UAhsMGJ7cK{xx+zRR43Oz6o=t5I`dIZN_-q zHT!LSe>WNY+Xcowrj6*~3xigDP>N!)KKU@G)TKoMdUVY>*(0gp6!UxRiHQ?cy8|9-V!tR38cB2PXUy7I zWBsEx$FB$vB{O4Pj+Dr?kE#W?yUc_ai70%xBQ^wqhvkmes!k%J?vf4(+)Bdc+i5C*PGi0Y}G4jo@tC8B?vL+GFe2h9_%)OnMoyd%``?Pas9XSUVz& z+0jng_Zxfdd1#OJ$OvC-KqjrhySzHNiUSC;S;qSyy9Wff!PaascJXfrN1||Y4pset zApebBmXHL=`=LKV6Jq|}-m54Z{2^w)FEU*Hu~vl;**b(FMC!go>f(>u+YjH-5Wjgs z+I>#r&7Pe8UjEN#(-2weVR`cLkJQ~ne%EuM5H6;Fu_nyKzm}Xzgt!{0@;|cBR1wUt zac|Q*$*cscM_!P#Y?{$p(ffQ zL_zr#pWT@YZps&gW9^e#O}5UKv2Mjt@5(kag~jtyd|ySeRt~G6J1u|ucvKqC-ZMl! zj25Q^sF#zgU9Z)zO7W$eTuutF_%wm9NSkOMz>DELo4O{(hr&9GDkZFaPcy-sq6l)tOL1`5+!m3Zs3_ z=bxij6|Tc`eY}|sxrQWv?4IN5cPy8Df1U5H$kCgQp=e7w%U@t^)_kj%63>Dn+9C~k z6__T$F?k~|$YyN^8iNj>b)W8bqJUV}-JXf|QIcczHAtSx3!0Z0gJzvc)(%b*cUUKs zVtYsHR6oRXR1#p6Z{_+?!ohbF!!*{T+3LgW0?xP8w8q+Fq|R1T zT1;j6?S&79MYtWb+Pdq;U{x*ZMnb4DW^T&z;t24AxWFJ7VSGUT_7-; z{*SI)xZYNEG}~szn>y-#_Z@-eKO7^YPk+A@Z+thK;^v>?M{yXLh&=u8ot|ceZ*K?* z`@dW!xc@}EyHmJp<(SMaq`Cf-o-~_zQCDH7L!T=Cstv?lSPn6)Za_AHnE(KZ5eF}j z`nGo^W1sHDre|JnK%py-A7rzhZ+6c;bVFF1WXB!N`hrT9Q zgr)+?;H^Bi3M+->KR7E}Ba-8Up5ek#%csvo2y(8USBH;5e|3ajAK}9POoZY-a7pM_r0JR|q$XZ}opB8LS5P0PzWRAr%x+Q7t~4e=|KUk}Z?J z&%{W6_P-bA|GL#-h`d>oc`*Ls(t05VE?vzw69SM@1b~uLOK5=`_{)ml=y^gm1cVx% zqYF7eUsM9)WI96K!epYjjU+!PB>o1SKFAz141W#r`?23W$3QgQGRI$M_E?A})333_ z{DI79tOGL`=rkf=n}}dgegf;R2-%{JG&L?F1<*x&jUY*LkMid?BahLUj)ubI+So)E zXgRfBnZ!e|I#HQ+a_Zm}ET2gP6s)}QX-XTSkZ|gl9_jR|ErX+~w$#1J@irR5-+cFM zNE!!MS)_YeyA1O)28cSFgWMVn?i%t6(lQ34&eHRrh>*uP353FS{b;onoI-bKWT1Cg<8U_SJS-TJzGW_{9pD7a|ih7EP z11`+QfMeBe2xL&wlq!rC81oRWO(&^6B`Q`4n#)UNiLG{LFDP0VR<%!%!MB9#BhSdA z0Im%S1>a*!9$;_N+5;<>f3A6T#oC_dGIexd+dx9|vD!Om-0Q_p7&hbuxr}Nx%p{?? 
zREk;euS^o-n8>#GYJOEWR`-XRcl9ZtVbFYOYFwPejJfLD$n$#!5pLxf(FMX zM2rwMBjUo9=WkrZ;0=Dx`Ui0lSb~yB6_{du+bH$s2ij9PZ_va_`0Ual>lf-}Va`|d z$-EI$<_0A6Ncom*eKY9TZCG$-oZc9WVjL7KbW5Pslz5Z_8xc9qI}yVqun56x9TS#0ZFuI}f#;}8(5PRN)z zqbEwr=(zO~r7!+c5zqW9l4GDUNyU^3ehIIn7p#YWAyKIuG97|dqKzN=IQ*P~Xj)5z z3!~#I7gcD?T4pXYKJ(y6e7*ILMfMoy%-dGQ<7;dQykh!5SbGSce<~kAX+OoXpH_da zuq9adN|~kPgcT!Gv51i3rBa=1ANkN|H|| zu+9ccW1_pBEC-r27*+vXQQV=6oj_8~TB%`d*3%@5#ebNTxB2$%4*q@u&&MJI-Jf|+ zR}vC#MLrdN#dVS=b%lUar~za0jHBma*xQED`YzYD$+m5=iE7kTxQSS*vg3(Ef9FEJ z`x$=Xh96GwFQi{X$feJR_CI#ry6{%&shS{qpAA#w3Dv9X{5T1||3C`SdwMH=%GY??O^;;mRD-5B_{_n?MLA5GPzyZ zVhY~=9aVNi_SF3vl`!dWRyx*iGEYgrBB?P#uQ>5XDCxpbSlMtct&D;M)Jt9|jfH&l z3<{Y-w)D>+`zSN#F6}(cY4}w%N^LH}JY-)M-952lYEPA8MRUPJmn4%ueMsz(Zxt4| znyBDF>B)BcF4|S#<4iW2430}_C!|oT(pU-*VJU9~=jdR`(fPv1pn;(Tk_PM1_Lq6QaSO5~D5-&jJ#%WM#|6v2J*F(m zoOqqve6_peyJ*D(jyXm9s$CE*924L!tUeO6v!F50vAHwl>7LGcvLUP5pppP+ZdBhp ztQkgToqomc0Ew&YdfP;o(@XM>zG|2cu2paEt+YiORlYUB``sq%v`~1gOyDL%_)Mv{ z*>P)e*9ovXXRHzBha1gxO|Wj9`c)~oZS@%}v@Vdt~x_W#m? z|L1o6zwv)p3d$|{Z=Wh`(f0sk#diP@h7tvU5ITh*FPO3A(NPcOj!-tT%}^=26|;_D z_5~xl)AzEL1X_w%A@V@D0x%$|$yROra;RZUlRGu9B+W?nbcLNA@AOB2lE}T9IDP?? zbgUeg(>A46d^IZ5m%WyXd8XWnlQ2+UQ!mHn%y2o%KcDeO;sD44_<4q!!!YyZ^=C~B zD2m*wU15Xx4?`0pE=@Ib$E(jg%J!G5dLbhPR=3LnKSrM%6jVZ66NgS=>b(>j z(`j%+W#1A5qkBeSHb;i`N$#>cv(nT?E(gobNAr4&oMX*iRUl-WKJ7@Jpw7;j=&|}4 zN$^|q3TyF|nv5r|71H(2m)*O;_+9q)Vcbrj*codjtCELGzs3`_8tsR`Y01|7Jz{dr z!|6)W&(vb09!pVrw)}7s9}|YQOSZZX`{K;aKWOC1l1I-a7+yrh=n!QHYJ3{hyAimi zH=R@1qMwbv*>UvK61xsreDnLDb8No;FM6~3`kNOYp_dk8+rL@L|Hlj1696{9U49B6 zn#Kb8T{HhlMhD9o0?6M-_tFy4k1Yfjfi*t0-fPQeuyJJMT$lJfxs%=YTKyYCMlII`@AoMiV%txvNABZp=fct&jA_55-2 zQ){9_;GHatXHHo?iIDuU`uB?DfuEyX>*)!n^p+h{=9GJPJCO`zEp;!(6@I@cNYo%hOJ)-%k)weNiKOu5qNCQ|C{UX8xZhs>c%BFPE~ zxuosID)E%aqJ!KUE1_t%9Mvhx-gvP>JvpCRMHgI3&?ePTT_4M;c>gKKayV09Njc1_ z%(*KjU?776olgOQ@X6ZHyBp)*B(tlnsMl^9+iFhRg#Cz!p%8BrF_u%5WG%^riuvP? 
z<5x#-{KD@V#F{jEV3hMQgiy~LFb!T1#!V!eLYDeEx4T9vc;8F-8J82gJ6z|g6rSck z+WW~f2-IkkE%ig|@HwJ1Z{8Hw;*<%-vhryzIveD4hutOf2*0&-!!AmL><7fk3@5OW z$4Bx1Eumc{LKcY$oqyqdKm^-AD3&@DP&R5ZVP-ot8rB_!XDN@CuIW|>k#}Y#vDZns zg+#}o;@qj{EK2~Znl$J$0;?lHxS~c)LMpovW7e}`r2F|wD^1ib91^MfqeQ&~JUXKa zU)LY(snZ=c2>C`b?k^CKFY)Zzd7l;A@@o=RPm`D)q-6|xjHqZJEUOT@#E^f0ueS4+ z;1vt?9S7F(2rFnjy-Ws_WSQm$PLsHX(Ol#6|HNmT7RyxN4mrKBt3Lsh8Al2$FPi|< zN64LkKoA)rgg}ozXvK%Xa|jA1o`os7;zWO>RZ#u;`85SxJ6c^bd~#{MuV{tgO!F1p zG!mWej|z*BCq_1lCLOw~0YTY{)=1Op{^4l9Ix(3lew-wtTn5Q%tyn7THsNhz&S2#{ zT55=2wg*02p(9#eb>&GRl=&4{SCk@6&oHz>nwq{rInqX02&_dwDd)6AmN}AtGG?Hv zqoh{mP6fj?B28wiudAvTd-gG*9o;P7i>qEk&VxvuH9E$<4LD=O_0Z7Y1a|(jatylC z^+;$8zJ-RNb&;BUxsqK@MsYX#GVTb5;1n}B(+TOL4~}$-9IbwUON_XrG3Zd@E7hb# z4Z*{ohSE_AqA%sU)3(ESTF{F0oxW1YV*0cAVSvQ11ROCeij{i`ZLNznhaq?p zN`PLFgeE@SoS_q;L%6a7i5|-nPI6qdPZZk+InZkY6)VHw6dt5Y$mWFYQC!K zN|mt-_FP5*_SVgKE6x~x1n0EU;%4R9xG+{J)iRPu@|slv?$ z4UfJoVfFNO*@UdXVluBK-1`j=fs7%V7RXt#7b(TM*rKv|(?L*N)^zL)wx$od6Hspje{pumO|JW~FhNH$ zd)&cFF0!>sqYu<@Ujd-r6-&H1Osi9PS{ttg*NRLW!k7-ZGY1<*GC&_$~4t zZ4J6e6jYjkq#{Ev`gs&KBpHf6!c7Q4x8W|OLILwg@ow0C9sp`h^U$vJj4|b?N&G;= zPyyB?nwO;)%RDF&hwPp)>YbkxN7wyU<(|_ZZUf`jktSn2>gkaw+1EV_^TWh)kKjF1 z+$om8I}=XCSLj-ROI728aPsXs?->lDA_3}$hWRIet)WmjMY25<8%(vO!o*}8a|^10 z=Q|umD|>vhQ#DmVXWES#QcF9JTprgEucwh%w$YJ&#IN;mp*zqbsmKjr+OmwNj|S8x zS?h>YX5U)r2PfmQy2isrMAb3MfY7Kh^{NqlxuDVP$M0E#mD`40m9BR29Gz^8dW{#R z+lD>GM~X+E13)(@0UTS|XRv@m16k?5TdU##U=fy+#<-$7;lsD`-&U-{FNZC6c{;oz#Nzp3H zpRbRYWwMN%mpy_Ezm4Mi`*7$Y6zR^ zZvVIS6+h1?wQ?{CNA5<~BL>Ga#y8W{hHP|5IG{N4u3@T%9<(NoNhzKfF9bcs7$%Rs5Fau371D47q^jdkYBBwxxeia^D4`;u{^EjA1 zRorH_#p)>gX=QM>2zl%QklX_GS(NUElY3UbOh5v(5avbTWM|XIR6k_eXG0X{2!!OR z&v?EV1=!Lit|%9Vv_xXeEGX{RF%xPvpUm2@YL0~5o}oT_$Zrksg-g~2wjb?s?Uxt{ zy%Wu$naMpQC|YY3VpLlnG>hb@o24ac6)v#-8W$lJrGJ}n+q}x@HD?}@AjVk? 
z>+_)FQP}HM=`u}@k!0-jKMFkU4u6ns(!vMOyUo6x;%E8{_4DxMIpMavo5WusC{vz& zJdhCpHvpIyM28ioq?VRZ)u}QQ17cI==1g${#dR1zEO}x2!YKL>PqMIcpq~oPc~(T~ z^1uLNX%m;^z3is)wY}1GWiYsg)0K{2`oaI>^R{M zA8C5uS1hN0iT0B1v`Z%QjX~G{ID$+TEn%}yC|0zI9OTq(9w>}iwdm%OI($}#LJ~C2 z@F~KoUdwu!If}oVIXeV{wgHgd<0o7~m=$=)62-n19W;o7B2AW(G#>(Dlre^klC9p7 z-FBbrI@073P=cu9s7Nw^55_ZD^I4K*s@2toBlATOyfzo`3PEH;D741~@6N-D)Kq32 zaHD0wk6D(22XxB}C$It~)MX=MGWXB>St(JbiQY2^?;j<}9L0d#^JFBsg3g1{_mm+8 zmEW0+Q+f@v0_&0rr#2P}lmkZLAC)`Zf+fBJzZUTEnlFpTN-7p9r5XuaM-ce1?l8?Y zI5>7b;DrJW*~q+x;)0f5hZ`88jAw|gzQX)od2VQNIAMK^fj+I=UBxy`XtU(wxwkw@ zH?EFTz4IvOo!LnVA&Q;#$NtnlO)lGX ztm9&^Z~nN_)pwkPqId9GwtL2vI(yv1_KM*06h6;jpZ)!x6P7<+7YgQ}1lCjnl)%<* zm#?@${L)4O4^k$jvQ`nV#4 zWfP2PIr`CPlLCohrh*${0%hUd*zaMs+l+Q^VWdclqhlmVaxg>hg-5fjG9 zxJ)nGTh9Uva`aK*#3j)M1)~#!7*#^~EbL(bWs1{Hk<^^7{!^rIQX>skG?hH&vncQ3 z1eVS_X?!54XoDco!4(20_P@-iEks8KAj6iz|GETfTZJsdZL8`gBTkkdTqh3?j|yoD z@nW2?nXn5k70n*>`j!q-&A>mH2u%zaVoRTi32Lfps`$b9>eJM$dP`0j%~YZfo9uC= zNj6@Ovdo__=hOzV%z8si87b(RWG@FFPKpU4?owdOF$U}5<3cA;)USvlJ!^+QmC|3s z?9zl5L`*-drvB!*+a$|u+YC*qSitzR)3-qhLcD%{vV3?RqwSXNILfd$ zUQSbp!6e2TH+>yx0%9Kio0(o~-|Y^bQdX8RyK&ko4aC{GMd~ScGl?)iTCX?`2m;Bx z0RRT#Am1$9tyEXm(-DQrT+&5r(;XaI6$b4ERt>LIRx{MWPVq% zA~Nkjrga#u47qS15nkXm&3j`#&dr@^&~%eX-o%woeT)}W$(2%kDkv#sypHW~}q77!Mw;gA7cVSy?? z+XU=XEkB3@A~aH3CM!BgSLXL}&Nzv2Sy9tO4d|2X>nhyu*prV%WV)iec2j;tGdFabYPHP|QgVrN41-HJ1W zi@=lOkv%C=mCz3CZY-rfKZUUoLen%~*ZxxWIs#u0twMsxAE0# zNw5Z_nWki@WUvF)-@hR93XPkrHzXf7;SlU@#Dn*acXXBzE|Avk)eR4a9ILq_f%}|wtv_J$K-?cuBIW9je`}Xw{=ys<;O04bkILlwQ>1(TcY`uAk zH?e3v8@h?pqg%{)-qLuS5vn(9O%{KfRPwK{b^{cUH-#1_jq0!89a(`X+?-98&n;%W zrsH`WB~^x9HQB0NA z`lvF#K2Vivp*3U7Tvu@*Qg$i)n7~6pFif4C(Z*%e!3VMu=`l+<8p;xVv4R|}pLpNU zMX~WCYcIz0ceTf=X6XPba?pgsR=>_-n@8KWjp-yjMdJCftZd0Fjs+eKvGd-hLcwvt z>#WT2Mo)+|adLBmr@c2z=tZ7-2N@hG3;;Mcm_4l2cP)ffIB#_Wrkb2RTW)2ZDE=C>k)O>&PwS88eM4p<(+;Gd z2slrIay+=5DR!(3Cf0mg!dGG2cUl2``k36pl5g$)NL{}$`f@~mvr&&G_f)}JVoW2S%b}&pBG)YHh_D_0+ zPG(^O#EZBG7`AL`KfT4a!Rd3(I>t|&M z8yI)c9qwU zFTJ_3TDrnI&J@KfP2>%b!R5ZWWq#6n&AYUjSf;xyzZiusw>}(~E8j<*E^-a&9RZ~N zt-<$|UD$Y(7t`#O`{ z>*O=fGYnr<1`|M9&V&&a zG#q|mz!lcW>gTJzKrEbdHK&u)PvJT(XRfkfshuGbsR7p@l~Pe01a>I3@pg_fo;E$qs7p#{&x@<%``HJa%7cL<+3el9r2_+$vt(Cn zrj3f!bc;F~x?u`d2lUTy`+|}!2Ww1Y0^&#!>X*{|-ge!#!NLsWm;zsP!6sU};%O!M zzlf`}M)p0ZH|w%Hx_(@m@y&F60a-bfsCs=#^HHWvza}V8iDW!|X-vxt0OV+b%Q3mm z@l6K7gr>Cb?OInqQzjWz=PF7={QTmzb<}dxwPW0Tc-Uqr(?Xvgn*FWK9HiD&e_L@M z$8QCx^xtki75_D-#_Jkp@09&eZk*@Oi^oetK>GmR<)+rwVK(E1$>pdKMK5_m?WUW@ za^ltC|fH6t_!B`+tc$k{hI*)}8FF4wYo&Y|vv8*&|<6 z6XWhG9%)*S66P(&J8y=5p*9$NA09VII>f)Sj&0C|N2a(nZY$Uo${iKUO>`UW15Esb-SF@c+I%fp%WVfoS)&8=<>&49#JMEg?`DN{U!fVVep z-*xJ!!8umu9;k6<}0O;79GsJ-p<5By3crfg!-cAL=9Pc|JCtWup z11BPboFw^w0QnQO58$>*g8|lL5JcvM)7kytNrhMg0GizQ_Cyj^qC)HwKRxw*z>B$T zz4d^y6>COo-0h84lwv19jxI6eJ?ID=V1kbGEbbOOevz0R5gCh~eY}G)lhp5=t{+bL zAq)dB+5RQn3vy{^dlmk={BbQXdOQ`85Vxnzze6N>k|*!Vj>*&P@+_( zE-oL~eO1*UF~dw2>t|*lCLwI%CE^ zJ*0h_j6dnrhPADZuHfeZD793F1?2><0|X2Ei>FmW2;BCh*y{CRhWH#7e6J_eHye`( zAWz?naBj+9Nx`?+H%Gj%buq~F`LE*03r~10yf35Hwtm?Hbo!`o7^UUH%8wV=#YacW zxkK`orIWPjwwo2X5Bxs=C@b!vfO63X3OiyD)a!!Tt1td`<#*NkyUijc&@E*;@qtaBg&43~oYMnhEl_y9^8$Wj74*~^sXS*c^q{ZuDq-#LKtQ|$(0csv43 zxI$o&AIHLXTyHi3r=p%jec{7mh(7VRPv(_)Ca8PM$7UIumU`OD>6zWwU;6St8naZw z_Yn*T!*JSM-`ft);Dj84^DS9RWrbOq)jmDYI1`#?Jdn%rwbn<`Nhr>+O{H`1Y8SC9miv)3jl{P$wDv&0xPh0xl!Aj)l{Cu6m*L#Q!uqz?O9(Wz zqpPb6?NClg7h91|B``XLF2Ei~V~uhK7oC7lL^(w)6rB7R=vyolxe*X5NMJxDE`$ek zGhZ^DqCAizDPHVpbj}SZQ^gx)N-7=xePn-`p1$Jq`Bg@;&5+N##OmsFhM-(9;b5bQ z2+^&zj#quq_K#$S$IIuI&vc(r-IB+$%S3%}1Tzl**o 
zQ15#d?bb*9d+`lSt=DNQclU4#90^V)7z0@hQ$l|v&c1ujp*pfYWK*{)Zk+%G0tee%v$&C+bML3uB&^WP`ltExWSVuY@nrIZ zhIL*#9bu1?h=jrtQ8rw!7ek{ECWKd&+MZ%6T9E1759C{2v7}>>Sx9(UOLVhU7a0#S zJaW35oIE9w(mHap9qxLQ2)k@ll{#86BfV}UE?q@@`4t)fCMp`a9W+G8; z$E__l539c{JnpAmhF?>HGz4WQ|7T9dynQV>MdW8u^=GLu|Pnr%C52~G|ix_ zC(O<+mTxsc9gAwk6@*vU&6Zv59X@hO^S29MUB8YOv`*bbv}OR`>4+mb8WBXIBuZ{g z1W~}ru6TKM#b!y@2ie6!rL&YGl?YH2tI%lrN!FMQJ-3)hq!rT$;i?KWnq=$Qlc2C@ zQ_<%YzCY|gMwA?jCeq4po3+xc&M(VeVSG^8XD;)WuXO^qPieus-YD!^jx$U%D&OdD z@vmg1$EnwY$Ah7!HGtg!LbF1XX3*v#9Z3dS4*=L5H36?-P@&JJYe+al&h9BZA}sZn zw)%+D1Q)kZrt}9o^u-Bgn-Z3WsfW|on^#mHBGwx{S~ZtDS}<3%Kuq1BpV8&} zcQcb8gA{_fIn_g zTHhB#ITdmPVAKoJ$L12wk9|z+NtA?$D%UJhvA3UAO89oP;TPiYuqktdx^uQ3KrnCV z{+#~&Fng*SPR_9Etk&U*9hRRLC0S$meFoH&Z7Ihn2M3Nf`-uB1W}WF3-xK$!;bve4LaXYp2VCdzY`kPJaACs)d1B6xI zqf5SUdsRBuW8UV=h*!49#8)>C^S?s+e>|x8{HA7bUDO)M5?lEok9j2SL%E%E=`3h6 ztVL-Ze;BR%=M?Y~i=m-#Uh^$r&F3zr}mryR%=H-~fE!iNc#5z-Vcb*`T zrOI!%7_sK6)Hi+H7$V!l!O~4JjXvD+aQdqd2nc`q-*&1zXx4|{dX5XGtIj@cwp$8?$j6ubUYBpyHg)B#zRzILVkc@~PZ%SR))U>e`N~IrwJ)wS= zcS4IBLJI+nuYkyTbChBa%K&RA4WMzPPf)5hKqD!o=kS{Vpop%>2wqXd1uG5Gr;!HD zLhHbEZDKAZ^itAxaAgCgI-T)g8O>OR6_bmMVPqmB=`o7=x$B5@6iw08G`zlebH>sf z05kRlf!S8k61z)L&-sjy)LU@zVTNUhqsp{Vb?V&a`${S>o}gu1r#sR*=4EYjAiM03zvEBq;hE4a5gSl(M!1KwPPq7s>*3v~LOns+OGWGDvY#l#iz27#k=M)-P+Tyhm|QDm83|`H{0J zP`j2QaCtWtb_~lDY1u*g(KMjQ%;1Q-fJ9ij<)`e5{k}UyK}*WG(+oH6!!BfRbDgeY z&sPL|sDc)FJ47#Dub15-4?|n>7u)K_2`W-hkYs_yeZF8TwYjK2LWpqd(ZpBiBTskf z&ewYV*>b;Ga18(dk^FW1fx>t#wD{~YTIlP7TC{KR9!77zmA_`(Y_&l5&4gfsx(5Cs zM0y<~zCmpv8T&WT00Dy9IC4u{3FAKH?FtN-HFf#!SEL@_2wlt>Yvg0j0B|JP!y>7o)!Mmbwp8^~y@9tVf)NZcbTyXGbqhyxY$34mYm+&Q; zTYq0fnvgLGrwm9bf4Y!$Ln0pQK{FM!QC3l@ogOTXVMc`2U;Y>^Z}oeJ(ZOja6AcAY zf*8#o=h5hMRN+G5!iD&7@lc6$df_Y~S}hTc!6a)O?LxQWN)U3Y;|E44XZdWST0$k} zg~$j($gjn9VB_9OB1rqxJ^Rn`elUWRz846TEFX{SWG#wx&0!>BZpUHzKm_djDdbNh+&M0Gw>JSMr#$Z1tv3fYbZ7X& z{cER1yDJAG+DZQYSF>10rXtmKUMlMhqC#-ui9$y*$MXQCn#dqRUbfcr!K`%XrfR-b zuRbw3f}AY13}>MExV#S_A?wGY63+~C2vm#^MuzN zlp*#ZeGv@KU?X2)m+efJWcO!-?7TeTK-o{<^8H4oF}5id@a82KQ9WAr%*B}3Sbad7=U`w z>Ov*a0Bgm_|JyuPbciwvG9{JIN31+m@=$?H0FnPT0q_Igc&zc>Uo-zf1qcxT*R1;- zx4EO+^X*X)#vdx-vSa_tOS!v=$NkpxT?F>u{4syOj=j=ELDuGZ=TpFEWG|c`Pe%@m zTn^KpT#@g-z7uC2hp(ha?+)KW59EYh;Q18z$sR@p(Zy+mCmb{DOSJ37K$hN zdN)r(Z{SZrw`8TnBvIhyK`;jJ`7t#(AyP;vIz#d6sA(4ZST*0uA^d6 zeF~#|ucC58eN`UyWC-THEP+DUr2IPmF1TP)aE!80*~+XTx8W>_LMC#weR_7ohNuJb zk&%g#!UN>e3?E-}GTWCWcw$2-h~?N#n(V%i;0hp{G^R!4pj4~U8keXFBcuD~eI>D! 
zs{v(d4qTH_$;yVPzUpc%ISHw%Mk%*lEyEcYY59hyjLuWB{>lniCI%kpT&LI=={|YzJ!of!}X5Zmxw7s zIQ(5Y&5A}p4mDxyalu;S0>0Gxhd#qE;EkKreSDVpx&wnK2F?5VDMu6z`DOHid16su zhNv7PlCDS5QSm#w#{}rBl?VF~rAj^QgI^`$Z`BxQ>v&wgAK8lXg(xP$255u&F3}@s zGu4ODcXp3nRUXRg;0AKSNOE`7mrc@=NiWPL@)lV+QtEWcO=HqRrD_BgK6b$8|WzH>h8Nw$PV2v*9 zsCR9^V3-}F5shnmv-_@F-9{hM%5L?wTEkY%S{GV?t6t>j9lmX2;_+hhD9GrV)}YYI zS)zx>6$D*{07)bcSi*@M{#81b_sf61agv&>uhLT4VX$nv}~w%Mq8naDGa zS;_!xBwvbr3Zk$;dDKG%_Knzihfqs@y;DoM#gmE68w{Pwm*%~1AEjM|l`xA@V_{{w z7}uSZMRwUZ-~U`Lvv^8<+e8}hVPcQ6EL(I>SMJ@Kuc=q@7deWD$%T@N{8xsRo?hI`PVHpjndC(h2hLZv9>#c zzu~{pi4lf{LGf2{KKX*e7U9X zJCT#55Jb~`zS;hk1SDD(rGX~=Rs9k`^P<@9!Ez#a4m)Di4QEesZ`{ijO|BPhdtGta!i`?@ z3$3G{(0)aJ0KVdW7k}mM?ODfJcOG-F9KRiW5d77hjBw&Wgm`ZnOxX&1aVB}^dbSm9 zqEK$&0n>Z@F*AdV5@=?4BZZ*_X23)e%A68Kf`ZC}?=p^dRi+hDKSwNR|P zEO(QXo@8Am11~0~&Rp8}8H1y*gV)NS2tvU(`llIkM%D$hZG$72JJ^%Ihdy`&Jw|7F)$G-z;=hXHsr;uf+o<7 zq(+L^En-$%JQ&g*_n9A$D4gzv{D%v65Ws@1A6}BPsGi23XT|&1Sh!jhO1(PRgj}nc z3;E65$_0K3rPP9?b!Ywu6eY=B8UhJ{X)$H&>t-PNLFvqIt1l#zN+Vn5#A+1->f<1u zQlzPoZkzEt;S3j6B+1*LSuA9B?B1lSbOs=fY=?ei9Gd6rz#eWql|d;rf7H!`X$hDFi3gyi!985v^QeN*PKGc@9DIIf{~#(f)f(Q+J$CdHhd`_u_Ew5kgM` z`V;YfQ!zu^W?-;x*l(43;8I&`&#Ikn!E(7V@-H&4Zf~fu!n+B05Unx*E zvgI1p>jo%8!;dhB6>4HHxf~a^7pY3;BRI_FLay7fU^Ixz^s2PuxCN>+X543>Ss%BEUe6KtGLwo2~Z8Lh35H}vvT|zM$=Q3 z^yqRD37GPzdq!EEOs4&TzRJyp{Z{ zP<`V!is$GJ@b-oTzzt#tSaRwH+#J>NAUacMp>7~LX7034R@!|a7S1s}|BkUQ0vjk8 zqm-0hJFss&yW*ZIBXd$o*)MqSG+0&GCy7gjVl6?;YRsrmgUg)pU1$16=VQPvL6sn< zAxAEzKtY8*4kIE7QC)_AO|0-dARAl&-0i#WjHmL|eTrpuUXm~jJ_{G0?KmD~9PWz8 zSa4FR0io1ajD+K(q6nIpk<90s=}Te9+#@+}SHX3kODXzoQlQCsD8~v*s&fnuf!W(loNaoQL4;X?@}qa<=B93NYeGq0r-?YE#Bkh_lm~>;s>4djZ)Ofcbnv@W$Fn2_{`FNm3g~YhDucyclV8alKFS z>cM&~I58Kgv-)ZX$rg8UQSY?*r{WIR~gNq?{<)4(KeFugl zVu4Mwc0g)ZHVH!^Yz7J@iybvEGnxT)|sO4pM>}CBPrJo-m1}`@P^mK zU~KGf#poB$@tRMs_P68ZGmbu|=n?KL^Dbyd#QvyAZE~vTIHSE1#gk%8b7BGfwD(i3D^ zDBJWi@V#SV=w)%Hi;@lt;t1Qzsfqa26%~8(`V@gRL}9YH`x`_1mjrIQ8BY(n_6z3J zTNibXKL+uVzF#gnKGjK-WqpCv8*Flgo)cs9zul*_oomV3^=|Jf+ztgYgl^*$TD zYv}xT=3&+tZ2OY1xd>dWsLnGyZK;g|@W5Zdz*fTsMHPO7~UbHO0a} z0g~gu%KsVbE`{Ae)x6|O`GTMAZW&yZkwL94L8~KjmDDmE@MRtv*1OM+)!;Fb*lsea zD_&4AVvz_{&P3dBLMio2lOB5ogl`^dbP)2VTnM5h7}aPgbTXPC2+)x)&|~oTQuvp^GF@dn6X*e*bH|^G8|_w-`XMOt51H(_N$f1_bgp1eL=@d z`4Szfizu+M-h9Y#1AGh>LVS6OMsUlbw!6~9RaL_!g;6cQjkawjBVx zFZdW;%(^rq@a+qldR3&}4tBm|H#%YCb`@f9wmz}AMC_Tx;+T$BvOq})vi zXIF67y@(|VF5)57qR3Zo;_-db1!V;#Ho)2-@|n%Dl4_h}4Vo$yH5gY+yLkk!xvi!9 z%FBLQ#C#CLr}MJijEWYITmKg8jnU&1mFG59z0SW$uV8N=`JGU~Ut3_aPx}Skkqg2q z2X9nh?25UvspHthm%jq(-yb+Iy_v|^+BAlxARHjxyQ`_Ez=VeJ;|y@s{unNAIS)go`ackQpf71@Nduw}fL+-hCpaJ=cUBN~+5XO@qM zg({k-`Y}n@^az{T3u}Tc*DvNv{HmmZFmW^98SMKzvujO7Qmnf zndu+F2v&0Wys(A!Hs5WxgJER5Z1*_5#c@G`kB_fc`tK^v$2S8OI65cjMgM}F4Xvd&3%-MBcc+o59AtS9itAhoat_IH?j)xzR&jnwtWK6gTwYfgZ;{UUZ%XQ& zW3K@(iG{J=Ys?-x_&I@U3|dyiQ&&^5C}|sOqvzEIv8S=|!+= z3vHvxh?x!FsL>otbDkYnocmybv4v!0L^yFFh)Ik#BemPtmo9~3*bnxA2q6}ZB z*LWaa%~`^8+TxdDA0eSlKNF9=4(KAK?J3EE+l!?0C)+G)zmq%8#1#o}X+^VwPl1_n z4W)DNF)zG#R~xrteFFGBht}bQS{l_^aJ!f0#L?RG?0vCo>10N2t(P_0bAY~VQH*Ha zR_8Sp4ac=U)Pp>Xwpl^ms{R6~xP!qQWSsM1Hr)* z9OtUb)2f?at}^EJM$<4SJy)y9Pv%W6pN4U6cC}h?u;TwV&`taIo2bnIu5|g=6}L%t zHd3-$y&1vHv}5to|FSvIRzXKF9M(v{@?XY4{e6~O1q;}N<|5Ru&hqMJ;tt1+E-DbM zzdSL@8b2%~YvVl4V9(Ot`*5jq_<6T=qW_pB!Hoe7?LiPqhBbEI{A8ECF3S(@;M%!n9|LGFv| zOFiFdw31CEQGPzz`Io~cAisa++~yCvuA?xt<-R81d51ysbe(W6{&XTD1gxR81wP|= zcI^TUwcyEp+Up5z^Ziar+OMb)gK^E+m(3N(gUK??-(;LJf@3-|eu5LcwnyK&eoV@d zQ>F1aj*O&nm`VWQ#L46x-d8C?1_=nmhzD>&zk1xP(9Beo{JO0l}Gz&~i zwDYV9MJ2|I-&XKQ&qSfwqSVwo*s`UD@ny7c&IP_Jv{_D>nk2%{ghF#oEoYh+b zJ@eC}Py#JanJ6Ux%4KP`N2!*MU(WDTL^zYkZt?m}bC3{7{JEH#PV0&f39m17auEZ2 
zS6Utys7VoI>$g1RPAW~8299~3cH|s#9m-==-${D|e%w)cQ!MUQw-f(vE$slnK2bpo zX^#K?JZL_(>jnhZ$eR+ub;7^QfDSd1#8+D-g0B6!T2^O~R62DnwyCn5MF!~|*?IO#fz3G8$#INc8M+*;aV}@cV6ugh#yeyABUk&Oe(v|AU zcy2~HTE7{Nq{24roiv}oj!Di}GdFxC2q>dw-gq~$~-Lp5bG!)4U6;PvTQbW$(F&9KSTyn3zztpZG`9ZvZ@wKilh*b80( z#rnd(o|Sr{H6)?VNqgJ|Ip(cC&aurLUUBheq(8}=%G^G_a90AdU)^fu4XtGGm6d6K&WFlf3(`O`o9HR*92QY&jVTJd#1N2s; zvLmWsXgrN#bqXi1waf`EWJJ{lyD8kidmdq zJ6L1eLjd>m2CrG!#wNPGOdu9N#PwR-FkNUP6Ogo8ixxRV=hH?_`5{g#a&OAo%h>I zx53c5tX|B6TPRo#?`$4Cnfp$-@vQ&_pN=Mt3wqo!sXI`G#Hz9Zw46(2KM*|kg2-Q3 z(sblVS6`3bu>4tcW!2rYC^gzmtfP5?hn|e~i6#RNHN{HJk)%ad)Rcad$6T z+}(;>f#Ob(;x5ICyL)kIk>V2EX>lv=_NMoD+WR@{J7>M`pJe@#EV5?y?Af!gYX((0 zEdx!Iz?ks^qtm|t0gUND6cT||*{&y|QI|CaBLd@zmFq`Y!u-9@@=}R?j0!DoZh-KR zV#tECI6GA%u!?^V$Gwa35?I5zR+}$9HAy86(u9CscGF9;eZo|_HcI|p`}0+d?)$EE z{mEi`Q?6Y+Zov`!=t&@w2CW8=VyK%NASI*=(cD z558tsTYXA~hg7~pG*iPh1{ucYZyxE(yCn>cNZ5Ut=Ji*`cMH_Lubv55+y3W{b z{kYFg?s!CKoMrLSY9(RyUANe>{pHnJ>r~w;T(!!aXr^W!oHe_2-yvH*|Jv2_*CL`B zToo*hlLh55DTCtyz2n@$TH4d?tyZy!?@iOxj$cg?um4HQMSt6~^IiF|;HT9`_?_3f zi9_7Bf1_v``{bCHi*32#-}sY(3sLeFc$|cBcxz-(@1FfUo4EJr(CI%~Ki~e}L+{@> ztkMTQXKoHPp36WZv`P`!BY`({95I$-t4xYtnI4UNxH$-GnWVKt7RE9@?k?9Y*>N1; zcZtVEPBHG8V>xPPx)pSnY{*V6Wo7QvlD$QfrNOc}uHXAoav{u+*Olhz*HPh7X6D7n z)OdkqmM7u71^z-XznQEGZXR?)Od*2*WwQ0bgr?agpN|Ja4jzzyXm*O4N@Yy2fKj+o zogDY;8#rET>nBW8)3>;!IZO}u(fxaCSx-$D%g!RvBZ6oTi!ggQ)y7qo;-YdH)-@l( zKHvn_Ay%<#v{k=}Xl8bqZ(p!W6tluo-upUGcX2VLR802luT+#st0W~=C~(|PRAU{O zrI3Savd^N1V<0VKNL}?AfykGUIicd6KG!NI-uv001kYU!) z%`CqTZ<{`&@q3+bj=BM=VB3VkheY>V0Xt6{(@lM)@3s^D z(^kEnjYrS_Ujx%HU|2-K`_g&w8&i%bUmn@_D~ebBiPmNl1gN2Al#7bRirH^j95aQT zQ70^W+uoXC>|gT?9M&g)0fc|~5>wD49Rm@@T=3&xaRcxMQtYFD>)WI^4%w%+PH=O- z9$2jJX(UqbfnSJ8dbDDlf6@KZ#Ep5UEVV)6x^wKLEB~vLe zdQZN$UHrj&x7r<4VV|Tb7~T=6cBw1;>6#~^izk}wu#cuDF6YHp50riQ>((>8UDY2M z88jvWHgHvGAh@A6;-I169TT{U?#ilegS{31Q{I|!ggL@ps$)-6uN5+XJ|O|U15R%H zzYw7vAPans*{J-t^eoFDB2P7Sos3-uNzg)R!u)-NSRRq}-cjWLj1P`kpeKPr&%m2m zMD2hSQQ$M0iILhQ8tE!aoP^N15=nuw1BrrWrQGp8?vMwmG~Mz3_+`a>No!@9SJzAr z66bXFYST?#l?{VG%B?>L7n7Gt+d`HmNo~hX(|C9=J3E4rZwP(*mwkc2aVO86D?d{m z+I#p{E{D4+>#04~&dyYH8H#OFG>LwCaK2pvb48=- z=njnKTfysPSc{WSZd*WaFD(U@sM&3&4$0rU`er>Aw`i>P>Hc(%Y=-0!EQFzwb-;vK zk;Y9TPwXsYKXP6MM8R^rCtd_#4viSrQbhKp)WnEix|c`E3m)ghcgMlgrN!i}S@F|2 zPv(N_TQf%+nt0@oG&A^GD+bw*4r8XyX^mdwys}+>rKdu+#_m!VK)PQYbg^TJLob}9 z+nMZET4=D~u%%N<^(oS4`hGyFV|s073E}SZHR#%5o>J@Jmtk7@PAvbkaA>tW-Btwc z{#9O;7kRAVMc{YAYQDBtOImbyt42C4^Uc0Xco$nkH0Q4w93>I;-&#=iNNBxkM9>My zP!4eO>S+_n$nqQ&-*aEP$!hpQEE(HQyK+6?4sp-AJ--O?e)LaR2`1`e*849q(meRP zgzzi`E*Sh_Crw45`eI1R(@DTZXLS`%ySvYw-X4p{#rCF;#KFwMER-q!TRQ&onSgTT zZhzVq#8bg(XqecRf|aTaBow}KAPn`5eV;>HW}uSM*w3E{=~`%d5vt1KWi*nadz_No zVp2ZsrFC3gze=MIKVf zV9=w3r18gbt%0c%M=nz&?!-#fH^~Sv4SQ~Sd%rgHz~W4+t`G|2Pl|1sx@IcYDm;q| zT|dQ3xvqz3U0x6E6R3v7T86ak`3qi}dHtB)I-KT_lxiUw?+_T0$ z6L9P>Dh%9RB>MYO9>u3wdGFX^&R>p;JYY;HbG)}hg#>lo4FSr$tWL@h$X=btujkV! 
zD{n|DtMLwfN3`M}=do6~Qkg8AHn%Hfxze}Q`8h+H#4H)a7c?Yrt8B-Zrk)bfzy4Il zDOc=KoUs_B!O~H&HoXKYw}C&X{-#-r;HQDB;5!ru5hPxXqpk z@}gQUC63vPM8l-MZ`H5y^I@Sv!bM*)NsdNryTMJ1xXId5b<^<0Tnvma`vAP6sIp0> zx$HFCxz$}(`JRp%9rClhOFgt~x~_E-S(fYIl41+Nh(K$m_Se1mn73Y+Mlqze*U10) z3yyK9`u&4b1A6m6@OG5n-QnQj?)8v1S0K`I9K9Fc`0-K3Ys2^1{qCpx)A+?I&vM%r z9lQDe=xZdQK&O`E`2H{U={vw#j6cEnTY63rK940(-Cgix%aC~;1kDKuj$8hH?4OqE z-$0hr{2j<DTZD~F&!yi?)G#D^s1pd*71mJ z=HxoYZ^tjI+O-T~7Ur_?{ADW-xqhH!_KPws2?%_EGI8d|12v^W1NMy<$X3_f#)yl1 z6SlRc*f%LeU(6wGEJA!!K~6NtO)GS;rZ_kStUlR4Vh>3Q0P9)gFN4{H|B> zcYEVX@DtPIkxE@11mxrW6zo_)Oq3E0v;3;c!;7zZOANdwr}Z3+oPaQI;A( zJXN40@3pumlRNplN|D=@i%#j;Px-Gqj-#&&{qKIR6^r;E9~V5XJ=p6T9H#xnD@SLB z-@N44W&QPEULpmT_4q4gE*;2k>qO~$9xgqlN4DHbKwbBjy8I1|e=xKAeIEE((J6zm zp{D`F6Au!u{?gwE`QT|FJHVDj--gV?Z!5Bq&+lAaW&=@GDc0W)y^Cb(;G2J5_w6t; zx6l~1B@jtB)21_4%3{-t9^Voi57>yLOAEzyUQgEHcekfq&z-bNpfT&;yMQkZRWLW) zbKwEnEJ9Z{zS>Ymiz~{sK&nF<&9LUwfsEB3?pRSk!1!m*zRWghl!!8vJ6o2gT*KlH zPb);DkYA3nMFlUPxB(9Qd{ju6f{l<=&!t4WQjJpFQ;h|4vwFA>Q=9bZpLCz(laHvC zk`Kx%-6cCtlMY#q9XJCRG0a=){(>6ws;I2`uktJ|!(_q!dk zwJO`Q_?K!z8}<5X4Rk3kf$Vav^W-Y~otm+E^v<(K2Okg1rw<@w4$MzFb_VU=J6DXR zzaLk0-(i~j% z$nxo-W6$u}$cpE-sP_JEV1eKs1jZCh~=0>VLN|Nl}~feemYh&RWtw5l?tb%6{uif}&aM<)AEfzIdmyMtf9ZfSRBNnPQS zmSh1iJ8Fc6<6a(HC{qLqt6#K;B%}V&3Vj07exjreEKnX_1U}-|e6vN>R_^k>F!leu zo;citYhtW$C~ZE#=)1pb>Vodzw<* zGd(0TO+*Yl+l>n^$-dKbFM7Ku(>A3@2)&=cAJt?^U0M`9;I${^n;tdMLRHNQwOniZ zW|??+vp3IuK4TwI7RMhC^}Y66rlyMrq^9MrABU>r=&S&uQ*r|JVt0FclKb(c(xLra z=4xC4?qEB0q*sf(O@s4OF<&0QD&cja8O{Q_^^(1a0yrm%tSY%F?KTBJNKcd(WuxtR zxM+?zb+X>>gp5eGk8*5W^s3Pg&T=A8SUcBXxMl&2BoGD>uj)c?vtDm_`ZdKGz5Md+ zd>-@6;T!wtg7K6!!+TL^N^?Hcb^p@XtQ9^|q*|#xeDy8dy@NR|?~HTiMVYEeezm=9ewkJf zs$j}ci_bC<6N$(y$C7qwOUJb;ZPpO>`C^qPGgH_Xs z55A-NnlYiCs#%6ffI%#<7+hfdk~8TLI+ql*hm^veHvp_U?95wrgD%b@1q-4qy2u#2 zgT?{;r}qi_g_9$E??AuOPy>95MCq3HLSvts8tNZGfJs<;GL6t6RFx50v zDk8>7PyLx@uRtXj0|0eScnADgG11h6>6mWdgXL49G4<`&^F9+?M`hkzR*B$UBhksmY9iUF?@48^9~Q++>sK$%=UVECwk0f(yl3H|8@{IM9y*Xa^Px7C_&^Uo>JB zj0*0b$atM8n$v}a^KRz5nD>_?A6OpTJnpg9zMXnW9$&TKlGNXmdGVm+7i>Y*o$tB0 z`oyMFv|YNiCgZ+#ZQObdtdn)U0|vX^FCyqeNJJUQzKq*`su`|09sk@s9V2HVnb85< z%sW)iHExed*+&=3!yvmKuTR_W6xE1GOxQ?T1sIE}yq~RGF?np&t#;f0(wOy<7g2}4 z<~4kkFg@9Zq4RF%NG0^@8u3e&{v_QGT5E&&)g2X)mU6g+c8YF<@icTx!`Xz@Zz_MW zY1St3$yNH$OqFTd;1uZ|KT%i;!NHVw2{}-=VOkRPF0T1N z=zT->Wz&Hx#ix=prbSEmC0A0tIrFHIthJWwJpHkaYp?RLo}Z%i@zw5YD;Ye3S`(vk zhdrgKK16+5xbz3cj%Ab7T_+r8f2iC?=XiYF&T}Mmd$1Qv>LGWxzxd7jRR=U@Ab8JD zgBPo7E&l&k&EEp?^TYRxVzg(syJfVhv_2%lyBi+=RN=c>Eu)i6GHA88m32RmTr-LvN2GuTL_DHASUQhd=mm!FD3kc1ws#MuNlD56J7iwt_DBg2IfW zol>9IzxutSxt~xO^^00-%I8@KS!OmoiL$2Th-VKx@d#IHRZs7R`--<~0n7v0fjmJn z_2l;`tSDxq#1T>x@pVhD6BWu=;OY0{t!aZ^mrj1IU?&64F#-(f!EZvVlUHbikmtjq zUZtmaehb}AHCOI>y^x(3t(;f1j$o$%_AbJTHVF3LAV`Y^JJF2rZKNMi&SOx>@3Eju z0^&uJz~Lv3lUXpG$}lSOX=>|pPa_LKaHijTg-ohWqb`YHK7ODpS2uG4+P`EC#%KK) zaQK}#&UE3>7_Ci|jF5r7bW#_PJnBKq`>}7LfFV`JmZAHNH?ZsXk6%KwdYH zaNu!HMlgGH_ZofU=>I|v&!aLBpBf%HV zhJ;^h%$Dtz+UKI6G%lD&iMmopI`J;2TR(ljo#IKiZR+N211`alU@)&?TKy-8Kc2E@@vv>o2Ou zKPw9Kcy>#8*V~J-+)M8b=lTK(pzA&Ys0)D$PBQS=tn$L?<1bLzS$I!Gh+|8)ipO^F zMr5ci&DNZ*eEj3K%6+_$*NXAr=>f+cUcZX$lVd`ah=w=du*!j(+MiZ)!_yk5J;3YCPZwyh~_^kbOV z7Ec;dWs+p~q#T90)5XcSC}WF8;>b62G0Ov*sqv|*JJ$>NoU6KtU7{{ivdj)KcxU=I zrHr)5!b*nkE~jis@3JS+tX{AlrtK)^pK}5#R2Jo*y_`}SL4xl{h$_m-W9K&_Dj|xs zoUr5TA=?YYj^l!$R$KsMStBVDpsJ%5tRYN`NNbDK9tR!E^HH_KUd;%!w&OI<&UO_yPJ?Dpd8jmB=3UkVAa7^A4`6xBS z0+zk>u4}RfcMFjv9~fnis5FeCC;cO?Hg>k_WYeA>#PGcR{6yB){1El&(376^+_tqUF_a-{I@x>&F}eTI==nC zr9vyO`}+BcYR}dGq?CccYtK~ChyU?p60J}xgR0eKbb0D8W&3X$VDqovRBX<`Ny!ST zP2M&5~sPnr?P3q^0`<%hOeQh8%khRw~(oe#Oy32R&!MKTA_m= 
z27n5z0mU?R?8t&a|1whm4WX#u(R4toHL!EHTX?-WPx?6-{U#=Ox$zp<(S_vmf6-OaR5v1VUIePz6m~raSolGf+bQgVn9Ojx#sk;66 zg^pvZ*`9R~naGkH7CX%7cH+gwVDVJT5k?$hASVYad8!Pc2Rq}e?5$gxy)9;KL#`<5 z`zy#S#aH-sqF$F@jI5Xh@wjGE=~1*QNM2SjtuAI*XqN36FqONaP0h(V_srn5jc&xs ziwA-_n)gUvKj6GDWTIXgB&Q)f!eWpRjZET{>?+f~C*CjIaA#6wMcwT=07ppmo<1ye zTzuPmw@5uYxtB?e_c_^^5|9d)7LxQHuI^PMHra=M!RY8|;fEox9`*)ojqE}K4Oq%s zU`Yi*CciO-Ns6v~l$~AUgWigCC6~%Fgw4AvFg)1dMeqGTu9GL@>x!y+lSXekIQkXj=<=huo~KoJe036SG zyVr?{>}mnwwJgWH-_lRK#H8~3eAH*mq6rlsuuDIck`&4?j4>a>#80<$-^n%2zh`p* zztV`RwG6|-3b-iI#4R;+CY+OMijN^9DKPS1M>XWskv~YtWUla^iQ)Vt5zwQPtu{; z%MnkLjsAs1-X`&s69Z8iyGto&h8Y#s!hn@fng(n9YAx0V zhhL2K`{dlHOZ5(`ljqU=l~<8zozBs}XpG3XsSi>Y?|#{K4dzwR%jn`UH8L;MFV}1H z`s+hw!DAnPZ-xnMY%e&;wR{E=Z@4MXFsLHM(*O#(*uH*t*xTrqAk$094sC<(=XFhM z`YXgkOJ=(^RMR{+QF!S6n|C)`h@TbLZU&Bvn|z`CVfB*U>+{Xc9JE^!iXXu+_!vfN z^FJ$_6*|xGvFXP8&MqoztE$Q4F1bTY;tAjm&B;!=2hOnG{7j+8|(?SSc-i z;tH)nIE1wLg*p`29PD!!!0>I7I(Q%fC!F(%@cQRH#v|bCQJ{m%tWoGqIma`*8bc1k z%#VA%^+EJ?2?7swcKIZuFS`XjlPFQ(g94hX46{5Pz}FX`UAK%sj!7G zG@uBI2T~ocRxpYIvQo|Wu3F8Hw*$6K@}}(M(f!U1ns!Lg^aY2*o&hpgMbp9yph+mE zv|y&=j@Xtz6Jot9j! zvBWuEDL0~lS^$#}ZQfSTiWRFJMjc|;2$}ui$3N`=-Jn3sk1{d?dk4hDvg7)ESWvsp zSSgt1zZs)U&TTWjm{|!9uX8iF+S(2=tk(-r*v9g~1W0+0+(pA;)i&uE_zczBX0wFR z9lHm-9+K*n=*2jH8@QVRJxUw1ppxw8(W9%VSI=pi2Y`VWRSBGJuwTTz6%hSytI1i; zO!J~d`9OfWAH&-T%`tQoO}^oCrm4L#C9$@RLKzDMWam^cLi6%7*jc1+jN{QK%(b}E zw~C-JoRCcdjh;w1LEi(t8xIGpYbu4yX52}$mNDyu+*(xe*2e*uL#U?8-8k=H_DIR3 zG0jfrsLX&ZiA5(bd()j25yf;C@IiO@^

    sLB@q20?-FnUXQa8eC&V0|8-S!>1$CwUOU&J7R_*7YO{<|Z&qSCn@sj=ai(9r$g+)9?0c z%fq@oXLZ!6zF*Xi0U!#cKfzb(_#zhdyu5OR;^Ms%3sJNMOFwh+ZDCWqR}eB*F&&;0}yS!0zp6#WJ|<;VvH zp}O0PZL=_fb-b`4Y`Z81S^4%gR6w|;Zr5b+Av0JyT|6ynTi?c#%(+s@69TwP!%DoG z`}|CD-%hh=>e;cIuxOUNk169`)JQ@BWW_~g3b)LaBM*D9mUXc1ha1VSkN}CN_j1lt zN{Lo%eda5&DnXVMrOn77_Q*shJ31lgG8gx>yKeuK?2u5K$b zxZ_y?MEM2F5^x^3Kzqz(oiE%v#&T(S_IxH=t6Pe^OMPNqD!1TUByO@QIbkrHpss<$ zEbBYg-egm+$+8vc!INBqEzW4?M^nOr_;}etFL{7f=(UC&MO6t1ro6^c*3OTnaGCpp z8i!J1h4^|_JLV)ao51Wz*LRJ8WXyWGeRjVQ(i$nJt zTklslzL)mTP;B2ugLY1C$B{-}di-Cu`RrEz7K|Fu|9>!623iuilk%FgUH#Pgp%kX7 zWG(#Sl1w;Vv3>D2tV%{0?O#XRAGPhDX00mnw->-QejyS1XAL|YGM0h5`d5Y`EhC=qY&P#cSilY`E`09BK~1(H`;Zm|{IW|aJOGjCyxQ&+)J67o44&w(T| zR#rjtM(8~zbXIRCUO3Z5qDc}26Z+;52B=PFeCLObfQ243P)U8XNpta} zwpgD^3_+dp0fjQ(zEyTpnoN5mhC4#6MX_f%Akzb)H6krFM+})Py@cY8snGbnckx>-eB*Q)RDMnAm^sH!OI1Hhx{vT47fPtsMD-Hn>v zY2j6kd0#By?8)H#E~1qR=Gu=;ZBcLh71q4Y&i6*I(Pjl>NB zhBel|`(LMBl&#&@50kigK9W6ocs&r2DWIiMwF<5*Fa1ZZ{wY?_cQ;r4(^P5LD_KCB zDlSbh)h=^No~STyc5H`{tpDXCzQNE?-NaX~u@NNMG#GK%ILZ-GayjCg?_XAtw%3h2 z*>5hm5%`dzj42P5C1)|d!bq>%>sDmtU?&VqRC3&a!ue;)5|(Mfl&46}{SEF`C-P~g zpA^99G@-XaDg!kEqR199LdFq=qNu8ruB{oRbH_!7Xu-g?pMoxjMm_2~U+VfA0{D|K zq>o{1Uwj&34P@Ued4XyTI_7K>W$b=yuwD=?P6Egv_GWQL55RrlxV{Ef>F?zQDPn8_ z+>_ql}7>kqAKHI@w zCV@*C4Z=>K8EURKV}fhg%mO8;gZUd`G7b0NBZB~H`i}|^ea6{b{6p!bxSNe~f(F!I z-o1%VIV*z;WAv=n1QzRBvD+6NvY%=d82H=JB>ELTo{P)a*dgs!Fq@icn#@|*@d_(KNi<@yTOTaM0cdIw z#hs;g8B#m-tdWt!+4H=W%y)fTTVWj%F;G#jY?0T&nO!qpHX8Y7ca&W@kdHAS#lx{B%W>*MiaHG9!&10H5f0`LVcmDib&iIR=Fc*wQBzJM;t6)o_@aA=+JZ$4*J0 zCs1z>T5i1a`}C9W>-e-V@kuk!2nEtR3xD%bsGh9^_Zhmbx9q3|YDCNHcDrj+YG-&{ z{XTo&)tIq*dV8_y@8#cW)FsgJN8*E~h4ImEwV_e&@a3P(Saw1@x$m*OAsWoCymokn z=6`drXPYhfqhhpa`RRk`f26{{VuL;PZ^ycxH5W-FPO|~d30osqW&7CTTs3bjS?KNg z?u-b$VKL#7)y!pH27yq%1?*F?*YBnw_cbtF&s1M9UG9rrXd?9QmG!5555(d0 zHAAIr4EMo2d8K9S`b|EBA((kHZ-je;PRBD$GW|sfCLp&3r`|Z(hmM=nX8%TdV@sSF zpSEe!6HLj$U-Tvw6tk^35OC2d)z<822PBuH1^&=(CT}mfgke$_N zNug3V2P{nv?4^eaTuZt-ijY9pz#Jal0+SXp15X)?rU~-78jCj1(c3;szIE?BT*m^g zDBmIH%7b86HH!*k<@tuP#wUnb%&@M=%p%?b9beqc1g(;DDQrZU4H^sHXp?mBo>Am#V)Ov9t)?8;1W|lDQQz?n3!{||{8Y8+II664ENyjcmx?T*n3!m=H zc|#G!xcq&HtdLA}Wz0sY^iXyMqqkH55!3HJ2_!a%!h6 z-r(NHS>T)XoawikJ)6AgVC=|0LSt~GqH{dydYSS|$H$$yqs<#CM>&?iW_|c@H`3*= zp?gk1z?UYRl+++zjTOk6 z8-GZ#oc5|wgK||S_|i9rqC_kMpq(s1QnhJ*n`HM;j<}b;@1;p48J{4i>o-| zo{`)Gu~6`Cn2jmoDc&w540L~Mrf?fe_#xd0$o$~}?50!>Tu7Chh?n(=I1875-EQ<+ z3P937lB-jM&K&6bX@1$bBEEQxW`rq3V_<*q@C~_{s>}6s8z^YYncp2mNe4?{Y(?xHXD%mnG?Mo z16s~=k@DjG?payByn%^VbR!N!%0M^r1~2XNwiQ?Cu1*c^(CaDDIOFc=E>1J4doizf zb{>AQ3aru8u; z(xqHv#aAYQiX7WC^_tiCa@8B1(bE)E$yco=WPRxh`73X#xfgO47ngk)aOLV)DFQo9 zIl}4))Mwebfds=~_n{`n&g^6*IVKak?!$Lu_yxyuw66C=hb(nqwqw{KTftAhf+qat ztZI|jISNDUGwI&kBpf^<#?!OJbz~p=rg8@2B#b(n6dsl1`Q>s7;lQd7?GX1Ug@W*~ z(w61IP;R}DiiVGHG}f8(29uS8*M(ol5hO-ZWCp^NxLvH1puI`vMVX<^^`*UM*Q}wY z=|$oI6rE*&mNcz#^nSl-!$)j<;me~7X|#4~-^({F)j#UT{SO8&o_N;$QJ5B!{`Skp zaLiS|=Q7RQex`r*BYmg=?)<)|XU~E3S=Z}c{5$huaQ44q@qbUJb@+egia^}2%+U07 z9L1h^>7If_uPjpLcj9vo*Y=zcdMoq9QcqPvMHUu&$ORw^)ZMN~_{BF)2KY!i##-Y1 zG=vV*hTiyV&8gWsB_X5-qqGc-4cc=d5%8RrX?M*8KN!zHus1Wy0+vVCZ)VLd^L78FK$%s zLdAYsg}sTeldE9)!m%8j{PDq3ZO8=O=1c|e4&By%=UCR>)*^cBG0(71r61DQhuAKG zxcSU%@U*$rHJKs;q52K51UMf1OGYwi?OQOmZjU5;8A?y)UI&k3(hU~5@Bi&up^$vc zyNdWX#JacW9S!{aJ#KpTPOE*{p~QK4=7awq(Et0`Ki=eZFZ%VaM17@3p_JcJr#LVZ z-MwWDantnb^Ma)z$@3Zue^mCdtFaDw2pCfcg0GE?dhZY`ApaRo#H_(>P$~I@htS7ZTt^cz8$%}m zHseG#T(;oj9TM+C*8rgeJc$8)CGNmmJyy98ClDp(0S38zmeON^*jQPh!fnjT1Xa3M z&H-MEF0aOScINvQLZ-}e%3WGp6q&CZw{OZnTf!8+1k(hm6PzS04QW!lzu}k}dehUz zAx2itHR3Eq#@By8p?6ufyWJf$lVP-qexvfF;-HWKQDWN4#*yEgP{44<&- 
z)EtU#B42+(up}r08eu)uAR~RT2B@eK4_7iNh8`X`1K&M~=oqFj zY?_>a%dl1{Y|hbdW#3@ea2hT5h6mPhkmYl9Ho|`Sh{TI!t9YHaPrY~;&eS7e2zg3u zIR1#D1r-urN^9WKRPpE6xP6&(vIo5nBc-3Jv@dqGXmnbRLvTFa;y7C5rKtNU*~8g` z;6nD6jBfHTKA~0ckxhn=7t=o7J?@D)OYnTns)_s)5%zGuZFrS65ix(8RwXLi%&Q-0&kA>ov};+PW34LLmzw_r6aO&P-+-p^j}4DH8v)wtIrjV>anYwp zvC7c#H&s}x|6$Fnb6 zC5h(9IKI(K^Xs3|M?v5yeNB1Ot_R|+N5+xz8=A6Hetu! zFG4)sRj@+9)n99&8_BH$-R6eB$Y`O0G>|6WLIVkDgrij1ChThhzezh9&C|U3DIdqi zx&K3&XEqj|r7=WhO35)sw21qu(L)+X%SxrMV8MiMdObnP{3EAWiU%&Rw`Rvo6izSV zgD?>X8F;EW?aMgxyd`@AmwIL0gkiWZSOkc93ZJce`0;BANuvoN454Hl*`fj=k%9Th z3N)lmg!6k2THH{w6j z3IjRrNH4uRruPsj-&B9K087d}I1o$8T@`Fkt?um$RKV9V7kBfQ)lU5w7SL*QDr+(8 zNy<}}1Lh#*$XuTVWTSAJtTiCOToLA=UFlh^Y()rHNQ&bVIK zzTK4bX-N&Td{poQ*EF$Bfyg0QBWvD;!w`Swv)19`U5S;?m)e-V?r&Qp%NnRlzv0ej z)r%A#o@>zHjpE_;8&q`5Re=4T4BqM-?Jy7=vO&XsnHX^-OV;SF2LW%I*%5{a_Ex3=!>vX|kt`?^CpEPZPR3 zHUEnZqZ?5_`L`&jF}U;f_qWd@;?uMSU()IKmD#$&^TqT!Yj+Vxqj`W8TUo}>QqLrW zV1XxoaN8%$hrpndSkz5~IAAv$sRCAoJn=4`W4`^n@K`ow(HfLf3U81A7#@JvYCr+F zm=&;-qF9otUQ!au2?V%1gT%MFn z+B~HDs@$oDO3qF1Xt%8$WCe$j--N>v$w>!dN=$!-L1dE>9rF;3(rp9mzbyB8+@0II z&~8Fimf;xHex5YJZhSbt_U^6HuA%mjInt&VChvXYmM%^RjFHgqRr|Fi_mIzDG?nyW zi&jU#JOzWiA&Qa1t6HTw+)ZxQ1fG$y#**8*V5J`L3bxP}b`|IHNf?uuYLsA)=1 z8sqO>+Z^M+-%Aqse2aU~a@HuQZ6Ef|9bUY+Ygg~jiWaXxVMYDWuq0&sbvfqg(0vAo zVLb(td>a1=lD}9V5h)p~tu~ zuBZf9B9g+}XMn$MEjhE4f>Pd~@q~U-(*Sy!MFWq1N2Gd0zoa}{Ctz3ws@pM3GhH`= znqooqmd_NAgT4;32ajuu3VTy)K9s(@m1wKN-}qJka)@)uqnEzQxjX-(m-4pM1Z1p$NUWAb(>tf;wEfJjl z9cknk?}dTlYdtfby>7(Uw(+7xa2r1m^PZ7wtY}ij6JfQw@fGsQ-8@9neUhXRY;$SO zf^ODPbiqjc6`AqaLVcFx!EkYtpLN&OoRh7k{Ls-HquA4XH5%d4UKG*QZ>Ms_-n?9x z6g-na#{z%ktdJdvHV~r&5%oF_B;{nh5bUK_*%Tu*sFH0>)qMzdOaVvKOwuYSV=*-< zg6vF?uXcp2M9t}i*23`+aAImL)79grSod5hI)Gu(5OFglWhdW zyg`Di&Rd_U?kOaLN7N;wM(@y4DSTv7RD4sihPkBcLSCeecbU5YR<#2ae#8J2?dCt# zVx-Y>7xhof7PgqxW8bR!&>uG#Yz5;Ae=go$n`y7BRyWHd^<;zjM2~wZyN36XRAjh$ zJaP8GV;3rTtl&UZh861TV9oUqt@VXtKfcFbL~Dt^^)v5(525_HtO@#q)=;4>_jiBB zH9M*DSN+IB{JaU(&8qJmUR6Ij-mN}g4qkXUOUyZ2|HFL$zW3h~dyZ+pSxgFDYTIuX zqef3I(Y=KTz3lYtXb%!B7j2$PO5z}Y#z1Ra_ zy1TTS01$gZ-yE1sf)89gN~`;EqKXAOPciW^n97WJvXCv_SNZ~Y#62zL9(2e`{E$M# zQ5PdWNJvQpN=#z1$hDK4$1!`AQ%u)v^$|WeEeyxGW%fbegj)S)%l5Z;X5}}L6%2^3 zi<}S+PO4SFlRP$yS*+=8mwKjf8WjpT1fgikXV$ z^9V#S?~Jdjn|pyMRTEuTz&It71H%o=`Vw*IfTCMcR0cU*x93_8RoNrB!472BSkh)@ z&i4j{N1c}zM^`$d96}kuyh->?p|aHO%gwx~!!38(K$GP3nFu=|o{9e9mkZd2zc*Y; zenH=*6z(n8)GUoHjf{l%#}B!0nr9Gstp`D}7X5P2)PjwBsGsz?odse}5grahk- zx3Sh)X=tlvQ)e(lUrkC>HhxIehP_}+hHeI@7;LED{9;KN&Vkv$x9GI=L;bQrNMFp?V7I)7{Ol^cAGfA^$&5#Z-U)Ur)uv{nq+cU;d5i-z}jpv?V-3 zAXA9-y~^CH8g?Cj{GX4fLW6zfAG>JAbk@M{bIkfc#nd*aQbLWbgELlrP~l6r<>5VT zMn!k+Wpse+7(DjG0w7Bgv!MGH2=kc9oC3h866+#Jsr$AG4Dx}^et9nn3sP0zRH_kb z*qJ8++Q6b?vD7M&1LDUTFHwq^$3D z+qJm?fEJv7%6{}YEME{}J?`%zSHG2a4bn`y{|3slyRv z?a8Tx5Vwo|YjR7~jgjP>$ZaSYAka5$(&N z;JqVd?ZE#oyVekF`O|jjV_HSi^0?GDRqYKOx@hpPp%uSM3b{h1hy<~UeHtR%h2rSk z5%G6uyC{SX&EE71q4@(Z8u(V=KS6$?`zMM5`#|5mu6=|^?M3!Ojl!XaOpCmPI!)6? z+#lY=Qh793!O-C3SP_WfJkJXtHq*b^$UA|X7*7n5C>oixsq(zcoW_n)A&EqT#o?qd zVm0rEjMLoa{2$WZGAgdMNdv{*-CY_9?(Py?g1ZE_;O_1Of;$BFU_pbsyGw9)ZMdCp z=A3V4-7|A%t^21}ckkW(h`^A-*mn{bU05pFc?lGCxQkf76lWycEdt#qL;L~XIbFyEGP?t}F_u6NK z!!B}7RW<=nYD&QYkhBx31%5pm^?!BrnxwRukiI=~_%1Dcy8s&K=H6$j)%;7N>;?ao zD>r|S1F6V=jI8~{=1K3oEMf@ zVgjfjK8P(EbrFu>QEd9W!Xdt6zh9KYUMftSt}O;GNbd+3Rtd3^=`PP5ZUkV(-E)1s zo*yD6c$^Wt6K!IrJ^g>du#tZ&lomsqzl!_V*b+h#8S4cmY5g?@P)`ymczCDA=O(Z? 
zBJe{&*ooAHNa{xknB+(fJ59w9sNW~&(Pm)63$N`h z&43eEj&X|K4ety-X^XTn23;GfKi(<|L%uxoSnr291B0CTl_MP?rjS|jmW!{>{wy1R zYl`vK0bJb`RzhmMU~{@xaG|!}`*a7eO2w1?GZ*t7@cM}WeOD$uFRBj|3JWYT@KIuk~wK1AKwRPFstnpNx4kInr~*YGyBR_F}l&z>E2qa zNW)-l(=%9X<&0{^dR%-yDm@zQ>ad*G1sY{F$;0x9_?*6`es(n2T!X5F5{OcfeFoDr zsSt*@;$&RGMieW_)S0#YH|CEM!Mv=NC{KU{Kn#E>vijgURWidtZ8YbUz84?Y6e~_d zN)m6K)9h2d{_@1)aX&NxZN;epD5>`l)*4Z-6@Kj|m@#kaz> zo?Ike06)CK_$al1A)1~dRN)vv>!1|`C-b3&r{OS>_riP0s`#)`LWxS+>3i{G_H`@V z)1krSXi#^<8+?0kp?`yAA*Vb@xfWX8rRIWQF~P7Z?PD(Q1uqB7;|R_A^$R-ncEO`n z_`ozj72I-4I{T4(d{}}KNybjU{1!o167x4Eq?i;@$C$HTw>k3o+o8EDc0cZS__l^#o>%RjnISS}mFM z>(HISew2Q*sSuq9Q&6kE*&JSIWX`BcevKV(!SM@M68L)IqVkP~GuaNnpPu~<24V?< zrXaYpIVe)yO#fLbk@!9^2JnV&nD)7@zd;~OutH&e%6_PJBB@aohfGv{gl%rl+iUVl z8ovmNWjLoks_a`W_y;<$l^J0_zahqI=5)<8 zDq0c+S`rtbg2oZs-M`FiW%nOSmWHGr?( zRMe3JT)q}kG8oQbtszG70TQMepks^y?K!VCY53i4gdfpu{3*K`T2w2iq%30(%LH(c z+*d(xulxOc!rsg`a`w$>E4Pvv&ytD1Y>|c^=_~!Lj9J#NIi$#4qU# z4Y9U}%F{l2;C^7#jCmS3X6M@UUlsMB$XCb?AsRi$^dnUQ#3_ws)Zvz6zuj2r82OJZ zoizy7lOFLx0!G|0o#cmjB%bm6zaxq5=I@UO1TV!{-h3HoJK_+>snTHP{MiBap;o4( zJ1)TX+tKYrh{t6xZa7z)8U1mXKeA8e?7Y6CcK|_U(3Q~>^RQ}e*f2YQv zVae`0l|S4cLVVshAbqo!hjz(W-K)o%y1Mm|(1%@eUF#&;N==6xTy|B{cgpKpi!RdA zC-{WD(fid5E`aG%LDFk|Mw8iqHsB_LEOZ2Sek2M5OqpnAk!AdAIHHfTu5Bnu8IHK} z+EB-Bna8ctaGvUZDbks{UjKJI_5tO)#A*3aFk_CuKo3+4D&!DK85r%w--%_ConT1g zWLXr`@Nw`iBk7;iA$c7oY2Y{LOyg}OhCk!Mc0;%n#PDQTOdmiy_u;uhk@Z;X;ct`* zA@U}-g%2b8hA3jh3_~;h>VndxHY!AESUO)6}>{hZ$1|E;{#&I(w9f~KB$m<>>#bm>id@m7X?{!Ws^8oE5`qY0WBd_v zehg~86izA9Pp4gc?7@7(zWL-9RmI}-CXH0-HKfVOkc;XT8`mg3pLR`kFSEQ|4n2yc z9IfbLSljhw7;|Vc?8+el9!8qQ{r4(Tttu#%s4Q~q{k!+1MFP5d@&c9gwc?`a{qdZ;@=f?#>S0#f zKQcsuTA#M~{#DPEt`kfFMH$?spjPzi)t){TUzB}|A@Ss42pLg)B^moXB&fSgkhSma4>ZDmPCvxv5=_q9U9;2DVHKeJ}+gU z=mbx%&C*nN0umPqm-><5C67Ya^RTSBiVmDslCV)HE+lI8H zel_pY>B#;N-C6{Xi(`U3VD<@v8q-E{APda?ODZEh3@6n>LX(g7Q#Bqo=~gU}ZR0`N zJVg7n3wYd+TiEB%X2I~a;hJiy7@R8&bD!nA<#fcCVzkY`dgPFPHx{yoPVJUApG>l; zV+_IShS#ka#Uy+Kuc%|rI8{l4l;omX7JXh~KIW?k;4*&)v5aWOAc``14RoxG=x}5v zaz=M_o1k+8Ek{!EZG^-L`Pi8G?kq8LJ7Gt@b26L^cn_<6j1<3n{+KRlD5DQyI;7fH zT*((?3X!m_xVZ;Rn$GxAllU?*s4qZ9#rT`oSw#z4K+e3?)a*H!bmwqu9VZ2;6w4+c1 z^Hr}JEuArjDkzNk1~l2=9b`~9_$3Cwu{b>v39|z#dXEm&s;c~-GZCpm_e+f|Zcc=x z@6V{S(n{^mJH)S`Jn-*<@I_Tn0OI^%{>O*E!iwf_P)GO<2u9St*_1KJd^`4zFLv+R zc#5!(SJ(smBc`%e^ao1hKI&Pv1N}>N*Jkh!EObNPXCw6}oh-Y^RjG2PENo$*q3qx)@#9OtkOYEB z(iBQmxLK$87iCHm!O_dstxA|uSJ>DGFIWbpCy2xpbeChGzF5YASO>yp;cH=U9O)?I zmO{TsZao|6nWca~!#PUip|B!2;-Q3t^ZZz`6G{KPf*;otJ|$~qJ>p@Mku)rY)(25b zwbMIP2@yQUd7UF;QGM2758{rqT!i{P(}ah%8|5v_yH}$p6Nhq*X}@%cu07vgy~6?z6VjbQA_xH6K9o^1dEr8Bw%Z_Z?zkTJ%HrS_ zt&3f%A+0ukxl1d?^|XD;*dTz?WtF69CK(x!*>J^Vgx;jPDgn;y_xo7HC116D7UM{T zv*J*ic3w!m^&~Von-}7+NB4i^-iknX-8=}q_>nnBZ}mCmw~va~Q+J?M7oF8~p!25) zBFTgppg9&@5IA-Xw;@-PI3uu)@Uvdrnp}vv7Mqvqt{gnHE#2I_JZ*qTSW!+dM$wAj zvO8jo&q1Se-EyVfpzYR)8+RGa#SWG8;h0A!!af|?9*sBXRi8c+HvdgxqVcDnH3iH5 z-+Gn$=ciAE!xala?j-jj2f?S}+x?g38QYLztDa!~&;nmW|9Iif_@h`&6n70r^V;67c34JvF|$W6<#QSMBoXZQNc**&WT{B2 zj5Chym;UgF=P+pXILA>!HaLN9^HR<@2L7ZlY zZ~oxr&OY7LBFN&lF@wA?>4L7Gxlb+lg9y}HzVZs0d~OUL8giT=Vi8MFC3?j1m@%o? zakjw?fRUkFfryGwGWsYYlbBB%5guR{5(4!*czqwkF`Uz~U>s-JfuJfap*1~s|9B>? 
z<#%rVo+J1wz-BgiDOtjTEw^af*+QG=)udx670U_mU8{>e&K4lf9;NnPs-gFF8t>6+ zNS@B_Gq}SC8$CJj%Q#1s<1|BbNdOGSq}kzqkFA8T)rID zY(eMK3u$mAb6z8mu3vl0cPN`1<5WC0XC4fJ038+m)=NEM78bV5*qQr?i&Z$5Sb6Q< zX*Oi{l5VX4m+XLM~i(r=TB-Oq*phHz)uJB!%h;Uf-xw|z8#Si zxsv#y7ZQUImM;tp(@7nxheX?g!iIdl_f(omImB2neY)lhd@WM_B?6*A#j|JM^KB&t zM`{Ef_UV+;eO8UFeYJm&H{9{PT@KFfZa!Li4G~Ryxi-1}{Ulz0gY4lSORPrFD1U0KLZHBO{~y(Kie9SXlU z7ta%gCY-{sEpom~M28`x6u}IXiNW2*BLj>JF08oYsa1Op7g z{$k-XTZaO(q-Q`mc(sCHv78Auv5Nab*Ti*X(<3JY)!rxk1DC?ljCd7sXG++!I>v6O zT%O@Gei|fuY=GV8ZgymmZ!}~o4ZV0PCO8~A8B9<@Q#ln20${@AfTn!`IG$*d>vDx$ zq(S9f8_YBr*QJ_$VLXFI`wU^G=YsIpZ`9_HIv9usW@J*6dm-JZRpRDqd|^^o62K!+ z@)slEEh3?_z;#l|C|=J9CevA=u^Hy0PMb3h?T{ZEB?SG+JT2U}NcONoT@3k*a{z8E z@}aSNZcddx>9G1#TrVT&9IGE3FXUTL6a<2aa--TD_$F5FE!`BN>5{t+bALZ_69?Bx z9;a1ffJ}KB9BG%Cijz}er}gN4WqITFiUggt%smz=N&{3K34)k@!T0h|0K2A&nQUrI z12GwtFnU!2Kk=B@+OgKi2Nx*DLtgtPm7p^ zA_59KvD}tLB>eBXD1>ICe6U$*5F3OjxK_B973&5N3_I@LVDb`Dd0O#VtRJ1Np04#W zWK&PWU#zM!WT0aBJ|>p+(1 za#7Z=(e<2UNGxRw8Jq^w;6nh>fGOJsk~`!A-t9}7+(4#%3FHoh3*>MfU!@Th>0|1d z{O7}1uSN*2wBlcRUsu*qrV!9zQ}YN?ti4WW`1kFKV`IT>~2fm+dcQ{NQL* zgZ&|?5M(5qaJO(rTE+KCXo!*a%X(LC>nQD@#3HrPvnc#sWqp?2`Du|HW%wwT(G4}U|{1hcQhLSItoOwX2w>{&GIG6ezd6% zNE=R%+|o==2h5>2NDSoDvnLu6nX{>Wh_^7yN$dm~@ZZ7eyI6xbw)uzyjjuTjqYNWm zbzZ_$Kl5AAd_-g$1WSNI>Ngls#$pxg1>mF<#IAVF=^?m{bbh!pW;4lRBhF8K zo#t~HBB8G02Z}!C+kUL@{g4*X`Xv&bqcK2)Zp5-kzvuOLp$RfSJiArEr1`km#;K0^ zE=GmcVFV3oOCBE?FYiqPREELN&b3d}Z-dyi_liAmr8&kLpQ8sVc7LVx{qW zOdN+l)5*3sleLDUx2cP`3XL{?1C2RHf{dsE#DH_?<{9mLrW*T*PW@4Ub5{J*dMD^#*4|JMkQB`fit z0BBL>58pgGn??FCGvXtfyVFl0(z^H;pEx1+O6pZKf06q1IYBt z1mU)9D4C08WZJZfgUoZxV@>|SZu6_#!#=p|aElS%c(LWjbDZ(a+}|1@nonf#Iuu_4 zw7J6RIo|uNF4wXfw22?+aIN<>Cc3}M-HLdH4xS_d3*@wgv4!Hd!y(%^-kTw;g0t-G{S zWnsHTbn*bM=S{3Gx^!2Ed&MOHVpv%}+onwXwU*Ysn`;MFOXY=Lw-BbVAd3@m5&7go z*B7v&SgdlnPg5aO7vXAA-5k$pl8Y~iPdY7J)Oe)Non8?<-Sz$3{TZFmXmUUL7{M}b zg%0?!(6m(8c@b^~uj{4>qlA1=Zi@eDkzb8^Ol-IMtp7-2`-a~GjK;h*HX&3$FCtoV zQA2PxfnqeoD}T%a9OVr-y&SlC+@o~9Tf6}c8TZ}7F5b~F)(o5SW6#^oy=ww_@zIhS zjB@o%d89p-T+RyfQJg2D@R}NWvo+y7A)FjDJxII4i9+77MDeK#xQ>eWC8!8u&U?sv zzSl8|_ffp>hdi%#GOgL&7cg9)p4_w+y^7&_VFixD2~^0>y*K7Av`$y|37O=6nHhCXq}d_E%0w{KP< zxx(DoygsZ_b>->`-i-1P70og%LXmBK`EIxy`zR!++Ce@+e3l_X2sR}EVh6?*42mWQ z3?vbdLvogg{xn0-X}Ei_=w{en$T3%#+XhjAoQH z3!+_FgI^4E^moj4SAH^9^<*d>SakYVvZpKRJ%1#FGmr5Jel;;!6auhy?%|tGH2eaZ zq6{MJVBf=Fpa=0h?IkHJhAG0o036u~kZT=y3sx@$C{)okddhb1KEt=%HTKs%uGxLn zymc-7MdYS?1$LH+fS%?kN@MMK<*4#0Q&HN@5fRhWdr0m|z(D{XITn=23)Teb$>ZaGV-xntwqcJS-_JKrt~za|BxyblhBtMqD;{7|6{L(?ApUkH z+(uL}ndoXYZ;+gA_a<@>yS3bQcey%X_yBx7){HaI_`KVIrqcm08bv1;4lw z?B|BTJ-N>I=kIZj0!Y5ai2tO!XqK)^g7cAwGWpS1LR@_mGErhz@(fxN#+=jb?!W8b zL<+3%fA2@Roi%K50In=5kW5GSS8n_*#7%#M_ygiPXt(qKQH})+Azq7{XAsG{bBAsY zZe7H-!pJ_#d+aOSY!};xBCWk#yrfflnqlv^tVtV@V~m;>WP;VultNc5D2AayhH)09 z&PdTwd{-|)vrrx4wfIK)<>LmHBZX=Njkp|Lc^>6OCgyL!4C$Jni>-ynHR;E3Vai`f zjw<^`yo;D8WG%6ag~~q=2ZJuS+0b9iCHL3&Q1$T}9)+quROOZBT~q06Ymo1Z9>bZ& zGU|#uTcM?w67Q_%oFSu8!|kf%RTTJytYSSq*6y_NjYn19bG36F$ zFZpYkk)aaI%y(_=@Tqc5ctkdk1J0l6&M`*|!1lyVc>_E5sO&J_W>F9rZi`F9oLYct~P1f0LSe zQ+(YLPUo~~a9*Ze59EoYRpbQ#RWjc)su7umlwzmW85kkt7vV8tJ5!U%E>09poc`mRTc@T5ePT0=9B)4HboA_nY z?alcmUbxMs*?wqJjC>5K>|06RI`{Wn@&)wfd10w{q+*IY!y z-1!~Yeq!Du_hTc_hoD`=Rd?xN$0$@tfemG@@gO;hF@&>K1{u>dvDgePx@KX z($c!UZfkT*zgD*H=zE^~toLHu*{7w6W4(M52{5RfPc+sqQYA7` z0dPLoQ01gc%)pNKg}3vr$9u%=(M7*I-TS*-;0z+Mc=o>qy*n)6Ev7*_+-lciuYx)=e)Zn(<9 zU)SD_YFyOV?qw8woQQl3B_{j1MuEN!v5sznL2CBu?Q)y3yr|s%X5@-_dEU9tVEM@E6$>G%`sp{T`9lk99zN{YbRo$_xw~H{D!c zO&GUc9iL#--)uJXY>J%=X%2oy6VtV-F`Z01#`_qBn(Xgo6EnM8k@m`&k>szZMgqB! 
zr|SPArfHlt^#=dY!7AS2C+d#_e2MnGYBj|1@_KF9eCU6^B6d$LiMlA%!Q5nxf9Tl2 zT=K!MCQ@lfvo7x!w$#Bhv*W5KC{VF`LEFBcqHbjJ6ZIrWe|Cnm5JPcpm^DH0A>K! zQb8sZbIlt01H^wU0joo3MBUFd6)ojg9zFY}n2`^O+qr)n*538dEYh?fC&qnn{lFysMA} z`)l*X-J-MmgcTC-8Jht4W`Q7$d<-#U<_b~8KMnQg0r>BH^aWf`HZmo7VA&TGpexJA z$>K8Z$>@v8s-vD!yuz^YW!F=h{mO~!mNUw+h9N>iv-Vo9Q|WK&B!ZURzM{N~g5C3m zo06LaoBr=4X%oe7UtWlwcTfs(?k??iF8chqc^!Vj8me{O*sGsQ2X;ihEIe7hHC=K# z8;YCftS>cBmMj_?t{-8C!_=v8e;&(GCNlKzYZs1w14^IX*X`>`cKL4%9=r(>hGEkV zcVC;>_8SFN1WLdKIx24yXd~pq@z3ZNjl2ly3)rjX8WB0)sB%*{Qo;+^m{EXsfM4@i zsw^gSgBE9K-p>l(HJ^nJP(Jr8o@TyJE}k)5M^taaPbdFpfVJg?0rGRz2c1?6|MArR zzh52o;UyNge@?uoWV^?dD`SP8U0hHGDTl!^NBZuw=hiX%T}2O&3Uq6~P7#FX$kf>qC}0jb%k^s(SV zu8(csi9WO5PIPOgBjERK5z})v6&hEQ;3d-!f>@aQXTlmvHf0m(!9h0{7ZVWAF-Y~= zYSx={o89aj90qB=)}`8gcRmjT_HRtqyz14GsvqqNFG^l)U-@p1&;T2sOP!Vsyt~d| zCh8nuUtoFsg{^O%)+P-jMCMjH+G<|#V+1@EP2Dqr(iV)nEm!NUy#v3h>j4X^w~p1# z2CbUMzP1Om46jqG(J>~$dw7700DeV;pAHWRD$eb{qiPOF+X>!&R( z8FQ>FOTYZgokA`5x{Ys|XiQs(u?wtsLu6=BK6ppj-GBiJr_$%+$^&nD! z`(4kZXC-gCOG=N4*IW^m)EzsU;K>0lqeXJ{C0@Nks#8P zBp%$PLKm>G^^pAqEt-|(#9=>VQAw4hXU)sxBU0gSMB^vV>_BIeOhyKlK^V5_xOH!! zE5DFX-Ib+TuGfj${d1;+$d7cRDm58to+c-9+~<|ctGXSvG;be1;pfmO!TdtI<0UHj z*3DY~uBtuxCRMm?=z!quZi6;w!s$pdIb-6Bu7}PJzYCNFMEC=vO}Dym{@F-mmG~J= z+XuXgaj#vdpKA{^@_m$|{fmH=yqyyR&!x$!uw313sB46m69ZO=c{gqqJi|wD1jvK5 zn~ja?j=KfjCOxLAzNdtON zX2gfI$L|g?zl}_O;Xq9~G5WU0qtY)od7ZY^o^AXz7hXVL+=Xl7wqg!hd=gPxb<*Oj zPAym`bXaSkD=K*M0T;Df{JvWj9o%fc9%RNL)<+>lC3YN+-fNmg;vM(EYT-(a-f{#* zg}Hu9yzp#K6Zmr9^@=KN=YNyiG36m~kJ$C9@Y0|AGBPev`swdXsy*aSZk6h&#zFmm zb@U$8a3EpZUzM*Tiy){Pg=}kUb9=eBON|5&vao(cr_PIoyPc^@CxCQ8LO`{%_Do!b zQ|)uHz?bRxwXdiOaOQ$0JuJO@SDtyta414|zuev6$N3Fe1Tx(Is7TVPLsW;YbY;-evy2 z)%6VjGRQnf`5Xki=I$~Ue!%Xh{5g|Tn)H{ZbW79b3Ge#2gdL0i-&G|Yq>x^pU6`fC zqiC^|A0aKI=rR;bB3zuedJy)K@4U{~rmY);)2LhRinplsiXrDBQ(l5TGJ3Zo zGWLdIR#S$va1bHuQx(W_^l{>$RCV{4N+>1SJ=@$Q<_N6qVqRi!9daK_!>w>_M^TV3 zbdDfA^|~U6Q&vagF0l2nFZQ$thtL0N(jZZrfD8|qtPlaF${Qh6rSr?GG>f@>VJ4U?WS~1zU&ro z{kgQibED?|C`Ndz(YC%CO?f@#j;q;wgH;`k&FtSPv$b=xSEWu5v6%bkypx?%K+avr z0Cd(bHD@*NwEn%FR}|sE9_A(Pt}CT82KPIEf)1InW8J^b)<{moe_*+O>hCn{pVF-V z1_N}{ii>omy_mHaTFt%1e|zdZ7^NbJl8AA{G%vuwfE@*(3VK|2qoAVd1L(fEF=&A_`{~N- z+x;SLR1fX5)h?q}efDN6X{KR<=JEFpmmV2`%-<*Hcc~%p$YgHod@bhuUJNhPe{lh% z3HRsoyW1~U0rfX$J37kF3bIn8$vmwU7A5PqhtRw0xx3!ln;SO9%)DnWOxZal%Ee-v zGD%;;toKc8$x~M=ey&fP6QA6wMGc8#EuVL~Elv_n8f#OSpbTtm@&^cTUPnmgkAK4* zJlsWXKxmveBRyC!lx{mS<)5x@xFHUkcx=@^X&Solsmh$c$MMVG2py!dp9%evt5m}e z1-J6uk*I01qs_VI8MUKR?9TCfLaq z@LUS+W@fX!_4Ie1dAdT&If4dW|pL~z?f1KufN?0&_3^iWJM4#HH4|F&QMO%VP`QhcC5;iM2mdul$NKiW8)1c=0R zb2Ge8MT|&iWq3GJPO@Qn3?~}80DJzxCua0Osd3Axzx!lHtVQ*w*2T_^c{Wu-W{(pi znz0o}#**33T@{~!1a+yebY=yCds-)${!npc(wo0!H-^k9bD3W*Ta(b3ziaXD*9o+* z`>8hJQ&Ok|10+uFW(K|{Dh!C`5TKw(TBT8bQPJ|>{TeUWpJQr>n=9U$YVbzS)t(SO z!peqjsQM|&${Y9Nj_uA%!k~^Q8zGi>gk5qmj0Ob-P9Y}e5*mWBXiL;xb4P8F;Y!!$ zLJu*wTSKc-rEB%@F6!jU`BjdA3%sZFzET!vqB3}}vV>Li4t^J*-xE~s()HSHjmC){ zreH*OBdwz%j&IaF$WS3@mD59zMt0`|pM@V!?ib&#xP?@iNf#b*-}c^Z-ft^}t)7J= zgkq96{!S1DNI~Jl#Laf8!G8_j|M&Mfu;4(qb5CWT$mb!lDrKhfla)f$tMJM8m^J6B2fVE0KSK|OFVf{&r4QdQEs##96b9$_r#gJQpE>KMO0Txf3>j)k9@ zc3r*gu#XBes;ot()|g_NAHJSG`#!vwRiSz<*1T*#)Rm+)xJ$Jk?PK&4yYo4cgpQ#}zE{D-QI=O~vo8t(1+4}Qnsii1d)3p5E zLFUFDh9vSZ+kNBe_3?2D1vUN@_^>(HmcQp&+T7Izn-3VF6f7+C&-{{XCrb3KP_vHi zys)pEA~gk%9QuJYjY&O%L8qzg!%5(HMPlRplE!e{r~VxC2#W4+caf zXWPeCtm{}ld4d?M*a|au8d+X5qI%=rU@`vz~pMx~+bYtWKTRFac&ZA;*7L%O)SGnKSBw(W=$D5V1j@|r* z-Bh$_f3sypr#T96m0dWfl%I55H%vxxcEQ@kRn(SOU5ybPU}%QoOu;0&^TQe>QZ#>1 zOvQVV=2}>mmjLBe;_#pXjLUMDw*!FUgB+9)Iz);8^~pWA|7p!~P1n@)yZ4Lm+iB`) 
z*V9<_C&bq$DyT=Fe9BI??A>@Tt_kd;qm*UG59dNf_7NdL(kP8ZOa;0w7(G(sB;^CSHRaPS}fhM*tMSK{%!Z)Y}M|iq> z9M^9KzHaSS!JoE-`aMVaB8Mi)aLy8=0IZUU!`$VYsOP&ZS7=wR?XT1abDvBG-0P6- zYrIz&8+GAwUygRzH|t_7XtQ!_Gp~5hZBM!V3{Cj=<$e5vS)Ma)$ssh!FDu#5-?Pi9 zK+0wU_d5LkrgS>^*7knX@8VazXjqr~K5ba%kjpUJdqZ(K+4VH!_r?G_S;2ot`JdRv zVh#u)b3SXH!UctgYcala^nVn4Yo0n`Rmtz}q7NqJeLmRN_ytXX4EU3y*7&3vXB~qC7qL-tj{tqj_H<%@1^;uEKq<3$lLYCKz%_ zb;bDVFOWiDXoo89^mB?lh@FvzTnaMB1;3L%RHV*pZ=F={U^Ww z-xQ_WnHrS2w(j^iWwVZqeo{@2#LGY2+Unl$I{C1=zTP-(X&aA%YS*c?V#emSd^nNK zj~2^h4I1Kw^IYw-@E~s4WAsq*Nlz8^%71ML0pC5B;U0c-c{zvcNK_E>U)`8?;7);! zo{Jj|%wC{mILTMg2vx8sL#B_A=%@lU zCm@E_PcBhb-!CB0FdHi$64rTf+IrI7cK1uy_Lat!c}nT}Fuzzj{2K*=27-3182Y4L z5Ipt6rZl1=$?yaISv#HPNQIo9jOMsG1x`Z@8#;5u7q~8SHyh!#8~woOJ=X23ncJdk zIR?Z0Dbx+>JEoXfdz=vJ$CVujl{(GTO-h`1!@I{B%4v}Md7UVG@P=x*0h}FsxJV%-HUmbz0xBROlaf>K)QCU5ecLf z5zY{E19Z8|>Q=0F$JQ|H1g&#$3s5~&3wDv*tYEgh7GI(OL3Mh#-VxkFnS2MstRM^m zEtc$7JbegOg8!MSPB>19G;Ft3J=l7zEGw_|fR}`(g*F7~;HFM^kDF+esaTU?2>!ZQLkteaiQ&$K5!`^Zw#gd4&=8H65!9(Im# z0v4o@xBZ^1tpXwNm+2Ej*}El9-ha4IR`{dDH5ma2ew)k4_%@ z)&!-rLq3#52Hanu24$yUY{0^>eJPBChE0rzrzz!>;<(sU!kh?1WUOpNIS`~U3D8xu zKns|9D^KFX^9Z?Y$zaV12~=9xZu`ecQR;XYlYqb1rW2l%Qd9dnFE6PcN^HD57i`|O zpoPcut=0kt=@YT?2Yv_iLegNhJ*fKhW#rY%*4e5}t+QBbX5zHKwCHRRY^@eeTk0K#ktKp%k7YZ2hAxW0LYbb08ujKk1_yDt0xy`y|<%fBo!LaE}M=6A{@;*J=l=iC3?$=K^@mK>N|tudE}h$T8=m(;H7h8_l7a z;VIAyFza;K2l=zwtiizH%3}Aw34<{n{7$arGL+$7`i?irgt<+sIrQ~vss5R3K+4>J zh-xC-*PyrWC{>G@tt*2`9z8eRgrqsdoO%-Lp3i{uJ_h%pTI`A7AXZaE96zzDM`vzsA_)jVTnaw*jyI>5eY~M$9(xdfFyQI zE>4hGtHCb4pyBpLA)e^ekn&(h(CBm9BZMjhxCl_mR6-=b2(?#CKe{y4 zQxE9upLKa$w6;urvBSOkped0Ffk7JA08%h=%p?r_zJ@mdFGR4Tel&;$>?X zCph5zQZvkV+!9SDT(@U9m1rKWm)^m9dB+k*Cj~7Q|E&w}A@eJ+TFB&Pp4<-})Dcq2 z+ww{m?O_ce?xnek0-fr5+j2?H;GK+2>7zG&S(Cs%wrBWg?DSA)UAA&eou_**NFNtu z%%0v&TAqZG=#kHILsmTE&OW56XcqwJ><_yBo82zaiRva3&U>4R7x4#(Yfi!_+EJ55BfjTx_$W)QcX@k_HlZ6yRJ%_GE-y#FAjjN%(~=3>6h!0giOkJ z)?0}Ffoxz+n9&aY-7F9wz4^9Fx^m=Wg>4qb75Eq!JrWd6`O6tz=`g-HnOs$i70bs1 zH57tZ+R`pih6^=5H6a3t0+o^^d*#Xs+5swP%+V;=czXtZj%>~X)p)}S<}U0z6h^q2 zLynFFdpX_6+_%ExGrB%_c?S!fC5ug=0Cg9_9^_{;wMW6RZxPiu6Az?I)59~=vAdEs zX{r&oL09ECb#Y{_j%H1!nU0%cU}Y&%Q%l*nlMVBy;kOwaRDi? zG6Pl=I+)fz#8}6&D?s^&R+u}yS<#P%{y3$Bg?4}2UJeNp`dIYn?g;oVX{#!D^!+@L zF_{>EC|we!xm~(nqD?r7O{#jk{O+ghurFLo>y1!%h!XS^-cgc%t`-53#7rAuz&n1n z6WWR5Or@^02YbQnnvSx1)`BH3{-lRFPIAug3g?)}f|tm;D&4x>s@@kWkuq9n6Gm9X zqNTgO36YKvJ_`A>kCQ6*1z?xDPxQ|rhh2ueMxweh+N>8nvIvrytBCVHdjn8 zP{Op>PJUY8H+eM?8(GHphL#_>{#Q%#|4!V2|J#$Nyw9J(CGYXDdlr7stwf}?1~>(? zEV!pfhL}%R3Gxt4<|vBV2Y~_%S@76 z^ay?gFR#~EhAB$2)NpRv4dBHgni*Q!KwWaNG@&u80xWZcy~)oZ)u8 zAiP)PSc$!uq;KVrsl%%GQ#~>*ro{wn)!ie{4j+U`+82>CtqHz&$gw(MTSi$RwU5Lk zM{eaos~AYSNU>Fwp!$~Fml_&_oXxBVTo#Bi63rwnN!cI+#}E#%l42{q2ACrJ#lwfF z%;Noh2m7~W(|@~JpdiURg+FEdM4X1~!_&h3LdrT^0fIyvM+SLmL6D7N1LU{IQ{TJ| zCg2at5=?qyjPZsgM^f%ImQwxP^+zuQ1_ps+&vMBmcgf(3km62Rn*OIy(cZk1Ulu>z zRI(%*Nhx%iUSEK7J*>2ScJ+&&rpt)NUwl;Zrk1PN#eZK>fyjb_-R-}%EcESyrJ>i) zjA=<7ZXWPVtPu>X#|cyQx|q!MX5UBPDq6CU$g&MeGR(sD zrMZp5_*OOEE0Az}R3hlhB4TR@32LscIT13`N!_4WzYjjy!dPkuHr8e$Xa@WGrqR{( z_^vl2i~SBrI5EQ^PcVBTjC-}KNPEf(M0;}f{hcnya`MyYW5mDORQ+x5)h_xBX?zv+ zJDXP~L1!$FmGBSzj+7P*MzM%Wb4AcoG69)X%T@_ zkpoH+<~auLsRBFKtO?}90%Lro+*sxYji4HpDqj6wH4-7GX|2qSZ8E}<$$5&OM$-4< zl(nmCgfRtP;SaD&NYol2BK&Z=cd9(9^br3O3(fLzq1m#m7-GuN? 
z9JNCAD+UI)?*lEJLdgB*rk`i8*R5?Gw)uyD;bF4VWNnDu~zD%6*zT zOv*C$-gZqws^JrsM#br&9g6S(c42$SrED?>^}I*g!jiY)eXSsd-`1=b$A>a{M8T97 zPY<{3(N7fG|4u;%3^`DJj#-{-UAS36X6&$+k{0&f-gB}^!jP07Kdc4;5garb{-?O5 zVX{*3Ard9KJG=la!uJ&X?t|RvW|I{5)so*`IR=g*<_jSRMxbLBKwI}8&S&yiOd@fNjB%L!Sb4&B~k=L5SvP7<#kFMTC10I z^w(0zF~a)C@C`7_=P>2(S$X~Zfn5|aIa9k_vZcv^r+-06<7G0c*A(R)?!4~%`4VH3ZbxZWy z(e-#a>HLKJ8}4-YDb*dt_&gq>QHl#7O-sl!L9P6h@--M=dYzRd8Le*lW!{;(Od+jH zJvwCS`iiu!2Bp)00Sel2q&4wC|aE?0po{HE~4dNz*+W1$z<;! zkJJbnU(5meRS(nJKrBNs<$YX033YSGJoVFNI8Q~IrpB)n?MTrIROIZiDF^+pAz+z} zl0HL|U+dc}EWVK)ldEO4ReY{3XjfhI<2E95laUjLp?BAtdEI|B{AoNJJChLA;M|@B z_Yq#g*O3|#nowB|Yob>oZ%~pkgLaT0`P~%4WQExkNzo`t(LQDRF2|sEHBrk}*T|L3 zdykgG!s43M@q7T(#9v6Y(RdoBFfNaUbLI7~xXMZxHXXF%nYip*r9QQ*{A$K19ifq( zhPjyIX_$`2R3^0S%UW@w8NU}gTM*fJ(cB166mWPAz02o6P%HOM700c#xJP}41k<6Z zP^t>3KVt=|>phQRH>EolYw3#rH|6p_3uu>>za$<*UXwd1Y@;j~$eB7vH=u*~0Xn~n zctOXYcXPJ&d{Bte{YAZ&0&6rSCDr1_h71}i^av>=yclRm!!qZm5j8sW(Mu%Z;SR(9oD@Yzj)TYCCokLawA{n{{x_;v=MVO~{rBwn; z?Rew~a`;ZWwc(tmc}xS2z$xe*nM2MP>MZBfi-SeTPm6%@L|wGK$otvss5`@$ujbpO zX|(EbRD}J(OW{8?G~FTwc!@nH1syY%BdE3sX&2ddZTT*>u#Nv%TryX4hA?~VjUyNr zI6Epp@{r0Z9ACJ;Rp8V|C-S|1l%a0m7-LL|?r#?6io@I(yeW*WCWh<6Nm|f0JQ8p} zn^>%!eeBF}AmJnhpJn^|s$#_LNGInS00u48LmqrpQ>ZiaCCVQI*xTs8%{zTFc%l-` z3-Rt+*>L~Xy%kWe8wS^mdlvyW(5=9Cs2eU(LU^U<5;(QbD4^cV-mYPZ@BL2#Q#)6wdAEB7pPKc8J7CDzdHayD;uPfW!njka!6vYkOCKuf7 z5hIgHlLTBXG1IlPCSR?`6y?(#b9p)Mku6DJ!S{;M{XK_c?D4>U(y;i! zslbOlJA&*AAr29;ZFe~^#mx{7wy$r~A~|hU*3PS;T%2?`=ediSk+&exDI12@i+|}@8Z?0l9Zm$pD`85g!Y)cKE&pqOp#$bc-U4=REIWlp? z->||hfK6zWTW88R6{$6Hj?zY8MUWtp75zSahzA7y@UV+6mzv55TgpBu(aGCm03~T! zUvNRz4^q1MJC)xWhG$i5`ZdppKevm+@Gj52lf^|x2s6(t&?a@ZS~NQPRWI6P}EyUL8Z&o zgj6u$ApRty%Y>aGorfIrtxH_aEY@TBWsjbe@+RN?Hd~-+!ENN2Wjt89=2_I(@VVza zkaF4GvVFV}QyR_To?r@LFGo7KvX(JM^6L2p;n+~38N15i(2`GVY!hotV<#BclW$&y`xEgwy3S@CA4UZxqlFuFvNWHZa8 z6$AYG-qfgM;kZTONA`tV?elG{&Dp>MTPrCt4~=M*R64MOC00X!Ym*NW#0nH<=kt$n;tu^2 z@$vEwnv!)&*hC1EvVlXRU{tEGn=Y~qkv`t*fk0AO_5v%^7;nXY?G-wrFF%dHTkuX$ z{3GGymlOV!Tzs4M(5ojN^`vM2zsXiEd}#a6OA7uM{aiqe@T>;e8#v!N9>T^`2Ki=G z2oxwE&UQILhAP2$4PG((5bW+3pDRSXg3n#U_IVUoehr|>=LfELBc4n-x;w2Q8_;0M z8z7{cbHN1=!^&Y6VKC1HT3$^jrU3_t)F#PiY~W0!FkQJdw?ft&P#IW*Qip$Ukw(}M zmMSe_neaquRjXi}$E>8l4u$p9uyexcr#+<=1#0#6BLk;!?_+S3+Hd$bbc`{LymgYJ zyjExTA_a}9MutAr7bq%lUm(wFGdA@W}vUFSw2>AxN-=QA(k$;JOWVr+@ zPHtXe{RUoUNI8#z>&f(#ve9B$cG*U8@wX$Gb-S!B)!P$W854I>_2jW^iH<*8Z@yK$-w{l2Bsavg(7QOshlW$ZFBsfoG2 z?vEoj9x@be{*+d&m7DE_fyFCTzKI>wB#s=^y)=E$OswVUWi^&ur*B21MTlSKb6VR&V)Cxjhe=yZO%yhT^ z9PZ!a79)pETdRI*k%-=}VkACCwG;VH%(Wek;-!XwBw(b#vldlUOGm5ewL>G@YHi}612}9MGrYc(rU!e)NaAJ0N+TCp!UhRzPOCK8?ysO ztH<9sIv4jzgu?y=2A_j@On9up6!rZo_sqz zi>6LhbV3Cwnpx3&e#@(+1`tXvu&jC6CAO}fMM+E%X;Dzr%Qbzaf<&*Yob4W9^1j;} z!Q}FL#+xtP%kT9p3aR|$t2613V+-$(DQ%jZ7RTcOclp*_?P~>s$MPpIa35c2tLPI| zG8*$wR~pIWk+*+V&k9c9=@<3$OgV12K8Ck8S7++Udr-I#cUS+{CDnKJ@>RvpO?3S8 zxyx-TF`T&iuX3b^%!`<6^Xt`7asHVn-@6#+ZyEnF=HtTh7ZtC`K3An4)Zb!DUjFkx zIC*15^S$ri*$5uzB{ia}gJ0|XE$3Q|i}2u`a!{|AQ~9j48+exTUjOzH`bJUWVJVA( zX<_F*U>daL3Ar|y4~xP*>I%C*6$Eb;jk~pkuOr-b_8|rWdcYW5kC3M#Zys*ts@D?5 zSN+T(SkhdfuR!h?vIOKs!IF=aV6pWC7*@egObsfxY_y{=mr(B{IYUL-Dp{Ittq%iB z%;gP9@AV@t6f6{rBImG@qu+`mFAQP|gWIh7+uJtDq(@E*t^y+S$ak0rS*Oc<4h|yM zlhqfy%y!7?2soR-p#)Ms(%mmsS>#Jkr-ZClwHP-2v*+bQ8-M$l);$Cdhvh7poE}S8 ze4y53mXTO_pC!RkTW$8C#}ghA>N^AMAWCX~t6bR?ONLolBX|5J#~P?Br%!ICdEOo9 zVkI2r9P58%vNm|iSkD-d1#v<_+T}KbV?3@%o__RY93`s2WPvXc4V z7FU0BLZrb@sx*E_ln;e5BJ#}4Afvc0UN{Y9upY#J3B50{h2q+8_ z)Zm2mPIHIjqNi9L&iVQ1h{FSq`LB`evhQ~^3e}BiQV4fw<%(g4{qE|UpOl3YZ$1aL zP>6}QiqSMpGC1q^M>!EuU9unwW(L#ZuPlF^8UEWZ^8a4RL(; 
zXS+gF7)3a7C=2uEe=cN?*}7*MK0bVVqPt(c>3FwwHsA55FmU1e<_0_guEScAv2;O2 zp!Seq9x078)cX9HCG!*yoEs-Eu$XO#Ar)P&fK3{K2Z?2ViDD4$hJ>6m1#UbGmIo%k z`!zdGL_h;hg2xfKHI!n8*G?hyD>EZSyz#NlV4=r0^yzy(x9uaJ( zUU!Q^--2`3b^i`-V0ZAlc|e^X!8@5!1N4UYUTH$JdmAHaS&va)Sv3@e&EfdRFN{yt z`8Bgs;&LZ0?p9QIz4tiu6Y4CWI;1v_pBFrJPztsj_x+;VWLov7BV?>Qwp&Sn!qC-W z&D|NMR+(!gPH7Z*Ax+egwKJT8;Z$2QGlk?Z>tM9?Sjy@Ya^7TM^_1kUl3Deod3wP4 zp57&9zRSv!lL%Zu6yx6z`-Ky>(*A9pZNfrXG)Va{IfYLPPht)o4(TY!67oxkZ6!(E#J#|xoqU0$Rr8U?jXPYe zheB*OEm~bwFj6Bm#v&=)+(}I}lbXQkRL|b=zmp`UlAfiuysK^47nukGmL)Dtc;j8YxBs+^ikqnzZd@bBvM^3um z>Yx_mXzJKHq)kKh)91-7i96~PqCaPl9-jrsAej=x-RddKH;lY<-7Dl0)VvA$_L;V}l-ZFIEhq^wi)E4GaxW<7b@`dgU{c`JIf% z-~7DcrRgh{khk5t7^*R`oSZ9?h{eh>!qKfsBbaDB+q@t{jle;R{%v4D)@{L*cNQ}W z&o7XBh9ApbZ0}mo533EjlFLp5j8GNtlW({KB0}sL8y`uBKA=wpu$}-XPBo<`4p;2@ zLn{;s0(=8DEnXg^oh^gCR`4k;Ua-;d7Amm~&`p$mE`q!tX{8|TY z*O#FV0P!pl5|CfW9s*~|I$sZ_#=ONoqsPV$`|gZdRh^1|>*40@N;|i@00}q+=!GMZ zXSZEMq4AL!dq;>(cvU0kd2j+$!i--A-O!e*4t1P%tBsomgv8uWP#@I?j*nTM4_V>p z*WW~l<8u}i$=PF~_uKG7q=tpWU%l2Ky8L3dQpuG`uI~&M;1{p3}wigqIwnTi5yS))dCSLlQd+ zJ@+1h{$b4{VXq~(%pC~eQb|(4_YYTs#?x72wq9OdwrdZ^-befUgBc@bU+<6Gpy74W z*4f4JH`h+^`Y2}6q?jhkdlSSqnzD^x!8C)SND?s`IXp78W%3a9lp<;fd+2)4bZ389PKahP+B`B?^?hJ|+ndA!v-^w>Yv7_2ubS zhazlkqVA4szd$Kt%#76!j^-r;QYJocVFHC-yBz1Oei%z?K3-O?F;#?IzLO(KCC+EN zUI|9Z<{(Onp8b-gR%$9=O@Uu;Iabm6)HH?!8i_kK zDr~6=%L*kK>b{Ez`UMTel0L+mwVSO{r>4nr(X3+8)7`@22LOq}Tj)TB0}C5KYh{4{ zoA;V`eZ!>-2Z@et5DV;7Y&GJ6pRG5|^WpO`!e%^)KX#{mg>_+98}p1!R+z_xcx2~o z-Kuzr0)W6}JJGpM{j-F0gcjphFV?<*FEkio!P}ak)o#9SDRg)9?GL9B>qV}gtBRic zkKDhe2A@p^>L(&}nf|Uo{73BnKj)1v7zjB4#V;5k<}7(e91$5J-lp|K9vCb(7AIeC z3cg;9i*Vw99%&Twc?!|t_9bcA7Z@$_;FX=uT9!qNVLgK>q_xsR&{2r}0f`gY27>v7VDa}N;c@g~TrEeYNNG(tH+fvoC5I*yu0V%UG zkoASWa=qfSO5;OQZ>Jt`os5dr7+kQ^030u)#+cZ*wru{+Ld`}4IT~Zaing@Eg z;l&gv`l|R?sVb&^9PC*RCRXlq^CYSXspY~E#VODD>B4lO5mbKRZM55<>IWHt4_{UM z=Z*b`_&qDlk{`S)y~$$*e7~W;M#=GC@!Q(kpd+g7nXlYC3)!i~yGK*;>1dD5DtR^6 zxYBSNDs1NV#V66dQMfE=s#Pw}#$e0wd;5iI(!B_`%xZ)E&V-Lr2MA;hJ0-TJlfZPv z6VDNUmW=qe;}lw-6!uEXq&=yS8U zGe{VeOpmv?Qf6aDO`;1O#G^e`mn|M$FGq6ka0I7;@o=eGhHfJUMBiaMM=MFQA5?tc z;Xd)+^PCsMTkC^A?=V@a2}H^c4Vuz;LvGKP#B@6GDA1ptLwMsVxQ;>7C{Hy3-0uaAFOwbtl;gIK6T5`IwSznWhqojZih zosdtP00PinlQ79`v8|T8!_ZJ|8Sz!?>zfO!0~Fz154AGigp1>mXjwc>3bN5zjaQnB z#19O)pD@2%@&Jm&_}=`s85f#yah*96<+Wp}JfjUir3hXxsLD>#LfE{C_fGpt79`&f z>?a`_)jIs@5XE~v+*`XTRE(9B4p}DzcLq&}DSdns&HC?v+t5yaCSZ_L;8krJ~d3FsovNS>J_BpEb7*cz?^qy=q3qqi)7h%927dd(>kz9B`WZlkN1lz;dI*5Ur8sI|8rOG&RUW5WPj(?0BFV3Hf{D%y{x zMJ{=CO4rlJzLBF}l@FiMr z7_maK@DjM4vf*Ia>xtvQNc>? 
[GIT binary patch literal data elided — unrecoverable base85-encoded binary blob]
z1?I}tMzqkQpJ9&m>Z=45^8uPLHz6z($E1acZh2Gfcy*DRn_e3jUf4VZWJA5EL*zu5 zlD>mF=0yvP1ZT1(Jh=HjdpB6%hV2W;fa(LSKqu%Iou%V>Kyc{;RF7G&E#~H!)u*xU zf~u=EJhYOVVaz7?HxZ9kCeOcp92`yQ{gCv_YSdT`AX!Z?$=2YK}$zHx{TQH`vA_r;lj zSJHdcF6h{nv<8}REI&NsAOc5fTsymY0;r#aniynn6+C`}T<;_4R&^p&%;Xz-9{30R zl}Yh<#{f`?KG}dF(X?-fbE1vV{xX(X_Gq@B;BO5be-otw+Ir9f$;NsvW-NdPMDI%9 zOh3*^Zf=m_8nJ&i;dC8uT7>^ds;1I%da-|c>+$kBm>l-9xQeHm^lWMh|K|3zLHjam z%RDfn{12-?^}EtPwQ?Qg5HM$$3;q(K{Hy5qzpix55&V#Lw@7c5%MV37h%2L>(ut-t z)l!25r=ejFrL#9-&ekioDo!-Z(=K$VdHfNu>Kqvx!N`su zePKK47FC*SDjNg;29K$#$>02c~ZU5#3GN4-N{c?3cHGw&bJ+r`#2Lx~|lfoQO> z{hKI$RJ{Vs4JeBAPEQ+Lnp_;1jg9#3A)r3&-(!&?E;kNEgtk z=}<%Ud|!Mj=dxO66o&KWqxUnMq3w?)6v$B??RY^mMC-V` z>7Q65xh$PD-1LXbdo2Hy`?N3kobOEysQ)5v2~Q(I(cbO~%4az8?f0E+`nhvWdnKpmX~~ z?K?n4n4E{qQ`}38SeXSvLk-DbhSguo&)}Yp@66ltP^HDE?u?&ly{vg3`xwzp)Is-t zOor$Lw>u@R|Jd0KUA2kAb z=|^vN>jeB~Fk> z&H@0SzY7EeTC4Djj8-0#w|S3L#r}&5r>S>umnoQd zt~)`e)9aMzTZ-X-nks61kOBJ?RU+;W65f7N`uMj6UVrlF{{<{G96)uH2?Ro!8-Sr? zP&?e}YIH8N2F*tR!cG#4kg~N4_Evlk@D=*Gr8%Hn?i3r4D-n9d?5VdfeSeYRN~=(# z)r+ugkrweE$ago$p^ovzgP3M@A8c7nctOw4Flhr5y*|d|Na4;~BCKa*5wib&H$F@r53t}4w#tDlDpB*`Rsc(D3s&^ub|6&Rd{B@F zq}W&?-_aXe0I4=A&xXSHhEVo4IxcUhe-c)MUYQA%)0vb*x)2hh`US*zPXNNYjnGYJipvt?B^*KcS)mZv#kih;A zaP=77voQV+@W33~kIGO<_=a06NjPL0iG*Xa6fl#~$Sl1Zo_?T;M)DDPVRLjmW%9_h z`=CZ4M551Zje%=Cy0$a1;0S#uY+&_L3p3`hflH~_2V1oew-CQLx@xT!f~UydjQg+5 z(3)d-_cvGFZaV86*^}V=AqF0?l2XiUb|b%ah%@S zlKlD43!st)GBMO+d1SR9(gisThVXCn!T)Mv2(3*JqmmA$7WPA( z6mlWSVo7~Qj-P!W;=xAdqk>T;a3T^d18zX3(8bNnoxzE;wE;nM5b$dKHKaFtEPbFi zvZ9(5pm(YF!1ZklpEU$0C0%B#Q{@$}+>qO|Ns+L^3vO&OS-QetcXy-7XE$E>ATndM z;=oYC3nX|9`Yw&1ETP7ed1s|;AI~@$@}(#iswBB%2hM5v+jz?98kkQuWTwj^kJEM; zbGggv2Ncb!^%aj$oa0V`-`V{xD*iuW@IS0@F$u`76qD5Xyn>(+n8?vT-(E!{_|Mxe z*0+_{{h}%orBso@6YfB-LHIMVQpeoKzr~?JU-5n`qJE1G4v|QHjC*QiW0xCPQNP!+ z?4}*Tz&m(iMf|`xT zVXCnC%5NhLclw~d7UJJRM$NachIwU!2=Dr?a=_)FYtqqRiWpC09uuwC-n)hFR*~I0 zvJ*YGoA(KtKNMj9i1xk3V}H}lw(B=o1&K_k;(t?GXFjRkcN-H{l19^xK^Y(-lOdu97!7wL*MhO42<5Zl|9B+6CUA~T<5uLQkHUdl? zGq>AweuUW<7JrZ&ESIAbe>9le30LSEivr_Pkx3_PpH3I$ccH$`kv00yS#c#=Ws9TI zX|&M(G;OFMyd~&HD8N2UoN*ImXEHfvU=-PjfXGKEFPKKKCZ+Tsa)@w5gv4+7ix6_w z1@jVl(Z{Hr-}O153$lakh(V32l*rN2-UZVW0!I9SDD;9hmm%NE;kybT_O#!0i-O#b zq!bkBcuy1%^gN;%Cl!kvbX%|wtL(0x4kzFSf$C=3$lcxy{-W3pHT$-o`f6ScIh{|L z5(X$hvp>%L#ZE<2xnJ1nn5=_uKVBpp1acp-?XA`=fxV#FLn}~w=b{=;cKN7(c#n?r zLkw^b@vc{(n0siq3R$)34Wi7k;`iAsNqGk}=e&9LdkHl=uh=feZO!;_yS%;5ZJ5cy z)|e4{O@0eXe&4P-j#sYvxozF?7#6>k8(Hlsq5MhRG^qS8a;0;WE4TS06aAMwg5~?W zS9)ujj5$|4!w4 z)lo5!NDJ4~eQqkXzyZ8TRvBPy;&5@~-U$mSqoUwMX5mUm-Z|6Xg}6QgV|`7*59_CC z#2~n{76z3i3k474;zK<3qyJte2WtW)I=hW&i@{%KoCc7ZqsMSGFqGsg+G~AtWdbErD}B7$8kQAJH>?+AI9{D7WmZ# zte%ce5-Kmk?&T)bIf4&3eBaGZ8}Gm*;eG0W@HSif&=2If66o3QnoV;)mEml_-=p9(@^}m_7<9t|$`_|5QpLI~vC5w&9{+xLh`!4Vk4>9EXP9 z6@#`g7Y;*O2@0r@{i$TC6sQ-|?BqcYf6BF3pGLQJ-SWr9@9QgK;kV<@~CK z)2xCMiQUNhu&4twM5NUVmGUT@Dyvnm1$MJr%IvK_4`D~%Z|!oE3)qH8?mXM-cTg(z zUjgCYu2_TbNaRB5gd+da5N`;pL+NF`xjx{+O)X0o~n>7$*r+BPb ztbh-0Ix|71U#Z`PR77lOVT#cB4i=`fbYQ3g^Cy9DoLSz_czj&lZ%kpIZJHc(*NlmH1eO4wSZZ5AetR=Zpy;ol3Q2nc{cUrpb~Cm z^Owi%37+SiT#pqfl7aS`7rNuk&^bZKv~aLL$zL<+a_WLtbDzi)L%fhxvF2 zSs=g|DT*-~7gW@g0^WDKW~_Dp6wvy*!g#x35jf`r2t2PX$Z%W5lJ?L!h6-i?<>by! 
zOQQzhx%CmejyfKzlA| z?q?kQ_28D@DI2DT84+xD7`3?jqCm+q!}`_6fCFuIBJhO(42CzZn=#BSLrUG-a3903 zQZWMsyOQpR9q*&?HT>nXZxmdAqq8-$PmXN()wY<89C&dg(-TJ)Qd1K<=5V+-te;*| z@i4%JPg_B_5#9!AHI%JB(M+6UcXU)G=v;t+yP)LzZ?J5w17=Iw^o8gK(c6=>V!tW% z^h)}*b+W=5S!Q*b15fR`q2(MLNAZ#-a9J&7FWdLM#FU8?r z8*T)Jx6+<{`eHNU`5`2KLLPBskZ{r89K`MrAkY5Id-?Cq^v@soS`VJR3O~d4I~gJuKzw&CE+Jd{sF=5;f|In))Tpg;uY?U{UM3isWB{e9i`tYBii~EKw{* ze6OQXw5cN=4zEU?QK3(1uIPS8mb7R}T8R zKMB07+3~E(`}%vYuI#5tSqd;G9Ad@(m3X;94bU_q>dZhY**xuC)85O;LP2XGdeR6C zPDjgnW2U_x31%pNa?_vMWh0;-07dD(P(`M@pZoS05ixWmeJRXxk+(|wEQ>4?}DNw)Op`vtXsn1Mqh|BX7Wt;b)>>x!u;p4_JI}38f$m>!#q>86 zN=*QzIe%xa{w9?AJ37aVhBUX=N72n}7zMQuqEss3O7$l%&~20_ zE0v?>YeBB1BCTJ}Xq{geSilev@O6CPEUSs0lr5esW=T&E_yx(AeMsSU_*~Qa`iPw{ zu1^EMIRR=H`3WpH7fyEs@^#UHg@*@TR^~GUPFercpc&3q9t7Ie(Qx^~VQhgprPtcf zZcO|o$?*6tzP8xXPK2o2x!amiH&ORjtSXgNJ4{ObtJ^s$P^kQCSZQSUWapZSE)DhjL9?f08Rr(qb0Hc6m1l4w=#eV+7R&?gccFFEaLS zpHJpqEz~fiK&fBA?=u02V+Jui)ZQ}_xu=}YIhB$3Id`6f+T|7UVu#J6>T&n&L+u+?B_nGy^p`31wy%dZm<VDAhroK?P7ntdz=Cf9Vp& zU(R9#tj0LY5u;kNBfDJHi#}Bm8ZN-uL@ZN6&YzWgRi@G1-q6(<&IWh%0YDK>U*((x zVMQ`{HX^KGU`e-yv2C#$f(5AvD31K|?M*0y*zAJxI{v|b*Wh&|5a82KCM;5tG{r-+ z^{18TWUrF~9)Pu^UQWH?+IF*Hpl^NvML#r%3#saOQCZW^;-W031KmN25G)KVgC} zOkSzw0AZTQrlLvc;Rvg0b(Bh>J`2!)eE-f0Yb!^rOy6IX?Mu9ALU|u`pmVLr3Ssn@ z9tHYPAug}9FV~DQ6Xqbd(ijR#w%{9OmsTqe{UxLMp8);;Z(aKooS0ONv%3^ohkZLX zPYI9$ETlPCSz^tfHt?tA3dOzRr<=8!?EFN`(}<_&!&^WWtuWFY-$G$52eNMFepTh0wV%f?oip2PC#3n{l; zZR13mfh~J#YI?PPqhWV9+;T?bqCU?VJ7pduH9Ua~dvaanDZz3r9h{Crrz2C0V{eDw zZk4KdzB~&!E0Lxi{m#P@MR7(--TmA6S~K&aZJLp5qvGVc)s=Zu`DdM_wcLkvw=(sLp4xI}~lA#F5s=wpx; zBQ+J4H0|DyPw@o9n%8i(zSiQ|UnON=I$Z5C4xekCo!g&072MCc%C9}&YI5N0uezac zUlF^wTXku&dhEZ%FiKps@=ULt=R81g2ba8^DSoh$aM|AMgJs3joRS{(prt!>v@657 zQkS{ERsNH%U&%S!Q zcx@$)Dnp~$=0 z`Huf!9XM_AMqZsWO3xG%oWUhJyvs0(9(p~j^+Ne_vfnOpe8a)r+9Z)!!>mt{GMaxV z>;x36jw~<vt3T*t6Q`!!{X{<{)ZoIjqmh5={gTd2 zB0ll_C0xn(`Uki4+w;!T;mI=>#Z_Ns*m>SG0Rs}IkAAtre3-KWCH|21GyY59G+r*J z)4&%d=Qy6$XT`_fZZZ=FlPPwF$^JwUAmlVty43XH372HCiK&srlKZ)o+WMQ$$svL? 
zj|lMX9$|WRmMgc=6b^EykjCwKbp4H^bgq~!g6!@*e|&s=>OI)x;h}1Zln|6KKc9}5 zK7$xB#gRA}05lM7JijLXu@7(?kpP!KTVHH7l85mK_e9sWHQbDOrTnON3l^4r9BlLc zsMqVA@O#3@yuy`Fy}*NX1XRCi5=b5;{t`uNAxk=@GkkME60UFjkg zQIYP>DeCtdDE3OpQqKELLT}9o(Y#0S{kB zQGi2ASl?j~H83y;*iFDNxhaY@>!5$lo`##h=p2(@TLmK;K4>e^%1=pXHW48qV8V}< z!x8_;W>|_WkZ;&1ioSwpN?j>QOh5o} zs#L;kE+0y(thg{RM~d)-R0z#yZY%nFTIMxVDNR+(kOHBFdpiH1!lT zvc_z!GvGw>Qx9dZ0{>2aiVCp=Tp)i&?J@0`M}w(7J?J%gTM2X|2e$!u5Lo_$mj;ql zgD%Q2{0X-IQz;aN#t|}@i=tz6dCQFQsY-%J-COuI)l(vST}-ghmRRzJn(!d`9e*1@ z4bF0quO^8Ic67hUq|QVTEqmSl(Ikjrc$bpfvt&d-e@9kpj=R||gq%@idhz`{lcESq zEIr})B<~C=!>BV;4)-wA{=O3*!e`s{s_7 zk<-|8=Idc*em4;VYPe9eq;ZFPcGj_=k+Z97%InGatIMPB`5V55x_YkG>t*7h8=jZ6 z4iOYi!MNwu@%8m!j{W|b1_KNM8-+a58~GSkwCLsdZyTrALpI1iG{&@0fmO?Z?D0I< zKe>jFL^BU1kgAwA-rKAAc-;Pa%v2JcekAMqtCO2eVjzeKn*Q*5hx!)3(%8>hkwNw* zvce1R{TX?ZC4GZ?$Lhk!PL{E2Su11OZVa3t`+Ww}LHavHPh9tBogfATo@chzW-nm4iSli_ zO=$^Nisx7ElU&TIYy5=+m}Dq!f#mR-J<~?1_NV8o>qVnXf2PX~P;d2q;Daa!8zB{- zYMc!CoWqnXCD%(@bNfz|;+W*gqu;QpU(YWK_z(-^^;ybg*PZc2;1O{eJMXKmqIo%U z%7jdXJN9~m1Rm`1>Fam~iwzapBTOFbTvIa&IAetX9ft2ii^cV+42abVv|f)g5wmzC zl}qfH+r@xyz2nm)H_lvAz~@)8!*`QM2fxTYt|x^Fz8y9y<>z0NEDLLg{Ms&fTP@70 z?``GDt)3m8N^Wp_x!!9kY<)ZHb5iIlzI){yxw>7~77;vCJCSKH)M>klK#p7SQ}>FopiKn{0Gi#92#^-$ARyx-W z{6z$BBE%gMtk4KHN|cK29bDpR%W0=o?V!-3_{H7Xy51Tx1MVmvIaZ`F z4Ekij2x=tV^W^9LAQ76&$eo;z7iM&o5Q_;sPuHSLgyJ-4SI4mYDIzg3pux*wi`zjZ ze0i{`?gbKJ-4|bm1qQ?-2d$YrxWvsa7EAPa4Ae{!NxLu9Z@6bsdRM2dUlT;V?d7#g3g4(ICM_wC8Tko70|7iui*U{ga)q9 zPjc|M&T`}_>%eE%hc{SsW-g*1+db@MJRROa=h6A#yVVw8D(Pa!Z?{ZrB z!J(j>At84;q7`YZiT;eNd4E3=sM!O@-Fa*yWUvUPGifZnM-5f$?Cd&0WGi8%kWctd zY5}@5-*sb4gZ{v~PQ%Yvl7oE2!6NfXPgx8QH+$rFd1tLd^(`_91TGTH$r~E+uw%j_JnH+^cjc9`r>@L{e|_-<8j@xvGi>d_+mwk zyc&}pm<-yyKOXl$@Mqw*d!RLU?Vl_ESpOftb}Wl^FibAH+SUg(Tql*ZT*o`9vi|B^-q(t&OLOtX~ zoP50lIwLyxJ2NH|0!B%Lm{*8Chm+~g6Z9e7@jSlJIyRFE5E^`SeY_TZ49tA>fMOYv zs@eVm9~n8dIjPh1*4cV};R|_)@m>ToN+`OMY~0rr`Dq|ECZ#4*i@tck?B_A8MAcZ} zEaahIV{Q4k*Io0qlh+;VVMvKUWbo#E<_oDsbgGd4rzX37siyu662jf&+x2By{i39d z)`*V`DxS}f>*7vltCFdP{DuVamsOFDtaxiYv z;}|;9DGjW*5yI8(UL0E^6;!jtqK)TB+(TY{^Q{%XG>!A#@P&GEtg2(+(TOAMwo=}` zKn+Q(eo-d%4#Gwr0N$C2zjD|<)&1Z7w}S!=$z~#-!p}O3qdw$NI>?HhP$Dj;-szXm-)(z)60`55cIkgRrnLQw{Jfo$mP(5gN18=2tRo-&80JGFK^{$TN&Iz`wZiIV0eCX>R@*7KZA(4#%D3EA+g}S`K|>lZUU=x5 z=AZMf_Yc++@5&@Qz}qxHszxjMy{iX{0GF_T&+`3sHU1yd*rEi2b(&PK>-$@Rq64P< zXymSS!a<)kxEjSh4;)++Yg%_$3H+HTj4~$`!Udu&t+o0lFdE6`^WLiJE!&43*3a@! zZ>YLw-xldzomS%~?BBA@7BF#;f^oT91hf}(A9RyFua?KR)J75sYPdWvi5U(h5KX1V z+Y_a5pMy@=w(UJVu5^eFFx@wY9Y)ZN({0o5cmPPAwthdxc;p3Ci3 zuuF2YSHaCUa01lTb8Q9Pn4M|DJ*HR_j)o`RN9osMFMc#qU6<=GFDH1f1l+I9TKTcD zvDdLT#5ieQcv%H}8GI};=iRylpB!rU&czwotInQ&Tv@6E)@ddCrmx3edozh_L^n*Q zba_dM%b1U8*&@B3d&?WO3O@SGzP)-qucv*C0gQ}~M@+YJ77ze%IyQxYj{t^w;xS#O zR6p(9mwr!FSc7pQ;JUH)xqo-K)q&u4)27J)#(V1BKCo5qOWKAwo*4riX1F9GQ%_1w z*yhA7MTB9my7+Mu;BdHsGh+k2js!aJ@pcFupyB%R_=dR<`70f|<>|L8T?Oi$Py7d$ zQd|1RbLq85==qu`m@3l%Oejl|fsZa=Wd^a0;`i>PYQUZ|k!^Nz{m={atGadGj0 z)b-95Lv0XsfsrD|n|6$0AlczB>x8lM^a4n{b?9UfOdHColPIh}tgR$ogLYkoGgaC- z<~xz7tAt3cqYSzwA**$V?WRhBjy0WUMM84VnSm8UvHpVnT}I112nS zyA0nZl-lArjIFZjf;h#4gL@{U0K%m1n+R$V^#Kj(OwD|)w?_hcM5Ah{Okk5pac`Q3 zvPpG9*(^R&xJ#saF128gI!&>HxpjY@9bmlbbrj?J`m!pr7VqxrctZqk8}Am<{xcYrpUJ+FYy~|28}?7=QMrP_|!g z9CL_3G;uC33O%78YU{y}WoAttQ)>k+|4>?<(mq4|(okNV?afo6q`1;`m`xBVKLe7< zhGDa0D2XlXqE&)A6?Jk03!JL;QcwE>;X$W$8U(Ydm5{ut24h(Z=Bcb;b_By)4+qyB zNzB2-6Xt<|vWO{)9@cl-Ig2JW0zmIanU(zBTHV(iYs^!;wB=Z9O=Xez|ME8j@>5Po7yNsV`zB93{(7YJPA! 
zR*~iCgW2`!J;mUpCJx~F2SP|``=E0^A0*!Z8qACUYM3j9+LFp`E{I?~Xj$V{i)jex z65W@WUJ*06exfQGIQATBRWabd*oHMIAD9!KbO!`SE`46=*_ioKR&uGs$fq1PAh`snWl+4UDQmY7#p@Lg9lDNS_D zJGd{NFHau$f?-iJ7xt6+pUP0j`q3D74|PKe39~-B_@1yl`(b3Msk-TMuhny!2x~O9 zZ9ZULA zALuMu&Y6U2*N`HQ8#|JvON{e3C8zBk=0oZ%Q@6ZNe7DDI1BCN((}wv&(`$T&w^%i?Eg5we&lj288OWH zlw7zalseBkSkEPclb0%RH)5k9nzmZ96WRa%9NOkK-a|ej{kcFPfw+z|MebtoI@g%ZQ$1v$W=EbJ* zpIm*34H}}~kczXy3d!;&O-5$KyVd8%gQ&Be#so>(dG*IhueJ6m3>Ro@PhzB(+h~S^ z6U2}PFG2!HS;C!9C-G6fGWZZN^c6%HzIA*7$#81{hFNiH$HJ}|mqn&8QgKlz0A1lZ zid^@D(h#+uQd=@^;%fM#r39)NvUEMEID8?tWX$>mYU=sI?a%CTlSo-X_jGtj(^v$C zaS_tzd9l^aAe4q52s=%H86*=e;=(Ao{D@*=Dq)zR(*zpQw@qr^}IS%d_1+n3lzzvzd2x140M`uWDso^B6Y3O z<~BGD=K88C6s^sS6IB$*)|V49!j)tH1sfj)4hb~`!9PV$O!|TIf?9YLMDAIRK3JHL z!80J%+CbPV+)4d-coqXgW{|BVW<=b7v`(c>8P{PFyE{LfO0Ook(-+r$PZ-p6(l+xj z9D5QsTnok@sPTZ3ejnZA!Jhy^(u+!ba)M1OP?Cx%@%nsI#=NslAdA z##En&Am3>`Y&fzzk$fwuc$Tb`^rf9O#?)Hb~|w zxj5O*YbaNm;hVzYhhdLToD(7;z*%-Ft8CX|GPJPwvs+?+1JtX5uYvQlzbYrJZ3~qBm1Hk3umwpV;{n_uCF%ZdcG?+ zJ~E>EU<}jNbFWG0b&eXzSlo>QN=|mrs+pQ7+ruFJTeg67gL;FqnV7A+uZXWWo5nYWp@H`fc|Xt0rF9J9-*kl7h0S^ds7|E*B_e+b9R{XtG} z-sfdhL&4TZtUaOTmx17mgXqXY)SjWJQmP9XvZ$EsH$HRd2fnU=SAy1%+zbyPRA_$C za0CK7Kep&<7-or*hiVsD!fd4wG`Jw76tNXi(k3NoL?$)AC`4XesF!QauF(i{meZJy zXh}q;fB|;mt<4wObiBym9A5mC-07=odt29}fE!}{qIK^0=bG@(UsFv5!)1EPd+=%y zb!8-=zNC{AL6bRZ@XcECN$6+|spXCyCP!HG;)$E~E=MT}*UqUiTBMp1qdm&AT1awE z!FZJSBylT5w;2t$5%Y?=5f)EIam!hyaE$2;>k<*Ct1x7bh+FA^vKeM<>NInQm!|0~ zxw9sKxD>TXJAl`!L9gr{p*_J)iOGPHBDZ+=K_INtRSuP!FAWjxB`{}fon1dVD7?_wgPK%>7@uLirGgV@G^8^K8()|b-B-`^{ zp{5jmDuoSu51eE%l&zTE&bp1==d-js^MJf;f6Aj$jpu9KjJTHWfK^zSojfR^j1s3S z5k{!q*21P{!H1c^q=m9--}h}mAbpl;V|-9#o#e*xTV8B8fl*+H;JVs=XIgegOqx~O zyt^rt{P9E6fxlRh!&_H`>n4aapz{WG>bgFd%qjo)^-`99CV)kmKgZ=7T3^z zNBE+mOds2Cx~Evt9oWS>^z|kkCHc>=6G53vZqV`96oKVUa;vr z!pwWLkQW%Kba?LhK&6je1zwqH(5yqcgF9!{7$-0EwdV4B7#EUkpF?_tQK0Pf$Wt1( zNcMy&y)@PBLd?e^lD2#-!BR^}{oX9jm$~BriLa2+B!xP_W;psGRvJK$p5orD>Y5u}H)co})FEMk}Oamj_#^;1~+F}_Oo%m{Q!X^XMfACujUbz4=J zX7x>UB}J8nJc+~LV#Z9JlT0yYqZ<$LB_rYal9G{+ch6~2*!Yl=e{STfUBIcT8w zut`sA!a6>chPq~#xD8@_o2Y*tVJA&4Enyx_+oE$n?gPATI=(Cl3Eb?`!+6SA=^D2r z>iXEGMAxb)2T#d#XKsj2GuZf-Fd>5B*S_Zc257^ zfV4?c)-fB@wrd&o2ID^f^Y4+JX~3zGThUIe&ay8NcCb(ovbelddyWdkar;6>SQ3XN zT{*}&0hrpjTKMVaahZY)ehhaa{y%;Zz2`d<;TG^Bu#4crGjKMh_g1iq4&DpGOO!58 z`j)KS5EnxNjslJU=$9w|1v5r)WY~(F?15kn4$=P72kNsLF*uALwm>#-^;e$7{2ZJ3 zfPQ%a(w_Woo5wXg_q4OLaXx&4Nq9R_hE;r43y7YcS6YBv(EiWawy5A5;8}eS0R}sa z0rXNH#bWqRCMh^RkqDe*C>N2fq1jmmnlf6VW`F%`Nomp$iz-VL+yP1cgVhmz(iqwr z-`UTV!kE#-tB8VFV&sI!5xLp2O(AM0iBK%qd!46*dr*v@=vK<>Ylx6(5||0dyn7Kl+0?28os$?aTkP3&lPSF&6 zOp4*DL6cX~>GI6{N9A^rm{lcn&#d#1>hX2xb(uTKlWPw^^$`Kz1Z?z)aY#+?9sBM{ zzMQi*e&sO*SSQ#Z&DQodzJ%uj=r5NFiqXqGSF9cS5mBSOVh=v6dB(nc(7kj6KNxgm44+hfSd3T8Hu zKEd=sTTP*45#dBnLQ(PS3Za?Hh(|f9NubYSV74R@WYt!a6A_5sg%s?jA34)a^4G7j z4SsGOASIeK=!y4FOyd^Dxk`y)<%17K8zq<&bklb)5nMmeG~FRx6K27?m5U72XCLa4 zKjB3-RUb~71+OK1htyt3(2$S19)Vh^htKL}P zX(v6%aX}k!ky9-f)+v5$ta%fSkq^tyk&`AWwxujNmqCBR8N-Zqmy_l-pQA>#5S07AI3esy^_2&UQkDlln^`O{wh%5@Y$`vF+gMWk_5 zLHp*H4VMTWJOG2T1re#Q-I>e#3nO<>C;jzTb@~ zxgU0Swi$TD1L~zDnQas`xgR04uyV16nytk^Pc^Ch)byvSlahmujR4p$1lHukQm7Ap zQG$*pY&dz0? 
z2>Ok9;-1ccJJDr8aZpHZpSRtq>DfzvZ{(^4{9B*P-)92nu#lZSu{qS=bY zYG0iuHw7Z0N~`-Y75U^atOb^|O4HL|Kb{hj&QE0=IkmSv+~dV0<4t5i<9Kv>nKYHX zF8&pHoEeP>%rM{KFf*@LEe7Cg33MYUymWf#d4f*!7B7Mr*`xzg`ER@K2H4N;pWcu> z*`5*Cx4MxIOp(|3pn^|N#;856I{`7+E_n-jG@U^%W_xTYI7G+>n5Kf+#HYN%73wyg z>O)~D<7vrBmfBAb-3I!HQ?GMFNP?*gR9iUgxtDly3}W=jGdb_qr)UXM2vlqJTc>WS zkW}M!%kD=OBifikC~+L`7P?BF7sKybGoN>FvE0qT&H>(5G06wqT|1@!OqKmV`G9}t zpDiDLd$qj6r^yODU>Z@~VMfgAOsi zecEz8NibR48`g2~hEN2>7N*g9EJ+wdBXQln9ILI2LR(+)b)s~qYx($GhU|ateh-^d zm5FZ1g5@Bfq+pzaXU|ougjW(40Zz#f8~h*E-ZHAKw%rzPDJ{?z3+@)&iaQht5Zv90 zI}~?#C=>|pUML~B6?Z5tP#h8{F2x;+JACQ>&VKh8XTN)&ectC}A81yCWo-Y&im4E3maeX#2q0XFG-&gW>`e_}?b7@U*F5MuU7j}nz}xS*B%gnK?&5YN z*6_w#N4iL6{?!xd@L5`Cy0U-9d6bzThuIq~bbJC@rO6W?Vj}%_Z=`lxCdU1 zm@qW7X>!2OohssScCtp$W9J0pHU)}0Jw$fRT%LkZPtu^#{9^ED;&^2X*Nd zzY)&1*yF-ragk4X7uCMbIn@{Vy5dA1KP#^4q6_Xz0^m;|a-RFyGaCb+CqWklxY<*I zOnieSJy#0u;WMBgdKnO2*I7BN@U3i_k&P$;QYiF#6*0R@XVxjob7z=M1vjB<`U%)+wQA%}Xr8_-yEh$EH`feMpBy*dh22)U`zGIB zPb2&AHtJu)pylVhsPigz8#R6Luv-z&g`_(3R_083>viLHM2~Sme5)x??Lu-M7 zW68zJlUW6%NetnRLLxVgO?&mUR&e_X7~izB+=~q7p$hW7t*f&XLtdNc*va?I^J;Om zdxbQS)FQ?U4yw3OecQRhH>WBnQBl^m{jvpq(hNBpP-XYpbFhM$KUcgZ^!bTApB$4E zc~;#tV>yF8{g+Hr{^7)p>~5^tmvnNof&7CDu#QTBn#R?so7NMTpd?hSZjf8tE>4I~ zgG4g%SL~-Bd%uhP#N|t@(>I>A9t;d6JhdC)WA-I%bF#;Q62i<3V*B(zm=8Z8Q@C}Y z_m?lqpEAi6&HY{gX`FmV(!giaC-9`4ojJO3-)SgtqRLEB!s+7-nq^#+)b9XVb<$o2 zt`ei^GW{UapwJhA^M_IQa$a=j;~3{rjjC``MPgHvD(RT4gjm~Q8s<~L%tXNKL3D?) zv6CRtD&n4gYgm6zXkIh9~T4=w)HkkAYBqGLR z?5)1Yv@zcXFNttgg^%9H-iuGqVlw;=`a@7$o-TAa{Oe7P57i%WShY%r3;w>q!|K%C zgZ5EWg4!-|$X&w7^BpXfegMFIQl5xbmWu?+=oWUPM}rX??)0s#JI_o- zl$3J9?IZQK6M)O+{-b$ZjMSnzoNWut0%l|>u>Us0rU;oJJ!TjJRea5v{@6TJXC8AV zJNG2hgL`o=_m<2=j-T4zj*XO)mLj!^D)v|>{wz@`>YrZc3a z58`Q(BKehI2GH@|37H;t#)>NIeZ^Y*SqKo1Cd3KXr!mQqbkoWigRh3t2IdbRFI-cX zEr^u2i9{vX!e>8T?Tc4|8tEUKWBQ>cI~N$pH5@U25S@Oh zRu)hu946WJmO(>8DOM*iQ5{(MIuoy$8HC%R%PT}{zlkUma(R$0PESE_jy318wx&bq zhOf+m(YY;+=O)n^9WDDiVR1|oaQj%)?yb%Q{W&@Mn&iv;H~q)3*IIIYFbZ?M zN_Suoh5`*<2+a`4Qgd}-syu+B4HqBBxgL8!ko_c8PhM;8sf=5~s>Y{FXfeR9eMd}A zOY}%_r<7-zXh0+9GlON@+nI*~U2}=ByD;PB(0OmKyi^@daG%y*mhj?kR9mOUj-9uLtT_M({ zGCAIyU%T8t1xc*2T*&{3lYKH--_O!|WLmh~8HeE~k)`gRgi7ER6_ehsgwC3j;?*++ zxU{cRIcKp+m%kg(q$Rb=nyr|tC$_JSl-b{KOLh3zg}EauvPyqXcJR!)J9IP4L$>DG z3&jk$O#Iab<10Zwt$ZEZji^SV2V2?mf=yZ1jbY(An_Ode?bh@sAkzV)x|nq5RU){? 
zQSh0bZN+*i`@VJ)I^V0FZ1&P9-gItkK11YloCIk^e}_q%Sc{Ul4}P^J#{7Ba$*a0x zh&5^T;uf^k$aq4ryeiLVCa?Np4zEfHBv+TWa{eKV4>I$8%M7xuT-0q6VY0jfjR&k$ZfI!n&r$ zn`7a(9tY~Z`Wr#l*B`q`=UELdWj5$XICppX@qD(>kvdfoZ`e}8-ymbYD*ln-V`O;O zwlLziYmjaOqlc-ZMXpT>2Ep8831}rLCq?b-_bK0HsWPi*UVP7>Np*a=jt(JJR`eqR z#yA-wC#Q|6b*b&C9b#m^mlRg*11)>#^`)VZo|h1gGx@a=f*-%^E6awsVH{EFdo>s6 zGNFodYO!A?-k6x9dA&l6rCkB>;Bxw|z!%*L4(U*l9SSA}*WUI8#T*j4w6J2(K1lV+ zcjD^Kq#)peAo%)>} z9?2hZp5e1CH=GZ8uFH#m5ZgjOKN%?ks0%M6Dc77R6l!2pPQPrdPL-ES2W6Xyfs-L# z!{``1?+CM>ZZ*w2BVOzjK6h&{DOxm7*>alNC%X>!;T4e2gvNDf8BomCloH%BE8t*O zFh2MR{rvIG5#b;;xk8rEJn{0c9`Xkg*VMWRHZJ-C1?I6bZvsYy@w2zDXY%Ic_Nys- zlM?l98x}R(69?_Zx5yT&c3xQ^6)btrDy#Kgyt04Id7ylvR#81{fOo}h2C){fQT2ds zdy`YC3UM~LjT!j7?^c9`XJZ;Qx<2roC!A_&0Ih@(x~8M51*gMNU6xPxNS{AEmsxm~ zPHf1awtTYsd~LxJftSXpfDeqrjGVT6$=hQ;3_69Dh2&AnDE=L#N!XyNrgBAWT(E3RN?4k1p^uToB+kSdRaYvIu8&7S#F(P zj}b97S_TOpH+t+9{UCWL^|v)x6>fUJTMs!m&|I0eTSgKnb@jRHkiKX?fiFd}WDawO znTFnUj@j5m$->^8B%`Dk#OQtl8~ zh45F#AtyG%lT#K2gaC)Xq-K~t@y?l7ks{2{Wq%Yh!}e>@XU8c`h(-su3(&lWXr@|6 zMC7oaa#eHVz#+Ijz4||;ErGiU-VFPn9-{R00~`Alg*gMc<{A#Ar4cVYd)dlNjc&w{ z`9njPwHIL1FXTL%yg8}QG?Bv11ZlacgwT6Xm^*t1B##hO1HbI}BeHOyYmPB*cr_U4 zIt3t>mW~>gF0Ni*y_NZy2{0rb`A18woUC#XaEJq6f#CcRo)pruCR0_Z#^>5*mq*DWM}Yc_2~V)6Y_$X%1p) zIb?L`noB5gZhee?-0iklB!to00S(sPg`>`7|%eBR2^$<(hMl09xqkxW68d`)vYXKRL|08xQ5T^f{iM4A9nbE zH?RcK6(j@$MO?kW>fdRWoxL0R78<+{G->0eG?YqL9PKpW4<5z$u`|n&r*MH9x}XE) z+0q_c6e@_5u%T>kPL_g=HoPipXW~L(Rfi*rj^QZR7%7eq5I}X#&g|!gd%IKS{`?!f z+OIac&t2C?<|qOf{b3L(_QaLe5@X#OGRx8wfVEQ4xX&!1(X%Sje5mAt5Vh4%1Qlh` zjuJP-lFX9QYJR6kn<0-o5IMt7qFhkHU#>iA!Xs;6DyM&zFjMOQ3W0X?WqWxt@4ONy-BG%DC`oc%bc zIi;IoWi6Ur^b|TytiFEcm@|6BQkPIWSHE*|%q)c&2&tK+PFG_s+IcF9bpjjGPEKQm zk*P6-CW1(Qo;bx})6al4)FYELfaBmp66Bm?gdP7-(r7PvZu@+K{MJdWe7Y`@_iyU? zqLLo(HdPQQo#>rWy*Cp(_14Yo@Gd!CNHTFrN&tNnc)C>lOq77Z7HJf+j0}We;>_&H#M9T_|Jc^Oy4P>vZPOq3zWDcG8cs; z-Y%>fnsd${xK83?KYk>~jV_w65TqKFJ!WD>6wS$21`RI342HB}cII+5X;UP+k)X*W zNL<_%;cW8Mly_NR`F2}7h#Ikwea>*dNwsCyVY@<26BkrbtTEG{n%UyR7p`pP`MBC)YJci3{8AZO88!k2)Jx=*u%O0I29~Zd zpT^KzTlxamu`u{WseY@hemumsxlW2K*up@G=!wN#8Yl0ziX^7(wt&P!FQX(+KGHJK zdR4Xs3c_SkgbC4Ut6Yt#!=XfyL*LnCYPou=y5StrI`CvI;8 ztMxf&@IK%h(}`nnr1k-hVgkXFkePK+yXp$D!h2%htzQW(y~_S;6o)ut+H;Qd^*BT# zlJ3l8(XIXbRwBN|HH&+BCn|e!KSDhs&vakj%+HSA2t&ZT=S>9JTj-%p003sh8h9KQ1mu6o zuppbn+A%Jc7qBlm&|Lq>hUXeC8u3du+k86jIl)hdxig#Rd2FKwTa|pPgNs$cJh6K} z(wE;X{gHnB{G%`a8E`lCcf!je%~#V|Z>|s**&<%LtJwSs4uCvrg3l9IHyfHC_mlTd}Wuvj;Or_x6X**&pt)k<76hiXkU z!A#JHLIP7`b8NOZ+ZR& zgy|JFs(|22d^$9WeMqZV<$h&L7l2^lGsLgGK-XK(sBn#!ieh~aCGuaTFDORiiGg^r z&04G~y@fNZ7x-I+HpX1($Yy{)H&l1D6J|5;&Ok9R6xW@%9P0xbybf9#_tTuhpl4{TWct!}OhH8l1a5IfnLF)?CTR*qW;H z?YHdyLpmBd!lFETWqlyIa=~js)>`b3PozvoI7_7uTJa;M)JI{9e^K*NT`2Z;)V{_P z!I1k9sW7aZXS?b9Pd1b##j*^Lf66<`1zWTI(D5ryxtP!R9S}gSoa& z7G`Ev+Md%Su8U$C-RRpD=A{|dQv^LbG0~obcppm~g#`O=0|ZS2Oma@$;T6CoDquna z;q1SguQHu}Lejf%#}@K%reKb*@Aa;vLZLh4GYdBsGlN+4-oz_&^WhJUimcqsc6q`s z^t7aJ8lT@KVHy6iJ;3b3!~_3IG=;IL*y}bHUW*=Sl1~pd!wP+7`|>BbqKX_|_@;@P zxR)%HWXgR)YkiY1rC}kpSS~gi&5=kiZ$ZN5!aN#3lfqZJ!=S}xcp=xUo+1OT=9Rto zVAkPN2nSG!nYvnU7-}>!VCeZxT~glf6n`De%QO-Qs3zm+;l<~Xf<&`$k_wof6i6Dw zz1_r~I-rgI8=w3GH7+jBx#9Nz5hNg-y5`A#IY3x!rz-6pC`BOeC2fC$1DC3N`XKhplFy zbiyxv^U)~9Q`YR58-aiWd+bE8g~yJJP2cM;#$|hoFR_ml;0Na)^nrs?>H^q-hK@J` zJb=)8fz&Kv5P*FuHk}vX|2Z!!eX^VYO7ItdHh-lQgS}>SBH|F;;MtYH>QC5$)|oN* ztp+l*&%aZOAoOAPQhJqW)u+O_DrEJ=m1SsC>cUt{ zGtl|8C`j9@W=vF8@?(Xj$tDMqpNdBOCGTNZlMrb;zoYN!#w?OB%GycN*C#OO&|A#^ zsTk$uK1zWuH2K?Fw-<@sAiPSG$;~T|w5HL*OsCdRLC6 z_l|1ngGVV}@TU-3?0DaygK~#W0xg9KW0Zd72fmQYG^Bk+N0Z!9?4LQ-f9?I%U+xLP z*IrZ;na?~FP(>T+_Ojb^k@Ma!=+R;R_Esaa=JSpMGk_TaH$mX7?Q^SWb#DkEASflD 
z!G6vF(Fxil^n}-Q+w!K34wB>XA!MZ`?vpo28r+XfPzIw~txZN+3iEo>*<~ZSH!QP1 z3cr3OycyZLdFkxbEQ8nabsL z(pMPaM!|&RESlrl=&ttE8=PU8Hs6dL{E|X6PJC~&GEO+yk7lJ_h=z%JxxFB zmJo&Xrzx9BwlHWT?WA4gAfa9qv+%CP;pS#|Q>)P*8Nn-*rc?+rueJ$d{yi$k7=X+Q z#)v@Ht;3k`aoYN)V!@tBJd?DRzV(#|Y_9THH^DimJ9vh`0B}zv44WV%ZKAt4)3f{qUa4)@rieGL{5({4Pb7Zqrg@fV>9fJJ6vbF+ghw zt~czk$?%f<2_G@5(|7X3fr#zWhN2!uf{(?Dh$!-$=8VwHZD1n05K9Ho;`DyG%Bejb z7Z{6#>#e7>bzwgcM3LzdX%)#<;F-p1MK_30kuD<}0GAoU$~Oq4#=~Re_wA+&eGmaP zL{CyH6jRqJHy7;hpfn64-Fj6|xv*xYKl9YC+O~GsGPaB-2vFJ-k_!^(_Jgts+&MJ9 zo45I;=jtevn*G?Wkk7?G$J+U*Eybh}UUZ$a53=Mzkgy;-^xtK5Y7As(9Q$@ctA=kI z;Fq6kldlVBP|01vnIE*IgkK&<)*12dJ7HpPt{L}cT&3wr1P5~ggV5s}$qQgS`1~C5 z-~bT?nI7*i#_SWTFY|3jMA;-5$`kBI3QJbWtny=P*ls?j?A|z;>AUydk^Xqs+u99qm^D4D_$0~SNjE}G&x#{sy67-G z&Cx`9)}VX*1ka}&{SP3_vV@$V`hNxpSDP6_&^S3{RQp}>E?+Asmi}~+n1A4CQj{vi zjg-y6D_DF1*+6Oe5F>WV5_mjIhl&e>;4lL3JlttNYt7#YDJUiOK(m5NQ+t^%lM|r9 zw!moYq@g?4Nl!7QXpt>D(JEYItRNQf4kGXwMejLTAYGh_+JZUI6kAsv`gV4$*r5p6Kj{!2=zs5!?YM8ey0-b-pm+OO88f0AagKnPXRRat0M zPuh!ldSY(4bLh{4k9>CW$2|oX_$2PcRQi5EZJOWj^p4wC7g@q9IDV!)XD%u1efRN+ zw8W-mgQ&pm?11&E1_A2bg4 zqSkBYO#h|4Ynbq+p*<`e!Tr?7$ERPP8i<=bDw`)~q^187Ax{GqwG<>c5icsdIf9F;ue0y@uLt!69~%UhDu&n#6{F&3>KVKlRDUl_t6^0n4U^ zVn2O@+gC+2-?MNfl%5aYot0aL??7hKbmOH_9cp^K+AxkXv&BPykrjl-SQcN!@FryM z`HQh`YTR&6R=4jprFT{-{}Q8rLm7X4q&X67kpe#`CmI5oeoFQKTWS%KKPV!}*6(nc zV{Ob(N1*7u;jC$RC_4&zdTkjs0kepTqE5OHEx3mKMTQgt0^K$NsBYs}$#!f}UPlf< z4a;9k^^+wj@eld7HeZR_-#`D2VSJX}O}{c(fU6OYif>7~9@8%coP9=OAt;NMm?4!j z;wO(nyiP2NwLk!h!%W{*Vc4NXtODNDzWXN9$5IyyO2X8)T>r&HhaJN0q7HMj&L(g1 znt}j%dqmHs#C6J%y&FzaN=F38mApDOml-e*ESu=VBgLkkeF4NMRJW$IABk6#77@UQ zh|}D@5unIW5(=%S0#ZV_=H?%Wi6!&4$;gVS?pw-my1%hY%^-wt(9-7Vk9Nv^=-RL^ z0mePqdhXOw>SWqFC7!MAtuY37HfR8%|!j{Q4AvS49~~n4;o+*w|amDa{j;}B*Aww78b)*hx-zw zS@?Z6K}ixFhg%$`0**q5qk7V!lJmJ!apPWcX$sJ#u9LiuC!TNAa09}r8IZe-aUiTc zL(aIQ_JuHpJ}d~>#rW`0+L*~$;A={=J*&u$diTN%8!o%31Bmz9dB?9{u@npOhs6ja zvdbmC9CEjFu_wbF>b6X2I2kQ`RyTyg7OKpniV(CqiZnGZB z6fXH&W|`w1dEai&aZ9&#zNS%H-u?Z)Cl~8F&OXBuL+L}xW(nA-FiQjotg58lzxt>I zlbf17309*$g?Fd4^fwmHG&C99TA++1ZmsnF!3zxO-h+$kkh3D}1MLjQ>W{Hre;lU% zt9OV%^jD-vzxtv2Y{y7l-`dy7=KJG_%URo_(0zP0`x8ziBl@3pEMJ$;&L4Bvm9Jjk zbus$lsdRH?eC2yO1e#`Ay3bbOgv16}kvy$gv7)!>!XQ8iWZWVaekFr~Ayb2!1S|<@R`t7Mvh{^;`5XwYwXzx&{rnPl+f0_U7&3B3n&y~x(tOh40Qb1Q9n-t*(0aZ6#4nY-gcyv!09>^ZFw)+# zyds(jTu(3qZ)w^WS}v;|qrtgT@8DB`o#MQE+DcB2l}T8yAKp2J|MfuEgJi4sk+>8b z=tTeJi<)C-3otbWkeRfqpS>~o6huXyI`ci{KJf7nNI^l%bR5k2?UTa#&5DxXGt1Q5 zV!GP1uOWS9t?3*CQw1Q6#0i_?$7ji-FquY4=nj;rVRc;mw2@w#*+D(6Gw9(|Zm0N; zstkGX_;pTh2kr6N6m~l6o0ZtHwRC^#v32K6ny9WI!?TsN73nli?OPJXV}mjSm83}Zq3)6U{+f}h zsIKlbqef${L=ACP!YP6F^D*}P1d940O0%aJ`;fdSeiUo9EeG658&{#dp+EG- zs%waEiIV(Vmi|b0Sj0b8(u?V%hOLEo@!`XPT`|L;h2xs$1j-Wrj)njY=b(|V1U^G zp9k_sn6YW$#)kBIbZ82Hy4@H0P)tST0bUQ$a$KGMvhTtM;x<_3LACombAHknM8<+$jN zG*u7!5UAP>0um3e$Mvi+J@&k$!kI0J#b3kZpQySzH0&H9o7%lI&3Y?v@p;-(ch0CR^NKkDVZe zn@sz+n|WRzkGPaZyn2Ndt7wLJBZ;4J@wJ2c_?@+JvJI{+rJ05&tZV-MKYR)DsUu0w+=Fht`s?t8Faq7q2rL7n3AtW^57 zB#Tr)nHL#@K96m-*S^KHW6LrQUJ~9;V$mCNcs>2Aq)p8;fUgvTd8OnN#0xW~vG9fl z2`6oR|N3~Yli|qTS@V8r0PIRYA&yx>UN2D%=AIBn?%HJ7vju8FG|G`N_mAS#t)BRgzFm*^Lbxd(n{-X+Xq zMTcmDcJvl&e_&huV@DR?@Q1Y9UR_-^jPAz!hsD*ug6<%BVx-HJxAGNcR@W)8Qwn6- zVUwv;hI%N|&oanQr*1{J6RIR-!&pLB%b&oweunQZ?F=r4l_YlV7JT5v{T8;`zxKi` zaQjnk%a!-FZOS2)_D&tftXp`t$NBGcJCR3PcIO; zOWNHGg-nlrROwrE&9$@0VcAFqnmq8*XXEGT(z|kzpzqyp?z`_vR$rl@yW8doiDIxA zWl2am&f?A5wKkMjO^Iq(aDk(R0<0}t<)gLubdBbDZ8we31wvh0-MN*ha#a!CD=EzM zU6val_b+NaFvApj?HUeQ)rFFHjvD+?np>e2N7+aYVX6c9+e6{P3T6LjW zzHjwUL0lfjTLfn@gw-wuZ0Oi9@)bMcwJ{&k;Y|q!bJ9IHc=bHsTkPnrW+$y|EnPEF 
zQ03pe(IQC)if(**L;GUm=U|49xkPRHDj%muXZHCUF-lGZQ69ftU=4!^oLk!g^kHL- z_bo%#;pUD{`-;(g!e4RN11G$LnV(Mbo>?EJE#!TcI)uWrLggK$R_D55vi-@_Kg5Lu z2i^$_^unr4EB8gj1vdM_yXG7Ztx%3w{%Ai2mDbJwd8+xR`R6~Mba{m=n_PzE17Ep@ z;~XdXI(N=Xcw*RH6kMo$>x>nN9sgK|yBb=17lx2}WapMef9)}oy3c*~Q%gT~WwWI= z-Yl3ALW}mfF(Qw4o}9KnhW;fDo^e2UPAFTgxKve(f_J>!&NE9|h~(fl)Ii3&0qyBzEnb<2^^uQln3 zepyVW>8T%8-|17LL-OXBhBhv|zRgTnsKR3TSc&XVImP*Auc3{;wW-BE5yFv+h4@)m zi`TH@@v_c3HN3oBt6I*k-O#NPfoisS_ofj0i1s>1skPzq77;Y~@Uj{Jnl1O4+U@Py zrMqT8{lcL}S+H$1rXt%b`BGlN-%tH+F@abYHBa=_(3f9+z1P^XJ5)%YHITU>_EhN{ zHGmB_Dk0qd!u!oaQGtAPMk=Dif6c>(T8JrF;$!|4vqNg*c28(6<4*SELkec;YQUtf z|77l9bM7=jP3jTrX3GPdqh3m%-X_lN&a8RtJQ zhX3tMHRR8vHtK`1u?~k+?3J1)b0Cb9x1U+YqPdaUHa+9iEi*FRc;^;#P(?EryU0^G zH+JIqY2S-NJF=vH-qOVi9Hlv4dW(^6;E*JrsUNQ5;1w}&y7-mUkz7yUeeP$1rd-9G z8g|lM*Qxhsokm%zmja4tANuxuJyqqz1ju^uxu5Tlx(`t5TeR5bat$~FyNC7m{!&z; z$3Nf7H^i^hnA5&I;!Dq^<3DKfKYS(ZSM9C|35w}IKi$&mp*?5c+wR040FkXO0@{km zHC1uG0L^0M%L`tswe~%kK8@i)P-@?}9}R4H4jPobl89}At4gMSJ+EaIHtbb5Dd%uV z^;KNvEEj_C(kwC8us$@;Y-cEPPk@LHG;_80_RlEFY)1el`*a3*j{_Z-0g5rzq^Eam z&d6@(s`U?Nx(9F%{;wOK|EeMS$N7J_#Vmcuh`#%*MlXvWvdW)-xwSilpUjmBIb4#b z3OX~hpXWDbCq$QV5*ExTgg47l4z8*P2SB!ju}sqT6=i-k2mMecDfgCP+U*LyFuA|= zXJ7kSy^tRQUEQ=&Zp>^itg|7EPK1%e`v2)rf z`9%v`Y4`4Sy2ghD?k&2;x!UP`EwL9550g3yB-L9qPbZCc3z)Y&y!?hPS)MxYZpyk760rwM$46~!UM2$R{{T;%j0b`LI(+r@_D?4D_jnlGJX!sqM}50qo?_CpbP&7$W{g8Kw_vkaRB?1R z6|Hh{7XLoTU`@t6rgd3FLhz6++uvW-Mw%7642a{qPUx)?RM~8?S==0us`aJ#^YYaK zVkhqFPPO>jJpA6LJZz{w0LAWpi_d-v-;v7;;1p~2&l-)#Mnr1qT8uXLo6vV=XiA_$ z6&$d>noh-SaePKhi5+UPl;%I42#=qbKf2}Fu#9PR!Vj5sANZc)fswQb2M34J*xwKv zAic0iQA{tqua>i71;+&&ie7Bvmn1%rU z;*75*x*b7%%x7%5srqHZ`t~E*93zz%Zk>KXB-kAj&DExZgQmeSIiv{@=-1WNmCk%e zdwcs&L2T}S1fYN6AY@t+`4sJaKIH2nKp`M8WdKVjX?V2}LToc8`QI+t&NDbo?bXv* zB+kj=oeX%``7W_lqQJwh!Y#K*0Ktl0)K*Qi2PF+epwv#w6NcU@y{LBu&klYtf)x$MT`&apJa33t%g z#bw=}PATG#h)JK}<~}E0_rC+9{^=EmJmz%>IApDhj-BOJbnMRiJsp4YR0Alf0VPzl z5F#g?Ux_@vQj!xttNzWIrAalv<-Un7H$$n;P47;U2nB!WnTmawnq!@qzPGf}4vKZg zxzC5&dMvx}u-0)eZ}?WTy?tF)-HbGQ{9n_=zh-fel%AuYUz&p}^WZnT#S&6alp4$6 zmQ+;(fx&xE@8C>F!@2`Ks$}QW`=g+2{5;Q`PoI8tJ?y5}m{dZ6(5;hI(V)xu;MnLo zI?Dprg|p^K$>dsvxn8y_)*7zb)Q4O;NHzZNc?Am*hWfa{32@b+j0AY3{M}; zbMpD*r3u^dXgq*Y{SU4IJ#TR0?zkp&a-bU+$M9W{7+V9l>ov&kpzMBc^^4-bptx7U z;Le+(k*IR|VY}cimRG=#33n>1Ej|ZtAa}^KD*o(T7tiN8`hpe%(qPr+`L<8=|5gM2 zUw!2KQ%rLXWe2QWz|mi_bFC$k=H0FP$&H*fcLo-IqjmjGwBKERi48*;`=dx*su`gXy+KZYNm%2AP0yR|=X*c^;I2{jVOL(^f z_!SEdpWYPWQuNd?Hr%(W?ylS-Z^u~jSKOpu%2~+}XQ{tuS=h;fv~uZWIWf6>g~#+o zxOLt(U7S`uuRi1H55G`UsjxA(8tM%+*XIKTY?pSzramh+LQnmfB4Y?zpjI*>-+!ckK-ds^Q2a{;x{3%cnYXM(Ljd{1A~>(Tn@sfRB9 zY`1*mRjgRjKBE7f`F)e;$RBU=vFUE%k|`P_#5X^sM?Z3T;mev+@3Oew8o_%O+q4t| ze~*LQ-(%1ouj}#N#bO*8_eH$dDg{jA4H+5FCx3P}DwX+41v=fA3@T;cH}CO@`{JpN z!b&w=>Salc{8;u?~n@2>O z#arQ+RbY4ES#^I{-`Ci6L(6bL_NwVN?SP{3_CRh@*D343rt;XR(Z3nQ{hdp98S=(CZ!Bh$v6I(Zs3vBUPb0@sIa?az^ zq?0-WGa-@jb3sAk9|7X=%MTX<$H%uvTVZ7D(2e~FX211V50NbWM5Qc6%0_xpPlwHF zHJz-zoHco>gO2m*q!cxEJ=+931(tVstcN?Ik;)LqP*KE++NYDxye97a zHmL}NcCD${?F0Ly!{HqzW;^7Cn)-@7ol7g(%R z0FG!-^;!p zv?pZ-kDYS;beV#uf_q{`aUiXRnI)Q4iLpR(X4+^jK%|Gt(%#L2BQS3-bLrh45nA0% zL}^{FOCim<+?(=j%6*2JB?n4%fB21debBnTf|y<06i4>d@It`lEUF0~ikzIBz+9~r zl+5gJ&6|Z(Mz(!PI;Bw3li#zNjv(^&ke!S>jZhmx%=xf@v@*9J4uj`2u z#@yC~GojzS_RJQ+K_0YXE)o4kuL@T(@#FZWM#EJcgax($A-16z8s=E?VsM^oj?w_f zdz_I^Zn!oLvKR1%8$Q3tQDraXC6~?;y2_nmhmq1Eo)zJ?BQj^+Q-=}fA@zZ$mGQED z2$SpXX)c zaV<1@JXhyKD*E$;9_jm9Q-yM@u0n2|wbbfoFas(ZF+%Mkt0^h7Xp08W~Goa^pFZxw57Z?U`5#E#h=g=-n1{|WM(IC9^yF*=kWB2v1cBwGJ!q3+yygZ} z&U{3{V$N9CjBfITLmJ(Do))7;d`%}JopLuUv5)!sjT9+81?sF15s%M*ygaf@bymO~ 
z+H9ANG&l)tF*ZHr#lqM!3ieO4fFAi(NLRKnU^8_M$?YWjgnFlrfj%b&17F#h?lO1V zbVnTSwD}!fl)2m<@Kd;bGxBcpNqn}8v00vUPopHCC6$X<;CT(LHSEpkz$QG;I=C_OSM7QO$~X0}V#Gl+F;*^9B2w$P;@)tLlfLW+x-Qc0U0EvgA7*f!b9)*-e0ZcEjo8z{ZsbyJsk@2uXYYQ z_(-IRO z^-wN&Jf1Q*-XMA3=hS=>@L_oJ6#xV zIF-(Lo6qg`)}z{J8YO8nRbg@^)o&^=tl!(Q+gCW_D^_(aN!d6rytuHzA&pWh*@USdMp-%_GFWf^|()sixWkP6S^y~cd=8BY;Fhfz|b8rDt0R~BS6~O8A|}G+%jLg&`s5Uq4^4Eb#;X&>V%6Af%w6fb3xFE+;s#?aw67QRnfIy&Wqg`ZTX23B=*w2cmC(tp2po*%Yg1QSSg0Q+Y zbpy3@EQieu8b;<%wlz{!`C}g}l#^7`EuwC@-*WE`1)7;uR`1j~O~Wz8hHeA;qvdfU zR8BP2KDjAFq}QBEV*AR^7!u^H1}oJM$~NrPdvoMJmL^n4E8Fif$}t@uSKvn1PvfW5 zeA-DD$FS3x)qF5jn3r|Y zx}!-&M)KRGZt3&O`xep8ML|7Z03_`43F@|^ZsKNFlp1X{v}(<$<=6rs zzrJh3=~BTOM1VvP^h1ZWrkpYV){XOhN6XSDs)Lz;>f)Kv=IpX6Gl3Z*hTjUfC3SOtWmdPY1Sf$Xar*o0*=d)`3rNJciMV~H?4aa#ZQs9#| z<7l-GTMe)!!shkz0Ejv9cMYG!R4OXL;OC!Ejr&6B^U!x#Ufk4C+X?HrAGF9?izAae zt`hL;^%7IQ`*zN(x1RnCKL@YTP!uN&eG{^reHMP*3H*>K?K#79ql#pxT2&d>+y0{7GyXkG#*qRK zXi`Hu(p_W}2TP|W#p5q6n9wUrC}vlk5Hr14rrE%Ad2r}CeYu8-7CRpO`>JyzGwI## zGOmb%GNvmEY#+4V_hEXd0*9XGNtOnsrFDODKM(z9585Hn7=9_^-*RK<{!{gKn?w%z z3Zod4h|Ev<5Oge2FZu0#!guH3-? z(9oI%e%!d}v4Wx>Vzmk3-W zplpz`FLHD+4=&-eK1{tv&49!i&de1^aqR>aOuaJVMe`4R8O3kfu+7JV)f8C_o+UEc zSX7}5Fs^+paVf~edvYuJ*A5I-Q)7hT|0C@!!`f`SZPAv}B5jf2ZV3>ySaG)mf;&Zu zYjM{WDFi3D7YJ@I1&S0Y#fm$$Xp6hMp0LhZd!Mzg_w4hX@4c@40227Y{oGT=7;{Xy zEHagT0TI&9!1L)vxNtoq7n#ShQ%I_mdi-1_7;V&-W-!jY^kTz}`2J*DW$DC)Y%Zph zC;YSUK;Px3$xiHN9)J0|CBGQw#7k)c7~vHqM>0?EP;ptkHtyW?7PpL?h6rqRUviR+ zf+C88MaOrIWs3rH4o!Qq5shxF@8ppUV}VDS##W^&jRQ>}NQ*ayGiuOl4=Weq6iAcXNLqIIfWQvpu=g_kF{fSk$vJVqP8cvn?RMyWA^dj|aJ9lWl>kx-8cu}_Wnw;P$({{`Sjgdl529Fw(l&nM-zbIj= z&-(8=)*R(4wW;zGc-3J+7@Gp`?=&QT9t^jIOfu5#mpuE5Lb?9E*?|A}EfFhJw8-Xh z;yhm^h%4rzj9%W@K+nmeNJ$|Fs3^^0~0_4z>(BFwyZYHq8OW@;)b^0J$q{maDqOLgf>X=rZIp9 zp<7@$^$cx1TarUZV+|d_y8Q>vybucWj4W8EKYs>x&3bG zQ|j8OpCCO^`2Nm^Zxpsx-i3fjzMdCqw#v36U2a6UcoAYIv5|Z=f8BlD1YH*w{P=9B z_*s2RNn{HpZc|lA2+5d4)85HC$>lpJd_I=kM^{%}PxgHjaa9~m5iZh5hUfcb z+ZwbBDI-1@@txEs9Jnq9RfxsQNTU}Xs`{7BEq+{2m)=+MO(?FBWPX#G({=KPsk*5l ze09^CjV8NrLj&v%m9Oj)z*4^%^|Q|tYtWuSoD~Z4PYCZH1$=R5jVsB1+#X)XA)F!@ z49t58mP2&&2Kpy>mMuQv{`ubh7x{OfqgOqExE(LF$Mn&WhLflDj&tqGfKkO zb=8s8PnB_KPv)dPSiZQVJ1d6@BC-d4k*VwJ*VR zX__IlH4=AvTC#=2o7o6+;5r1YBm@1^RZ}eG*k=A8C+%_nOhP@NS{!mhdJildE|?0U zuq}N_fCc&i1%#%$cBCrlJ%61x6P%h~lRj0XK@7$(ls3Lq+fzD|iX%e^)tWxhW4+}= zifKl!t&>rF^M}H%V<5aKiXG{8zhovpZEaH(%dA-$^}Z4+JEOktCj$MUb=l0lI)_M^ zmKhvJ4=PZP&Gad=D5B??%TdkbWN(^iw^HR-y_md+P~#In>wE{2>)llIA*@28HtNp2 zhDtQwN1PiCuBpWXHHR$A!#4Oxj~sZzw&oS|GkKK4LyCOVyUh}`*uD|@D78}0kBmZHp7fV|$AMoUgJxSM?$OVzT zPFJS%q*F0+lBUTCl^eX|&=X;935;nQ3oX9b%* zQlJ?ZbQvdMg!n=ESl)_AL+eyL51~FqQE*L2rjY+OosY&v`!fw#u5Vl|iAo*LQb3^gGEG`U zc@S&a>YMJ?Y=pD|<5&V)@M#2&6fDJY9^Z64&JCEo?)?6@TO3-PkcMOoeur34;*co7 z1&2v%ZV57PRTqOGM8ZV#hGc~V^`U$U1%F^viYvK9k=Ekrnv``+RElunpoQ96hfV`i zT!V|&!oAPJD{r+5$mxusFw-rO_vs1laX~-*5A2E;0bpsWa5StzZOBaO*;RrOZsh z!jWOwu)?XUs-bsMLXf6->M~e6E4@8|QAq%nL*5@0g}t3f`t(HH0&AWsYH6rG0aT%5E}UaSbH-Y7F?!;77nD>v%Stl>{M?w>%|u;1Gf52(Q( z_Qvia#!tvxW!!f3U#5^gSd|V96~fN)Snq@b`psmj;|VY=(X5hlw3uYq)s@{uT3wkM z>;kbL_2YKl+O~Xs8sDSl=8LKH1ih#N*16szseJE0`8sU~&^}AJoV50L5&qFTm6QK$7WhEN03zDgj7H${h>w>#^X)TI_rM{JK zr(}He7wuCBLAZ1wel-SE_N#I_Wdk5my->XZE0`me7t5fLZ)j`MBpZi-mCpK`qs81l zft*YPA;m_VCQ(?JjgVF(na4EgpQ0Q`(tY?0s-modrj_EA2tgoWVFc!RT$rbbTC7LR zSy;K&?R~2fz9z9ZcP9pc2k|G7(9PoD24pRZg5qeIqW?_k(CKV(Ab~I#Vpq(!ew87P zYV5KfKV~8jeG;Di{qGz1f9X(vhd%fRhX#mp>_r0N{6@DEutO+auFu$!c}4?P$FLLd zw8fl|7@dt=A$%pO;!``3=jXcN!PB$RJMRb@pNL0e0YBV%?6NOMxX~#lu>o1`G_fEb zpF+DmaF#q&Xj9-%zddmMfNCA|lnTzTI2J{mDqm_ioR}P})$zhlZCt6+&b>JMO??4n 
zNeWI~Q%WM0A^$1Aj!!9*t%KJw>aEdG=Y(KNIdAuOqx3RH^(Yhug+8-noxAMUC3g5 zw41dM>a-<}Exk}{c23$c*ZLSD$HKNWP~sNfl>OLuO>Mh5!#NZC)xrMb=thEQ%8Wj% zMveS`3%vbniMNUp#R68%u?F_Oz9BOT5sBh}OUQ@y9CnD`nl#^@&7xN6F7F4Pcsd7o zD5SsG8B3sTfZJ+4AB*|LPWPv@oKd zH8bgw=w8`V>Ty<* z$nb2!KdD$FcBtho7L@#Z2^0*81>#T1jgRtV1x~L58jFS<&5NbvlBS;GbQUH z{j;+thn?(~Y@6r?AyaB+eX%|kC9SMp;#K1~W!XNZ~`N{_L z`H)^UOv|jhS3(MmTdFGvzEgTqHP}ToWz?|`#AigJFa>pC%wji%O_w1y^I?Pma1f{j z0#=K#{0`WLF>+mW&*IDXusF1(Jk7Yhkn}e&T1!uR^i;xm2zN~(Ey9-yB(W+P5O0Kx znU8gO4!K@zrKn70u6c+9`wQ+=C$ z|845i1-a%HK$BH6> zwTjeIl<{vqS9uw0X$0R8@cHZ54V~7@i=QvsJt@3w-A_3_$`z`k@t-EYs`US}F|jE# zVenU5kn6%U^$I5Vhn2AMQtVcWYQZgEiEJYi`z>T#f6&JVX;-&~0`*sF$P%;kjnm!o zYTX*DPERsaNz7~KF!>`1W4(A-Y`=AIQN+W5$I#&PhJv@Sp_0QN3TEa=>77zbBL}O2 z2`ANIPUf@-B*tuE>+Do%?ZP4-v(FZa$2eg7&(bC^*}{wC-)99;wavvC=0W|!JnZ)> zDDE@1haydl#e4Yp`i4pQiC`0ia&LSZz=VS;^%jDAElM4q7MFc`{$7&XSI=>*}o)GNiz$9uzy~$8G-6 zixx`&l-KVPL)w)YT24_%n5S8d&QNeRk*C&@u%i}O%YVAjaaJ>hS7?jhMZ~cJJPD(J045cZYv%f~7nC5**_er*fNowrC9^dT>6B3)^qx}76DC4De2b1=Sx_|oW9 zfOYulD0sh)JNZ0A@I>~V@Vnct98^Cs9E4JmzJ~6AnOwO?HPBo=|G_zZsET@E24A$0 z1Q4T_#yP3;bBCw*izupZB3|%Nzg#|@-luD|U!tsDUZ8ON9Zm>-nU$5@t#ZG~*m|4r@kCbuS{Zn=q9DgYq9!%-zE(V zb3WNDrs-NK0+JnOs7ubN#ORtl1*plN5fN))G!}G9 zKuAMpfXi@DSxQMl`oeByUDc%IVm+1nFP~=jbq@xA6SOL!4;hRoDV5J;px{E=zaJs} z{83=$)m1aA&J`g#d7G59&<_S_K-h{8^9M@-(ymWc`$W_|LL3~NaeQMk?L?9|xqQpB zgkdCiF$U7o?2~s2oS5s5E zwF(Vfy6 z%0&=1OsWEb!#7iD#4W z``;LJKWVWiLy?e<4*RF)JX{dhnpE}h+87zi-Ijq%-4Cverw5uw<3hpjYwLYl(jy89 z7q4bL1m`u$+yz;noLc*m+a(11E%cJi4AH<%)LmXsD_j4a{=9&}rs^^|m0|G>Bd|ng zUc>5kvv{FA>-sVPtiOHAMASj7HM-)9`A^4)&JOW^VAmdWN_1-|nfb z!j|m(|J=tM*9G`NePF&lRQ#8!?@uZV|EO-^X&%O-u>hBW<{(9;ApW3=#2hD7SF zpKi~7RRedet932IVO6ueZb&13EqWlOeJW#K7?6Hbx>N;6S@v_>5JKl;xDp`uKrv3e zI1;02=d6i~3^y-~vY^#fsmHs9?y*gE#HC*}fwWIdo~N#2iO>pzO~kz9`L+^&y4f{~ za&#PtTrf{Pibi-hAvcQ|?(nXEzjR6#;gRVhJ4*U z3|@;b%7=U9+3Ga(Tb%q`TD01JTxs8ykbHeNlls6N5&TD;BrNQ^lo^1!rw3t}QOl0^ z9f46anKLq%(nzD7_5Y9x1XL@!x&J2yh{F9Y+LlFh=8R`dQOuz+xxUK3*v%XyJ-Epv zEuAkJr@qg7lRnjC@4#o(D{1=#ne5nljDsc9FCZ)$9eEttaQ|D9?V?o%D7gH!GZts5 zd(=9-Lrsw)+Dsz{^c~_X`D`h+vN}j(m)gPyp8m73Pg$awOvI}X>zLnU;=t%U# zUPc~Ba9g*(dry^&WM`bwQ#lbsN}$;_Xeu}t31>ZrMd zBmGM=6U62YdZxBKx|<;2y=-(+>>Q!0ZImS!#sfOQi7SCvs4HP4;5TQt_ zj%Nn+3J-zsQ?5F=O4J})H;gB&dY`k0s1}x2>Sn=QaJ!uI;(S%WnEgjLbd81TfUqK5 zFRL3y#iBk+yMjGk1gf z+5z^7WdClkE!NXfWZ}vG;+J8+Wr4p@0ffGlX!`$F9{no`K^5gjw_^OARaWBY2TD$m z_=ykvN^~XZC6*ZYoc011FXUFG*f-Q5BG*jIgXz6IJ`}n~8}!!F4PP4A!t7l*EiXn! z?{@$LOJ*bioa5y1P~Mp*Y4wS>3HrF-F?AHE6>=flBc3%fvSX;BY+wPk*-9*ulHME6 zm9IupT@bdS!9?IrOeR72d`p+3iK_?)Tb^dgSGz-D2UIpyu(v6_zMC7_%%oy{EiXKM zcO{CYP3g9w5p`!BFj))%_P+K%sc4w?>nOmx~#N^7|lnh(iwnPZrqj>dD-in~oZ{J8) zSVKyw<@;p3s}yJOz7w^5_=EQg<{R=RN6?q@XILj_}w{YLv z+QLx8$HDWg4izBA0Rn=8-ZON+P~1EJ|9QgrZzzKQe$vK|QNV@qV>iyrN&Gn4F*+_e zDaBYqI=F1TLl$lI3)65)7monBykTrQb3A#^ee;MY10ufYL54Qy@8Z}Kp^K0AAKWzJ zhJtCOdX#?2l5aA!L5S6_P}HG9Sv)gfZZSp&Egsx*dIV7#HKO{Y5@cjEh_hePuXgaHtj|4h=C7;q`uE1wmY~BGqb~Zv_^hM+x_Z0CbI~&$ zEVJu6viiU+R+KzY6onA(>Rt5xYZ?7-^ZoDDhvjjrepOWimB4C|>be^&(5-p?eRv2N z852UV^?J_}FYLFmlU|;w=TLm1m7rMnRxejjf&l|m#&hp4YP*f= zi&BE+er2|`0VS#&<~mHgQt2k85AVB5_67=?%Wf(MOR>^v9SIODm8@`~OksO-Cl|cn zU@Z3&sILzb%VS0^X3KGAOPvAP7O*TA##5~t3(75iE+Gl}Qb?`7B_PS@>@9E1T^u@t zIsHy9^Y4O}CC#!Jo8hkQj*hRWSL4P19J0wB+r0fO8(1B_(4n#=8lMQfv}ebp44s+^ z@Tl<4Xc}#;Y~!2fZGM(_eb6VHQ6z?%9e(3;H{}qL7DzaH$UsoBQ_y6Cq{BIwG z|ML%jvpy^jXO-R$ZDT>Bf`gce6yRiUmzlGQJHO~U;&~Tp05b*i)Kd1D1;Xi%)YA*& zZ3i3gzTB=bY7}lT9Lm{9n;}V{Qd^wj7STBg+z_*n1Bs=m?B0E5#YtIi{9`v#+7zumdmcDO^R>kA(NXl`Vk+~QLW2bYhQxSy3NmE(&l zq_MyAd}2SQ)2GCaD2LAKjd_YK$~dye%VUYxkp$%s0^VU}^>S^{zc~%r_xlyM#?WI? 
zXCAYf?gRNmhixOqL}xwT`??kd>^=5+h=yDT*hhP;j|!&Z;oFglr1D^FtS+Q)jX;Hz&bN4SsgTta^e-7g&f z^SDGeGyquMX3DY4zWByhyi_F`@K8&&8ld&`^c=Qh(1=XZ6W#wdHZTa00rLL?R1C$; z#y>E#@ryBb*B$8QpLfWiuT-oj^h1A*7u+`TW+2+Zc*7}Du~kcySwLaR+r+h(X#xZ7`t0<6l1`g{VQjI-8A!SF z?2}8bEt$rAJAO6(+Gp+_sRF>g^$nNJH}X%0@W_X!IjoZR^3G%q5pj=Vn<%a~x0ODn ztGMsAX{Ur3Do`w`Fp~D-Nm*~*;F!qQ6^|(dDPwQ|BD7J|G`B^GnEDVGR7wGR3rJ(D zL5t_^QBtB$yWwPH>XJ27=q%O}QHP%Ubl)C|MpRQ3aJ=nxa1an-5mc$klBKcs!K6R{ ztd%=7H;llmD`hQb>16gus9RZJuA8Nia~vrW=L7E+4?BJo3`Or@199JqaJRrslgc&! zQkJ(QuB*g@b8uFf4z(EJjz)Vnb)~IeNfV8xTAkA$>(k^o0MgGYCX3t~1NTt5`|<5! zXhv;K4Nt!e|Ks(4q~RD&LZPPX5wnpk5Y#*c-pgmfiI64;)%dR$-oVjG3maz0v zrK}|ey%H%+sB>szD*cPaSCK|mmieshvL@wqPZx-bBzi~GXzSW zK(n!FO=ToFtvDruD~k&9XWAW>m4$$&Ce$h-bm<7(f0<*bS@FJAQP|C0IDLi{QPWw@ zL9dP>DE|#;QBiCYJkpV{^F(zpIUyn?W)Al{<&(x}V5rxZ6B2*E-a{J-{etg++;c#;X&Q*55tPT3pb@usW!(G>-?W6 zI8j3%{|g1j$AmDCsnNfC@vK8YOh~~yw8~jX-dgf#V_=OdW60D;FnaHk^6$-fOkN4U zU$nSgg*{&AsX>XTa4+-}YURyXm!^Iw3ztCOIe@Fu*PF9eB!<5#SQ~w$Uj$tqwMKXW zPa)iTw8Deau1)dJLMD0BU=W=p8k5}*`nJzM-c5XC*Jn?03j?B6=ZwDZ1chYj%Z&-S z$BMXCTw@DuIdcKCpr*JcvJoS6mti4P9-sWE5(_dLWrETX8_0=#ea7@2v^0n?WdiX; zm{nM#rb-um@5dqqZS`?04e5oEFuav^Qd?pQ_o{2>_-WY}cyX^EO?W>Fe(bwVw^Od& zFR;vcJRDaUvC1uUzJDAr<*cwZA=oRfgDDL9td(Q&Q5p~|K+?kgwXia|S!4C8RI{QI z#Q9Vg9|Go5TYN3be0|`6ZOc+?g{8{U8bGHP7*ka=Yq+;IF*7lmH1RBa!8&z1i({W2 z_f}a`#nm9^`ywiOXTSVUY%tDS^Iw#q=u zlr#D97Wb;i6X#ddJ?9Iz98v@HmXQ`WCpm!xDdXPdcyMaJi$|e_H1l`iCt2aky?DP+ zXxRiqoHZc@Tdu;#xFKs=rE&E!v8@Sm|Eb6_)@;?+1^bC)^0m8Nw>zP_>g8*G;6oGb z!}j3Vm?Og#8ktiJQfj{l3=#;%2;c71`?dX0LZZV+4`x$WmKQXfKH?BX+(|h!r$O z!jPJIziwC(LKnQRcN^UOC>hL2K?H*)MKxd8(jZoK9KM8Hmt0M=^7^F~3?Q+W-t?ym zK?L-geZo#=xCC1yJg0%XQ^1cuWI8y;MmoX{m-|(I?s31au`#&a=4>$N+)4;@zRXzc zyt<)(HgMU!Lc2HD6ezjY+V6uJ$*QTT$zc0uL7EW-xb0Mgtv8jE@O-e6p?w-*S*emn zG#I=cAsZq5RoiNjQ@vB)*f>V z&f*`DZ{W?YQpCyFD*2?Pk^?T9@_d}Sd6aqCh||OuJ_YSRYFp9}hOgoi@!zr5v2&4` zqhaWuo`Rv{$Fa-&ZCB8NqHzJiRw9giqZ472c2lW?qgR0e@D9&R=m(>0?oO7Fto0{1 zDs7ta`$v)1#Vd4vgjh0l#32*XS^2Hq^aRmOhckz%0sg4v!4p(w$1l>PpK5qIe7I$M z%_aVo26sQO6E$t2o5Gc4h)AA@tzT@8hVh2y=S!cBN0Vi`O%WpE1yzk4N}_LKV* zvQi@Wu2}feN!m04CmP`ujGttdn0Inj!G$I3WURD&vK)5ENpr`h4N*Ei9>4g_Bp$K4 z!}2U#Xt_-91pj>uex`Da%F^#aUoM%>t&YO57m0j_qoI<9((G)zB6Os?GR!To7OA9R zkWgyG(Dqf9hTUFp#$cM_?H2&ebb8r>+I&Gxh2;KyoFo}G9~lF|e8itWl5SAnttj~) zhOnRMHEFN5CqiZ!FMCm#`fp40Oqd&w1GUBf9=!SgL`?qY2M$hYQN7gmVYb0)3QS^2 zRs~BOcv}p{wTf!yqCCsjr#~&91|Goh1%EJ+YjcT#^Jf#309?$#-jf4+ybpMA1wC5Z z0lhc{1}vt6)SGfGK-1WlY45gVHDqlfCSA!FJmcCJP`eCp0wPiZ`6)S-W^72??iOc0 zWF{!j0Vl!|o!}jz#tQLf1ekc_SUT(j`aTlmh78Ne{JzF* zxsJOW94r3umlh22ys3w}Nz^6oS?~r*xgj8EX-^YxO&|)SX#rJKO{cl5s>>Q4WlfM5 zHezMFp8#rh^a;xrxv1)(b-Q}5=HmuUKLd2CghJt!2CTVI_P|JktvOlhEhL}us@NC5(p zFfh0_9hX){O-X|x10NSh0GwsH)pM@+ zmzA46c*A?&5>Uv{^vZk=oQQpt*U+SI^?m45dXKFSInNlJkbFW~s>yvMcGL^6KK9Ca zy+H*n&LuEH_Y>zPTbo=^0>{xiE)&yYwFv<3c5&(F)0eT#tK>v&%; zW6XQ|hE~C5wBllXqCyPu>C3wuek8`+boIMv7}C~)anpZV#g4jZs-3uIb3V4@jd(r> zSmB9!qL2beDu~5x;?>sBY}!$OK<_S3&I7Qm@OuS$cYZ$}BaUK9X6W`D|G^bT=^nBk zitd=X=VFRz(aDD!nSj?PR@1Mqw>iLkU%#|v-C>Bqz9e+yU+mxS22$CLIVcZvF5O>W zio4#d3ovCBZ=EiGZlW(Kq;Q<`Ww^V*2y?zZGhsh^8-n~%WB9ggV5Q#fbYn>S4&rP* zYg|+jndlkelDb}2=A1}F)pHcA=6BLATmIV0`95X8YhOpvwt?h+eDTiRZ+&)IKuGKp zjyWBUAQKho3sCPX)IN%j_Nq2wgPvKZ789IHzlUhy?@A^8_9+;E=@oNGy*RVi8Qg{A zN;g|N0QV_pty_+vyVd&$DsLOK%&{bIKiBmN>HLuH+C^3dleWRRNJN{?)4kPf@sOf6CJg~upAk6%~?4m)Xe_k*Fdz#P{uHf z1fi#46B&O#g$YZQ!obi_-pt7t!bKEWF6}iejAk-a2!Ed+r2aQCU%uvoR_q8#xr801 zvwwK(et{v^clWa@Y{!@VBuq!b`GDA3$4ccXI$SyQX9JADmq}b?bm-ASmww@nV-O{2 z#z}o4WcJQULg6FPxY%1=8$W9cws<+3J6Gl7!rnXulqP{naq|PS*FVlRn%u! 
zXJj#_hIaokJbUcoN71->I=yso@aiMRGASkX1X!FtK#9;+Yv$2N#ZFCyLUHL-q%yeV zX)Rw@-Qe*XACZ^cU&7Y?W})Wld=>s}jq49SJ3LG%w&_Q&%P7jZBt1r>*CAP6;Dpzb{68!m>)nlMV+wEP$>;eMr3og|J7 zo?x(h%`OUDr3N!9CG3pvm0hPhO1-QvHsQ+XvLA{5HTjf&5F1`H{HQbgl>0R$cRG-+ zt}EBFWo@qf)O(10oDyKW<*w}|WCtu&qfGdi*fR=@iER+eq!m5&#De71eYEvcSJEoH zpnYDwX4@hM&}YC)%7iis<8!u z6ln5S55OzHF3>nM8qY%zPQn_c>qY8w zHfJ7c6Ga9m7Z;cOMjA0cuY6~eB;bD*9{-)l`9HrqobV9eptE+Ou4vPs%r7H!+h+Hg zo4m2_BAV2KqSIHRwd<92_|eoX0fARci;?wTgmC**{W--;!fq?@ye%X3U^F*Eh2wfrk^L9jZ8si%+Z32GDEqzzbrg0Ki|FSoIozb%|?j^nuc<(>e z647Padm4r*$J*ewJd`!-3m6u`ni`vQ7{&~g)zq@T!J6^t+rgZk4}{!_>NxT}({j ztn%egy1|4T>RXqjle7 zW0vc_G96xJC6FdOa+6Sc`6PO>&6+wZ;Z2V1*V`kc43K3yV@2?5LqrWm5sI#oBp+ql z3q0{f!Gc_*I7>K`S3hJD)WrAHJNOqbEl==1PNaMOl($b;O<5=~PcJX@E;g3G_USn8 z{tf$P7SVDSc`m3_f0a?ZE*vv8?GJa9atbqmv)cbLv)jQDH zEPT>qY$ro$a!xMnhFL!ZQdIJa<^!P7TAq1b&t(uNri1Qum_`Sbt0!imLEchhs!cD`Qm z7=Z4o0Y+^3Vp(MX_K$fV5qYucIe z^IFy{6Ot!7zVhRaVKa4of=bn|1_KaSuKyGeW5EbO=`It*YGcaEz<^N;HmIyVoo&)8 zBdQ+@uAORc2a;|0c{xGYG2Wy>utd0>u@T{3Vg85=^1E1Up&X8WPgFT(WP&QYjsB$U zSD-kMe-mi=#`-Xs!g_!(USrRT-1$=9R;poIY8xnT+P_Bu7^rmrOTh!TVOAkTGWU<% zyDV$JqS@%Xx9=ftgp2nJxTe(xon4o*H+wCM2s~gT;}YfLHJZR1mC^lE4Hxfo-~GZ1 zZpxR-Qx&thHTu4LS3-`*?LvCSUzz))g5r6RhOll72ygzy-8tv-S=Q_e@Dkw%D4Y}k zmxJ(B%{LNAhh)!Xj=866I~LuY{aBcdEvCQLw6~2OsR4elsn+dW?UMDoIP7r_2%yF& zKb2p+^2nB-IPT74QN_pclDc`0saDTl@7+4` zduMShdSp1FBmI-67gQAE5JeLM<5#~XTzA*ZvI{koe_id?t(EG`dfMYw=~~~fj9#9J zzG-t3D%rY96N{YZH{9Ocv$|;>(=p5!SNc8q(8o@C4KEb@yC?V_tGV={gerBs1N}O8 z|6tk%E&=!&E?v3zQ~r5e+BJm!E0Qc7+AI8ZYEwoh5HG^b@7{ND*_#NLS|t1@Ek50} zfuUg+liz)p_&p%4AXvnWwR>5O5{9#c*=zZ|F&^%6x4WWWhcB*KgG3gdSq1{c5}Wfo z^G_`a2*x~LhYM6K`Z8z)1)MAghV$6Q!Q9TzvM%x+hw>?7x7}#B{9WvJ8})dF_bu{- zqOI|H0C0{W&gJ~ry`qMb3diW>H;Y%qJv0SB6MR8w%w1F~Ok6+fi2LS6ITF2U>1?c( zmPRK}Sj4>UgJB*BnUmw3dp3?L4imMwA8wt_;SgT9neN?W&b@aO)3Sk?iwK+p5kHl1 zFuBdQ&HdTBvW`?i$@lAEz_HP>?9Tag$%;LOD9e1UrGw*UO%)Hg1RUgEi6y)1ewtA{ zd$(6q6fjn!51Nr)?7SmafK6!LBgw>ClJ`@;mC4YKug1xzAag~Pgi)&zdIC|lm=b$6|t^yysob2qu%6@n5_^3Ml zs?$g;2&?IwAxouIL$3H2PylYE-wg%QkxB7VSA->WhRT;_=Mr z)FgdX#VMOenH-LRhu{SJjoO)B@7NPY6X1o~gnQv3>63Bi*v^-Bgd25e_793y6qmQ~ z5au11jP{>09uz;$KS>@Q3fnyO0oOrH;~Q!W*wokJL-LW?V6-8hXupnQI>5k-QF;1N zb8dP=YXmopM6XbS%@*Ly-E1jeqGw#7nnTU4g8{WfsW0I&wMG?q_8+;Hufu&5T2-Ju z8$8I^5#Hwc;h8P)8((Di#twnq*$0u6Ud=)@D0X~!=cn=Ob7>pNlUMR;|9kNx?UMXQlZYcu3Fp0e4{JVczCBS zSfyUsp*`B1UOmERM~gm*IDnU{c){wpq$@|s*R9afSMWL?SNt5oPkoWbTKuI@tm4x) zQ`JPkSk%?P=Isc&5f<9j`OtWu%vIY{1tvuJ-@91-%jGL43+e&o+{$n^%!!rg54JYs z{z5Bg#VN-uSZp)@j$ZY+r9zLFBPttgVZC_fx@pw(IujQ+W=Z941q0j-i4znTjvXJb z(WNEY0(x^qbN4||-r3Iu5O_F!uN4*VGQW11|yt60u~>)^7M0uZh~6+`BMWE)p3{k z_?IS%-r$?RcK!-U+p-eB_s&GQ|7Mo z(&4;JlH(=odF)#}p4Q}DRZ_p8+=4rAhb(W3*}ZeC&)>FmGCe!Wr$o;UsuA2*-Y1q$s1On>R0Ud(uW<+0md`IVzoJ{!7VJ9AgscznSlsQY#I z%jL#7!)2~R*K4I!)VRY%o!>ON5iuG9?l7jTqQXuvkDl4{Z>IbIs1ro^AK*F)Th0@1 zz(yPkkPJ1G49S!a<0c8G+M2+A2{g={M!pe=l+3Y3-xz2QU8Qi&F1WzB>MNSG-$s< zNVn+DUdTT|e2B@9q|~U_57~5U0uN`#zXhtv!R5?4^Ni}{)74XuIg;6~Fge)TcGvRHIuzZ`_nxa!UngPfdD8oJITi-Flz5JMJGV z4GSfGAK6m%-V2=5rvS~A#H`1zFY!`+1}4>%(yk^-;l2t%2m=t@Ci-oiDtjhKk3Bgu z(>QPJ5yRHO#;a@Atj&!BW=4zz7^Avvg02Fs?bwGewEq9$b;tRDsBuzNlc46c{_WRA z#KXu)#5dnxR?-qU1C@r?^o<4h20!*UztA|0zw>x42!K(!Ov`H#X`HJ{J#v+AFC;LgIWnd!83~2UQU5 z3Cq98_w`&p&_xh_pCDJ;jB?hC-70m8dF^~QD(`&O8?2i!;C63IE`EKmEaE978^uVl zAkP$9=GlshQZelJ$YQB1I;sv}MAAai*avG#nuv~j9Io7MGmPem6ses6jq z^;dB1(DKI-CgWKTRUy-g?b|BK_9(2gw%z%GI%M7bjFG!=f(qlo)`MVttJQb^A3XI& z9uNJ(8UN0$KMvZoeZ=tS{h=YSju~3?Q-MypGk(-7{6MXX2_k1@WmRod30;@``Dd8{ zodU`x3M4!7G?n4T{UuY~T3;MWmqrinaY_!63c~-W>R$m6sxujrM8DAttJ(#*gIUo0z{e)GyEa3D)pR7^i 
zVsPbZ_wpHzMVNxBBM!e82Mg(t`(C}$1_HAf1X}LfuMfr6#h@~d3tv-~MqYi~@h0@# zYbM4(;-6wj8x*|(3Pro7TaRrp1)SR_iS<+OkF16?6gtrM0ns}7&mfijcAIgW&WzZn2)W=NoW@3MfBx7Gw@Ef123N*4KZT_3-5ZN%!fr%jHPjLwx|`h>7T8D z`2IJCVb!_=#qWrC%=3oG1>x>GBvyylId_Sj1!D7S`h?e0fi`n zgd$P`2vU_QO?pr0f=CZl>4YNE1f+?8fDq{rnn;t5AVqo$C80>~C8784;(niZy?-C> z?@yM?b^Tyw&g{LQqR)Ew6pZH zX?s2g9m>+nQ7rt44`$0+AKVlZ6gTT4IUp*a07OjKh}(4%LP~GDZ93 z4LG5H&BlK5UjRmr-=Fie;lAZZWle7^+ZlnUp2c;hQkxqC-;Cxem}b6_gKf)_1}zWQ`NAvdG%g}U_MI$>;W7aQW6E}GH%`l;6)*Oz>U9@aOLq??3DH;?y8ucK6=c~8AMOU@UDUM4ukTx-`|A0mx(E!U%JT8-xd-w%JHX9D80Oh z7FH_1*87~$A{$uX-&*sw_sh%5?DptIOT)ue@?Z*9g@~sY zCMc)I^R^kGE3$7ii}W&Hsr3)QvwR>dgfsR>v#F09lA1T8>EQ)!D@)8EUUSgVHWI?1 z`G}63GLRuE_#>3$1C=P+JFewzI7&!d=k*CoS@f>54ul!nA^f1$d^$GCYk0)Jj!4r# zI+Mnd{{hbZ=?%<>nlz1NjE|T23BIx#cZga3lgFjgGqIgdt4Y95?;Hvr+aFD={mcUw zbazWw7jYlnKl(kBv{z%rzs~rs;fM;DD)F*}>lc}&f2Q=WH;9786NT+`NhfOm?krfZ zTV3pxCG{-cA?0D^tF23_ZZ+!^*2^Kfeesv0K9mb5l=Zs9r3nF`5eIB`N zz4+Nq)l#H+sv;&Li}Z+bGbZPQIwISIZuFaFl1O#=mX#VKGl@Tyc;eky0sW>~PMxUS zQaxdvefiiBDrOq~1R@#Xp#-s^39SYzagNpfzrW|v85w|x{-B8*>R9VoL+&n4EeJj! z=Q@~H>YzVq{1j|l^?nZh_On&zCSfJt#xNECNTO=ih}pXGz<<4|+22uVZ93h3yA|?D zGdV}LLr}4_hD_{u?ePc|S+$h`eV2jZ*m_!B2U5Jv9G7&X;)YGFaN9G24y_~UW#adj zOqhIbUocLA6%g0H+$cl85zdxh!D?uw3fMcIKCTlU&VLbY1;ECv6=1x= zuz8Dws_{u65>_X7`Rf=jIoUzwE-coO#&d8Lp^H8GU8- zXFe!l19VLQ#4HfwLv}~NC|W)nvWd4W7l7q5a;we829&nGiIPWgE*>*emEq#VY%cVjKG$2 zll?1flJ>%P((_x}(LetK5F`-B|Mia}?Kva16g4JJ7&Z)W(7y`1e;x+Bich0NCr?Cfs@!x@x*i|WpkO&8Xn17*`QIOp$Ra?>X7@UPPA%VZ3jr^JP{ zWWrCM0`zt&PRVyZ7ytdB&p$TQ0Kf|T%POLw`mXyU^z;(*TKKNMbsC}=ImJLu>Cu?8 z%hUI#7z5gKx3zNJ)byhxc@N;GRuT*z-mg5o9J7OCP$Q#Y7tN4Y&q2WA0z+wX-$#>L zNKG19p~!;-Cq%#JFHA9q&vf9)qUP-evxUXAg>!z4nxzrRZ}Fr=-f!7_?OZy=%%1aTJ>Rf zKJA5r_V3I+V7BnSrW2dp*W48|$HgvDp-T9PBXcJS2v862hf(uTxOkt3|K=i5T=TSafw?|#k8SuzW?Qcc#p z;Y&TW^}!!#NIr-%3%sRQdD@Z=%$ZJM>+tIY+j@Iw_ypjeWV*Se{lQfatU%HPk zxNd;{qrC=T%R?3##?2{`nq~Ezr-NF4Oz@k_b{}oOL8TyUP_-UTcyH@~s2Kdw@|;*1 zoI>6t%{C`g8p5mjj_nzTbaOho-$S3{dWNDLRy61HUJY+_#4=9nIa|kDF}3*W3L7dJ z!Y05IqVoJ-Rk$$8`ZE|6^T5N&*tPZi?ZOW~REfpK&)IoWXfFKX%>G&7(&fv<6RO_~ zoEKC|_=|4-_#G-PTn-PxE{j7pmm*2ymfqGBTSxjHT*qNY%MgMU{t~{H&1QGdmv@4$ zY>BPqzE-TLyn;_0dzq2qmchS-Nsc8e-j`e8C18=}V z5%YMQA&gra6L<%Hg`lj2c2uV&+z&uYC%k`ACZY9#RY#U-vUXV=eN;$ud3*oPCy zL|n^-Mn}f7h^I8lAXdQPHH2uIK(w@x2OGZqMOFNqkJ^>}S))$*)-2BaVV#5w4{jI~ zk@o-)a@T*ye~{ZT|0nuBoJSuaRhQ-B(HDEp@*!a@WHrW@d)7JkqWbzG-Sbasu3b)p zO)-*#4w_`-$Vm?Ru1F)k-`IB_X=_!2OR?WbPd~f%j4?bqfU<0pSA3$ET*XcsPSdnu zv)2%PE44skZ>t$1ymE&~@~)_@qsCINi*f;)c>ft^yADsm7OVsI|D8az8|8s?Z86Dqm*q5#$QG=G*t%Ev)sszDmi{ zy0=fyzBv`>Js#5}$;N&e%!>pk=vR;4w_o#d^MHJMEqw_PCGXm0Q0o;7UAHfm?p^YW z)C0n;_1MLuLWHY#UKi{5k<^R^PNqopn1GPE8{t#A$bY7yAim*Xuh87?J#Gl7T!Ycu2jKSGkAz7zYmL zmm8^Zu-wxU1Xk;B;Wa}c<3OXA_@o*PmS!#o8a=35Tl=v?+F&T-DDiIkEG zIovN!=z@K0Q@VCQowwifV@Qq7Izv%`HQSuEj z!Aj3Wo|2t@mScCJxi16*0(;P77=H|ENi4^>o2SK4CZmgX%lo5(qeWvfEFsE-VlW#t z!e}|GyDVaMoAW_O+V_~(_L*3BgQED1Yq)nYMJsW33-ul5sF~xFtrPoEo8kw=6z;7= zNF;KYCYq(nv#{D6q(mD0z zLftpF?*}~bH|O>Q_-KJR+*AdG6N@K)x>+OvC3#W!2GHr}H30jCQro|f8R|6ni`E6E zvpT`F+wyLBm@}+FZV<#~otFD4r5JT*ByfD=nnJ!1L6U$^hCcRufM zYPfuJ<{|UuBk_aXYx_ZQgU~2p#L;$EYQi+7bx)7iev12_h(h$a0;&48+JV@|2R1F7->xrq~gRrAF9#q;qnvnWQtrL=;`Pm-Y&l z_VV$eFMQ~vHBELL_L}17j%0D>d)$Z)v2*xXAm$}`=;rFW`uFeOg~UR!dc;v#x8Z*+ z>HiOJ9Gd_jv8Vnr<)Gcr#Pvu91%R4%I}$O5IfmB5jl2u^*TKI_WR+JyLTFC_~1StfD92`do~!tJmvH z=yjEBKIE?Lq>on}{d;kofx)hE-2pA_#}^>>-0x9nDz3>Ue<`C7`Mr}-9vhT939}k! 
zV0wxL+H=+<-EfK#x!pF8M8@3=j#fx$xtUD2RQYeS~`^w)v_Z=$*od4g8iUD zt$Dq_%FFIea+pi7+orx<&)i^VMd-%9?s|JoEkhaYnD z`R9M3L@7js=V~dV`xAwkAS!rC0yy0dj5Dx6yHCAIsnfZl{}C6!6K!0Ihk&nXHh+`d z&yHUfE&jn~n~2VCA^hy+9Ic2ism!ceFb!YdNYG(Z98gYp7#Qk&1IMHcn4QKtB{wM9 zO9$gvL0wV;%z+`i1#bJBspiN%&ZRwRb0%_V@jn@=MnjX#@TjQr_KNY@q43Kk{|jU4 zyM}PkCik(!=lEZf+0V(UomsrH%N)DOCz2NgC~LC}ECbeauWmI&PKzJBvLg5035Rc0 z3cxcddVeRr&iWw*ID@POh%$ubCQsj6;Iou~E+uM@eYxNzv)8~?EZwT)k!z)UV-fXd ze@?xeb6OMLOCWduI{27IiV|mBQulRw2%{#Hxj$kafLIU^3L!ee{qOK8668rmzc`q3 z`}?fSPNo=$Q3?6@v z`9s+KZ)tuZ~H_uSd**F7H&hQb7sJ zG7wwBSnq2udcL(lB;(83t=8Ovo0NfQ#ahFz?cuNrou>*hrQZ7?z!HuP*-_sTW44>3Cc>xIKv$r_Q@qJR8S$%zZHw8e`0Syt|VqXW?Iw^@{PHi==Sz#*0w(7)V zZ%qZ0B9Fa{3`SQBqWAJObJ*zYum8j0cBoRR+UJQN!a$y6_6SQd3-ulm}PHv45jXXx}GncA7ILx}9R5w59vGU_)-O@0G~o19leW#>=nWl!kx%eTO74Xr{O; ztiNTeQ4k((g>(}N2oZEEx8s@U6)#VL-pTZUU{`{r&$1G!;@dIDRiS0PBmzB4uSV|f$MNgY z5+jn9Um)v+9^mfP9~e|iB#)k?KU4xn-bUS-)D4#{& zro^`4+PPA`3&i~dQDou8lDG2>R-p-Godw67oFL##;B`YwWbCf{xB^4qph`_WO>5r0 zokQ&)KMQs84ZR=pR?_4r&JM4q3l z$Q_z^a9tE^|5Dxsu=bj-m(fWTQSvvHw$&P%v4hHJQwOo94G{mvqAyFx3P6%Xw#kTs6`aKQtC< z25Rlb4n~v9_{_6qk@{{dyP-;Nw%mT&8kAM#H|Omc-!I^kU)aV(j7b7(@`9nq2{$oO z4!kbOQA=r(#_^_)`d{pBP|f<}PJOF6517bJ`I|~d=^qrwg^)E@j~W8qIPi%5?D|PtaM))VsW}qTHOO0bGg*z$ zeu$+kU{muJrEjh%KaEBOmK}wZyWXP5p}2?8G37m;9nPZzK>3=kt_%7gZi^oxhbZa{ z7sgH7dsTVe**FT{O*XDA@f>npDYx1%Jl^uEc0CTmy}bw6t44lDdmF|pbAV{x88$Y~ z<4*oJi8}jvqW-AkCw*+g0Z$*cBU$b1U|8E9>Evj}I6f zaOO$A7H!*bma|D0hxCR_(Uz?e$6{%ujo5QEzvO)P&>ndsSn)8K{*jXs7>noWD_*zM z%hkBk(X>20wqlYdcsy8G;5{IyJgc-*S9kDr4T@`vuD3SyRlA+`^tGCpsNw9%u?H=m z{{E8c-ih}ZYH~W!wBxCE&HM*ooef_C9-od-I_U+M{G4C`HR+73zsYg*fkbrr&fbs0 z{N8m9n#kK-I|Gqx<)BuefCT*lIEN6{|jnN~yr)<@jpz6CHSsGWY7tr1ul3LB)- zW{z!?DO;w@<<)pr;q&ae21;21_pS%wLw>k_w@Ie%4mnRV*yBM?IG;!IBUGigbbWlz zwE9!DN(@vgLh<|0$2A4dl^JT*o^H$ijk)vT zGL7#Ne9S^}v;8Ldtpk4_=1V1q)uu7wH<*fBscOqy^TkQ_;Hxm;3QcH%E- znS+M@d`piT_YS^w9B7*sbxU3GIUmCY9H1-YoCWVi+3J!XEkIY!O$|4En~0MCcJlfY z3bR(3b?=ZEPn(l1vVECJnzYGV4d{U1vUpSJsvF=an#l1u7j zaW&IAQhG}rF;QVqW(k%iutN6nqo!kQTS+Eb;tGtY^2br1q~dn5AJkqByL!4JVxd%B z3w$D?p4LYP9e~(Eul%V$)1kd_)nriF1oiVkmxjmeGflXT<*BmMk)84T`Y6F7jn6D$ z*SlifCB{2sVte`>A8eEER@5KIh6kI{&U+srR#Lso-%JM4@YO`N*i3fF8Gg(x+007* ztNFFXz$G!zbuDsJhR(F+Pf{~NEzGjRe}3uKNdBR7>+XD*+RxQLfCBNNITzmH8=VFh zwvbVw0Eep0DQ%@xD1Hkc)y+}n=MJM_7EVqXEpC({{ZmZ0xOh3Sl=3yw#7br`mDO}*z( z_)s@D{yssKCZp?4;B7EO)69xXx%3OGav6y75r{G{Vi^@1vSR^~Sb7SBXYj#aD>}&r z@z>}m5tfzfV3YrtA2_!23?=LyxBbk@cb1C>jhZ14Z>*=+8&-fzT3T#htJV9&bUozD z>s6+`u6Y9C$is@N^K;A#T4%Y!l1C+-?luZ0^gn;&g&_ukov}6~J}^mEyiA17svbYW zdEzc{zhM|MwJl1>29eY?584>i24|||CB!Ni6j){JOh-v1#JCYdCQNuf4J zi=|R2ZtoxMGuz!@!~8T#&ao>*3gfp*74s!9o-SYW?hNQXDXCdz|Na@xtCjgw%Ye_S zcub%@Y`s8WGtBjHcbC@1-JIC|eD4&2f7*Ys^7TC{n;~4>C%EKC&Ul^=6;a$_N?q6jMXkmUV*7d`SU)+v)m@4NgMrvs(a_*$K3RalC1u|J#`9&mFsTOA993- z^}#Gy>hQIWfrnj5Bev?enQ-7C)?B!!#Mul!*Y$d8PIG1CU_|-PKCu9(d>C!Y`*vZj znY5EP3$7JM91CIZ;6Le=Md*t7CO9Y^fpfFq#BQqVf0By`DBM*jRiHif6rETNvlNfe(kn!)l`<+Z9cnVCoSlcFEvNM zk-(D6%LkeO<}P+8jVZc&Scsgy%Oy|c7w5`@CEkDpFTCO!4Aezc67pD7K0`FZ$NjNf zyh(Bw6E)T0zMTs>$uM}pxGf<9z_Om^3PB?kqhse--Whw$edpDBSF%Xcx?HkXH z+xlEAyLaF0NurzGV`D*&J?NNFspAHIuFr4w{8Nl2d>KkPUZvLf{^I&)LJGhHgTH+{ z52wx&3qzf z&&!4@3RN4{%)V-3Vc-u4l60D~Tt2a(b^inTH7lRWk6bS6%iAtV^=8M!@$RS%NK)(3 zn^X5%QKQ3Gqwgt!qhjL!Nw+GzPLB`D4*!L_aLF$TC=qpzsrq=LaqTPlxBU(e#}+3( zA4B_lSM^0>r|q!mq#>|$*40>|XAovWdOOAFH;K~X_kt%rJrLjB*l@Xr@O!)>B3{-d zGtwMkjnK7$d1Xw%yh$hTlFhbqY>;MP^jnfB-VUlugm*m=SGPE$NTcQlaw8zHcDvF_ zQ(BQz@>$BHp6kfd>C9LVZC7YQatcJZkLuJFbO#__+@$7=Su@lm=J5n3kM-tZ!p&e0 z_a=S63<~Ev-J*GRnq393JGn!iXxS)0zoaM-`mka&k0Wl;rsUrewyD6}8O%W5rgfA# 
[GIT binary patch: base85-encoded binary payload omitted]
z>k@W?#beBnIEX@0nje;9BFYWc=BB<%Xt!-0=c_qF8XJa#D!^%K;Q9%fz7$l|27T8C z6zd)A%RrEF?iI7NL!bhuQQ%jPfK!WiUGKjJH3tXy{;cM~pAb^|Z9IHoOn}T|m>%q3 zsP{h}?5x4>?}lpbb^)s{C@#nnrw6N@{R*_lbfE}cb!}8*p(IO9hRzsYU=t^-387vB!8<-R@ z4TI}VoL_*VeH(%%iA76cY@I0S3Vm~N8rsZ(MZ=Y*3O*I6E4=2Qk?^pd!*wG}WZUy; zlG3W>wKOjwuyacv);{lu_!{F(aj*-jA`58c9u(oyIjPSm8Eg#jYRJ7;q%e{>e2qgZ zq~#ynw|raC4!?plOHmAicjz!^bZTIhe`P+iwLai&wV>b3ac)3CYKED2yMbE}`!0-r zBUdsp0A~|b`OO8iFvYwjchmrwB^>&e14<~w zs74cSgN=K|5J&;3FhU#yn4v0{3on3xO$1oXUxlR|4;kghy@^Y?E|;> z9HiB1z{UTJsX6wQ0a7Pc(|~*!izGj4rT?{_aZv`}*NAMhNuL}d(@i&D-^wS=}k06^;<0xw!5=jq`K*NHB)Pl8sPU|1w>vmRcal{BIVyO)v zL1!j6#0<hY~+zn;iInQ4WzKU&q{X`ed7jbH$n-IJ zA@nZ5LO0I19}U_OemhGgdWzo36q}IooAbC6H*mjG$va2Q0x*U|0~DumP&Ej^vGrWn z_GRm(%v*34!m_U_%v(%4q?$fe|5XBZUtb`r2R7MI@CO@o=DpMvrH!W5^ zC6;0G90-{&`0YWk1Ji#5egTzz4=WvGTdC;RM|B~*m)3*Cv0*x7hH7slDRFij9l6I} z+f&8h^SW7S5bh+AR^T=hpvqG{V~mh9ye28I8RIrVm+cH>MI}RPBnYRpd~MVButhw% zNwsJoXEr27TwfhU5*HaAts>pNKRh!J*;v>_jj`DSZ~vq2D|^Wq>&;2RQcWYJP-wuTyl&@2{& z`+gKls!k?6^hBzd3{sSiGb+L6Tnp6AV^z#n zV@VN=%J9^HZcNRjeiCKF6WL3Z@?yOVl(08f(aY~*5T-NwRKC%@UcCa&hW#R*VXi^hD@~Y zpsNyQbp@C-G*Hq$p)PwMBV{D3!OhZOe$JBE&QB#p^Ut;3y6ir#i{$nl3B&cA!k$EMvRZCVpLMi;E*^loLu3g zIEe{6+Tv=R&pa83ZD9XSHBafz~9OuyOGP--v`qT}NX%7iB1I@`5C3Wc$t24(T~zGU=XmE_pJ?AH_6p9|r_R zvV^vs(alwd!WL^!jqD<;B^ra!^i>OSr-w*`C9$COTRE&pH_6T#d6mZ7t$t$0Es1iq zg_e=}d<_5RTS?w*qpNowK zwXG(tt)tA8!c5e(T5*`X9aNb|UNB`*8YwLJ@!bpMGZR!*mlN7IblA14M57}*A58Tk z3<(F_UGK>0yF%NQpFunL>{X`@RIP22O_O4u0S*fMh17c`mLh5p3i~pY?>f?)x2I!Z z?MAO6F{0;;_(*sI=oDsLD(1f`#Y}JoS`uw{vOSK&*EKC33$N{0JN+;RiopJ2kfjV? z{dnN^M)Q=4>kCcu2uIO7 zZ$?6fevF%f4zv0p=a1NJ|7(h^jB7~w_gz`Ts!dAxI& zQp8_W8rmrh15gaKxrOb2mBC65$gNhe8IJv3e)z9qDAMW$*xeot_6Ky;U!>Likf)8F zK(|yYWJ&8OvdpIKD3a|sl1*dNs)1r-iK;m9O5&RFruE5BFPGC!lJc4NYGsK>*Gsk^ z4GVNC=L78d;g$lF2Y(536{<5M@2qoe#TupM*iBqrps3JTkfal%E7T2*iX6%aLDh!F zCM+Ai? 
z#2SVc3UZPcguUU>$UNXdZd{IPNeqCcYGJ~fI87i``w-v{_uP{m`w?0*YnV_3RYN?M zB+Mo}`pCt43=$l%HysDpB@as!?-*<$+=!`wwNI9hpj@QrJj{!TndPe4Gk`1AG>onV z*=%#qs1}JMgbxprG+=cD3Vqg|8HSgN-_&DB4{?E6ct@y2r&is*a}K^tfNPMKOLbV* zI6psF9lD=nu|6!EPHY#BeDHjAutGL*;ezaHkc2(Fi;zCf~2#R_(6uDLVcC>oc`e?Zm>?T-Sj}!9ZGP=iE4`yzM(0wj4 zm59NPLq{DTm8x<4giP{Srbqr06I+6drT>ITG(%sZe4#@AcoGz}Nyg0J(_AXD-y!jQ z{|mNZ_r6{HKRbsyYheI=NDh+95;a&tUwv2udV$f=Kgo4T>w}}oPNeVFQM#h*&PU+d zl+B>(*>{-&-Nl27suJRP)a7qgrkZRY8ymFf@qqemq-^`(q$v($*naiZ#AVBX*o|!H zD7^1Tk+C?W3Z?#$+=FEzN@V(P3Qvqu7bz&}W_OH|3Bo~{XlO8E1dQI@KUkXsj3*2N zT8zTkCNYac^Xih7BQF*iA@Hpoe4B43S7M(ZKh==e>x=^1}8%{3$CNfT`{9=*qU2Px{DG|0xTc6 zB#>M<`*~7$c@lWot}Cxm344A>+U0a3YD-hvWMAMsQq6C=qCQgf*?IcAk$#8FTC}mC z2I{Igx49E#7h?moV)ZSWC(-l1bnm98HV4_?PHddde>uxLyK3HcfPYYik>;Y@=+?67 zUyRBDI@f!5Jggm&r9Q@J>Sqt&Wafc(q?w?1w)K{p`{&b=LenzlS>7tu1@T+7q+sFH zd(zhqkrmQKAVVo*kOq9{OL`4IH$rn~AlpQ-I{$$L0u_>oAEFgza{X0Wc(t9&F)~g{ zH5QTZH*4*1Z%aHQ6v{q+-T)M}UD3%P2YOcpPnzB6IV>>{c9wz6nKPDQ0ySh=V}#M} z_ull~0XCJ&k*n_MbVS~-fjs|dtQPtd_53J#E8s~>2UH>o8|*WHXT)}XN(+0#I{EIe zxPZxxu)By~MQ#-_;(rUw+8F%)rwN{br;YBP;P&4D(LfXO@*=_1P?-{S{`!T&dPwQq zh7!~?Zk+tG%jkG=eJprx5Jv2pC?kf4M-adRo*QC>R5!OuNS+-wL>&=fN}HT9ORXCF z87j?$xQQtOg$-3LoQ!%iO(%N6Sh;+x-o^pV2<~2VP%XiH8yAMkNz5#^A%s3<;1b1> zg`>9)O-VV|4&1G9#@FJTG;eTeDO4b(36%qF5E`N*lv1EkWSqHRceOPiem(KFtUSkO zsD(lyDX5t~T+sn9rPedpXkigtinDnZ>fo#{@L+lPhw>S1NtZ+|u(W9DRO9Ri| z!l1%}JOvuG28zt71ao~54>)cqJS3G;v0>fmy0eu-1rZM!g)$>O-aNvgUP~Tp&<4qV zm+C?yRr0@D=Pli?SQypJ55oGjABDAu#R4)t%S)--@T_Qi3^LZqvk0^V;lk};cr3=c zP+fnw0zN7fujYOv&S;=0qWjE|$;d~X z5C)BF}jjJZ~1i%B0AnVSY9HD>UQZrWfD*Y=0GxQ0-BVYBeGFL2Ck)lGqnNY&@i z;6D%1Xpp`Jfvrt)jLGn7YNiPoJ>3nPso0{*0Fon9qY`^)B0gW`@Ss3rw> zO3QP_?ILj@L5QZw$J9VI!5L52>n^#c0ZWveCKMmo}%;1D62$0!N945Y- z^b{S0OyzJTDu2a0N7jZDbJH=F=?ZpGKr=~T zU{=7fPkf*VPpXRE@Or9vp;FuU%{kI~5rW-Gj5lQE;mD}O4?&94-c>Y_V56=`Tu|?} zW)v}$v)#(gpouWAU=?Rd$gd!EiY?ZH;_wLa3HRP5XI@a=npIC913&V31lI^khwJFv zyPi`791&DfwDF772sZH}%#cvV59w<(6C!&8{cJvj%O3n{&1P=P|Ko=aAYh(E9XY76kt z$%FH(*+0#XtMM#(zJ5(3lx#NKdo(I%0=O^y5M#R*fTmCjVa*5yMC z6@(>nVg&qH62uNCskoF+?E?rdk;j67NERV$33&pEq^WiXJVq-;GzSXfj+?ES zb6)A2MZmKxyViv=h1ZZ09x_EBt4V`T^?i)EyA>dg3^87s_rUWH8dM*|#lwWTn$8l=Mq{A8J%LO1tha#ny zwaLZ>Seu%QapGg)l|xa7l9)&jB;yy#i4#h7YK4h>91?SJSiau(8tP_FP#^XVqb(lF zzEsD}W(x-3jzLiY)p5SS)cxwAmmG-2nL=1C)1lNF8i}yF=8Sm) z3A8L4oT{}9+Q#SNwVd%ldOIS!P!!D?D@0ivCk_pg7C0QoQ z$|tasSVFmE!UDb8dYx_*%t8WP&M9H@N!{?G7$@*&h0Gws1c@RY!8L*N{291fOibQ( zqxQgLaQNa83iC!hLM$|CUaoTDZQes1R~iN}<^jZ}32CV^Up(1GRpG`QY3fEAnq)t@ z05{FAuiX*)4QxX{E9G3X2PKNEl?gFJE00?^e-|%%ivz`w7-j%t%7R)zl9K)9K)5^X zJk11yB$RhH8uhKF^14G20+^R;R!#rzoRIY>uPU91z1r}oPT;5j2y{J7G*^Klv|u^= z*=;4pkf_0WyOL-j_p7&c)WdO|Q<0flsN~)I88__ItHIZl6|iPOSX)$sVjA%3#OP;+ zCIjPGb5u-96b^A=95u+bWf=B3dHtW@xq*rT4bzu)g<(OqEEsDN@BlN4_v~kgCI1=% zg+TH`c%$%R1Odw?{uAOYrM@II`;qmv{|*xUcO8Pp3*kM|T_8_deFQ5>v3SksCx`TD zB1hwUO=^?aE^XS7KQQ>uz@SF4HjBq)A^&MbLbH6PmKugK=b>Aj_(X6uEwf%Ny=mx; zNvu&{x+G(rae?*Qt;~kSxG`3^!T6pE?vlZogzZ>y%8ZsK2P_KElt~lsgd9w&%}pxM z%W+X%IH{at2$jWNyF+$8{q>}l=<$@jLjR?*h37GH+GeLRYtS8{rbI`~I@W>sxY?^0 zL42MvC{Gx=8+9=*t~CjMo;DMMlP68J*RI-=&|m(wkurjmSw;NfPlCbuKyP|8@@{W# z`+0O0SVIOa(6owk#+PUQdi3lh3wN_cilQQ&Y_5Yy==sW;EVZEO9!aKQ?epvA$=b~? 
zxV|v`K{Rk8FrmY8K3r>o`OCxBU{+I*Q(CVGj%Xjp;bxPn z5_%kki!u27#h4?n6PQ%Pk%t@uTZ^;iNb(_mvPQB-|znWHQCm_@T?N#B!zjCC;PDw1G~ik8ia(2BGc>E63*B!%YpI% zML^x4pTub>dCQ^XQ0`qY{myFosG_QHe~tqxSMp*iA{2rZ79Hib{lFU()p+*WXW~ls zT8tU1gjH5QxAR!5bj{#=QV^RG1u34mpXQgms2qEH$b*5~b%~!WxX6g9bonvnka>GO z!Zm|OsYqnigVMX|qT8f!8e>{LMU>GR@VE&SCmjPHI*we*Ns35oDWYa$<6!KgvBMjo zS+GD)qLOW*W`hKQT#ng})cw5K>Y?^Rw?jhP=j-+?1}t6KROq>4lWCrT4|IuPr$)QS zvsI_?Uxj9cm%JVecTZSQE75Hsry`m-d4C2EV4fZ_z8e`wU3FdJ;<@x7!i z46a}h?u(vTUJJccywS@6vP$-DUCjIwLCApL2TeD69xm4Zego7_c-gYl$B6s=o%8u` z)QA8J=7lp7%L@}k0xlwNPnAGo*@y$S6G-&Tn%pw*&Ap4RqgF_5h|w@Wa*6hppV8dDjeAVEjL zJy#Y(v&KV@8haA?95FH(Z4;AIL=y)X1W#xBs|u_kAN9u%=J2+z@mw;~#C$4@TzyJ0 zb_%_o%%~DmORw%X2{$AjXtjretu&kI*>@HgCkZQD;RwRUC=pDRh`H1j44j2Z8*;t# zUpQLVzPBFf*5cLa2YiBHx{dKi-C*daFV3c5GjT|-9TExAp-AU;nx}|;R~Bt8m0w-r zq9$J9-{{YArha|cF#Hppm}KqF&kMDVne@FfM#OyWriDGQ`*O;KoL)SHnfVnIbLe?2 z^pP!eAThXh;LBZ);FYuyft*bRNMSapioLmHUwGU$^6hL>cS#qJiSL@`^VLs9y^u^L z6Ey<-7~yr-$u#ptzqSYKFKSI5$O?HsjgJb9?}Qgz4#kp*=82N_Lyvxf%*aCt6XU4R zYl7F?6>kbectT$zNfz}f!tCk6(dUc2imXZKpS6y5EhMtnNppBCIpYBFaoLBC9&u;5iO1n&-9g8b)Q%D6r#B?D|53 ziaza#Fa#zDMPgWtHc`Cdr|fcEuHHr-#`^n)MVDO|5NhEpzi03MjrYWH+lEUhTWpqi znTwtEXjyDJp;NWNPNott9jv^4j=dLW1$2W&hOq&02fCa`9*#tO?04b5q9 zjoS6>MqJzcrGu!2u?qJqml*`M&R<9Hs-RZ9G-~)T( zX)!P215E(<1iA=7N$BAdxWhUd$rLv`768t$j;oUj@o&EuieJM^p=%|d@#nu9QSJG{ zINVkIGqCqh5b@$33_Q>#ClnfY9Oo*<3<@D{`P~5$xgOGAyBiUII!vR&xSED3ThlE( zmO~?TZO0JVt-4_`j6gUzBcuJv`C;v*!#qJ{T1_Z8BkL{s$XlMN@%V*>Vod9uN}$4Z zaxY_i2R3`)dE#e=%TQE-c`{VzoJJa%-)J8LaZVoi^rl8zwJ)3A_MoOuS(MuWywP5cLE{ljA4|96AWBG2okrL9$j#%T~4@;-+{P1hj3f(tEkG5+PD=X?0 zpB^VqXzQXHbIEw8Kzr{Z&hZJ}r6*Kjw>nKHz5C9?@XVQtO%TRD-njWLQ9knZrxw}# ziwG3&CwNoxbjkI*D3iRE$Nbr=XG!8kcwovx&*=>E6DK}F;o3yUe#rCffXCWU!sB2s z_xRba+SQ*~0RCg^-BTkCiRvR&$3|f#3o};HVo5D)yEj2tmn%;r5vTWu&N$R>r|R^d zS$`eoJS(xp;fztBdA|ey8fYQ8U%PP-B|2;sU)7qayirvj?zrx*T|x@6iLpZgSAZj z%8!kB^19=q(WkPaD_Ij9lK2%EjNmJ9ZFygyi1r+M^!oP;JaOMS3cTx14Te9Tp!mM} z^LQiwDsA^?9Eb#HLwm(CCC<-|<)se!zJ@fZVrq_Z_+u?^WTLahr-OEr+%T4IC%R7< zAg;Ifo42jJ@t$-$WXPojF{)nsoL6_}?p7)JmA2RAOP=!9kb9&#g@(4Vd%MAH-s)+7 z;i=>$sWD02W@umbbkc&uT`%j%5WWm4QBT4IEG+-RCWjD1T-K5ew zk6Z8aiq+kY>?m>`OtG2_@m`XX9ntnp`9<>;^1Zp9Zbllg+~q=QP-dWi+vq0i_@v{n zkV_}ZFm9hHW9`sb+PdhIYc1zb#wCf+iWGtLUZmmzajt=&~v^^M3s=ZSsY!f zKt5BaQ>j?TZb}*iQwsz7M4OWb!S$@(( zd8jUSf;)`hDg$i2*R+rJ&?>`?ur=^=51PfpirbQaI{)E*7}S5n@p9Hb$vTlUf0_s* z{yBAI2|y&s1KDXL|CQOl1cEY;0Pcx1V#Z6U2J06>;VXMNso;CJ5+F$b?)%P2?i;kh zt)gwOI&IA1;(E0A?rk49`MKe8s39hwp4xvP4GG7qn+*$sxOos6b9}J3rEZ!#`Ai+oZq~y}}=Fucy1B+sdr0(&n25kCoBgd1t!l$UpsKS}HM= z4vByWp=WJP&(*V)i2rU^GA#_Lr>nZkxzPS;#DgY3SwE#Fqhaex2ZC>wbV~KXxiL2_ zkd$Z~W$XS-av_94zDLJum`s&`7Bro*@}PV(O@%r(AYEACz4GvM)3bJFk8^kuRGF<( zX8xv^m5{Wdpb|I#cB2lNt#4FRFKiYayMnPjG%p3_>-Y`-SILU-lr5Cyfnvo*%=Sv2 zW7;9xM;9AGASN8as+ zNZ|jGOmzgLYtJzQzwE_nTG6u~@AU%^e~Q3LJU_)eQT@8p|0T9doe)=t=j^0c zSv~8>KtE3^-3G;&+fqC%viF|e=jo&!p~na?Pp!AZND(+>fb(a3S~al!7Go38=eIC266T2_7taC_r(FMq&63RxR?UIUy& z`>npn9jW9(8k(h2x~)FxYyzc1-s#p!kmyyi=lXg5nESw)kOpZPVdy#05PEo9S^D`G zotEcY9TpmOh8xQ3iU;a=4yB~;7YzNo4h!`T=x#GMSl`M{VAZ#V>o=7v7xFX~YB1*a zuUunhIAZQ+bJsijyFQ%j#Z#zb(-l_DE0>F%ldRuH3%kT_*Y$3PbLd2Q_2SC>^tX5TdbQi zRA-zqE_L>zu!6j)J2y@h_7ZhQy+C+V$UP}i?)jw{DgRm%l$c%9k*tiQ5(SAF*bS z(HksJVeA1~6F^}SkHE^Mz3F26$htKRaT*XATmH%+cT#FylK=t{-;<=KD>up5x6^oGl;~Lltxo}^4nGUTyeU} zLM)ku*Ud8M=!{I3a#*~;{Q7PuC*qDyS|rEkS~Js8p*A)rY^RExbVP*R<$c($yT{HE zt&`Q=XJwzB?>ir(96Op)g~o2MSBML~vVQ%%uT}neZ1O~l{0PNYkO$mmd@TTHi7eoh0(#<8O?6RVCMWR$5E>S7iUVJbp4T zV1|dtH!>{9#uNa3T=Oqw&8ImI(n+^cW5U6+o`1yZ6PDFg3(S`37n{P#hq;O+PN5{b zk1rbcyde8Ertm~DOgE84va{lSKYe;HnitWVqW^fWzP8hGD~1tDH~VWw|M6Y*rV-fQ 
z*@na!egOMU+hy3*J;Fm@YdWi*Hlh3d&~&wr>!%9Q1UrXw?n5GiKy1crnbx$RmFMH7 z(<@tV+`?=fqv`y7V^ew>0aO7Mmu#^!%1y=(o(-)LQm$o^YMUH;Gnnqt5jl>0QakR-?EGR?;%*kh5~K zLTo$kxHDV%!)#k0iJs4=?I@H+Y{ppntPZtAEApQ?o6Q8=g z;F_hoNjNZ=M8Npn%FmO6tEo$$<*TwLvrIN*SU2O(T`P%Xr45qCS7q4`E;xBUj?L*0 zaD30@wa~@im`%|v&}dtV;|T>z8{wU9;F%U`E+@ys_A+^Ebhb* zw>!6Xb)c?fcisjf*ZdyGmw!ia7L3=%+ER!yFyv?spH2gsB+H~Ro*XJqIK2h#*T#TC zkGjCEpGgv(lgHy^$}aj%xyOh)2kp45Nv;J=?yfA$RC%i5-p7tdaS!e=vr$F=SY&eo z=>#EHnp~@9j!1{8>0$-ekBlZ*(X3VEG}r<;=`C}r4$TYUmHY3G&031_JU_lOOg2_q zPhKfAong~eyVu7(+V?}!4f%+yufJReHw+7oVj=eFDC22J_B$L$0CBZFQaSOO`AhSpJt6ln%6G?LVpv{}o;JH`A4NFPW%)!8d^6@1Qo!Jb zPSFjEY=g2Q`zqrT^CU01Go|Ct@h<6~op_{6^ZAg;sR=lH5~97vntX^`BoFD4uM(q5 z=4k7f3bnMeW(G3(By>F${DA-al;l6K{P8&!xdqDzP~`)OPh&m@HqSbm9m{p3f0VN^ z>Q#6Fi^`NGbk8q2ml@0xP~9*lA(yjzv(aI z{B|jM?n#~6>qI}+@9xUWQ>UQITlA)Qa=dq=ma=B+LEl?H%^{m6V_fX^c%jbD)d_!# zWQ4W@SjS>45FtQGS(X24Q=#;EH(vsXRwr88GbCo5GgwGU=@W>j6AUh< z&V=UiF1sx|(4)y0M#=A;8Y)`~GE-K(%P-t^rJDDw_{~|u$v@<0ch;0!K}TDiwmD@Q z5l%P`@iY27>)zg{bi}K&s%?LYI9=F~SUY>tyVW^ZN!grchL4Ksc<&~Wd&P-WIF7@X zGtoVQ%}9nJm*;!6-dA_JKQlsw%=JWT>RelIg1dJy(H<*N5GkScur_7L>ptuac%+`Q zGy94rotZd0lUXIgaM0{ST}Zu&dbrq?_XY#rLHR>z*%_Tqd9jMv%reX6dMTYhE5l{M zn~zZk{icWCLg#RZVc)w^lD~IDuSynD(@LJvyM%i-n=Ul?Vk7S6F~KpQ4B@v-G06^6 z%CIK?Dwn=2VgQ1zf3#h$N?43umQ}8J^V`|`$q@sw7W*1?jrf0s*m;Zs;IU&~D;L1- z{BN-WKFt$HhUY`1e`h7CAjM(>WqaR)vs(IrbPqCYa@cz>(lYe9dvs?;i7RJ%Y$?XL z&2M=6Rrd*`_~Y&KF2CuRL{>7&mG{a_wxukc6s-;AOH{&8KI*yV$?~<3kv1tP$oG7V zPaVI$>FJtMas@1Y;pv73R;i~;tgFjpnMu?^o(UI0R1YXjO5oAwnR&EGC5$KQv`yOI z{nr`gFjjrl{9?d{SoFI{PW1{!uS~k^ow}C5@)I)hte`0i4DY9ioRBJ?$8tLcKPu8j z-a?dz_$2T1IUjUF%b0CvJ5P+54@sufGFKAneY9h(v+o&n*<47Z$`>=_-7xBg)glx| zzP0Mj-j^M!H$GT_u7}5@Lz6Oa`f7~aWK@>>%r~r(lMHD&#`H$4KDO*HSuW(apNozS zeL!g?9L5Dl2{kgoLy!cwDPe{J&ye^eXGVJREsxYk-umZ-S>J!noSzM4CorDm5w=d? zw*<{l0STJ%F{;7;{}SwcM7N+tA2X|a(5C|dehFC|fOTDypfh$9$wb`ZB~58r@9AB| z1>a0^b5@670@E=iB3i+^1=jYn9|Eg5G6nV&`FEa#?#uF2WJBMDmCPW_4E|E0;>0zQCpK9?D7wbP*p?UrY(|dhwwwMRU5vMd5C=?c~0wZnG`;IN`SM8})seXp4q{7Ya{z6pDK7=blyyt{mkat+uuN*`B(hlGq|OpzVUf?=8^TRTpi zy|JHnt1`ydgtD2RUDGN_j!Zx+qR9CsByDBBav#3036d;{8e{xEf4)W)Hwml8mf$f5!d zjQ`PRIUDBS#qj_FIAjOT`tbU~{N}II{^zuRMEVNEZZ$ff5Mg`-HD3C6HlhGVCz-M( z|3R{cg?17TCOuko8gD3_V?0~MY?69_5n3utGq8SeUEehBlkre-dpTrQwrJ`=Oitj& zh6FSGgzC3Rb%FbQrUZ?P!-cw2#^?KqGf#eROz(>;+wO(uGwEmrtf$?WXQp4LAuGrp zX#Pmo$lcgzv7XVmftl~9;6&4HDU>9^$AzilQ%#UfO8B48kiBuX^}SK$_1fT`FP|SB zZ;O3?nM=t`pN1JdZl$asi+rZ$+?Z-Fcw5XG{w+oTw{bDQ2@6DO8J++@a3HoHx)b9} zlD*tt@y22RRwJewQa>C{pGxR^<026eKu3sWi87DtSyY2dBEW@RlA4me>EMRVSAmt( zTH;{T4}8AA$mw^iY)osu{V{dRhivz4xk2!I~gXo<>hS@-0AJ?@Wck$wB#FowU9}8}Y%v2*}izS9d z2qHVfSeH8SQ4iUfnSl{gRE;+tk5r3^OG@U*CCKI!!{pdfytH;Zd(>3CwGmWuK$m z)FzZB6Tf1cGnkyi>8ry>7oif2TzlZfh!1rb%b>-Z61A5nBX4uPV>u>|K~qJoDO*tCAo4b#&B~zmD0E-orBWYYv82(LdYhs5ueA6B!FAem?OAEXJYeg zz2Kd9i1Np6hmds^KMRm~ZT~T3t1Q4p(xK=E_<(-`#y^j)34$5OCTjufKQ|>2Asf>G zXvQv}dwmTCt6@@Vntbo+bvv&OE?Y(?8OAb#5b}6>HeH;XUm>UxCid`!=#nSgqO=HI zUSYgj^n6U?T%IK@__2!#>O`tU%9=uoYB({CDrwi)ZIDGt%ap>QD9?yD9Bm?u1w2Y& zLU?;r;Ei$I?oG!NMWkvKt3a-!6LAXFv|6k12zOF-{!+K|J)gVR8$Is_xztSEzzfi4 znKN#CerF=N?gQI)#JdHzq+aowvOs~B&Ae7gW3#-t>y5W5o=&+9ZUPE9yfgJ-q6L2O z#NdSF>JlSJb{@dXq635;{ zTxwzPYc{=R4gTL43D9U@lIG$s&k!Ghh53Q8Zj#CHS1d}j)(Bb>;I=a&Zy3b(t-0UD z1z%vf72XBFNa&BplEqu>e+$)Bzq7V4D#q~p;QebVfAV2k0Csv$bbz(h^NzRG^+~tY z=jl-MR@^^&i5o5O$wsf&v1|`1sVy&`Q*TId!P`CFkwr=vEAq?C!btjFL}npV@}cAX z!-=qhurf%6x#1;c&8Q? 
zINNX6FPIV)H?gV5#W(3BOvNdORxg`}o`=0-X`k@K(R=AU*?zhFSTiT(^Gi@|OF7Bz0h1Q?b3926#v1;&18)q% zd-#)*G2nx(zuV+K2~?=kp4b^G`>pazf}H{?Uyd@gfp8B|A)D|2kdmDW_@F2UxtIXV zAp_vUm%XkJf2l#Hf9o5A8U}+4gCvYOEPOnTBO{DpPquc?B6Bi{iA-wPxhhWqsAk~- zH%ZDmI|sdN(1K`!p8jKeEcJZ5Xr5nmbmp6ZdfCKbu^Lg5|b&1nXx>A6Ab59R$`1Mx}JnM zIpuM15zh^dnLz%7W9i!uwl?`?di)5{+q%>i9*kBaVFo#xQ8(c zMupNTv&wYA;7tkAr3lZthDE<*zpr#qMf>uz&KucO?uZ&KFichcS=Y z3Y`C*Sv57}Vk`ez!%z{g?kwzIr1{U0g%D3}uP8XQQKkY1dWiJ@XnX6ZDBGxSR1uXB zLArD3Qo2EU=%E{tZs|r*!U5?Vy1N^s6ancjDd`xH5)cqL_xOm9?{}WH&inmw)^f21 zoV8}IeeL}1-`+PuA3Z1a8plA^9LJMP%z3S)tgudB&yk(A=BLNkCvs3JM(?6!ztC2D z7qpBfE;i;3y1MD~!uw}VM2fsp{4uGSA2KM&$tV)i2aC=l!YMix#Ll*K2(M3)zaIGr zV6eZuv`ds@sM->w7*gkrF$G1*ejMJRT)AR1 zTS4MUMvaQ4{QOPN=4;?yi7NRFj}xo@L_&a=FwKZ@sF*s;j8?x-(`LJ-Eb~(l3Rf!l}lx-t!fwvRQ)_ z^soYyiw;gRVFApT!Y)}6ftOcIU8g)_pLBu`ht+~ISKb(V9B#!#GMPZw_&1(s{UCnn zC=#wZmd@XleoR2}~e({8J^o$SFfPYzhOsD8vVR}*!YlF1<>uRyQ*4oX9lF$ZPMb;2v_{kw8)`(ztuUT6I z8b87&Y@u9*-Q=lO{xvm*Rlws@ky4H2`7HuNJ;gnz2B-C^I#1t53ZO7(e~(f#pzz74 zW&<2;X6n-{s_o4DR^oxvF>u^Kp$ptc|iw!&R%Gvbh`H%}8E-lrnM%(^wM zq1RpXZZ48*Dj`us#f0|SH1JDoTsO?~=bf06v9KQRCIg}m!e)4*4UNbdg&faT(>kct zWGn*vyhh)sFYcN`=#rQ<^QNEJM1Aqn4JHQR&?eENr)H6GV7QoPD-|{3vZ}q$rZY_V z6sHb@I)O!?0W|gQ+zBfL@o_b+;@H0msdgR&@{*U|V@SEbgi49y>yannkc1ft^Xg{}Jwi+J6n z+*sEnZi=1rL-nb9t#g%zYQBbKiL%3656&~EXVE!(gnhxncjg6GR4lk+SaLkhQyI42 zwNUm+L&I6r)-y_qpGj>VUp!@sHl;EnPLh*y>Y~#582@&(waqdmAhR`x)`B*KBW9!O zJI#}V&C`!pj=bO3nNOsrcV<&rxq0q*gwBi=h6c!*iWY{KgqWT}L`qyz+#36t2 zAq;)G@4%pUw%eXZ`GOYEO6E$HMt)=#@VKfBt71V>?|JpK%)A+-ve~i-Z?MK$!?8h1 zv^z?J{0xinSkphTNiKz0oUxqC3x_xDtBKDZ}win6d*=S!4A95|@LfwWnn zIH}ARn?F=y__`xGP!)$Hc=&Nz^;^`;9&X#*C9ZbI6}kBC43@h1lFWeyrVVKfGRJY068W8&m9~(9U2T)QinOyu<92LLp|t)4m$H`Ve9oM2w#j9GZ)4d*ZBd=_QDx-23JqnqC6l$~>=CEJW1yAEsJFO~cXuy}ylWtO+I z_KfN@xmpcKO-cj-LA$;bFTuPq|5+@{i;ldymkvz>Up8+ruXcNE%cH%LM!(5b8o%KM z`{yQ8f0_Ty%N;C#o@>A7LO*>hf?L7U+k=1$_;TiO`&J3wZ407prjE$w1SN-(2jab0 zIV|E^IXHQR0iZ@2vl9xl2hl09GyRnVt6Wm}z;313d4?pA60<3cA;Hx2QMd02h=MU~ zhZ-!cc1dA$B5T+~t0cVYZ@)M@<(Tng&@IP9#GE*LM@l!ci$P7bKPZP=TWrKQqE`os z4}%J|VJ4jGFB||F+N*X=;B*PV>APt-hr!75a@(WUAyK~1wK zMjdw5zgz1-4IV3|H#1YN90{HLo>|3mlcD_O1w-N-aE?OHfn>=0v8TY9;00ZHae#9M`ERj5b+dveYV0=W@O*_Zi2Aez4$XtC{_T z#&n6e`9&vKP-&=UKAsMRLB%r@OZfME1XcsB2q zx`5MRN&pEjW=E>d4Uwop5b5fDzuj5}LVeyFU;&uxFBjl%M>Tx1sB?Lqvm4xQi!OL_ z)3N(E=D%vU$MQ(WG?~E=-vP1;F}h`M;BkujG+=eVemh=w#TpKU`Auiv(mwq*>z52X ztY4zddC#7P2B|*KWhone(Z|QeX+}QU^EzTM-Z?8FJJTu;9uCSlV^PSr0QiX)0x3dc zFF)eDwO*Prq8BvZP5W#qHe-AYeqUDEEZ-x%bJS#i+W4-8bY>?(FmM7dcc*1%X3nU@AV`nMSMgYm!XCgfX#Hnfp=IPVC+t?o z9)o~Fn_~e8v|@fQIrujQ2-Idavh5DjWD(u~Qx34ZuO(@9FgWFqxu-L>quHbTtTp~U$AnDthtwXSsYc45h!f4N8VOy4OUq1A$ z7Nv?*wYzE}TY?R;gXg^++5!`g^L&CyV|G`SEn}LBsPKdMyMRpfsz^-UwJPVlBdGWq zak4CK@9K#*XS9Oph^%9l0?5;fBdrs-t*n_IK2sP6xei^Dji;MQ4S6>sh~rG4UL{A_ z;b^AsWa)EKoxWOaT0@!kR#0+6pM+*$YI1jDTN>S0C(kvr-OHB-39fV3gRn7MldZ_y-ib>xG)*$1{{S8O@$z#()iuaxXmQR9gAg zf7mxq0FZ?S2{FW@`riiuYTuCXMtOc@)>%n@ubIT-e^` zgM_$Y6vt7r>MqN-Fqp*iPB!a*lqg7!psRis&x%^hD7d{9t9g`%9Xi^>VlJ*)x{b$N zCS`<6V_3v=I-b-Q9SvXC;Gs2$?~FT!Oga9ln4|z)Ww>z{v#}n9OwYev35$|*L`le; z=XW7A&4^^@_f^t7EA<@TxcayKdD9NB*Fp(C!(+UC6%_vE2LAC~?RNm%7OAb<3-xX< zMW8jq03j11|Kfq(eWct^s!1GIOz+pi<@y&)i1L~6y|$N;h5D9vg!|qn#?de6B0EXU z&upCQYPn+NlOMYJa*5~a5Lfx-&$Y>`Tm8CCU* z)dy0kTn`>zK*XHxWsX&I`YtC`q3WQGay7PDmZ=||H2E4>>nDNLw>zsfO9^|;a~k2a z_?_w|pvQ{&OFlv&78d+yy|!E=7ct*!hZyj;gtUB{Cp~YQH|{xk%L5NREjyi?ieJNW z|5=esbio?6*wkA8&W49OfvwhNqz2nthio?4B&nIrRqxYb=UQFgsq2RMPE$B1q6+n| zdue5Tkx&EI5=Z}4rG8eZ30Dr}2E5)ck?xBCpp-%0?qc$SMe4iXb&c3FUX&V7yXPFu zA~Ruom~Q`3nu?+L5TY;M{gs7N;2I=y%?ctJ zGBL{QoyW;HFpxGGuD#d4{nl%@c6@A$ksu0`dFdxAO1tv$`qAI 
z%o~b!K|jU>77VhW37HI>El(7GJyJMWU!qR2VXf4rOafCLLeQmZ6><{$mhpZt$g71n zPGaklsbZAS*ly1(a$`JnBvDk-+Jieqm<1GHygy;`nL5n_BB}?8zdq4|y&nGIrrrHf z0P>jTL$Wi=Dh#j#s>R>Psf|PCTibP7AP(ZcO9Nmy*5-AyBGfx501|{P zMrS?x^-o*a4mb_Qd%LTJ8jObC9Y-F(`)>2{XFs7qUeVK3C(Qlv6R#1Fg>8A2=A+6eCA*I!sCQ;6!G<9UyHKU4B zhz0)ylkZxkl+luqfl0rp(PfIm0$xAI$M$btP{1B}=8-Y1!{7o+%|y=R>Xj^{r5b{} zGOj@QSUR(Ic32+7j(pzeF~w&4PIW#54<%$bUwCodIU)(EK8p99Z+17(L4!j2ROQqU z42|CytGIxGZ~TsWfX_F{cr?b~-;%tb0;i&Ii;D zd-53r$;*+@g#>{WstSGpY~b0@sZ_o3)b`RDyZKXRfTUk)yH>{m%#+{RWB_ykYJ28H zX|n%A%D4Q<04ns`=g+CYatCpP#eq#Y}-mq9%YJYG2#m$7;*F!@zw zIr8}@0|Kg^S{Vzv?X8G+`S-13Qe2#{&nJ?fj)ppJriQN)$6dq)<#c+LCh;j58f@1v zp>j_bmVzC6w2$$VK?+A2 zPNNmPNjEY;`yv1dQV{>-A7KMF`c(kQ&d6xrJQ4m@74t8fy@|ecj{wuH+l5w9p!j$I zwo8%5_=svh4cU4=TU`ntt*=VJoEWWF4EMYjJ9aIzI)6t3|IM5X2c*C>%VV+BHp-S< zz!xkV?-UzV@Oj)wRmEzQ4?XIdPvsU^61t*GR~2S@AS*s5PDjiT)I}Q|tqPU-mf(7n zI8mT4R;!W@P|AROr$U_4++MG638;ytH4#Pnx#{=J zyZ^tMLKR8~BX#v6F1d>Y2Lr+pPDd~PVu`e8GosEB-btSSWdyPrIY^UFuSK0p-7mfJ z;_5Jt<2LvNVvt!P*6CxYT5Y=eK-{rJGAmq~>D}y+R(Y0-x-1=9uO;q!HOG?}KPb3Kb$1` zG(*Ztn#Y+3btkyX*9hlqN6EDg z8>3MZQ^#spu~HQhtj({tY}INd)E95UG-00m(sw05#1$Gnu)NL+P<1v_&J(04E|Zuf zhD~{*Hfb0OK6g~Vyzp!w{Ha-{^e8Vz-!=Dhj4AfJ6#jxQy3uln2kfloE^fpjGc4aa zRik$)&?T*s0uA@XoUKYYam~u{6o}s8f;5^zu0qG-tz#Qx^Yq3dotPH%66HnnqtCP5r**WcLVo<@R{0y`vw^ z0&ADyOfAtc3frQQV8P6R_4B^H&m6MOpXBtasn{}f99?&q6QpQ?EGpy{4618vf&~n? z0R3FcyeVtkKavB~3Lr2Fh~P~k|6>``1^}^tX5M0G@TQ>pD@ku4LeN#sQ#SAV8vuhD zHGRtIEsM8r+mJACcd>31bZa`YoKB;S8n2ny3xa0?=vJuT88)Lyx#7dRlg4%-7*+c; zGx_>Kpe#uTS;dgViOuC@eZPpcG*$m9*_;LJpsL5Dn@nhuX=8 zoKBg2_LQXNr(m9h2g;nFf1at=UJNkhLs@Zx>><(jMU)yJ<;EGXqVC_x*F(zwgDbkr z0pls|Aqijq_>+Iz*S5EBFTAh1iy6f#_B9-U>*k+4`zY~M6FG}8iyi+h=m_`FZ8>vR zh`dNmFVBRy`WF92WHjDZf%85ekusAf1J7n?_r(`jXzVxx9r_{3*Ho#`Ldrm5`8gx4~1_a3ea&96N@z0~XyJ1nVNej#(Et^kAr{B9DSf zy!gkJAcz;Z{&g^DUhfat8@CAX{$yWK@RhEp8Q!0me8E>s`JNbqlCl`kXrWVV?;_~hF%(GRbGx+j|3x)@e)e<(5CwK>} zd=1<~O^TGt+%>80YXoH$hJ#(n<;P*#laM;;IU8$uJ3t^-;bPnf?R6MK2x%@ot9~Kr@U-k6uB{{4}qX8 z*u!*ITzYnCC4X6@bnx(O*#RzZxX=>^;i<9{udG&G|kaq`GI7MKqZH~h2 zCe=4SCx^mK>TyMlv4NjEiK5Z!JrtWL;6l-S<6CYWTMLGKv!W=0gYzF$Y&IFpQL`Bb zK8a2FS>4jh)cy9o$T&)hJ7QKVO(*-RsFJss`Se9~tJZg~zWs?>o-RZ3YNs}*F+!K* zKFmieJVzr0Km>or7;it6HjN52>6cLUF*zv0X`^oZI50ot<2C!z1}pEr&oF=)3R1XB&guE`iw z6&%Kf4iRufeUs1bdfK}<$h2xy1&xN?&Gcl5^|Z_*r%9b>Rnz0wVwaPIea~>A0-l3h z=9DL{^R`)R~uMs1GqpA2&;2~;E7fg zJp+Bzl$t@Yax>#cTMmr2>wzg_rza1~4ci_r>Zb%b3>x+vfjc&2yqAJs9(Dt>9xIXC zB5?C3CQ(8lGxccgSa|`}gGp=)#Y3S%1$E0(5x8I3k;147)67cLh5=$CJXs>NfE_wl2ZdmZgx0H|u0T(f&Xlf_`C*V4)MH`1 z{z-d6yRA{U^~3zB^}fD_^ZdoZubTe`$65O`5spButqT-w&?iB5A;K z@)XpoY+k*EhjX%?Yq|HeXjZdtvm1C!X~n^}O%}~fO!yr>S=DiOH;+W20bE7`6s~Kd z7h{*yIP%APD^iv(+#JF*8~jzj1<3wOpwl6*g1&GRJ){2Cv##RG8vQm+)-dq@;1x2`FVK15;v zp+nuLYlkP__q(NVlf1X10sge)E24q$*O7j|Y<~I*)5P-*DjwjcX@Nr~yn2D|Pv^h6 zsg5^k@hw-oy|ToFpB{~0Ko6}-L48wnhGD-zJ{DSp*X1JGbTXZYR9N|i0T<6Hx!f@sJ z`*J)r|2{9H_utCoS{`<{by$6~KKlefGCT=l+oJgnI-eC>H9{SJccd&l2G_vRfpZV@;uPtYx3i{g`rkU;UTA5b<8u5eLo+5GV$ zR_&<4)hn!Z?@<`3=#8d14^4?D<-TBc(ZfKLT}jfY{cWb@jbn3v$%WX8dWOm zx1C*hQ?JNsj89f42X>#-!n;;J=P3Csctctjvb{S&&(nPGH~ZEuj|_o@FF(Fgf+8ro zZN;mzv-cI$%#&;wW;PEF{5COzsU}=w6P~+e^Z75sBQQFdHx2|`8yd>kp2$OL?-^Yb zXsg$n4e`|w?LCs16XB-;YS-BsTEalg4W9PVo0m3%w^Twv1aIPds;2CUr~Eb5ocT*QP1WQyyANGA=!px98+#&2>dSCAMqIWt6||x*1qYc`K69olw%UU)Z*e4yTFW<95Z&izrznvy%hNP|!Ju|Kl$tnb5-nQ3la=(9WDz?2p9h{|<-C}IBQFMlPS`&R(av5-#nh_YfA!P~e* z-5b72>ak*TxX43Os7`nK&hn89XSadFs)kjd@k;exn)E^%2UKOEv2l{Rj>SfEA!Pw8 zpf^V}%nx$n*xsZBgEq6V3$hs)Jy&ZCQJ)EVm@b40F3(qxY}p4W)JuH{?5{U2etH`P zbSS3>y@?P(yZj*Ld!DJHnI)u%`|-mz|7c=kLDDm`HgIa3Y>UD6qgEx{d^RBXM(Ojg zVv`t+c75dHv;`D-I*f*x_S;P 
zEzGCK0g3mKFqMJc!czKeW^EcI#l=t9ioSc!zP`z8Y&0IRX6a^yck$&pY8h6pq^OPa z(@OaZ?~PwRj}6LDb8{$?KNfoUl|`bL69nG;5u+@Ay~*(1C8a;_pkng~x=jHLN%9GZ zprx{)TfL-&eLi5pDJ^3USfd~rSnC8Q8ENKZdeuLiX$Ze0ferr-4FE4v>2L{b|Qc`{UNwqLSXhjhwDx#T8>024K@SXyR>(&v8@0w%){*upNf zg8b$i{^fju|J-xXtEd0yMOop89M32{>0noag5)ll5p}g51a@kUj4K;RmTu@Wt35I6 zHrJ$@nq(d57oEuHEc^^NEVB+jyi_!37m$9$VMbDf*(haJM`3p6Sd|eDqNXV1z~h$6 za#B9lsvkdy_ujWof~CX@&n?%PDSB!wmUl6GCm1c7&RB+pI?OG8t!fP7ec$F<$NL`9 zKNk~2{?UiOcn>)6EQEA&bsJEQ->W%Bu-I{LJ+4c49D+-Aw=u3kt0{up7 zJ~!8G-Zw?cq^hR|;p}lKO|3hc^<4+9G3z=ZLfcV%ZWC(aDn$9ZpL>(F;J2coAT(DH z?%8l|5QDU>X7WqgXeenlnx3@HH!F=R9miCzE@u5$YYe&#S9TPhrhv%e*4Qk41CZkh zVV0pQ*`Bn+X9K8#T`AgESQ*p8NUpJ#Yc^t0K|2ZZ594DJ*#f}ih=Ivujt|Lj`j7@Zts>Pq7}0;PL}~#Xj z)X~Qx<(5*%UJViWt!3`E7y02ITzSE z+rZ2|pZ2NlHJ22gt_vFRp29~4YNveR)!ogIq&sszCI=nf9fVskt?3GTRfc&OVAr&- zibhryP3&c}%E&^P=JKBuIl`t=xh4&W6_bv)mqiBr2Hqdc?H>0LE3-@e&OC5fZ?yi|=)KPtSofA5VCB z)8YPobY4VBo2-(B`L8Vgh~>Qk!mF(14=qpuW4QoNdqt8RrJ3M%F_*Iy z$Ka3&Y~wi(`o@t?v%7g};?sO=~2pj8j=<{uXJQYS~6=7{U~ z&N=X;2W!QQJx*2p*t5!#GAO;2l_>WOToVuR;1@ydEQt5tmNg8bFAQ1BV=^qf-#G6K zch{Gns2>@E_oniLE$SyZqRKgQJ$gg9a<&)2RgtdVnaQy)SxQZ*i#9fX7BKe%|LC{fC~3 z>x~bY`L~(Jq^Xu?>{T*P2tOD-zvS>Laeomvo=QTypkaUm79dZSr}Og6ZKh{ zD*dWCp2BWyu+J9kg`B4+<7Fb=R_qU|?KksMAMap%+>lZ3c>0nZO-8s>@TB;(wyCp; zr;DXceFvZQk;#g^r%tAhW^8Qn1+N|f+2Ux;W(P0(QolW*;rX9)sJ585 z5f2*qx19rw7XyhPh%ip%7c`8w*Ly6fBCJs;`xA(j?c$9G)4WH3|@ZFeIDJ}Nx}i)w!?LcR)V23BqX|cIpO~lJv;EgHf-_1Pg6G3hNE8Ouc-K3Mi>w8 zb^jPp9*y9Dzy!g6C6=>)5%f;vU-Xg z*$aMr-yU=rx?_>=k_EMVFx<4lh<_`vy>w8?Qm?zf+u9qb8!vy!HqUlZs?VKXLSoNj zO^eJ6xLPG_!Uy+CPlSoXbi)8BpRz&+8NYnVzHw(AUZM9e_YZQ7)u0CCm0L;&*uNV` zxBT@G&WF{14Pd=+|16wG153hB3YN9qP27Mmis4nf^ z53e$9f*6R9a^A}0_;xPV^-8;FX1$SBAxk{Ra1i8i#8eMaFq)Zih;eOMcoj-qCf*cW z1E|Z#yTzl*S#i!@99)*X!zQhxH|Fobv7^Ump~mpd<*DM~N;{^x19|H_F$;bA12M1g z3$5tBm4k*(hW~Sb2cU8T5-~pG{3E1dL`Bm3M)1aR|9Eb|TfKk*CAJd`OOqsW!n=#MY5u@pmU>{WKm||~@QQ;~x&8h^^Y;m4AY)hOv zhf0=dMLFZFE^zfaNx?`=3Z2fTzf`WSRCmMm875tL!knTzvkq)Y57V4(^*kV!r#_!Q z+Mv4nDwWA3!yx_r=P_}XlzNin6F=-gs-fN5LE1ZUjfvP$adeohvmiWrYI!Y|1A;LJrp6G&s|u_qMqa_?n7%uqywycPaCAAUpVZZA$U=JFw1>mr<((*s#^DPP?o`?z<8WM>d;1VS$ zNc-4OxxPnZhUXA@FKw$=bImf1(bmO|%Fpyn9k-ZKTikZKmk||^Hbr2u zVC_}};Rt&lQ-?@}r-KGD)$w{H$km2O@6z)&a*RALe$_a+=i8L;n|&TuQPwl&L=X>c z>Q%_m>yhl0;Ytg9Uf?79#%j~G2|Fe4i%Dwo=f;$x{iK|N$;_g)=gR~0z6U?2;u?10 z#Dzc&=k^*=Eo6^a(*LLcqwEOy2fk~8Q%;NC#5Fw$pgy-B$Tp?0xM zz&>|!jNo-rLs;<}`Mm@++WMb|m!`=zRFmQx(*xUJnaln0ERKXY!K&bh^~Ptd1ZYo>1WDsX*M z`Y0Jpo7JPw4`l(x8LTw6h97A}@gsCVn*bolP;=mv_TiY4VKwL>H5zi(GP=5T^Oz9b zj1t{K{uT{vk8n?Lf&O8%*#WyxR_i`(-avMLVn1L?zqbnv`cb{`2CS;yq6-D3hUOVP zJ$apV&f*W*nBgr~iME8m@b9o6s3-BRwz~y6)m^_HFl_5S-{ID|hMeSTa}A;o??uWpRr#$}zXvtu)ezrhu%=L1N!Z zk;qZ=U{`jCT$W=@$D~BBGoEBV!&kmL5MI>FCBIFKrc9MD8P7Q|&pJVA!WOZwU6$Zw zo#YnyqhG~|e9YBQg|Xj6a{=g2=AZ#oG_|6wjUXaiAiv7ptCJzkqFToTcnT9OZkYTd z#x1%1Eh(zcuSZ;jpvS37#P_0uIl}g2LBtP(pQO2S0#oisTJ*+jg`dSo`e6Yq_)uLM**_ker5lui z7jpPD<&PW%_*D(I`PN##B`vbJ(x(oz=&*G=)HwobwL=a}Vgb8eExNH9F#L1deUl-6 zmyTO^Kko*Ok^C{VIilMO72Xz>s5cW5V(35>Vs$(07uNQ-ZwC_e9b5##G@qO|IZ%ul zP{h)TCnyL4LK{7o7xVe^YAO0Q9WFKVEK>(|P_)P4Aj>Kzl&jH{WQNx# z7K5Qz3?Iq_`+Q2^43_J48?TKV-wO)oAa$k1Bio?}%3kScSUQk|C({}8#PETlEQ@TV zMf8Z0Y}m?s(BBcoEtBKa4$+cXl?n`TvuVx2CU^T~U?=JpbRj~^EVV&v)h6EEBENdU zlZY`TFJVm0JN~XT!`q6;lMZV-|y7+!QR&$WZpbn3AO-+5BCU{5IWZ zsX&9f^k~f34hv|?0nLL0Qti%3oB}h#^we&dWhEgO8y#6{$a6;qEng!f#b_z0W%*RX z3$;GU-qmc|r-NOq{gPwhpHdQGvN{1L_lT))7XWyT9I~8=Ikm-4$A^?=M^XB~_LS^T z*~Zu}n*;!WHQJjaY={R z;#%TvN+4mVM&V;NO&&d=aQf2U`5O>GM1K;1Hk4oEYv7Vx3-l>}d-r)7z`M_TukZXG z{<|sk5K{7={na(^4qZT0gFrg^W8B?zsJ3-wS68D@{^*M<#H+FFAnDxvfyGjxJk?lt 
zqf757uBnS%cDIy8{pUqgRcp`=Lw2NThB{G*s>aFr{ezu;G39V_#*Ro+a#Sf_@KX^M z9G$O?iHaZfHB~a}a@;eV87V#WWvHW~2?N79u#ZRe)z%>#=gWQU*`H}FWDk|;72aH- z4A*`PoKa_pRh-Hle$Zv1b*RixRV@jziB*^MjX&5x0#8vr_GEdAQ zFv=gphyh9h0s;yzM#PGZ$;fv7Lzn-bggfIbh@@zhOI5;UmZMr`PeIKLdz)-0a8PViX$1A+>8Zdei0Ja`3j4>5MNBfNxCB)6O(dfv^&>0t!H~-5 z{dnwAxZ{|OlG);~foH!$Wg?qRV3w4gJseSiIoT1SVU@4Xl{5rLmL7nigI18vH02Ht zAG#?EI{$8gNcX<=QD6T`65e@WduOt^)Fc|mk_;Z*g2iuU> zoy*Wg-~40d6NQh_Rg|!JGwMDKHjz$&)Lo;vDOWan8ig!|+`KA6bAx`wyr|gMG?bm) z-P)wB@84@C`0fEwIT=`tMZaHv8JseZ)nR>8gHKET*4G;7J8b}}`bRTQ5!+QyyOUu5 zhx0>R*0jQ}?W!WYni&2U0D;^ky#{8}jXLd87_pXXmfDD%BH69q7JDJ{T7~hTpY{%a zo3dxaJX4rtj<(vQUR|aBYTBSSR}3;ROsX!s>=*$yw<|zN6-R=^VE|8Ml@ETcGges> zBa7?g_#mQ$nkjtp`7A)2+wS4gYD71C90F&Y(hg^wTCH?cT!;xj)FR?lTJ~I-%VoiZ zlQy+}{Z)3~K_O*O(@S=8%X?r4;L6Y7z@j7MtGesR>SM8gnev!QKn^DIgXY=on~aMK zMaLQ`w42e~R}O1!c1S%(WQ z^-kK(OhMYs!OgHB83!^6b7Z1L)|qInu?&MQL3yuyl>&pQUSl zNH1q!exBhIW4ZBo{$W5Y@v>*uaUi9{zX`PME!aOo+kS)sYgZ20PX*{>{-eVK5FMz8 zPlOrl7LTgHw%PzoLj_EsqN-Bs7opU=OAKQKWZ270qJNk_$@+w-Vm;I_yGPIdE6QB(Yv`l6jS*9UszuePL z)2hKtcV#M5a~XO?FVspsI3PmbQNq`kFv}k77eqF{cj&xMu z=aEubuyD^78FbMr6N!rVL>#s*`(XHL^Es5Y0ugaSHnn;x~o_hzNLS9?T}=5K{rpr7Ha=T9f2jEeqYCl$?n|P_P=`i=RFARZEwM`ydaOQ+w*KZ z#oz!4ZTky=>MoE_-A3%Y8>GYy#3f-n7$&#`);JO=uS0~A!0#AAM2hms(i{=e>XVT* z>5+_YgX42+EJ>?lp}k+kIijRU2{tHtL!ey+3DwAP2H&|+3#%W1>Eu{(Tm`5S`eVSR zNdlZBXFb%M5VqT&RUr>D9X%nJl~hm$jcZjp)e|bH^xlGeWqVuR_|r-&KM+k&Q3EhA5EQlK>Ua9lPIAMG5qCzWB+>&+=}S_0;LyyYXOp z3SX74zbkhvUd}IWEk;y*onjFwYiiX$ozOC}dxBT@ALAo`RU1GE0!D65GirQ?`n*)1 zX#k%%3lv^0VGr%FJTQ@J8`RQ{HQHE57|xW0R8z;A~ru-%;?(itAP?D%#p z^Psxu=ZqIW;B$wASxVfF&2X-C(_GbU$6%+D`P9aoG@*20=Q93F>FiJj`~e`lmco7m zaAi3m`bp2v-Ba|ALskd_ooG9&Inlt+RYj;LJf#E#;gJbh7aZLxv>+XY!&a`xa&nMF z4uZV_q*zX@vx3ttS2cvkE}`&$^X83asDQAr9e4Os;M# z#oafpq(c)DFp9TDW3^9WAc+dXC5G2_8X9IZSp)`91&{g4gh6S^=cOkYlxSRK?8dhE z!kslU#iK{4t0ONy`H9$+1X3>wwpNh`vTYKZ9X=I_JO#-@&EMCB`Byer1~h1H4oCnzmO~U4~^hT}obKFXpxGixBv} zW(i-Mg$3Q=Bbf<~SYlS}MlIZsX6e0p-Qs%E)6(kM)YQ)~=ys|X`L78OS6blPl_>|g zK7A&PXwNqpz}Nq!`u?4?{?&T=A1`a@hfnU<0oymz_-Uws6}P}}!!ERPA&gq3_%Y;m zkr`2HX`ktV7O_CnHz{A~?RNnbjG_mmrKIxw=XQm8>%PLX-ekj=TQ;GW$aVFd)|vSl1u-@ zNPwUJ)5YD_K@rC+P^v~>(>k6v?CC(CyadY%iyZ4j!N*#qI<*9bM_XM)Z@sKjiYn9T zjTw&CGa{vPXk=-tggA>q$<(mEF{@AIuXVZ{*V76nHgWSOW;jMzTvMDBO6uEI9Ca<5_yog{AmvjS-OBe40>xuvgzDHH7GzJxY;Pax4-zHBZ39n? z_8QMRo^tHvcJHmVVb$SLL&_6hjwA$KR%nsE$zlFbpvS1`SP!W@6k!`8%gy(qa_l%( zoLgAV(>L&(-Eq6S&`HRKa1(Q}?F$QrKl>)^#+q8aj<@sFhJlgn>1PQ1@Yt&bV}V;9 z!EgBAC=w#m)V_}dOo;vL0R9QE1abp^vOyq*iWCp}n&gobJ68{dARij_Jy*PC3}v~R z+(gI;3b`_AqVkHf^G(=?K01!XJ&TozDxe=Tv+wK14j;SEPZK3iq(YFJt0wojX>zYd zElt>nT(IoHTh8}wxFFtu=V`$s->FxtJqZ;kGXkGRsnTc)R%WyCb2?c}R{? 
zp^3LsDyLLriMljIG3%J$P(dcARX}f?QI$xcDxptxf>Dw*L92LJF^P#Jy34dz(g%0~pZp*Niffh4Qy+A@#cQ>JiX?hYzl$H*GQ9^OCO|P?Xt4MjB6M}-2A?%+G z5T%woL-p;)Evtkc55Y5ehk|058huA^480)8YeMK=*HB<24oqT&VW0yA-X7sf*BhC(?xe5~ zrEesqB=eKYN)xy?+whuRv{mKq>jCzcc+wl$`{xtIkIzn|ew@$lfzWcFFb`jUZxKc5 z_EPhigt%o~iD5@op@M%@epUY>AI>mUfS)2{qi-}ro|fE9wtTRJhj^x6Oij{;-y)gI6izwRo zym8^Zz2(=G|90 zSU_BgrmfKJSh(#w_m6e>)~Xrdhc{Bf^LtE|6C&_};oerZ=^?i^`hL?NDM=+j^?0H_C!<1?@XaYQCqEcuBUBekmV%RN; zkjJalmr`UWLb5=WTghB@fiXXu7JTZg z6>XTyHP=~%Ua6C{ysR&=<~{SIPY5*xX`q)c#L^Y&@-7Ovtlj}MB~jZyrf750{OIu` z5+oE-JO4UkY9<`A-Ir`0g8w&qgpG^>G$ZTViSxwUJ2+dtE4n&3IU&T>=$;v}Qm;R* z-_F!`FZPmNSnyU)J}tIE@-*$%18xiwGa)fwoaCUsnBQNLeJDeQxwknT)*`36h#d%R za-71ZO^PCG=r+h4#-Kh^Jbsv%?XH6htC;}|q#^uDk~ zp?r~ZH$FaIx5cP!(P1J&35D|2Eu{bTj~(?_k4KbpEqSAm1}tS}D`czF?|W|B;eJ3( zATQDh9T;bI^DFV1!t)Z69YWw}6s`p1Wne6bOKZyDMoMom$}2B&)4us&%FuA8TbLMs&Z~ej6Eq8@4Py zQ7~cc6L)MlzxYF^_DC7ee3Wp4oEaSZ$*`jUjnGvcwb0E->vrKdT1f34o$A-Z1)pZG zS$m8ybK|QfWj-E?b4oqi0dqaBB_1Rc1(?|E<41t@?@-_xx-$XpL2+$rP@d-h;p{D- zqH5c=VMP!G328x6Vi-VB5RjIZ9+;sU=~TKw1q7s&6lsuQ=_7!dbRmsn@N#k zj58;VMr?>6LUou+J?ZiE5I7G6;V~|&{0zPc?J4)2KHSixJ1@wqI&{;ad$4zUSa#xm zh%+^7HRW&$InQk}-kdA@b$rqD`O5^6_#6?=Ct;7$5pb^JaA>$P&OPvTpyY;JH!1mR ziPQTxLvInWd_)u_#V36Ih4V&>;+-~(dQ}RJEEsj z*3;%hS4LYUYH!>67*Iz?Ewq#OJYI??>|!rf#w4<26HkCM-<$q>cMpUGls+JwMiQPc zPj5kAe5#mmrG{$f5oGNsXu~iNvPVdg~CeVGEry1j^ z!+qv4!bT&1##`4#d$W!hsXaw2wNY(k0EzSuqvApt=go75dI zCvB#T{V-=$Nh!6TEYT(;i~R_}8A&P%ud3a_cM7(mKKjU^)MY-N>p4`R3>k3GX^Z)C zzZz5jK4^L_3KS{;wyfr)hTw zw8hRwRYt3(jz^a7Yq;h0X|bguz|oP&qk3XxeSC}ki+EKY|W27}Rtknu%7s>`krjKyvJD2}x+vNUAFl&O*MS9%HKdOd+ zvN)C;wg@o#H(v+NE+}sMSz;~?RHFaqmsKOi*TeG}>2}+HWT}zS4;Q1Twb3_FUTx7uiO|UzK$7Ddd)Z^^K5g^9%n-tc)6{!)$l6>B>G}M3|(_p zl!LsC%(Z|#EM$LY?+}>wXYNs6`_!T`<>Y1Tk)?`twuD4>ixBLfLsAMBWx2v$?HN%r z>g3|Kg^qr~%5%Wd=D5!b7G;K>YuS=Vg&dUEG22M!6?s!A3a)^Pu457?8=cMiid0uT zB*9Q;Rn?@`5h}dn{w<^6m8h@ZzJ<->(3c}L;EV{AnwNDThICDE=x|wonObb1H~*$| zwQjgpk(nLyh&Ii=y-bJ0fxe{mT_-<{@MRUimtPCWygvt9DXiNP^tw1+Y4^-kh<`BQ zC{V7P!=oWa1z_d*JM@aq?v3mkp;x#-#U$YVtxnrKW<0SZIPc}UTv)#YdEX-n6_?7c&OQtIJ6aqPUp#H!yH) z8H<4*-={U39t#PbBEu~Mu5bb%LOlQw*(OG0AT0yeEJu;A3D*aLP*eF)lI z$8&*OPnI7b2)F^j1|iz<-Tg1(E`iQJS0tzJozxgw?&HcF#(`_pvENFNS`D`m(u(hK z_9^IU^>Pl0YdY&`cw`voaz_gml7{H(aBv`@5C*b)I@7qRPQ0~|RgJ0xmW&SiT#7j~ z_)*|bq*nI~b!D+4y=nOXS2qX3{o&Y(Utq883;AZc<|*f+*V(1`J8WW)Wy1OzlHI1a&{`gETRSClM~xl+`dBWNnirP?uP@r zVscETxrg+#K_nsDx48CI6Mku62UkmlTNx%~q+* zw8%F0$1|PWLkkk^!k8a4LJL#i*8TJq6c3h1N4!g3$>(A`jc!9V2`di&7zH!Neuk|^ z^MfR#I`+sEps^r%3y#75h6n_)6~|KiINBTBX9pNqhi^qaB}M^ODTn%7FOQ9j{r!E@ zqx36D?i$7d5L2i$-=T0eGR2dpZtdRlx%~)sXRtA&Q>u$SS|4JT0ancFlzNfQJpgBn zek5(c)*&epo#PjZ6xL(!UPuZKiGyvzRr@Ukh6`2IAVI@XuM|o+3FN7fiTo;LIH9eH zZMhet)8J}o!k7~7!~0kf^~%wG+2yK*r}Fq7vu5GjBQY@wrHWPGi9_Dv10f+FFpm4D zBYP$|<>XpgT9za60QKh+WViw@$9V?;MoUHRkN1mbFfR`Srde!<{Ul#s{fJyz12?>>Q*ipf{ws;OM0; z2F2mcBRlg-sN8p3lQIFE*s%rAJ!6D!GKzi$XcI zSlU5MK~48(bPV;p2Vd35cm{tNBzEMCo+Y+OO$u2(XH_|xSU1Cb>c0N@*FM6MK0QRqv#pys&&sQ8}Vh)*fw#*NtH-RagR*{>vP)Pxxn zvj#qVz5g^2Oti)r3K)w@!^`N|1F&>$uItR`)UO+tV}>K)2igvymv594!T5o_7pm#^ zecrI7+@i9yPvsmf>1RBnq_s(g#N~S28_n%fjqZ3Y1>5Gvqyv0kb2I<}oEIC_3~&PU z@c_*EnBfN6BdRl~A}?ork-0gaf9iWYL;FV+{)tLMT^NVB^fc2-)a0p!WFmdW?>o0E`KzYu!qx3w$kcoC+b! 
z0OKqkwS#m$W75xWp4tuZ5rNb_E*+Vb#}WS`J|3X7ObO@P1{VQgy5>3Tf3D}p^p zuXW-T$z(l*K8JS?k4lOMGRouLHZ#C~)^`AcDyj`Iaeikf%k{%yS@U!$pf%WAw?3*^ zclaG4I&k&;&JRz@s!Utw9rNAJ1|ofVHN5^n3`=SjLEp1q^B&Lsz>H#>fb4AMy3?&X zHxYv0*MqZB&}<=gs(m__njM2^_q9eM;DU@v7W4Bx zG5EwU^Dh@7pb98Ac4yCXoOk!rR3p@$Kt5S&V$oy0@+O1@K_MOQBVGyT%6d9?4_B5$ zuV>U87ba_1!pC4~a_7puOeqi5W``rDecRqcqv`bt4-CZ(AVwXppVnYVYCd4+SiH_y z}?fDCYAU-7EEU3)_;M%xXc$`L)vR1?gOFq1=qt}m{nKLcpCby<7u zsODRErkB%6)sI%G+z9@ee$5WK>BnC~S+qtA^`h{}sA?U*NJ@Xy4165QTrtovUks&x z#wt%{=3zC!tqFSeh1u{^+W{=d<9zjLj$=Buj)Q(;MNJ@KEBe+TEDq3~?Fq*1G~LEvS;5?ot2crC9=&5ZmhVQpZ~h3LXTtBWlG4mxR6mH! zR!$ZixiVs-lm%SbAGuJ#ho1Nz*hah(#l7QUl_szSKT&a^A-gd>c+6e;CAQ@Ot7Mm8 zhzsoF>iBM9ELaJ;7|p3CXUmq0pyZ^;=45k^Sx>-!By_o#flIVBs#ME2`@}pqa=R}$ ztg_Vkdk`+A*+;=$HOWv0&pR1W5L|Zk4mlfnCmp2RHH-(*;?z-X@pJOn6h_$?VHOag;60#c8})V5`_*Q9g1@CZ$; zJ$Y#Z@(y;KC0y(!`IE)k-mtGGgxA)*1>TW?)eade=vF;G%S+O-P~FC*<|qIuLbX0w zXGtx1QdF=tv_vDwmu6IMWmF%0l!A#crj|_Bt3k?7;#tF6IuGXCk*32jXr$tZ3A_N! z7p_=M2_p&&D9a=kt^U=b#DCH}*D<_URRe=!_IEp_LfT+~Y2XTV8jJ6>;mYP&>(961NM`^O4QX+aP>y3b* zqbNu&y{g(*gi+RsM6H;fK{=xV!F;-}SgEeP;2jA`gAmK_ym1jXu%gK0gXguZ!&}K< z{5l%HLp}g}T{-gIb*@)`A>e-=;jRG=$Z}L?Y=#5HNa?+GD_JllQ$fES z_fGDrsvN5O0Olii#26T;R;0Mt7lVgH#6UFd>h*JZA(NZhKXVyjPeRF9_7TzQA< ze`3HJta`!d_|Nw&*kM>(-&s@3`RnKL7o0OWf$V>E^YQ*lyFt3<`%jvW<lNC*C#1M2 zp(4DCX`HckA{$sG@)LuWxZZvD=eUE%-s1P2vhi`6wX5(b00#U+?Xkhdam;(d(Up*| zASi|!Rz`$98+%MI_-U4rYDQF_txBDQ4DBzrVecoH!NbaIsWXaVbelkq@qB{Hu) zb03%=J{VTKo53vumjl}B`4-8-JgH{$XqOPS!|Til)x`!fQQX)N?=oNbXlZn+ z)>|aSJw8YE%NvJ8& z_nF;Xn}$WczB3w3!p0PHTpfn+&$3WZ_~>e6T>qt56%JRe$h0m#(TeQ419*jmn*nuO zwRs~po@R`Inh6MEaMkZfeR{cn((U&~P}K%$Gw0oSC}{ zv3Wrt&+9H*8cA-8>hlQzsK}d0!Z7#5`{gSwlT!voaCg4ew1^^eM&m$tNZZY-Q7>L{ zv1NTw&6c~)WaiuQ%UA70X*;KvmU53X=Dim; zpMW3iy3`rU35G5Pd{(ppizF`2v&{aEn`}gY-z4?^UeAajM1&?1PRrc{GC8;Y2$0;@ z>gWqL-5U(FADx3?ieBtSPHms{Zr97R)86+N*A0mD*)T<|H(jjT)eRA%@0BW`rsl~x zHX}vOvX2MkUko^)^*)d#+40uQckaZJfXhA25I!Mz-yVXlp=!5kg=l}LjM>qfR6Bv2k^51{xKRmgfW@b$|Tp-LGuvbV#en2 zyO_M^(VDDaZ{fqtN07OB0F!-I1Yn{%DsaCR$Cja=Ptmy*pWpT&W7U-a4biwRAUCh| z@0LJH#=qT9_|{^2%Aj!b!aU8ayjYNPl&FLt=*LL#7+7ttBNF)`9C62EC_}YaZfxd< zq~s{XNDjM?IhQt?443)%9Gt-m=E}0D$w~CSW;fHrZy2okWUuLW3dxGB8X%;k9mA=q znPN$9$eoR{KE&WuzhH8&a$P^cbxanmxo;GPwiSXcQU&9G72yBz|NTH=6oSWFOJAId z!SEyVmT=n#sYFzIHpD&|UL5i2DZ`Hi=2v@W+3v!iETyJu+Lt1=bU%fC(@BbmN!vqG z-vJYuM8bRY14d>OH@mvh4dJ5BCZcD=-p~k*xn~T?D~KTI?N$!s4I-*R;TbTL3z*Aq z`T6p@r;Y={Fzcv-rIY`gPRjOg8pDg@T&G`mII6puXpwSv-9;qpcvxL!!p87)vgBj{ zNcFR*148?7+)xbu7Zv)iQ9a->AS4^|PYm35Fbr;3x*VxSypl4mQe>bB3=v*14cU{- zjewak5|R{53EO5>tB|oEe0>AQOS9Q4%ZrH+cU{ zGh=9_sPMN&!b5??Th!yhbJb+fn~+yzrAo_y!b$k*F@NPo~cKd~Y5awSca+~K#ce*hE>ZJJAU-NXg0C8eE`iyb? zbM#?(pYu-_FGEOk;z$FO8d%@N?mS!;Q5bvY<67LL(n@xldfE<(Hcph;!0>?Rj zw!#~prktaS&lu=WdXs9|AQ4o__;L3{Y-A92*V6eXVFm0KExHJQP*fA)5jVyDjf~pb zA4E6avh$wP(31hmg`9jL^w=%w@UdZ*nxqxth0B4eI{Z!62kv|XerzkSD!zk=d@oZh zytA7TrXc@*T$y%#+}kjn5!g7LwMLHSrQBo~fW!>*j+p+`LKU-syidrPjuz?v9>|(8 z@qRcDDgDOJJGk0Z=zr17iz|H4FNqpd;G&t(WL0#2-lSu&!Hw&_+;*9LdxAO^RdfEC zq|$xxhEz7^gCrm^+l8mHEu|5Y$4NGN3zE9i*?9{6Lf51=H2y(YurUAjM(%clZCDt) zYkIxq3YVA_m*@o{c{{@0dA-1Odm^=XXess zfMl=JJlxepoc1=Sho|(6kvg~!r{l^-CDs+%TqMMYF7oen5r#nDf(*Fv* z0>UvIyr!l$oKjDJv`qn>VQ5F=sDwr9K6m|eFTTlJX{xr+zB*!QZP|e_KmQeiA(%KGp+L%py`9Yn2Ldr4t%8z~5rX=nkXzNRX>KvxgsAuX4|3kUg#11&6sH`YMC4>O6W7L*{(~5I&QGA#WKRwc_|xPk zPRnw2J1?)K(jS0$X8E!aPvez1&WeEK+pX1BNr9BRd;ZG&OmFfLw-@c4Y3$b$Eqo5# z#*to`3hd`cgNNK>0xuHt49QUfgC2|XbYdDu880ajj4mYS5_Rvz0dRK0n)$`YRs_K^ zDiX;P7;+vk>A_>DAWNv`CyLa?j;Md~JD(DRW#4#gF66P7rC;r6oIj?gqf?rb>N~kr zzu)^GN2);I^~ek8+GsZz&Q;IJA6k1i9Z7Jd1LBke_-t4vTofJp_V^i?h6lc{J%Qn? 
zskWC1`gWfu8ouet6w2(mA$qu6B^%jU8xj%EwSsc?{1WtyeU#mxGI2Gd$E67V2x4-+ zSn5)?KYBN-mfwayrGCb|zBFo3(YV(0ljqjJ3w=4c57MC&9z*7DL2ts&vVTWJwmUmW zDMFEh{&5GdtshTXq)hV$t_OT4cHgR5IVjUPzcJK1<_d)Ljrxb2R}AUdfR*cG4AKNN zknYecH=I^Scb*P6MIhTfk1WXrRM z2V)uZnNv7UbhIpp%6{dXy!Ml)WlvVnF5wQ@xKhIE z4Z2^=cVP1sL>xO$Ozcg4qyX;SZ*#@?94cC8#_ESKT*P2<+UHSJJ-kx&Pz`o`2fa&R;_~>oLdvs`7-BJ{a7lp1H&ra zcVB&bVN%t&oImZ|dgwasl9NBXJ)wwjjJ|nD|DHl*&&MSXdpJDn;TX~>`Pb%h=CVf_ z&F_1LkODCF1Xb1KL;A#>74x$^KnkJ27alnNcUS(85F9Ph`LD^Mh(J2+02!^Ey^m2>&6jOwWpon4bZ5Ks|WCB}aL3L$U7!Ay-~roW?!yjfkc8_;b=m+fdHNBGA9! zO1#dd7z0u)UrWe*c0N(-LA}0fk#txv_6^NJ1UfP7TBU%9S{55i1`A)OF{7Qq54g{X z#;9GKRrjnY3;@9Qzxv^S9x*D#tlz-_{K7p*fZ(t?mLDUG89DhbION1fh)>93zkRua zt_(ou>#kmtLZgrO&Kj>zLg$2cP*XOn!}4 zHU3E%8$CidrZFn=-kqh(yGp*`voxR@<=#W}EAw|OKf5>^M4-$Y+a-m4^~Jhe42J7y z$>i@G8+d-SEN7E{ZiVQSOfJkuNW{2$#;poMm>I>|rVE*AXk7HI=SCj#YSU2drN>C> zOe^?lYQ7(-#j_ z?ugIn;DJ6kjcCl~MN(6{|FT$E{*noNYxBLIOPh;_dOC^2cjUM?Q9~@FIolP3JvXZ!{?F?lZKrAr+lW z5c8}k6h_nJh;s3%PN~`BUN??DCx!Qma;HEPz9$_JM!_|?)D_?ETft+|3-@HYw3rxo zbiRs|i7Ywi60ZXsKY{)_;p%W5wkL9bd0^Y3HagPPFD}xxNF2Hy*n93vj_N3dTeQOI zP-N>9@)LJAbMZ;BsfO(iStZj&j`U5T=VNV%_S*~V9zM4>M_7j%(%bf?Pq&y7vdn+O zis(D;)9VHn=e~X$N7{ON*g&JFEknOW=Rt zX2Wn<{VeJ!*U`tnb$H$r;oDc0*Z!7N(R!X#p1z2mPEfd7-71B3xS(V% z0(pe4;LZJ;hi_%6AGOtUV+2d3slr!&24Z1(y@V0&sYU=9-DnK)fmM8AJgro(1Nh`< zuQN@qO1`W0^1>&vCZkqhT|1XyrGjRhZJIwM{{=;SFsem?!p`bR6)1v3$#k3Q#0EJ6 zmNgD)!-P2bI;`rK=Ojy+W>MWQEo>7UP-%V0JNLWMoj9PXeu=YS=74(km(2eh2uOrX zo?P)Py+CN%{1F<5a}O?FnAX>l-BnpC3|d#l=a>dE{pFoES-0>4AUgv+{*|Kc^NihZY6-z7pM6sFvLVb5JuENBF&)4 zOEqp$QE%Q4J`nyI+_EpRf_s0>plGbjKF8(-@surBNsWC5H)J~UJ{mF5pj+9`7sM_y zIXOACn`To9Kr_Sv_ZiXBnzE~+xpUnBeFJ}Ovvb8bJL~7cOGzfl>i6fOKM{37#4W6t zH7nA(A7Oo6mWTg7*aJYQreVG z9%)|E?Itirs^=k`dfz#?WQ{WlBMq?vy@w$K=?E!EHAIQ%_?PjJ5=?C`h?WIu)6kIE z@}6n%SN7U_Ak|KkAQ-^kxEs!(S5sR0Xtd62b$|O}YkPxc+r3@u+!DwL0!9QRFsv&4 zryQCZu8xlW$7OO8Q%bw@9qtJ93M54?eWB*M;OTE~c|Wn#^_f08@sV_8zE{cwBt0Dr zDyLc=I7Dg$dY(N}`<9WlV1au8-_B;oCBi4sHCP*u&39lQ*i~HueP7x5#valG--`|o zV?6&}ELcvyG!lh48vdHT7#+FRMHv_(am+Xkg00C_EkmWPD&;toVxNbS1jb41Oa3Z} zpWR~c_K5>dSVz3-T8BahTUyW6^pf`DHvYz^36{2x1?e5_uzm+#XS*hT^MA{A{x^|- zh5&(=*LPiZExD^e4a{IyhWZ>1?W^34T5esgi5#ZP6g^^Td=;+5&v#_QMO0pEH~N%N z#5oh=X=TPj3t8E=F!fYd(N}~j=|mTMi9Y35K2_D6cf?(pg=B&!I>doNi0a6j8fmP1 zs*xH?PQ7cASg-P-OEp7-=lhbjspw8RPC1DIj9M5S%d!k4~Yo-xO z#0=zCCgU2ikoIH+{v5K1`;D6^Sxw2c)kVMV!g;nfdDvda&h9>1$&>4@-@ORUWyzJu)|*L>LcuSe8VFM$5pp$9tvC~bfxz?H69O%{0g zVHRdLn`Ji{`W`Qp#B+;@IpnCU2Qy!>=0TteT;+8W9x{v~7NQLHlMWwEQHvL8K%3ZS z?&ub2K?5o&Tq=(QP-!}Zqw$LWKhkpp)ms?Wnvr7m-}LIyU2P`3%>`YiLqa6 zYn!qhW18@O=TZKo1aulIHCar6EDwzAlq!TzsZH^UbP!lopc8}}%`&>%gP zh(rFTYBWfr6rRb~pqyocbc*Fe>Ymi}%CfL9bF{WSrO)3DQmheBr^lig+}J`4E(8E$_x z?B^C3x>8%)2AW|M{#y0!!9rf^GRNC>1lDiJBz`Q>xC8VCZu-81sev?@)5h7c*1So# zZ^@bkNql4IL&YEnpkUz`v)31KkG>4}f;r_vwleSf}zFtZ82T%W{n(^N(9O&%V#IP$RsQVbzi*t`T zK+ybWyXS1rsfSTJzgW;TSABjYw)2^D`Dw^y7d5R1U7$z3Bq_UU7UqCrmCbo#VUKlc z()#$1eAJWVKIDaMpZU5^p@q+;Q!NVB*moc}y6M6>QZlsmW|ufH=I6kcjn*_0Yi(Sh zVDz(0Pi14br{Sj9E^AMfbz{$0u1{D%lRoCH`b|3rfW8#@ov;5vI|K%EyX|xikJ#%S*U67R%|E1g_2-)o4u>ei<4(>0jGfe@tHJti(=Iz0ONih#OS_aMhJv?$U;%*d^`$q8`fHMOQ_A$-OecLchv9 z)I1C&sx@EtO3e3N%+1i4xm#X1#K-iah;7{MH<{p@v9yo&G7)#yC|^lY-5<5Tkkic| z8ptIv=Rxo-PTxc}=8Hp?+hdX5X{ynhj2|H`j1FSZx1hd9y46{ZQ5aGN#~|>q74Nc+ zT1TtoH_Ic9^;W4v7cDt0Cl_+jbC`_oLV4rk(HHl3;}-0^i%S;b%Ir6)%_M2jG$9WI zjodfKOH5sl_m)PyghY;!noZWgex0W3q2+SG$=aZ|m~EizC|=82V(@8?m1NVStF?*) zVV-OUejRY(itqhbU@~z;VzE9t;a{3Y!Q3f${U zH5sDmAEU6NUczVwA$4>M1GFu2RAcPK%rDY$Wq?tNM*ii&RLYAVN8?l0ds^A=>OV${ z9%`CW109h(=rCu5z#8POaI0USDY)IZ7h6>RJFOrf5U>rbSUAz{@@u1V*hiT1Ff54} 
zi0NEq1Q3V5Sob=ZWK$#R0%s##Jh@I=#PeHv#-yabY-}iTm#|o8x;jWBb8ZT9R zk)_DMAfoVdFh~+BI>-aA2DNxCsPh^-LE;ts_F6`iE4V^g>8|}4Q{FaYcJ3`Y=;Sun z&7Rwz4#qd~qNYe#sFL0h;M}WjUWgQ%vF`QSs>!)Bd<4$eB2`D0b@>%%_zRg~R4RW_ zQrHiz2pj#!$>yzgy;W4Fh>}X{>Ynk#Of`on3dv3ONxY?CwRKk#1^I6w&?Uyr9_=4% zY;rLZElcTUxEG*X&%^NIIqOP|a)JKFv%B4{oFgsuCIjj!93Y37T)ybG(FTrTVl`=o zzz>64`LDzWA=aS1H_<`rwsQNfw#xf4$sIvR(pdej2Bj<)=Rrq~&>uIu6eh_`28rzN zk$n_z{k8R=k?x>U-M1*e{MfZXqw(0fIt&>1(QIy&va8(oyF5jGB~ckiw-@}0$k%`| z5}Ip|)UU#raW>GS93o54xiMY2=B_6cueaWiHt+ypdNX3VA9U}F8r%iJ%rdmR3RfdE zIdSVSB!OmSGlUux0fEZI;g>pYjf9aGft+K^I*;J5qT4#^!*43S|7xD0_%w|PDglNi zckl)UzpKT)?GhuiHxa8;DzIyj>9Sf=b!*{Rt5H-0)u7{b_@EZ3a{<+7ojD~3&kxCh zF7jkQ4em~T$s@S~2tOPUegg28H!{r`ujI8wh<7j;(atagLw@;Vt1*R$Z!i^1>%M0hT{IPM z;G@X+$Ohd-OCUq9v9p)F?QN0wU04y_VEzFZg0OtqM6H2tQTuDL71sbG?|g3aTMTzB z)puCl=ziQDyT_#VJz0sbsIx35Bq5KygfBPr7OaMWh9vN%h8wlcdqUvj0|iBMy-H*8 zuC}5OUD!>kPii?Wc$9&Nr-Jw+0WBZT_53^nGmp=*BCJ=2)@Cm{E-`m1EC(~-2t#(mll{l z^3XnMXShoiB-Bq>V6<_pm!(!yyRLCmWdEH8kYXCK`%&GjUFzl8O8fFJpew@W{BD8R zH&Qt?7XD7+zERSy!-+%}Qm;Pc4N%1ad#ts{W?x))lW%@*!e|8FJYK! z(iw@6AJSxRT9%baQEa9C9K3yx2#H~lri8@DHMPu0VmH|@X&L2;K4;qehw`$x*K@n0 zG^zIJ)gFZo|Jo3#IArkdbDoV1!H@td*(a+d7%IL+KnvytZFm~VZ)Yu!HB-P1N!nbB zH>`B<-A_WH*^H9g2~gS)#pU4O7(Z&dJTI}>K}5UFc&v<{ul`LX)bTx=jxB+E;tM|gE%sa9{ z3S3$?%ktV&mx{bh40%uTWt(!yYdMnq!jg~Oto<`utU13G@#$RS=~h{ieQj=Ro8iTh z;h!}2xM2WLrOz6{1YmBRi=ezo*_F^gy(2y*T+tc7KXGpkOL(U=UYCcxY|J#6UKwW@ z%3kCmI2JvqVs z8lOLVH*hevJ;&-c&^yxFM|Zdf9L`4_!hcu$ikmP9#m*e_ZVBskT*)r|n=6QciHic@ zYkJ~XAB__m;2(KHhu*kp@uLjtVR8$W^fY6yq_QPM8a40I6cLxZOMgi-A$lbqLiRo9s41S7}d<`~XG4Ns%6-^S_EMKEMZ&8PDc<_Da3|7LL{1ogF-eSz6t&xcB#yX%tJYd@tIoB* zmbuW(%G3l6mW`jfg%N=Vukoo~V++9x->wJ|R?H&Ythq@;Kt?+F5Mq{@p}GSK;plqV zRpF(|W{dkEkUr+*O{|CCpIpG{TIz+>c_MYNSYG=QPz$9yUzKg&l<@*a7G(n}V?zvp zjx46x816aNuXgw+PJ$D5$q@*7=)ablwzRIDppBZ!99H5c?69O zF(JgteI4vAkL6Ks3|Gv0$SXB|CfEn(4wQV(B`kpTH zH$ADuG0rIYzUtrHh^LxpIUACA6;>v0M28+ay#bb_{Ui=gP* zkTymLXtChUQJ3}AD?{d#&mq_8_8Z#ARt5I{eo5qG1EG0_UR}G zX#Az;#`;}%)~|W_t?^`}sB%pnSPw9cjdsmUXNr7V{b=vp)7u*{6^{CQRDljb8;Jn2 zK)rh(Ks4Uqe<2CPxy)B$2xU_3qNGX1qRaqOb!dA%2=cjJIbB{*`|i5d()EUh3yoNM zhbIV7mJ=a88RSkxt=ek4-Yp+lDUov4&ZM(?r{amyyH%N zBK&;c(c@WOz-Bq2--Wm48N5$IZiORhzuS_oJg`)Rwu4xv1wF6pOS%NsQ&!K4mfsh4 z4)M}OkkhUk@DBuvB|Y0H9Mf~)Pf;_YXAX%AD&brzS&}F9m`>HRL!5{m>T2GQ0e%bb zsFGujXwh-mjD3@Gxk6!U=3x~7fO87#ocQ!Zq-cJc>T~{S_Ydcq4nD7wg+0`K7!`zt zN!$8mDZ5t=l#DZwOu~hdiUw?KP-ZnO=B3DuU%M$ieJ-4vUtP~XY5TD_r+kVW5ZbVO zs#`&GciZwd6*~A*W6m5k9!=1l5760FpCaS^b}9Uf{dTbZjI25%9g7O3@i0-EbMwJ; z_K&};0}Z!-IqC5K1{=3_lHgv<-vw|s0{t<-Da?DJVl+P8y(rRUHrs@C&jcMV^@NVR zo4HCp`JeR_>fIA^lu0~hm=E8)ubelsj5mNJrZEnUBgOPe*yA676iKJ%zP=C|-JS?B zEkiJp>gE%vEcVXzae>|1JAZIffmEbWja%Q&7GUOp-Doe(K)M369N zK6Y3CV>=1sL9s#^lh(h*fx17iDGslc{4GJtPi|Auq=`vP~Ge%@wOd|kuZFa_Ur-YBKPjq&6juWLGcU$}?!-z7NL$l(% zr~lc8^VfU<&=M~YKr{#+-o*__#)FRdj81A=QAch&J5ymu;~why36~f*LCF})^50fO zB5vgB^UN(Q{CaCl>I3qF!UzUM$gpvzewr9_2{)wq#&61Bgl4m^^pG2r%dMq9x0+cV zVip?E)`$XC9Fv5&R7A;rr{|Jd49M{O)`?eO&rr=<8;V=O)sPX5{g!bZB(?Y@qwykU zjrTGu7kke)vm!hQ$7y7xr)1qLi6*J_*r~vz%)GX$WQn|oZ)V22I()MlJzm*Hl#rBE zc2vJ2g8Ihg`{JDeY^cv?%L(PxbmWEW=Xro0_b3c(ZfSXip71eMp;^QJ^#9Mlb76gK z`q>n}@Q55kR^F&i_*vfDP`AAp6HcOrggPI8@tY5G&V9^_V?rR>z@xSQIG;#fb|8f_IJqwL*i`hD{U;4QTek+NGY5B zU*ps(2qz&?m3;c>BC${F_Xh)0ySaa5%WnK`LyE!k24+#q?W=5Fe=)KR>GNLZ0k$z{0C_ntR-iI^5X4_^yiNW^?r)Fq z=ecrHxZhEenOiNeGhqhDT`>oG&0=$PWp~wJzrIm3*zL^FM9g8!*I{9c?A1xbu=y!4 zL$y8`a(uULYq&mHRK$TbqEDxZgk6zTn>9jWmI!C?CF3S_JEF)(-{$nhb!K)d@P+e_ zLydua6{Ss2)tl&0RhZoT{R8V}05+{&_3{3J@W3KNyhJ4SzZ7b5wEV zo*m2FjW zV%7RdbaLWj1?;M+clsqv5u3}p_LWI0M;pT>9iAc;Bkt3}no)92$Qm}I#iYoOk;{8- 
z``YKLDkRdu-bV!nX4O0EF8rf271q@#M_#3of&oRTWy7cA!@|3Z!Yu0C#&5jF}_ znb+^$1Q3@N7g~+SFOWqS*5^GmdLox+`=})@zloXsl%tKb$4UUaD#D`m+9=VGoB>Sv z>U|dtq?R&NsF!+e64wGm$anO*lwDI@%Uo%P*W3@&l{1=Etj0P=DQ zXs7e5A4w?knqL1?Y4}T1`>m&5Y``L?hT-QNz_>!Qq)3VU36uP7W-IGlFj=->B+LT@ zdj)!?wplyc_~DxBwE4n|SGTm#jnmvOFmjZWOR;I)n;Q?ZxxyPN`}{8UD~pSbes9sp z28I*MDRx5N1H}QUyz2(1lVks%Ry3fks)O91#G>&iM&v?ew!}~HsM_ZUyKj6yQp`f7 zey0V7S3e(Aw;AVUUp}?v`MTj^hOTPt{8xkPik-zKH1+b?1(?JBKL>4&!1@LF94IfE zVE*6T3jvyU<}O$EVE*pH6ByE_9@Jg7fk?r_{Zi4IlW}h-wkcm_sCTi;jlzol?GJ_a z)Jam|TFS;Yd4~_OFZK$C`^})qUS86k2TydKIX$Hk-fPR7n)DDEMP}AE*-K@5#NHX? z`qZagwH}b(p`0RW#abkEw?&H8+4tx03nAa1)hQmuaGU6O&>;CJzh}GS+mC~p*w&gj z8RZ-YA&+f-G=oOt5U1(iS4|I4PS?SjeY$X&2Mj*wGoW#b!isjYSmVOs2e)dggT}|~1HI@JfsiXMyh0xas7_K)Z zbYAYh3$z_C{*&ivmbz8Hr*yj7cM&AwclL`Q-sQXfS?+B4<@#*BY49d1TpN{jzU+58 zJ2g8+Fnc)2d zNaj|eABQRR0){~nBk2{EDoe1b+1~Q6ZPx-!7(K-IgP(urAPH#YntMuGqz>Iate|TU zP}y&srM4(KJt?8(|FORq7LTy=~0pJYRGa zSo}9TyLEn$@Sm=np&XT|Q-CCBY@hS{?en7!hE%GM3P4k;CF>4Q(yu_e1H`H7pOC~N zPv6kxyBm2qlAv(Xv?=CunE3EiZ+#he{H&L?&G0M5Me?~{9}Je%zU>|EDSQMwbu*-N z*gExHm<1H{u|aC<@X5EP%U3=Jhh?tYH@xVWCRZJm(=!Y&)_~cIT1o$fZ#i?vJg3JV%&k zMk&(7Roj#ZYyoACmU0ytJY0~FRP;nuE>T-m5 zwt?WA(TB`gU8{{7;oNUuQWtIo_To(sz2KF3tepEIte9{lr>5v@b^#NWtd|S)GHWt8 z^Yli=U8G7-x`A!lbmU>maZOv{k!OnIaG4$~+?(!XYaM^z=4_w@8NXgTc`i6qrdNlz z^s$a|vuAq=u`RQ$>+q2einF)SCfc)Jdu|`$B{E|_Dq#pHwhji}afhr4wZQOt^(M@kL~dku*0C()Hs`~H+feSZOQJr7;kAZa@DPfmux zS5+E);TJv=hj~2c`v%6|Qe`V87gnM-omnpKibX1(X@14{cb%07%ES?aRZq;ee|{mD zMf$f_en~^D)uLRQmP-Z{bHWqk%c4^q$_jvSGTO4rqGZ_oUBUtPxzBgO)=A8Q1qBK3l(|mdYn7Dhp&#Iez%5*kET|0af}*!+33Tv?J&OMiUvB|b)wi{c z3!;QbOE-ru3F+?c?rx9{X{EcnySrPu5u{Tq zB#(ko*sfUa>%FgLN@}%$DT0NV@edBbpGEVxhUI6!IJ8$n#$@?!cEfdn2Rf={oyryJC32J!%Y%_80 z@!^1-2nX14E}f(UZ+$CyKJbByv0C^2J8&`Vzuf4rL2&cL8eLj?7IR1^%zT6oGs>R( z7Tn5w1Ju{9fOMBxDbxM7z(UdAcK_gRcOqrSsk1(}1bfDCE}M`yVq6vN4R&0IJLg)e zZ)VHXPea*-es zr%Ju?hwNge=TV=N3)$61{AZ8;r^lqwdjjtxL1&Xup4iogzRyph_w?gWd7kz;=yz}K z{Tm-$uE+d?HPAxAIcPF5@Vb0v1YVYV=8*gF1WD{ode#5A8d$(jm>!V$8!xiJ2X4!+ zoCkkblZA<1lO{{aIJYG^(5PfsT)8}lKYBr7FB(U4rM(>SgQz&0jRye%EkgML`AqL> zWf3_xH`NCu4NVoN1oLs~KQ-@bd}&Qa8r0yGi6|6XDa!gXN!=!h$JWm71mtluDDP2M zb5jI9qqrTKJ~ptZULlMXuFhtvie3@C-@R!`$u$gK9Z^Nrq(5`$EYaGpGj#Ca*66fu zOuC}g)V5~1F}cxrF9Y5skVSMF9qG~Qb>rgQXlJ@p*!g_l`T4|Shtp-%HvzCtjTZ&w z%uNL6vDyE)xty*CO)MUs3#K)xyf;_RW8+W1n-HL{WCpG}Q-Nl4047+G zc1@6I4k_LI0l$0kFW`6o(nIb)7HEMYp{8qJ#-CNi>swyWfv3IY&Fd8cF2f1lr!?-f z?suxbK;OO<=Miq!MQaz};9l>O$fX0X2c6c7hFa$mSwd5F{4nL;|9 z8ePodFD(o;{Go_06y1jk?xq$_U!816^2Fi}o&Mpn#{0^WiO)~0d@a9~8QMdA zeLfs77j24JED`DaS@+8v8^XZH5SxgA*=j6VxSj|eiIJXzKQI-Qx-I1 zc4JejY`Z?ra09wp4?It-F8Qb`1Hb0AX8o|Q)AhwZhQ@$IqczdNT=P0M#Sy^ePpS#FsvpXbVr1Q1&9m~drfi{$M_ zkcgs}w&UnU=GUE@qB4^Mr2?0BlZga!PX+R3xD;i!KBfiAye?Q2*Di4r`00m@`k+WV z!i9^xNm3F{Nm5DDk}Rkm^9nYqD+*>bgBypGl+?_^V(R*0ME42hr1P1lqG*LR&)Jm*K@3T_22&lO)>ySK}7w{_Mlw!HUAascX$tAnz7G0vrB`V22!RH!`(2!b` zR6Y1h3rluijOR^N*XzXAs$8P8lTxfTYs)q?KJx`QkEbX4UlneR1|s@jI_*!UcGU$T zHxv1=c_;BZS^cRA+@a!NNaNBeKosY1;)@2U6-X$1&{0a}yVV1_yHn-zAwUU9VovhJ z`8>Yi*h|zau#&E?;**OcYmnU@W1_{3NnNLA+=g)^PM02v%8-UIE*j0kFho~} zWK9GWt`VU45#_;66d1&iVh5i8MvIR-Gh_||s-gioXjst-^jnbK=_i!q8pRqp)ALuQyVNFVg9+@- zpAG~WD~mxOLP)^T?PAT6$jZAW4aB+Zl5Ba?e6u1qbZ^a0_*_W?68gx}VxMJ|@Lc#$ zu_AL+pU^QTXTHTkKYw_B@J=ugH576}k-@=n=1b;~5K_Ao>zg}mep4GIn=gQ}JM{)O zFM{uqnWi{3GDBQX3q=kE%bSq!D?V=<4*?gqw%N&Hmw!Yk8@=;-P8`OP;4G2xJWm40 zPXVVAZ{1doPD0N(tj!sxCc5bpflA#^1Hu!+j*d<_xl7;ZGGRi6&Nh_|h=(+$kN0~0 zWrBrAFP%EtpS5;4<2r*d=}Ihr65nB;C0&)k513wCCvu#v&{?W7XkR*J)waH)a*r9F zL_^V`j8+mO5%!&Ida&#fj_qm!RXOe{u;C&|T0R5m6PQlkIOK_#*9gQAD420wfV7_5 
zEYl3HqIo;TY^IFlW&Q|#C#4C!>1NC5CN%{ONv|_6cf+yzR|l@2D9md=Rg??g(bd0k zr)K_%5^wXp-EiC5g9rCvO?TS;V_Kc;TfArh30}d1;$Pj6*xA|hqF_wGu-fx7cC-5Y zjD7hC`1GHPj{w4y0l!Fl$|e2B0$}jdF>(`^dmzZva^<^qvFpv(ra5gsI%_{q!`U2Q zWGyD8NbL|Dl8qtfhs9iQyKCdb^{jnETo~iE-PV72Eyr8w z(R|-lh==dp805@xdUsEITj{bUl-LNJF4CV>6(mACXcGw9V^}WY5?POx8z-5T zp{f^j;ZNf*MTnxa01V2*Z1MNeh*q8`nLaRiVYl53{@N^zCqWpfp?7O5_5`~4Xu!0cNVh&^ zRjVX2imKDFPKH%M+ttHoR^{cz#`S zLK(JX3Bq2|DG?3d?k%08ky;8@OsYf@vToaplaUK?GeDRu3%g-Jwxm z$?aq0>iD%mX*3j3O2lm{k_kc7fM;gTqLIN1uXI;_u;VN%pLBqTlQwO~r|?-w5~ zB-Q;HT$Y_^8qe!gZ0hRg-*9mgn1?sLuc0GGuNdAz;xJ7G3~Qu4+~JO5zF%Xz`DEyl zUtm_?`0f~o^k%Y+JUF3)iGJfi@w4n5Q9^oV;?A3qO?*}Q(LN9MzL`i zYY)dIl}G|v90L(9Vkx*C4!FI{soPaX<`>WV!53&3)F~n#UH{y3`EW6@32Zl&QZ~Dk zgxt&k*7N2Q3E!)~MwN&}(ffF?wU;tm4c&@ZZx$}*x#P@iRW9+w{QiS$#X_t{r{LNA z7bw{SF8KIn36`?n(aAF!M>$4o`bV_*@Ndqci`hm)B5k(&xH{!)e&>H`QIJq5S!I4> zpl>$R7|$!Yz@+cUWP2@f#0jPQ#{a$PEDSKqi8z2`I13XZ0cM#$-QhoHxl0cUbbmJ2 z@P2kL=6YXSl&)BE1a3`ydLPc}j@Ae|gME3il1!U^G2*z#WbBtprFszL7(;Hh`++2< zor4BUFtPb3y}jkm9?7F1 zg*sNEKnbTr`L{$dqAC26WWpPv}r5M`i;$><6UvJ50>0hT(63RGvEa2-qEbL#?7B zOJT8I{58Jt4Q!0SB|_oGVpHlsKRVk8XjG_4O*wPK*LkfMdBy&dLK?LcdliFRn zYXf?C3R5|KW;r{tcB|&laNev!$RfsScd1k4M#IJf5iK2cC#ErV=~>AQO-7|QD@Yet z>V<*-5cRo_=0TqvZz2d5G0xj>BEX_NPfPsg_J1D&7~hiU8S&zm{q(smNYTJ0c#{(} z+cN>r&LllEYa0C|d8PM!UH|%cN-t5e%28)hFI`0YJ{cpkE3SEDGAyg++X%36(^@7{#CnX|D#l_;MnS>7 zjiOSp!7bCx=af$zLSoa|enP~43RnHO#AN%(MDeOpu&Dy-!9upAA!RU>2|rXm#-W9!@}%g9Q;n1|E6Q|V(}Uj{;TWvP7PYSL(oqANv9&ercsi%4ISn*G%6iF9 zk6TarPmprs4RSE`)TCeL;Uf#o?}@&YnG`n_FrpgW86C1W-0a1zg!Y#lu|=~;T7O^N za=)C_XnB8w9vNLHbR>NnY@$Mz1wWC=<73=N`iDQK_SHqAGKuRtnuTiO{tD_f*$a(q ziUSy6B4cpTx)%ttUn~=F-BT_;7yrcpaUo+Ko1DeduFa&~tM26_>jIcdEC*^eO9Sv_ zdt%w9H*i`qmA;s9hgpmb5R&2~06vK78@AV4MyylPm)43Ri`&iLVZbup@Z`KM3FT5#6P5$ z?I&T&q36J^=fcsaRn@v|+mvQ&%(m`G9m}$3idq|*HAL1;=473yklu4jkkXTeTGDyz z)TCugk%>^quuA4Y#T%t?1)JPUSs1V(y5V{|MNo7^-_&k{1 zfPV~QD%uD2B7wUMPwlFLWYJK8vC9Fn-V#S~gX6a&;x)dFc=mIzb|XQhsPx84c*X(# zBTYa;sHj6Xn)7GRKW@8ZS&PnAtlJnRj+>Mm=ONWrKZ(+KT5$3yQ@+}O9kHGV5)f!T zd2V}wcrw>5x4evQ)f~uP_`u)L;AN~5(Cgn(v3Ra<27*MT&D&7ZJQ#0Yc3Dg14Kaqkf#!?Ek*3=YGp z7o?dQ6*DA@WR@-Cnxl@42_N^uHd##>8~kZHJoor5$xf-Yy}d=Od1q{Dies#z6LG6z zSQ|}wK|L~zJ_0+I7@O$749h;4nlxv*u+GNV7swzZ;e_9U&=+0&5F8m*j9DATrrd^r z{E3n%Mqk3wD9;&1q#&y(F)|HgPtjFCI+f`OHaWdGq}#i>P8;^A!i+C)+S=L)0}g~h zHY6X4gOhV|w&0n%3uujPc;ZwnI{Ocy>6#~sVy%V8VU9)pG&8;7dOyg-=5U+%Lq;VR z%XtiS@04&dP>{E1I?Uq@`kH(mJDF!|=c6b$gyvp9MwNY%j;I=2A#NZ@auR(dQKx|3 zMnnW`%snyyrrr)V69i9~Hfb~yV9`yb1^y?Z(x70zUWkOQc$i-$cwbiIvC`ClPR^YW zSm>(v?pnpHfRt+Gt9xjz0`8n`%gy?)=O=^VO!xOYA#511QGENbd8hRu#fTY9QG8O6 z5|gM0a^10NqHT5z{ZPdHNnbH%J}J?vyM;M$dO!^IlWeQz4~Dc+F-00347JfY@<5d7pWb>4mYPogqgvb*!i!{xybH41IEiR(_~Cg88bV&HHD8c$f5%yyrT0&MfB zU^q;msY*2nunXTh65E6FMuY9qWH!4EKW8gICf!$|-Fo`#8=^(z)+^v&>_P#(lkwX1 zqcluNv7wOu;lBMaz5>ggUT;bg_6O~+^=`5)MyaT=`QKWb&8x)U{lTHG?sR)IEPC#% zv?Ome)yyrB@7R;b;4sQ1XgNTDu;2Ej8tvKgk9G00mC!h$dWg!5(?-GbA7z@NX^&r6 zdx1M(?)A|IOt5ce9l-JUDKmh@Txsm@{x`pV1q<)*W5lcT661}C0{}x&nNj)4M2+KK zCdava{G=Z`)B@TW?+IFIO_5D)K>zb+_hS4{DYX?kwMc(b45ymiUt&CIMFjyGO4^=s z2h;V+iX=!EowvW=_3!SnM@FuB6I6EIG3wZuO^-vy$93)c0Um1)>aN2-({avG401Eo zhf;+>l9XP77h-clE`gZTwwfI`rhI@BD_F~b7#D>V@+Hq{E!kTIhwczu{ocPIdi}Ap@^ZN(sn_U-c#wJ24QQ7v_n~hSe`Ff zM{IELSqef$>=4xe&EQi9FRvCi_!Uw>3J%F!g0}_)M8$#t9E}m|DoHW}`a}=G`sjcm z;;&;KZMqqum*3Rp{I_xhWZB2*8EIvhMPCh zwH-KYidcr-H>bVZoQ3SQw{yk|$9zX#Zi&8KS<+LiYmF3H_)9uPmjlS=5c;0+gU$aY z85lei)`lO&_OTq^{wH>0a`?8X8_&z->=YIkpbsov^}~yvFa-jM`h=7e>-S=v`Cn|S z8Wd3XP-t2mK)SAlSjSl_n$@-Tf3n&)oG2(Y!&fE~FZoM_jSc8tH#MC73*lie83-p^yMh6jQik%B_l4FD^1Z&{f 
z;@l293Nw|Rc)IcmZKgxGJf2wQKAN~CTN(b!Pm&xTjXVA}fgNV7p1rhY?r0GmW86ky z>eaobbnLudt9TY{J4firX82M!wu%bHMQFJM=0oZ$WvMoIgYooajno4*7zU`d?fn?* zro6KaoA<08TcvzOoPy~dd{q%Gc@pesxK=@DETn!;cBA&fReea;l+{U8A_Fbgr$HOs95lrC>o0wRCqj+|Keg>&7jDbYpD|Cj|CqVsh9Fald)QNseIK z60^$SaX-uZvKI*?L~*%A&fGt>TE`dJhB)sj-XW#XW4z1l_VuUCko#)Df;DF5%jbH$ zt7vbXKPe?dv9-~_vs>> ze91We-7lx53MSGd2voXS{wN&v!PkpLzr&_)Ko3Sj9WWejIt^rMEun}Y4y ziz_NE0BYcJOs+DxDa#wexV3byQcx78FC&3WSJPoLPHgR(1Pigttm)b3XDrx* zKfToR?{fZ~aZ$Jl&H{qY>M;V^zH&Ka3_Kmcy=jEzaUzf@BrzqNwhWO5vQ&ipc-_+23Bs{?NclQ2du)n;_^9P|pe8*Kbf`CLZ@g?uv7$uP=dQ`s9&(n4MMr0YHDsYBI^BLUVDg3M8+4nwkcBCM?4y3^V^e-sh#;{>7={qtYAfeAhcd79M=N$z%~WPiOY=7jvda% z_V1eBkT12J69?G|7rOBiFvXX!JuGxy{XvWWD_NY7$T7oPRdSHTLWDMpV-N3L_LOAS4gAqL zg5FR=tN--+&!&x+jPTl5Y`F;^2mI-lL||TEwcpy>fa#K7F1Oa;mD$$X<~?>nd?;(P zs_KPM?K?sEDNlf`xhRk|f27VX^%x5oZKpNJj}Vp(sqIyJI{75a{?MaU*A{oYKPbf` zX{74cseB2%b;P6DS9IYZBIAbW5Vu2PdD=xq%-@mt;8bso9)^pY)W>X>u2!L%}cY=7l`PT9m zY(U4doK1bL39mpIJ~TO*8@7CtrQC~hRWz`;VruUgakf`)SAqoaS4d5berBfr3k$<^ z0C8TCEtPnzjPev;5F0QmQmlK|rC~wHa~6Kl4ALOYVI}Le`O#$pYRUdeu`9C^qoLNz zS^4VaB@`AF##%D5)zno|A);(}dSzr9`1jIKRC#|@V}UDk-ymeYE=y-YKoTnyqAfB# zrCqd)B8;a{T!u|xycn0)?HWC>9)8oZ->|;nKc-v9-ZFUnpyo(>!_SuI4yZ-MqT}S| zN$+&xQE@%f;pu|zT&V~5os83iK`!?-^%~pd=zRl2HAz+CLt~l9v)XN&M6@iC$eewd zU#8gLIbLZPJRn38doEPP!a+QO6|aO7uFp299nVss0yTASeHW#cm_PKn>0-{~Z<2KZ z1pktaS+Oes?Wog)(z6(D*q1~UwTYArzSVQ!t4m_JS%~ubzsu}}FN+lb>e$N*7n4ge zLpTp1T~s+E^G>o&E9_kOY7OkwSMyd4#DspCVOG(=cRPe!$%T@z5VE35w+ISdu*k94 zOEvku)dj<56H8|J(UqF`tH@onj z8F1b4zaZq_euL+9ZVsm6@Na@?Z%H%{T!zkdhD`R3DTqF~&K+j9#%D!aRw5fT5Io-Z z%j5-iyampEFT*@4gjChXYk+&V@cDr2t&KT3e@3QTKa9)fiCU*=_U+PbdlwcX{PMx=`dpgU3WJeCIV|xcG_YM~i==6?Yh}NseUL!@h6pKegMQ7S+J(c6?J2xLkm;l5QZ zS-9m+s5s(lc&~~x@wP!O&pq zRf~e4ajF_Yi=2A)mcSh3N@22X=u_oimKduvKrQKH*GPquOn^=Ovh=-KjzZ%@?1$IU zc0@hSgwW8?EYt`g%CzC*eZZd4(NTNj-meh(E6c$Tsb)?OA95~Sk$F6S4UH0IVau!L z-$=uS-+=ipTXYu3>HYFDzcGni3lOgNR0{jQ`GGB9KP9Xn?AdR=ev6SLn)(wawd=2Y z7GD-{fFeQJt|rC(o3A4SY5uU!$XEiuuKDLm1a<|$c7ZO9@Hbz7i3*&B*hfN1{0-0q z#uussPdUw}~7`(fmflaxTOqpMcrl5&*ydWxV)G z?cj5>?3xz#_oIEkK!{)~VD`v~LZ5!!%ltC_2Q@%(>J;05ISTwbM~Mnx_Alj=KL1KI z`!^oV(*cU3OS5nR4*snm{{OD_5)^#yKjl2L-{l0iz0r|Mn285IPkg==Ruz@PBk)q9M_th$%ke*vI&jT7GRUTp{EQ)kl%La>+-Ry>q0oUqzDp`JMsDljMgg~4A+i~;0JeK^BAeISQsq4RZVV@MXcWu8sK zzqd3`p}%rUzlQ|basZ4C%KCc#-s{)G#DVvnv`VFZZ~jTEKuSzyGVI!`-;^vTB*g_| z)trk0{}rNjXJFMEkTmJYMcw<7z!3*}X6xA|((>f<{|#URUnO>B)^Xb%4B?=kBUKng zaG0qvRHn4m}?4VyCJyQ{^X<5sznx~94Bmzl#i}n=bT#*B;~b-nXf7VQ{f=+{JT;Pc=iVTn(}Umi1{Gcb{?vn#u-{08o{Mns<8yB? zfiL+tnNKYO=mV)n`>lVBt^XswO1uSTk0hS}Y=8aM2e8gFY8VV(4<7zAk^f06$<=|` zKc|vk{!ML0xe26_UquxC=IgiU0bllAD3$F`fcM{g((63wD). - # The default value is larger than the expected actual vocab size to allow - # for differences between tokenizer versions used in preprocessing. There is - # no harm in using a value greater than the actual vocab size, but using a - # value less than the actual vocab size will result in an error. - self.vocab_size = 12000 - - # Number of threads for image preprocessing. Should be a multiple of 2. - self.num_preprocess_threads = 4 - - # Batch size. - self.batch_size = 32 - - # File containing an Inception v3 checkpoint to initialize the variables - # of the Inception model. Must be provided when starting training for the - # first time. - self.inception_checkpoint_file = None - - # Dimensions of Inception v3 input images. - self.image_height = 299 - self.image_width = 299 - - # Scale used to initialize model variables. - self.initializer_scale = 0.08 - - # LSTM input and output dimensionality, respectively. 
- self.embedding_size = 512 - self.num_lstm_units = 512 - - # If < 1.0, the dropout keep probability applied to LSTM variables. - self.lstm_dropout_keep_prob = 0.7 - - -class TrainingConfig(object): - """Wrapper class for training hyperparameters.""" - - def __init__(self): - """Sets the default training hyperparameters.""" - # Number of examples per epoch of training data. - self.num_examples_per_epoch = 586363 - - # Optimizer for training the model. - self.optimizer = "SGD" - - # Learning rate for the initial phase of training. - self.initial_learning_rate = 2.0 - self.learning_rate_decay_factor = 0.5 - self.num_epochs_per_decay = 8.0 - - # Learning rate when fine tuning the Inception v3 parameters. - self.train_inception_learning_rate = 0.0005 - - # If not None, clip gradients to this value. - self.clip_gradients = 5.0 - - # How many model checkpoints to keep. - self.max_checkpoints_to_keep = 5 diff --git a/research/im2txt/im2txt/data/build_mscoco_data.py b/research/im2txt/im2txt/data/build_mscoco_data.py deleted file mode 100644 index 2c3e9d977..000000000 --- a/research/im2txt/im2txt/data/build_mscoco_data.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Converts MSCOCO data to TFRecord file format with SequenceExample protos. - -The MSCOCO images are expected to reside in JPEG files located in the following -directory structure: - - train_image_dir/COCO_train2014_000000000151.jpg - train_image_dir/COCO_train2014_000000000260.jpg - ... - -and - - val_image_dir/COCO_val2014_000000000042.jpg - val_image_dir/COCO_val2014_000000000073.jpg - ... - -The MSCOCO annotations JSON files are expected to reside in train_captions_file -and val_captions_file respectively. - -This script converts the combined MSCOCO data into sharded data files consisting -of 256, 4 and 8 TFRecord files, respectively: - - output_dir/train-00000-of-00256 - output_dir/train-00001-of-00256 - ... - output_dir/train-00255-of-00256 - -and - - output_dir/val-00000-of-00004 - ... - output_dir/val-00003-of-00004 - -and - - output_dir/test-00000-of-00008 - ... - output_dir/test-00007-of-00008 - -Each TFRecord file contains ~2300 records. Each record within the TFRecord file -is a serialized SequenceExample proto consisting of precisely one image-caption -pair. Note that each image has multiple captions (usually 5) and therefore each -image is replicated multiple times in the TFRecord files. - -The SequenceExample proto contains the following fields: - - context: - image/image_id: integer MSCOCO image identifier - image/data: string containing JPEG encoded image in RGB colorspace - - feature_lists: - image/caption: list of strings containing the (tokenized) caption words - image/caption_ids: list of integer ids corresponding to the caption words - -The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer. 
-The vocabulary of word identifiers is constructed from the sorted list (by
-descending frequency) of word tokens in the training set. Only tokens appearing
-at least 4 times are considered; all other words get the "unknown" word id.
-
-NOTE: This script will consume around 100GB of disk space because each image
-in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
-This is done for two reasons:
-  1. In order to better shuffle the training data.
-  2. It makes it easier to perform asynchronous preprocessing of each image in
-     TensorFlow.
-
-Running this script using 16 threads may take around 1 hour on an HP Z420.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import Counter
-from collections import namedtuple
-from datetime import datetime
-import json
-import os.path
-import random
-import sys
-import threading
-
-
-
-import nltk.tokenize
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
-                       "Training image directory.")
-tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
-                       "Validation image directory.")
-
-tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
-                       "Training captions JSON file.")
-tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
-                       "Validation captions JSON file.")
-
-tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
-
-tf.flags.DEFINE_integer("train_shards", 256,
-                        "Number of shards in training TFRecord files.")
-tf.flags.DEFINE_integer("val_shards", 4,
-                        "Number of shards in validation TFRecord files.")
-tf.flags.DEFINE_integer("test_shards", 8,
-                        "Number of shards in testing TFRecord files.")
-
-tf.flags.DEFINE_string("start_word", "<S>",
-                       "Special word added to the beginning of each sentence.")
-tf.flags.DEFINE_string("end_word", "</S>",
-                       "Special word added to the end of each sentence.")
-tf.flags.DEFINE_string("unknown_word", "<UNK>",
-                       "Special word meaning 'unknown'.")
-tf.flags.DEFINE_integer("min_word_count", 4,
-                        "The minimum number of occurrences of each word in the "
-                        "training set for inclusion in the vocabulary.")
-tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
-                       "Output vocabulary file of word counts.")
-
-tf.flags.DEFINE_integer("num_threads", 8,
-                        "Number of threads to preprocess the images.")
-
-FLAGS = tf.flags.FLAGS
-
-ImageMetadata = namedtuple("ImageMetadata",
-                           ["image_id", "filename", "captions"])
-
-
-class Vocabulary(object):
-  """Simple vocabulary wrapper."""
-
-  def __init__(self, vocab, unk_id):
-    """Initializes the vocabulary.
-
-    Args:
-      vocab: A dictionary of word to word_id.
-      unk_id: Id of the special 'unknown' word.
-    """
-    self._vocab = vocab
-    self._unk_id = unk_id
-
-  def word_to_id(self, word):
-    """Returns the integer id of a word string."""
-    if word in self._vocab:
-      return self._vocab[word]
-    else:
-      return self._unk_id
-
-
-class ImageDecoder(object):
-  """Helper class for decoding images in TensorFlow."""
-
-  def __init__(self):
-    # Create a single TensorFlow Session for all image decoding calls.
-    self._sess = tf.Session()
-
-    # TensorFlow ops for JPEG decoding.
-    self._encoded_jpeg = tf.placeholder(dtype=tf.string)
-    self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
-
-  def decode_jpeg(self, encoded_jpeg):
-    image = self._sess.run(self._decode_jpeg,
-                           feed_dict={self._encoded_jpeg: encoded_jpeg})
-    assert len(image.shape) == 3
-    assert image.shape[2] == 3
-    return image
-
-
-def _int64_feature(value):
-  """Wrapper for inserting an int64 Feature into a SequenceExample proto."""
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
-
-
-def _bytes_feature(value):
-  """Wrapper for inserting a bytes Feature into a SequenceExample proto."""
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
-
-
-def _int64_feature_list(values):
-  """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
-  return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
-
-
-def _bytes_feature_list(values):
-  """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
-  return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
-
-
-def _to_sequence_example(image, decoder, vocab):
-  """Builds a SequenceExample proto for an image-caption pair.
-
-  Args:
-    image: An ImageMetadata object.
-    decoder: An ImageDecoder object.
-    vocab: A Vocabulary object.
-
-  Returns:
-    A SequenceExample proto, or None if the image has invalid JPEG data.
-  """
-  with tf.gfile.FastGFile(image.filename, "rb") as f:
-    encoded_image = f.read()
-
-  try:
-    decoder.decode_jpeg(encoded_image)
-  except (tf.errors.InvalidArgumentError, AssertionError):
-    print("Skipping file with invalid JPEG data: %s" % image.filename)
-    return
-
-  context = tf.train.Features(feature={
-      "image/image_id": _int64_feature(image.image_id),
-      "image/data": _bytes_feature(encoded_image),
-  })
-
-  assert len(image.captions) == 1
-  caption = image.captions[0]
-  caption_ids = [vocab.word_to_id(word) for word in caption]
-  feature_lists = tf.train.FeatureLists(feature_list={
-      "image/caption": _bytes_feature_list(caption),
-      "image/caption_ids": _int64_feature_list(caption_ids)
-  })
-  sequence_example = tf.train.SequenceExample(
-      context=context, feature_lists=feature_lists)
-
-  return sequence_example
-
-
-def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
-                         num_shards):
-  """Processes and saves a subset of images as TFRecord files in one thread.
-
-  Args:
-    thread_index: Integer thread identifier within [0, len(ranges)).
-    ranges: A list of pairs of integers specifying the ranges of the dataset to
-      process in parallel.
-    name: Unique identifier specifying the dataset.
-    images: List of ImageMetadata.
-    decoder: An ImageDecoder object.
-    vocab: A Vocabulary object.
-    num_shards: Integer number of shards for the output files.
-  """
-  # Each thread produces N shards where N = num_shards / num_threads. For
-  # instance, if num_shards = 128, and num_threads = 2, then the first thread
-  # would produce shards [0, 64).
-  num_threads = len(ranges)
-  assert not num_shards % num_threads
-  num_shards_per_batch = int(num_shards / num_threads)
-
-  shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
-                             num_shards_per_batch + 1).astype(int)
-  num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
-
-  counter = 0
-  for s in xrange(num_shards_per_batch):
-    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
-    shard = thread_index * num_shards_per_batch + s
-    output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
-    output_file = os.path.join(FLAGS.output_dir, output_filename)
-    writer = tf.python_io.TFRecordWriter(output_file)
-
-    shard_counter = 0
-    images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
-    for i in images_in_shard:
-      image = images[i]
-
-      sequence_example = _to_sequence_example(image, decoder, vocab)
-      if sequence_example is not None:
-        writer.write(sequence_example.SerializeToString())
-        shard_counter += 1
-        counter += 1
-
-      if not counter % 1000:
-        print("%s [thread %d]: Processed %d of %d items in thread batch." %
-              (datetime.now(), thread_index, counter, num_images_in_thread))
-        sys.stdout.flush()
-
-    writer.close()
-    print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
-          (datetime.now(), thread_index, shard_counter, output_file))
-    sys.stdout.flush()
-    shard_counter = 0
-  print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
-        (datetime.now(), thread_index, counter, num_shards_per_batch))
-  sys.stdout.flush()
-
-
-def _process_dataset(name, images, vocab, num_shards):
-  """Processes a complete data set and saves it as a TFRecord.
-
-  Args:
-    name: Unique identifier specifying the dataset.
-    images: List of ImageMetadata.
-    vocab: A Vocabulary object.
-    num_shards: Integer number of shards for the output files.
-  """
-  # Break up each image into a separate entity for each caption.
-  images = [ImageMetadata(image.image_id, image.filename, [caption])
-            for image in images for caption in image.captions]
-
-  # Shuffle the ordering of images. Make the randomization repeatable.
-  random.seed(12345)
-  random.shuffle(images)
-
-  # Break the images into num_threads batches. Batch i is defined as
-  # images[ranges[i][0]:ranges[i][1]].
-  num_threads = min(num_shards, FLAGS.num_threads)
-  spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
-  ranges = []
-  threads = []
-  for i in xrange(len(spacing) - 1):
-    ranges.append([spacing[i], spacing[i + 1]])
-
-  # Create a mechanism for monitoring when all threads are finished.
-  coord = tf.train.Coordinator()
-
-  # Create a utility for decoding JPEG images to run sanity checks.
-  decoder = ImageDecoder()
-
-  # Launch a thread for each batch.
-  print("Launching %d threads for spacings: %s" % (num_threads, ranges))
-  for thread_index in xrange(len(ranges)):
-    args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
-    t = threading.Thread(target=_process_image_files, args=args)
-    t.start()
-    threads.append(t)
-
-  # Wait for all the threads to terminate.
-  coord.join(threads)
-  print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
-        (datetime.now(), len(images), name))
-
-
-def _create_vocab(captions):
-  """Creates the vocabulary of word to word_id.
-
-  The vocabulary is saved to disk in a text file of word counts. The id of each
-  word in the file is its corresponding 0-based line number.
-
-  Args:
-    captions: A list of lists of strings.
-
-  Returns:
-    A Vocabulary object.
-  """
-  print("Creating vocabulary.")
-  counter = Counter()
-  for c in captions:
-    counter.update(c)
-  print("Total words:", len(counter))
-
-  # Filter uncommon words and sort by descending count.
-  word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
-  word_counts.sort(key=lambda x: x[1], reverse=True)
-  print("Words in vocabulary:", len(word_counts))
-
-  # Write out the word counts file.
-  with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
-    f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
-  print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
-
-  # Create the vocabulary dictionary.
-  reverse_vocab = [x[0] for x in word_counts]
-  unk_id = len(reverse_vocab)
-  vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
-  vocab = Vocabulary(vocab_dict, unk_id)
-
-  return vocab
-
-
-def _process_caption(caption):
-  """Processes a caption string into a list of tokenized words.
-
-  Args:
-    caption: A string caption.
-
-  Returns:
-    A list of strings; the tokenized caption.
-  """
-  tokenized_caption = [FLAGS.start_word]
-  tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
-  tokenized_caption.append(FLAGS.end_word)
-  return tokenized_caption
-
-
-def _load_and_process_metadata(captions_file, image_dir):
-  """Loads image metadata from a JSON file and processes the captions.
-
-  Args:
-    captions_file: JSON file containing caption annotations.
-    image_dir: Directory containing the image files.
-
-  Returns:
-    A list of ImageMetadata.
-  """
-  with tf.gfile.FastGFile(captions_file, "r") as f:
-    caption_data = json.load(f)
-
-  # Extract the filenames.
-  id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
-
-  # Extract the captions. Each image_id is associated with multiple captions.
-  id_to_captions = {}
-  for annotation in caption_data["annotations"]:
-    image_id = annotation["image_id"]
-    caption = annotation["caption"]
-    id_to_captions.setdefault(image_id, [])
-    id_to_captions[image_id].append(caption)
-
-  assert len(id_to_filename) == len(id_to_captions)
-  assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
-  print("Loaded caption metadata for %d images from %s" %
-        (len(id_to_filename), captions_file))
-
-  # Process the captions and combine the data into a list of ImageMetadata.
-  print("Processing captions.")
-  image_metadata = []
-  num_captions = 0
-  for image_id, base_filename in id_to_filename:
-    filename = os.path.join(image_dir, base_filename)
-    captions = [_process_caption(c) for c in id_to_captions[image_id]]
-    image_metadata.append(ImageMetadata(image_id, filename, captions))
-    num_captions += len(captions)
-  print("Finished processing %d captions for %d images in %s" %
-        (num_captions, len(id_to_filename), captions_file))
-
-  return image_metadata
-
-
-def main(unused_argv):
-  def _is_valid_num_shards(num_shards):
-    """Returns True if num_shards is compatible with FLAGS.num_threads."""
-    return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
-
-  assert _is_valid_num_shards(FLAGS.train_shards), (
-      "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
-  assert _is_valid_num_shards(FLAGS.val_shards), (
-      "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
-  assert _is_valid_num_shards(FLAGS.test_shards), (
-      "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
-
-  if not tf.gfile.IsDirectory(FLAGS.output_dir):
-    tf.gfile.MakeDirs(FLAGS.output_dir)
-
-  # Load image metadata from caption files.
-  mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
-                                                    FLAGS.train_image_dir)
-  mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
-                                                  FLAGS.val_image_dir)
-
-  # Redistribute the MSCOCO data as follows:
-  #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
-  #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
-  #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
-  train_cutoff = int(0.85 * len(mscoco_val_dataset))
-  val_cutoff = int(0.90 * len(mscoco_val_dataset))
-  train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
-  val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
-  test_dataset = mscoco_val_dataset[val_cutoff:]
-
-  # Create vocabulary from the training captions.
-  train_captions = [c for image in train_dataset for c in image.captions]
-  vocab = _create_vocab(train_captions)
-
-  _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
-  _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
-  _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
-
-
-if __name__ == "__main__":
-  tf.app.run()
diff --git a/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh b/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh
deleted file mode 100755
index ab3ff28d5..000000000
--- a/research/im2txt/im2txt/data/download_and_preprocess_mscoco.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# Script to download and preprocess the MSCOCO data set.
-#
-# The outputs of this script are sharded TFRecord files containing serialized
-# SequenceExample protocol buffers. See build_mscoco_data.py for details of how
-# the SequenceExample protocol buffers are constructed.
-#
-# usage:
-#  ./download_and_preprocess_mscoco.sh [data-dir]
-set -e
-
-if [ -z "$1" ]; then
-  echo "usage: download_and_preprocess_mscoco.sh [data-dir]"
-  exit
-fi
-
-if [ "$(uname)" == "Darwin" ]; then
-  UNZIP="tar -xf"
-else
-  UNZIP="unzip -nq"
-fi
-
-# Create the output directories.
-OUTPUT_DIR="${1%/}"
-SCRATCH_DIR="${OUTPUT_DIR}/raw-data"
-mkdir -p "${OUTPUT_DIR}"
-mkdir -p "${SCRATCH_DIR}"
-CURRENT_DIR=$(pwd)
-WORK_DIR="$0.runfiles/im2txt/im2txt"
-
-# Helper function to download and unpack a .zip file.
-function download_and_unzip() {
-  local BASE_URL=${1}
-  local FILENAME=${2}
-
-  if [ ! -f ${FILENAME} ]; then
-    echo "Downloading ${FILENAME} to $(pwd)"
-    wget -nd -c "${BASE_URL}/${FILENAME}"
-  else
-    echo "Skipping download of ${FILENAME}"
-  fi
-  echo "Unzipping ${FILENAME}"
-  ${UNZIP} ${FILENAME}
-}
-
-cd ${SCRATCH_DIR}
-
-# Download the images.
-BASE_IMAGE_URL="http://msvocds.blob.core.windows.net/coco2014"
-
-TRAIN_IMAGE_FILE="train2014.zip"
-download_and_unzip ${BASE_IMAGE_URL} ${TRAIN_IMAGE_FILE}
-TRAIN_IMAGE_DIR="${SCRATCH_DIR}/train2014"
-
-VAL_IMAGE_FILE="val2014.zip"
-download_and_unzip ${BASE_IMAGE_URL} ${VAL_IMAGE_FILE}
-VAL_IMAGE_DIR="${SCRATCH_DIR}/val2014"
-
-# Download the captions.
-BASE_CAPTIONS_URL="http://msvocds.blob.core.windows.net/annotations-1-0-3" -CAPTIONS_FILE="captions_train-val2014.zip" -download_and_unzip ${BASE_CAPTIONS_URL} ${CAPTIONS_FILE} -TRAIN_CAPTIONS_FILE="${SCRATCH_DIR}/annotations/captions_train2014.json" -VAL_CAPTIONS_FILE="${SCRATCH_DIR}/annotations/captions_val2014.json" - -# Build TFRecords of the image data. -cd "${CURRENT_DIR}" -BUILD_SCRIPT="${WORK_DIR}/build_mscoco_data" -"${BUILD_SCRIPT}" \ - --train_image_dir="${TRAIN_IMAGE_DIR}" \ - --val_image_dir="${VAL_IMAGE_DIR}" \ - --train_captions_file="${TRAIN_CAPTIONS_FILE}" \ - --val_captions_file="${VAL_CAPTIONS_FILE}" \ - --output_dir="${OUTPUT_DIR}" \ - --word_counts_output_file="${OUTPUT_DIR}/word_counts.txt" \ diff --git a/research/im2txt/im2txt/evaluate.py b/research/im2txt/im2txt/evaluate.py deleted file mode 100644 index 0c81a59da..000000000 --- a/research/im2txt/im2txt/evaluate.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Evaluate the model. - -This script should be run concurrently with training so that summaries show up -in TensorBoard. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import os.path -import time - - -import numpy as np -import tensorflow as tf - -from im2txt import configuration -from im2txt import show_and_tell_model - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("input_file_pattern", "", - "File pattern of sharded TFRecord input files.") -tf.flags.DEFINE_string("checkpoint_dir", "", - "Directory containing model checkpoints.") -tf.flags.DEFINE_string("eval_dir", "", "Directory to write event logs.") - -tf.flags.DEFINE_integer("eval_interval_secs", 600, - "Interval between evaluation runs.") -tf.flags.DEFINE_integer("num_eval_examples", 10132, - "Number of examples for evaluation.") - -tf.flags.DEFINE_integer("min_global_step", 5000, - "Minimum global step to run evaluation.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def evaluate_model(sess, model, global_step, summary_writer, summary_op): - """Computes perplexity-per-word over the evaluation dataset. - - Summaries and perplexity-per-word are written out to the eval directory. - - Args: - sess: Session object. - model: Instance of ShowAndTellModel; the model to evaluate. - global_step: Integer; global step of the model checkpoint. - summary_writer: Instance of FileWriter. - summary_op: Op for generating model summaries. - """ - # Log model summaries on a single batch. - summary_str = sess.run(summary_op) - summary_writer.add_summary(summary_str, global_step) - - # Compute perplexity over the entire dataset. - num_eval_batches = int( - math.ceil(FLAGS.num_eval_examples / model.config.batch_size)) - - start_time = time.time() - sum_losses = 0. - sum_weights = 0. 
- for i in range(num_eval_batches): - cross_entropy_losses, weights = sess.run([ - model.target_cross_entropy_losses, - model.target_cross_entropy_loss_weights - ]) - sum_losses += np.sum(cross_entropy_losses * weights) - sum_weights += np.sum(weights) - if not i % 100: - tf.logging.info("Computed losses for %d of %d batches.", i + 1, - num_eval_batches) - eval_time = time.time() - start_time - - perplexity = math.exp(sum_losses / sum_weights) - tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time) - - # Log perplexity to the FileWriter. - summary = tf.Summary() - value = summary.value.add() - value.simple_value = perplexity - value.tag = "Perplexity" - summary_writer.add_summary(summary, global_step) - - # Write the Events file to the eval directory. - summary_writer.flush() - tf.logging.info("Finished processing evaluation at global step %d.", - global_step) - - -def run_once(model, saver, summary_writer, summary_op): - """Evaluates the latest model checkpoint. - - Args: - model: Instance of ShowAndTellModel; the model to evaluate. - saver: Instance of tf.train.Saver for restoring model Variables. - summary_writer: Instance of FileWriter. - summary_op: Op for generating model summaries. - """ - model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) - if not model_path: - tf.logging.info("Skipping evaluation. No checkpoint found in: %s", - FLAGS.checkpoint_dir) - return - - with tf.Session() as sess: - # Load model from checkpoint. - tf.logging.info("Loading model from checkpoint: %s", model_path) - saver.restore(sess, model_path) - global_step = tf.train.global_step(sess, model.global_step.name) - tf.logging.info("Successfully loaded %s at global step = %d.", - os.path.basename(model_path), global_step) - if global_step < FLAGS.min_global_step: - tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step, - FLAGS.min_global_step) - return - - # Start the queue runners. - coord = tf.train.Coordinator() - threads = tf.train.start_queue_runners(coord=coord) - - # Run evaluation on the latest checkpoint. - try: - evaluate_model( - sess=sess, - model=model, - global_step=global_step, - summary_writer=summary_writer, - summary_op=summary_op) - except Exception as e: # pylint: disable=broad-except - tf.logging.error("Evaluation failed.") - coord.request_stop(e) - - coord.request_stop() - coord.join(threads, stop_grace_period_secs=10) - - -def run(): - """Runs evaluation in a loop, and logs summaries to TensorBoard.""" - # Create the evaluation directory if it doesn't exist. - eval_dir = FLAGS.eval_dir - if not tf.gfile.IsDirectory(eval_dir): - tf.logging.info("Creating eval directory: %s", eval_dir) - tf.gfile.MakeDirs(eval_dir) - - g = tf.Graph() - with g.as_default(): - # Build the model for evaluation. - model_config = configuration.ModelConfig() - model_config.input_file_pattern = FLAGS.input_file_pattern - model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval") - model.build() - - # Create the Saver to restore model Variables. - saver = tf.train.Saver() - - # Create the summary operation and the summary writer. - summary_op = tf.summary.merge_all() - summary_writer = tf.summary.FileWriter(eval_dir) - - g.finalize() - - # Run a new evaluation run every eval_interval_secs. 
- while True: - start = time.time() - tf.logging.info("Starting evaluation at " + time.strftime( - "%Y-%m-%d-%H:%M:%S", time.localtime())) - run_once(model, saver, summary_writer, summary_op) - time_to_next_eval = start + FLAGS.eval_interval_secs - time.time() - if time_to_next_eval > 0: - time.sleep(time_to_next_eval) - - -def main(unused_argv): - assert FLAGS.input_file_pattern, "--input_file_pattern is required" - assert FLAGS.checkpoint_dir, "--checkpoint_dir is required" - assert FLAGS.eval_dir, "--eval_dir is required" - run() - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/im2txt/im2txt/inference_utils/BUILD b/research/im2txt/im2txt/inference_utils/BUILD deleted file mode 100644 index 82a15fd3c..000000000 --- a/research/im2txt/im2txt/inference_utils/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//im2txt:internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -py_library( - name = "inference_wrapper_base", - srcs = ["inference_wrapper_base.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "vocabulary", - srcs = ["vocabulary.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "caption_generator", - srcs = ["caption_generator.py"], - srcs_version = "PY2AND3", -) - -py_test( - name = "caption_generator_test", - srcs = ["caption_generator_test.py"], - deps = [ - ":caption_generator", - ], -) diff --git a/research/im2txt/im2txt/inference_utils/caption_generator.py b/research/im2txt/im2txt/inference_utils/caption_generator.py deleted file mode 100644 index f158d3d23..000000000 --- a/research/im2txt/im2txt/inference_utils/caption_generator.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Class for generating captions from an image-to-text model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import heapq -import math - - -import numpy as np - - -class Caption(object): - """Represents a complete or partial caption.""" - - def __init__(self, sentence, state, logprob, score, metadata=None): - """Initializes the Caption. - - Args: - sentence: List of word ids in the caption. - state: Model state after generating the previous word. - logprob: Log-probability of the caption. - score: Score of the caption. - metadata: Optional metadata associated with the partial sentence. If not - None, a list of strings with the same length as 'sentence'. - """ - self.sentence = sentence - self.state = state - self.logprob = logprob - self.score = score - self.metadata = metadata - - def __cmp__(self, other): - """Compares Captions by score.""" - assert isinstance(other, Caption) - if self.score == other.score: - return 0 - elif self.score < other.score: - return -1 - else: - return 1 - - # For Python 3 compatibility (__cmp__ is deprecated). 
- def __lt__(self, other): - assert isinstance(other, Caption) - return self.score < other.score - - # Also for Python 3 compatibility. - def __eq__(self, other): - assert isinstance(other, Caption) - return self.score == other.score - - -class TopN(object): - """Maintains the top n elements of an incrementally provided set.""" - - def __init__(self, n): - self._n = n - self._data = [] - - def size(self): - assert self._data is not None - return len(self._data) - - def push(self, x): - """Pushes a new element.""" - assert self._data is not None - if len(self._data) < self._n: - heapq.heappush(self._data, x) - else: - heapq.heappushpop(self._data, x) - - def extract(self, sort=False): - """Extracts all elements from the TopN. This is a destructive operation. - - The only method that can be called immediately after extract() is reset(). - - Args: - sort: Whether to return the elements in descending sorted order. - - Returns: - A list of data; the top n elements provided to the set. - """ - assert self._data is not None - data = self._data - self._data = None - if sort: - data.sort(reverse=True) - return data - - def reset(self): - """Returns the TopN to an empty state.""" - self._data = [] - - -class CaptionGenerator(object): - """Class to generate captions from an image-to-text model.""" - - def __init__(self, - model, - vocab, - beam_size=3, - max_caption_length=20, - length_normalization_factor=0.0): - """Initializes the generator. - - Args: - model: Object encapsulating a trained image-to-text model. Must have - methods feed_image() and inference_step(). For example, an instance of - InferenceWrapperBase. - vocab: A Vocabulary object. - beam_size: Beam size to use when generating captions. - max_caption_length: The maximum caption length before stopping the search. - length_normalization_factor: If != 0, a number x such that captions are - scored by logprob/length^x, rather than logprob. This changes the - relative scores of captions depending on their lengths. For example, if - x > 0 then longer captions will be favored. - """ - self.vocab = vocab - self.model = model - - self.beam_size = beam_size - self.max_caption_length = max_caption_length - self.length_normalization_factor = length_normalization_factor - - def beam_search(self, sess, encoded_image): - """Runs beam search caption generation on a single image. - - Args: - sess: TensorFlow Session object. - encoded_image: An encoded image string. - - Returns: - A list of Caption sorted by descending score. - """ - # Feed in the image to get the initial state. - initial_state = self.model.feed_image(sess, encoded_image) - - initial_beam = Caption( - sentence=[self.vocab.start_id], - state=initial_state[0], - logprob=0.0, - score=0.0, - metadata=[""]) - partial_captions = TopN(self.beam_size) - partial_captions.push(initial_beam) - complete_captions = TopN(self.beam_size) - - # Run beam search. - for _ in range(self.max_caption_length - 1): - partial_captions_list = partial_captions.extract() - partial_captions.reset() - input_feed = np.array([c.sentence[-1] for c in partial_captions_list]) - state_feed = np.array([c.state for c in partial_captions_list]) - - softmax, new_states, metadata = self.model.inference_step(sess, - input_feed, - state_feed) - - for i, partial_caption in enumerate(partial_captions_list): - word_probabilities = softmax[i] - state = new_states[i] - # For this partial caption, get the beam_size most probable next words. 
-        # Sort the indexes with numpy, select the last self.beam_size
-        # (3 by default) (i.e., the most likely) and then reverse the sorted
-        # indexes with [::-1] to sort them from higher to lower.
-        most_likely_words = np.argsort(word_probabilities)[-self.beam_size:][::-1]
-
-        for w in most_likely_words:
-          p = word_probabilities[w]
-          if p < 1e-12:
-            continue  # Avoid log(0).
-          sentence = partial_caption.sentence + [w]
-          logprob = partial_caption.logprob + math.log(p)
-          score = logprob
-          if metadata:
-            metadata_list = partial_caption.metadata + [metadata[i]]
-          else:
-            metadata_list = None
-          if w == self.vocab.end_id:
-            if self.length_normalization_factor > 0:
-              score /= len(sentence)**self.length_normalization_factor
-            beam = Caption(sentence, state, logprob, score, metadata_list)
-            complete_captions.push(beam)
-          else:
-            beam = Caption(sentence, state, logprob, score, metadata_list)
-            partial_captions.push(beam)
-      if partial_captions.size() == 0:
-        # We have run out of partial candidates; happens when beam_size = 1.
-        break
-
-    # If we have no complete captions then fall back to the partial captions.
-    # But never output a mixture of complete and partial captions because a
-    # partial caption could have a higher score than all the complete captions.
-    if not complete_captions.size():
-      complete_captions = partial_captions
-
-    return complete_captions.extract(sort=True)
diff --git a/research/im2txt/im2txt/inference_utils/caption_generator_test.py b/research/im2txt/im2txt/inference_utils/caption_generator_test.py
deleted file mode 100644
index bbd069313..000000000
--- a/research/im2txt/im2txt/inference_utils/caption_generator_test.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Unit tests for CaptionGenerator."""
-
-import math
-
-
-
-import numpy as np
-import tensorflow as tf
-
-from im2txt.inference_utils import caption_generator
-
-
-class FakeVocab(object):
-  """Fake Vocabulary for testing purposes."""
-
-  def __init__(self):
-    self.start_id = 0  # Word id denoting sentence start.
-    self.end_id = 1  # Word id denoting sentence end.
-
-
-class FakeModel(object):
-  """Fake model for testing purposes."""
-
-  def __init__(self):
-    # Number of words in the vocab.
-    self._vocab_size = 12
-
-    # Dimensionality of the nominal model state.
-    self._state_size = 1
-
-    # Map of previous word to the probability distribution of the next word.
-    self._probabilities = {
-        0: {1: 0.1,
-            2: 0.2,
-            3: 0.3,
-            4: 0.4},
-        2: {5: 0.1,
-            6: 0.9},
-        3: {1: 0.1,
-            7: 0.4,
-            8: 0.5},
-        4: {1: 0.3,
-            9: 0.3,
-            10: 0.4},
-        5: {1: 1.0},
-        6: {1: 1.0},
-        7: {1: 1.0},
-        8: {1: 1.0},
-        9: {1: 0.5,
-            11: 0.5},
-        10: {1: 1.0},
-        11: {1: 1.0},
-    }
-
-  # pylint: disable=unused-argument
-
-  def feed_image(self, sess, encoded_image):
-    # Return a nominal model state.
- return np.zeros([1, self._state_size]) - - def inference_step(self, sess, input_feed, state_feed): - # Compute the matrix of softmax distributions for the next batch of words. - batch_size = input_feed.shape[0] - softmax_output = np.zeros([batch_size, self._vocab_size]) - for batch_index, word_id in enumerate(input_feed): - for next_word, probability in self._probabilities[word_id].items(): - softmax_output[batch_index, next_word] = probability - - # Nominal state and metadata. - new_state = np.zeros([batch_size, self._state_size]) - metadata = None - - return softmax_output, new_state, metadata - - # pylint: enable=unused-argument - - -class CaptionGeneratorTest(tf.test.TestCase): - - def _assertExpectedCaptions(self, - expected_captions, - beam_size=3, - max_caption_length=20, - length_normalization_factor=0): - """Tests that beam search generates the expected captions. - - Args: - expected_captions: A sequence of pairs (sentence, probability), where - sentence is a list of integer ids and probability is a float in [0, 1]. - beam_size: Parameter passed to beam_search(). - max_caption_length: Parameter passed to beam_search(). - length_normalization_factor: Parameter passed to beam_search(). - """ - expected_sentences = [c[0] for c in expected_captions] - expected_probabilities = [c[1] for c in expected_captions] - - # Generate captions. - generator = caption_generator.CaptionGenerator( - model=FakeModel(), - vocab=FakeVocab(), - beam_size=beam_size, - max_caption_length=max_caption_length, - length_normalization_factor=length_normalization_factor) - actual_captions = generator.beam_search(sess=None, encoded_image=None) - - actual_sentences = [c.sentence for c in actual_captions] - actual_probabilities = [math.exp(c.logprob) for c in actual_captions] - - self.assertEqual(expected_sentences, actual_sentences) - self.assertAllClose(expected_probabilities, actual_probabilities) - - def testBeamSize(self): - # Beam size = 1. - expected = [([0, 4, 10, 1], 0.16)] - self._assertExpectedCaptions(expected, beam_size=1) - - # Beam size = 2. - expected = [([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15)] - self._assertExpectedCaptions(expected, beam_size=2) - - # Beam size = 3. - expected = [ - ([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15) - ] - self._assertExpectedCaptions(expected, beam_size=3) - - def testMaxLength(self): - # Max length = 1. - expected = [([0], 1.0)] - self._assertExpectedCaptions(expected, max_caption_length=1) - - # Max length = 2. - # There are no complete sentences, so partial sentences are returned. - expected = [([0, 4], 0.4), ([0, 3], 0.3), ([0, 2], 0.2)] - self._assertExpectedCaptions(expected, max_caption_length=2) - - # Max length = 3. - # There is at least one complete sentence, so only complete sentences are - # returned. - expected = [([0, 4, 1], 0.12), ([0, 3, 1], 0.03)] - self._assertExpectedCaptions(expected, max_caption_length=3) - - # Max length = 4. - expected = [ - ([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15) - ] - self._assertExpectedCaptions(expected, max_caption_length=4) - - def testLengthNormalization(self): - # Length normalization factor = 3. - # The longest caption is returned first, despite having low probability, - # because it has the highest log(probability)/length**3. 
- expected = [ - ([0, 4, 9, 11, 1], 0.06), - ([0, 2, 6, 1], 0.18), - ([0, 4, 10, 1], 0.16), - ([0, 3, 8, 1], 0.15), - ] - self._assertExpectedCaptions( - expected, beam_size=4, length_normalization_factor=3) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py b/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py deleted file mode 100644 index e94cd6af4..000000000 --- a/research/im2txt/im2txt/inference_utils/inference_wrapper_base.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Base wrapper class for performing inference with an image-to-text model. - -Subclasses must implement the following methods: - - build_model(): - Builds the model for inference and returns the model object. - - feed_image(): - Takes an encoded image and returns the initial model state, where "state" - is a numpy array whose specifics are defined by the subclass, e.g. - concatenated LSTM state. It's assumed that feed_image() will be called - precisely once at the start of inference for each image. Subclasses may - compute and/or save per-image internal context in this method. - - inference_step(): - Takes a batch of inputs and states at a single time-step. Returns the - softmax output corresponding to the inputs, and the new states of the batch. - Optionally also returns metadata about the current inference step, e.g. a - serialized numpy array containing activations from a particular model layer. - -Client usage: - 1. Build the model inference graph via build_graph_from_config() or - build_graph_from_proto(). - 2. Call the resulting restore_fn to load the model checkpoint. - 3. For each image in a batch of images: - a) Call feed_image() once to get the initial state. - b) For each step of caption generation, call inference_step(). -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os.path - - -import tensorflow as tf - -# pylint: disable=unused-argument - - -class InferenceWrapperBase(object): - """Base wrapper class for performing inference with an image-to-text model.""" - - def __init__(self): - pass - - def build_model(self, model_config): - """Builds the model for inference. - - Args: - model_config: Object containing configuration for building the model. - - Returns: - model: The model object. - """ - tf.logging.fatal("Please implement build_model in subclass") - - def _create_restore_fn(self, checkpoint_path, saver): - """Creates a function that restores a model from checkpoint. - - Args: - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. - saver: Saver for restoring variables from the checkpoint file. - - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. 
- - Raises: - ValueError: If checkpoint_path does not refer to a checkpoint file or a - directory containing a checkpoint file. - """ - if tf.gfile.IsDirectory(checkpoint_path): - checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) - if not checkpoint_path: - raise ValueError("No checkpoint file found in: %s" % checkpoint_path) - - def _restore_fn(sess): - tf.logging.info("Loading model from checkpoint: %s", checkpoint_path) - saver.restore(sess, checkpoint_path) - tf.logging.info("Successfully loaded checkpoint: %s", - os.path.basename(checkpoint_path)) - - return _restore_fn - - def build_graph_from_config(self, model_config, checkpoint_path): - """Builds the inference graph from a configuration object. - - Args: - model_config: Object containing configuration for building the model. - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. - - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. - """ - tf.logging.info("Building model.") - self.build_model(model_config) - saver = tf.train.Saver() - - return self._create_restore_fn(checkpoint_path, saver) - - def build_graph_from_proto(self, graph_def_file, saver_def_file, - checkpoint_path): - """Builds the inference graph from serialized GraphDef and SaverDef protos. - - Args: - graph_def_file: File containing a serialized GraphDef proto. - saver_def_file: File containing a serialized SaverDef proto. - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. - - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. - """ - # Load the Graph. - tf.logging.info("Loading GraphDef from file: %s", graph_def_file) - graph_def = tf.GraphDef() - with tf.gfile.FastGFile(graph_def_file, "rb") as f: - graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name="") - - # Load the Saver. - tf.logging.info("Loading SaverDef from file: %s", saver_def_file) - saver_def = tf.train.SaverDef() - with tf.gfile.FastGFile(saver_def_file, "rb") as f: - saver_def.ParseFromString(f.read()) - saver = tf.train.Saver(saver_def=saver_def) - - return self._create_restore_fn(checkpoint_path, saver) - - def feed_image(self, sess, encoded_image): - """Feeds an image and returns the initial model state. - - See comments at the top of file. - - Args: - sess: TensorFlow Session object. - encoded_image: An encoded image string. - - Returns: - state: A numpy array of shape [1, state_size]. - """ - tf.logging.fatal("Please implement feed_image in subclass") - - def inference_step(self, sess, input_feed, state_feed): - """Runs one step of inference. - - Args: - sess: TensorFlow Session object. - input_feed: A numpy array of shape [batch_size]. - state_feed: A numpy array of shape [batch_size, state_size]. - - Returns: - softmax_output: A numpy array of shape [batch_size, vocab_size]. - new_state: A numpy array of shape [batch_size, state_size]. - metadata: Optional. If not None, a string containing metadata about the - current inference step (e.g. serialized numpy array containing - activations from a particular model layer.). 
-    """
-    tf.logging.fatal("Please implement inference_step in subclass")
-
-# pylint: enable=unused-argument
diff --git a/research/im2txt/im2txt/inference_utils/vocabulary.py b/research/im2txt/im2txt/inference_utils/vocabulary.py
deleted file mode 100644
index ecf0ada9c..000000000
--- a/research/im2txt/im2txt/inference_utils/vocabulary.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Vocabulary class for an image-to-text model."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import tensorflow as tf
-
-
-class Vocabulary(object):
-  """Vocabulary class for an image-to-text model."""
-
-  def __init__(self,
-               vocab_file,
-               start_word="<S>",
-               end_word="</S>",
-               unk_word="<UNK>"):
-    """Initializes the vocabulary.
-
-    Args:
-      vocab_file: File containing the vocabulary, where the words are the first
-        whitespace-separated token on each line (other tokens are ignored) and
-        the word ids are the corresponding line numbers.
-      start_word: Special word denoting sentence start.
-      end_word: Special word denoting sentence end.
-      unk_word: Special word denoting unknown words.
-    """
-    if not tf.gfile.Exists(vocab_file):
-      tf.logging.fatal("Vocab file %s not found.", vocab_file)
-    tf.logging.info("Initializing vocabulary from file: %s", vocab_file)
-
-    with tf.gfile.GFile(vocab_file, mode="r") as f:
-      reverse_vocab = list(f.readlines())
-    reverse_vocab = [line.split()[0] for line in reverse_vocab]
-    assert start_word in reverse_vocab
-    assert end_word in reverse_vocab
-    if unk_word not in reverse_vocab:
-      reverse_vocab.append(unk_word)
-    vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
-
-    tf.logging.info("Created vocabulary with %d words" % len(vocab))
-
-    self.vocab = vocab  # vocab[word] = id
-    self.reverse_vocab = reverse_vocab  # reverse_vocab[id] = word
-
-    # Save special word ids.
-    self.start_id = vocab[start_word]
-    self.end_id = vocab[end_word]
-    self.unk_id = vocab[unk_word]
-
-  def word_to_id(self, word):
-    """Returns the integer word id of a word string."""
-    if word in self.vocab:
-      return self.vocab[word]
-    else:
-      return self.unk_id
-
-  def id_to_word(self, word_id):
-    """Returns the word string of an integer word id."""
-    if word_id >= len(self.reverse_vocab):
-      return self.reverse_vocab[self.unk_id]
-    else:
-      return self.reverse_vocab[word_id]
diff --git a/research/im2txt/im2txt/inference_wrapper.py b/research/im2txt/im2txt/inference_wrapper.py
deleted file mode 100644
index a047a9c8d..000000000
--- a/research/im2txt/im2txt/inference_wrapper.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Model wrapper class for performing inference with a ShowAndTellModel.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - - -from im2txt import show_and_tell_model -from im2txt.inference_utils import inference_wrapper_base - - -class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase): - """Model wrapper class for performing inference with a ShowAndTellModel.""" - - def __init__(self): - super(InferenceWrapper, self).__init__() - - def build_model(self, model_config): - model = show_and_tell_model.ShowAndTellModel(model_config, mode="inference") - model.build() - return model - - def feed_image(self, sess, encoded_image): - initial_state = sess.run(fetches="lstm/initial_state:0", - feed_dict={"image_feed:0": encoded_image}) - return initial_state - - def inference_step(self, sess, input_feed, state_feed): - softmax_output, state_output = sess.run( - fetches=["softmax:0", "lstm/state:0"], - feed_dict={ - "input_feed:0": input_feed, - "lstm/state_feed:0": state_feed, - }) - return softmax_output, state_output, None diff --git a/research/im2txt/im2txt/ops/BUILD b/research/im2txt/im2txt/ops/BUILD deleted file mode 100644 index 7d48bf393..000000000 --- a/research/im2txt/im2txt/ops/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//im2txt:internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -py_library( - name = "image_processing", - srcs = ["image_processing.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "image_embedding", - srcs = ["image_embedding.py"], - srcs_version = "PY2AND3", -) - -py_test( - name = "image_embedding_test", - size = "small", - srcs = ["image_embedding_test.py"], - deps = [ - ":image_embedding", - ], -) - -py_library( - name = "inputs", - srcs = ["inputs.py"], - srcs_version = "PY2AND3", -) diff --git a/research/im2txt/im2txt/ops/image_embedding.py b/research/im2txt/im2txt/ops/image_embedding.py deleted file mode 100644 index 58e3ddaa9..000000000 --- a/research/im2txt/im2txt/ops/image_embedding.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Image embedding ops."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import tensorflow as tf
-
-from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
-
-slim = tf.contrib.slim
-
-
-def inception_v3(images,
-                 trainable=True,
-                 is_training=True,
-                 weight_decay=0.00004,
-                 stddev=0.1,
-                 dropout_keep_prob=0.8,
-                 use_batch_norm=True,
-                 batch_norm_params=None,
-                 add_summaries=True,
-                 scope="InceptionV3"):
-  """Builds an Inception V3 subgraph for image embeddings.
-
-  Args:
-    images: A float32 Tensor of shape [batch, height, width, channels].
-    trainable: Whether the inception submodel should be trainable or not.
-    is_training: Boolean indicating training mode or not.
-    weight_decay: Coefficient for weight regularization.
-    stddev: The standard deviation of the truncated normal weight initializer.
-    dropout_keep_prob: Dropout keep probability.
-    use_batch_norm: Whether to use batch normalization.
-    batch_norm_params: Parameters for batch normalization. See
-      tf.contrib.layers.batch_norm for details.
-    add_summaries: Whether to add activation summaries.
-    scope: Optional Variable scope.
-
-  Returns:
-    net: A float32 Tensor of shape [batch, 2048]; the flattened output of the
-      final Inception pooling layer, used as the image embedding.
-  """
-  # Only consider the inception model to be in training mode if it's trainable.
-  is_inception_model_training = trainable and is_training
-
-  if use_batch_norm:
-    # Default parameters for batch normalization.
-    if not batch_norm_params:
-      batch_norm_params = {
-          "is_training": is_inception_model_training,
-          "trainable": trainable,
-          # Decay for the moving averages.
-          "decay": 0.9997,
-          # Epsilon to prevent 0s in variance.
-          "epsilon": 0.001,
-          # Collection containing the moving mean and moving variance.
-          "variables_collections": {
-              "beta": None,
-              "gamma": None,
-              "moving_mean": ["moving_vars"],
-              "moving_variance": ["moving_vars"],
-          }
-      }
-  else:
-    batch_norm_params = None
-
-  if trainable:
-    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
-  else:
-    weights_regularizer = None
-
-  with tf.variable_scope(scope, "InceptionV3", [images]) as scope:
-    with slim.arg_scope(
-        [slim.conv2d, slim.fully_connected],
-        weights_regularizer=weights_regularizer,
-        trainable=trainable):
-      with slim.arg_scope(
-          [slim.conv2d],
-          weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
-          activation_fn=tf.nn.relu,
-          normalizer_fn=slim.batch_norm,
-          normalizer_params=batch_norm_params):
-        net, end_points = inception_v3_base(images, scope=scope)
-        with tf.variable_scope("logits"):
-          shape = net.get_shape()
-          net = slim.avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
-          net = slim.dropout(
-              net,
-              keep_prob=dropout_keep_prob,
-              is_training=is_inception_model_training,
-              scope="dropout")
-          net = slim.flatten(net, scope="flatten")
-
-  # Add summaries.
-  if add_summaries:
-    for v in end_points.values():
-      tf.contrib.layers.summaries.summarize_activation(v)
-
-  return net
diff --git a/research/im2txt/im2txt/ops/image_embedding_test.py b/research/im2txt/im2txt/ops/image_embedding_test.py
deleted file mode 100644
index 66324d68e..000000000
--- a/research/im2txt/im2txt/ops/image_embedding_test.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for tensorflow_models.im2txt.ops.image_embedding.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from im2txt.ops import image_embedding - - -class InceptionV3Test(tf.test.TestCase): - - def setUp(self): - super(InceptionV3Test, self).setUp() - - batch_size = 4 - height = 299 - width = 299 - num_channels = 3 - self._images = tf.placeholder(tf.float32, - [batch_size, height, width, num_channels]) - self._batch_size = batch_size - - def _countInceptionParameters(self): - """Counts the number of parameters in the inception model at top scope.""" - counter = {} - for v in tf.global_variables(): - name_tokens = v.op.name.split("/") - if name_tokens[0] == "InceptionV3": - name = "InceptionV3/" + name_tokens[1] - num_params = v.get_shape().num_elements() - assert num_params - counter[name] = counter.get(name, 0) + num_params - return counter - - def _verifyParameterCounts(self): - """Verifies the number of parameters in the inception model.""" - param_counts = self._countInceptionParameters() - expected_param_counts = { - "InceptionV3/Conv2d_1a_3x3": 960, - "InceptionV3/Conv2d_2a_3x3": 9312, - "InceptionV3/Conv2d_2b_3x3": 18624, - "InceptionV3/Conv2d_3b_1x1": 5360, - "InceptionV3/Conv2d_4a_3x3": 138816, - "InceptionV3/Mixed_5b": 256368, - "InceptionV3/Mixed_5c": 277968, - "InceptionV3/Mixed_5d": 285648, - "InceptionV3/Mixed_6a": 1153920, - "InceptionV3/Mixed_6b": 1298944, - "InceptionV3/Mixed_6c": 1692736, - "InceptionV3/Mixed_6d": 1692736, - "InceptionV3/Mixed_6e": 2143872, - "InceptionV3/Mixed_7a": 1699584, - "InceptionV3/Mixed_7b": 5047872, - "InceptionV3/Mixed_7c": 6080064, - } - self.assertDictEqual(expected_param_counts, param_counts) - - def _assertCollectionSize(self, expected_size, collection): - actual_size = len(tf.get_collection(collection)) - if expected_size != actual_size: - self.fail("Found %d items in collection %s (expected %d)." 
% - (actual_size, collection, expected_size)) - - def testTrainableTrueIsTrainingTrue(self): - embeddings = image_embedding.inception_v3( - self._images, trainable=True, is_training=True) - self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) - - self._verifyParameterCounts() - self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) - self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES) - self._assertCollectionSize(188, tf.GraphKeys.UPDATE_OPS) - self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES) - self._assertCollectionSize(0, tf.GraphKeys.LOSSES) - self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) - - def testTrainableTrueIsTrainingFalse(self): - embeddings = image_embedding.inception_v3( - self._images, trainable=True, is_training=False) - self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) - - self._verifyParameterCounts() - self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) - self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES) - self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) - self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES) - self._assertCollectionSize(0, tf.GraphKeys.LOSSES) - self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) - - def testTrainableFalseIsTrainingTrue(self): - embeddings = image_embedding.inception_v3( - self._images, trainable=False, is_training=True) - self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) - - self._verifyParameterCounts() - self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) - self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES) - self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) - self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES) - self._assertCollectionSize(0, tf.GraphKeys.LOSSES) - self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) - - def testTrainableFalseIsTrainingFalse(self): - embeddings = image_embedding.inception_v3( - self._images, trainable=False, is_training=False) - self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list()) - - self._verifyParameterCounts() - self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES) - self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES) - self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS) - self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES) - self._assertCollectionSize(0, tf.GraphKeys.LOSSES) - self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES) - - -if __name__ == "__main__": - tf.test.main() diff --git a/research/im2txt/im2txt/ops/image_processing.py b/research/im2txt/im2txt/ops/image_processing.py deleted file mode 100644 index 6a7545547..000000000 --- a/research/im2txt/im2txt/ops/image_processing.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Helper functions for image preprocessing.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - - -def distort_image(image, thread_id): - """Perform random distortions on an image. - - Args: - image: A float32 Tensor of shape [height, width, 3] with values in [0, 1). - thread_id: Preprocessing thread id used to select the ordering of color - distortions. There should be a multiple of 2 preprocessing threads. - - Returns: - distorted_image: A float32 Tensor of shape [height, width, 3] with values in - [0, 1]. - """ - # Randomly flip horizontally. - with tf.name_scope("flip_horizontal", values=[image]): - image = tf.image.random_flip_left_right(image) - - # Randomly distort the colors based on thread id. - color_ordering = thread_id % 2 - with tf.name_scope("distort_color", values=[image]): - if color_ordering == 0: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.032) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - elif color_ordering == 1: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.032) - - # The random_* ops do not necessarily clamp. - image = tf.clip_by_value(image, 0.0, 1.0) - - return image - - -def process_image(encoded_image, - is_training, - height, - width, - resize_height=346, - resize_width=346, - thread_id=0, - image_format="jpeg"): - """Decode an image, resize and apply random distortions. - - In training, images are distorted slightly differently depending on thread_id. - - Args: - encoded_image: String Tensor containing the image. - is_training: Boolean; whether preprocessing for training or eval. - height: Height of the output image. - width: Width of the output image. - resize_height: If > 0, resize height before crop to final dimensions. - resize_width: If > 0, resize width before crop to final dimensions. - thread_id: Preprocessing thread id used to select the ordering of color - distortions. There should be a multiple of 2 preprocessing threads. - image_format: "jpeg" or "png". - - Returns: - A float32 Tensor of shape [height, width, 3] with values in [-1, 1]. - - Raises: - ValueError: If image_format is invalid. - """ - # Helper function to log an image summary to the visualizer. Summaries are - # only logged in thread 0. - def image_summary(name, image): - if not thread_id: - tf.summary.image(name, tf.expand_dims(image, 0)) - - # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1). - with tf.name_scope("decode", values=[encoded_image]): - if image_format == "jpeg": - image = tf.image.decode_jpeg(encoded_image, channels=3) - elif image_format == "png": - image = tf.image.decode_png(encoded_image, channels=3) - else: - raise ValueError("Invalid image format: %s" % image_format) - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - image_summary("original_image", image) - - # Resize image. - assert (resize_height > 0) == (resize_width > 0) - if resize_height: - image = tf.image.resize_images(image, - size=[resize_height, resize_width], - method=tf.image.ResizeMethod.BILINEAR) - - # Crop to final dimensions. 
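  # In training mode, the crop below selects a random `height` x `width`
  # window of the resized image, which provides inexpensive translation
  # augmentation; in eval mode a deterministic central crop is used instead,
  # so repeated evaluations see identical pixels.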
-  if is_training:
-    image = tf.random_crop(image, [height, width, 3])
-  else:
-    # Central crop, assuming resize_height > height, resize_width > width.
-    image = tf.image.resize_image_with_crop_or_pad(image, height, width)
-
-  image_summary("resized_image", image)
-
-  # Randomly distort the image.
-  if is_training:
-    image = distort_image(image, thread_id)
-
-  image_summary("final_image", image)
-
-  # Rescale to [-1, 1] instead of [0, 1].
-  image = tf.subtract(image, 0.5)
-  image = tf.multiply(image, 2.0)
-  return image
diff --git a/research/im2txt/im2txt/ops/inputs.py b/research/im2txt/im2txt/ops/inputs.py
deleted file mode 100644
index 5dc90c0ce..000000000
--- a/research/im2txt/im2txt/ops/inputs.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Input ops."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import tensorflow as tf
-
-
-def parse_sequence_example(serialized, image_feature, caption_feature):
-  """Parses a tensorflow.SequenceExample into an image and caption.
-
-  Args:
-    serialized: A scalar string Tensor; a single serialized SequenceExample.
-    image_feature: Name of SequenceExample context feature containing image
-      data.
-    caption_feature: Name of SequenceExample feature list containing integer
-      captions.
-
-  Returns:
-    encoded_image: A scalar string Tensor containing a JPEG encoded image.
-    caption: A 1-D int64 Tensor with dynamically specified length.
-  """
-  context, sequence = tf.parse_single_sequence_example(
-      serialized,
-      context_features={
-          image_feature: tf.FixedLenFeature([], dtype=tf.string)
-      },
-      sequence_features={
-          caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),
-      })
-
-  encoded_image = context[image_feature]
-  caption = sequence[caption_feature]
-  return encoded_image, caption
-
-
-def prefetch_input_data(reader,
-                        file_pattern,
-                        is_training,
-                        batch_size,
-                        values_per_shard,
-                        input_queue_capacity_factor=16,
-                        num_reader_threads=1,
-                        shard_queue_name="filename_queue",
-                        value_queue_name="input_queue"):
-  """Prefetches string values from disk into an input queue.
-
-  In training the capacity of the queue is important because a larger queue
-  means better mixing of training examples between shards. The minimum number
-  of values kept in the queue is values_per_shard *
-  input_queue_capacity_factor, where input_queue_capacity_factor should be
-  chosen to trade off better mixing with memory usage.
-
-  Args:
-    reader: Instance of tf.ReaderBase.
-    file_pattern: Comma-separated list of file patterns (e.g.
-      /tmp/train_data-?????-of-00100).
-    is_training: Boolean; whether prefetching for training or eval.
-    batch_size: Model batch size used to determine queue capacity.
-    values_per_shard: Approximate number of values per shard.
- input_queue_capacity_factor: Minimum number of values to keep in the queue - in multiples of values_per_shard. See comments above. - num_reader_threads: Number of reader threads to fill the queue. - shard_queue_name: Name for the shards filename queue. - value_queue_name: Name for the values input queue. - - Returns: - A Queue containing prefetched string values. - """ - data_files = [] - for pattern in file_pattern.split(","): - data_files.extend(tf.gfile.Glob(pattern)) - if not data_files: - tf.logging.fatal("Found no input files matching %s", file_pattern) - else: - tf.logging.info("Prefetching values from %d files matching %s", - len(data_files), file_pattern) - - if is_training: - filename_queue = tf.train.string_input_producer( - data_files, shuffle=True, capacity=16, name=shard_queue_name) - min_queue_examples = values_per_shard * input_queue_capacity_factor - capacity = min_queue_examples + 100 * batch_size - values_queue = tf.RandomShuffleQueue( - capacity=capacity, - min_after_dequeue=min_queue_examples, - dtypes=[tf.string], - name="random_" + value_queue_name) - else: - filename_queue = tf.train.string_input_producer( - data_files, shuffle=False, capacity=1, name=shard_queue_name) - capacity = values_per_shard + 3 * batch_size - values_queue = tf.FIFOQueue( - capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name) - - enqueue_ops = [] - for _ in range(num_reader_threads): - _, value = reader.read(filename_queue) - enqueue_ops.append(values_queue.enqueue([value])) - tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( - values_queue, enqueue_ops)) - tf.summary.scalar( - "queue/%s/fraction_of_%d_full" % (values_queue.name, capacity), - tf.cast(values_queue.size(), tf.float32) * (1. / capacity)) - - return values_queue - - -def batch_with_dynamic_pad(images_and_captions, - batch_size, - queue_capacity, - add_summaries=True): - """Batches input images and captions. - - This function splits the caption into an input sequence and a target sequence, - where the target sequence is the input sequence right-shifted by 1. Input and - target sequences are batched and padded up to the maximum length of sequences - in the batch. A mask is created to distinguish real words from padding words. - - Example: - Actual captions in the batch ('-' denotes padded character): - [ - [ 1 2 3 4 5 ], - [ 1 2 3 4 - ], - [ 1 2 3 - - ], - ] - - input_seqs: - [ - [ 1 2 3 4 ], - [ 1 2 3 - ], - [ 1 2 - - ], - ] - - target_seqs: - [ - [ 2 3 4 5 ], - [ 2 3 4 - ], - [ 2 3 - - ], - ] - - mask: - [ - [ 1 1 1 1 ], - [ 1 1 1 0 ], - [ 1 1 0 0 ], - ] - - Args: - images_and_captions: A list of pairs [image, caption], where image is a - Tensor of shape [height, width, channels] and caption is a 1-D Tensor of - any length. Each pair will be processed and added to the queue in a - separate thread. - batch_size: Batch size. - queue_capacity: Queue capacity. - add_summaries: If true, add caption length summaries. - - Returns: - images: A Tensor of shape [batch_size, height, width, channels]. - input_seqs: An int32 Tensor of shape [batch_size, padded_length]. - target_seqs: An int32 Tensor of shape [batch_size, padded_length]. - mask: An int32 0/1 Tensor of shape [batch_size, padded_length]. 
- """ - enqueue_list = [] - for image, caption in images_and_captions: - caption_length = tf.shape(caption)[0] - input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0) - - input_seq = tf.slice(caption, [0], input_length) - target_seq = tf.slice(caption, [1], input_length) - indicator = tf.ones(input_length, dtype=tf.int32) - enqueue_list.append([image, input_seq, target_seq, indicator]) - - images, input_seqs, target_seqs, mask = tf.train.batch_join( - enqueue_list, - batch_size=batch_size, - capacity=queue_capacity, - dynamic_pad=True, - name="batch_and_pad") - - if add_summaries: - lengths = tf.add(tf.reduce_sum(mask, 1), 1) - tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths)) - tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths)) - tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths)) - - return images, input_seqs, target_seqs, mask diff --git a/research/im2txt/im2txt/run_inference.py b/research/im2txt/im2txt/run_inference.py deleted file mode 100644 index 9848522df..000000000 --- a/research/im2txt/im2txt/run_inference.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -r"""Generate captions for images using default beam search parameters.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import os - - -import tensorflow as tf - -from im2txt import configuration -from im2txt import inference_wrapper -from im2txt.inference_utils import caption_generator -from im2txt.inference_utils import vocabulary - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("checkpoint_path", "", - "Model checkpoint file or directory containing a " - "model checkpoint file.") -tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.") -tf.flags.DEFINE_string("input_files", "", - "File pattern or comma-separated list of file patterns " - "of image files.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def main(_): - # Build the inference graph. - g = tf.Graph() - with g.as_default(): - model = inference_wrapper.InferenceWrapper() - restore_fn = model.build_graph_from_config(configuration.ModelConfig(), - FLAGS.checkpoint_path) - g.finalize() - - # Create the vocabulary. - vocab = vocabulary.Vocabulary(FLAGS.vocab_file) - - filenames = [] - for file_pattern in FLAGS.input_files.split(","): - filenames.extend(tf.gfile.Glob(file_pattern)) - tf.logging.info("Running caption generation on %d files matching %s", - len(filenames), FLAGS.input_files) - - with tf.Session(graph=g) as sess: - # Load the model from checkpoint. - restore_fn(sess) - - # Prepare the caption generator. Here we are implicitly using the default - # beam search parameters. See caption_generator.py for a description of the - # available beam search parameters. 
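    # Beam search keeps the `beam_size` most probable partial captions at each
    # step and extends each of them with every vocabulary word, rather than
    # greedily committing to a single word; a nonzero
    # length_normalization_factor divides each caption's log-probability by a
    # power of its length so that longer captions are not unduly penalized.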
- generator = caption_generator.CaptionGenerator(model, vocab) - - for filename in filenames: - with tf.gfile.GFile(filename, "rb") as f: - image = f.read() - captions = generator.beam_search(sess, image) - print("Captions for image %s:" % os.path.basename(filename)) - for i, caption in enumerate(captions): - # Ignore begin and end words. - sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]] - sentence = " ".join(sentence) - print(" %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/im2txt/im2txt/show_and_tell_model.py b/research/im2txt/im2txt/show_and_tell_model.py deleted file mode 100644 index 0ac29e7fd..000000000 --- a/research/im2txt/im2txt/show_and_tell_model.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555. - -"Show and Tell: A Neural Image Caption Generator" -Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from im2txt.ops import image_embedding -from im2txt.ops import image_processing -from im2txt.ops import inputs as input_ops - - -class ShowAndTellModel(object): - """Image-to-text implementation based on http://arxiv.org/abs/1411.4555. - - "Show and Tell: A Neural Image Caption Generator" - Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan - """ - - def __init__(self, config, mode, train_inception=False): - """Basic setup. - - Args: - config: Object containing configuration parameters. - mode: "train", "eval" or "inference". - train_inception: Whether the inception submodel variables are trainable. - """ - assert mode in ["train", "eval", "inference"] - self.config = config - self.mode = mode - self.train_inception = train_inception - - # Reader for the input data. - self.reader = tf.TFRecordReader() - - # To match the "Show and Tell" paper we initialize all variables with a - # random uniform initializer. - self.initializer = tf.random_uniform_initializer( - minval=-self.config.initializer_scale, - maxval=self.config.initializer_scale) - - # A float32 Tensor with shape [batch_size, height, width, channels]. - self.images = None - - # An int32 Tensor with shape [batch_size, padded_length]. - self.input_seqs = None - - # An int32 Tensor with shape [batch_size, padded_length]. - self.target_seqs = None - - # An int32 0/1 Tensor with shape [batch_size, padded_length]. - self.input_mask = None - - # A float32 Tensor with shape [batch_size, embedding_size]. - self.image_embeddings = None - - # A float32 Tensor with shape [batch_size, padded_length, embedding_size]. - self.seq_embeddings = None - - # A float32 scalar Tensor; the total loss for the trainer to optimize. 
- self.total_loss = None - - # A float32 Tensor with shape [batch_size * padded_length]. - self.target_cross_entropy_losses = None - - # A float32 Tensor with shape [batch_size * padded_length]. - self.target_cross_entropy_loss_weights = None - - # Collection of variables from the inception submodel. - self.inception_variables = [] - - # Function to restore the inception submodel from checkpoint. - self.init_fn = None - - # Global step Tensor. - self.global_step = None - - def is_training(self): - """Returns true if the model is built for training mode.""" - return self.mode == "train" - - def process_image(self, encoded_image, thread_id=0): - """Decodes and processes an image string. - - Args: - encoded_image: A scalar string Tensor; the encoded image. - thread_id: Preprocessing thread id used to select the ordering of color - distortions. - - Returns: - A float32 Tensor of shape [height, width, 3]; the processed image. - """ - return image_processing.process_image(encoded_image, - is_training=self.is_training(), - height=self.config.image_height, - width=self.config.image_width, - thread_id=thread_id, - image_format=self.config.image_format) - - def build_inputs(self): - """Input prefetching, preprocessing and batching. - - Outputs: - self.images - self.input_seqs - self.target_seqs (training and eval only) - self.input_mask (training and eval only) - """ - if self.mode == "inference": - # In inference mode, images and inputs are fed via placeholders. - image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed") - input_feed = tf.placeholder(dtype=tf.int64, - shape=[None], # batch_size - name="input_feed") - - # Process image and insert batch dimensions. - images = tf.expand_dims(self.process_image(image_feed), 0) - input_seqs = tf.expand_dims(input_feed, 1) - - # No target sequences or input mask in inference mode. - target_seqs = None - input_mask = None - else: - # Prefetch serialized SequenceExample protos. - input_queue = input_ops.prefetch_input_data( - self.reader, - self.config.input_file_pattern, - is_training=self.is_training(), - batch_size=self.config.batch_size, - values_per_shard=self.config.values_per_input_shard, - input_queue_capacity_factor=self.config.input_queue_capacity_factor, - num_reader_threads=self.config.num_input_reader_threads) - - # Image processing and random distortion. Split across multiple threads - # with each thread applying a slightly different distortion. - assert self.config.num_preprocess_threads % 2 == 0 - images_and_captions = [] - for thread_id in range(self.config.num_preprocess_threads): - serialized_sequence_example = input_queue.dequeue() - encoded_image, caption = input_ops.parse_sequence_example( - serialized_sequence_example, - image_feature=self.config.image_feature_name, - caption_feature=self.config.caption_feature_name) - image = self.process_image(encoded_image, thread_id=thread_id) - images_and_captions.append([image, caption]) - - # Batch inputs. - queue_capacity = (2 * self.config.num_preprocess_threads * - self.config.batch_size) - images, input_seqs, target_seqs, input_mask = ( - input_ops.batch_with_dynamic_pad(images_and_captions, - batch_size=self.config.batch_size, - queue_capacity=queue_capacity)) - - self.images = images - self.input_seqs = input_seqs - self.target_seqs = target_seqs - self.input_mask = input_mask - - def build_image_embeddings(self): - """Builds the image model subgraph and generates image embeddings. 
-
-    Inputs:
-      self.images
-
-    Outputs:
-      self.image_embeddings
-    """
-    inception_output = image_embedding.inception_v3(
-        self.images,
-        trainable=self.train_inception,
-        is_training=self.is_training())
-    self.inception_variables = tf.get_collection(
-        tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
-
-    # Map inception output into embedding space.
-    with tf.variable_scope("image_embedding") as scope:
-      image_embeddings = tf.contrib.layers.fully_connected(
-          inputs=inception_output,
-          num_outputs=self.config.embedding_size,
-          activation_fn=None,
-          weights_initializer=self.initializer,
-          biases_initializer=None,
-          scope=scope)
-
-    # Save the embedding size in the graph.
-    tf.constant(self.config.embedding_size, name="embedding_size")
-
-    self.image_embeddings = image_embeddings
-
-  def build_seq_embeddings(self):
-    """Builds the input sequence embeddings.
-
-    Inputs:
-      self.input_seqs
-
-    Outputs:
-      self.seq_embeddings
-    """
-    with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
-      embedding_map = tf.get_variable(
-          name="map",
-          shape=[self.config.vocab_size, self.config.embedding_size],
-          initializer=self.initializer)
-      seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)
-
-    self.seq_embeddings = seq_embeddings
-
-  def build_model(self):
-    """Builds the model.
-
-    Inputs:
-      self.image_embeddings
-      self.seq_embeddings
-      self.target_seqs (training and eval only)
-      self.input_mask (training and eval only)
-
-    Outputs:
-      self.total_loss (training and eval only)
-      self.target_cross_entropy_losses (training and eval only)
-      self.target_cross_entropy_loss_weights (training and eval only)
-    """
-    # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
-    # modified LSTM in the "Show and Tell" paper has no biases and outputs
-    # new_c * sigmoid(o).
-    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
-        num_units=self.config.num_lstm_units, state_is_tuple=True)
-    if self.mode == "train":
-      lstm_cell = tf.contrib.rnn.DropoutWrapper(
-          lstm_cell,
-          input_keep_prob=self.config.lstm_dropout_keep_prob,
-          output_keep_prob=self.config.lstm_dropout_keep_prob)
-
-    with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
-      # Feed the image embeddings to set the initial LSTM state.
-      zero_state = lstm_cell.zero_state(
-          batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
-      _, initial_state = lstm_cell(self.image_embeddings, zero_state)
-
-      # Allow the LSTM variables to be reused.
-      lstm_scope.reuse_variables()
-
-      if self.mode == "inference":
-        # In inference mode, use concatenated states for convenient feeding and
-        # fetching.
-        tf.concat(axis=1, values=initial_state, name="initial_state")
-
-        # Placeholder for feeding a batch of concatenated states.
-        state_feed = tf.placeholder(dtype=tf.float32,
-                                    shape=[None, sum(lstm_cell.state_size)],
-                                    name="state_feed")
-        state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
-
-        # Run a single LSTM step.
-        lstm_outputs, state_tuple = lstm_cell(
-            inputs=tf.squeeze(self.seq_embeddings, axis=[1]),
-            state=state_tuple)
-
-        # Concatenate the resulting state.
-        tf.concat(axis=1, values=state_tuple, name="state")
-      else:
-        # Run the batch of sequence embeddings through the LSTM.
-        sequence_length = tf.reduce_sum(self.input_mask, 1)
-        lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell,
-                                            inputs=self.seq_embeddings,
-                                            sequence_length=sequence_length,
-                                            initial_state=initial_state,
-                                            dtype=tf.float32,
-                                            scope=lstm_scope)
-
-    # Stack batches vertically.
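    # In training and eval, this folds the [batch_size, padded_length,
    # num_lstm_units] LSTM outputs into [batch_size * padded_length,
    # num_lstm_units] so a single fully connected layer can compute logits for
    # every timestep at once; in inference mode the single-step outputs are
    # already two-dimensional.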
- lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size]) - - with tf.variable_scope("logits") as logits_scope: - logits = tf.contrib.layers.fully_connected( - inputs=lstm_outputs, - num_outputs=self.config.vocab_size, - activation_fn=None, - weights_initializer=self.initializer, - scope=logits_scope) - - if self.mode == "inference": - tf.nn.softmax(logits, name="softmax") - else: - targets = tf.reshape(self.target_seqs, [-1]) - weights = tf.to_float(tf.reshape(self.input_mask, [-1])) - - # Compute losses. - losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, - logits=logits) - batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)), - tf.reduce_sum(weights), - name="batch_loss") - tf.losses.add_loss(batch_loss) - total_loss = tf.losses.get_total_loss() - - # Add summaries. - tf.summary.scalar("losses/batch_loss", batch_loss) - tf.summary.scalar("losses/total_loss", total_loss) - for var in tf.trainable_variables(): - tf.summary.histogram("parameters/" + var.op.name, var) - - self.total_loss = total_loss - self.target_cross_entropy_losses = losses # Used in evaluation. - self.target_cross_entropy_loss_weights = weights # Used in evaluation. - - def setup_inception_initializer(self): - """Sets up the function to restore inception variables from checkpoint.""" - if self.mode != "inference": - # Restore inception variables only. - saver = tf.train.Saver(self.inception_variables) - - def restore_fn(sess): - tf.logging.info("Restoring Inception variables from checkpoint file %s", - self.config.inception_checkpoint_file) - saver.restore(sess, self.config.inception_checkpoint_file) - - self.init_fn = restore_fn - - def setup_global_step(self): - """Sets up the global step Tensor.""" - global_step = tf.Variable( - initial_value=0, - name="global_step", - trainable=False, - collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES]) - - self.global_step = global_step - - def build(self): - """Creates all ops for training and evaluation.""" - self.build_inputs() - self.build_image_embeddings() - self.build_seq_embeddings() - self.build_model() - self.setup_inception_initializer() - self.setup_global_step() diff --git a/research/im2txt/im2txt/show_and_tell_model_test.py b/research/im2txt/im2txt/show_and_tell_model_test.py deleted file mode 100644 index 0bdfb6e1a..000000000 --- a/research/im2txt/im2txt/show_and_tell_model_test.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Tests for tensorflow_models.im2txt.show_and_tell_model."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-import numpy as np
-import tensorflow as tf
-
-from im2txt import configuration
-from im2txt import show_and_tell_model
-
-
-class ShowAndTellModel(show_and_tell_model.ShowAndTellModel):
-  """Subclass of ShowAndTellModel without the disk I/O."""
-
-  def build_inputs(self):
-    if self.mode == "inference":
-      # Inference mode doesn't read from disk, so defer to parent.
-      return super(ShowAndTellModel, self).build_inputs()
-    else:
-      # Replace disk I/O with random Tensors.
-      self.images = tf.random_uniform(
-          shape=[self.config.batch_size, self.config.image_height,
-                 self.config.image_width, 3],
-          minval=-1,
-          maxval=1)
-      self.input_seqs = tf.random_uniform(
-          [self.config.batch_size, 15],
-          minval=0,
-          maxval=self.config.vocab_size,
-          dtype=tf.int64)
-      self.target_seqs = tf.random_uniform(
-          [self.config.batch_size, 15],
-          minval=0,
-          maxval=self.config.vocab_size,
-          dtype=tf.int64)
-      self.input_mask = tf.ones_like(self.input_seqs)
-
-
-class ShowAndTellModelTest(tf.test.TestCase):
-
-  def setUp(self):
-    super(ShowAndTellModelTest, self).setUp()
-    self._model_config = configuration.ModelConfig()
-
-  def _countModelParameters(self):
-    """Counts the number of parameters in the model at top level scope."""
-    counter = {}
-    for v in tf.global_variables():
-      name = v.op.name.split("/")[0]
-      num_params = v.get_shape().num_elements()
-      assert num_params
-      counter[name] = counter.get(name, 0) + num_params
-    return counter
-
-  def _checkModelParameters(self):
-    """Verifies the number of parameters in the model."""
-    param_counts = self._countModelParameters()
-    expected_param_counts = {
-        "InceptionV3": 21802784,
-        # inception_output_size * embedding_size
-        "image_embedding": 1048576,
-        # vocab_size * embedding_size
-        "seq_embedding": 6144000,
-        # (embedding_size + num_lstm_units + 1) * 4 * num_lstm_units
-        "lstm": 2099200,
-        # (num_lstm_units + 1) * vocab_size
-        "logits": 6156000,
-        "global_step": 1,
-    }
-    self.assertDictEqual(expected_param_counts, param_counts)
-
-  def _checkOutputs(self, expected_shapes, feed_dict=None):
-    """Verifies that the model produces expected outputs.
-
-    Args:
-      expected_shapes: A dict mapping Tensor or Tensor name to expected output
-        shape.
-      feed_dict: Values of Tensors to feed into Session.run().
-    """
-    # Materialize the keys as a list so they can be indexed below (dict views
-    # are not indexable under Python 3).
-    fetches = list(expected_shapes.keys())
-
-    with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
-      outputs = sess.run(fetches, feed_dict)
-
-    for index, output in enumerate(outputs):
-      tensor = fetches[index]
-      expected = expected_shapes[tensor]
-      actual = output.shape
-      if expected != actual:
-        self.fail("Tensor %s has shape %s (expected %s)."
% - (tensor, actual, expected)) - - def testBuildForTraining(self): - model = ShowAndTellModel(self._model_config, mode="train") - model.build() - - self._checkModelParameters() - - expected_shapes = { - # [batch_size, image_height, image_width, 3] - model.images: (32, 299, 299, 3), - # [batch_size, sequence_length] - model.input_seqs: (32, 15), - # [batch_size, sequence_length] - model.target_seqs: (32, 15), - # [batch_size, sequence_length] - model.input_mask: (32, 15), - # [batch_size, embedding_size] - model.image_embeddings: (32, 512), - # [batch_size, sequence_length, embedding_size] - model.seq_embeddings: (32, 15, 512), - # Scalar - model.total_loss: (), - # [batch_size * sequence_length] - model.target_cross_entropy_losses: (480,), - # [batch_size * sequence_length] - model.target_cross_entropy_loss_weights: (480,), - } - self._checkOutputs(expected_shapes) - - def testBuildForEval(self): - model = ShowAndTellModel(self._model_config, mode="eval") - model.build() - - self._checkModelParameters() - - expected_shapes = { - # [batch_size, image_height, image_width, 3] - model.images: (32, 299, 299, 3), - # [batch_size, sequence_length] - model.input_seqs: (32, 15), - # [batch_size, sequence_length] - model.target_seqs: (32, 15), - # [batch_size, sequence_length] - model.input_mask: (32, 15), - # [batch_size, embedding_size] - model.image_embeddings: (32, 512), - # [batch_size, sequence_length, embedding_size] - model.seq_embeddings: (32, 15, 512), - # Scalar - model.total_loss: (), - # [batch_size * sequence_length] - model.target_cross_entropy_losses: (480,), - # [batch_size * sequence_length] - model.target_cross_entropy_loss_weights: (480,), - } - self._checkOutputs(expected_shapes) - - def testBuildForInference(self): - model = ShowAndTellModel(self._model_config, mode="inference") - model.build() - - self._checkModelParameters() - - # Test feeding an image to get the initial LSTM state. - images_feed = np.random.rand(1, 299, 299, 3) - feed_dict = {model.images: images_feed} - expected_shapes = { - # [batch_size, embedding_size] - model.image_embeddings: (1, 512), - # [batch_size, 2 * num_lstm_units] - "lstm/initial_state:0": (1, 1024), - } - self._checkOutputs(expected_shapes, feed_dict) - - # Test feeding a batch of inputs and LSTM states to get softmax output and - # LSTM states. - input_feed = np.random.randint(0, 10, size=3) - state_feed = np.random.rand(3, 1024) - feed_dict = {"input_feed:0": input_feed, "lstm/state_feed:0": state_feed} - expected_shapes = { - # [batch_size, 2 * num_lstm_units] - "lstm/state:0": (3, 1024), - # [batch_size, vocab_size] - "softmax:0": (3, 12000), - } - self._checkOutputs(expected_shapes, feed_dict) - - -if __name__ == "__main__": - tf.test.main() diff --git a/research/im2txt/im2txt/train.py b/research/im2txt/im2txt/train.py deleted file mode 100644 index db602735b..000000000 --- a/research/im2txt/im2txt/train.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Train the model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from im2txt import configuration -from im2txt import show_and_tell_model - -FLAGS = tf.app.flags.FLAGS - -tf.flags.DEFINE_string("input_file_pattern", "", - "File pattern of sharded TFRecord input files.") -tf.flags.DEFINE_string("inception_checkpoint_file", "", - "Path to a pretrained inception_v3 model.") -tf.flags.DEFINE_string("train_dir", "", - "Directory for saving and loading model checkpoints.") -tf.flags.DEFINE_boolean("train_inception", False, - "Whether to train inception submodel variables.") -tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.") -tf.flags.DEFINE_integer("log_every_n_steps", 1, - "Frequency at which loss and global step are logged.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def main(unused_argv): - assert FLAGS.input_file_pattern, "--input_file_pattern is required" - assert FLAGS.train_dir, "--train_dir is required" - - model_config = configuration.ModelConfig() - model_config.input_file_pattern = FLAGS.input_file_pattern - model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file - training_config = configuration.TrainingConfig() - - # Create training directory. - train_dir = FLAGS.train_dir - if not tf.gfile.IsDirectory(train_dir): - tf.logging.info("Creating training directory: %s", train_dir) - tf.gfile.MakeDirs(train_dir) - - # Build the TensorFlow graph. - g = tf.Graph() - with g.as_default(): - # Build the model. - model = show_and_tell_model.ShowAndTellModel( - model_config, mode="train", train_inception=FLAGS.train_inception) - model.build() - - # Set up the learning rate. - learning_rate_decay_fn = None - if FLAGS.train_inception: - learning_rate = tf.constant(training_config.train_inception_learning_rate) - else: - learning_rate = tf.constant(training_config.initial_learning_rate) - if training_config.learning_rate_decay_factor > 0: - num_batches_per_epoch = (training_config.num_examples_per_epoch / - model_config.batch_size) - decay_steps = int(num_batches_per_epoch * - training_config.num_epochs_per_decay) - - def _learning_rate_decay_fn(learning_rate, global_step): - return tf.train.exponential_decay( - learning_rate, - global_step, - decay_steps=decay_steps, - decay_rate=training_config.learning_rate_decay_factor, - staircase=True) - - learning_rate_decay_fn = _learning_rate_decay_fn - - # Set up the training ops. - train_op = tf.contrib.layers.optimize_loss( - loss=model.total_loss, - global_step=model.global_step, - learning_rate=learning_rate, - optimizer=training_config.optimizer, - clip_gradients=training_config.clip_gradients, - learning_rate_decay_fn=learning_rate_decay_fn) - - # Set up the Saver for saving and restoring model checkpoints. - saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep) - - # Run training. 
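  # slim.learning.train runs train_op in a managed session: it starts the
  # input queue runners, calls init_fn once to load the pretrained Inception
  # weights when no checkpoint exists yet in train_dir, and periodically
  # writes checkpoints with the saver above.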
- tf.contrib.slim.learning.train( - train_op, - train_dir, - log_every_n_steps=FLAGS.log_every_n_steps, - graph=g, - global_step=model.global_step, - number_of_steps=FLAGS.number_of_steps, - init_fn=model.init_fn, - saver=saver) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/inception/.gitignore b/research/inception/.gitignore deleted file mode 100644 index 58cbf2f4e..000000000 --- a/research/inception/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -/bazel-bin -/bazel-ci_build-cache -/bazel-genfiles -/bazel-out -/bazel-inception -/bazel-testlogs -/bazel-tf diff --git a/research/inception/README.md b/research/inception/README.md deleted file mode 100644 index beed66cf5..000000000 --- a/research/inception/README.md +++ /dev/null @@ -1,858 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -**NOTE: For the most part, you will find a newer version of this code at [models/research/slim](https://github.com/tensorflow/models/tree/master/research/slim).** In particular: - -* `inception_train.py` and `imagenet_train.py` should no longer be used. The slim editions for running on multiple GPUs are the current best examples. -* `inception_distributed_train.py` and `imagenet_distributed_train.py` are still valid examples of distributed training. - -For performance benchmarking, please see https://www.tensorflow.org/performance/benchmarks. - ---- - -# Inception in TensorFlow - -[ImageNet](http://www.image-net.org/) is a common academic data set in machine -learning for training an image recognition system. Code in this directory -demonstrates how to use TensorFlow to train and evaluate a type of convolutional -neural network (CNN) on this academic data set. In particular, we demonstrate -how to train the Inception v3 architecture as specified in: - -_Rethinking the Inception Architecture for Computer Vision_ - -Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew -Wojna - -http://arxiv.org/abs/1512.00567 - -This network achieves 21.2% top-1 and 5.6% top-5 error for single frame -evaluation with a computational cost of 5 billion multiply-adds per inference -and with using less than 25 million parameters. Below is a visualization of the -model architecture. - -![Inception-v3 Architecture](g3doc/inception_v3_architecture.png) - -## Description of Code - -The code base provides three core binaries for: - -* Training an Inception v3 network from scratch across multiple GPUs and/or - multiple machines using the ImageNet 2012 Challenge training data set. -* Evaluating an Inception v3 network using the ImageNet 2012 Challenge - validation data set. -* Retraining an Inception v3 network on a novel task and back-propagating the - errors to fine tune the network weights. - -The training procedure employs synchronous stochastic gradient descent across -multiple GPUs. The user may specify the number of GPUs they wish to harness. The -synchronous training performs *batch-splitting* by dividing a given batch across -multiple GPUs. 
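To make the batch-splitting concrete, here is a minimal TF1-style sketch of the tower loop. The names `NUM_GPUS`, `tower_loss`, and `average_gradients`, and the toy inputs, are illustrative stand-ins rather than the actual helpers in `inception_train.py`, and the sketch assumes dense gradients (no `IndexedSlices`):

```python
import tensorflow as tf

NUM_GPUS = 2  # Assumed GPU count for this sketch.

def average_gradients(tower_grads):
  """Averages matched (gradient, variable) pairs across all towers."""
  averaged = []
  for grads_and_vars in zip(*tower_grads):
    grads = tf.stack([g for g, _ in grads_and_vars])
    averaged.append((tf.reduce_mean(grads, axis=0), grads_and_vars[0][1]))
  return averaged

def tower_loss(images, labels):
  """Stand-in for the Inception tower; any per-tower loss works here."""
  logits = tf.layers.dense(tf.layers.flatten(images), 10)
  return tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

# Toy global batch; the real pipeline feeds preprocessed ImageNet batches.
images = tf.random_uniform([8 * NUM_GPUS, 32, 32, 3])
labels = tf.random_uniform([8 * NUM_GPUS], maxval=10, dtype=tf.int64)

opt = tf.train.GradientDescentOptimizer(0.1)
tower_grads = []
# Batch-splitting: each GPU processes 1/NUM_GPUS of the global batch.
for i, (image_split, label_split) in enumerate(
    zip(tf.split(images, NUM_GPUS), tf.split(labels, NUM_GPUS))):
  with tf.device("/gpu:%d" % i), tf.variable_scope("tower", reuse=(i > 0)):
    tower_grads.append(opt.compute_gradients(
        tower_loss(image_split, label_split)))

# A single synchronous update computed from the averaged tower gradients.
train_op = opt.apply_gradients(average_gradients(tower_grads))
```

The key design point is that `reuse=(i > 0)` makes every tower share one set of variables, so averaging the tower gradients yields a single synchronous update; the real code additionally pins those shared variables to the CPU, as described below.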
-
-The training setup is nearly identical to the section [Training a Model Using
-Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#launching_and_training_the_model_on_multiple_gpu_cards)
-where we have substituted the CIFAR-10 model architecture with Inception v3. The
-primary differences with that setup are:
-
-* Calculate and update the batch-norm statistics during training so that they
-  may be substituted in during evaluation.
-* Specify the model architecture using a (still experimental) higher level
-  language called TensorFlow-Slim.
-
-For more details about TensorFlow-Slim, please see the [Slim README](inception/slim/README.md). Please note that this higher-level language is still
-*experimental* and the API may change over time depending on usage and
-subsequent research.
-
-## Getting Started
-
-Before you run the training script for the first time, you will need to download
-and convert the ImageNet data to native TFRecord format. The TFRecord format
-consists of a set of sharded files where each entry is a serialized `tf.Example`
-proto. Each `tf.Example` proto contains the ImageNet image (JPEG encoded) as
-well as metadata such as label and bounding box information. See
-[`parse_example_proto`](inception/image_processing.py) for details.
-
-We provide a single [script](inception/data/download_and_preprocess_imagenet.sh) for
-downloading and converting ImageNet data to TFRecord format. Downloading and
-preprocessing the data may take several hours (up to half a day) depending on
-your network and computer speed. Please be patient.
-
-To begin, you will need to sign up for an account with [ImageNet](http://image-net.org) to gain access to the data. Look for the sign-up page,
-create an account and request an access key to download the data.
-
-After you have `USERNAME` and `PASSWORD`, you are ready to run our script. Make
-sure that your hard disk has at least 500 GB of free space for downloading and
-storing the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a
-location but feel free to edit accordingly.
-
-When you run the script below, please enter *USERNAME* and *PASSWORD* when
-prompted. This will occur at the very beginning. Once these values are entered,
-you will not need to interact with the script again.
-
-```shell
-# location of where to place the ImageNet data
-DATA_DIR=$HOME/imagenet-data
-
-# build the preprocessing script.
-cd tensorflow-models/inception
-bazel build //inception:download_and_preprocess_imagenet
-
-# run it
-bazel-bin/inception/download_and_preprocess_imagenet "${DATA_DIR}"
-```
-
-The final line of the output script should read:
-
-```shell
-2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set.
-```
-
-When the script finishes, you will find 1024 training files and 128 validation
-files in the `DATA_DIR`. The files will match the patterns
-`train-?????-of-01024` and `validation-?????-of-00128`, respectively.
-
-[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now
-ready to train or evaluate with the ImageNet data set.
-
-## How to Train from Scratch
-
-**WARNING** Training an Inception v3 network from scratch is a computationally
-intensive task and, depending on your compute setup, may take several days or
-even weeks.
-
-## How to Train from Scratch
-
-**WARNING** Training an Inception v3 network from scratch is a computationally
-intensive task and, depending on your compute setup, may take several days or
-even weeks.
-
-*Before proceeding* please read the [Convolutional Neural Networks](https://www.tensorflow.org/tutorials/deep_cnn/index.html)
-tutorial; in particular, focus on [Training a Model Using Multiple GPU Cards](https://www.tensorflow.org/tutorials/deep_cnn/index.html#launching_and_training_the_model_on_multiple_gpu_cards).
-The model training method is nearly identical to that described in the
-CIFAR-10 multi-GPU model training. Briefly, the model training:
-
-* Places an individual model replica on each GPU.
-* Splits the batch across the GPUs.
-* Updates model parameters synchronously by waiting for all GPUs to finish
-  processing a batch of data.
-
-The training procedure is encapsulated by this diagram of how operations and
-variables are placed on CPU and GPUs respectively.
-
-*(figure: model operations placed on the GPUs, shared variables on the CPU)*
-
-Each tower computes the gradients for a portion of the batch and the gradients
-are combined and averaged across the multiple towers in order to provide a
-single update of the Variables stored on the CPU.
-
-A crucial aspect of training a network of this size is *training speed* in
-terms of wall-clock time. The training speed is dictated by many factors --
-most importantly the batch size and the learning rate schedule. Both of these
-parameters are heavily coupled to the hardware setup.
-
-Generally speaking, the batch size is a difficult parameter to tune as it
-requires balancing the memory demands of the model, the memory available on
-the GPU and the speed of computation. In general, employing larger batch
-sizes leads to more efficient computation and potentially more efficient
-training steps.
-
-We have tested several hardware setups for training this model from scratch,
-but we emphasize that depending on your hardware setup, you may need to adapt
-the batch size and learning rate schedule.
-
-Please see the comments in `inception_train.py` for a few selected learning
-rate plans based on some selected hardware setups.
-
-To train this model, you simply need to specify the following:
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:imagenet_train
-
-# run it
-bazel-bin/inception/imagenet_train --num_gpus=1 --batch_size=32 --train_dir=/tmp/imagenet_train --data_dir=/tmp/imagenet_data
-```
-
-The model reads in the ImageNet training data from `--data_dir`. If you
-followed the instructions in [Getting Started](#getting-started), then set
-`--data_dir="${DATA_DIR}"`. The script assumes that there exists a set of
-sharded TFRecord files containing the ImageNet data. If you have not created
-TFRecord files, please refer to [Getting Started](#getting-started).
-
-Here is the output of the above command line when running on a Tesla K40c:
-
-```shell
-2016-03-07 12:24:59.922898: step 0, loss = 13.11 (5.3 examples/sec; 6.064 sec/batch)
-2016-03-07 12:25:55.206783: step 10, loss = 13.71 (9.4 examples/sec; 3.394 sec/batch)
-2016-03-07 12:26:28.905231: step 20, loss = 14.81 (9.5 examples/sec; 3.380 sec/batch)
-2016-03-07 12:27:02.699719: step 30, loss = 14.45 (9.5 examples/sec; 3.378 sec/batch)
-2016-03-07 12:27:36.515699: step 40, loss = 13.98 (9.5 examples/sec; 3.376 sec/batch)
-2016-03-07 12:28:10.220956: step 50, loss = 13.92 (9.6 examples/sec; 3.327 sec/batch)
-2016-03-07 12:28:43.658223: step 60, loss = 13.28 (9.6 examples/sec; 3.350 sec/batch)
-...
-```
-
-In this example, a log entry is printed every 10 steps and each line includes
-the total loss (which starts around 13.0-14.0) and the speed of processing in
-terms of throughput (examples/sec) and batch speed (sec/batch).
-
-The number of GPU devices is specified by `--num_gpus` (which defaults to 1).
-Specifying `--num_gpus` greater than 1 splits the batch evenly across the GPU
-cards.
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:imagenet_train
-
-# run it
-bazel-bin/inception/imagenet_train --num_gpus=2 --batch_size=64 --train_dir=/tmp/imagenet_train
-```
-
-This model splits the batch of 64 images across 2 GPUs and calculates the
-average gradient by waiting for both GPUs to finish calculating the gradients
-from their respective data (see the diagram above). Generally speaking, using
-larger numbers of GPUs leads to higher throughput as well as the opportunity
-to use larger batch sizes. In turn, larger batch sizes imply better estimates
-of the gradient, enabling the use of higher learning rates. In summary, using
-more GPUs simply results in faster training.
-
-Note that there is considerable noise in the loss function on individual steps
-in the previous log. Because of this noise, it is difficult to discern how
-well a model is learning. The solution is to launch TensorBoard pointing to
-the directory containing the events log.
-
-```shell
-tensorboard --logdir=/tmp/imagenet_train
-```
-
-TensorBoard has access to the many summaries produced by the model that track
-statistics of the model behavior and the quality of the learned model. In
-particular, TensorBoard tracks an exponentially smoothed version of the loss.
-In practice, it is far easier to judge how well a model learns by monitoring
-the smoothed version of the loss.
-
-## How to Train from Scratch in a Distributed Setting
-
-**NOTE** Distributed TensorFlow requires version 0.8 or later.
-
-Distributed TensorFlow lets us use multiple machines to train a model faster.
-This is quite different from training with multiple GPU towers on a single
-machine, where all parameter and gradient computation happens in the same
-place. We coordinate the computation across multiple machines by employing a
-centralized repository for parameters that maintains a unified, single copy of
-the model parameters. Each individual machine sends gradient updates to the
-centralized parameter repository, which coordinates these updates and sends
-back updated parameters to the individual machines running the model training.
-
-We term each machine that runs a copy of the training a `worker` or `replica`.
-We term each machine that maintains model parameters a `ps`, short for
-`parameter server`. Note that we might have more than one machine acting as a
-`ps` as the model parameters may be sharded across multiple machines.
-
-Variables may be updated with synchronous or asynchronous gradient updates.
-One may construct an [`Optimizer`](https://www.tensorflow.org/api_docs/python/train.html#optimizers)
-in TensorFlow that constructs the necessary graph for either case diagrammed
-below from the TensorFlow [Whitepaper](http://download.tensorflow.org/paper/whitepaper2015.pdf):
-
-*(figures: synchronous and asynchronous data-parallel gradient updates, from the TensorFlow whitepaper)*
-
-In [a recent paper](https://arxiv.org/abs/1604.00981), synchronous gradient
-updates have been demonstrated to reach higher accuracy in a shorter amount of
-time. In this distributed Inception example we employ synchronous gradient
-updates.
-
-Note that in this example each replica has a single tower that uses one GPU.
-
-The command-line flags `worker_hosts` and `ps_hosts` specify available servers.
-The same binary will be used for both the `worker` jobs and the `ps` jobs. The
-command-line flag `job_name` specifies what role a task will be playing, and
-`task_id` identifies which one of those jobs it is running. Several things to
-note here:
-
-* The numbers of `ps` and `worker` tasks are inferred from the lists of hosts
-  specified in the flags. The `task_id` should be within the range `[0,
-  num_ps_tasks)` for `ps` tasks and `[0, num_worker_tasks)` for `worker`
-  tasks.
-* `ps` and `worker` tasks can run on the same machine, as long as that machine
-  has sufficient resources to handle both tasks. Note that the `ps` task does
-  not benefit from a GPU, so it should not attempt to use one (see below).
-* Multiple `worker` tasks can run on the same machine with multiple GPUs, so
-  machine_A with 2 GPUs may have 2 workers while machine_B with 1 GPU has just
-  1 worker.
-* The default learning rate schedule works well for a wide range of replica
-  counts (25, 50, 100), but feel free to tune it for even better results.
-* The command line of both `ps` and `worker` tasks should include the complete
-  list of `ps_hosts` and `worker_hosts`.
-* There is a chief `worker` among all workers, which defaults to `worker` 0.
-  The chief is in charge of initializing all the parameters and writing out
-  the summaries and the checkpoint. The checkpoint and summary will be in the
-  `train_dir` of the host for `worker` 0.
-* Each worker processes `batch_size` examples, but each gradient update is
-  computed from all replicas. Hence, the effective batch size of this model is
-  `batch_size * num_workers`.
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:imagenet_distributed_train
-
-# To start worker 0, go to the worker0 host and run the following (note that
-# task_id should be in the range [0, num_worker_tasks)):
-bazel-bin/inception/imagenet_distributed_train \
---batch_size=32 \
---data_dir=$HOME/imagenet-data \
---job_name='worker' \
---task_id=0 \
---ps_hosts='ps0.example.com:2222' \
---worker_hosts='worker0.example.com:2222,worker1.example.com:2222'
-
-# To start worker 1, go to the worker1 host and run the following (note that
-# task_id should be in the range [0, num_worker_tasks)):
-bazel-bin/inception/imagenet_distributed_train \
---batch_size=32 \
---data_dir=$HOME/imagenet-data \
---job_name='worker' \
---task_id=1 \
---ps_hosts='ps0.example.com:2222' \
---worker_hosts='worker0.example.com:2222,worker1.example.com:2222'
-
-# To start the parameter server (ps), go to the ps host and run the following
-# (note that task_id should be in the range [0, num_ps_tasks)):
-bazel-bin/inception/imagenet_distributed_train \
---job_name='ps' \
---task_id=0 \
---ps_hosts='ps0.example.com:2222' \
---worker_hosts='worker0.example.com:2222,worker1.example.com:2222'
-```
-
-If you have installed a GPU-compatible version of TensorFlow, the `ps` will
-also try to allocate GPU memory, although it is not helpful. This could
-potentially crash a worker on the same machine, as it leaves the worker
-little to no GPU memory to allocate.
-To avoid this, you can prefix the previous command to start the `ps` with
-`CUDA_VISIBLE_DEVICES=''`:
-
-```shell
-CUDA_VISIBLE_DEVICES='' bazel-bin/inception/imagenet_distributed_train \
---job_name='ps' \
---task_id=0 \
---ps_hosts='ps0.example.com:2222' \
---worker_hosts='worker0.example.com:2222,worker1.example.com:2222'
-```
-
-If you have run everything correctly, you should see a log in each `worker`
-job that looks like the following. Note that the training speed varies
-depending on your hardware, and the first several steps could take much
-longer.
-
-```shell
-INFO:tensorflow:PS hosts are: ['ps0.example.com:2222', 'ps1.example.com:2222']
-INFO:tensorflow:Worker hosts are: ['worker0.example.com:2222', 'worker1.example.com:2222']
-I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job ps -> {ps0.example.com:2222, ps1.example.com:2222}
-I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job worker -> {localhost:2222, worker1.example.com:2222}
-I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:202] Started server with target: grpc://localhost:2222
-INFO:tensorflow:Created variable global_step:0 with shape () and init
-
-...
-
-INFO:tensorflow:Created variable logits/logits/biases:0 with shape (1001,) and init
-INFO:tensorflow:SyncReplicas enabled: replicas_to_aggregate=2; total_num_replicas=2
-INFO:tensorflow:2016-04-13 01:56:26.405639 Supervisor
-INFO:tensorflow:Started 2 queues for processing input data.
-INFO:tensorflow:global_step/sec: 0
-INFO:tensorflow:Worker 0: 2016-04-13 01:58:40.342404: step 0, loss = 12.97(0.0 examples/sec; 65.428 sec/batch)
-INFO:tensorflow:global_step/sec: 0.0172907
-...
-```
-
-and a log in each `ps` job that looks like the following:
-
-```shell
-INFO:tensorflow:PS hosts are: ['ps0.example.com:2222', 'ps1.example.com:2222']
-INFO:tensorflow:Worker hosts are: ['worker0.example.com:2222', 'worker1.example.com:2222']
-I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job ps -> {localhost:2222, ps1.example.com:2222}
-I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:206] Initialize HostPortsGrpcChannelCache for job worker -> {worker0.example.com:2222, worker1.example.com:2222}
-I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:202] Started server with target: grpc://localhost:2222
-```
-
-If you compiled TensorFlow (v1.1-rc3 or later) with VERBS support and you have
-the required hardware and IB verbs software stack, you can specify
-`--protocol='grpc+verbs'` in order to use Verbs RDMA for tensor passing
-between workers and ps. The `--protocol` flag needs to be added to all tasks
-(ps and workers). The default protocol is the TensorFlow default of grpc.
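-
-For orientation, the `--ps_hosts`/`--worker_hosts`/`--job_name`/`--task_id`
-flags map onto a TensorFlow cluster definition roughly as follows (a minimal
-sketch reusing the example host names above, not the actual wiring in
-`inception_distributed_train.py`):
-
-```python
-import tensorflow.compat.v1 as tf
-tf.disable_v2_behavior()
-
-cluster = tf.train.ClusterSpec({
-    'ps': ['ps0.example.com:2222'],
-    'worker': ['worker0.example.com:2222', 'worker1.example.com:2222'],
-})
-# Each task starts a server for its role; a ps task would then block on
-# server.join() while workers build and run the training graph.
-server = tf.train.Server(cluster, job_name='worker', task_index=0)
-
-# replica_device_setter pins variables to the ps job and ops to this worker.
-with tf.device(tf.train.replica_device_setter(
-    worker_device='/job:worker/task:0', cluster=cluster)):
-  global_step = tf.train.get_or_create_global_step()
-```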
-
-[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now
-training Inception in a distributed manner.
-
-## How to Evaluate
-
-Evaluating an Inception v3 model on the ImageNet 2012 validation data set
-requires running a separate binary.
-
-The evaluation procedure is nearly identical to [Evaluating a Model](https://www.tensorflow.org/tutorials/deep_cnn/index.html#evaluating_a_model)
-described in the [Convolutional Neural Network](https://www.tensorflow.org/tutorials/deep_cnn/index.html)
-tutorial.
-
-**WARNING** Be careful not to run the evaluation and training binary on the
-same GPU or else you might run out of memory. Consider running the evaluation
-on a separate GPU if available, or suspending the training binary while
-running the evaluation on the same GPU.
-
-Briefly, one can evaluate the model by running:
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:imagenet_eval
-
-# run it
-bazel-bin/inception/imagenet_eval --checkpoint_dir=/tmp/imagenet_train --eval_dir=/tmp/imagenet_eval
-```
-
-Note that we point `--checkpoint_dir` to the location of the checkpoints saved
-by `inception_train.py` above. Running the above command results in the
-following output:
-
-```shell
-2016-02-17 22:32:50.391206: precision @ 1 = 0.735
-...
-```
-
-The script calculates the precision @ 1 over the entire validation data
-periodically. The precision @ 1 measures how often the highest scoring
-prediction from the model matched the ImageNet label -- in this case, 73.5%.
-If you wish to run the eval just once and not periodically, append the
-`--run_once` option.
-
-Much like the training script, `imagenet_eval.py` also exports summaries that
-may be visualized in TensorBoard. These summaries calculate additional
-statistics on the predictions (e.g. recall @ 5) as well as monitor the
-statistics of the model activations and weights during evaluation.
-
-## How to Fine-Tune a Pre-Trained Model on a New Task
-
-### Getting Started
-
-Much like training the ImageNet model, we must first convert a new data set to
-the sharded TFRecord format in which each entry is a serialized `tf.Example`
-proto.
-
-We have provided a script demonstrating how to do this for a small data set of
-a few thousand flower images spread across 5 labels:
-
-```shell
-daisy, dandelion, roses, sunflowers, tulips
-```
-
-There is a single automated script that downloads the data set and converts it
-to the TFRecord format. Much like the ImageNet data set, each record in the
-TFRecord format is a serialized `tf.Example` proto whose entries include a
-JPEG-encoded string and an integer label. Please see [`parse_example_proto`](inception/image_processing.py)
-for details.
-
-The script takes just a few minutes to run, depending on your network
-connection speed for downloading and processing the images. Your hard disk
-needs about 200 MB of free storage. Here we select `DATA_DIR=/tmp/flowers-data/`
-as such a location but feel free to edit accordingly.
-
-```shell
-# location of where to place the flowers data
-FLOWERS_DATA_DIR=/tmp/flowers-data/
-
-# build the preprocessing script.
-cd tensorflow-models/inception
-bazel build //inception:download_and_preprocess_flowers
-
-# run it
-bazel-bin/inception/download_and_preprocess_flowers "${FLOWERS_DATA_DIR}"
-```
-
-If the script runs successfully, the final line of the terminal output should
-look like:
-
-```shell
-2016-02-24 20:42:25.067551: Finished writing all 3170 images in data set.
-```
-
-When the script finishes, you will find 2 shards for the training and
-validation files in the `DATA_DIR`. The files will match the patterns
-`train-?????-of-00002` and `validation-?????-of-00002`, respectively.
-
-**NOTE** If you wish to prepare a custom image data set for transfer learning,
-you will need to invoke [`build_image_data.py`](inception/data/build_image_data.py)
-on your custom data set. Please see the associated options and assumptions
-behind this script by reading the comments section of [`build_image_data.py`](inception/data/build_image_data.py).
-Also, if your custom data has a different number of examples or classes, you
-need to change the appropriate values in [`imagenet_data.py`](inception/imagenet_data.py).
-
-The second piece you will need is a trained Inception v3 image model. You have
-the option of either training one yourself (see [How to Train from Scratch](#how-to-train-from-scratch)
-for details) or downloading a pre-trained model like so:
-
-```shell
-# location of where to place the Inception v3 model
-INCEPTION_MODEL_DIR=$HOME/inception-v3-model
-mkdir -p ${INCEPTION_MODEL_DIR}
-cd ${INCEPTION_MODEL_DIR}
-
-# download the Inception v3 model
-curl -O http://download.tensorflow.org/models/image/imagenet/inception-v3-2016-03-01.tar.gz
-tar xzf inception-v3-2016-03-01.tar.gz
-
-# this will create a directory called inception-v3 which contains the following files.
-> ls inception-v3
-README.txt
-checkpoint
-model.ckpt-157585
-```
-
-[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now
-ready to fine-tune your pre-trained Inception v3 model with the flower data
-set.
-
-### How to Retrain a Trained Model on the Flowers Data
-
-We are now ready to fine-tune a pre-trained Inception-v3 model on the flowers
-data set. This requires two distinct changes to our training procedure:
-
-1. Build the exact same model as before, except change the number of labels
-   in the final classification layer.
-
-2. Restore all weights from the pre-trained Inception-v3 except for the final
-   classification layer; this layer is randomly initialized instead.
-
-We can perform these two operations by specifying two flags:
-`--pretrained_model_checkpoint_path` and `--fine_tune`. The first flag is a
-string that points to the path of a pre-trained Inception-v3 model. If this
-flag is specified, it will load the entire model from the checkpoint before
-the script begins training.
-
-The second flag `--fine_tune` is a boolean that indicates whether the last
-classification layer should be randomly initialized or restored. Set this flag
-to false if you wish to continue training a pre-trained model from a
-checkpoint; set it to true to train a new classification layer from scratch.
-
-In order to understand how `--fine_tune` works, please see the discussion on
-`Variables` in the TensorFlow-Slim [`README.md`](inception/slim/README.md).
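-
-In effect, `--fine_tune=True` restores every pre-trained variable except
-those in the final classification layer, which remain randomly initialized.
-A minimal sketch of that restore logic (the stand-in variables and the
-`logits` scope name are assumptions; the scope matches the variable names in
-the distributed training log above):
-
-```python
-import tensorflow.compat.v1 as tf
-tf.disable_v2_behavior()
-
-# Stand-in graph: pretend 'conv' holds pre-trained weights and 'logits' is
-# the new, randomly initialized 5-way classification head.
-with tf.variable_scope('conv'):
-  conv_w = tf.get_variable('weights', [3, 3, 3, 32])
-with tf.variable_scope('logits'):
-  logits_w = tf.get_variable('weights', [2048, 5])
-
-# Restore everything except the variables under the 'logits' scope.
-restore_vars = [v for v in tf.global_variables()
-                if not v.op.name.startswith('logits')]
-saver = tf.train.Saver(restore_vars)
-
-with tf.Session() as sess:
-  sess.run(tf.global_variables_initializer())  # random init for all variables
-  # Overwrite all but the logits variables with the pre-trained checkpoint
-  # downloaded above (path shown for illustration).
-  saver.restore(sess, 'inception-v3/model.ckpt-157585')
-```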
-
-Putting this all together, you can retrain a pre-trained Inception-v3 model on
-the flowers data set with the following command:
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:flowers_train
-
-# Path to the downloaded Inception-v3 model.
-MODEL_PATH="${INCEPTION_MODEL_DIR}/inception-v3/model.ckpt-157585"
-
-# Directory where the flowers data resides.
-FLOWERS_DATA_DIR=/tmp/flowers-data/
-
-# Directory where to save the checkpoint and events files.
-TRAIN_DIR=/tmp/flowers_train/
-
-# Run the fine-tuning on the flowers data set starting from the pre-trained
-# Inception-v3 model.
-bazel-bin/inception/flowers_train \
-  --train_dir="${TRAIN_DIR}" \
-  --data_dir="${FLOWERS_DATA_DIR}" \
-  --pretrained_model_checkpoint_path="${MODEL_PATH}" \
-  --fine_tune=True \
-  --initial_learning_rate=0.001 \
-  --input_queue_memory_factor=1
-```
-
-We have added a few extra options to the training procedure:
-
-* Fine-tuning a model on a separate data set requires significantly lowering
-  the initial learning rate. We set the initial learning rate to 0.001.
-* The flowers data set is quite small, so we shrink the size of the shuffling
-  queue of examples. See [Adjusting Memory Demands](#adjusting-memory-demands)
-  for more details.
-
-The training script only reports the loss. To evaluate the quality of the
-fine-tuned model, you will need to run `flowers_eval`:
-
-```shell
-# Build the model. Note that we need to make sure TensorFlow is ready to use
-# before this, as this command will not build TensorFlow.
-cd tensorflow-models/inception
-bazel build //inception:flowers_eval
-
-# Directory where we saved the fine-tuned checkpoint and events files.
-TRAIN_DIR=/tmp/flowers_train/
-
-# Directory where the flowers data resides.
-FLOWERS_DATA_DIR=/tmp/flowers-data/
-
-# Directory where to save the evaluation events files.
-EVAL_DIR=/tmp/flowers_eval/
-
-# Evaluate the fine-tuned model on a hold-out of the flower data set.
-bazel-bin/inception/flowers_eval \
-  --eval_dir="${EVAL_DIR}" \
-  --data_dir="${FLOWERS_DATA_DIR}" \
-  --subset=validation \
-  --num_examples=500 \
-  --checkpoint_dir="${TRAIN_DIR}" \
-  --input_queue_memory_factor=1 \
-  --run_once
-```
-
-We find that the evaluation arrives at roughly 93.4% precision@1 after the
-model has been running for 2000 steps.
-
-```shell
-Successfully loaded model from /tmp/flowers/model.ckpt-1999 at step=1999.
-2016-03-01 16:52:51.761219: starting evaluation on (validation).
-2016-03-01 16:53:05.450419: [20 batches out of 20] (36.5 examples/sec; 0.684sec/batch)
-2016-03-01 16:53:05.450471: precision @ 1 = 0.9340 recall @ 5 = 0.9960 [500 examples]
-```
-
-## How to Construct a New Dataset for Retraining
-
-One can use the existing scripts supplied with this model to build a new
-dataset for training or fine-tuning. The main script to employ is
-[`build_image_data.py`](inception/data/build_image_data.py). Briefly, this
-script takes a structured directory of images and converts it to a sharded
-`TFRecord` that can be read by the Inception model.
-
-In particular, you will need to create a directory of training images that
-reside within `$TRAIN_DIR` and `$VALIDATION_DIR` arranged as such:
-
-```shell
-  $TRAIN_DIR/dog/image0.jpeg
-  $TRAIN_DIR/dog/image1.jpg
-  $TRAIN_DIR/dog/image2.png
-  ...
-  $TRAIN_DIR/cat/weird-image.jpeg
-  $TRAIN_DIR/cat/my-image.jpeg
-  $TRAIN_DIR/cat/my-image.JPG
-  ...
-  $VALIDATION_DIR/dog/imageA.jpeg
-  $VALIDATION_DIR/dog/imageB.jpg
-  $VALIDATION_DIR/dog/imageC.png
-  ...
-  $VALIDATION_DIR/cat/weird-image.PNG
-  $VALIDATION_DIR/cat/that-image.jpg
-  $VALIDATION_DIR/cat/cat.JPG
-  ...
-```
-
-**NOTE**: This script will append an extra background class indexed at 0, so
-your class labels will range from 0 to num_labels. Using the example above,
-the corresponding class labels generated from `build_image_data.py` will be
-as follows:
-
-```shell
-0
-1 dog
-2 cat
-```
-
-Each sub-directory in `$TRAIN_DIR` and `$VALIDATION_DIR` corresponds to a
-unique label for the images that reside within that sub-directory. The images
-may be JPEG or PNG images. We do not currently support other image types.
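-
-Because of the background class at index `0`, predicted class indices are
-offset by one from the rows of the labels file. A small sketch of recovering
-the human-readable name (the `labels.txt` file name is hypothetical):
-
-```python
-# Row k of the labels file corresponds to class index k + 1; index 0 is the
-# implicit background class prepended by build_image_data.py.
-with open('labels.txt') as f:
-  names = ['background'] + [line.strip() for line in f]
-
-predicted_index = 2
-print(names[predicted_index])  # -> 'cat' for the dog/cat layout above
-```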
-
-Once the data is arranged in this directory structure, we can run
-`build_image_data.py` on the data to generate the sharded `TFRecord` dataset.
-Each entry of the `TFRecord` is a serialized `tf.Example` protocol buffer. A
-complete list of information contained in the `tf.Example` is described in the
-comments of `build_image_data.py`.
-
-You can run `build_image_data.py` with the following command line:
-
-```shell
-# location to where to save the TFRecord data.
-OUTPUT_DIRECTORY=$HOME/my-custom-data/
-
-# build the preprocessing script.
-cd tensorflow-models/inception
-bazel build //inception:build_image_data
-
-# convert the data.
-bazel-bin/inception/build_image_data \
-  --train_directory="${TRAIN_DIR}" \
-  --validation_directory="${VALIDATION_DIR}" \
-  --output_directory="${OUTPUT_DIRECTORY}" \
-  --labels_file="${LABELS_FILE}" \
-  --train_shards=128 \
-  --validation_shards=24 \
-  --num_threads=8
-```
-
-where `$OUTPUT_DIRECTORY` is the location of the sharded `TFRecords`. The
-`$LABELS_FILE` is a text file, read by the script, that provides a list of all
-of the labels. For instance, in the case of the flowers data set, the
-`$LABELS_FILE` contained the following data:
-
-```shell
-daisy
-dandelion
-roses
-sunflowers
-tulips
-```
-
-Note that each row of the labels file corresponds to an entry in the final
-classifier in the model. That is, `daisy` corresponds to classifier entry `1`;
-`dandelion` is entry `2`, etc. We skip label `0` as a background class.
-
-Running this script produces files that look like the following:
-
-```shell
-  $TRAIN_DIR/train-00000-of-00128
-  $TRAIN_DIR/train-00001-of-00128
-  ...
-  $TRAIN_DIR/train-00127-of-00128
-
-and
-
-  $VALIDATION_DIR/validation-00000-of-00024
-  $VALIDATION_DIR/validation-00001-of-00024
-  ...
-  $VALIDATION_DIR/validation-00023-of-00024
-```
-
-where 128 and 24 are the number of shards specified for each dataset,
-respectively. Generally speaking, we aim to select the number of shards such
-that roughly 1024 images reside in each shard. Once this data set is built,
-you are ready to train or fine-tune an Inception model on this data set.
-
-Note: if you are piggybacking on the flowers retraining scripts, be sure to
-update `num_classes()` and `num_examples_per_epoch()` in `flowers_data.py` to
-correspond with your data.
-
-## Practical Considerations for Training a Model
-
-The model architecture and training procedure are heavily dependent on the
-hardware used to train the model. If you wish to train or fine-tune this model
-on your machine **you will need to adjust and empirically determine a good set
-of training hyper-parameters for your setup**. What follows are some general
-considerations for novices.
-
-### Finding Good Hyperparameters
-
-Roughly 5-10 hyper-parameters govern the speed at which a network is trained.
-In addition to `--batch_size` and `--num_gpus`, there are several constants
-defined in [inception_train.py](inception/inception_train.py) which dictate
-the learning schedule.
-
-```python
-RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
-MOMENTUM = 0.9                     # Momentum in RMSProp.
-RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
-INITIAL_LEARNING_RATE = 0.1        # Initial learning rate.
-NUM_EPOCHS_PER_DECAY = 30.0        # Epochs after which learning rate decays.
-LEARNING_RATE_DECAY_FACTOR = 0.16  # Learning rate decay factor.
-```
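-
-Taken together, these constants imply an exponentially decaying learning
-rate: starting from 0.1, the rate is multiplied by 0.16 every 30 epochs. A
-minimal sketch of that schedule (the staircase form and the step arithmetic
-are assumptions about `inception_train.py`):
-
-```python
-import tensorflow.compat.v1 as tf
-tf.disable_v2_behavior()
-
-batch_size = 32
-num_examples_per_epoch = 1281167          # ImageNet training set size
-decay_steps = int(num_examples_per_epoch / batch_size * 30.0)  # 30 epochs
-
-global_step = tf.train.get_or_create_global_step()
-lr = tf.train.exponential_decay(0.1, global_step, decay_steps, 0.16,
-                                staircase=True)
-opt = tf.train.RMSPropOptimizer(lr, decay=0.9, momentum=0.9, epsilon=1.0)
-```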
-
-There are many papers that discuss the various tricks and trade-offs
-associated with training a model with stochastic gradient descent. For those
-new to the field, some great references are:
-
-* Y Bengio, [Practical recommendations for gradient-based training of deep
-  architectures](http://arxiv.org/abs/1206.5533)
-* I Goodfellow, Y Bengio and A Courville, [Deep Learning](http://www.deeplearningbook.org/)
-
-What follows is a summary of some general advice for identifying appropriate
-model hyper-parameters in the context of this particular model training setup.
-Namely, this library provides *synchronous* updates to model parameters based
-on batch-splitting the model across multiple GPUs.
-
-* Higher learning rates lead to faster training. Too high a learning rate
-  leads to instability and will cause model parameters to diverge to infinity
-  or NaN.
-
-* Larger batch sizes lead to higher quality estimates of the gradient and
-  permit training the model with higher learning rates.
-
-* Often the GPU memory is a bottleneck that prevents employing larger batch
-  sizes. Employing more GPUs allows one to use larger batch sizes because
-  this model splits the batch across the GPUs.
-
-**NOTE** If one wishes to train this model with *asynchronous* gradient
-updates, one will need to substantially alter this model and new
-considerations need to be factored into hyperparameter tuning. See [Large
-Scale Distributed Deep Networks](http://research.google.com/archive/large_deep_networks_nips2012.html)
-for a discussion in this domain.
-
-### Adjusting Memory Demands
-
-Training this model has large memory demands in terms of both CPU and GPU.
-Let's discuss each item in turn.
-
-GPU memory is relatively small compared to CPU memory. Two items dictate the
-amount of GPU memory employed -- model architecture and batch size. Assuming
-that you keep the model architecture fixed, the sole parameter governing the
-GPU demand is the batch size. A good rule of thumb is to try to employ as
-large a batch size as will fit on the GPU.
-
-If you run out of GPU memory, either lower the `--batch_size` or employ more
-GPUs on your desktop. The model performs batch-splitting across GPUs, thus N
-GPUs can handle N times the batch size of 1 GPU.
-
-The model requires a large amount of CPU memory as well. We have tuned the
-model to employ about 20 GB of CPU memory. Thus, having access to about 40 GB
-of CPU memory would be ideal.
-
-If that is not possible, you can tune down the memory demands of the model by
-lowering `--input_queue_memory_factor`. Images are preprocessed asynchronously
-with respect to the main training across `--num_preprocess_threads` threads.
-The preprocessed images are stored in a shuffling queue from which each GPU
-performs a dequeue operation in order to receive a `batch_size` worth of
-images.
-
-In order to guarantee good shuffling across the data, we maintain a large
-shuffling queue of 1024 x `input_queue_memory_factor` images. For the current
-model architecture, this corresponds to about 4 GB of CPU memory. You may
-lower `input_queue_memory_factor` in order to decrease the memory footprint.
-Keep in mind, though, that lowering this value drastically may result in a
-model with slightly lower predictive accuracy when training from scratch.
-Please see the comments in [`image_processing.py`](inception/image_processing.py)
-for more details.
-
-## Troubleshooting
-
-#### The model runs out of CPU memory.
-
-In lieu of buying more CPU memory, an easy fix is to decrease
-`--input_queue_memory_factor`. See [Adjusting Memory Demands](#adjusting-memory-demands).
-
-#### The model runs out of GPU memory.
-
-The data is not able to fit on the GPU card. The simplest solution is to
-decrease the batch size of the model. Otherwise, you will need to think about
-a more sophisticated method for specifying the training that cuts up the
-model across multiple `session.run()` calls or partitions the model across
-multiple GPUs. See [Using GPUs](https://www.tensorflow.org/how_tos/using_gpu/index.html)
-and [Adjusting Memory Demands](#adjusting-memory-demands) for more
-information.
-
-#### The model training results in NaNs.
-
-The learning rate of the model is too high. Turn down your learning rate.
-
-#### I wish to train a model with a different image size.
-
-The simplest solution is to artificially resize your images to `299x299`
-pixels. See the [Images](https://www.tensorflow.org/api_docs/python/image.html)
-section for many resizing, cropping and padding methods. Note that the entire
-model architecture is predicated on a `299x299` image, thus if you wish to
-change the input image size, then you may need to redesign the entire model
-architecture.
-
-#### What hardware specifications are these hyper-parameters targeted for?
-
-We targeted a desktop with 128 GB of CPU RAM connected to 8 NVIDIA Tesla K40
-GPU cards, but we have run this on desktops with 32 GB of CPU RAM and 1 NVIDIA
-Tesla K40. You can get a sense of the various training configurations we
-tested by reading the comments in [`inception_train.py`](inception/inception_train.py).
-
-#### How do I continue training from a checkpoint in a distributed setting?
-
-You only need to make sure that the checkpoint is in a location that can be
-reached by all of the `ps` tasks. By specifying the checkpoint location with
-`--train_dir`, the `ps` servers will load the checkpoint before commencing
-training.
diff --git a/research/inception/WORKSPACE b/research/inception/WORKSPACE
deleted file mode 100644
index 2d7b4fb25..000000000
--- a/research/inception/WORKSPACE
+++ /dev/null
@@ -1 +0,0 @@
-workspace(name = "inception")
diff --git a/research/inception/g3doc/inception_v3_architecture.png b/research/inception/g3doc/inception_v3_architecture.png
deleted file mode 100644
index 91fb734a104b2f63114ade7c8f9b2f95ce6334a6..0000000000000000000000000000000000000000
GIT binary patch
    =USv!8of;bcmnqfk0i(5G6E@e3bc zc5Iz=(F#q#eI2tkflnME)18{=%!*MqW~QcP7z`{ewv5CR%OeTZ>rF2#EQL=HUDH<= zc4S{RC>P0?0xn-B9Wup}x@I>JyScaMux%n-q~;w<2C9Ieo{khWP36)B+T^clr2Cl@ zo*k>zL@fNl!*bG{nq62}cKrevlpm9T)uEvyOu&wt#^P08kJF1w%bo5_tIM{)Tj{b%pn;mU*B z`7c!YPLHLF90&skRZk-k2|W)B`%6>Z(a+hLJvyijz$1gOf|-O@J;~t0LE`Eh&Uo?9Pz#^YY9t)IHGiJZ$ z#%%L3)|5H0V~t!tGRern>KDr=-G#+|+urSVNVZrMxo8dBe*rc$fxCb##vRMFE)WYC z3I%qM4%>%v?Uz86;F*$^>m>(F90Y2hG48w>S^Md>f9WPgkL7bW=<7g}-CnM0h4?DV z&+V|Q#CRhhsZqxC{OtMp=~;J35?M+Jp|y2?xgz*dk5X5Dm95&TDUN1lH_t&?VrIbT zD=PdtWX`5VkTPMol7VTa?!&#Nt&aJg0&(`-Lc7hf&<;@;gO26LiNR7|qhiSt;Q~3; ziMz=gM+b&KF>Sa2;sK7XKk@)o^cY1$(5&=l;mK{nakT7$VafjIuXxjLJK5T zetV~E*iSVKsMi3_@mLnZP#~*2Lym*3IKU?ru)$zSYeF&LkzlOXc5N^cFR?#P&jQN7 zaVa|{-UF6K(B0H-BmZ;HJ?E5LB$ASZk}GK_QoC*A)9VxkLvkmXm5?|m&A>-{Ptql* zp4;PN*yYPAnh`Zorr<@J84Y4I^Dy=1r7wMZHRFz!X?g{F7X@Q9*!iP-hLJN?1hH6* z{@_C@qTzCCYKCn&Rsx1ARF_a}u7s-rPa{+wNppseanSm4F3%v*_UO#!=v6 zIj>#%Hu#tf#YcvIe0<`GC-`H<@KhWSD!Z>8%-6bP?1PinkNW(cMuVuntICm zL}w?5F=mqq#x0Ze_JKop^@pv+rC=*axDl*6JbJif!0DRZX-yNIg{76nh2?#F578}$ zgHS`)QDWN@yetQ@BFQ@uSSL+5b@D6u?svbdEhP~=cOf5vB+$PZ`b}?WT{>X4x^;xw z-p`AXH|!hj?j47YFo!vJ!h8=H>U9F$%v5N8***Fg-0qy4pFeQN5!3sCL5XIpFGsl`Lq9d$H zxSSsl&ybl@ScywS2Uox`kexVjLP`?ka>~5Ud4QU*v2K{@F{=?5646jdU@}s%+X)C` zgLBtf<7)ekgGbJuS#a)VoGlTnmNELUGdeTG`ien^{hv;ur5`?WxBP`sR>=bJoq#0D z$VLlI9XWZIi57}z4swUvGX$JvSjd7v-h5P9WlN6$rndju$bBj~^?yxez^^W5zn2YPyCPx+&F0bvOn^eC( z3ChVzO*-Q`pcP8;+{t6f!Km}POUreDR@P*5N|5AX@?cP-IaEbx-G^8Evye3l$*4|8 z5*|5v_rg-2711lOp&ortPh9RVrAG)!&cB}(vfY`YY0waZh+!!n^QcjBmVcGcWEMy) z{yCAKLggeq9k7xvTeuYAD(l+z=h4(lTS}-v!kqp5^UvGLnRBk>qx8&qr4@yPh8iX& z_v}01RA@sNvLJ|Z)F_0n%>*GC|6)%J@8b#F>4tAHcmBd$F?%`>9=zlH{DPs75t(iV z9$jN)*e7Of8njCXsac4u#?4IgRu29)SPBRd_p-oGz-p6LmvPX@l2?q}KxQ?e&wh7F z%`>x&VGbg^^wLXT|N7UBl3)Dd7x(O)#UBXD0PCiP)~xF$if5;TNU~3pC}Z+PF@SY_ zhs9OruZh<>a+!P#M6~I#pn#pT`yJ}|@WT&&@rz%Wo0~Db13r7}dw`GG2cc|49yRjt z9J)jYPHqwlsR3SV-rX+$8d2be4TskVpWDS=B?S!jRiPAdU4ZMYX_-2F3VMbr5!o&| zB3>pWhJ_p%%0Ypa9Lq)46Pj1)WW~gNWoY~$ra|(?+oxVV zdGheFhb(mS{t69i=R(T3#9G{tAmmS!F?VwAgfV{SD%{DEl8LBf*fUSC?KjoROji^3 zB~M`lubrwp#*N^p>nv7}-goTune%7P#KC77@R9w>zqrZ7!K(dm5P$mF=Rf$7kH;Zq z-RZJO5DIIW&n__vjf>slZ?;th8mMH0b<)9;t)G6JHWySvnW%U71yFH(=9I^u3)t60 z=h%RE?mV2?cVO?{eLQv-&F*?#b54si!=*kx>V1G|_qzN=e)#V{dGMk4hs#&#fw5Wf z>N&TT5$aN-w5YTg4zP@ayeqKzS7zSS@bbC#>+AXp0JTGc1 z6og1;Uv8mfobtT9didVs@18zqJuIxo(Y?Z3r(=_~w+MnG{bfhxKL66o4?g%%nQuos zRt1@LNz4-@Fk1-ZCFk_pOMY@KG9Xm4HN}x}hatvfUR_|5sOO3#_jeG0VnK|Rq%o-` z@44^3H&4AYADdJxX4%3LCQg^5I(5RNm+7k0n>Oy8eErQM?>mN5tRF$`^)OwU^6S4S zzcq<$6)MdPnc|r#Xu`;7*{_PSP0P7ai3uke#fauA9tVbi#Lq-ZJh8}Z700WG7O}#3 z(V84AEPeQqk3aw7D;(-4i6_g1TucXH#V9>4mXn<|aq85&&p-F#zQaf3Z8)M9E6KPK zTs=J5tyHSfR@@~*nJ947ufCQI-t+3K+wl@YdQ42%>druX?AS5oS1fkCu4aK&Y+~uL z)w%zHW6wSRd{T_UkVi|~WPyN#@Y>_O*fT8Nb?*bMUihL?(DcEgDsC9eniX7OTy;p{ zBi&6LJQyl&jhT}*-chkoCb=xb6&An4*%LyAW@{I5)EG^b=8(+{sMp2vVgumZf&B+5 zSM+(dePbU1)*r-CAt<)A)VE!OP4Zv=<;jnJ;*+S0b*Oyp$cHkjPTnC4k5tQ|G>0Tq zvN#*t4zuJ6i%a_WDBBWRS!^RQ@@}*o1hdHPHMX*bY&Dc?opHeQ3?~3`vJ7&CP0=CmJbL%JbIt^qTpaXSim}sJIo0h!0c-;8 z+Fs9>UwZYP`|g#kNr_Px-EzNR0iaY_zEr+d3lAv(06+jqL_t(CaWM5j`=4HY_0?0S zPI)Q4S@;c!UychZz>+u>7tD2F{bP?k=2$I>GN9qy>>M;P7Wjs(QVS7$uo^#JFD&76B za1dUC2sxSk&W!nDkjsg8o-*KLh>{{?E|&jx`fjGc8ri*@0lf{|^`SuO!FqM}aL#&m z!5HsTOn3srE7`W&?G60(Ujrwb4S9+zw>p-r$(EJL*~9mI?w7y#(;xr)sW)C*bt_YU zadF{16O}E8=gyy5m|uGB)z?qId-iL8_+Q;b(`xrXw&Th|3cCiBfqN3cm`77F#`}p} z%{;p&ZZI>IGIZInOhEy>ki#$_lgmnX6g zP}G?F5l3Qjb|@TnX%irJYDe(EzP-+1$_w@zW2 z2}6HrVPXE9zHfp-`(agl@r9Qkee}^ew=`%+V}0G{F{>E=+_ai)B&s*{Y;?$ootV6d znG|bjXjS7kHiBP(WMoGuUW8yyK_Mbs1h!E+3$RYWLK*#Ndhh3-_{uZS{NlAY-dvbJ 
z&yCJ0c!G#-Rl?%yufOT=&`F037Fon^GOD=vGz@H2Y9*XN)y6n3DLG-wzLU?3K)Ykf zSxb^dH>7qmTXRDYPa`8}L6{jTI!9&0Nxi9?WCqH}Z)H|hzoHO^c zSII7o>)!8w|DSERu{pyjF*a)0 zvSB@yqy;C2bA_JSGk3>B59>Fo&VO0JC7&C6lN80~XhNSizF4YJ&c>XVEPSUjB+-;c zPXN;86q}z=CoJL6qQCN$uhuVwKc!;6V#B+hlbJXfJPEuMcd$`4L8iDU;gr!gO9P3Rx=OV=;Ffs)KsjRTrnT7 z1HxBc1-_t?mWd-V?|b^`t;?t+h5mKpyO>T17CWo@Ww)vcmRiw!qFvU%7ON z*EdJP&neGjk68h<6%iua+sbGWDLVySsG-`1tF*%E-)hb7`Ms}x_4U_Yc=yfMd6lfL z^z)ja_M}Tq=GvX<&;9DJxAq+hTUgv=Qp_v~%19f%TDV}E*io{z*xG+4?`qA&d z_QnfmPM$QQTUqVf_34yc{RmB56#2Pd`?c2IgQXJ%?JtRIf+t(jDq&;G=BetO2XmDq zc!l|r1zvrItP4E$uTVyhk?rGgCn+XF*0O{0%4!3#6D=3jkj-xO<>lF9$G@`YE7l&I z20k@8xv;wEx+R++;xeDk+@T|f?|S%G*zN-uR_0DPsP0r_PfQcdd?s14EqP)#4|+on zD&f=)lA>AZRmOk*^PjWv>5_>~c1DP-Yg9V|e9Xy;29Rtlzb0nxzyI;M-+blO7fziz zNse*7H{dzLAZ}Kj*|&G^{)a#PDK3uWmHjO3-t4Ghm5+Di0dbsyvPhzz9W4oheR=c* z4rVr--I02Cun=czxq|_CiHnl6Dehnu7cDBz8k_v^7rwCc^0P0#^c*uWjbdSCVK^K( z5OsNS1s82RpStIM$4#(Uu^wen(M`hWU*sU^rBqDJ0x)NtI zs)0wyR+KG`y~EJ+*kg}L_eG+RN=`Py&5GG}+B!OyJEeOwU;fYk>7D0Ky!Psg4x5;m zbT7rw!7y<%*Tlr!o&yH%-n|dTwX0(uDdh?cW0wms%Ai0ZUL#bOZ-4vS_uhN2m7Anq zw4V}^l9yZh&gIJnmK=0*hT-^kfA@D2Y(25olib`MA46|akRzcF9{jEU@?U=N-S4QV zXPt$w?u6A2&B!Pich(VJU-`;!xAq$8;5|1K%Lok)Mt9yBx5enEDPsfMvRVm2YrqmUgB=iFmxSqBW`GhzV$*2&b+jM2 z|A7-HPMn-y`0QsteX(kj*ZCGH5aD7uuSNPc_{b1}k5<1RxV#ceP(pTOr8#(qT{R8_ zznMey?$RJl?O;c>qkq0}e(;&keClFoB_BPt0iRrFK3)u}+v(e&z|Gxuw}J5OPqwCj z{+u1#==Ic7Pq|%vWBwS;N~n>Ic`lw31793nTKJz>Py zeG1QY%-(G34JCz>{0!}#` z0Pid&DOpOEpZpy)$dXp%VG)UuZ3G41oQN^jPwPi}^`pCwe)jH`c&LN zB_t#!Mb4FG-SD=;){L*is1|ZrkwVgDe;Ey9LF}?t>=@0?VgK6q9DCrNU%h0R-8!`& z4~Uoi4A*F|<#)NcxQ;ae~+wVKQ$(Sad=$uUu4 zzP&2OZCd??#F0PV5*3Wa5n2f{TguYH4L4lotJth>FIn2P$bdC^ zUp6yM%TffEU5sBBOWy)8{UK)wt?th1s#M)qg`^%GbPn^keq-KOtS!`WkEl^i9wjqqC-LzBn`-8w?6yf z8*iSp0%w)$fd?LN!)n|k7%r%pC(DfQ^7Gb;?Ks2L)g`UKQgtkVF`KST@`G?aNM~|5 zIhYu(_MEprTwR#xdE2((VoqP8Nv(DVtCOHnv}3|u7ui`Ewyn_=1}(d%7dKbqSS^=H zI$|rVnKF+y*2FUB!0EchzRLw&&?*+acfxqwpS;3R){$sr%AnI8xFW-BX=!P;JJnzA zyF#|5Em&3PEI7u5d)LhDzQNM@>6y9T_|;E*`+ML2!ILYW{H4#tYJ{BJ}b{Wq^1VzZoAxC--k1AK!fI^?&;3f3ieDhAgiv#lhVZ zqnmHgr%149d4;)`31iS+9ke^`R@^tbto8IfCMV|y!>O5R+vwu_83#FO;Zt$)TWeu) zc4q(C;p*(vzQ?}siNE>B?>+g0Azieb_l1q4lSs1<+VJU%(vn;I_aC$tg^!ia*OHmH z%iduMjI^j7F7H zv8-~+R!x2f>CT@_R4?uAc9*R5$0@<#eQuE8d0o?QRHXJtOvEW0GOQupC}6fxuc?bB zXKQeZk!EY$Mrl|2iRw59fx#)j**&kcg!gKGja<{pCmfAC#DZ18WD{Ctd-KecYfT4` zWEC07W}o2MoB=i_h6Gr)n)zBowfoCYLK#*BE=Z2%*n&6&$aTO-gE6>dv#G!^GCI?? 
z@(#3`Rsw$=K-&FPpavkV|CyE>dMcrAAmq414(NAaV_UI8$yfab!gEtpZCTWc2t`X@ z)A(stH@uMp03^=Fu@Uh2@#ER`c959G<~1^kDOLk^Ai&|W)_%v~!-tL>Idt^MfrAI9 z9bk1C@&~jABZptR6xueIJB}W;#q#jcqx%jV?6ETEE}QXSarIabQ&mPOjh?_rSI0pq z$mNNf*I9p1U326Gq@UR(J%xOG>YY>0ny@Tk2?wZ*lIWkpWNYHYNg)Ehm52`=yzi_J znA+0cT8_Pv!+zYOKmw5G4g?nz4luRHj}q2R0@(q(F8^_D-u&C$_EKx{?e5CCsn&dN z`AmEH%yef-;^gvqc4im9296AWZWD!#R+@gk?7YQ@td?n3PCaO4RV0q{a-35+WLXPw zY9tEui>dq3C!RRJF#nx@_QWIkPg?`Md z!NrU7@iLoSZM_*84EZ1#%dUGJxJUF5>-LF9mc^ywyNaWDPV*S z3Wy+J_QA?2)VuzCthL{cTs;M@hZ=hIP~I-#dQyO(*V~ zjT??aa?rtZv?%j|8xGIq1E8_pbI(15KiBO1D*Hlhlb2uAmFBGD&ey*7H9XbG*U7Xi zE%3dUzY+vHNQS@t+rMQGnwy&=%2Hlc8FC$PPhcuYY6KN{oOJZ)qmPOcR-PHI13WiN z6*=f2%kksKEl)C0F|uHsl!`)=sx|RiL{zFSeJyLxrvu7O2pN5H%6Hv>xfTdsqcW3j z)|jmVTG_HE=^9mZnZ@E!?fV*-VE`$ip0reNGPH)XCVL#n!i1S<4lDuJNQ=o^-&${F%J-mbKCIFaPo{zx1Uq zIi^4c0e`dAd2U*r13;wblI)nmoOB%r%?ovOz8NlX2wmzgxjwMpAMoUrQ_1HS%iR{` z(n!|DIItau%M)MNKRE`QQaFEPvfF>><)6Rx%5%$$^Y)eaUZq_(&Mlnp&FwpI~3#qA#9!G(}0({nMMm~(rbr;tP$(-R_Wkf-bz#8B?$rhI5ZXRt0Hp!7 z3B?x`ilKcY(_G zxP;>tgf1ckGg9j|wh91%ikTi;4U~4b5twCF4f|mrCJYO?EE=tZSyoZYO$=|d|9AWL z?aU1nX@jYExp3Z=^oojhgfJ@xS4b`U3EBahrCML%q*AR&A5>~lBTF^xUy-w1jMRdT z3RPc$SzU#L8N@r7fzP2J7*lSygj0g@)4u6z1F5dPMkQ~)Q6 z94H7iaox3V+LI0JSt&?$s?xCLt(wEY09d6JK}5!Av}M93Xg8!G7MX)@l$r}sV>qD^ z3ie#2Tg^z@s5~sqKk4zjsXg>S2HC>qGm2mwvdhbi3tS{_+ICCU&En1nh3FoZt|*wD{cyHoHM zRg1aMc#leZF~sJIr84y;bw7+!p_lafh_e$Ut_221HX9XY=E@#H2GRy&vD!Vga1>L# zP{~7qT%JR;_xi_g{PxuGPe!8pwEM($ZtSX=bAtRET6JLkUo?B!qn!gDb5 z#bQAQ2EgHT5;=)P;!cc=juXiMlV^fIDn?Alr#InCj7RF+TqTHdEE{B)jDv7wVjN>| zLez}F|KEQ8b6|M)op%I|6hpJFEnM$?pbis+o_Hx~268Q+BX&>Wv|G^;qSHwsH+1A$ z97YNufl$#DhzeJ$#bT%@LWf7}?-+EFM%D)WM(V^-J~uN99rLxiQyd-hf=J>@n2u%c zkdf6L&L|;#1;*R>vv*%T_SyUEM*a08&c?A|%i6*{5VY9Xy1uw3=bZoQ$g98n?<5o7 zaB7uPPb8ECHhNjtX}I~ZiMm_&BASdf7)EpAT1{3G;yUNY=Ar^rgNU5QQ5qj}V8EIj zuk!Ps|C|QAW9X=Q!a^hro{kG)7{2Km14A=$*QmQ|Gcsu5PJ-dg_IZ_VcWr~0-!Nn= zne@&Bv9VAz8LAm4z!mBych&HgU-q(dv`;d1_@M~Q*6I()SJgsz0K#RLewStd8xY8@ z8{;_z$vD52yTy{ySJB&VG*B;9P#5Da55Ry&CleUn>nEiwF!R$gUcaHb^UGJXfD=5Y z*!HL)H5FsG1$avuIvvp)aCI4&9Oy}VVLvuRkk+JL;1~2oGF{2*w>E3)y)Tw1e6BG+?md6TO%mvH{;lS>fe?A-VIbgE}hV6;$K{ z7Di=w&?R&=G+LOsu%&j#h&^YNaxV%QI(*W6{Pq z$a_RC^u-0ZSST(s#Sn|3*j>d?gTId)3-0-a;z$tYuFjUrn2HGw91`KdSZ?toW)9&7 z_z{;z#xGr-#F^LkBa5iEmJ05L(d*@tAHH3=dTHy%^#B$``SG6;FV7Z0*k;ZLS6pWnW9Q!dEHMhYXPC=41o*PSSil)`Yl znA^R5i=S1Gef+i)UxOeXmSs0>;Dbyo3#jIM_wL+%wNimJ#>qJgS!eJ4^5e^aG&1$#O@#xXRq@$&+tEq_6aHS&;txv;$R}%(A`V6lj2HG*e zyoHd&j2Y-CHydEu(|!}55q63ed&jE6@Q-%Rz;Ct8#m9_3Ar9EMh=4(KQFgoQ^iLN& zsgX~8s(1AkYjJ{2me3P4#In8pG?><@30UibwJ@2X{@SW7v+-uYfLR#szz2&KaDw*@ zn{90eyp7NT74|#gV8BaUw-J&qss?xh>3~V{f>q?$U{K0TZ~gl9h)^yB7gZ0Y-Y!sd zTe5%<<#y1*GZ)G?+gDjn0v^Scz8?&D)I~)Kk`8uwz~L&@ergs_tvdeiEA|v=RySaA zixo<{kww4u6@Z%-t~AoxIl&S^N-}^{;%b&*XKY8EVg`)yz{Wd*3lU7VJ;8vVtxVtn zc4D#3^4NeOSzXnsfP&$%%%XsSwAEavR`}h(B7|UFRj+Pr0``< z5h%)DfGXnw`xlJ|rm-NKB6~XT08}awNX_ua$Gf@oJi3 z4#ojBhzbo(CvyTXDxJG9b#Zd~iD#a5B5Zl{gVd=ju>e`0yLj%zmW^xkIjo&w=7&V8 zaHE2sFH|@=PnHcZMK&IKVAtiZzjCs&I4}Di#($wSD+JIX3^(oENjTe!GqbT9)Wcla zarvuY)W&}5_;dAaF0D_^Ri1hNMW-|(89ld})F?R&bDWmU3e6XvAE&5=3pBUgOuRT9 zl@)0S11J{%Md{cqN$IY4n0C+O-~agV2{7kH#X5#z7HP1|a`2-^#0U;1%zb@f^5W(4 z{-N~-S#s{w$!%LUQ_V=R1Ylg4dvm!`5`*uEr3T^) z@7=TO()qLEP|PuRoPj3>9<)iKhBR1hxcB~DjY8?-Y`I3_AVqum z+{@MKQ!l*W6h<-}kJF5`8AHcV>8P^?ss;?N51_ii(z^tN@6=~?`z>HZIO_$K9w-J7&7XV=8$E|VzoYULJ0x(UWFI>EM(Q3Wg z3wm48ywa$PaR-bk0+p;5ne!5VV}B|_>>WA4DOGS954_}VWk|nKWLd(}NGaI9vIRVv z%qH$PRI8$Vl?mLz4*-KPD5w%l;kQT|0Nx-Ugi|6?9g&VOcVhQcMp6l&aIOm64h%@e z7zh>ogjb_%_0GD>(18_sZw^#NdB@;=~Cs20?El-Vh6u zlasjV5F8v_0SRiVN{j)bj(Ls!Te@1)3FHQ->XnRRvhgEcI&T*#s7Q0!2Y#|G11Z{G 
z+Q6iiatoqTm2(RN;GwfeQSkoz@3Ya3JqO?i%OHTkbqc)Z;-$?s??DbKgC~hMjI9rk zZ^uu?V>l>IZ2a?|{nc#EJ$U5H6Q|D~KK|90r@ub(#i=h&o%`zCrK4Y-nN8iNpZg;x zEIOHABN)VWlU-X4lza8-D0B*?d@7bgrxvG#xow0_xn9Ms)#FGy9A+_VyYd*dxB@EB*q{0S_dY*% zcWsNPzkR;6E(M0F@Jz?MnFoDifdo!2o!j&oqE%Boyp26wafQu=H4>(Lw-I{ap>`&F z_{OR>xr*qetj@FA;6PEWM5qQgclnanm&WjIorOLOstubM!Svt(&DnFtMyEyKQ zpU8@|<*^QUOwCvfVBFNiH?4(SM2wa&4HBn;7U9B4EC;9+Vk-Xb^FKKGAH0C`ww!z*a5%ZiwmG=L9oOx)arcAj$bcBxYF& z7U5x_uGY$o4zvqP08YfUdX4d^PFw^+=K_y}!tknb8WG+$4EO%v*}ZeKL?o^!HG=Sv z3*RZCVF+4EqmIuJY|Lje;wD#$?=!nA19r%P!H@gfSjm6(AE7Ce`m*?bzv5nvP{)>(3Y#P@& zxbEebUwQV~9~4UgDpdhy&W}4z5no?1<3_&6se#5iOeGKvPSDzj7lQ^yEi5L)NtTQ9 zSsdfD?nNPM0duvO;3FoI3<)S1l!nBSX0<{6;aOhvtgpYm;isyp%%Up#e0Z6}jW){}Ov_Cl1;Ew|gB+qZAmafb$i zF)o{?uT7>FE~=vzm;~zvu}V}@sf^+)pG>No&z<@T-W^-GC6Hx)#vL}^Vk5RN3l#@w zUROw<6pVz$6EKv5?GH*#3*qp=A`MjFy^F@h0ToKMb|J>sfsqmu58lM{6!;;f7Qp~q z4RzplR!63$rtqd;BykTvDNES9eSm@}@C?2R7n+X7 zreF8~gk1WbqBP_6?5$4=Gj{jJLaPtlZSC0B=XvzoPuE^pVPKWO^vuJV=ELVW!G<#wnx1iOIW;eK?;_pmNA@_pUQK?i9x>ZtncG z%HI7?xog(>>o++Q>z%ORct9Hw9RtVafa(a8fr~U?$?Unlb?fHMTfTMR;}5kXP(C;T zIjGRsAKL)$<0CavJ9MN{mF-j}cy}{AWDg(y6gu(-4M&LC3dpIT3MR~TEBGjx?~tE= z2spx&lBpffFadUaCpVgfqtW_p#q~SxfAD*so}3ECw-z?s=M*u7_sy~ ziwIau!=-5~IQU46`qq1qqnkha=pz;(jJJT$ku)ra42+JqBEj%|*BH1#c643JFypGj z0DT*n>B9&hm2^Dw5(9_ba=7XmbsGT&b=dXr>XOA+Gp_1{x!)jJI#Q90LuR)-VX*pL z?`kg}yeaBzL$IfFC@??q1Ldxrf(JT?P1C;ztwkxwOphpL{j2M4Dj6aw(r6S9bUILn zebZ8H;llu$4+Ys(KtKJ#x+tsUAnV^zg zC#ZbVuS*uV!UWKf66l5i#(4(Z+e1sQu<*M(yLL40AdQx?MuL%Dsb*g)1_nb)Dn(+q z?jw zo;VdVV&M3>YQm9ca*Q5zVKls%I)!;pCOEP(U@q|+V-_wwwCXgzWLyT4_Jg? zWQ3-@g(qR;^*M^8?ia_B8iXxw|MsJouT@T+yHbnYMv4n?h$D6_j^*4tCng1EWeCX~ zLh&b7W#RIv{OGqjC_`@|kSE$tzpE$^Vh{+y05?Yy9 z$h8Ffb3_zl!b-e*;NzVSJyd0NBUi%BSpHm(g#krBW@jA2K?tiOFqolZjXS|rSZ`y& zhGbT|fRv;ee6LhchxZj?pd|rTSj;f{s>J|u&}t#f6Yyl*uz@A4W)D|+Z4<28R`wlm z5QT_j?+MO~psO9LeXqMUtWptz7)5hvCSDTf5QvLp+%0=Em|?3@CrT}0kOP~t4fQft zVGo^xw`41ni z9KKWxSkizrdX^$k<4&+QnKYWfB6cu%qf%4uce)ov<0PWUOaat;$WJo zbusX%BkEu(y_;&GGeW_6@ofjtKnMpTSzdyIfMI_Ql7(n23h}(Zj&miBy2fX=Dcfzv&L50RtJwXFu}|_zuwtSI)V&0I zZkWSJ>xIQ9o_hA3cMnvtC3>7c=GPj80?u=?9Z@I5ZfGts9yT8`gudG#Y5>x|`4ZRE z%*i#fLdKtyqY5!E<1H#yWB&X!j`Kl?Susn}LO^WVFvJAhaB>l5N5WwQ@4xq9X?)#% zJHOpLc{U3k)F{&W^uV_TE zL7(ZL3fgM6{Hs)hj#{1m(ibBB;nje8jR`@j1nPW0H*iB>{%4bY{I_Jg5gcZ3+`S_A|AngK3S zny93Duw=48sgT*#_ylib!ceh5v~)rlu?V(&Dr02TKSZs6tD|KE-Uxl2V;+ z_@rOEuIrnS5v0LpNUc++PFdUzpy67Vy{i+P{v1H+PkoqQM;v}0-%5msktOM6q#&+Vk9E5cF84k^(28xX=`4jOy~s{ zVJ0S2E7LK6;SJrhTd34Thpon#dc{yE{Y=z=b&9;NaMxLpTWL3a`O&r za_UAHg=;~GeLqOjDh}P`1ly1rhwl#$d^kBh_3-15B}7Zs!+W5k$!u5!76&5nHYdwX z>4>`(7Jo$}?IKVK$qabI+v>xBksqrM>>&36^>dQ6BVW_Rz&tt8ukF(JG%%) zBWenJZ-GO@a-BhNfCbZkJV+W>o zu*XZ<*nS^ClL?lEf56a1U|Fbcrx8jQhAQlg^VEiA6}Xbb=zdB;+W{>vt)$p*NM7U^ z%cZ)LOcAKW?RWFF7Q`mvT4gD3KKbOM?c2AZ$pZ=OWafiR(?l#R>td*A8s-=&sx$Oi-}fD;h23tJ#^^(_uuCf;<2$2 z_Mrh7mt+E9jF@86Z2g%s<$yiBmpKHDRGAa{VO)(LdE^l~ARR&bpcbO)=(6zEMPf@w zs#e5O$e~X^A>QuVb!!N`+e(1Dv68YQYJzt2XDq7AtU%FgivV~@CL_QH+FN4 zYw^|l9=PwGZJYn;rC(O#nomsU%pJ>*d5Nzs!$M8sONcQZvR-E^gI`_~n1TS-;=b(? 
zvY6n-@`tTY3hHP|!J95Qf=1n$qFC)~Lm`dUqIZVa zVSva^5DvN&W0tq|4L|Y6QJ*0FG?=kX>*9{F0HY60Q1Ciw%P@^%0PJz`Ouz?qD&d(o z7L)d&uNb3hr62-9<>z4P?Ny_6!UNurBMFpVb^%(U6l8sieIzQuc&S~W z-)*Ei?RN}$tgca={_JerUvlk_#!3 z$1D~9)nET5F$b^`(r{!T?InDzM6fk~&0>uYtV&J;wm4bin!|G;Umznv-U(SCfSAE( zz#cQuiyAE*+35D*gAei(fPoNN>$8Hl^1D~hLfn3n)Dg40o7NNfrI^c8^4@*>o_PE_ zzxvfn*b@lTF*`FCMTNE+l_lEm!X@}@%esP)l7GZc98Y5G#o}EK0fdSsoU!Xi5p)zM zVl3d8^=UN?JaYIj5%z!jAAXiA6|1QnmVgU0QA-SGNL`eG!Xm}8R6tQ?1YAf*lo^hs zK62!UlwdR*2*Z-&n|#Q=x` zjeNo_NEJ;)=S@@gEew1*>+5%ocUk$0K<-2dM~<;%qU6)TXfQ(n+=7u2k9 zq_nit2jbgeXYB2e*CC4NdP+e{I}2NfCG;Yhn38V%>!j3qS?2xhq=c2GvA9C@0*zXn zzFcY1OXb&E7*yRg-gkC#xB_lSk=#$~K>ZN^&J_b*%jCD?0#ML6(1{+vcV z7vYub#e@#PY}ZIP-?M4|laIgk#%t^@jdGZ(Wh!dJ6H9~Co7E*8LE<|xUZKPf&_8iB z#=IyNM(Pgu^RW%fd1Y~s#TW-O*LMTo!BN%8a)o@kI``^pzrQkd<+91)y3e9rs{fw2F|L50xBScHYbV!<_k z<`vJWpQL_WJbzhBz^mPyA_)5sO_~OIe%^H@;Bg_J=NyvPe*e2G7cTtp#TUg3UdqXe zQJ#|riH__E4=}lsJ2q>I7nU(+hC?9Wr)n%7&=GW1sufU#Yrmx*T|f~vyx&F) z49&5^dY8Al%p^7Y%J(MJ#)xN2{T0g09yv^X=@zo<4xtMEV>Ei*-0L`!8P%D{~ zKq$DlGY@wic7pR;kp_mx&dymhwh$cx?aw|vCD;nC+*v7;euMej+;?S+6D9a(r#t2))G z63KgD;6BtT+cCDtvvvVI=)okb@Zv|D%|3`ncG7LbNNIA&rV z1|!F)2q6H{Y#&Gc!>@Mx_U+8Y_=GkEUt#FPg4Ry!W#PGHRn+MK#$xeo#jc1wkuvc@ z#g55%j4e*At-jBvd$(?W=9yUpXe1qkLG%qOEIGPM&C5`ir9{c>2S64<0&_>t5ne`+I^2lxU5@;o~7Q|YMq!4dn|v13X;aH!UH{WQF*}jVjU7MFNsHl88AR-#On9YVZ<46*w*%G$LM9U5A1g zsSs!45uMf8%i%Ptrg|hCpAnD5Lt-IRgrmcOtt^Qy*}{$!AXq^ilRY;7q88}T(sAzY z6GA2iIq1kt4?}GzsCL*{Y1kqU{>&Jq2aLYXtI%cUXjJly&b`Bjsl=%3*Gsnnf zG733xZX=mnb+Ss6?sD!gr>#ap1xjSiC9DzKW0%>|X6m#P_D&)Lr> zW|%rxrxK;<@@rER8Uh%l0p#c_UkJhc2a@p>-Zv-;I2Zw613D)CV$4bN1PPN5oIWfB z5Htx9Yh{P-LdrTvC&x}OCFRi18vM}#^G+7gWvfutKul!iD1}D!W2%Gtb5aF#7v_O& zC^ZmP2`D=xa9V=$!Y5PLvYM=PRN7gZQi8^v&=Z*~WqV6XOCVb;Z;+zwd{hw%JesB* zuw}Jw@QJ|{Fk8uwk19ObL#fhEDJlXpNG<#hXsNAK)7YQOLV_7cBnLbU6CxvA$8b1| z%qzq4Ml!&G{6KIp=pa7>h6OVw$y*s?WG@H|9LvDrl9&k+PJ;A_t-{Y<*^E&msL)9u z84;3n12>y?7_hv&5nMQWPzl2j{vRmRb>Vh~JkokN=rnsJMH{uAK0?=_r`}v%Er|r| zQ4iD zH~ca5gI)q03f>sjgL;r77Yra7(E%7N&>2L^nNk_2z;*MPktKHgJi|xI88Q^p`is)b zC_o~`gy={rbO!!K5M}}bngaxKL-EZqFpX-F%el{EY%3fPS;TgvQ&8BzD5!v=0nW|M z!9x600G89B#nRGe=uLU>Sx5fI(j6E+M2moerrR)@LrA)*yPh{ON&!=J)0=W&95M;n zNCWn{3w%c93`E(WE1?E3yY}eha?uS7bWaJT2uY#-G((r+A;zha^ggd9JrSPqijF(N zphceE!etu2XfD59$$P{&qH6+CnC zl5vN{QC%Y9r`Fh>xZ~Q8S0Dha`rv{~et?Gtngir(y`o2m3PHcC2q9cFF^G(cAeb~r zB`sq=d*bVP>=Mu&RUG{xgb=)rrK3jhTR{jw#7(0)7y=u8gWdGPEp%Z7BfJ3vL0jY} zrNSN516)z+jX-xk@Po*n_(KNrYk8OAiY}m|HXhmM&!1Owt>TtDC3c0(#URlFV4EAk z002M$NklvHmYE~ORL?m9Z5s;d9xYEjCExp+Dy)9erIdtfgN@dRTN)$L4Gcj0GYJzR9 zBS${lw{H*K5CEaCz-R~zxCUgUK(jg5|B>PBh6{Op!*3B**VFi}$jn3x7NTk3i7koi zVvH941xbzC`gP;a|Kazc<14Seyko~UNI9`)B97IkwR-$cb!1;&I2kSf07|^{Kh?Rpw?4T3I7t!HsA~ zEYFUPuOGW-2jiF92&f1!U})+Q<+XhE>(j^UX^g-U(I$IeGB)NAk*81^8{N2tDWdB! 
z)8MZ+7vq@@GZ-0J_d`ljBXYAhrFT#Y&ItH2caU%yNyK6zGd<>AguG`8(U>}aW_EfC zB4YXAC$wO=_x#-0#Kwtr8=MF`H9ANfFi$6z6T=?H`m{WC?s#0AuEj!kN$!On^Za^N z94oBbvfVGPby*Qeg+OVRKwufn$}^Rz%a^Av)!dXLS)E$UB(Lt|{Ll-F(Z;Pi91jx@ zR$`frG!@k*FIu(LK6Kh-B^h2h*!r>ek!@fUq-rXqYOS%i^$H^RpDA=wu1?Kdxpa{} zh;2A=lZY^4jYN7-Vco`U4>-Yy%T_j{6M$IQSJq#GM)l(9$xD~&4bD0!XZ|#nu6m4e z5J2}0>$kZ1HB1ySfI$J286me0=vVurO{qf9{_NF5d$Bko(fw`7y(OY8wyzKEFvE?^nR{Cu zd`^MXqJ2%L$GxgTmjRidF%Y;JvE*Rk)Tzy!J$)LDn;3A62p+B#D6Ij?aIkLe`ohFU zHyZa@2$N1C^4hB-?rMx6QaL)*ms!(rx~r*j7DpaKkO0o0?c2Lm^oo~ zvm#QRy>RXrEZ&GYa0LGwtW?Oh$P1#e(&*^MtxkZaxsD1g=_FE1pIdJ_Mh7_5;hVdT zJ8*u-xmT3XQcwdEQz zoc6*6z9z=U#x~yzR(%eGX?QtPOeBb`IPuKIb0-@F{7Y(#J5Y)&xLiS(jcX^Ob(@&o z1YyZ_Wa}$4sL)KAo$~DE^A~5XVc|5>9wx`4k<@cxer$AI>08?zzZ7I46K7P8UR|IL zF@mL!A%K-;faNFk_Nx5F%h7R1$p<5$#)!?+m}Q%c$3G&i zML;3klDTu|7*!KCG5bD#Wl3DmhYZ@jTNv52b%$R~VODcYdXPQ^9cwXccH*EOUeSU%AlAa`bxb!C>E|Z0%eDTzk3o`VG#6}zOqL}T| zSvIkDWaEYjr!a<*8flMCC^H54Mx?rtCFEJorC74`LVjZCw*_>8Lz}nr2k@zVu=6a# zl)ap6`tdlg_4Y}{wv4dLn@86G+nuc_ecO_}45jt|GIo#DH z{5I0j4#7OoF1830>X+I5(W9RcgMtNsJ$oN4jg0b@0TD`*MhqQUmW!fV64z>phjGIg z;b~4379b)QGjA?(CAHdx^H&caK1!BH9(i>0mQC2BQH~7ntOSwGyl@v(gLc473>M79 zup$8Tpy*){1IK_2=fmq8hyg}5l|*!Ax-`QZQj%H9a#5lFhLTIhCcQ+iaiOz``ye_T zvoHcFUAyqk8^0efvj0UwB6%)KI;tUZnKjGo^2x)~<@5(X{jd1^$fBA|6PROX&fMt_ zUw;i{v{;O0;!2b&u-q8>td+${=1!jb;>znU|KY#;OD8wZ1{X#+(HoswI(g#2d#@IA zE-9TX&%#?6inmlOwT^|?|zQ9 zUyO1%kjP{~TuA0Ve&gTj>0FMbY7~31+{ij<2p71))yWHoK6>T3AO9z>xXHr{ug)Bv z?=`1(<iKXdXEIKNQN^~PZAF>@T; zPI$T8c{UWJYv7WAb&TbADWPT0x?V-!p75+h8=!avqlO@53G#dV-3CnLv=bDw_v)-%t&7;V_fjGcvUv_+H(XYSHhpMG%R+=(^g zBaKExcWz|Rq#gyOzz@%VarV$Vul(V2f8uS}7C5|NWuXqMI6?i>-~DSftzhfP`*KEZ zjN7=lo#XWrT)g`AXCJ)&gXjO)Teq2*Tmf!+z$Db_O@G7z7VSh4s6C=aij>AYAqoXHP_(&=Qi!tmv9^Qzx`sA5+ z-~N56RK%{q-V=OX#H4_gh(XE8&VD)5aPr^#$zL-T1mX~k;<@Cch|Mi8R{qGD@k$BRH8w?iV#+9=lz4uyFWY}S?1j7p3 znG4hx&mg42FF&8t~W#xjD zOvtI7ndh=H+Qf3uH%MDkg(}_4^dpUlA!+JTH{`&oQUXQ}8kjvvEY+RfYCP!h& zUdsB^*-G^z@2MaEM@SdeLvdGRuM+FTGp9d%^OZ24u)PI-WBV^AFp-6rp&IU`6LVK* ztIz+3|B4&3Sywii3i`(6srTM`yI3f2j11?xM1_3J;waf%)Rf@t(Swo=^y(9H4--4^a} zF;Tkin6-`Tc{6(YOOMolTG@wm(Lyg@ORIA@B4+#JDhsS_X=jAC~?Y@ z$3Oe<`C6A&+LHzw zQf5rPe*NoTGoPjQ0hkwQ2y}#k+63rG1CikkhN0VZS=>@T&tRLu<;f@a&&`#;JbwJu zSAU<&7q@J=_nr-#HjHBxX%H*|`GPI2ge|+n}DwxDf%#ns)eevbl z11HBfZhP#p$2M))$f(K{1k(I2m5v2I)Y=^w;V4y-fBZim*u8Vp+I8rn6A>N+E;^}L zs_JsKdzFw+u6y8>Up@Z9j~#zZ%*$9a<7@x?i@$y3;T>zoidow5e8OBrctAultHd~- zY}vH#qeGv6_WOU^_sok<&~(9AY1><(oN}11V`RjWu^;oUo5Bf78Ri*HSwipOfGu@F zC`QVd*d58s|Kso1uN&RD-)OvWS(fdGlUMh;P3Q;5Rf{{R2@;*bBzDQ$u5!aC)dSN`YU zZk#A=+p6CYdq4aO_B`^r#c>8bI$3COqj}Zo z4NQ`hIWs%g08)Q;nt5gEs0%}XublynrUzOjOh^2|YyVc6zI6YNUHM#?HEQgP<6H`i zYK^!Gp5uDDZcX7|{`qhJ>}UVQ8NW{qC>Rdv>G|V_PJVvivB&nYX^10>Nr^vQT1z22 zi!MwXH%#PT`sLsK>0kV3XT$D{C}~)Fedow)zn+;qyK~Pj1OP6?;3i>58;eHR&hSlY zxZ|b#Z+`jTfBe(GcE&f0ulfq8wh-}XRG1*?5iDpc5qnEY%NzP&f3iNbef#!>@T}4| z7;wR%qOrG4P>g9r3yZxTRKX@Y7NOX2hZz=QW25%}{lowLohKeFmGCXXOe~gSCUjK9 zDIsQ!bL8mfXFh%HUmkqwk4;1)x<9$f|NM{t$3y$Jjjt&p1J~B zO6rYjdgyn*dFTf}aWF57kWBDd{^c+J;X98$FuJY~CuNq<7+Y(&z=aWJ6#nknxb5J< z&p&?cUmpFzi;i2w1zBS&X{)Lm?x4BER$ z;w3)Oe!)Fj0hVG&CxT0Sv1>E`^ozfH^0B?6BYB*HM+i^Ec*N=8Z0Z%^<&|fGQ>QN- zee2b4Km9{lV#fP8o_74&|NOiE_Q;<5$2UsgAPIy-$*3LFVxkoJ8zzo@dFGSf|LY^q z{m6WRA!<$%|LXt!`@P@Zvv$om_zc$Kb;ZOn5)6wpjMj~flIz&pzuW!H^TH>xnB5yQ zv62ko&pMOEB$LAds1Q_V!`h1Z&m5<-1hG2)jPRuoCTCB)|MtuKzx#NY3*y>rv}Ro$ zA2z(WLz$We6NSwi)_w57+vCMxbk7UoB#PS+YQp97#||IZeg6*ZxmfMlHY+n{*h8$u zvQ2;?qdRrYYd|&*VYyCA^-j z8;{=n?LYm=pZ-U0?M~?#^qVlaaQNM?4!{4zqk9NZio;?dUyNDbaZB}hE)4y#%-yiI z@an(*;)NgnMf9zQOou`(tbcvt$eAw=KKAefbZ~S`VMy^@CfY2=JB9d=qi_7?pZ~{? 
z{s3-3k-z+}oK5%FYh^zqehA!P$S7LoiZ1qK&+ZO#85+3feMjf!uiq&pi6* zqir9DkITRSY_G^fSE1{HOzcww5T05olP~m4z{LUf$tRyQePf(IE)KHer#7L7wJa=3`yh10Sj<8R8}EradOSbt#&f}zZy~Umq$O;fGL~gXijOeE0-L3n ze2dM6Q}RlsKYR9%GR!G#29{-4#4|@y;rz>RYy|!?Aia4~C7IHZWeN0B=m`8bPd$8l zXJcT+gX*2#ydiW;VnAh(I>FJamrTD{0wz=ZiJgz=IuaBI6OD1sO27Qzt%r8qw`PQG zoh<$r>PbG0WhWpz|Bz!CgxS=)XYa0Bd8#~lMj}Es*d@o{jQF3Ans$Q!ET~S%>nP~*{jQpw7 zC&t!}t>3h^RK zg3?kfi|h=lOpld_20FKXqPS)(cmDXN1hB+a-B@ajp#jx~y*3ns+SA^U&YqQOIL9&0 znFV$Qs_&$Pc^E|LfO-O9*F@A-bC;P%69K4+{H1f3&dpxCyl>AAKV$o6#mj1qitOY~ zYE?W2&@D()$h!~hxaX^5pPIbVksGtuj(qyzLwoPXmK=DEkOMiAxmpE(WSNGG66wZ; zqWAE_JC7dugwF1wE@Qx%nYwu4)C0S>h@H8Sf=PNGE8ZB0`J=HMd|NE!c5d7D#b<}b zs7yc^iB!=ujT5DW?!f97b%D&4N;W~}%`asb+xLk!gLcbHE`4b~-8}!D_f|2caM23- z078{EjEp^{4sp`4Cgd>d!T!hJKd@)l10#hZyC#|GFuu{tU_~aA5=H{I z)iirx*RI;!bUJxO3=K%FMtb(sPjT$R`b1zH*=s3!x0uhwQzH!tiW4}wdp3TnkydM0 z&art#f~huUzxw>6-8=8Y9?e3g??g$B7)f}YlwM%TKmRJfb zvk5NDn5jLt3p^|Kz$fDl)}jH-jTJ~`Y)irJ>QzZ)gjIXf*-DA%!B$xR^5~&GySJ4H z5S7J|-yqkFz8(ijN(Bcl*x!M<&M6G&rlLR&SKqP>gJ( zTF=L@88_;C_U*koeXV-wqFM56hj~TMicF=Tb`6TN4=XBqsmw4oQLU+3YwWSSg2(Ib z#|J)q_`z@Iav^?d*emFKa0@$8jK1kLq`$aNKKjs?$3Bw{WG0LO)PL{oH@4k-Pd@ZG zmkD~g4L|bpA%Q8F(?TI4E4tDC{SO}e=NL0qhX~&9(Fo(=m3>t^k}sj zYKg|O+>4f4*~K&2@tEny!L9QE6BL{3AQni3S5<+Zmu&d6Lkws~K$4g=tb2X;XZu)Psj3)XX>xbJkOq(>8710(cazxWzw|ER@_^R8tzuQw;Qk%V0+! z)ltO?(XwQA71t6<(ai`P2OV)Vv0`D#>^)C_k^{Zk=?H{in|OeP`r+c|nl)ojJofmX z{^Tcr^2dL)d*`l^Qi&7run)`5X)`FuF%Qhp%>{(azbFX{| zf`J8b-rEaKa_T#F>^O1agaBZ~U2kg`mNozkbTh0D0IXpZ{l)-tgetV(B8?yPs=7ZOAfn-%dy#hW-^%#}v+7cVkh$D2Sl5uQ7H64L_; zCYDP-%CRktc_4RY70mR)97boEb>wrnU@%?p%>^cr#)?n$b;?H~4^k{LUB`{3=L0I! zZqfVQ#<_E+M%Hk+TZ9II(jewOj!npzLh~7-;F2R_81AT(m%kQki_gAM=j(H4)~y}G zyvV4}K2sEnLR3&h8RRhhkTgo}*wC2Rp)`BFY^ zRM``iuqDcKO1WSp?_Zm`3=@mji{18eBOi7_T>xXyR3l>-*}Ka^rD|bZQ6F5S?yNCA zIkk4p7>-&w^aytF#xX&O2{BA#*>uJ#i+EhQjZGUS&Yb>AP63jD!S$)h%Ud>Yq8o{V znco=^8A|kgEBgV^P63(-nfSUko z-DH*Z0sM(JY+QHl{8=G7v0pGKFjwr+1wPS+v6`u%(^3t~^{9fjVklN?VXhAOQ0Ce` zu0GwU9ZNi8KSH*&w`hgWs={g^@H`tJV4)S3c|E)AgqEQ|xYu>De{I;b5zb8*iR2xT zR3d_K6H6WU7#UHVs8)?fM@knio{@1!CQ9|om%m=Wp1pV&vWhHeA$)ShLYk$FH7sf1 z1W9=Hd~u{$%AGrZQX~(t^n=FfGheP*KSAs8wex%ex)g}g8G5e24BXHTEluy&m74y-qc(+iP{uok0LWKJ$3kVvkEk&y{hily+{^kt!-mm#d|AxInOB*bDim^zGt?Df#2_AGJ*!q@ z?3QSl^hUPN5KURUdohWWqks3iO49>Qm5?X>eXpo)MBU4jXoH9q@u~eL1 zzI<`p)-8pcMA$?vNm-x?$i*4C##+?UH}a(dF<}6dX@QaUbVt!)x^B2;R0I;f1I=ND zMRk%LENe3SIvEFh$S0lDemAAjzGnEA{cRHAeK5$hJ&vkRojS$&hp5wo8G>(0KHyO# z@W2IHn1^B=CNVt}*mwXu-jj?s;}f!P26s}rHr_V+o-)vtL`yfi1cpVr7E@YQXvv~! z$gbPYb&Jb9A98tpMCx@gD>vw#GlU+dy0 z7o_Z|g@GJX~81nK)!D!J@;vl7La5DyChI%v~Wd`mb zoUbbc+h&JPcvjv}ajF&B1jC0_gn^X~tE;G~Lr@oDz{(MZ)$Vq?!$3{Z(v(P4g7EU1 zn?{j4%)!`Uh|X6hoS7_KF+X6XEJppuJqKq-=7Jnlmo;)Dc~0z{nY-$6%rO@yW>l{t z%ZQAG%7`TyJ7c9>L-ZkWkaXz%Od4?|W%t`$dD?jQ;Gj@W(lP^JBB3mDb2Zr;pEyaS zK38kR#N?l2o)K`Mke?q3X3Lir;$-D-5RuH-nR@k5>@EV85|Qelpw`&SdS!z)hJMIZ zBVb^)%HgwOSY%n+5AuFkV3L)j9;Scjz~5-8Aaf*%J0LE{=no9H*l0XIj0ti}s9HIp zReZzEtdALCP8`GorOmUd4=%@~h$mn! 
z^s8l@gGBL>DUFyI^iQxQYL5I|_67F1u#VvpzU<81W}rr@(>@46xp>n>VKlj|)hgbb zI$yP1jaX-H__-iIH_I}~DD>idVLbQfRCS^Q`^AbF>O-dPc;sbF6g{UJ(_`XVLeS`_ z4q1b|M2N=ypHrPo80iGxF1oxQ!6gj>oH5x$b4g;?Bhm{tJKVVHW!#-mL|AhWE1n#n zjJ=?Wce#`JdD(f-giy{Y@9A*FT9HonKqmT17sy;P3ni0A*u@Zw$sWG9eX46oV0x9H z9e$n>Wj%MTQgi_@CF8ZxE66E_Shm{p5m}fEQ~r=m#;{1h+BnOQLh^?J0%H?Qx=F23mL1t)#1J-DDd$I`a&5*~GhqWt zO)x%*c`@{BxI5y7DwC@quDW3`!U0Ya%iRseN@H^~c=20?L`F&1Y5gq^8Onj02eGW< z&hm=rDm4_;s-Ym$a!b(V76uj&PfbbJ6yRD0p4~(z0xL41EtN)Dc*PH~lFl`lNQ#{i ztc7)#Jw_f*zQvLJba_e+yk$yDTpOlOqJprTZ1fF+<3f5oAZ6o;EI3!P)X%d_mR*~f z6yq{*$hgDkCc7j|I6q8_NI%YX5%V_-&QR#;%rt{LVTosEuQ3P0Le<-gs@YAVdmwsS z@*<_-JcW^wsuxMq{UxS zAl)>nCUdb<%>v@NaH5~X2vTF5$QQk+#BQ>f4jh)~jtnIXPsU2fOFB~KaaL*=vAB#1 z!NBB(c|sS622*dKL=wP&<9b*h#qeP~mvLR|<;sfM`rA8Z_t?8d&UuBSaam_@a?JaB zi!zpY-KKot-BHE27qvc1y+UpT3qLzHRPkfI#fg!6##pFy5aBDfB`#KSq;DrVeAhE=)L%`V4~8YWm%P&U15f)pCUgtjTO^#AiXWP$9=%NHit!?ZxMWSp**tg4Bh%nN?I#cSVF@pB4ut z0k9XBm5CJ2HAo{P)zJbbd#FctFz7{_qzr+0=mWEcn+#wk*S8@k1xOa25V<>j#3X6TkN({lhL$#OGL`2q<0gjv4<_9SOl`F{umh%mF;L;VMX}F6T24172BWB5=G6NQ7Xb8{zTE1;G39; zSj2+$jck(IWj|ikEij5;XG$1NfKXU6fU$+peh5qB^oUsr9bNo5^lT73;>8fc(1WZI z^B1Ku!l1sRgTu{qIS#m!4HHN?VMBc4QVvxUTb3-OHLqrWrK_~wbows@Qhbtu2L^H0 z3p>rcvUEx{Yv_mnK}(lKHjPr(#Wf*#WbshMja+a`7~~xeuZ?7vE6M}Z#NP%y@({2G zKCMC`u@$l<#YwB>8DuGTz?QyYqz}-sb_|flCy_A-Q*$n2>_Cgq?ra1Qdt{7H6ZOLU z%!1Ha5_6Cp2mFBVl%bHC#aP{Kdl#Td7iQ?91A`@2 zh=~CrPUW}=(7$mPXFQP+#$3EJmyAR7ZX@}rj&HmnC0Te0dd(-%L7*e5W}73bFkzD) zwan$*DHg_KN{nAWXatsCBC`cavt8yVRiLOa59dL@+lN;2W&C?X!=*s!Q$`nHAjqM$ zA>`1(*yB~LvDIsYo`Uzi#AIRRKOiC%Y8#yb5~8Qe*^j(5775t_VDOJJA5`K>Q;(Bf z7ZEY6utSN&CZYxAWjKMeM+yb$pQ6jt=_mw}iS9?4h7CA%K)ODnp7kJJrx>$Vd{HuG zP_;EOT4dUX6<5+MKxP=Uv*7v+xn0yf%yN4%fZ#w@h@3FuUMz7&N$LbDax2|66(DI|X`cQisA5>VV_7!o5E9*8J?1S#Vk zdLt50Kx8r?8X_y7%+Q#@o1)QAQ7ds#NI2NeOaw@Q9|PG5Rx92HjC@QaSS@2ejjXOQ zJI9SotWq&sA314zSyx-DZ<5bqj|V~)FjZJQ{Y{Ek{wu1`!3-m#`sGVyjs3kn!!wuUQ-hi^_Lb1^2OOjKna$hakwLGEw?qaCw) z9IwQO6blYsLYZ+t#aR`;VS)}Vu;+*e7>puze}|9=GM$v;t*hdI$5bxCH;@g*7?(YK zlPE&QGCD$o%znBkryv7{S#2NOhsp5|s4S+MCs92TkN%y?Yy37_gg7ktGf zS{b%`>1#S^l&FQo`^EzpCmy;c7BBQlvHS{|q<3R(W10>T)hU^M1ZKs?0Ln|Od2T&s zMS#gZyu@x`NJ#EpUDU%=K^)sDRt(m1b&gm~zxbUhtRAV)FigAAlb1cOba9Y+@)fW_ zl#UK?BPU`j8ruukrnSH6{8{rt6ck4sM!;d`bXl@q%!CXi991R4$7M4@1v@SRE0c{Y zB7qpKkQ8t%{ASD_b#_JLfeQDEBfc1@5Lz*=Y4A3jNonx3@qjc^2DchI3G*ceCfbb2 zgwXidd@};5bKnyyH)5$SCd)=G4A{s*np|RN_Jis%9zKe`9e>ig?+}I~XNk0429YWaFlQPavEqx<4iPZun0*xgu zu*P5mp^P(k#2uekUdrAE)O~4%GQkabMLmuS5kfr&dUOBSAI867Pf}#J9y; zm7w0T6{+r#kWg0W8>VBD3)gZw4#a?nvQfY_BNR4CqemoSXQX$h7&~|&S|STUvh#?u zFQ^fvQS6;`K+KwyjR?SS2}uzIlxl`iX_)-A;Adc3rh;gC0oidVW;Ex43Yjps3hcqZ z$`bP!Lye5Kbb6vnZ~JEPhjxOBU_=0d9vP6$c+6*mAD#I#p0o5-iyL87B#J3iKs|$b z0EuJ_>#T#B9+k2L46%TO_k3kN+M~~+4kiQ{myuJv@ns{DgbWjfLH^h^C}Cm%1EDGX z4&MMkl9O*6OazeCkQH-?C?15ftJD2?7RLh)s5XTx-qt0)Dq>N?Sn-u+8_2b3`e}*$ z%lWK~p;f>wbo}My9;Uq|2H-8*0dJY8cORf2Mpl-=U>k-)7~aa#-c{0RNwl4d)M#!h z1mIg9cw-ru1p3UKv8_Rfb~I#m%rMBJBQd=a!=ZF1X^ZIm85=k0wh|S_^azDU3u%AT zDU^$*Q}8I3i@aP_iH z5Ev*@l8XLmGP2qx>w&~9z`ZOC^Af?9nTD7SWok;j#`0`3nWpKj*{-%X`bat>UM2mK z7itP?%bllRvcOv4G>E43 z@3jh+LeU%mK(jAWFQ3{k(nCbXEFqJ~{Pv!2(4tE_9CRyNg2cE;LZS>X7!lxx>@*vx z1;IpcL7{qsh&qH2kMOv%U^5;q4O6-NRrlVB`YuB=)j^jHdoYj9rX*+?}~ zG@%l}6gY}xMO2JCSeXTKoWErk3k6B?77}<~$;ARQ6kJIO%gu|_vU?zuMoYHECRq5O zwe#L|Q$`T!6x!*a6=Y($GBlpRU=_N7T?~c%BO?X>Fm9m!;e4O5btogYh@{8M&FG5K zaaiUNb`gC;%z|cN1|y|0>2lgF4>CfTK1*?QG}5cq0g;G%Agd0jjv*P7fU-I+E1OAM#}5dL}w5_H)2ZJ z+vs7iyzEuJ{s*!|qn3k-)sq8)3Pqp-y#^s@)H-s}8M6is9Bc^_(zN;%L^rz%jZs*L zWn`e7cOhKEWqPhiU?>i`fr6qbN>-Fv3=jsAT!tl-ik-B0=`?gh*1&8!@M)1_U#{>$ z8ABZ00PWd{-X 
z0IY__LL{Dy96ZOqG>@=k4Vmn*Th#pVlZl_W6re*A7l{eB(IG_}#D$0B(b%wy--mdT zh?R*yUTWy!5OX5Sp0cclqNt_;{1up8FtrE?x5v^Mwqn#r>LpYz>Lm;>o=TV$O$IYP zFu>9xy==Q$FJv5D?u94gz&8H^8`E_pGJ^pyFAl0UO4HZ|YAz+Ny~80~1R@Y}NK`l? zhlu9S7EMBa66_C0V*Z#BKx32*aqnfyM+{qLUq)MEI?beu)5^fVf&~K^gudie#kGsf$5UAq{i+lQOh)c&=Ntk+rEr~;q|PhHT@zEF0FW^AJ; zTqN;4-JtH%w;+vl2aqa`me{bw^}l%;G&YC~o)MfFL$GW_GiVl;kg3^qX3Gf7YBu$Y z?>UKNN4A`0Zu-0Sbo}sm(2VqSOf!LWRUxN1BncDRnp+UiM!i}umhmqI;unCepF9+@ zjGP@OomVWTfp!XNx9YgCJ0hWN5y0y+9RXv#lS(NH+)m21hGO0 ztHbBWrRJCgYa?d}djNM!0xX7Hb&Syxfrqovh+fPBB@0-tPf*mRRYe}ua6-Ydz1oy2 zL#M3B@GqT^Zb}pECB=4dfd^74epuXE==$A38C@oUf^;#>EH>ZeT+op*OAPLyK=BO8 zjP%C#o=UV?!Mz-N$u<_T$FtbT8YG6`riP&AMH0!HBBmsU4+%g8nIw=AW@N0oO?^X% zC@Y;pHKT8!Adrz&Pay*ef)Ql|zDJReagAL}GPE(+2^Yx$g|ajd5;sYXK-6(o1efp< zn77w*5ODLfxTFoz?*_i)1Ji$VY0M%b>B%x~cniM*-d3uI&z-XUMJ5yA=zfAikN#|`h6`~mi?Goxa%b9{DEIHf7J_0G@s zZm1>LPm2^tMy`1Y3_~M@(8M`C!r&0WrMiJ@!UChu$wSJK1cZ}@u0)Qald@tW_!RA$ zvx_6A5wQ)4aA8zI#0b#>5qjcC4nbx61(MTf?GTMz7^dWOP}y3Mg@Iq>oJo2#i!qpn zvGR+JxTV!WFo;Colai?w#*j#4yx_GguG?!~)0Xv9{X8^_3fN_3?=Z0y-Sk0>j2-40 zCR+ppP@k_#K7C#!aj!m65mPWGee^r7fz`Wm_!%KN)V+`|%6412nK?cVcP?Vc&?3rb z*fZh!2^h~jf|0ks7uW*^?ph6t&{VIk9{sJ@lz!*a(X7@fCS)##%tH;~VnmH{gvdiHbuj%8 zjE+5O#ITqq28tc&!~oAl+=924M=LIF!66iX%#PMqf*$r{BB()Q}YIFT#|QzR^6V=e&qqN2=H9( z3HtQRsGwRV^#Z5fJE@D5pieK;#2aoDE0>u^o2DDAiPNXfQUsZ;3mQR3`D2!V?XNl9 zKvOw;2HzoOfX1X^GAK=zlq?0Xe9e(1kVA+r4AdY#2^MFW4hY?0i{@BJ?l6aWQCzyP zN)WUf`xPb2Hb;}%7(|s;@}}!c(TD{E#M0ib%jrC$n&Dtwo2AtZ`(pls0;z8SQm@ue z(-#1|2|H+{_uvJG`*jt(nf0BFgJMjORxzxAYUyP{6~VcoCi6du3JGe(KNFjqap~ck z;M|Z~N|j6AHFHq_^G8horhl2wP2s@2Pf01Wd5=KhYO zChHrX5d)MVwJ@+JJM54w!c;9S7{g(U!{K(0&+=u>3--WV$)7~374gF0L8XSfwJ*Tp z@&;cF?hHG`E#d+UhfwTXf+Y!(Q2X$R-7Kby^_@iJ(L&oDR_ zq#_AJ&qS^wpXT(K;Ia2a>VVlL#HFhR2fPh#da zu|NVyFRao^YiQk$BgB*nG}4&!c&W3gT2OC0H@4?m)-&aR6DY|1oLZKZx9h}IYE4vv zKy%rLg~b9PFWVC|>1dMbv!=0m5%8GtWPq2YNN&HB8+tskBLi@=14V3jPaCFtdjKwu$ zGdAxi0o2GJ1i&a_@nEw(v9Fb{8af(* zX;2}vFtPS>uy~1{D=SXy(?;>2!$f7e=8YV;}XZ zb_#Bp8MW`v=vT$Va{GV*qe%6k++mS-I0jTE0wh`#(-O>@JlmHGfmcn71e(U^dFCb) zDr`(92TDAb@yIf7#m<+-VqYM}ADSjBlcE$dapvH0{zU)7H%lB1Vw6H0i%|zrK#5^T zfw;%!*fqgCKvhK9V`$t8L~k@oX-w3KJO^3G{$DDQz-LaQOhk8?Pi8J>;z>lvh!U%r zkDH{t$3@j_`bADswM-PXQS|7+%O9Vut>JW6;%W~|pMia;1Lqbug_AX`)Gg3iXT<|dTN*HKKCB9`cPk=qZ zB6l-wGsNUD!d$S{7+>Y2hY@Cni_&9^(SVqiN6dF0)Qu;T& zRp7=E9s9Cew518P`A4zQ{-(ocwVEk@x-MkNy0m2OsA4)@Uoy)boBV9z?yYA)2a5RmKMDG-eKX z+psrABw>844KdM&35pRwlXpdV=>xDq|3?lXa&?R}o1l>b!InF{Au1_TKzQvg5k<%ssQJx~sZ+WjC8- zH%W=4^e9Rcwc8R!NnT`l0X!bS#sCi(!1n*~@P3$w`Q-tF!Td5XV9aHRU{4h_)GRjsO5a07*naRF3B>);Uwwa}~Nc&$HzKv{3dem69RI zB=L5L+f;<2Qa^;E4lph^uGBqWxzd-|$pTiYm<@UdK}strAIJ@**v!4k7Z55{j-{4q zA>AT{Vtb>?f%pSPkVMTY6RN&WGBBz^xweEi>b!uY7mlY4?))Ca(3A59rSk1eWF6J~teg_@5*5k8 z9{Vw$5a11FI6$htihY3e!RjDdLxV;K&hr-1*@)k(G}bB*tJdfB7g1J1!;mjq5s@_M zktErX`oRzkGY}?r5LX+0TN2t5E?Wa0@HmD1X@deJ^@6rJP`U-nLSo3KXZFt$mDf3S zSp#N@p8ubq8dKtIIb3u)u1Bv7l@KWP!tz6l(dbOhaxcn!OXhrwOv$2$vn6aGSc^8Q zNAwcDkqepaag+z;5H@H4*Xom5*y8L#W`_rYK|bx)=Lg+Ujy*nxt2H|$P1F}1^^FSN zH6xB-x>3%rz$USI$G&rX8o;BUnwKi&hn2vu+3sML38Jo3hjkMW=v!I{P+3a{Oo8*%_3H)Fk}3K?51PNb}Kd&8s0PA=`fjJKmS8p=*ml^QyTT^v?T zJdYHFAjqXt9Wy3wLC5zIeV3YgR~We9;qzU!a;ebo90m~Jtc%ejWay%#INck!4#y8ON)Auey7qEvI;AsK`mMOKd(dZG`BB=ky zKwQaBF9>9dz+_z~L@+m|ZXCcPzBxt|r@x4+pM%F3T#US4$QQBs3Joq|QI>rYQ75wk zpu@`7EJQOk0+`1+f`5S(0r3l=rZ3&VIJhn~CGZ46UsLcy4P)3~(3z>UAYx02j`P$* zPiMq_;v3z&#Fn)6)3aXS!&v_W8)waUaUSA^!^)(t8FHMn_A+=Xi!HDzOUo0Yvjdpf zfpQSf=`I}ZVTz~`nFyky9viFlTA~b}Fz@CRV@hW-C8!FzaLXeYCIQ2G&ZrL{OdFvf zbPDR*BdzHkO#x+{uL-3W3`BIMyRs)7GB?I;xsQ=@&TG^FN?E~<;&kMkr7ko>)Ermf 
zrwI%)Dk%e8j$df~?|=RoXz3A{#7$h@&@n@u3k5P0f^p}ZUUitNTtDCug3}^iehOeZ zx`8CdBnW{C?IY6cFwy{EXT-DwERG!w$=K~H-saGAZ7nROm$0|J#<@TUOQeo^*-#{h!)XWLBR~hR-guWWXmDtL>GjXmadnCN;!yBpp?D9a_@mm2r67^E*pbR zl$o}s7g6M7mj=ZB$ZHcprj$dPil2kD9A527XHKW7$ljn0maM9io~NVIgd0MzoPZA} zm%j2D zB^U%M%aZhvr4K6DFW|g|!r{?+<&$eVOC&&rf+)AhCdKkzhP>DYqRzjaGT#DozD=A9 z!a+OT*Cq{)(_xLoJ1iK?`H>3dw7?reB`-0pvri3y;)}$1c(uD1&puQ{W_bxreu_3{ z!LTQQTe!vQ4<9e?7&(q*RjQ2a4N@R0AQemnqsZM)Nm&o#fm)YMj@TP|KZ9-kvJ?q| z*P+ZXdT`+(%V5AzzFv4`o0rISqD5(L^GuggFAxKeJ_I3bzY8pV!P$d|*nYGdwjWje z_Uw1TaJ=gRwe}4k`S?@M9$^g}{j9eF2K_lc>A)AQeFC_;R%<=-_%k2==qD?U_xxOx zAYbSca7$qKj0}MHIM5I+{QrcGs7mWQ8B2B+OPkg|{E?fUedYxQ0T#@qjrp!`yA39# ztJxc;&jZyTf9%N{Kl-tXd@{wl>3f4%T4XFhBCOAaTigQg10CO+-u!#7HTLxVd&k1R z&;G|?hCDN?Jg1vDqbT%g+@c7W9!p2be`mAXgEV=tjkOs;fsI}Lg*i@alyzRK%xONy zH|X_5M?l(v%VpAA>#|=&JF}R0WBm>IQoJT`+VVkJOho=mJz~RZp8|SMi)9?j!$}0% ziy{cvyn^v4Q!~C_r0tO^=y|lX6`6P&jwt|1gRZ~;T2K-F$^$$~)&?gvzMDYVa5BD| z!WUi!Lo+ByyXRASCR<>Df;m#E!~+tiduRoqp|#2;F9NeIRQ9?YF(Qg=^_V~K0u!$x zRtDWly9p^_OnlCtg9>D4tB7WWdaYO^%3OuLs_@M~A0v{7NA%=y zJLn}wilc7zSriyR&SJh#)oC;DU^_A%AEuGsGHyz|ca z7yNBG_9L1J@-g`LhbW7G^SS4qi_$YZf8kZm zmY5Ni-%?}D*8XpIU+x!ZB;X1}!^}Wb&NlPt?6*M(uIE4Q&=w(65a!4HB?TK4@Nzos z!dnHy!M(%n^9$;LG?Wg?<~a^SEt0SS4iOPsVowjU`a(2V7wfb-C?6>}HoGYc2Oy|o zsX#cDcJX~MfoZkb{aG1x#xO?~RE%IK>WRO6k_{yQh`P2&?gFG{REZ;^|DFxDuDjb^F zY_!-Y3X_WyObi4;tAe)LY}m04z>``D9;L<_8YCoDmR@*~MVw(6dG3fzknwPp&F08m ztSt~>fB#^(R@3{@9&^&@lSrx)v0xQ;y)rV%Kgqe%F^p=AGF*r(O}+a-2jLK~if0_e z7To;qE1W3_VkJ0*ipaKKPRWs(f-|?kNgy*2<|5LJG9G+LP866_+w-`Gq&&eW-r+E{ zkV>Fn*;&R0qx@hn$4T&_#agYl29i0}h4+>)>E*P57$Q3hW~>K5H?0847?d6r6_8)0 z@IYU^)7O@;Ffj75Itp@U@PUM6HWTBC=r^2^~Lgj@ufLW}O&wQJLy7b}srXfzhUa-F1zm8T)%zl~Cv z)^%}X<4SAizAJC}^k<)V-qAWJHH)bw6X+bj)|Z6A|r>vi-wWxB?)(CBrh>;@$(F z`qXD0edJgBE?aK%y3=TMVsMNv3bBuP^~9-qqjUO9@9?!B*mL+==2{+g)10|rRlOF> zUc8GGBmxmobWjr&cganBk#PHdS98ft{C;EncceXZF$xheMla){V#zMzTY;CK#u!X- zj2m`LymjJ4zuQ|`S?lu~tn#MAVBAG1u%gv!ViOJdDi`k6yWFFPWusG=ZE3-e?CBoyCsNay)wNwb$Re z>#j4y_JQ#$Cr_R}RM%^pep8hfXxPP!Lz=S%wT&{(9mW&p>Ss=!y6?XGsE5`G*z^pI z1R{7{8J|E4aayJOT%+$(AWE^oPhd52vXb`xy!;DP5CVIM(G=h_3?=BM(M(5@pgXTd zo;r03QnFhR%N&47q#h_pJ>=21<@>8k?a|=WJImKy2@Xi7MbLrSA0w(~)M*OG$w+S> zJNlEm?(9$6oyzI)>9;T6+g@B+(ta?i(|^6tttd5llNX4>yK>_Ash|Gjr@ej+y00F8 z)?!w^Smq7 z<{jeZ_T>wkY-j9&4Ll&f{`Ifb_k#$b;-5G(cs*f9R>z6k8sewv9)3{qTpq$wH^H zI$AxtW4^z4_W~|s2!`_oB0@M(2lpFSw(3(uy$%?^`1#$#%3QOx);;;g;oaM$Ve}<* zAk}0K0p{vyD8^@f+>;2eoL+t8;YU|S?GC;dym76$kVHg}fNg;ozU#?0QVeeg_Gl{; zcOyREMM{}E#GzmiCDQX#+I<(5{~HzoOVJTv2;jLzo#r0>Hn5eEb3WrvZcFTn&MC&7 zqeqXz{4oZhuK*8$g#>I*#}&nb1z%8Vj89a0$FII}5APn>eSlv{mB57XB&!+XNqEj% zN}ql1$nz(E%ojU%aailo6Z;P^?vPDxidb&4rx+=b?jjxzua{qb>9y`%-Qi@ZGB|zu zWTSC88}_Xh(@8t{NpJ(>BNk+G|JE2TkDcz`f8YIs`XXOeWpRWzsL_EPj^nL2<`z0I zJZztNNdTXfg{ZkqFQg{#WE80HWq<@g8w_)H_6YI;3z-3rL5D!(%*qLmI~sI2kW^zi zXBwf%tE?vvYU8&j-D6i>u?w}sfi~za#_co~6#cS#5lvq@^3oIUe1{iOc2`cUoH#k& zzYo2kcaY@XGTj{!=~=2||Kn%~+FyS8wU>X0z0q2nyuGrj(@3S{7;|ua@mGApNd!`1 zUYVBLaxfaa@y07}jeoUPSz_#>Ycxbvpn?GYx#b&51%T)KHxLMZG6jieyq#|?7nl#M zXXZJdq5%&iV}$^PPhi}Ej>s~J1SAoLq$-$}&={R=PI`;AGY5As=`aCM&l_V}H&JWG zYi<^F*M52LeGI#dU#;OF*vaBJ4Sbda^cB7hdeA9}d6cprRiQpl~2L++sYM*m* zCVcxy9|o-RKHp$_yU{!S_6HAc7xopK`tzUvywX^zu@JL%eCHe=WZsVTg6_>M zB63Ij1_HHt;>58>AANLCJA#3LUWIDp=Q~bFBKnhnFcJ@rb)gHmutfmIrT`tu*ghv# z&%c4PXhT8ZjJD3dhD$kTFaVi?Qb97*NVffEa$fXXq$wIjm$IGyqSyc4Rj37v}s(snjoaxALguV2hhM*wQK`RWSC?%8| z=zUKc&6TgCqerp)=yxf?a3;-Z`^4rCF@BBKu4}IT;;yB)-gg$^OuGCpx+EIu0#i5kr)NBDPMQMz9fZGi@SHSscQfJ zecO146USV$#TSxM5PT_BRpD~tn- zu^4ce`>ab7*xiFG7SnUa?hL$v4WJ?};2hFkRBfI}8!36lAe(rA?CO%Jt9h#)Ng{gY 
z2e!iU6MdsTWfsPYJ3oqhqF&8L3&(6BQ#RlX@RZlIX54-I?YCcf%<0qwN}iP~RRIr+#>zCVPQ*%O3B#5sWaj+q$SGKdLb^Jr3?-?sffeCpHv zNwZT~dHTV-abknpID^87;bIo{@x%k|>Kiv}b-W{={KQB5qsDyW)C2b%vlE%_l9N5m zQWv8JTU^Gi0q;OTRp$95^;+%v!Gi}*_2znPBcBX=<1Sm5SI(S#bFJU&YSH4vsgu}? z>?)j%Uj;B^=3NXpJ!EC$w z^&|HTjvpC}v_W9RM#mZ+8Man38RG>ZPK(AlBN63*GBSMP6QAr?7CN=HS6_Ja)UoHl zZMCjbT>Q?p6fc#g4o6=(#ih}@>E>Iit?l*Z+H+6b-|wD4L^*jv_(8SOdA;^4+-NO( zXbsO^zIx?~1N%<)>f<{8g~70=mz9{fvX2|&(^;;_Bi)3EA%jp5>SPpV@@0KRelmcu zFimm>Nrc2G2jAFK5~DF}nNI1k`!$j#mxnT~i9~0~!983g36|t@J#%VpdD~9Am3U0o zxjMIy>YAheQVnhh{-H1WV3#sl7g#(DOU|qo#Xs6Puj&-f14|lkl?PF!Qpl0u=usJH*%-pg;kh0VarwKMnp z@HfG$<4O~W@tCc7K=s;duMr_X<7v=t*RDN?DwYF8tn=1_Kbfjgl4i#o6@BcU zZ~dQ;w8|0Ew95#iQATJjx`1iIer@ch!S21gKl1TgnV9UX9>3@IZ|g+i+ORg_?OEP^ zL0;sSqFd=?LV+OnePwv!N4#Wu#Q4yF|5!{2tgm|Df$`9A=LW}Z-XCz0Y!l* z;_wyYI8{Yl%nU4a*OJ8*&~z8XkE@>wNFMpDUA%apDKLe;kR<1vAsY- zH9%5@R%ArzBRUx(zVXHz^bUrkU&t?-dH??X^bWEjg9HegOI0Q-r(Zwv;-mNQwwR@l z*T)zRJRZo3mzNPe2DfSHu|B-vgCEk;jgXkUN_q>HrGOd?-#}CDZnX z-;J(bf5Q#C4_-qTPrUQWZ=Smkv1Q!x+XHGtvaBMo6|PUF&0`iLSW(`+chB6;%X=JJ zhPIFtvi0Hg@#DvN$*jAou?IRX?x2eckdFXC(K)$mfhbiBaOQKx7~)J#@Uz-o+fiL+ zkoA{!El0|#4l9&pDs+)Gq zyz514^5-`AZql&Nmy~X}>Eo5T9T55Lm!CWR>Z7(XWH7Mvn8HvQ1jR6nn;6zaVbfp5(o?JWeqP}Fv7eUn;|7Ab$78G))hHE{FQ)9Eu>D|H0yP9$h-9h-#z0!xAcUpCEoC#sl|SfWdQpQzLD}No zhY)cil(4#b>h$vBqESuK)UZ)r0woFST6Bu*XDo{f)m+NS zQ-)(K0fJ16B>u8iK)#w?A2z1wdGt!~6I`mOKAsf45lX1R(x|NKLR2jPfS+qrVaO9& zWU+|d68y^P8p{1$Cn@Xd&o@Cz=ol&7LO9H1x0$GB3k!812`vhy{Kc*2+dM;c_^RZh zxA9loRu}?Qo+vdksPpAazc)Z67|FHeCVcn1dSM+B8Lrg#RCevzvFpaIz~nv8jf}Zf z=HD(|cmtDAFwA&c7cg7kKNu=nP@K8DsmW8%N;tM00s<`$t@$ZvO2D6DQQlhI6g)FU zZ8JJ?!REGn|rL zCLyA-=Q>$;h1{) z7>SPG)Z2;5Q{ycCdR<@kng)SHP){{!qJYd}I;w8cbzpZt;Bu6R-odTu^Vdv8a9UAH zFBqy0ql$|*i(AvER^&pnNrTBVq0d7LNQy4I_MzSi&|Hyvj850aqsZ?<=YQESCY`fr zLV@#TQsq*i+;z>;m?M#>+uXZUE}=w^ig7r|pX+sX(KL+`u_{ph$Vf^6Lz1#ZRw~S` zlR?Bz72mU7;q$LiYml()A2^FlOfmV+k}cz(*Y#ugqUf&JGq$B zl|A=2xGCx_>M8!NE2Wa95W1KI>)CXT2}9U0aU}(n&AZ#|ti=4f{PN4Mzy5mW23VGq z9T=@NGX<$Uz>{pWhZ-sxpMi>7y=Ee!#0Eu`|5ZCaFFTRKmBH;h%M?cCuA?l4v*0oXG zB$&~$q27Rv+(u(TMa|QhI!CWiaIHpjVPSEB7nCNAZFBQ2)U+~acCN)CT}?z1aRIZ1 zN5J!)qGo6NVyDWxqP4lX1x*YDLo_1I>axuE$J&L$EH7Ai%Pw20E@%qL&?1Tp zg%lIy9&oVrBQU>x?6Jo{5!`##RafoXw+~hUbx=)B(J6;ud!o$rm}$hLt*p~j6u-!kDP??5l1?4NE~uOHUVN{g!t%602V!Pe8Yu@&LMQo zgsJtw3)7>48OhmGK2MDzp70cWS+0g5xYV#GOgy#=n7`%P6ff@KH?YSYDxgSHtq_{p zoE~<4Q8%#Xe6HO6o$ZmdhpH(BQgHo%bD44G&*?)lry63I6N^}uB-*(mB2YFsAeV(q zA!v~j@!(|Q!TjjnBMzh=Jb3T}ANT+s4Gju%Ou*xtKr0bbd!|gn4E-+TPANs)_`q*O zCpd$p-D;ytWIT^+xISSL^9lJ`p7a2OY-j}1Pl^S+R~wvq(^=Twp>u;X?S^!h>{{VO z7?9swwi|gpPQl)K6{5;J5BsmYf~{{nqB?~@Bh~6fOD$W@j60A3kwwJM6Bf!8hqqiq(90Bnt_$x$+^tJ7Mq3Vw(q(8 zK&7$GKo+g|Z!jmS0ADJjEkh68f9Roys2nfVp+kq3mzP0KIHc%4sK~Mq5&TnlV{>Az zeO;e(YD5B8(+(?@w6hg)LCLiiS0uGWRLa2*(*@(_g%@66^ueJF5wRTNNE0B;rONSK zmaZiS5EwK&nf)Y9F>ZJy!rjP_#1qv9au$}CwU9VC&C6X{#+0zbCAlj^f=a;&I`?+l z!ouA4WwH#`j$ug9gJAR|xfVePN+T>qYIJN=JDvIE-Mf|!9F#*;V2J_P(t$Fd0dt{5 z;@F9I;9*9ehaY|zQerY)dF7S3#X$w=3K|6|se=CUKM)9s^X91pXvWJX5y2~8`Sg+d zE54mcn%+hX7(&+K7TM=Eu8{&f_{*Ms_E{_k#!UuIm=G}xa+4>7QN#kxVRzA}+65e0 z6gq^8xFjglB8fca)+$ltRBnnx3s58*pgLKZU+8jrSg0n|mnf)B6Ut5&{>YA}>pOQW zH+C$um`(A~8J5I25(IfmIR(npe$XBN+4jAA_s-_x2M=Qa zBag@+raMGL9wSc>k@5uK=QhLvG@`Xgq&8v39aJWT)R(xKxuW@JQsUN~PESPd;vSyV zaG_G_*SiQ)`!iJ=wNaDgx`ajUHi4}GWL?WC-xTe+AQ9k1YCP1su(?;MIa97n&vOGg zLIDS9*s}ecGfc5z5h(JfVq5$PGVzcI^vwdeMfQ!!42zm?g0&2Y^nx?SHM8+F$Hrcgh>TuQv?7?>g4%i1`#=OggiQT9>If@oP)^Ok_^wJ@?=wm;G9vj6*hL8#F+x9da;0a`lH^16SNFz z{$SkYeZMBJpL3gRMIC%p(+laL2|OiXs}5&jv4f7c*vIh#0j~q%;m~H2bv-I>7%!RD zYCzFQqA3qBa)v*9aRe5<9t`mjGxB3rdb)`3O2)bB1R8oTHm27GMDrdhh_-|*a>?H< 
zq$npZRkk?ZQ+WYs!)Ov+DPTMQzZdK#X5F3I158sQKmqiK=m}`ijp^Ia#LRS6p(v*W z#f<`D#zVZSp$w6!-~`FJdnZ?sTH(g`(H_}TekYdZ%wZ@}#5SM)iUWds;!buCbw)uY zP3KO|Fzr#w??0Y45G&euBGr=a%0(o1BJ8T@j$lmf@W&a{8<&EA>KcrxLCfX^0*DZx zKfma^?$?1R_-RFD3V3KJXK|_2sD2-KJmm8_uqg2;lqdVT#yZ52*6Jq>m+hwk;}5Ggk(@Cds3*Uli z+R(7gatTRfLp2a#3hqflLz-IqdvFHO66%Np=IS*NNre!KVwzuylVwnnN5?ARAZ8W; z!Z~_^inN%behVRHMRKb)VG9eKZ3P6VgI|>typ6H1jW<=9Erhd`E`thbbHFJi0uLsM z6f7f_4CTPaC67a5PW=Ew{A|%9pKhI93=F>|ER}1BIAlV&GC7FQXk5v_42Nkdy*PB} z&}TmL8Q3z2%OfDqapP$bF+`)d_t4qch!T(& za3Q+}C)HqkLkHDHPB>eD@d~`yNfrp6RO?WRuyY>$$onw~ErDcei5>+HLliuT1Q2z@ z4L9%6&?fR+f2!7B-Xpp#QcEO^QH(@RlB zDMhv61C8~5;xXLNZgyewW)&=l{$zdP5B}f}B4+?bW{O07MwX+A0xdDZz*`avvtQ9A z>jVJSAVb%D%QVczkv}#v%2JftprFPx(J?bE*+2kiiorV?cl4to9ipG+lx7(6d)PI! z)Yy<8P~Te9zy+>UstC;7!~#MqFd8hlmyY0C{m4f?LP5NZSW_(4VisO=%{8EfzQ^Da zl_ajNRKOKP^fWb4Z+aCi>2D+pNcTjyNt~QiTnvs86tY&NQ-LK2000KqxP=@+kx;E9 zvPkpdi!VY!`0dl5{xnZX%!)T?L_tE-8{jyOYnVH(7zN;l{ zZ1ql{WuHA<3c3cXeK6Ls92z=Tov^;*$eSp$*&~S! zOzaR!$v#nMgkk<9K=d$bjjQnNJW6$`)TYz(1;P<8 z9u;GatUhN^*=S%2j2exw;ZgCR_!~qI<|mC?gaT8aD(N)!z-MMclPXoppHdS}WGojH zE)co6l7ed}l8Z!lBmgdwF8zHkFc6g8s%8doz5T{PdyLV19Ky58D(^;W=BGVFqsfTF z2$_{&jM+1{RZW-D-xCaE@)6MhQK+kEEDN}7Dux4@3APlF*>PXv17GXVo8NOa*?lN= z4pbbrcu_e-&uS!(@8rOH%OWsiB3Bs>{L=^3wL+kA$P;{hG6(OokSzIsRC&CC>4lD zP6_oLot#$ge4r2P35m|nh^U)swZ)?hg_`DZImB&BjQUk=IZJV?hSrC2N-E--40Gyz z zVundR5(!~S!u`DDiz^Ki`H%njk7L&!;zzb@gOZb$+($PJAeLKrfc8n!v{`=@O}I!C zGa7@Dr#Ju-$u&lMq#-$!P=%QG-p{2TQIDWhD+D#IiX%-0!Iu39>IHCWDkV(uKx81Q zdAe&spWCtHPsfRgO z)B>pP*tDutd-;z*1|;|2@$$o&xccf21++ATQ5*5-NPx}M^zOUw=C!-qZo3Vqr|P$| zM%xl0Prw{jax_6YAvY&@gczScYuV-w>XJn^ztf%B%Vg4}VbD_p?_2N+#Sk5avtCDr z3g$4gH6egRxcJ2uY(ha>>bbMnmZlV~xIy_Gsf{bsbyv@O-;GI~2|KUq_eA1EI{qrg zq~ASIFEH;w2=|#faPpx;huEA5=N~z81pF|IU=oN~Nlp@RxKl7nQ84C#Q6uRyLF14S zlF|bb;YbM*g9s6s;(A)LmdyePRb4G4`1<&l~K>VCglO!kVBS&C=ro)Kt}5;<>rXM!w?Q}V^M@dC$G5Tia-6+KP4T4QlzY-$P;5wYJ^M$x0f4_ zr}?VYbpfF+$Dl<-4nhULGpj*@AhFWl>F`9YxJuwxzB@OZb{1oeaZ371$OA%nL7J?{ zAr!eTs#D8yFS8~VFbWtZGtz+`6Cu6 zTwQ@Z(855F#~spBUufOHq{`z@TrPF|!9RbqYNu|Qd(BnVawkc`($%*CpLK}VEj z*EZ&W>XCwTOR8J_6S(w2gFESZ14D@v^QQ$19>xKwl?jT2JgCo}Wgt~sfXADJEVn`M zg7?%=5ul5BgT}1Mi9 zcobLr{)~I@FC2ne*|a9I{zM4kc&PH2U!?O_I7yjAJL(Wxkvi9cut{8zL6b>CRRmVL z)I}`7G|WVcGFimoBBNKeT#F3FggqvU0i{aYhaB|rNE9?|o1Tp|n0fLf_48llk;m4g# zPDl(L36Ya*1k$6nh%=aZ7ZOMUrL2r-bhZhsh>&dYNv$K5c!6CBk10aCn)HG~V8G?C z`_OLk5ldCtAdBKkp*5o>DhV1z-(;TTZp!}9(p+TV4=fj(TZjh?_9IAqf?m_IZj3I+ zID*+gV9N{cenuIzCHNsFXNVdo6qbWapW`AV64af7(2>LyvgLT<{FGNEta5{bN_{tB z$_Y{0_l#9jZBVLpLt?-dIf&=Vigby|#!x)viLE{UB9xX(^;Rx|^@U7oPkS@}MDED9 z0jyMdrk2@S*S9fq2l=E$4XRjk<}tTcNc;to3_#K~Id?>Wqo~K#s5<~7@N!N+xOBNF z7$~%XDnnnDZ)fU<(b+e|$Sn293O$F`@luyY0o;z(Ne~I5>Xbs1{07FHsqWJ4Ma4i6 zE;K51TvxZn2ZDqEc^4UkVvmKiht6RT;mG7oArszO7abYv+f7K#Az-qTqpYhxz&3w93{;XHNf!$41ww)i(3ZC@Dbxm zmhXcxQdwpAFd$be{=PPy(WhUlyl^G!w>DTiQ)gS7mSnY7u2xDx$lCy0>G?UL7wjf> z&*-rgr8+MBy06hv;stUzs1J%5#1c{cFd2Nrqmn6-DHe%gvgj@qc5P9oQrCrY{jNbz zqkv2t5rle5T~3HgS@cw)RJ0gK;z4AOYtfd<<^qwexatw`r>?62tSXz8ku{2tR!qtg z45a4?Qq+?gq87(|4s75v%wGzsY|54}O+!vn1F7byZQ~UiCG4ko6iOL}z4zXGdDZUo zpZ|Qk*sdn@ap5WU!rYVtP<|3|dBKVFPT-`z7&5wyQ$gCAV*`plTW$m*nMEJ`S2C-a zvmr**rX?T}D9H@a%Y@ns5pF+@sbD0aN@RkCAZBaYzTngAnA`GwbG9mKA{U!e1fiyO zNWKhC4I+k5dn7kED+3$(8v4$1oYFuVo~fH|x(QqJ```cmXFvN{CV??I427t-=U51f z`e|0>Xcg3`#XKz`WgUfz>58x-#H0|`n8kq<)f43cBqjjBLtBJsod|OnoM^Zg*)bt= zo%%#5U4hei4GAnK9kwpSQ?6umb8r-qh*O2<2dWHuML`hl7^+l6d@k{;+7FZ9X1}K0oAF7 zJ$DN+)Ljlx$NqZHu*fCh70E1#5f{HfR)kl0ghD>um0ZrIr$DZ&1dPf;xN%Q2o@N$n8qMg>ml48tH76PD&Y%Q z#a@~z#g$uzm0NDP1?Lp5#vlIhhm1R+oya5cukj7fCb&)Y0b3Dh6huuGeGn_IMV=t7 
z%DCLj!<;K3tZbnkvn~ohBiDQGxrcXdzVxLp@ia(7{%Ac4s{5i*Xk+tXV$sy#v|WaN z+e_d|iJ%o)=!eoUX5c&L4*EHr&o{6~jn2xhA9EJ18gpq5WDkN|zP!`O zYgN={1_&*IfdQE*Dvianu;#UI(v~a7nDG$v81~K~|${ptc$-I@tQY98L8!50KAZVa5Fi##9eR&#^8!Spq120WAkFgiqpHxDr2Sw{N3-}^lhBRh;&)}?eR`V~(j zu9>JbnlX-`IS7}jj#Belwnx!w3Mi!;Wsn|~R&L0Lnna#7I7o~+rBPZhGbobd>7?2a z`qiAI8zNeXQ;U|GkvpWj)Ufi?5xol_>d8q2h7GwnfDRjggnTNskdhe4A;P1>hYz#< z0qvk5Mj%`MgVj~LL%4~fDiVm$-F5VGPa?DEmT8x8r6)vla`}caK!_5is(g32)sYOo ziy&_U#b_p43fo(a0x56ixk|M|WCn#rYg71gN}@aWQkY6E%BEIQ#m+E&yrCwQG6*tJ zL`UUa7Yclqp3IP1EymNh#z4HTlJZT^3raB#!Xi4Q8KbO#OhhDi^uooclsm4#DkkS~ zzLFtsNlC$Y$h8%zm;PQv3_!b(wWu?0x2m5n26P}VBgv32v#r%uSH|5q9t|^~(d&1c ztxk>8Y?>Tur$M6N~KO?nGJPqz^-5PIKFtX^4 zJ9gQkBjC*VpZ+hnNOjfM#3 z&UhCXr{sWT*zXT3Z@s;8^yqQDL8cLd&l%MFqb}pZ-hI25m(VZ`&A!C0*5z()<|6lr zH4vW%O3H*BS)b4ejkn1NRfnRFB+Gjd2Pk8O4yyzD#B|wv@9b8V6Gg+W++3AV#Za@ zM}|UK2tUZ0UjAiN4~?KX9v`&g2r?eY!CWE-xAIs~gFI_``^+r{mH;{QllFN@F;RDD zC@0IF4MM1b=^QWMRerkQKnHX8bA%@m54S8=-l*je1H4rD@|e9-DyrfNeweW{0aKms zWK^DO%Q+PRDjF{^C*X?5xyGr$6_5A}$*7QagF2)TdE}8tcw_Vr|L_lE_L5uV5%CD* zjGhuHgUxNuAT@0=F+UdlufRIhPXz#@MySyS_{k>rr>N+4byenil$}VRx0es zneB~=Z80p!wmgh4;H4s3*D5CW6OO;x1S=tUb@&g1YG@?8&2nlyEjxvE!<>JK_ z&dO(q^!`Wj0Lk{X6*(oAMPO7Dbs6j-qTZhIT%D&PIy6DAnu|5@r<8Oudof+7W`=s< z$bypzMA0B@^u1_De{(uTh!fZlbolULCdN$pKK8MXg-#=#vhnh7E?u&U7(F7qxAukySQ<$SvvLXKb4Ag?q9C()0^m_gQGYy`)||Um^fY(DIWsc{ znP`Ig8F$cEx88bd3`ry+3&HIXAtg8@gOMXVqN;qGBPfZ$soCA*du-nY!6Y%}Ks%QE z0aJpnT)oi%o#U}Vi;GD#Y+~Y#LMv%7@ zlmx?^i)4E=fk>ko-?&3&81>+4DvF086vU%<`3inM@rh6HS--pPx(jCz%U$FNao`HB zX3!h8`D)$}7!K-jNT?8?z%@1Q+pRB18N|w<5}{Gti|DK0|NY<3<)Bx&N2KQ>rl}jF z&`|VR~}%vnm|wR@wx=vsNY2fq7ayp1)1ZqvXz7SX0IX*5fOaVDsLd& zdFP#2HHQuzip&Khx&vcDO|JBXK#@EiMgDbH4K1k)Vt*ilpotJ<1ff)fR0?zkcY<(h zc&I#8MSih?C`cs=i8>;qD5*h5w1(ms8{Bxc$Q&i{Q;E1TsPQTizyq_uR%h_}`y^c4 zSlX$8?D5YGLxYw?paPiaxTiYs5*tj%yC0A?vylFYLW+HPI@c7p;(#3&aJG5 zFsP5^kmd-2Eh;|WYswzgst~atT!HRL3y_py9sDL@q6<(JjTdHytBvaPXxfS@B9!Wi zseGhmN(n7Rp5!CW&&WEhQ?bZci~&5QZ~>5!xuJOqZ3vJr6CF+4=TkBBQ=>)ML9q

    BnDx?UiwVh!x9t)ogb9y&=a-E-vr5>gvNwS6o?Xbway_7?vv> z*P1fvCY5KOJM!`?Z#Ehoolb=NSjK9LgDmDdjccztv~TY+MgSvbTY5{HM{I^Vw!Gsb zKD|=;<~P3c#+z@``arveJJ^$h-muZu5^{gg`@|58n0A(~ms) z)*El=Gx6TSdaQj6r645#7NM-P&#xLSzFSAi z{?m_PUO!K8BPC~WzZvOc0Eqf!4ai3%mynL#P#ouie(ZbVAV2n5lPE2nfKzitqftjR zO<83QU~(neZXhs<8j2p0k4EK9F*7iPd&`eKBd&v?gQqep_~pjSpvQv%VO<|xMgy?` zW8;!GP|z;PD$aVkwy4krUkmW+`f4^tn0g%;NZ4S9Ru#tieG@L9`P#@ zxs3#O0j4=u!0yj(Vk8-9VUr7fPfmms?4C*yLCoT**w@*JtgE{)aAc?D2V?;+GVURv z&L40bEXjm)E}T@v5lr@T=+p_WPNy2SnKFrhp!`HHe%SG2ChGbWxopcK5mmqhL8PaC zc6Bl1(;Fyyz1|Z=R(*7|AQZ>vW&CJABNHP!v91?%EqaGRNrd{8qsW77M(syM;W01%86 zKT(~AJlU2~kf!tQk|Rfs00|pOu#+MYPzjNTNPwq0P0Gl_ZKQLZ2E#1o1tc*j-5#^Z zX95NpA+St967_W$HaGBAh?FngtxxI=j-F!>2x>z`T-xMR?^KCi0MTX;$S(BQP=zow zZ(*>YN4w^5Jx9?FnbuKGWl+*6QPAn`Wp)?EtVN(g?pN+U7$aWZhQss#3bF|fH;1Y% zrp*+O%;eu9xJ^@21g*k__?-BOBe@mX;C03uu&n!jxpf{el>Kl_Nuh!iFR-E@1OtEkgQg5XqM?7WRo107 z7AUjiP8x>Ycd9{v=;xr{{QSt= zsPQ#b3n%AshZ5ZK_5(+(@QMa}PD5-~CB5M4X(Tv=3^`2OjUMV=J#`==?5UGt+-Yh+ zSryar`WI>h{iETDqV&X_Mzc_Y!CZ394`I<$uLCih@UvTlUQ#f8U6LaS00mvd9+L2b zAy9hj#dHT}!S;YBtH9n8e8;>=z4^EUOGkg{J~-qG#@Nqz+ikbu6#{i+h&((3f!X7k z5l(S+-W^jn4IV(ErZ|tBNa{WjbDBrkMAIl#zETqnanK5A8?uvnf;YeV(1X9ivBG9& zwNrRq$riVy5KvqMKqb+{^;ovfZ*JlVj1)|`7!c$_GCAl+a4%!L1`A#S)N*xF7q-86 z5mj4GWY*76AJ}Ocz!Z|1FE>9dkq$pKeqeCSk8FWJSdC^JNe8nqs0COBKLabh;~Hd& zc=R{u!lQ^V?$9hq;2M?#6l6t^lyX@xNq);&Mg`r;KT$G25T=4W>Z+my;XQ&W?`STr z2&@PfMHXSAT4f*cKmOxC@=E@pLx&3nD~A z+a!s=?JNx?T;CsV@hpiGBaB?UQIFnPc&i3Btl8F;1aw%cy|+0TB4hntsHWai3_OYOd7z- zcD9z;d$*+1wP{3qBechNf221Af7-GVG60t0u`bMLP+5$ z8j>r#%@qsbbD#Shn>_Bg;|_!_;}Z(v=#Ltv=uv0blb#Y>1pzoEk7yM=29gep6sVj{yS} z&OkC-ak_`fo4(USl8xcB@tzm0oo+ zIQHm6PyYO8`#R0##&~YDwxc!KUK=kqCfh6h-RZ< zbPO;&sZNBbkfRW+qydmzOp$)?fq(hdt4};$J9TQV*WEFmY{OH0^3?X>aK~iQ$HDTm zpFaJQJG670VN*j&t}NAzg6mEyKl%KhK_E!!b^AGd;35B(|`Vt|6`@w*NGT1#Tf;PP*Si7 z)y0$Z|NAZ!olz3b&G3h$YtKT+)DBUM(I~(me{Kb;5au31EHGfmQ=A$=4wb|( zJ;ff?1`r-#;RyAj{G&VGzo`bw@GHJqz(`_bO#?l1o0FY=@%cn(XH%w$1p z$6%b)H;GC&kdDR$bSp`W4;sT}vEipys3I8?kqd(YgND43E z6h<=!sP-+|@I}-)piZ;I;23%?JI+s4ilCjUSchs_HTaRH1DaT#E0oQ|-1n#<@DTXH z&&Xy3rV5%abMOyNv5s7ZNaSj|!@ijbd>8h3>B=yF^|AZ_T7#~sM|i5vvkf?ofa`2AizlCMGh zXV$a0H8?w-MwvU&crGH(i^)O5;AbF#qkbw)iysT?ccI54^Rn|A{|UZP

  1. 5+gldRXJQO->yQz zRR*en2r)q1nE8PM?Y^RfUfDG%jb{+(kxJe%=#tL|L0aIN)Q4aNHNa2Q;SC+L7>(j^ z(cBT4L#Q+DXq1NddG;V4L%tggPV{D0WBOVmGmENm2O@+>FL-j@Bc6&4m^C&UYz|@z z6(_e+RK=Xg(|lV<-ISvuTj?4!l!L}qgf7u@ZlhAJMJfnURfss9HBl%Yxl)xpj>d$n zs3NkYj3G~vD>9~n(V|Q1sc}u29F#d13rcWR+LX`8uTjsxf*;b@rmWNIC~2rsyLwO# zhFM;ZVy`6+1G``q0Nf}^f~ORcEVzllQUIH>Kv1!05pZze;+fF~^%yO)VQ^Gsi+h1RaPNC8b+d2v@_!9KIF zO&N0JS0ZRBMM8(yIF2etwK=FM%PDyG+KRNCp-QX8f<`Ik3f-&=MI}m9JH{0i(sv$_ zh~57$2y*O#qqRoHYLojX0A+{2%~yHhN)TP|izYLII56 zvj-?J8ZhQ?t{rS(pA?JoojSfwb|Q9mqX8!4*4J}So-1@zsTa&tFOMwQ*u%Y z#F9TML}4p3D_;pMMox)^3=Wt4i%NV=L@z%IctW7?Uo-ao*`NJc9JU*YzzHOP#Fdmt zjNXw#5NHLc5qENW1RPq$Rlt6bJQ}MS?z0qH7LhhYQ{Hnkoyq5+Nc0MwmSk4w2)`k{ zRCbl1Pfmt@+D;5qG)xNVYMLKKxOxn;q_d4cErlXC8#=4VO+@9SNa5|7&N`Q91ns~E z8A#z9o4m#H5C8BFK|xZ$TZ~GYRirYmIo%j0wIvoUaum-3YKxyiB!jhgkzVA}UtkF3 zK!lS_e({T6{LSC|4L)KZ4A_D?K&q7+ zjG@H(B`xaSh+T6aXP}6V0|vK{hHTI%I?hwv#2n7{wXc1R6o^P!GG}VK;kXziv9x{w zq3WhVrv3{$5}pBy`J~2ffi-uRnax zT?ckA4$qvNZ=39AKR+66tBe<`)mFd1v)$OyXuR|C%SRu7teEvjd9DbqBotBo z(H-}$^(H&_9GKLbjfL&2gHf$Bhoh}Ezc8#fTido@cHq$KZ=ZPLxi|2#$@rALuR$D9 zM-2nWFMsu9vo*JKd2hYe;d2z&lc<3Cxka}8u^(wPY2b7_aQUI9pLy}6mrr4~7Fs81 zdqJWgkluLsH{bfdzj*L?k-{@=+- zTqhbiv1hR}?SJMQtt{>JpTF~tpMCq=c*&P1qq*Md(qy!wHfeQNTZ6TQ(<{4&u)Rg9>{wyz7(IfNo`VrOUT~@%S;}k|%M5^P<{ZtW!h1Q-M4qwk6_( z5*cKufJ96SP|!bJX*EWtj&)bgG}u^LODpOanzeenxnq~Uw96(ZD#5Q>;#;z zr>L|$X4}LmYy=u)hbrC*tf1j=u(*ApQ=6xaW@QfGHEW@}sdah{OEHba&)l7?o@Ns| z=OXZaCxa8ZOnYIOHE!EeskhA`08sQ@wNMW`Tk)O@R4waT)n5PTJN3zmR}uI&j^0qE z0=>CBzfHhn0ypRBu2_fSI(HQFnvKSl{&y-I;Iq0C_-$zXpAo@}91+Yjbga zN2jtds4pk3?<#0OorEQ zl&hbXXQ0;v4@;BJyvw)1iKLi=>=c$2qG>$ea1<6n9fYI9@Oiz#$%2}i<9}gSBCqc@ z*uFHJ_Z%3O(P5yN!p&Ci^jZ&GaU=~FT+lj0hI6TUy!%d#$egTCU@^J*L%}v1Gi9fy zDR+Q>&MWf}YhIv(f|;fP zFdRa$8GuPCcc&b88@jL|gwqr;$hG-K))T8Q%p1XI6EIbQSQC087wRJEUWqqXaGqne zjjN>9YLCY3e*^e)HO0mTSA;>AOG7}*%ofb)@O+HKTpeORFdDVYLl z4?Y>eM!V4zDgz_tf8RJw5)<5*yRt4a)7iM?`j>zCm-t+c9655$HP^&EVJ7FgT3D6B z!wRvS%G?9TC;n0-A4s%0iF9>W+FNT!Qj41h$QKWYFw^6GWKQ-B#S-a&ks-+u9+l(V z!f26S-H%whpKt~f=cgh^@C>esVSkmc0rW=9h%`G z7KymUCamu0j3rS`8DKwxMnAk}(xt`jQmX2#4cS)SLn+iU z5sz-GpMpqQ(G~edF+taLUzavH2Ov;xgCTZa7zg{WIO;1+FXZ* z(kNoWqo@t4K^k-vB!hPBrex?rYvtwwx7_A1r65%oL@XypPQVb<00W>CjSo7-p}}$^ zLl2K6is}{Fz?D=_6ZL}Yl~G5IVAgIiRCM@k6O*pi;H2^aN@&nu>!5!Iqh_Ns7>*!w zI2h=KP>HjcZKid;EdeJFD|jjKbVw%9X^57%hPM*n0;%?D!{>~FCY8)!i^fyz<$kY= z&?)t+2!RJk^Y?%M_xKqZZWwn+)VQ;{$}mL6jH{3oSdd0+m(&JKsH(hWqb*a$k6A0Q zW8bTyLYnwo5d?HJ#H0=!V}wIHFo4iuHn5G67w}y|7ZP)P(+G7^9Ns8(C}lK@(VkdY z90I>F2zq&pmB9u?k$+I%eHP9}+T`ABw$UkKH+gATd1~SauIVIlvd}-tBYS7v5pd2B zGj2B8Ypg@3EpsIDQPhTI7TcVE*jf+>?hp*PNGU7t4{XwGAcvYytY2n!<(u|ZiSaZv zRb&d%kV1fheBl7w4+_#85q$1k;Ta&a`CtOLq`O26@T9WF{Xx~=?V$_25Q!$2SnI7U z6A%*!;4LzuVuT>A(F`8Famc|dg!r#c(wmanv-Q%;gL3_0D|1Kj^TGq*I|G zQ?1_MWH$JgzRS}LnRw54Z9pM;Y&8OFtPvO>z{UmLsui9y@$uodnLe=!zd}m^~22(C~ny)|m zd9RnzLD9D)riF{bl;?M^fZ%>=HiYe#JR80WQaCGhGF3t;SwAtQV>P zCVWinN~l#^&p!M7p{s6oU(prcs<8okrB7g>iJHPwA_Gri(g3L+kY^0rk_d?q!wbgsxo+vr>+ph9){=8-3#fBKR7WWdCN&q?zcDU1eo zSo9{najo83Sia?RU$Ihc%Z_b8NCB>igZ35nE4|atJapgM%1U>zf=1xQPgaoY<32v5 zZl&4njjp}!KkUE$BbC~mOp0b3dkP|Gix0=pP`rEc{=4oT4|@#u^~zdIIN9sO&J6TC z$2s48%jfDlF7q@)Gap!h8ZZt&I%kcQUy(+%YHYR|cz3J#g=-6Gt0WCZJ+|aNb@t2X*41!pEp?gdr+{13Op} zj0&SZlINuc6~RW3E!N=D?6h@AO@lY?nHkoxFI$Z|Rw`~U#%PYMLGoKoyy~zXBtk#5 zkbLll-@ZTSwmb6_gh|LxbBLGR6Md#Gn6u3HFIu2Q6e7nm;^yO6XEA5-xpTD<_@VP+ zLIn_{fAkPvgC@GfIfmcH6bg>Vqdpd&w_Ad-gy&%6FvYDYw7FYcC>g6f^w2|m@r=DB zZs8(aG!d46Lsf~FMKAf&U{w&Tgv2Wl9?d|guf>skbW++-rXCQEhkX=)yx$njbP*ph zHlfb4>$9PhMH43f@}tR=mD7>clvcgN>>QGghJ8rQeiKB2IG_9I>s^d`w?FK3=7l|f 
zVNlFdrJr67fE8>h0-VC&auwuL1;^(vYgM~EdVHS!jzJoCi;OPeSX zLIcWSWlw{L_n%)#{ChB2qDTtB~wpMWxa8OuG`Ygz4_Ynzw>n`e+bAUNPv;Fc2u9f@WHjqANH5PNQ$lkugSRI-OU3^y>Jo z0|y`fVp@g9{83wutlN0+)$fsHOcLtjT^zD5lxZM#(})&VW|}A8{NamVhfPS`=O{9l zY;3Pwc;|1{%GE5ag~S9)+)wI*a3eKQ;l_p2ubg<~`QqN=$tbNn1acVJ-&}g{l^?c~ zaVATZJxiLXcvP?teb;D)v$L1hD~&IG;|~aEs)mkW6WHlWeF4&(i^v!opTLtIt==ID z&p8*j9^3%Mc-S;aGChrSI@^q+xK2b8f1JiT4ayu)LJ*kD4T1wmG!eWG(bdfc>4h#7 zpJK_2F_Qr|><#5fvvWX!ToRi>*wlTroM!pwul$JGw0t2;EQh!$%}Vx zH(&nzZ~uXl&XMU?LX6%h@^YIC=ij?>@k};{OrSz0--sGPHdBi*)x3>z?d7vS-M#<# z?gyVwBnt#*m7{D`op}A^Yu|U [synsets-file] - -Where is a directory containing the downloaded and unpacked bounding box -data. If [synsets-file] is supplied, then only the bounding boxes whose -synstes are contained within this file are returned. Note that the -[synsets-file] file contains synset ids, one per line. - -The script dumps out a CSV text file in which each line contains an entry. - n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940 - -The entry can be read as: - , , , , - -The bounding box for contains two points (xmin, ymin) and -(xmax, ymax) specifying the lower-left corner and upper-right corner of a -bounding box in *relative* coordinates. - -The user supplies a directory where the XML files reside. The directory -structure in the directory is assumed to look like this: - -/nXXXXXXXX/nXXXXXXXX_YYYY.xml - -Each XML file contains a bounding box annotation. The script: - - (1) Parses the XML file and extracts the filename, label and bounding box info. - - (2) The bounding box is specified in the XML files as integer (xmin, ymin) and - (xmax, ymax) *relative* to image size displayed to the human annotator. The - size of the image displayed to the human annotator is stored in the XML file - as integer (height, width). - - Note that the displayed size will differ from the actual size of the image - downloaded from image-net.org. To make the bounding box annotation useable, - we convert bounding box to floating point numbers relative to displayed - height and width of the image. - - Note that each XML file might contain N bounding box annotations. - - Note that the points are all clamped at a range of [0.0, 1.0] because some - human annotations extend outside the range of the supplied image. - - See details here: http://image-net.org/download-bboxes - -(3) By default, the script outputs all valid bounding boxes. If a - [synsets-file] is supplied, only the subset of bounding boxes associated - with those synsets are outputted. Importantly, one can supply a list of - synsets in the ImageNet Challenge and output the list of bounding boxes - associated with the training images of the ILSVRC. - - We use these bounding boxes to inform the random distortion of images - supplied to the network. - -If you run this script successfully, you will see the following output -to stderr: -> Finished processing 544546 XML files. -> Skipped 0 XML files not in ImageNet Challenge. -> Skipped 0 bounding boxes not in ImageNet Challenge. -> Wrote 615299 bounding boxes from 544546 annotated images. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import glob -import os.path -import sys -import xml.etree.ElementTree as ET - - -class BoundingBox(object): - pass - - -def GetItem(name, root, index=0): - count = 0 - for item in root.iter(name): - if count == index: - return item.text - count += 1 - # Failed to find "index" occurrence of item. 
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import glob
-import os.path
-import sys
-import xml.etree.ElementTree as ET
-
-
-class BoundingBox(object):
-  pass
-
-
-def GetItem(name, root, index=0):
-  count = 0
-  for item in root.iter(name):
-    if count == index:
-      return item.text
-    count += 1
-  # Failed to find "index" occurrence of item.
-  return -1
-
-
-def GetInt(name, root, index=0):
-  # In some XML annotation files, the point values are not integers but floats,
-  # so we go through float() first to avoid a ValueError.
-  return int(float(GetItem(name, root, index)))
-
-
-def FindNumberBoundingBoxes(root):
-  index = 0
-  while True:
-    if GetInt('xmin', root, index) == -1:
-      break
-    index += 1
-  return index
-
-
-def ProcessXMLAnnotation(xml_file):
-  """Process a single XML file containing a bounding box."""
-  # pylint: disable=broad-except
-  try:
-    tree = ET.parse(xml_file)
-  except Exception:
-    print('Failed to parse: ' + xml_file, file=sys.stderr)
-    return None
-  # pylint: enable=broad-except
-  root = tree.getroot()
-
-  num_boxes = FindNumberBoundingBoxes(root)
-  boxes = []
-
-  for index in range(num_boxes):
-    box = BoundingBox()
-    # Grab the 'index' annotation.
-    box.xmin = GetInt('xmin', root, index)
-    box.ymin = GetInt('ymin', root, index)
-    box.xmax = GetInt('xmax', root, index)
-    box.ymax = GetInt('ymax', root, index)
-
-    box.width = GetInt('width', root)
-    box.height = GetInt('height', root)
-    box.filename = GetItem('filename', root) + '.JPEG'
-    box.label = GetItem('name', root)
-
-    xmin = float(box.xmin) / float(box.width)
-    xmax = float(box.xmax) / float(box.width)
-    ymin = float(box.ymin) / float(box.height)
-    ymax = float(box.ymax) / float(box.height)
-
-    # Some images contain bounding box annotations that
-    # extend outside of the supplied image. See, e.g.
-    # n03127925/n03127925_147.xml
-    # Additionally, for some bounding boxes, the min > max
-    # or the box is entirely outside of the image.
-    min_x = min(xmin, xmax)
-    max_x = max(xmin, xmax)
-    box.xmin_scaled = min(max(min_x, 0.0), 1.0)
-    box.xmax_scaled = min(max(max_x, 0.0), 1.0)
-
-    min_y = min(ymin, ymax)
-    max_y = max(ymin, ymax)
-    box.ymin_scaled = min(max(min_y, 0.0), 1.0)
-    box.ymax_scaled = min(max(max_y, 0.0), 1.0)
-
-    boxes.append(box)
-
-  return boxes
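ProcessXMLAnnotation walks the tree with root.iter(), so the exact nesting of the annotation XML does not matter much. The following self-contained sketch shows the kind of structure the parser expects; the element values are invented for illustration.

import xml.etree.ElementTree as ET

xml_text = """
<annotation>
  <filename>n00007846_64193</filename>
  <size><width>500</width><height>375</height></size>
  <object>
    <name>n00007846</name>
    <bndbox><xmin>33</xmin><ymin>140</ymin><xmax>412</xmax><ymax>531</ymax></bndbox>
  </object>
</annotation>
"""
root = ET.fromstring(xml_text)
print(next(root.iter('filename')).text)  # what GetItem('filename', root) returns
print(next(root.iter('xmin')).text)      # what GetItem('xmin', root) reads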
-
-
-if __name__ == '__main__':
-  if len(sys.argv) < 2 or len(sys.argv) > 3:
-    print('Invalid usage\n'
-          'usage: process_bounding_boxes.py <dir> [synsets-file]',
-          file=sys.stderr)
-    sys.exit(-1)
-
-  xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
-  print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
-        file=sys.stderr)
-
-  if len(sys.argv) == 3:
-    labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
-    print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
-          file=sys.stderr)
-  else:
-    labels = None
-
-  skipped_boxes = 0
-  skipped_files = 0
-  saved_boxes = 0
-  saved_files = 0
-  for file_index, one_file in enumerate(xml_files):
-    # Example: <...>/n06470073/n00141669_6790.xml
-    label = os.path.basename(os.path.dirname(one_file))
-
-    # Determine if the annotation is from an ImageNet Challenge label.
-    if labels is not None and label not in labels:
-      skipped_files += 1
-      continue
-
-    bboxes = ProcessXMLAnnotation(one_file)
-    assert bboxes is not None, 'No bounding boxes found in ' + one_file
-
-    found_box = False
-    for bbox in bboxes:
-      if labels is not None:
-        if bbox.label != label:
-          # Note: There is a slight bug in the bounding box annotation data.
-          # Many of the dog labels have the human label 'Scottish_deerhound'
-          # instead of the synset ID 'n02092002' in the bbox.label field. As a
-          # simple hack to overcome this issue, we only exclude bbox labels
-          # *which are synset IDs* that do not match the original synset label
-          # for the XML file.
-          if bbox.label in labels:
-            skipped_boxes += 1
-            continue
-
-      # Guard against improperly specified boxes.
-      if (bbox.xmin_scaled >= bbox.xmax_scaled or
-          bbox.ymin_scaled >= bbox.ymax_scaled):
-        skipped_boxes += 1
-        continue
-
-      # Note bbox.filename occasionally contains '%s' in the name. This is
-      # data set noise that is fixed by just using the basename of the XML file.
-      image_filename = os.path.splitext(os.path.basename(one_file))[0]
-      print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
-            (image_filename,
-             bbox.xmin_scaled, bbox.ymin_scaled,
-             bbox.xmax_scaled, bbox.ymax_scaled))
-
-      saved_boxes += 1
-      found_box = True
-    if found_box:
-      saved_files += 1
-    else:
-      skipped_files += 1
-
-    if not file_index % 5000:
-      print('--> processed %d of %d XML files.' %
-            (file_index + 1, len(xml_files)),
-            file=sys.stderr)
-      print('--> skipped %d boxes and %d XML files.' %
-            (skipped_boxes, skipped_files), file=sys.stderr)
-
-  print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
-  print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
-        file=sys.stderr)
-  print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
-        file=sys.stderr)
-  print('Wrote %d bounding boxes from %d annotated images.' %
-        (saved_boxes, saved_files),
-        file=sys.stderr)
-  print('Finished.', file=sys.stderr)
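Going the other way — from an emitted CSV entry back to pixel coordinates — is a single multiply by the size of the actual downloaded image. A small sketch, with an invented entry and an assumed image size:

import csv
import io

line = 'n00007846_64193.JPEG,0.0660,0.3733,0.8240,1.0000\n'  # invented entry
name, xmin, ymin, xmax, ymax = next(csv.reader(io.StringIO(line)))
w, h = 640, 480  # assumed size of the actual downloaded image
print(name, int(float(xmin) * w), int(float(ymin) * h),
      int(float(xmax) * w), int(float(ymax) * h))
# -> n00007846_64193.JPEG 42 179 527 480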
""" - """TFRecord of Example protos.""") - - -class Dataset(object): - """A simple class for handling data sets.""" - __metaclass__ = ABCMeta - - def __init__(self, name, subset): - """Initialize dataset using a subset and the path to the data.""" - assert subset in self.available_subsets(), self.available_subsets() - self.name = name - self.subset = subset - - @abstractmethod - def num_classes(self): - """Returns the number of classes in the data set.""" - pass - # return 10 - - @abstractmethod - def num_examples_per_epoch(self): - """Returns the number of examples in the data subset.""" - pass - # if self.subset == 'train': - # return 10000 - # if self.subset == 'validation': - # return 1000 - - @abstractmethod - def download_message(self): - """Prints a download message for the Dataset.""" - pass - - def available_subsets(self): - """Returns the list of available subsets.""" - return ['train', 'validation'] - - def data_files(self): - """Returns a python list of all (sharded) data subset files. - - Returns: - python list of all (sharded) data set files. - Raises: - ValueError: if there are not data_files matching the subset. - """ - tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset) - data_files = tf.gfile.Glob(tf_record_pattern) - if not data_files: - print('No files found for dataset %s/%s at %s' % (self.name, - self.subset, - FLAGS.data_dir)) - - self.download_message() - exit(-1) - return data_files - - def reader(self): - """Return a reader for a single entry from the data set. - - See io_ops.py for details of Reader class. - - Returns: - Reader object that reads the data set. - """ - return tf.TFRecordReader() diff --git a/research/inception/inception/flowers_data.py b/research/inception/inception/flowers_data.py deleted file mode 100644 index 022b5234d..000000000 --- a/research/inception/inception/flowers_data.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Small library that points to the flowers data set. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - - -from inception.dataset import Dataset - - -class FlowersData(Dataset): - """Flowers data set.""" - - def __init__(self, subset): - super(FlowersData, self).__init__('Flowers', subset) - - def num_classes(self): - """Returns the number of classes in the data set.""" - return 5 - - def num_examples_per_epoch(self): - """Returns the number of examples in the data subset.""" - if self.subset == 'train': - return 3170 - if self.subset == 'validation': - return 500 - - def download_message(self): - """Instruction to download and extract the tarball from Flowers website.""" - - print('Failed to find any Flowers %s files'% self.subset) - print('') - print('If you have already downloaded and processed the data, then make ' - 'sure to set --data_dir to point to the directory containing the ' - 'location of the sharded TFRecords.\n') - print('Please see README.md for instructions on how to build ' - 'the flowers dataset using download_and_preprocess_flowers.\n') diff --git a/research/inception/inception/flowers_eval.py b/research/inception/inception/flowers_eval.py deleted file mode 100644 index ae3e9dc14..000000000 --- a/research/inception/inception/flowers_eval.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A binary to evaluate Inception on the flowers data set. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from inception import inception_eval -from inception.flowers_data import FlowersData - -FLAGS = tf.app.flags.FLAGS - - -def main(unused_argv=None): - dataset = FlowersData(subset=FLAGS.subset) - assert dataset.data_files() - if tf.gfile.Exists(FLAGS.eval_dir): - tf.gfile.DeleteRecursively(FLAGS.eval_dir) - tf.gfile.MakeDirs(FLAGS.eval_dir) - inception_eval.evaluate(dataset) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/inception/inception/flowers_train.py b/research/inception/inception/flowers_train.py deleted file mode 100644 index 1f044a539..000000000 --- a/research/inception/inception/flowers_train.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-"""A binary to train Inception on the flowers data set.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-
-import tensorflow as tf
-
-from inception import inception_train
-from inception.flowers_data import FlowersData
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def main(_):
-  dataset = FlowersData(subset=FLAGS.subset)
-  assert dataset.data_files()
-  if tf.gfile.Exists(FLAGS.train_dir):
-    tf.gfile.DeleteRecursively(FLAGS.train_dir)
-  tf.gfile.MakeDirs(FLAGS.train_dir)
-  inception_train.train(dataset)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/inception/inception/image_processing.py b/research/inception/inception/image_processing.py
deleted file mode 100644
index fe74f1b3c..000000000
--- a/research/inception/inception/image_processing.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Read and preprocess image data.
-
- Image processing occurs on a single image at a time. Images are read and
- preprocessed in parallel across multiple threads. The resulting images
- are concatenated together to form a single batch for training or evaluation.
-
- -- Provide processed image data for a network:
- inputs: Construct batches of evaluation examples of images.
- distorted_inputs: Construct batches of training examples of images.
- batch_inputs: Construct batches of training or evaluation examples of images.
-
- -- Data processing:
- parse_example_proto: Parses an Example proto containing a training example
-   of an image.
-
- -- Image decoding:
- decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-
- -- Image preprocessing:
- image_preprocessing: Decode and preprocess one image for evaluation or training.
- distort_image: Distort one image for training a network.
- eval_image: Prepare one image for evaluation.
- distort_color: Distort the color in one image for training.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-FLAGS = tf.app.flags.FLAGS
-
-tf.app.flags.DEFINE_integer('batch_size', 32,
-                            """Number of images to process in a batch.""")
-tf.app.flags.DEFINE_integer('image_size', 299,
-                            """Provide square images of this size.""")
-tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
-                            """Number of preprocessing threads per tower. """
-                            """Please make this a multiple of 4.""")
-tf.app.flags.DEFINE_integer('num_readers', 4,
-                            """Number of parallel readers during train.""")
-
-# Images are preprocessed asynchronously using multiple threads specified by
-# --num_preprocess_threads and the resulting processed images are stored in a
-# random shuffling queue. The shuffling queue dequeues --batch_size images
-# for processing on a given Inception tower. A larger shuffling queue guarantees
-# better mixing across examples within a batch and results in slightly higher
-# predictive performance in a trained model. Empirically,
-# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
-# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
-# 16GB. If the machine is memory limited, then decrease this factor to
-# decrease the CPU memory footprint accordingly.
-tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
-                            """Size of the queue of preprocessed images. """
-                            """Default is ideal but try smaller values, e.g. """
-                            """4, 2 or 1, if host memory is constrained. See """
-                            """comments in code for more details.""")
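The 16GB figure in the comment above is easy to sanity-check, assuming decoded float32 RGB images at the default 299x299 size:

examples_per_shard = 1024
input_queue_memory_factor = 16
bytes_per_image = 299 * 299 * 3 * 4  # float32 RGB at 299x299, ~1.07 MB
queue_images = examples_per_shard * input_queue_memory_factor
print('%d images, ~%.1f GiB' % (queue_images,
                                queue_images * bytes_per_image / 1024.0**3))
# -> 16384 images, ~16.4 GiB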
-
-
-def inputs(dataset, batch_size=None, num_preprocess_threads=None):
-  """Generate batches of ImageNet images for evaluation.
-
-  Use this function as the inputs for evaluating a network.
-
-  Note that some (minimal) image preprocessing occurs during evaluation
-  including central cropping and resizing of the image to fit the network.
-
-  Args:
-    dataset: instance of Dataset class specifying the dataset.
-    batch_size: integer, number of examples in batch
-    num_preprocess_threads: integer, total number of preprocessing threads but
-      None defaults to FLAGS.num_preprocess_threads.
-
-  Returns:
-    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
-                                       FLAGS.image_size, 3].
-    labels: 1-D integer Tensor of [FLAGS.batch_size].
-  """
-  if not batch_size:
-    batch_size = FLAGS.batch_size
-
-  # Force all input processing onto CPU in order to reserve the GPU for
-  # the forward inference and back-propagation.
-  with tf.device('/cpu:0'):
-    images, labels = batch_inputs(
-        dataset, batch_size, train=False,
-        num_preprocess_threads=num_preprocess_threads,
-        num_readers=1)
-
-  return images, labels
-
-
-def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
-  """Generate batches of distorted versions of ImageNet images.
-
-  Use this function as the inputs for training a network.
-
-  Distorting images provides a useful technique for augmenting the data
-  set during training in order to make the network invariant to aspects
-  of the image that do not affect the label.
-
-  Args:
-    dataset: instance of Dataset class specifying the dataset.
-    batch_size: integer, number of examples in batch
-    num_preprocess_threads: integer, total number of preprocessing threads but
-      None defaults to FLAGS.num_preprocess_threads.
-
-  Returns:
-    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
-                                       FLAGS.image_size, 3].
-    labels: 1-D integer Tensor of [batch_size].
-  """
-  if not batch_size:
-    batch_size = FLAGS.batch_size
-
-  # Force all input processing onto CPU in order to reserve the GPU for
-  # the forward inference and back-propagation.
-  with tf.device('/cpu:0'):
-    images, labels = batch_inputs(
-        dataset, batch_size, train=True,
-        num_preprocess_threads=num_preprocess_threads,
-        num_readers=FLAGS.num_readers)
-  return images, labels
-
-
-def decode_jpeg(image_buffer, scope=None):
-  """Decode a JPEG string into one 3-D float image Tensor.
-
-  Args:
-    image_buffer: scalar string Tensor.
-    scope: Optional scope for name_scope.
-  Returns:
-    3-D float Tensor with values ranging from [0, 1).
-  """
-  with tf.name_scope(values=[image_buffer], name=scope,
-                     default_name='decode_jpeg'):
-    # Decode the string as an RGB JPEG.
-    # Note that the resulting image contains an unknown height and width
-    # that is set dynamically by decode_jpeg. In other words, the height
-    # and width of the image are unknown at compile-time.
-    image = tf.image.decode_jpeg(image_buffer, channels=3)
-
-    # After this point, all image pixels reside in [0, 1)
-    # until the very end, when they're rescaled to (-1, 1). The various
-    # adjust_* ops all require this range for dtype float.
-    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
-    return image
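A note on value ranges: decode_jpeg above hands out float32 pixels in [0, 1), and image_preprocessing further below shifts them to [-1, 1). Sketched without TensorFlow, for an arbitrary 8-bit pixel value:

v = 200               # an assumed 8-bit pixel value from the decoded JPEG
x = v / 255.0         # convert_image_dtype scaling: [0, 255] -> [0, 1]
y = (x - 0.5) * 2.0   # final rescale in image_preprocessing: [-1, 1]
print(round(x, 4), round(y, 4))  # -> 0.7843 0.5686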
-
-
-def distort_color(image, thread_id=0, scope=None):
-  """Distort the color of the image.
-
-  Each color distortion is non-commutative and thus ordering of the color ops
-  matters. Ideally we would randomly permute the ordering of the color ops.
-  Rather than adding that level of complication, we select a distinct ordering
-  of color ops for each preprocessing thread.
-
-  Args:
-    image: Tensor containing a single image.
-    thread_id: preprocessing thread ID.
-    scope: Optional scope for name_scope.
-  Returns:
-    color-distorted image
-  """
-  with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
-    color_ordering = thread_id % 2
-
-    if color_ordering == 0:
-      image = tf.image.random_brightness(image, max_delta=32. / 255.)
-      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
-      image = tf.image.random_hue(image, max_delta=0.2)
-      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
-    elif color_ordering == 1:
-      image = tf.image.random_brightness(image, max_delta=32. / 255.)
-      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
-      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
-      image = tf.image.random_hue(image, max_delta=0.2)
-
-    # The random_* ops do not necessarily clamp.
-    image = tf.clip_by_value(image, 0.0, 1.0)
-    return image
-
-
-def distort_image(image, height, width, bbox, thread_id=0, scope=None):
-  """Distort one image for training a network.
-
-  Distorting images provides a useful technique for augmenting the data
-  set during training in order to make the network invariant to aspects
-  of the image that do not affect the label.
-
-  Args:
-    image: 3-D float Tensor of image
-    height: integer
-    width: integer
-    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
-      where each coordinate is [0, 1) and the coordinates are arranged
-      as [ymin, xmin, ymax, xmax].
-    thread_id: integer indicating the preprocessing thread.
-    scope: Optional scope for name_scope.
-  Returns:
-    3-D float Tensor of distorted image used for training.
-  """
-  with tf.name_scope(values=[image, height, width, bbox], name=scope,
-                     default_name='distort_image'):
-    # Each bounding box has shape [1, num_boxes, box coords] and
-    # the coordinates are ordered [ymin, xmin, ymax, xmax].
-
-    # Display the bounding box in the first thread only.
-    if not thread_id:
-      image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
-                                                    bbox)
-      tf.summary.image('image_with_bounding_boxes', image_with_box)
-
-    # A large fraction of image datasets contain a human-annotated bounding
-    # box delineating the region of the image containing the object of interest.
-    # We choose to create a new bounding box for the object which is a randomly
-    # distorted version of the human-annotated bounding box that obeys an allowed
-    # range of aspect ratios, sizes and overlap with the human-annotated
-    # bounding box. If no box is supplied, then we assume the bounding box is
-    # the entire image.
- sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - tf.shape(image), - bounding_boxes=bbox, - min_object_covered=0.1, - aspect_ratio_range=[0.75, 1.33], - area_range=[0.05, 1.0], - max_attempts=100, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box - if not thread_id: - image_with_distorted_box = tf.image.draw_bounding_boxes( - tf.expand_dims(image, 0), distort_bbox) - tf.summary.image('images_with_distorted_bounding_box', - image_with_distorted_box) - - # Crop the image to the specified bounding box. - distorted_image = tf.slice(image, bbox_begin, bbox_size) - - # This resizing operation may distort the images because the aspect - # ratio is not respected. We select a resize method in a round robin - # fashion based on the thread number. - # Note that ResizeMethod contains 4 enumerated resizing methods. - resize_method = thread_id % 4 - distorted_image = tf.image.resize_images(distorted_image, [height, width], - method=resize_method) - # Restore the shape since the dynamic slice based upon the bbox_size loses - # the third dimension. - distorted_image.set_shape([height, width, 3]) - if not thread_id: - tf.summary.image('cropped_resized_image', - tf.expand_dims(distorted_image, 0)) - - # Randomly flip the image horizontally. - distorted_image = tf.image.random_flip_left_right(distorted_image) - - # Randomly distort the colors. - distorted_image = distort_color(distorted_image, thread_id) - - if not thread_id: - tf.summary.image('final_distorted_image', - tf.expand_dims(distorted_image, 0)) - return distorted_image - - -def eval_image(image, height, width, scope=None): - """Prepare one image for evaluation. - - Args: - image: 3-D float Tensor - height: integer - width: integer - scope: Optional scope for name_scope. - Returns: - 3-D float Tensor of prepared image. - """ - with tf.name_scope(values=[image, height, width], name=scope, - default_name='eval_image'): - # Crop the central region of the image with an area containing 87.5% of - # the original image. - image = tf.image.central_crop(image, central_fraction=0.875) - - # Resize the image to the original height and width. - image = tf.expand_dims(image, 0) - image = tf.image.resize_bilinear(image, [height, width], - align_corners=False) - image = tf.squeeze(image, [0]) - return image - - -def image_preprocessing(image_buffer, bbox, train, thread_id=0): - """Decode and preprocess one image for evaluation or training. - - Args: - image_buffer: JPEG encoded string Tensor - bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] - where each coordinate is [0, 1) and the coordinates are arranged as - [ymin, xmin, ymax, xmax]. - train: boolean - thread_id: integer indicating preprocessing thread - - Returns: - 3-D float Tensor containing an appropriately scaled image - - Raises: - ValueError: if user does not provide bounding box - """ - if bbox is None: - raise ValueError('Please supply a bounding box.') - - image = decode_jpeg(image_buffer) - height = FLAGS.image_size - width = FLAGS.image_size - - if train: - image = distort_image(image, height, width, bbox, thread_id) - else: - image = eval_image(image, height, width) - - # Finally, rescale to [-1,1] instead of [0, 1) - image = tf.subtract(image, 0.5) - image = tf.multiply(image, 2.0) - return image - - -def parse_example_proto(example_serialized): - """Parses an Example proto containing a training example of an image. 
-
-  The output of the build_image_data.py image preprocessing script is a dataset
-  containing serialized Example protocol buffers. Each Example proto contains
-  the following fields:
-
-    image/height: 462
-    image/width: 581
-    image/colorspace: 'RGB'
-    image/channels: 3
-    image/class/label: 615
-    image/class/synset: 'n03623198'
-    image/class/text: 'knee pad'
-    image/object/bbox/xmin: 0.1
-    image/object/bbox/xmax: 0.9
-    image/object/bbox/ymin: 0.2
-    image/object/bbox/ymax: 0.6
-    image/object/bbox/label: 615
-    image/format: 'JPEG'
-    image/filename: 'ILSVRC2012_val_00041207.JPEG'
-    image/encoded: <JPEG encoded string>
-
-  Args:
-    example_serialized: scalar Tensor tf.string containing a serialized
-      Example protocol buffer.
-
-  Returns:
-    image_buffer: Tensor tf.string containing the contents of a JPEG file.
-    label: Tensor tf.int32 containing the label.
-    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
-      where each coordinate is [0, 1) and the coordinates are arranged as
-      [ymin, xmin, ymax, xmax].
-    text: Tensor tf.string containing the human-readable label.
-  """
-  # Dense features in Example proto.
-  feature_map = {
-      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
-                                          default_value=''),
-      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
-                                              default_value=-1),
-      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
-                                             default_value=''),
-  }
-  sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
-  # Sparse features in Example proto.
-  feature_map.update(
-      {k: sparse_float32 for k in ['image/object/bbox/xmin',
-                                   'image/object/bbox/ymin',
-                                   'image/object/bbox/xmax',
-                                   'image/object/bbox/ymax']})
-
-  features = tf.parse_single_example(example_serialized, feature_map)
-  label = tf.cast(features['image/class/label'], dtype=tf.int32)
-
-  xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
-  ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
-  xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
-  ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
-
-  # Note that we impose an ordering of (y, x) just to make life difficult.
-  bbox = tf.concat(axis=0, values=[ymin, xmin, ymax, xmax])
-
-  # Force the variable number of bounding boxes into the shape
-  # [1, num_boxes, coords].
-  bbox = tf.expand_dims(bbox, 0)
-  bbox = tf.transpose(bbox, [0, 2, 1])
-
-  return features['image/encoded'], label, bbox, features['image/class/text']
-
-
-def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
-                 num_readers=1):
-  """Construct batches of training or evaluation examples from the image dataset.
-
-  Args:
-    dataset: instance of Dataset class specifying the dataset.
-      See dataset.py for details.
-    batch_size: integer
-    train: boolean
-    num_preprocess_threads: integer, total number of preprocessing threads
-    num_readers: integer, number of parallel readers
-
-  Returns:
-    images: 4-D float Tensor of a batch of images
-    labels: 1-D integer Tensor of [batch_size].
- - Raises: - ValueError: if data is not found - """ - with tf.name_scope('batch_processing'): - data_files = dataset.data_files() - if data_files is None: - raise ValueError('No data files found for this dataset') - - # Create filename_queue - if train: - filename_queue = tf.train.string_input_producer(data_files, - shuffle=True, - capacity=16) - else: - filename_queue = tf.train.string_input_producer(data_files, - shuffle=False, - capacity=1) - if num_preprocess_threads is None: - num_preprocess_threads = FLAGS.num_preprocess_threads - - if num_preprocess_threads % 4: - raise ValueError('Please make num_preprocess_threads a multiple ' - 'of 4 (%d % 4 != 0).', num_preprocess_threads) - - if num_readers is None: - num_readers = FLAGS.num_readers - - if num_readers < 1: - raise ValueError('Please make num_readers at least 1') - - # Approximate number of examples per shard. - examples_per_shard = 1024 - # Size the random shuffle queue to balance between good global - # mixing (more examples) and memory use (fewer examples). - # 1 image uses 299*299*3*4 bytes = 1MB - # The default input_queue_memory_factor is 16 implying a shuffling queue - # size: examples_per_shard * 16 * 1MB = 17.6GB - min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor - if train: - examples_queue = tf.RandomShuffleQueue( - capacity=min_queue_examples + 3 * batch_size, - min_after_dequeue=min_queue_examples, - dtypes=[tf.string]) - else: - examples_queue = tf.FIFOQueue( - capacity=examples_per_shard + 3 * batch_size, - dtypes=[tf.string]) - - # Create multiple readers to populate the queue of examples. - if num_readers > 1: - enqueue_ops = [] - for _ in range(num_readers): - reader = dataset.reader() - _, value = reader.read(filename_queue) - enqueue_ops.append(examples_queue.enqueue([value])) - - tf.train.queue_runner.add_queue_runner( - tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops)) - example_serialized = examples_queue.dequeue() - else: - reader = dataset.reader() - _, example_serialized = reader.read(filename_queue) - - images_and_labels = [] - for thread_id in range(num_preprocess_threads): - # Parse a serialized Example proto to extract the image and metadata. - image_buffer, label_index, bbox, _ = parse_example_proto( - example_serialized) - image = image_preprocessing(image_buffer, bbox, train, thread_id) - images_and_labels.append([image, label_index]) - - images, label_index_batch = tf.train.batch_join( - images_and_labels, - batch_size=batch_size, - capacity=2 * num_preprocess_threads * batch_size) - - # Reshape images into these desired dimensions. - height = FLAGS.image_size - width = FLAGS.image_size - depth = 3 - - images = tf.cast(images, tf.float32) - images = tf.reshape(images, shape=[batch_size, height, width, depth]) - - # Display the training images in the visualizer. - tf.summary.image('images', images) - - return images, tf.reshape(label_index_batch, [batch_size]) diff --git a/research/inception/inception/imagenet_data.py b/research/inception/inception/imagenet_data.py deleted file mode 100644 index 0a6d22e12..000000000 --- a/research/inception/inception/imagenet_data.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
diff --git a/research/inception/inception/imagenet_data.py b/research/inception/inception/imagenet_data.py
deleted file mode 100644
index 0a6d22e12..000000000
--- a/research/inception/inception/imagenet_data.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Small library that points to the ImageNet data set.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-from inception.dataset import Dataset
-
-
-class ImagenetData(Dataset):
-  """ImageNet data set."""
-
-  def __init__(self, subset):
-    super(ImagenetData, self).__init__('ImageNet', subset)
-
-  def num_classes(self):
-    """Returns the number of classes in the data set."""
-    return 1000
-
-  def num_examples_per_epoch(self):
-    """Returns the number of examples in the data set."""
-    # Bounding box data consists of 615299 bounding boxes for 544546 images.
-    if self.subset == 'train':
-      return 1281167
-    if self.subset == 'validation':
-      return 50000
-
-  def download_message(self):
-    """Instruction to download and extract the tarball from the ImageNet website."""
-
-    print('Failed to find any ImageNet %s files' % self.subset)
-    print('')
-    print('If you have already downloaded and processed the data, then make '
-          'sure to set --data_dir to point to the directory containing the '
-          'location of the sharded TFRecords.\n')
-    print('If you have not downloaded and prepared the ImageNet data in the '
-          'TFRecord format, you will need to do this at least once. This '
-          'process could take several hours depending on the speed of your '
-          'computer and network connection.\n')
-    print('Please see README.md for instructions on how to build '
-          'the ImageNet dataset using download_and_preprocess_imagenet.\n')
-    print('Note that the raw data size is 300 GB and the processed data size '
-          'is 150 GB. Please ensure you have at least 500GB disk space.')
diff --git a/research/inception/inception/imagenet_distributed_train.py b/research/inception/inception/imagenet_distributed_train.py
deleted file mode 100644
index f3615e012..000000000
--- a/research/inception/inception/imagenet_distributed_train.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-# pylint: disable=line-too-long
-"""A binary to train Inception in a distributed manner using multiple systems.
-
-Please see accompanying README.md for details and instructions.
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from inception import inception_distributed_train -from inception.imagenet_data import ImagenetData - -FLAGS = tf.app.flags.FLAGS - - -def main(unused_args): - assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker' - - # Extract all the hostnames for the ps and worker jobs to construct the - # cluster spec. - ps_hosts = FLAGS.ps_hosts.split(',') - worker_hosts = FLAGS.worker_hosts.split(',') - tf.logging.info('PS hosts are: %s' % ps_hosts) - tf.logging.info('Worker hosts are: %s' % worker_hosts) - - cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts, - 'worker': worker_hosts}) - server = tf.train.Server( - {'ps': ps_hosts, - 'worker': worker_hosts}, - job_name=FLAGS.job_name, - task_index=FLAGS.task_id, - protocol=FLAGS.protocol) - - if FLAGS.job_name == 'ps': - # `ps` jobs wait for incoming connections from the workers. - server.join() - else: - # `worker` jobs will actually do the work. - dataset = ImagenetData(subset=FLAGS.subset) - assert dataset.data_files() - # Only the chief checks for or creates train_dir. - if FLAGS.task_id == 0: - if not tf.gfile.Exists(FLAGS.train_dir): - tf.gfile.MakeDirs(FLAGS.train_dir) - inception_distributed_train.train(server.target, dataset, cluster_spec) - -if __name__ == '__main__': - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run() diff --git a/research/inception/inception/imagenet_eval.py b/research/inception/inception/imagenet_eval.py deleted file mode 100644 index e6f8bac2e..000000000 --- a/research/inception/inception/imagenet_eval.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A binary to evaluate Inception on the ImageNet data set. - -Note that using the supplied pre-trained inception checkpoint, the eval should -achieve: - precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples] - -See the README.md for more details. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from inception import inception_eval -from inception.imagenet_data import ImagenetData - -FLAGS = tf.app.flags.FLAGS - - -def main(unused_argv=None): - dataset = ImagenetData(subset=FLAGS.subset) - assert dataset.data_files() - if tf.gfile.Exists(FLAGS.eval_dir): - tf.gfile.DeleteRecursively(FLAGS.eval_dir) - tf.gfile.MakeDirs(FLAGS.eval_dir) - inception_eval.evaluate(dataset) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/inception/inception/imagenet_train.py b/research/inception/inception/imagenet_train.py deleted file mode 100644 index 3ffb55ee9..000000000 --- a/research/inception/inception/imagenet_train.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A binary to train Inception on the ImageNet data set. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - - -import tensorflow as tf - -from inception import inception_train -from inception.imagenet_data import ImagenetData - -FLAGS = tf.app.flags.FLAGS - - -def main(_): - dataset = ImagenetData(subset=FLAGS.subset) - assert dataset.data_files() - if tf.gfile.Exists(FLAGS.train_dir): - tf.gfile.DeleteRecursively(FLAGS.train_dir) - tf.gfile.MakeDirs(FLAGS.train_dir) - inception_train.train(dataset) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/inception/inception/inception_distributed_train.py b/research/inception/inception/inception_distributed_train.py deleted file mode 100644 index c1a589acb..000000000 --- a/research/inception/inception/inception_distributed_train.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A library to train Inception using multiple replicas with synchronous update. - -Please see accompanying README.md for details and instructions. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from datetime import datetime -import os.path -import time - -import numpy as np -import tensorflow as tf - -from inception import image_processing -from inception import inception_model as inception -from inception.slim import slim - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"') -tf.app.flags.DEFINE_string('ps_hosts', '', - """Comma-separated list of hostname:port for the """ - """parameter server jobs. e.g. """ - """'machine1:2222,machine2:1111,machine2:2222'""") -tf.app.flags.DEFINE_string('worker_hosts', '', - """Comma-separated list of hostname:port for the """ - """worker jobs. e.g. 
""" - """'machine1:2222,machine2:1111,machine2:2222'""") -tf.app.flags.DEFINE_string('protocol', 'grpc', - """Communication protocol to use in distributed """ - """execution (default grpc) """) - -tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train', - """Directory where to write event logs """ - """and checkpoint.""") -tf.app.flags.DEFINE_integer('max_steps', 1000000, 'Number of batches to run.') -tf.app.flags.DEFINE_string('subset', 'train', 'Either "train" or "validation".') -tf.app.flags.DEFINE_boolean('log_device_placement', False, - 'Whether to log device placement.') - -# Task ID is used to select the chief and also to access the local_step for -# each replica to check staleness of the gradients in SyncReplicasOptimizer. -tf.app.flags.DEFINE_integer( - 'task_id', 0, 'Task ID of the worker/replica running the training.') - -# More details can be found in the SyncReplicasOptimizer class: -# tensorflow/python/training/sync_replicas_optimizer.py -tf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1, - """Number of gradients to collect before """ - """updating the parameters.""") -tf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60, - 'Save interval seconds.') -tf.app.flags.DEFINE_integer('save_summaries_secs', 180, - 'Save summaries interval seconds.') - -# **IMPORTANT** -# Please note that this learning rate schedule is heavily dependent on the -# hardware architecture, batch size and any changes to the model architecture -# specification. Selecting a finely tuned learning rate schedule is an -# empirical process that requires some experimentation. Please see README.md -# more guidance and discussion. -# -# Learning rate decay factor selected from https://arxiv.org/abs/1604.00981 -tf.app.flags.DEFINE_float('initial_learning_rate', 0.045, - 'Initial learning rate.') -tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0, - 'Epochs after which learning rate decays.') -tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.94, - 'Learning rate decay factor.') - -# Constants dictating the learning rate schedule. -RMSPROP_DECAY = 0.9 # Decay term for RMSProp. -RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp. -RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp. - - -def train(target, dataset, cluster_spec): - """Train Inception on a dataset for a number of steps.""" - # Number of workers and parameter servers are inferred from the workers and ps - # hosts string. - num_workers = len(cluster_spec.as_dict()['worker']) - num_parameter_servers = len(cluster_spec.as_dict()['ps']) - # If no value is given, num_replicas_to_aggregate defaults to be the number of - # workers. - if FLAGS.num_replicas_to_aggregate == -1: - num_replicas_to_aggregate = num_workers - else: - num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate - - # Both should be greater than 0 in a distributed training. - assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and ' - 'num_parameter_servers' - ' must be > 0.') - - # Choose worker 0 as the chief. Note that any worker could be the chief - # but there should be only one chief. - is_chief = (FLAGS.task_id == 0) - - # Ops are assigned to worker by default. - with tf.device('/job:worker/task:%d' % FLAGS.task_id): - # Variables and its related init/assign ops are assigned to ps. - with slim.scopes.arg_scope( - [slim.variables.variable, slim.variables.global_step], - device=slim.variables.VariableDeviceChooser(num_parameter_servers)): - # Create a variable to count the number of train() calls. 
-
-
-def train(target, dataset, cluster_spec):
-  """Train Inception on a dataset for a number of steps."""
-  # Number of workers and parameter servers are inferred from the workers and
-  # ps hosts string.
-  num_workers = len(cluster_spec.as_dict()['worker'])
-  num_parameter_servers = len(cluster_spec.as_dict()['ps'])
-  # If no value is given, num_replicas_to_aggregate defaults to the number of
-  # workers.
-  if FLAGS.num_replicas_to_aggregate == -1:
-    num_replicas_to_aggregate = num_workers
-  else:
-    num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate
-
-  # Both should be greater than 0 in a distributed training.
-  assert num_workers > 0 and num_parameter_servers > 0, ('num_workers and '
-                                                         'num_parameter_servers '
-                                                         'must be > 0.')
-
-  # Choose worker 0 as the chief. Note that any worker could be the chief
-  # but there should be only one chief.
-  is_chief = (FLAGS.task_id == 0)
-
-  # Ops are assigned to worker by default.
-  with tf.device('/job:worker/task:%d' % FLAGS.task_id):
-    # Variables and their related init/assign ops are assigned to ps.
-    with slim.scopes.arg_scope(
-        [slim.variables.variable, slim.variables.global_step],
-        device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
-      # Create a variable to count the number of train() calls. This equals the
-      # number of updates applied to the variables.
-      global_step = slim.variables.global_step()
-
-      # Calculate the learning rate schedule.
-      num_batches_per_epoch = (dataset.num_examples_per_epoch() /
-                               FLAGS.batch_size)
-      # Decay steps need to be divided by the number of replicas to aggregate.
-      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
-                        num_replicas_to_aggregate)
-
-      # Decay the learning rate exponentially based on the number of steps.
-      lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
-                                      global_step,
-                                      decay_steps,
-                                      FLAGS.learning_rate_decay_factor,
-                                      staircase=True)
-      # Add a summary to track the learning rate.
-      tf.summary.scalar('learning_rate', lr)
-
-      # Create an optimizer that performs gradient descent.
-      opt = tf.train.RMSPropOptimizer(lr,
-                                      RMSPROP_DECAY,
-                                      momentum=RMSPROP_MOMENTUM,
-                                      epsilon=RMSPROP_EPSILON)
-
-      images, labels = image_processing.distorted_inputs(
-          dataset,
-          batch_size=FLAGS.batch_size,
-          num_preprocess_threads=FLAGS.num_preprocess_threads)
-
-      # Number of classes in the Dataset label set plus 1.
-      # Label 0 is reserved for an (unused) background class.
-      num_classes = dataset.num_classes() + 1
-      logits = inception.inference(images, num_classes, for_training=True)
-      # Add classification loss.
-      inception.loss(logits, labels)
-
-      # Gather all of the losses including regularization losses.
-      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
-      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
-
-      total_loss = tf.add_n(losses, name='total_loss')
-
-      if is_chief:
-        # Compute the moving average of all individual losses and the
-        # total loss.
-        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
-        loss_averages_op = loss_averages.apply(losses + [total_loss])
-
-        # Attach a scalar summary to all individual losses and the total loss;
-        # do the same for the averaged version of the losses.
-        for l in losses + [total_loss]:
-          loss_name = l.op.name
-          # Name each loss as '(raw)' and name the moving average version of
-          # the loss as the original loss name.
-          tf.summary.scalar(loss_name + ' (raw)', l)
-          tf.summary.scalar(loss_name, loss_averages.average(l))
-
-        # Add dependency to compute loss_averages.
-        with tf.control_dependencies([loss_averages_op]):
-          total_loss = tf.identity(total_loss)
-
-      # Track the moving averages of all trainable variables.
-      # Note that we maintain a 'double-average' of the BatchNormalization
-      # global statistics.
-      # This is not needed when the number of replicas is small but important
-      # for synchronous distributed training with tens of workers/replicas.
-      exp_moving_averager = tf.train.ExponentialMovingAverage(
-          inception.MOVING_AVERAGE_DECAY, global_step)
-
-      variables_to_average = (
-          tf.trainable_variables() + tf.moving_average_variables())
-
-      # Add histograms for model variables.
-      for var in variables_to_average:
-        tf.summary.histogram(var.op.name, var)
-
-      # Create synchronous replica optimizer.
-      opt = tf.train.SyncReplicasOptimizer(
-          opt,
-          replicas_to_aggregate=num_replicas_to_aggregate,
-          total_num_replicas=num_workers,
-          variable_averages=exp_moving_averager,
-          variables_to_average=variables_to_average)
-
-      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
-      assert batchnorm_updates, 'Batchnorm updates are missing'
-      batchnorm_updates_op = tf.group(*batchnorm_updates)
-      # Add dependency to compute batchnorm_updates.
-      with tf.control_dependencies([batchnorm_updates_op]):
-        total_loss = tf.identity(total_loss)
-
-      # Compute gradients with respect to the loss.
-      grads = opt.compute_gradients(total_loss)
-
-      # Add histograms for gradients.
-      for grad, var in grads:
-        if grad is not None:
-          tf.summary.histogram(var.op.name + '/gradients', grad)
-
-      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)
-
-      with tf.control_dependencies([apply_gradients_op]):
-        train_op = tf.identity(total_loss, name='train_op')
-
-      # Get chief queue_runners and init_tokens, which is used to synchronize
-      # replicas. More details can be found in SyncReplicasOptimizer.
-      chief_queue_runners = [opt.get_chief_queue_runner()]
-      init_tokens_op = opt.get_init_tokens_op()
-
-      # Create a saver.
-      saver = tf.train.Saver()
-
-      # Build the summary operation based on the TF collection of Summaries.
-      summary_op = tf.summary.merge_all()
-
-      # Build an initialization operation to run below.
-      init_op = tf.global_variables_initializer()
-
-      # We run the summaries in the same thread as the training operations by
-      # passing in None for summary_op to avoid a summary_thread being started.
-      # Running summaries and training operations in parallel could run out of
-      # GPU memory.
-      sv = tf.train.Supervisor(is_chief=is_chief,
-                               logdir=FLAGS.train_dir,
-                               init_op=init_op,
-                               summary_op=None,
-                               global_step=global_step,
-                               saver=saver,
-                               save_model_secs=FLAGS.save_interval_secs)
-
-      tf.logging.info('%s Supervisor' % datetime.now())
-
-      sess_config = tf.ConfigProto(
-          allow_soft_placement=True,
-          log_device_placement=FLAGS.log_device_placement)
-
-      # Get a session.
-      sess = sv.prepare_or_wait_for_session(target, config=sess_config)
-
-      # Start the queue runners.
-      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
-      sv.start_queue_runners(sess, queue_runners)
-      tf.logging.info('Started %d queues for processing input data.',
-                      len(queue_runners))
-
-      if is_chief:
-        sv.start_queue_runners(sess, chief_queue_runners)
-        sess.run(init_tokens_op)
-
-      # Train, checking for NaNs. Concurrently run the summary operation at a
-      # specified interval. Note that the summary_op and train_op never run
-      # simultaneously in order to prevent running out of GPU memory.
-      next_summary_time = time.time() + FLAGS.save_summaries_secs
-      while not sv.should_stop():
-        try:
-          start_time = time.time()
-          loss_value, step = sess.run([train_op, global_step])
-          assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
-          if step > FLAGS.max_steps:
-            break
-          duration = time.time() - start_time
-
-          if step % 30 == 0:
-            examples_per_sec = FLAGS.batch_size / float(duration)
-            format_str = ('Worker %d: %s: step %d, loss = %.2f '
-                          '(%.1f examples/sec; %.3f sec/batch)')
-            tf.logging.info(format_str %
-                            (FLAGS.task_id, datetime.now(), step, loss_value,
-                             examples_per_sec, duration))
-
-          # Determine if the summary_op should be run on the chief worker.
-          if is_chief and next_summary_time < time.time():
-            tf.logging.info('Running Summary operation on the chief.')
-            summary_str = sess.run(summary_op)
-            sv.summary_computed(sess, summary_str)
-            tf.logging.info('Finished running Summary operation.')
-
-            # Determine the next time for running the summary.
-            next_summary_time += FLAGS.save_summaries_secs
-        except:
-          if is_chief:
-            tf.logging.info('Chief got exception while running!')
-          raise
-
-      # Stop the supervisor. This also waits for service threads to finish.
-      sv.stop()
-
-      # Save after the training ends.
- if is_chief: - saver.save(sess, - os.path.join(FLAGS.train_dir, 'model.ckpt'), - global_step=global_step) diff --git a/research/inception/inception/inception_eval.py b/research/inception/inception/inception_eval.py deleted file mode 100644 index e7cfc3c39..000000000 --- a/research/inception/inception/inception_eval.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A library to evaluate Inception on a single GPU. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from datetime import datetime -import math -import os.path -import time - - -import numpy as np -import tensorflow as tf - -from inception import image_processing -from inception import inception_model as inception - - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string('eval_dir', '/tmp/imagenet_eval', - """Directory where to write event logs.""") -tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/imagenet_train', - """Directory where to read model checkpoints.""") - -# Flags governing the frequency of the eval. -tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5, - """How often to run the eval.""") -tf.app.flags.DEFINE_boolean('run_once', False, - """Whether to run eval only once.""") - -# Flags governing the data used for the eval. -tf.app.flags.DEFINE_integer('num_examples', 50000, - """Number of examples to run. Note that the eval """ - """ImageNet dataset contains 50000 examples.""") -tf.app.flags.DEFINE_string('subset', 'validation', - """Either 'validation' or 'train'.""") - - -def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op): - """Runs Eval once. - - Args: - saver: Saver. - summary_writer: Summary writer. - top_1_op: Top 1 op. - top_5_op: Top 5 op. - summary_op: Summary op. - """ - with tf.Session() as sess: - ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir) - if ckpt and ckpt.model_checkpoint_path: - if os.path.isabs(ckpt.model_checkpoint_path): - # Restores from checkpoint with absolute path. - saver.restore(sess, ckpt.model_checkpoint_path) - else: - # Restores from checkpoint with relative path. - saver.restore(sess, os.path.join(FLAGS.checkpoint_dir, - ckpt.model_checkpoint_path)) - - # Assuming model_checkpoint_path looks something like: - # /my-favorite-path/imagenet_train/model.ckpt-0, - # extract global_step from it. - global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] - print('Successfully loaded model from %s at step=%s.' % - (ckpt.model_checkpoint_path, global_step)) - else: - print('No checkpoint file found') - return - - # Start the queue runners. 
-
-    coord = tf.train.Coordinator()
-    try:
-      threads = []
-      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
-        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
-                                         start=True))
-
-      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
-      # Counts the number of correct predictions.
-      count_top_1 = 0.0
-      count_top_5 = 0.0
-      total_sample_count = num_iter * FLAGS.batch_size
-      step = 0
-
-      print('%s: starting evaluation on (%s).' % (datetime.now(), FLAGS.subset))
-      start_time = time.time()
-      while step < num_iter and not coord.should_stop():
-        top_1, top_5 = sess.run([top_1_op, top_5_op])
-        count_top_1 += np.sum(top_1)
-        count_top_5 += np.sum(top_5)
-        step += 1
-        if step % 20 == 0:
-          duration = time.time() - start_time
-          sec_per_batch = duration / 20.0
-          examples_per_sec = FLAGS.batch_size / sec_per_batch
-          print('%s: [%d batches out of %d] (%.1f examples/sec; %.3f '
-                'sec/batch)' % (datetime.now(), step, num_iter,
-                                examples_per_sec, sec_per_batch))
-          start_time = time.time()
-
-      # Compute precision @ 1.
-      precision_at_1 = count_top_1 / total_sample_count
-      recall_at_5 = count_top_5 / total_sample_count
-      print('%s: precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' %
-            (datetime.now(), precision_at_1, recall_at_5, total_sample_count))
-
-      summary = tf.Summary()
-      summary.ParseFromString(sess.run(summary_op))
-      summary.value.add(tag='Precision @ 1', simple_value=precision_at_1)
-      summary.value.add(tag='Recall @ 5', simple_value=recall_at_5)
-      summary_writer.add_summary(summary, global_step)
-
-    except Exception as e:  # pylint: disable=broad-except
-      coord.request_stop(e)
-
-    coord.request_stop()
-    coord.join(threads, stop_grace_period_secs=10)
-
-
-def evaluate(dataset):
-  """Evaluate model on Dataset for a number of steps."""
-  with tf.Graph().as_default():
-    # Get images and labels from the dataset.
-    images, labels = image_processing.inputs(dataset)
-
-    # Number of classes in the Dataset label set plus 1.
-    # Label 0 is reserved for an (unused) background class.
-    num_classes = dataset.num_classes() + 1
-
-    # Build a Graph that computes the logits predictions from the
-    # inference model.
-    logits, _ = inception.inference(images, num_classes)
-
-    # Calculate predictions.
-    top_1_op = tf.nn.in_top_k(logits, labels, 1)
-    top_5_op = tf.nn.in_top_k(logits, labels, 5)
-
-    # Restore the moving average version of the learned variables for eval.
-    variable_averages = tf.train.ExponentialMovingAverage(
-        inception.MOVING_AVERAGE_DECAY)
-    variables_to_restore = variable_averages.variables_to_restore()
-    saver = tf.train.Saver(variables_to_restore)
-
-    # Build the summary operation based on the TF collection of Summaries.
-    summary_op = tf.summary.merge_all()
-
-    graph_def = tf.get_default_graph().as_graph_def()
-    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
-                                           graph_def=graph_def)
-
-    while True:
-      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
-      if FLAGS.run_once:
-        break
-      time.sleep(FLAGS.eval_interval_secs)
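The top-k ops that drive these counts are easiest to see on a toy batch. A small sketch with illustrative logits and labels (k=3 is used for the second op since the toy batch has only 3 classes):

```python
# Toy check of the tf.nn.in_top_k ops used in evaluate() above.
# in_top_k returns one boolean per example; averaging them over the whole
# eval set yields the precision @ 1 and recall @ 5 numbers printed above.
import tensorflow as tf

logits = tf.constant([[0.1, 0.8, 0.1],
                      [0.4, 0.3, 0.3]])
labels = tf.constant([1, 2])
top_1 = tf.nn.in_top_k(logits, labels, 1)   # [True, False]
top_3 = tf.nn.in_top_k(logits, labels, 3)   # [True, True]
with tf.Session() as sess:
  print(sess.run([top_1, top_3]))
```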
diff --git a/research/inception/inception/inception_model.py b/research/inception/inception/inception_model.py
deleted file mode 100644
index fedae13ae..000000000
--- a/research/inception/inception/inception_model.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Builds the Inception v3 network on the ImageNet data set.
-
-The Inception v3 architecture is described in http://arxiv.org/abs/1512.00567
-
-Summary of available functions:
-  inference: Compute inference on the model inputs to make a prediction
-  loss: Compute the loss of the prediction with respect to the labels
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import re
-
-import tensorflow as tf
-
-from inception.slim import slim
-
-FLAGS = tf.app.flags.FLAGS
-
-# If a model is trained using multiple GPUs, prefix all Op names with
-# tower_name to differentiate the operations. Note that this prefix is removed
-# from the names of the summaries when visualizing a model.
-TOWER_NAME = 'tower'
-
-# Batch normalization. Constant governing the exponential moving average of
-# the 'global' mean and variance for all activations.
-BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
-
-# The decay to use for the moving average.
-MOVING_AVERAGE_DECAY = 0.9999
-
-
-def inference(images, num_classes, for_training=False, restore_logits=True,
-              scope=None):
-  """Build Inception v3 model architecture.
-
-  See here for reference: http://arxiv.org/abs/1512.00567
-
-  Args:
-    images: Images returned from inputs() or distorted_inputs().
-    num_classes: number of classes
-    for_training: If set to `True`, build the inference model for training.
-      Kernels that operate differently for inference during training, e.g.
-      dropout, are appropriately configured.
-    restore_logits: whether or not the logits layers should be restored.
-      Useful for fine-tuning a model with different num_classes.
-    scope: optional prefix string identifying the ImageNet tower.
-
-  Returns:
-    Logits. 2-D float Tensor.
-    Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
-  """
-  # Parameters for BatchNorm.
-  batch_norm_params = {
-      # Decay for the moving averages.
-      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
-      # epsilon to prevent 0s in variance.
-      'epsilon': 0.001,
-  }
-  # Set weight_decay for weights in Conv and FC layers.
-  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
-    with slim.arg_scope([slim.ops.conv2d],
-                        stddev=0.1,
-                        activation=tf.nn.relu,
-                        batch_norm_params=batch_norm_params):
-      logits, endpoints = slim.inception.inception_v3(
-          images,
-          dropout_keep_prob=0.8,
-          num_classes=num_classes,
-          is_training=for_training,
-          restore_logits=restore_logits,
-          scope=scope)
-
-  # Add summaries for viewing model statistics on TensorBoard.
-  _activation_summaries(endpoints)
-
-  # Grab the logits associated with the side head. Employed during training.
-  auxiliary_logits = endpoints['aux_logits']
-
-  return logits, auxiliary_logits
-
-
-def loss(logits, labels, batch_size=None):
-  """Adds all losses for the model.
-
-  Note the final loss is not returned. Instead, the list of losses is collected
-  by slim.losses. The losses are accumulated in tower_loss() and summed to
-  calculate the total loss.
-
-  Args:
-    logits: List of logits from inference(). Each entry is a 2-D float Tensor.
-    labels: Labels from distorted_inputs or inputs(). 1-D tensor
-      of shape [batch_size]
-    batch_size: integer
-  """
-  if not batch_size:
-    batch_size = FLAGS.batch_size
-
-  # Reshape the labels into a dense Tensor of
-  # shape [FLAGS.batch_size, num_classes].
-  sparse_labels = tf.reshape(labels, [batch_size, 1])
-  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
-  concated = tf.concat(axis=1, values=[indices, sparse_labels])
-  num_classes = logits[0].get_shape()[-1].value
-  dense_labels = tf.sparse_to_dense(concated,
-                                    [batch_size, num_classes],
-                                    1.0, 0.0)
-
-  # Cross entropy loss for the main softmax prediction.
-  slim.losses.cross_entropy_loss(logits[0],
-                                 dense_labels,
-                                 label_smoothing=0.1,
-                                 weight=1.0)
-
-  # Cross entropy loss for the auxiliary softmax head.
-  slim.losses.cross_entropy_loss(logits[1],
-                                 dense_labels,
-                                 label_smoothing=0.1,
-                                 weight=0.4,
-                                 scope='aux_loss')
-
-
-def _activation_summary(x):
-  """Helper to create summaries for activations.
-
-  Creates a summary that provides a histogram of activations.
-  Creates a summary that measures the sparsity of activations.
-
-  Args:
-    x: Tensor
-  """
-  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
-  # session. This helps the clarity of presentation on tensorboard.
-  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-  tf.summary.histogram(tensor_name + '/activations', x)
-  tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
-
-
-def _activation_summaries(endpoints):
-  with tf.name_scope('summaries'):
-    for act in endpoints.values():
-      _activation_summary(act)
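The index manipulation in loss() is easiest to follow with toy shapes. A minimal sketch with a batch of 4 examples and 5 classes (both illustrative):

```python
# Minimal sketch of the dense-label construction performed in loss() above,
# with toy shapes. tf.sparse_to_dense scatters 1.0 at each (row, label)
# position and 0.0 elsewhere, producing one-hot rows.
import tensorflow as tf

batch_size, num_classes = 4, 5
labels = tf.constant([2, 0, 4, 1])
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
concated = tf.concat(axis=1, values=[indices, sparse_labels])
dense_labels = tf.sparse_to_dense(concated,
                                  [batch_size, num_classes], 1.0, 0.0)
# Row 0 of dense_labels is [0, 0, 1, 0, 0], row 1 is [1, 0, 0, 0, 0], etc.
```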
diff --git a/research/inception/inception/inception_train.py b/research/inception/inception/inception_train.py
deleted file mode 100644
index e1c32713b..000000000
--- a/research/inception/inception/inception_train.py
+++ /dev/null
@@ -1,357 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""A library to train Inception using multiple GPUs with synchronous updates.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import copy
-from datetime import datetime
-import os.path
-import re
-import time
-
-import numpy as np
-import tensorflow as tf
-
-from inception import image_processing
-from inception import inception_model as inception
-from inception.slim import slim
-
-FLAGS = tf.app.flags.FLAGS
-
-tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
-                           """Directory where to write event logs """
-                           """and checkpoint.""")
-tf.app.flags.DEFINE_integer('max_steps', 10000000,
-                            """Number of batches to run.""")
-tf.app.flags.DEFINE_string('subset', 'train',
-                           """Either 'train' or 'validation'.""")
-
-# Flags governing the hardware employed for running TensorFlow.
-tf.app.flags.DEFINE_integer('num_gpus', 1,
-                            """How many GPUs to use.""")
-tf.app.flags.DEFINE_boolean('log_device_placement', False,
-                            """Whether to log device placement.""")
-
-# Flags governing the type of training.
-tf.app.flags.DEFINE_boolean('fine_tune', False,
-                            """If set, randomly initialize the final layer """
-                            """of weights in order to train the network on a """
-                            """new task.""")
-tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
-                           """If specified, restore this pretrained model """
-                           """before beginning any training.""")
-
-# **IMPORTANT**
-# Please note that this learning rate schedule is heavily dependent on the
-# hardware architecture, batch size and any changes to the model architecture
-# specification. Selecting a finely tuned learning rate schedule is an
-# empirical process that requires some experimentation. Please see README.md
-# for more guidance and discussion.
-#
-# With 8 Tesla K40's and a batch size = 256, the following setup achieves
-# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
-# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
-tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
-                          """Initial learning rate.""")
-tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
-                          """Epochs after which learning rate decays.""")
-tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
-                          """Learning rate decay factor.""")
-
-# Constants dictating the learning rate schedule.
-RMSPROP_DECAY = 0.9                # Decay term for RMSProp.
-RMSPROP_MOMENTUM = 0.9             # Momentum in RMSProp.
-RMSPROP_EPSILON = 1.0              # Epsilon term for RMSProp.
-
-
-def _tower_loss(images, labels, num_classes, scope, reuse_variables=None):
-  """Calculate the total loss on a single tower running the ImageNet model.
-
-  We perform 'batch splitting'. This means that we cut up a batch across
-  multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
-  then each tower will operate on a batch of 16 images.
-
-  Args:
-    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
-      FLAGS.image_size, 3].
-    labels: 1-D integer Tensor of [batch_size].
-    num_classes: number of classes
-    scope: unique prefix string identifying the ImageNet tower, e.g.
-      'tower_0'.
-    reuse_variables: whether to reuse the model variables; None for the first
-      tower and True for every tower thereafter.
-
-  Returns:
-    Tensor of shape [] containing the total loss for a batch of data
-  """
-  # When fine-tuning a model, we do not restore the logits but instead we
-  # randomly initialize the logits. The number of classes in the output of the
-  # logit is the number of classes in the specified Dataset.
-  restore_logits = not FLAGS.fine_tune
-
-  # Build inference Graph.
-  with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
-    logits = inception.inference(images, num_classes, for_training=True,
-                                 restore_logits=restore_logits,
-                                 scope=scope)
-
-  # Build the portion of the Graph calculating the losses. Note that we will
-  # assemble the total_loss using a custom function below.
-  split_batch_size = images.get_shape().as_list()[0]
-  inception.loss(logits, labels, batch_size=split_batch_size)
-
-  # Assemble all of the losses for the current tower only.
-  losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
-
-  # Calculate the total loss for the current tower.
-  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
-  total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
-
-  # Compute the moving average of all individual losses and the total loss.
-  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
-  loss_averages_op = loss_averages.apply(losses + [total_loss])
-
-  # Attach a scalar summary to all individual losses and the total loss; do the
-  # same for the averaged version of the losses.
-  for l in losses + [total_loss]:
-    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
-    # session. This helps the clarity of presentation on TensorBoard.
-    loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
-    # Name each loss as '(raw)' and name the moving average version of the loss
-    # as the original loss name.
-    tf.summary.scalar(loss_name + ' (raw)', l)
-    tf.summary.scalar(loss_name, loss_averages.average(l))
-
-  with tf.control_dependencies([loss_averages_op]):
-    total_loss = tf.identity(total_loss)
-  return total_loss
-
-
-def _average_gradients(tower_grads):
-  """Calculate the average gradient for each shared variable across all towers.
-
-  Note that this function provides a synchronization point across all towers.
-
-  Args:
-    tower_grads: List of lists of (gradient, variable) tuples. The outer list
-      is over individual gradients. The inner list is over the gradient
-      calculation for each tower.
-  Returns:
-    List of pairs of (gradient, variable) where the gradient has been averaged
-    across all towers.
-  """
-  average_grads = []
-  for grad_and_vars in zip(*tower_grads):
-    # Note that each grad_and_vars looks like the following:
-    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
-    grads = []
-    for g, _ in grad_and_vars:
-      # Add 0 dimension to the gradients to represent the tower.
-      expanded_g = tf.expand_dims(g, 0)
-
-      # Append on a 'tower' dimension which we will average over below.
-      grads.append(expanded_g)
-
-    # Average over the 'tower' dimension.
-    grad = tf.concat(axis=0, values=grads)
-    grad = tf.reduce_mean(grad, 0)
-
-    # Keep in mind that the Variables are redundant because they are shared
-    # across towers. So we will just return the first tower's pointer to
-    # the Variable.
-    v = grad_and_vars[0][1]
-    grad_and_var = (grad, v)
-    average_grads.append(grad_and_var)
-  return average_grads
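_average_gradients reduces to a stack-and-mean per shared variable. A NumPy toy run with two towers and one variable (values illustrative):

```python
# NumPy illustration of _average_gradients above: the gradients for one
# shared variable from each tower are stacked along a new leading axis and
# averaged over that axis.
import numpy as np

grad_tower0 = np.array([1.0, 2.0])
grad_tower1 = np.array([3.0, 4.0])
stacked = np.stack([grad_tower0, grad_tower1], axis=0)   # shape [2, 2]
averaged = stacked.mean(axis=0)                          # [2.0, 3.0]
```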
-
-
-def train(dataset):
-  """Train on dataset for a number of steps."""
-  with tf.Graph().as_default(), tf.device('/cpu:0'):
-    # Create a variable to count the number of train() calls. This equals the
-    # number of batches processed * FLAGS.num_gpus.
-    global_step = tf.get_variable(
-        'global_step', [],
-        initializer=tf.constant_initializer(0), trainable=False)
-
-    # Calculate the learning rate schedule.
-    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
-                             FLAGS.batch_size)
-    decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
-
-    # Decay the learning rate exponentially based on the number of steps.
-    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
-                                    global_step,
-                                    decay_steps,
-                                    FLAGS.learning_rate_decay_factor,
-                                    staircase=True)
-
-    # Create an optimizer that performs gradient descent.
-    opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
-                                    momentum=RMSPROP_MOMENTUM,
-                                    epsilon=RMSPROP_EPSILON)
-
-    # Get images and labels for ImageNet and split the batch across GPUs.
-    assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
-        'Batch size must be divisible by number of GPUs')
-    split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
-
-    # Override the number of preprocessing threads to account for the increased
-    # number of GPU towers.
-    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
-    images, labels = image_processing.distorted_inputs(
-        dataset,
-        num_preprocess_threads=num_preprocess_threads)
-
-    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
-
-    # Number of classes in the Dataset label set plus 1.
-    # Label 0 is reserved for an (unused) background class.
-    num_classes = dataset.num_classes() + 1
-
-    # Split the batch of images and labels for towers.
-    images_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus,
-                             value=images)
-    labels_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus,
-                             value=labels)
-
-    # Calculate the gradients for each model tower.
-    tower_grads = []
-    reuse_variables = None
-    for i in range(FLAGS.num_gpus):
-      with tf.device('/gpu:%d' % i):
-        with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
-          # Force all Variables to reside on the CPU.
-          with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
-            # Calculate the loss for one tower of the ImageNet model. This
-            # function constructs the entire ImageNet model but shares the
-            # variables across all towers.
-            loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
-                               scope, reuse_variables)
-
-          # Reuse variables for the next tower.
-          reuse_variables = True
-
-          # Retain the summaries from the final tower.
-          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
-
-          # Retain the Batch Normalization updates operations only from the
-          # final tower. Ideally, we should grab the updates from all towers
-          # but these stats accumulate extremely fast so we can ignore the
-          # other stats from the other towers without significant detriment.
-          batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
-                                                scope)
-
-          # Calculate the gradients for the batch of data on this ImageNet
-          # tower.
-          grads = opt.compute_gradients(loss)
-
-          # Keep track of the gradients across all towers.
-          tower_grads.append(grads)
-
-    # We must calculate the mean of each gradient. Note that this is the
-    # synchronization point across all towers.
-    grads = _average_gradients(tower_grads)
-
-    # Add summaries for the input processing and global_step.
-    summaries.extend(input_summaries)
-
-    # Add a summary to track the learning rate.
-    summaries.append(tf.summary.scalar('learning_rate', lr))
-
-    # Add histograms for gradients.
-    for grad, var in grads:
-      if grad is not None:
-        summaries.append(
-            tf.summary.histogram(var.op.name + '/gradients', grad))
-
-    # Apply the gradients to adjust the shared variables.
-    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
-
-    # Add histograms for trainable variables.
-    for var in tf.trainable_variables():
-      summaries.append(tf.summary.histogram(var.op.name, var))
-
-    # Track the moving averages of all trainable variables.
-    # Note that we maintain a "double-average" of the BatchNormalization
-    # global statistics. This is more complicated than need be but we employ
-    # this for backward-compatibility with our previous models.
-    variable_averages = tf.train.ExponentialMovingAverage(
-        inception.MOVING_AVERAGE_DECAY, global_step)
-
-    # Another possibility is to use tf.slim.get_variables().
-    variables_to_average = (tf.trainable_variables() +
-                            tf.moving_average_variables())
-    variables_averages_op = variable_averages.apply(variables_to_average)
-
-    # Group all updates into a single train op.
- batchnorm_updates_op = tf.group(*batchnorm_updates) - train_op = tf.group(apply_gradient_op, variables_averages_op, - batchnorm_updates_op) - - # Create a saver. - saver = tf.train.Saver(tf.global_variables()) - - # Build the summary operation from the last tower summaries. - summary_op = tf.summary.merge(summaries) - - # Build an initialization operation to run below. - init = tf.global_variables_initializer() - - # Start running operations on the Graph. allow_soft_placement must be set to - # True to build towers on GPU, as some of the ops do not have GPU - # implementations. - sess = tf.Session(config=tf.ConfigProto( - allow_soft_placement=True, - log_device_placement=FLAGS.log_device_placement)) - sess.run(init) - - if FLAGS.pretrained_model_checkpoint_path: - assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path) - variables_to_restore = tf.get_collection( - slim.variables.VARIABLES_TO_RESTORE) - restorer = tf.train.Saver(variables_to_restore) - restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path) - print('%s: Pre-trained model restored from %s' % - (datetime.now(), FLAGS.pretrained_model_checkpoint_path)) - - # Start the queue runners. - tf.train.start_queue_runners(sess=sess) - - summary_writer = tf.summary.FileWriter( - FLAGS.train_dir, - graph=sess.graph) - - for step in range(FLAGS.max_steps): - start_time = time.time() - _, loss_value = sess.run([train_op, loss]) - duration = time.time() - start_time - - assert not np.isnan(loss_value), 'Model diverged with loss = NaN' - - if step % 10 == 0: - examples_per_sec = FLAGS.batch_size / float(duration) - format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' - 'sec/batch)') - print(format_str % (datetime.now(), step, loss_value, - examples_per_sec, duration)) - - if step % 100 == 0: - summary_str = sess.run(summary_op) - summary_writer.add_summary(summary_str, step) - - # Save the model checkpoint periodically. - if step % 5000 == 0 or (step + 1) == FLAGS.max_steps: - checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') - saver.save(sess, checkpoint_path, global_step=step) diff --git a/research/inception/inception/slim/BUILD b/research/inception/inception/slim/BUILD deleted file mode 100644 index 174e77d5c..000000000 --- a/research/inception/inception/slim/BUILD +++ /dev/null @@ -1,112 +0,0 @@ -# Description: -# Contains the operations and nets for building TensorFlow-Slim models. 
- -package(default_visibility = ["//inception:internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -py_library( - name = "scopes", - srcs = ["scopes.py"], -) - -py_test( - name = "scopes_test", - size = "small", - srcs = ["scopes_test.py"], - deps = [ - ":scopes", - ], -) - -py_library( - name = "variables", - srcs = ["variables.py"], - deps = [ - ":scopes", - ], -) - -py_test( - name = "variables_test", - size = "small", - srcs = ["variables_test.py"], - deps = [ - ":variables", - ], -) - -py_library( - name = "losses", - srcs = ["losses.py"], -) - -py_test( - name = "losses_test", - size = "small", - srcs = ["losses_test.py"], - deps = [ - ":losses", - ], -) - -py_library( - name = "ops", - srcs = ["ops.py"], - deps = [ - ":losses", - ":scopes", - ":variables", - ], -) - -py_test( - name = "ops_test", - size = "small", - srcs = ["ops_test.py"], - deps = [ - ":ops", - ":variables", - ], -) - -py_library( - name = "inception", - srcs = ["inception_model.py"], - deps = [ - ":ops", - ":scopes", - ], -) - -py_test( - name = "inception_test", - size = "medium", - srcs = ["inception_test.py"], - deps = [ - ":inception", - ], -) - -py_library( - name = "slim", - srcs = ["slim.py"], - deps = [ - ":inception", - ":losses", - ":ops", - ":scopes", - ":variables", - ], -) - -py_test( - name = "collections_test", - size = "small", - srcs = ["collections_test.py"], - deps = [ - ":slim", - ], -) diff --git a/research/inception/inception/slim/README.md b/research/inception/inception/slim/README.md deleted file mode 100644 index 36d8b7eb1..000000000 --- a/research/inception/inception/slim/README.md +++ /dev/null @@ -1,621 +0,0 @@ -# TensorFlow-Slim - -TF-Slim is a lightweight library for defining, training and evaluating models in -TensorFlow. It enables defining complex networks quickly and concisely while -keeping a model's architecture transparent and its hyperparameters explicit. - -[TOC] - -## Teaser - -As a demonstration of the simplicity of using TF-Slim, compare the simplicity of -the code necessary for defining the entire [VGG](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) network using TF-Slim to -the lengthy and verbose nature of defining just the first three layers (out of -16) using native tensorflow: - -```python{.good} -# VGG16 in TF-Slim. -def vgg16(inputs): - with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005): - net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1') - net = slim.ops.max_pool(net, [2, 2], scope='pool1') - net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2') - net = slim.ops.max_pool(net, [2, 2], scope='pool2') - net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3') - net = slim.ops.max_pool(net, [2, 2], scope='pool3') - net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4') - net = slim.ops.max_pool(net, [2, 2], scope='pool4') - net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5') - net = slim.ops.max_pool(net, [2, 2], scope='pool5') - net = slim.ops.flatten(net, scope='flatten5') - net = slim.ops.fc(net, 4096, scope='fc6') - net = slim.ops.dropout(net, 0.5, scope='dropout6') - net = slim.ops.fc(net, 4096, scope='fc7') - net = slim.ops.dropout(net, 0.5, scope='dropout7') - net = slim.ops.fc(net, 1000, activation=None, scope='fc8') - return net -``` - -```python{.bad} -# Layers 1-3 (out of 16) of VGG16 in native tensorflow. 
-def vgg16(inputs):
-  with tf.name_scope('conv1_1') as scope:
-    kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
-    conv = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
-    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
-    bias = tf.nn.bias_add(conv, biases)
-    conv1 = tf.nn.relu(bias, name=scope)
-  with tf.name_scope('conv1_2') as scope:
-    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights')
-    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
-    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
-    bias = tf.nn.bias_add(conv, biases)
-    conv1 = tf.nn.relu(bias, name=scope)
-  with tf.name_scope('pool1'):
-    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
-```
-
-## Why TF-Slim?
-
-TF-Slim offers several advantages over just the built-in tensorflow libraries:
-
-* Allows one to define models much more compactly by eliminating boilerplate
-  code. This is accomplished through the use of [argument scoping](./scopes.py)
-  and numerous high level [operations](./ops.py). These tools increase
-  readability and maintainability, reduce the likelihood of an error from
-  copy-and-pasting hyperparameter values and simplify hyperparameter tuning.
-* Makes developing models simple by providing commonly used [loss functions](./losses.py).
-* Provides a concise [definition](./inception_model.py) of [Inception v3](http://arxiv.org/abs/1512.00567) network architecture ready to be used
-  out-of-the-box or subsumed into new models.
-
-Additionally, TF-Slim was designed with several principles in mind:
-
-* The various modules of TF-Slim (scopes, variables, ops, losses) are
-  independent. This flexibility allows users to pick and choose components of
-  TF-Slim completely à la carte.
-* TF-Slim is written using a Functional Programming style. That means it's
-  super-lightweight and can be used right alongside any of TensorFlow's native
-  operations.
-* Makes re-using network architectures easy. This allows users to build new
-  networks on top of existing ones as well as fine-tuning pre-trained models
-  on new tasks.
-
-## What are the various components of TF-Slim?
-
-TF-Slim is composed of several parts which were designed to exist independently.
-These include:
-
-* [scopes.py](./scopes.py): provides a new scope named `arg_scope` that allows
-  a user to define default arguments for specific operations within that
-  scope.
-* [variables.py](./variables.py): provides convenience wrappers for variable
-  creation and manipulation.
-* [ops.py](./ops.py): provides high level operations for building models using
-  tensorflow.
-* [losses.py](./losses.py): contains commonly used loss functions.
-
-## Defining Models
-
-Models can be succinctly defined using TF-Slim by combining its variables,
-operations and scopes. Each of these elements is defined below.
-
-### Variables
-
-Creating [`Variables`](https://www.tensorflow.org/how_tos/variables/index.html)
-in native tensorflow requires either a predefined value or an initialization
-mechanism (random, normally distributed). Furthermore, if a variable needs to be
-created on a specific device, such as a GPU, the specification must be [made
-explicit](https://www.tensorflow.org/how_tos/using_gpu/index.html). To alleviate
-the code required for variable creation, TF-Slim provides a set of thin wrapper
-functions in [variables.py](./variables.py) which allow callers to easily define
-variables.
-
-For example, to create a `weight` variable, initialize it using a truncated
-normal distribution, regularize it with an `l2_loss` and place it on the `CPU`,
-one need only declare the following:
-
-```python
-weights = variables.variable('weights',
-                             shape=[10, 10, 3, 3],
-                             initializer=tf.truncated_normal_initializer(stddev=0.1),
-                             regularizer=lambda t: losses.l2_loss(t, weight=0.05),
-                             device='/cpu:0')
-```
-
-In addition to the functionality provided by `tf.Variable`, `slim.variables`
-keeps track of the variables created by `slim.ops` to define a model, which
-allows one to distinguish variables that belong to the model versus other
-variables.
-
-```python
-# Get all the variables defined by the model.
-model_variables = slim.variables.get_variables()
-
-# Get all the variables with the same given name, i.e. 'weights', 'biases'.
-weights = slim.variables.get_variables_by_name('weights')
-biases = slim.variables.get_variables_by_name('biases')
-
-# Get all the variables in VARIABLES_TO_RESTORE collection.
-variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
-```
-
-### Operations (Layers)
-
-While the set of TensorFlow operations is quite extensive, builders of neural
-networks typically think of models in terms of "layers". A layer, such as a
-Convolutional Layer, a Fully Connected Layer or a BatchNorm Layer, is more
-abstract than a single TensorFlow operation and typically involves many such
-operations. For example, a Convolutional Layer in a neural network is built
-using several steps:
-
-1. Creating the weight variables
-2. Creating the bias variables
-3. Convolving the weights with the input from the previous layer
-4. Adding the biases to the result of the convolution.
-
-In Python code this can be rather laborious:
-
-```python
-input = ...
-with tf.name_scope('conv1_1') as scope:
-  kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
-                                           stddev=1e-1), name='weights')
-  conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
-  biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
-                       trainable=True, name='biases')
-  bias = tf.nn.bias_add(conv, biases)
-  conv1 = tf.nn.relu(bias, name=scope)
-```
-
-To alleviate the need to duplicate this code repeatedly, TF-Slim provides a
-number of convenient operations defined at the (more abstract) level of neural
-network layers. For example, compare the code above to an invocation of the
-TF-Slim code:
-
-```python
-input = ...
-net = slim.ops.conv2d(input, 128, [3, 3], scope='conv1_1')
-```
-
-TF-Slim provides numerous operations used in building neural networks which
-roughly correspond to such layers. These include:
-These include:
-
-Layer                 | TF-Slim Op
---------------------- | ------------------------
-Convolutional Layer   | [ops.conv2d](./ops.py)
-Fully Connected Layer | [ops.fc](./ops.py)
-BatchNorm Layer       | [ops.batch_norm](./ops.py)
-Max Pooling Layer     | [ops.max_pool](./ops.py)
-Avg Pooling Layer     | [ops.avg_pool](./ops.py)
-Dropout Layer         | [ops.dropout](./ops.py)
-
-[ops.py](./ops.py) also includes operations that are not really "layers" per
-se, but are often used to manipulate hidden unit representations during
-inference:
-
-Operation | TF-Slim Op
---------- | ---------------------
-Flatten   | [ops.flatten](./ops.py)
-
-TF-Slim also provides a meta-operation called `repeat_op` that allows one to
-repeatedly perform the same operation. Consider the following snippet from the
-[VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/) network, whose
-layers perform several convolutions in a row between pooling layers:
-
-```python
-net = ...
-net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_1')
-net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_2')
-net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_3')
-net = slim.ops.max_pool(net, [2, 2], scope='pool3')
-```
-
-This clear duplication of code can be removed via a standard loop:
-
-```python
-net = ...
-for i in range(3):
-  net = slim.ops.conv2d(net, 256, [3, 3], scope='conv3_%d' % (i + 1))
-net = slim.ops.max_pool(net, [2, 2], scope='pool3')
-```
-
-While this does reduce the amount of duplication, it can be made even cleaner
-by using `repeat_op`:
-
-```python
-net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
-net = slim.ops.max_pool(net, [2, 2], scope='pool3')
-```
-
-Notice that `repeat_op` not only applies the same arguments in-line, it is
-also smart enough to unroll the scopes such that the scope assigned to each
-subsequent call of `ops.conv2d` is appended with an underscore and the
-iteration number. More concretely, the scopes in the example above would be
-'conv3_1', 'conv3_2' and 'conv3_3'.
-
-### Scopes
-
-In addition to the scope mechanisms in TensorFlow
-([name_scope](https://www.tensorflow.org/api_docs/python/framework.html#name_scope),
-[variable_scope](https://www.tensorflow.org/api_docs/python/state_ops.html#variable_scope)),
-TF-Slim adds a new scoping mechanism called "argument scope" or
-[arg_scope](./scopes.py). This new scope allows a user to specify one or more
-operations and a set of arguments which will be passed to each of the
-operations defined in the `arg_scope`. This functionality is best illustrated
-by example. Consider the following code snippet:
-
-```python
-net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv1')
-net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=0.01, weight_decay=0.0005, scope='conv2')
-net = slim.ops.conv2d(net, 256, [11, 11], padding='SAME', stddev=0.01, weight_decay=0.0005, scope='conv3')
-```
-
-It should be clear that these three convolution layers share many of the same
-hyperparameters. Two have the same padding, and all three have the same
-weight_decay and the same standard deviation for their weights. Not only do
-the duplicated values make the code harder to read, they also place the
-additional burden on the writer of double-checking that all of the values are
-identical in each step.
-One solution would be to specify default values using variables:
-
-```python
-padding = 'SAME'
-stddev = 0.01
-weight_decay = 0.0005
-net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv1')
-net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', stddev=stddev, weight_decay=weight_decay, scope='conv2')
-net = slim.ops.conv2d(net, 256, [11, 11], padding=padding, stddev=stddev, weight_decay=weight_decay, scope='conv3')
-```
-
-This solution ensures that all three convolutions share the exact same
-variable values, but it doesn't reduce the code clutter. By using an
-`arg_scope`, we can both ensure that each layer uses the same values and
-simplify the code:
-
-```python
-with slim.arg_scope([slim.ops.conv2d], padding='SAME', stddev=0.01, weight_decay=0.0005):
-  net = slim.ops.conv2d(inputs, 64, [11, 11], scope='conv1')
-  net = slim.ops.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
-  net = slim.ops.conv2d(net, 256, [11, 11], scope='conv3')
-```
-
-As the example illustrates, the use of arg_scope makes the code cleaner,
-simpler and easier to maintain. Notice that while argument values are
-specified in the arg_scope, they can be overridden locally. In particular,
-while the padding argument has been set to 'SAME', the second convolution
-overrides it with the value 'VALID'.
-
-One can also nest `arg_scope`s and use multiple operations in the same scope.
-For example:
-
-```python
-with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
-  with slim.arg_scope([slim.ops.conv2d], padding='SAME'), slim.arg_scope([slim.ops.fc], bias=1.0):
-    net = slim.ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
-    net = slim.ops.conv2d(net, 256, [5, 5], stddev=0.03, scope='conv2')
-    net = slim.ops.flatten(net)
-    net = slim.ops.fc(net, 1000, activation=None, scope='fc')
-```
-
-In this example, the first `arg_scope` applies the same `stddev` and
-`weight_decay` arguments to the `conv2d` and `fc` ops in its scope. In the
-second `arg_scope`, additional default arguments are specified for `conv2d`
-only.
-
-In addition to `arg_scope`, TF-Slim provides several decorators that wrap the
-use of tensorflow arg scopes. These include `@AddArgScope`, `@AddNameScope`,
-`@AddVariableScope`, `@AddOpScope` and `@AddVariableOpScope`. To illustrate
-their use, consider the following example.
-
-```python
-def MyNewOp(inputs):
-  varA = ...
-  varB = ...
-  outputs = tf.multiply(varA, inputs) + varB
-  return outputs
-```
-
-In this example, the user has created a new op which creates two variables. To
-ensure that these variables exist within a certain variable scope (to avoid
-collisions with variables with the same name), in standard TensorFlow the op
-must be called within a variable scope:
-
-```python
-inputs = ...
-with tf.variable_scope('layer1'):
-  outputs = MyNewOp(inputs)
-```
-
-As an alternative, one can use TF-Slim's decorators to decorate the function
-and simplify the call:
-
-```python
-@AddVariableScope
-def MyNewOp(inputs):
-  ...
-  return outputs
-
-
-inputs = ...
-outputs = MyNewOp('layer1')
-```
-
-The `@AddVariableScope` decorator simply applies `tf.variable_scope` scoping
-to the called function, taking "layer1" as its argument. This allows the code
-to be written more concisely.
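-
-The snippet above leaves the decorator machinery implicit. As a rough,
-hypothetical sketch of the idea (the pass-the-scope-name-first calling
-convention below is an illustrative assumption, not the actual
-[scopes.py](./scopes.py) implementation), such a decorator could be written
-as:
-
-```python
-import functools
-
-import tensorflow as tf
-
-
-def AddVariableScope(func):
-  # Hypothetical sketch: treat the first positional argument as the name of a
-  # tf.variable_scope wrapped around the decorated op.
-  @functools.wraps(func)
-  def wrapper(scope_name, *args, **kwargs):
-    with tf.variable_scope(scope_name):
-      return func(*args, **kwargs)
-  return wrapper
-
-
-@AddVariableScope
-def MyScopedOp(inputs):
-  varA = tf.get_variable('a', shape=[], initializer=tf.ones_initializer())
-  varB = tf.get_variable('b', shape=[], initializer=tf.zeros_initializer())
-  return tf.multiply(varA, inputs) + varB
-
-
-# Under this sketch the scope name travels with the call; the variables 'a'
-# and 'b' are created as 'layer1/a' and 'layer1/b'.
-outputs = MyScopedOp('layer1', tf.constant(3.0))
-```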
-
-### Losses
-
-The loss function defines a quantity that we want to minimize. For
-classification problems, this is typically the cross entropy between the true
-(one-hot) distribution and the predicted probability distribution across
-classes. For regression problems, this is often the sum of squared differences
-between the predicted and true values.
-
-Certain models, such as multi-task learning models, require the use of
-multiple loss functions simultaneously. In other words, the loss function
-ultimately being minimized is the sum of various other loss functions. For
-example, consider a model that predicts both the type of scene in an image as
-well as the depth from the camera of each pixel. This model's loss function
-would be the sum of the classification loss and the depth prediction loss.
-
-TF-Slim provides an easy-to-use mechanism for defining and keeping track of
-loss functions via the [losses.py](./losses.py) module. Consider the simple
-case where we want to train the VGG network:
-
-```python
-# Load the images and labels.
-images, labels = ...
-
-# Create the model.
-predictions = ...
-
-# Define the loss function and get the total loss.
-loss = losses.cross_entropy_loss(predictions, labels)
-```
-
-In this example, we start by creating the model (using TF-Slim's VGG
-implementation) and then add the standard classification loss. Now, let's turn
-to the case where we have a multi-task model that produces multiple outputs:
-
-```python
-# Load the images and labels.
-images, scene_labels, depth_labels = ...
-
-# Create the model.
-scene_predictions, depth_predictions = CreateMultiTaskModel(images)
-
-# Define the loss functions and get the total loss.
-classification_loss = slim.losses.cross_entropy_loss(scene_predictions, scene_labels)
-sum_of_squares_loss = slim.losses.l2_loss(depth_predictions - depth_labels)
-
-# The following two lines have the same effect:
-total_loss1 = classification_loss + sum_of_squares_loss
-total_loss2 = losses.GetTotalLoss()
-```
-
-In this example, we have two losses, which we create by calling
-`losses.cross_entropy_loss` and `losses.l2_loss`. We can obtain the total loss
-by adding them together (`total_loss1`) or by calling `losses.GetTotalLoss()`
-(`total_loss2`). How does this work? When you create a loss function via
-TF-Slim, TF-Slim adds the loss to a special TensorFlow collection of loss
-functions. This enables you to either manage the total loss manually or allow
-TF-Slim to manage it for you.
-
-What if you want to let TF-Slim manage the losses for you but have a custom
-loss function? [losses.py](./losses.py) exposes the collection key that lets
-you add such a loss to TF-Slim's collection yourself. For example:
-
-```python
-# Load the images and labels.
-images, scene_labels, depth_labels, pose_labels = ...
-
-# Create the model.
-scene_predictions, depth_predictions, pose_predictions = CreateMultiTaskModel(images)
-
-# Define the loss functions and get the total loss.
-classification_loss = slim.losses.cross_entropy_loss(scene_predictions, scene_labels)
-sum_of_squares_loss = slim.losses.l2_loss(depth_predictions - depth_labels)
-pose_loss = MyCustomLossFunction(pose_predictions, pose_labels)
-tf.add_to_collection(slim.losses.LOSSES_COLLECTION, pose_loss)  # Let TF-Slim know about the additional loss.
-
-# The following two lines have the same effect:
-total_loss1 = classification_loss + sum_of_squares_loss + pose_loss
-total_loss2 = losses.GetTotalLoss()
-```
-
-In this example, we can again either compute the total loss manually or
-register the additional loss with TF-Slim and let it handle all of the losses
-for us.
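-
-Under the hood, this bookkeeping is just a TensorFlow graph collection. As a
-minimal sketch (using the `LOSSES_COLLECTION` key defined in
-[losses.py](./losses.py); `tf.add_n` is standard TensorFlow), the total loss
-can also be recovered from that collection directly:
-
-```python
-import tensorflow as tf
-
-from inception.slim import losses
-
-# Every loss built through the slim loss functions adds itself to
-# LOSSES_COLLECTION, so summing the collected tensors yields the total loss.
-all_losses = tf.get_collection(losses.LOSSES_COLLECTION)
-total_loss = tf.add_n(all_losses, name='total_loss')
-```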
-
-## Putting the Pieces Together
-
-By combining TF-Slim Variables, Operations and scopes, we can express a
-normally very complex network in just a few lines of code. For example, the
-entire [VGG](https://www.robots.ox.ac.uk/~vgg/research/very_deep/)
-architecture can be defined with just the following snippet:
-
-```python
-def vgg16(inputs):
-  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
-    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
-    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
-    net = slim.ops.flatten(net, scope='flatten5')
-    net = slim.ops.fc(net, 4096, scope='fc6')
-    net = slim.ops.dropout(net, 0.5, scope='dropout6')
-    net = slim.ops.fc(net, 4096, scope='fc7')
-    net = slim.ops.dropout(net, 0.5, scope='dropout7')
-    net = slim.ops.fc(net, 1000, activation=None, scope='fc8')
-  return net
-```
-
-## Re-using previously defined network architectures and pre-trained models
-
-### Brief Recap on Restoring Variables from a Checkpoint
-
-After a model has been trained, it can be restored using `tf.train.Saver()`,
-which restores `Variables` from a given checkpoint. In many cases,
-`tf.train.Saver()` provides a simple mechanism to restore all or just a few
-variables.
-
-```python
-# Create some variables.
-v1 = tf.Variable(..., name="v1")
-v2 = tf.Variable(..., name="v2")
-...
-# Add ops to restore all the variables.
-restorer = tf.train.Saver()
-
-# Add ops to restore some variables.
-restorer = tf.train.Saver([v1, v2])
-
-# Later, launch the model, use the saver to restore variables from disk, and
-# do some work with the model.
-with tf.Session() as sess:
-  # Restore variables from disk.
-  restorer.restore(sess, "/tmp/model.ckpt")
-  print("Model restored.")
-  # Do some work with the model
-  ...
-```
-
-See the [Restoring Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#restoring-variables)
-and [Choosing which Variables to Save and Restore](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html#choosing-which-variables-to-save-and-restore)
-sections of the [Variables](https://www.tensorflow.org/versions/r0.7/how_tos/variables/index.html) page for
-more details.
-
-### Using slim.variables to Track which Variables need to be Restored
-
-It is often desirable to fine-tune a pre-trained model on an entirely new
-dataset or even a new task. In these situations, one must specify which layers
-of the model should be reused (and consequently loaded from a checkpoint) and
-which layers are new. Indicating which variables or layers should be restored
-is a process that quickly becomes cumbersome when done manually.
-
-To help keep track of which variables to restore, `slim.variables` provides a
-`restore` argument when creating each Variable. By default, all variables are
-marked as `restore=True`, which results in all variables defined by the model
-being restored.
-
-```python
-# Create some variables.
-v1 = slim.variables.variable(name="v1", ..., restore=False)
-v2 = slim.variables.variable(name="v2", ...)  # By default restore=True
-...
-# Get the list of variables to restore (which contains only 'v2').
-variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
-restorer = tf.train.Saver(variables_to_restore)
-with tf.Session() as sess:
-  # Restore variables from disk.
-  restorer.restore(sess, "/tmp/model.ckpt")
-  print("Model restored.")
-  # Do some work with the model
-  ...
-```
-
-Additionally, every layer in `slim.ops` that creates slim.variables (such as
-`slim.ops.conv2d`, `slim.ops.fc`, `slim.ops.batch_norm`) also has a `restore`
-argument which controls whether the variables created by that layer should be
-restored or not.
-
-```python
-# Create a small network.
-net = slim.ops.conv2d(images, 32, [7, 7], stride=2, scope='conv1')
-net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
-net = slim.ops.conv2d(net, 128, [3, 3], scope='conv3')
-net = slim.ops.max_pool(net, [3, 3], stride=2, scope='pool3')
-net = slim.ops.flatten(net)
-net = slim.ops.fc(net, 10, scope='logits', restore=False)
-...
-
-# VARIABLES_TO_RESTORE would contain the 'weights' and 'biases' defined by
-# 'conv1', 'conv2' and 'conv3' but not the ones defined by 'logits'.
-variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
-
-# Create a restorer that would restore only the needed variables.
-restorer = tf.train.Saver(variables_to_restore)
-
-# Create a saver that would save all the variables (including 'logits').
-saver = tf.train.Saver()
-with tf.Session() as sess:
-  # Restore variables from disk.
-  restorer.restore(sess, "/tmp/model.ckpt")
-  print("Model restored.")
-
-  # Do some work with the model
-  ...
-  saver.save(sess, "/tmp/new_model.ckpt")
-```
-
-Note: When restoring variables from a checkpoint, the `Saver` locates the
-variable names in a checkpoint file and maps them to variables in the current
-graph. Above, we created a saver by passing it a list of variables. In this
-case, the names of the variables to locate in the checkpoint file were
-implicitly obtained from each provided variable's `var.op.name`.
-
-This works well when the variable names in the checkpoint file match those in
-the graph. However, sometimes we want to restore a model from a checkpoint
-whose variables have different names than those in the current graph. In this
-case, we must provide the `Saver` a dictionary that maps from each checkpoint
-variable name to each graph variable. Consider the following example, where
-the checkpoint variable names are obtained via a simple function:
-
-```python
-# Assuming that 'conv1/weights' should be restored from 'vgg16/conv1/weights':
-def name_in_checkpoint(var):
-  return 'vgg16/' + var.op.name
-
-# Assuming that 'conv1/weights' and 'conv1/bias' should be restored from
-# 'conv1/params1' and 'conv1/params2':
-def name_in_checkpoint(var):
-  if "weights" in var.op.name:
-    return var.op.name.replace("weights", "params1")
-  if "bias" in var.op.name:
-    return var.op.name.replace("bias", "params2")
-
-variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
-variables_to_restore = {name_in_checkpoint(var): var for var in variables_to_restore}
-restorer = tf.train.Saver(variables_to_restore)
-with tf.Session() as sess:
-  # Restore variables from disk.
-  restorer.restore(sess, "/tmp/model.ckpt")
-```
-
-### Reusing the VGG16 network defined in TF-Slim on a different task, e.g. PASCAL-VOC
-
-Assuming one already has a pre-trained VGG16 model, one need only replace the
-last layer `fc8` with a new layer `fc8_pascal` created with `restore=False`.
-
-```python
-def vgg16_pascal(inputs):
-  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], stddev=0.01, weight_decay=0.0005):
-    net = slim.ops.repeat_op(2, inputs, slim.ops.conv2d, 64, [3, 3], scope='conv1')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool1')
-    net = slim.ops.repeat_op(2, net, slim.ops.conv2d, 128, [3, 3], scope='conv2')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool2')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 256, [3, 3], scope='conv3')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool3')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv4')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool4')
-    net = slim.ops.repeat_op(3, net, slim.ops.conv2d, 512, [3, 3], scope='conv5')
-    net = slim.ops.max_pool(net, [2, 2], scope='pool5')
-    net = slim.ops.flatten(net, scope='flatten5')
-    net = slim.ops.fc(net, 4096, scope='fc6')
-    net = slim.ops.dropout(net, 0.5, scope='dropout6')
-    net = slim.ops.fc(net, 4096, scope='fc7')
-    net = slim.ops.dropout(net, 0.5, scope='dropout7')
-    # To reuse vgg16 on PASCAL-VOC, just change the last layer.
-    net = slim.ops.fc(net, 21, activation=None, scope='fc8_pascal', restore=False)
-  return net
-```
-
-## Authors
-
-Sergio Guadarrama and Nathan Silberman
diff --git a/research/inception/inception/slim/collections_test.py b/research/inception/inception/slim/collections_test.py
deleted file mode 100644
index 2a1f170ed..000000000
--- a/research/inception/inception/slim/collections_test.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== -"""Tests for inception.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from inception.slim import slim - - -def get_variables(scope=None): - return slim.variables.get_variables(scope) - - -def get_variables_by_name(name): - return slim.variables.get_variables_by_name(name) - - -class CollectionsTest(tf.test.TestCase): - - def testVariables(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d], - batch_norm_params={'decay': 0.9997}): - slim.inception.inception_v3(inputs) - self.assertEqual(len(get_variables()), 388) - self.assertEqual(len(get_variables_by_name('weights')), 98) - self.assertEqual(len(get_variables_by_name('biases')), 2) - self.assertEqual(len(get_variables_by_name('beta')), 96) - self.assertEqual(len(get_variables_by_name('gamma')), 0) - self.assertEqual(len(get_variables_by_name('moving_mean')), 96) - self.assertEqual(len(get_variables_by_name('moving_variance')), 96) - - def testVariablesWithoutBatchNorm(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d], - batch_norm_params=None): - slim.inception.inception_v3(inputs) - self.assertEqual(len(get_variables()), 196) - self.assertEqual(len(get_variables_by_name('weights')), 98) - self.assertEqual(len(get_variables_by_name('biases')), 98) - self.assertEqual(len(get_variables_by_name('beta')), 0) - self.assertEqual(len(get_variables_by_name('gamma')), 0) - self.assertEqual(len(get_variables_by_name('moving_mean')), 0) - self.assertEqual(len(get_variables_by_name('moving_variance')), 0) - - def testVariablesByLayer(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d], - batch_norm_params={'decay': 0.9997}): - slim.inception.inception_v3(inputs) - self.assertEqual(len(get_variables()), 388) - self.assertEqual(len(get_variables('conv0')), 4) - self.assertEqual(len(get_variables('conv1')), 4) - self.assertEqual(len(get_variables('conv2')), 4) - self.assertEqual(len(get_variables('conv3')), 4) - self.assertEqual(len(get_variables('conv4')), 4) - self.assertEqual(len(get_variables('mixed_35x35x256a')), 28) - self.assertEqual(len(get_variables('mixed_35x35x288a')), 28) - self.assertEqual(len(get_variables('mixed_35x35x288b')), 28) - self.assertEqual(len(get_variables('mixed_17x17x768a')), 16) - self.assertEqual(len(get_variables('mixed_17x17x768b')), 40) - self.assertEqual(len(get_variables('mixed_17x17x768c')), 40) - self.assertEqual(len(get_variables('mixed_17x17x768d')), 40) - self.assertEqual(len(get_variables('mixed_17x17x768e')), 40) - self.assertEqual(len(get_variables('mixed_8x8x2048a')), 36) - self.assertEqual(len(get_variables('mixed_8x8x2048b')), 36) - self.assertEqual(len(get_variables('logits')), 2) - self.assertEqual(len(get_variables('aux_logits')), 10) - - def testVariablesToRestore(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d], - batch_norm_params={'decay': 0.9997}): - slim.inception.inception_v3(inputs) - variables_to_restore = 
tf.get_collection( - slim.variables.VARIABLES_TO_RESTORE) - self.assertEqual(len(variables_to_restore), 388) - self.assertListEqual(variables_to_restore, get_variables()) - - def testVariablesToRestoreWithoutLogits(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d], - batch_norm_params={'decay': 0.9997}): - slim.inception.inception_v3(inputs, restore_logits=False) - variables_to_restore = tf.get_collection( - slim.variables.VARIABLES_TO_RESTORE) - self.assertEqual(len(variables_to_restore), 384) - - def testRegularizationLosses(self): - batch_size = 5 - height, width = 299, 299 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): - slim.inception.inception_v3(inputs) - losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) - self.assertEqual(len(losses), len(get_variables_by_name('weights'))) - - def testTotalLossWithoutRegularization(self): - batch_size = 5 - height, width = 299, 299 - num_classes = 1001 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - dense_labels = tf.random_uniform((batch_size, num_classes)) - with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0): - logits, end_points = slim.inception.inception_v3( - inputs, - num_classes=num_classes) - # Cross entropy loss for the main softmax prediction. - slim.losses.cross_entropy_loss(logits, - dense_labels, - label_smoothing=0.1, - weight=1.0) - # Cross entropy loss for the auxiliary softmax head. - slim.losses.cross_entropy_loss(end_points['aux_logits'], - dense_labels, - label_smoothing=0.1, - weight=0.4, - scope='aux_loss') - losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) - self.assertEqual(len(losses), 2) - - def testTotalLossWithRegularization(self): - batch_size = 5 - height, width = 299, 299 - num_classes = 1000 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - dense_labels = tf.random_uniform((batch_size, num_classes)) - with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): - logits, end_points = slim.inception.inception_v3(inputs, num_classes) - # Cross entropy loss for the main softmax prediction. - slim.losses.cross_entropy_loss(logits, - dense_labels, - label_smoothing=0.1, - weight=1.0) - # Cross entropy loss for the auxiliary softmax head. - slim.losses.cross_entropy_loss(end_points['aux_logits'], - dense_labels, - label_smoothing=0.1, - weight=0.4, - scope='aux_loss') - losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) - self.assertEqual(len(losses), 2) - reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) - self.assertEqual(len(reg_losses), 98) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/inception/inception/slim/inception_model.py b/research/inception/inception/slim/inception_model.py deleted file mode 100644 index 6136ab1ba..000000000 --- a/research/inception/inception/slim/inception_model.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Inception-v3 expressed in TensorFlow-Slim. - - Usage: - - # Parameters for BatchNorm. - batch_norm_params = { - # Decay for the batch_norm moving averages. - 'decay': BATCHNORM_MOVING_AVERAGE_DECAY, - # epsilon to prevent 0s in variance. - 'epsilon': 0.001, - } - # Set weight_decay for weights in Conv and FC layers. - with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004): - with slim.arg_scope([slim.ops.conv2d], - stddev=0.1, - activation=tf.nn.relu, - batch_norm_params=batch_norm_params): - # Force all Variables to reside on the CPU. - with slim.arg_scope([slim.variables.variable], device='/cpu:0'): - logits, endpoints = slim.inception.inception_v3( - images, - dropout_keep_prob=0.8, - num_classes=num_classes, - is_training=for_training, - restore_logits=restore_logits, - scope=scope) -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from inception.slim import ops -from inception.slim import scopes - - -def inception_v3(inputs, - dropout_keep_prob=0.8, - num_classes=1000, - is_training=True, - restore_logits=True, - scope=''): - """Latest Inception from http://arxiv.org/abs/1512.00567. - - "Rethinking the Inception Architecture for Computer Vision" - - Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, - Zbigniew Wojna - - Args: - inputs: a tensor of size [batch_size, height, width, channels]. - dropout_keep_prob: dropout keep_prob. - num_classes: number of predicted classes. - is_training: whether is training or not. - restore_logits: whether or not the logits layers should be restored. - Useful for fine-tuning a model with different num_classes. - scope: Optional scope for name_scope. - - Returns: - a list containing 'logits', 'aux_logits' Tensors. - """ - # end_points will collect relevant activations for external use, for example - # summaries or losses. - end_points = {} - with tf.name_scope(scope, 'inception_v3', [inputs]): - with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout], - is_training=is_training): - with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool], - stride=1, padding='VALID'): - # 299 x 299 x 3 - end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2, - scope='conv0') - # 149 x 149 x 32 - end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3], - scope='conv1') - # 147 x 147 x 32 - end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3], - padding='SAME', scope='conv2') - # 147 x 147 x 64 - end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3], - stride=2, scope='pool1') - # 73 x 73 x 64 - end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1], - scope='conv3') - # 73 x 73 x 80. - end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3], - scope='conv4') - # 71 x 71 x 192. - end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3], - stride=2, scope='pool2') - # 35 x 35 x 192. 
- net = end_points['pool2'] - # Inception blocks - with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool], - stride=1, padding='SAME'): - # mixed: 35 x 35 x 256. - with tf.variable_scope('mixed_35x35x256a'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 64, [1, 1]) - with tf.variable_scope('branch5x5'): - branch5x5 = ops.conv2d(net, 48, [1, 1]) - branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 64, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 32, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) - end_points['mixed_35x35x256a'] = net - # mixed_1: 35 x 35 x 288. - with tf.variable_scope('mixed_35x35x288a'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 64, [1, 1]) - with tf.variable_scope('branch5x5'): - branch5x5 = ops.conv2d(net, 48, [1, 1]) - branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 64, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 64, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) - end_points['mixed_35x35x288a'] = net - # mixed_2: 35 x 35 x 288. - with tf.variable_scope('mixed_35x35x288b'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 64, [1, 1]) - with tf.variable_scope('branch5x5'): - branch5x5 = ops.conv2d(net, 48, [1, 1]) - branch5x5 = ops.conv2d(branch5x5, 64, [5, 5]) - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 64, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 64, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) - end_points['mixed_35x35x288b'] = net - # mixed_3: 17 x 17 x 768. - with tf.variable_scope('mixed_17x17x768a'): - with tf.variable_scope('branch3x3'): - branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID') - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 64, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3], - stride=2, padding='VALID') - with tf.variable_scope('branch_pool'): - branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID') - net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool]) - end_points['mixed_17x17x768a'] = net - # mixed4: 17 x 17 x 768. 
- with tf.variable_scope('mixed_17x17x768b'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 192, [1, 1]) - with tf.variable_scope('branch7x7'): - branch7x7 = ops.conv2d(net, 128, [1, 1]) - branch7x7 = ops.conv2d(branch7x7, 128, [1, 7]) - branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) - with tf.variable_scope('branch7x7dbl'): - branch7x7dbl = ops.conv2d(net, 128, [1, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) - end_points['mixed_17x17x768b'] = net - # mixed_5: 17 x 17 x 768. - with tf.variable_scope('mixed_17x17x768c'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 192, [1, 1]) - with tf.variable_scope('branch7x7'): - branch7x7 = ops.conv2d(net, 160, [1, 1]) - branch7x7 = ops.conv2d(branch7x7, 160, [1, 7]) - branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) - with tf.variable_scope('branch7x7dbl'): - branch7x7dbl = ops.conv2d(net, 160, [1, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) - end_points['mixed_17x17x768c'] = net - # mixed_6: 17 x 17 x 768. - with tf.variable_scope('mixed_17x17x768d'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 192, [1, 1]) - with tf.variable_scope('branch7x7'): - branch7x7 = ops.conv2d(net, 160, [1, 1]) - branch7x7 = ops.conv2d(branch7x7, 160, [1, 7]) - branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) - with tf.variable_scope('branch7x7dbl'): - branch7x7dbl = ops.conv2d(net, 160, [1, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) - end_points['mixed_17x17x768d'] = net - # mixed_7: 17 x 17 x 768. 
- with tf.variable_scope('mixed_17x17x768e'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 192, [1, 1]) - with tf.variable_scope('branch7x7'): - branch7x7 = ops.conv2d(net, 192, [1, 1]) - branch7x7 = ops.conv2d(branch7x7, 192, [1, 7]) - branch7x7 = ops.conv2d(branch7x7, 192, [7, 1]) - with tf.variable_scope('branch7x7dbl'): - branch7x7dbl = ops.conv2d(net, 192, [1, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1]) - branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) - end_points['mixed_17x17x768e'] = net - # Auxiliary Head logits - aux_logits = tf.identity(end_points['mixed_17x17x768e']) - with tf.variable_scope('aux_logits'): - aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3, - padding='VALID') - aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj') - # Shape of feature map before the final layer. - shape = aux_logits.get_shape() - aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01, - padding='VALID') - aux_logits = ops.flatten(aux_logits) - aux_logits = ops.fc(aux_logits, num_classes, activation=None, - stddev=0.001, restore=restore_logits) - end_points['aux_logits'] = aux_logits - # mixed_8: 8 x 8 x 1280. - # Note that the scope below is not changed to not void previous - # checkpoints. - # (TODO) Fix the scope when appropriate. - with tf.variable_scope('mixed_17x17x1280a'): - with tf.variable_scope('branch3x3'): - branch3x3 = ops.conv2d(net, 192, [1, 1]) - branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2, - padding='VALID') - with tf.variable_scope('branch7x7x3'): - branch7x7x3 = ops.conv2d(net, 192, [1, 1]) - branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7]) - branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1]) - branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3], - stride=2, padding='VALID') - with tf.variable_scope('branch_pool'): - branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID') - net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool]) - end_points['mixed_17x17x1280a'] = net - # mixed_9: 8 x 8 x 2048. - with tf.variable_scope('mixed_8x8x2048a'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 320, [1, 1]) - with tf.variable_scope('branch3x3'): - branch3x3 = ops.conv2d(net, 384, [1, 1]) - branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]), - ops.conv2d(branch3x3, 384, [3, 1])]) - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 448, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3]) - branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]), - ops.conv2d(branch3x3dbl, 384, [3, 1])]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool]) - end_points['mixed_8x8x2048a'] = net - # mixed_10: 8 x 8 x 2048. 
- with tf.variable_scope('mixed_8x8x2048b'): - with tf.variable_scope('branch1x1'): - branch1x1 = ops.conv2d(net, 320, [1, 1]) - with tf.variable_scope('branch3x3'): - branch3x3 = ops.conv2d(net, 384, [1, 1]) - branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]), - ops.conv2d(branch3x3, 384, [3, 1])]) - with tf.variable_scope('branch3x3dbl'): - branch3x3dbl = ops.conv2d(net, 448, [1, 1]) - branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3]) - branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]), - ops.conv2d(branch3x3dbl, 384, [3, 1])]) - with tf.variable_scope('branch_pool'): - branch_pool = ops.avg_pool(net, [3, 3]) - branch_pool = ops.conv2d(branch_pool, 192, [1, 1]) - net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool]) - end_points['mixed_8x8x2048b'] = net - # Final pooling and prediction - with tf.variable_scope('logits'): - shape = net.get_shape() - net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool') - # 1 x 1 x 2048 - net = ops.dropout(net, dropout_keep_prob, scope='dropout') - net = ops.flatten(net, scope='flatten') - # 2048 - logits = ops.fc(net, num_classes, activation=None, scope='logits', - restore=restore_logits) - # 1000 - end_points['logits'] = logits - end_points['predictions'] = tf.nn.softmax(logits, name='predictions') - return logits, end_points - - -def inception_v3_parameters(weight_decay=0.00004, stddev=0.1, - batch_norm_decay=0.9997, batch_norm_epsilon=0.001): - """Yields the scope with the default parameters for inception_v3. - - Args: - weight_decay: the weight decay for weights variables. - stddev: standard deviation of the truncated guassian weight distribution. - batch_norm_decay: decay for the moving average of batch_norm momentums. - batch_norm_epsilon: small float added to variance to avoid dividing by zero. - - Yields: - a arg_scope with the parameters needed for inception_v3. - """ - # Set weight_decay for weights in Conv and FC layers. - with scopes.arg_scope([ops.conv2d, ops.fc], - weight_decay=weight_decay): - # Set stddev, activation and parameters for batch_norm. - with scopes.arg_scope([ops.conv2d], - stddev=stddev, - activation=tf.nn.relu, - batch_norm_params={ - 'decay': batch_norm_decay, - 'epsilon': batch_norm_epsilon}) as arg_scope: - yield arg_scope diff --git a/research/inception/inception/slim/inception_test.py b/research/inception/inception/slim/inception_test.py deleted file mode 100644 index 231dea298..000000000 --- a/research/inception/inception/slim/inception_test.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for slim.inception.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from inception.slim import inception_model as inception - - -class InceptionTest(tf.test.TestCase): - - def testBuildLogits(self): - batch_size = 5 - height, width = 299, 299 - num_classes = 1000 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - logits, _ = inception.inception_v3(inputs, num_classes) - self.assertTrue(logits.op.name.startswith('logits')) - self.assertListEqual(logits.get_shape().as_list(), - [batch_size, num_classes]) - - def testBuildEndPoints(self): - batch_size = 5 - height, width = 299, 299 - num_classes = 1000 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - _, end_points = inception.inception_v3(inputs, num_classes) - self.assertTrue('logits' in end_points) - logits = end_points['logits'] - self.assertListEqual(logits.get_shape().as_list(), - [batch_size, num_classes]) - self.assertTrue('aux_logits' in end_points) - aux_logits = end_points['aux_logits'] - self.assertListEqual(aux_logits.get_shape().as_list(), - [batch_size, num_classes]) - pre_pool = end_points['mixed_8x8x2048b'] - self.assertListEqual(pre_pool.get_shape().as_list(), - [batch_size, 8, 8, 2048]) - - def testVariablesSetDevice(self): - batch_size = 5 - height, width = 299, 299 - num_classes = 1000 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - # Force all Variables to reside on the device. - with tf.variable_scope('on_cpu'), tf.device('/cpu:0'): - inception.inception_v3(inputs, num_classes) - with tf.variable_scope('on_gpu'), tf.device('/gpu:0'): - inception.inception_v3(inputs, num_classes) - for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'): - self.assertDeviceEqual(v.device, '/cpu:0') - for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'): - self.assertDeviceEqual(v.device, '/gpu:0') - - def testHalfSizeImages(self): - batch_size = 5 - height, width = 150, 150 - num_classes = 1000 - with self.test_session(): - inputs = tf.random_uniform((batch_size, height, width, 3)) - logits, end_points = inception.inception_v3(inputs, num_classes) - self.assertTrue(logits.op.name.startswith('logits')) - self.assertListEqual(logits.get_shape().as_list(), - [batch_size, num_classes]) - pre_pool = end_points['mixed_8x8x2048b'] - self.assertListEqual(pre_pool.get_shape().as_list(), - [batch_size, 3, 3, 2048]) - - def testUnknowBatchSize(self): - batch_size = 1 - height, width = 299, 299 - num_classes = 1000 - with self.test_session() as sess: - inputs = tf.placeholder(tf.float32, (None, height, width, 3)) - logits, _ = inception.inception_v3(inputs, num_classes) - self.assertTrue(logits.op.name.startswith('logits')) - self.assertListEqual(logits.get_shape().as_list(), - [None, num_classes]) - images = tf.random_uniform((batch_size, height, width, 3)) - sess.run(tf.global_variables_initializer()) - output = sess.run(logits, {inputs: images.eval()}) - self.assertEquals(output.shape, (batch_size, num_classes)) - - def testEvaluation(self): - batch_size = 2 - height, width = 299, 299 - num_classes = 1000 - with self.test_session() as sess: - eval_inputs = tf.random_uniform((batch_size, height, width, 3)) - logits, _ = inception.inception_v3(eval_inputs, num_classes, - is_training=False) - predictions 
= tf.argmax(logits, 1) - sess.run(tf.global_variables_initializer()) - output = sess.run(predictions) - self.assertEquals(output.shape, (batch_size,)) - - def testTrainEvalWithReuse(self): - train_batch_size = 5 - eval_batch_size = 2 - height, width = 150, 150 - num_classes = 1000 - with self.test_session() as sess: - train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) - inception.inception_v3(train_inputs, num_classes) - tf.get_variable_scope().reuse_variables() - eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) - logits, _ = inception.inception_v3(eval_inputs, num_classes, - is_training=False) - predictions = tf.argmax(logits, 1) - sess.run(tf.global_variables_initializer()) - output = sess.run(predictions) - self.assertEquals(output.shape, (eval_batch_size,)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/inception/inception/slim/losses.py b/research/inception/inception/slim/losses.py deleted file mode 100644 index 78298d092..000000000 --- a/research/inception/inception/slim/losses.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Contains convenience wrappers for various Neural Network TensorFlow losses. - - All the losses defined here add themselves to the LOSSES_COLLECTION - collection. - - l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso. - l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay. - cross_entropy_loss: Define a cross entropy loss using - softmax_cross_entropy_with_logits. Useful for classification. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -# In order to gather all losses in a network, the user should use this -# key for get_collection, i.e: -# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION) -LOSSES_COLLECTION = '_losses' - - -def l1_regularizer(weight=1.0, scope=None): - """Define a L1 regularizer. - - Args: - weight: scale the loss by this factor. - scope: Optional scope for name_scope. - - Returns: - a regularizer function. - """ - def regularizer(tensor): - with tf.name_scope(scope, 'L1Regularizer', [tensor]): - l1_weight = tf.convert_to_tensor(weight, - dtype=tensor.dtype.base_dtype, - name='weight') - return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value') - return regularizer - - -def l2_regularizer(weight=1.0, scope=None): - """Define a L2 regularizer. - - Args: - weight: scale the loss by this factor. - scope: Optional scope for name_scope. - - Returns: - a regularizer function. 
- """ - def regularizer(tensor): - with tf.name_scope(scope, 'L2Regularizer', [tensor]): - l2_weight = tf.convert_to_tensor(weight, - dtype=tensor.dtype.base_dtype, - name='weight') - return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value') - return regularizer - - -def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None): - """Define a L1L2 regularizer. - - Args: - weight_l1: scale the L1 loss by this factor. - weight_l2: scale the L2 loss by this factor. - scope: Optional scope for name_scope. - - Returns: - a regularizer function. - """ - def regularizer(tensor): - with tf.name_scope(scope, 'L1L2Regularizer', [tensor]): - weight_l1_t = tf.convert_to_tensor(weight_l1, - dtype=tensor.dtype.base_dtype, - name='weight_l1') - weight_l2_t = tf.convert_to_tensor(weight_l2, - dtype=tensor.dtype.base_dtype, - name='weight_l2') - reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)), - name='value_l1') - reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor), - name='value_l2') - return tf.add(reg_l1, reg_l2, name='value') - return regularizer - - -def l1_loss(tensor, weight=1.0, scope=None): - """Define a L1Loss, useful for regularize, i.e. lasso. - - Args: - tensor: tensor to regularize. - weight: scale the loss by this factor. - scope: Optional scope for name_scope. - - Returns: - the L1 loss op. - """ - with tf.name_scope(scope, 'L1Loss', [tensor]): - weight = tf.convert_to_tensor(weight, - dtype=tensor.dtype.base_dtype, - name='loss_weight') - loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value') - tf.add_to_collection(LOSSES_COLLECTION, loss) - return loss - - -def l2_loss(tensor, weight=1.0, scope=None): - """Define a L2Loss, useful for regularize, i.e. weight decay. - - Args: - tensor: tensor to regularize. - weight: an optional weight to modulate the loss. - scope: Optional scope for name_scope. - - Returns: - the L2 loss op. - """ - with tf.name_scope(scope, 'L2Loss', [tensor]): - weight = tf.convert_to_tensor(weight, - dtype=tensor.dtype.base_dtype, - name='loss_weight') - loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') - tf.add_to_collection(LOSSES_COLLECTION, loss) - return loss - - -def cross_entropy_loss(logits, one_hot_labels, label_smoothing=0, - weight=1.0, scope=None): - """Define a Cross Entropy loss using softmax_cross_entropy_with_logits. - - It can scale the loss by weight factor, and smooth the labels. - - Args: - logits: [batch_size, num_classes] logits outputs of the network . - one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels. - label_smoothing: if greater than 0 then smooth the labels. - weight: scale the loss by this factor. - scope: Optional scope for name_scope. - - Returns: - A tensor with the softmax_cross_entropy loss. 
- """ - logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape()) - with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]): - num_classes = one_hot_labels.get_shape()[-1].value - one_hot_labels = tf.cast(one_hot_labels, logits.dtype) - if label_smoothing > 0: - smooth_positives = 1.0 - label_smoothing - smooth_negatives = label_smoothing / num_classes - one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives - cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits( - logits, one_hot_labels, name='xentropy') - - weight = tf.convert_to_tensor(weight, - dtype=logits.dtype.base_dtype, - name='loss_weight') - loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value') - tf.add_to_collection(LOSSES_COLLECTION, loss) - return loss diff --git a/research/inception/inception/slim/losses_test.py b/research/inception/inception/slim/losses_test.py deleted file mode 100644 index e267f6520..000000000 --- a/research/inception/inception/slim/losses_test.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for slim.losses.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from inception.slim import losses - - -class LossesTest(tf.test.TestCase): - - def testL1Loss(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - weights = tf.constant(1.0, shape=shape) - wd = 0.01 - loss = losses.l1_loss(weights, wd) - self.assertEquals(loss.op.name, 'L1Loss/value') - self.assertAlmostEqual(loss.eval(), num_elem * wd, 5) - - def testL2Loss(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - weights = tf.constant(1.0, shape=shape) - wd = 0.01 - loss = losses.l2_loss(weights, wd) - self.assertEquals(loss.op.name, 'L2Loss/value') - self.assertAlmostEqual(loss.eval(), num_elem * wd / 2, 5) - - -class RegularizersTest(tf.test.TestCase): - - def testL1Regularizer(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l1_regularizer()(tensor) - self.assertEquals(loss.op.name, 'L1Regularizer/value') - self.assertAlmostEqual(loss.eval(), num_elem, 5) - - def testL1RegularizerWithScope(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l1_regularizer(scope='L1')(tensor) - self.assertEquals(loss.op.name, 'L1/value') - self.assertAlmostEqual(loss.eval(), num_elem, 5) - - def testL1RegularizerWithWeight(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - weight = 0.01 - loss = losses.l1_regularizer(weight)(tensor) - self.assertEquals(loss.op.name, 'L1Regularizer/value') - 
self.assertAlmostEqual(loss.eval(), num_elem * weight, 5) - - def testL2Regularizer(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l2_regularizer()(tensor) - self.assertEquals(loss.op.name, 'L2Regularizer/value') - self.assertAlmostEqual(loss.eval(), num_elem / 2, 5) - - def testL2RegularizerWithScope(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l2_regularizer(scope='L2')(tensor) - self.assertEquals(loss.op.name, 'L2/value') - self.assertAlmostEqual(loss.eval(), num_elem / 2, 5) - - def testL2RegularizerWithWeight(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - weight = 0.01 - loss = losses.l2_regularizer(weight)(tensor) - self.assertEquals(loss.op.name, 'L2Regularizer/value') - self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5) - - def testL1L2Regularizer(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l1_l2_regularizer()(tensor) - self.assertEquals(loss.op.name, 'L1L2Regularizer/value') - self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) - - def testL1L2RegularizerWithScope(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - loss = losses.l1_l2_regularizer(scope='L1L2')(tensor) - self.assertEquals(loss.op.name, 'L1L2/value') - self.assertAlmostEqual(loss.eval(), num_elem + num_elem / 2, 5) - - def testL1L2RegularizerWithWeights(self): - with self.test_session(): - shape = [5, 5, 5] - num_elem = 5 * 5 * 5 - tensor = tf.constant(1.0, shape=shape) - weight_l1 = 0.01 - weight_l2 = 0.05 - loss = losses.l1_l2_regularizer(weight_l1, weight_l2)(tensor) - self.assertEquals(loss.op.name, 'L1L2Regularizer/value') - self.assertAlmostEqual(loss.eval(), - num_elem * weight_l1 + num_elem * weight_l2 / 2, 5) - - -class CrossEntropyLossTest(tf.test.TestCase): - - def testCrossEntropyLossAllCorrect(self): - with self.test_session(): - logits = tf.constant([[10.0, 0.0, 0.0], - [0.0, 10.0, 0.0], - [0.0, 0.0, 10.0]]) - labels = tf.constant([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) - loss = losses.cross_entropy_loss(logits, labels) - self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') - self.assertAlmostEqual(loss.eval(), 0.0, 3) - - def testCrossEntropyLossAllWrong(self): - with self.test_session(): - logits = tf.constant([[10.0, 0.0, 0.0], - [0.0, 10.0, 0.0], - [0.0, 0.0, 10.0]]) - labels = tf.constant([[0, 0, 1], - [1, 0, 0], - [0, 1, 0]]) - loss = losses.cross_entropy_loss(logits, labels) - self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') - self.assertAlmostEqual(loss.eval(), 10.0, 3) - - def testCrossEntropyLossAllWrongWithWeight(self): - with self.test_session(): - logits = tf.constant([[10.0, 0.0, 0.0], - [0.0, 10.0, 0.0], - [0.0, 0.0, 10.0]]) - labels = tf.constant([[0, 0, 1], - [1, 0, 0], - [0, 1, 0]]) - loss = losses.cross_entropy_loss(logits, labels, weight=0.5) - self.assertEquals(loss.op.name, 'CrossEntropyLoss/value') - self.assertAlmostEqual(loss.eval(), 5.0, 3) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/inception/inception/slim/ops.py b/research/inception/inception/slim/ops.py deleted file mode 100644 index 54fda4eb8..000000000 --- a/research/inception/inception/slim/ops.py +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright 2016 Google 
Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Contains convenience wrappers for typical Neural Network TensorFlow layers. - - Additionally it maintains a collection with update_ops that need to be - updated after the ops have been computed, for example to update moving means - and moving variances of batch_norm. - - Ops that have different behavior during training or eval have an is_training - parameter. Additionally Ops that contain variables.variable have a trainable - parameter, which control if the ops variables are trainable or not. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from tensorflow.python.training import moving_averages - -from inception.slim import losses -from inception.slim import scopes -from inception.slim import variables - -# Used to keep the update ops done by batch_norm. -UPDATE_OPS_COLLECTION = '_update_ops_' - - -@scopes.add_arg_scope -def batch_norm(inputs, - decay=0.999, - center=True, - scale=False, - epsilon=0.001, - moving_vars='moving_vars', - activation=None, - is_training=True, - trainable=True, - restore=True, - scope=None, - reuse=None): - """Adds a Batch Normalization layer. - - Args: - inputs: a tensor of size [batch_size, height, width, channels] - or [batch_size, channels]. - decay: decay for the moving average. - center: If True, subtract beta. If False, beta is not created and ignored. - scale: If True, multiply by gamma. If False, gamma is - not used. When the next layer is linear (also e.g. ReLU), this can be - disabled since the scaling can be done by the next layer. - epsilon: small float added to variance to avoid dividing by zero. - moving_vars: collection to store the moving_mean and moving_variance. - activation: activation function. - is_training: whether or not the model is in training mode. - trainable: whether or not the variables should be trainable or not. - restore: whether or not the variables should be marked for restore. - scope: Optional scope for variable_scope. - reuse: whether or not the layer and its variables should be reused. To be - able to reuse the layer scope must be given. - - Returns: - a tensor representing the output of the operation. - - """ - inputs_shape = inputs.get_shape() - with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse): - axis = list(range(len(inputs_shape) - 1)) - params_shape = inputs_shape[-1:] - # Allocate parameters for the beta and gamma of the normalization. 
- beta, gamma = None, None - if center: - beta = variables.variable('beta', - params_shape, - initializer=tf.zeros_initializer(), - trainable=trainable, - restore=restore) - if scale: - gamma = variables.variable('gamma', - params_shape, - initializer=tf.ones_initializer(), - trainable=trainable, - restore=restore) - # Create moving_mean and moving_variance add them to - # GraphKeys.MOVING_AVERAGE_VARIABLES collections. - moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES] - moving_mean = variables.variable('moving_mean', - params_shape, - initializer=tf.zeros_initializer(), - trainable=False, - restore=restore, - collections=moving_collections) - moving_variance = variables.variable('moving_variance', - params_shape, - initializer=tf.ones_initializer(), - trainable=False, - restore=restore, - collections=moving_collections) - if is_training: - # Calculate the moments based on the individual batch. - mean, variance = tf.nn.moments(inputs, axis) - - update_moving_mean = moving_averages.assign_moving_average( - moving_mean, mean, decay) - tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean) - update_moving_variance = moving_averages.assign_moving_average( - moving_variance, variance, decay) - tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance) - else: - # Just use the moving_mean and moving_variance. - mean = moving_mean - variance = moving_variance - # Normalize the activations. - outputs = tf.nn.batch_normalization( - inputs, mean, variance, beta, gamma, epsilon) - outputs.set_shape(inputs.get_shape()) - if activation: - outputs = activation(outputs) - return outputs - - -def _two_element_tuple(int_or_tuple): - """Converts `int_or_tuple` to height, width. - - Several of the functions that follow accept arguments as either - a tuple of 2 integers or a single integer. A single integer - indicates that the 2 values of the tuple are the same. - - This functions normalizes the input value by always returning a tuple. - - Args: - int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape. - - Returns: - A tuple with 2 values. - - Raises: - ValueError: If `int_or_tuple` it not well formed. - """ - if isinstance(int_or_tuple, (list, tuple)): - if len(int_or_tuple) != 2: - raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple) - return int(int_or_tuple[0]), int(int_or_tuple[1]) - if isinstance(int_or_tuple, int): - return int(int_or_tuple), int(int_or_tuple) - if isinstance(int_or_tuple, tf.TensorShape): - if len(int_or_tuple) == 2: - return int_or_tuple[0], int_or_tuple[1] - raise ValueError('Must be an int, a list with 2 elements or a TensorShape of ' - 'length 2') - - -@scopes.add_arg_scope -def conv2d(inputs, - num_filters_out, - kernel_size, - stride=1, - padding='SAME', - activation=tf.nn.relu, - stddev=0.01, - bias=0.0, - weight_decay=0, - batch_norm_params=None, - is_training=True, - trainable=True, - restore=True, - scope=None, - reuse=None): - """Adds a 2D convolution followed by an optional batch_norm layer. - - conv2d creates a variable called 'weights', representing the convolutional - kernel, that is convolved with the input. If `batch_norm_params` is None, a - second variable called 'biases' is added to the result of the convolution - operation. - - Args: - inputs: a tensor of size [batch_size, height, width, channels]. - num_filters_out: the number of output filters. - kernel_size: a list of length 2: [kernel_height, kernel_width] of - of the filters. Can be an int if both values are the same. 
-    stride: a list of length 2: [stride_height, stride_width].
-      Can be an int if both strides are the same. Note that presently
-      both strides must have the same value.
-    padding: one of 'VALID' or 'SAME'.
-    activation: activation function.
-    stddev: standard deviation of the truncated gaussian weight distribution.
-    bias: the initial value of the biases.
-    weight_decay: the weight decay.
-    batch_norm_params: parameters for the batch_norm. If None, don't use it.
-    is_training: whether or not the model is in training mode.
-    trainable: whether or not the variables should be trainable.
-    restore: whether or not the variables should be marked for restore.
-    scope: Optional scope for variable_scope.
-    reuse: whether or not the layer and its variables should be reused. To be
-      able to reuse the layer scope must be given.
-  Returns:
-    a tensor representing the output of the operation.
-
-  """
-  with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
-    kernel_h, kernel_w = _two_element_tuple(kernel_size)
-    stride_h, stride_w = _two_element_tuple(stride)
-    num_filters_in = inputs.get_shape()[-1]
-    weights_shape = [kernel_h, kernel_w,
-                     num_filters_in, num_filters_out]
-    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
-    l2_regularizer = None
-    if weight_decay and weight_decay > 0:
-      l2_regularizer = losses.l2_regularizer(weight_decay)
-    weights = variables.variable('weights',
-                                 shape=weights_shape,
-                                 initializer=weights_initializer,
-                                 regularizer=l2_regularizer,
-                                 trainable=trainable,
-                                 restore=restore)
-    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
-                        padding=padding)
-    if batch_norm_params is not None:
-      with scopes.arg_scope([batch_norm], is_training=is_training,
-                            trainable=trainable, restore=restore):
-        outputs = batch_norm(conv, **batch_norm_params)
-    else:
-      bias_shape = [num_filters_out,]
-      bias_initializer = tf.constant_initializer(bias)
-      biases = variables.variable('biases',
-                                  shape=bias_shape,
-                                  initializer=bias_initializer,
-                                  trainable=trainable,
-                                  restore=restore)
-      outputs = tf.nn.bias_add(conv, biases)
-    if activation:
-      outputs = activation(outputs)
-    return outputs
-
-
-@scopes.add_arg_scope
-def fc(inputs,
-       num_units_out,
-       activation=tf.nn.relu,
-       stddev=0.01,
-       bias=0.0,
-       weight_decay=0,
-       batch_norm_params=None,
-       is_training=True,
-       trainable=True,
-       restore=True,
-       scope=None,
-       reuse=None):
-  """Adds a fully connected layer followed by an optional batch_norm layer.
-
-  FC creates a variable called 'weights', representing the fully connected
-  weight matrix, that is multiplied by the input. If `batch_norm` is None, a
-  second variable called 'biases' is added to the result of the initial
-  vector-matrix multiplication.
-
-  Args:
-    inputs: a [B x N] tensor where B is the batch size and N is the number of
-      input units in the layer.
-    num_units_out: the number of output units in the layer.
-    activation: activation function.
-    stddev: the standard deviation for the weights.
-    bias: the initial value of the biases.
-    weight_decay: the weight decay.
-    batch_norm_params: parameters for the batch_norm. If None, don't use it.
-    is_training: whether or not the model is in training mode.
-    trainable: whether or not the variables should be trainable.
-    restore: whether or not the variables should be marked for restore.
-    scope: Optional scope for variable_scope.
-    reuse: whether or not the layer and its variables should be reused. To be
-      able to reuse the layer scope must be given.
- - Returns: - the tensor variable representing the result of the series of operations. - """ - with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse): - num_units_in = inputs.get_shape()[1] - weights_shape = [num_units_in, num_units_out] - weights_initializer = tf.truncated_normal_initializer(stddev=stddev) - l2_regularizer = None - if weight_decay and weight_decay > 0: - l2_regularizer = losses.l2_regularizer(weight_decay) - weights = variables.variable('weights', - shape=weights_shape, - initializer=weights_initializer, - regularizer=l2_regularizer, - trainable=trainable, - restore=restore) - if batch_norm_params is not None: - outputs = tf.matmul(inputs, weights) - with scopes.arg_scope([batch_norm], is_training=is_training, - trainable=trainable, restore=restore): - outputs = batch_norm(outputs, **batch_norm_params) - else: - bias_shape = [num_units_out,] - bias_initializer = tf.constant_initializer(bias) - biases = variables.variable('biases', - shape=bias_shape, - initializer=bias_initializer, - trainable=trainable, - restore=restore) - outputs = tf.nn.xw_plus_b(inputs, weights, biases) - if activation: - outputs = activation(outputs) - return outputs - - -def one_hot_encoding(labels, num_classes, scope=None): - """Transform numeric labels into onehot_labels. - - Args: - labels: [batch_size] target labels. - num_classes: total number of classes. - scope: Optional scope for name_scope. - Returns: - one hot encoding of the labels. - """ - with tf.name_scope(scope, 'OneHotEncoding', [labels]): - batch_size = labels.get_shape()[0] - indices = tf.expand_dims(tf.range(0, batch_size), 1) - labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype) - concated = tf.concat(axis=1, values=[indices, labels]) - onehot_labels = tf.sparse_to_dense( - concated, tf.stack([batch_size, num_classes]), 1.0, 0.0) - onehot_labels.set_shape([batch_size, num_classes]) - return onehot_labels - - -@scopes.add_arg_scope -def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): - """Adds a Max Pooling layer. - - It is assumed by the wrapper that the pooling is only done per image and not - in depth or batch. - - Args: - inputs: a tensor of size [batch_size, height, width, depth]. - kernel_size: a list of length 2: [kernel_height, kernel_width] of the - pooling kernel over which the op is computed. Can be an int if both - values are the same. - stride: a list of length 2: [stride_height, stride_width]. - Can be an int if both strides are the same. Note that presently - both strides must have the same value. - padding: the padding method, either 'VALID' or 'SAME'. - scope: Optional scope for name_scope. - - Returns: - a tensor representing the results of the pooling operation. - Raises: - ValueError: if 'kernel_size' is not a 2-D list - """ - with tf.name_scope(scope, 'MaxPool', [inputs]): - kernel_h, kernel_w = _two_element_tuple(kernel_size) - stride_h, stride_w = _two_element_tuple(stride) - return tf.nn.max_pool(inputs, - ksize=[1, kernel_h, kernel_w, 1], - strides=[1, stride_h, stride_w, 1], - padding=padding) - - -@scopes.add_arg_scope -def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): - """Adds a Avg Pooling layer. - - It is assumed by the wrapper that the pooling is only done per image and not - in depth or batch. - - Args: - inputs: a tensor of size [batch_size, height, width, depth]. - kernel_size: a list of length 2: [kernel_height, kernel_width] of the - pooling kernel over which the op is computed. Can be an int if both - values are the same. 
-    stride: a list of length 2: [stride_height, stride_width].
-      Can be an int if both strides are the same. Note that presently
-      both strides must have the same value.
-    padding: the padding method, either 'VALID' or 'SAME'.
-    scope: Optional scope for name_scope.
-
-  Returns:
-    a tensor representing the results of the pooling operation.
-  """
-  with tf.name_scope(scope, 'AvgPool', [inputs]):
-    kernel_h, kernel_w = _two_element_tuple(kernel_size)
-    stride_h, stride_w = _two_element_tuple(stride)
-    return tf.nn.avg_pool(inputs,
-                          ksize=[1, kernel_h, kernel_w, 1],
-                          strides=[1, stride_h, stride_w, 1],
-                          padding=padding)
-
-
-@scopes.add_arg_scope
-def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
-  """Returns a dropout layer applied to the input.
-
-  Args:
-    inputs: the tensor to pass to the Dropout layer.
-    keep_prob: the probability of keeping each input unit.
-    is_training: whether or not the model is in training mode. If so, dropout
-      is applied and values scaled. Otherwise, inputs is returned.
-    scope: Optional scope for name_scope.
-
-  Returns:
-    a tensor representing the output of the operation.
-  """
-  if is_training and keep_prob > 0:
-    with tf.name_scope(scope, 'Dropout', [inputs]):
-      return tf.nn.dropout(inputs, keep_prob)
-  else:
-    return inputs
-
-
-def flatten(inputs, scope=None):
-  """Flattens the input while maintaining the batch_size.
-
-  Assumes that the first dimension represents the batch.
-
-  Args:
-    inputs: a tensor of size [batch_size, ...].
-    scope: Optional scope for name_scope.
-
-  Returns:
-    a flattened tensor with shape [batch_size, k].
-  Raises:
-    ValueError: if inputs.shape is wrong.
-  """
-  if len(inputs.get_shape()) < 2:
-    raise ValueError('Inputs must have at least 2 dimensions')
-  dims = inputs.get_shape()[1:]
-  k = dims.num_elements()
-  with tf.name_scope(scope, 'Flatten', [inputs]):
-    return tf.reshape(inputs, [-1, k])
-
-
-def repeat_op(repetitions, inputs, op, *args, **kwargs):
-  """Builds a sequential tower starting from inputs by using an op repeatedly.
-
-  It creates new scopes for each operation by increasing the counter.
-  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
-  it will repeat the given op under the following variable_scopes:
-    conv1/Conv
-    conv1/Conv_1
-    conv1/Conv_2
-
-  Args:
-    repetitions: number of repetitions.
-    inputs: a tensor of size [batch_size, height, width, channels].
-    op: an operation.
-    *args: args for the op.
-    **kwargs: kwargs for the op.
-
-  Returns:
-    a tensor resulting from applying the operation op, repetitions times.
-  Raises:
-    ValueError: if the op is unknown or wrong.
-  """
-  scope = kwargs.pop('scope', None)
-  with tf.variable_scope(scope, 'RepeatOp', [inputs]):
-    tower = inputs
-    for _ in range(repetitions):
-      tower = op(tower, *args, **kwargs)
-    return tower
diff --git a/research/inception/inception/slim/ops_test.py b/research/inception/inception/slim/ops_test.py
deleted file mode 100644
index 13dc5d9aa..000000000
--- a/research/inception/inception/slim/ops_test.py
+++ /dev/null
@@ -1,687 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for slim.ops.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import numpy as np -import tensorflow as tf - -from inception.slim import ops -from inception.slim import scopes -from inception.slim import variables - - -class ConvTest(tf.test.TestCase): - - def testCreateConv(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 3]) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) - - def testCreateSquareConv(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, 3) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) - - def testCreateConvWithTensorShape(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, images.get_shape()[1:3]) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32]) - - def testCreateFullyConv(self): - height, width = 6, 6 - with self.test_session(): - images = tf.random_uniform((5, height, width, 32), seed=1) - output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID') - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64]) - - def testCreateVerticalConv(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 1]) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), - [5, height, width, 32]) - - def testCreateHorizontalConv(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [1, 3]) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), - [5, height, width, 32]) - - def testCreateConvWithStride(self): - height, width = 6, 6 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 3], stride=2) - self.assertEquals(output.op.name, 'Conv/Relu') - self.assertListEqual(output.get_shape().as_list(), - [5, height/2, width/2, 32]) - - def testCreateConvCreatesWeightsAndBiasesVars(self): - height, width = 3, 3 - images = tf.random_uniform((5, height, width, 3), seed=1) - with self.test_session(): - self.assertFalse(variables.get_variables('conv1/weights')) - self.assertFalse(variables.get_variables('conv1/biases')) - ops.conv2d(images, 32, [3, 3], scope='conv1') - self.assertTrue(variables.get_variables('conv1/weights')) - self.assertTrue(variables.get_variables('conv1/biases')) - - def testCreateConvWithScope(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 3], scope='conv1') - self.assertEquals(output.op.name, 'conv1/Relu') - - def 
testCreateConvWithoutActivation(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 3], activation=None) - self.assertEquals(output.op.name, 'Conv/BiasAdd') - - def testCreateConvValid(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.conv2d(images, 32, [3, 3], padding='VALID') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32]) - - def testCreateConvWithWD(self): - height, width = 3, 3 - with self.test_session() as sess: - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.conv2d(images, 32, [3, 3], weight_decay=0.01) - wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] - self.assertEquals(wd.op.name, - 'Conv/weights/Regularizer/L2Regularizer/value') - sess.run(tf.global_variables_initializer()) - self.assertTrue(sess.run(wd) <= 0.01) - - def testCreateConvWithoutWD(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.conv2d(images, 32, [3, 3], weight_decay=0) - self.assertEquals( - tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) - - def testReuseVars(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.conv2d(images, 32, [3, 3], scope='conv1') - self.assertEquals(len(variables.get_variables()), 2) - ops.conv2d(images, 32, [3, 3], scope='conv1', reuse=True) - self.assertEquals(len(variables.get_variables()), 2) - - def testNonReuseVars(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.conv2d(images, 32, [3, 3]) - self.assertEquals(len(variables.get_variables()), 2) - ops.conv2d(images, 32, [3, 3]) - self.assertEquals(len(variables.get_variables()), 4) - - def testReuseConvWithWD(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1') - self.assertEquals(len(variables.get_variables()), 2) - self.assertEquals( - len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) - ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1', - reuse=True) - self.assertEquals(len(variables.get_variables()), 2) - self.assertEquals( - len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) - - def testConvWithBatchNorm(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 32), seed=1) - with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}): - net = ops.conv2d(images, 32, [3, 3]) - net = ops.conv2d(net, 32, [3, 3]) - self.assertEquals(len(variables.get_variables()), 8) - self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3) - self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 3) - - def testReuseConvWithBatchNorm(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 32), seed=1) - with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}): - net = ops.conv2d(images, 32, [3, 3], scope='Conv') - net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True) - self.assertEquals(len(variables.get_variables()), 4) - self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3) - self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0) - - -class 
FCTest(tf.test.TestCase): - - def testCreateFC(self): - height, width = 3, 3 - with self.test_session(): - inputs = tf.random_uniform((5, height * width * 3), seed=1) - output = ops.fc(inputs, 32) - self.assertEquals(output.op.name, 'FC/Relu') - self.assertListEqual(output.get_shape().as_list(), [5, 32]) - - def testCreateFCWithScope(self): - height, width = 3, 3 - with self.test_session(): - inputs = tf.random_uniform((5, height * width * 3), seed=1) - output = ops.fc(inputs, 32, scope='fc1') - self.assertEquals(output.op.name, 'fc1/Relu') - - def testCreateFcCreatesWeightsAndBiasesVars(self): - height, width = 3, 3 - inputs = tf.random_uniform((5, height * width * 3), seed=1) - with self.test_session(): - self.assertFalse(variables.get_variables('fc1/weights')) - self.assertFalse(variables.get_variables('fc1/biases')) - ops.fc(inputs, 32, scope='fc1') - self.assertTrue(variables.get_variables('fc1/weights')) - self.assertTrue(variables.get_variables('fc1/biases')) - - def testReuseVars(self): - height, width = 3, 3 - inputs = tf.random_uniform((5, height * width * 3), seed=1) - with self.test_session(): - ops.fc(inputs, 32, scope='fc1') - self.assertEquals(len(variables.get_variables('fc1')), 2) - ops.fc(inputs, 32, scope='fc1', reuse=True) - self.assertEquals(len(variables.get_variables('fc1')), 2) - - def testNonReuseVars(self): - height, width = 3, 3 - inputs = tf.random_uniform((5, height * width * 3), seed=1) - with self.test_session(): - ops.fc(inputs, 32) - self.assertEquals(len(variables.get_variables('FC')), 2) - ops.fc(inputs, 32) - self.assertEquals(len(variables.get_variables('FC')), 4) - - def testCreateFCWithoutActivation(self): - height, width = 3, 3 - with self.test_session(): - inputs = tf.random_uniform((5, height * width * 3), seed=1) - output = ops.fc(inputs, 32, activation=None) - self.assertEquals(output.op.name, 'FC/xw_plus_b') - - def testCreateFCWithWD(self): - height, width = 3, 3 - with self.test_session() as sess: - inputs = tf.random_uniform((5, height * width * 3), seed=1) - ops.fc(inputs, 32, weight_decay=0.01) - wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0] - self.assertEquals(wd.op.name, - 'FC/weights/Regularizer/L2Regularizer/value') - sess.run(tf.global_variables_initializer()) - self.assertTrue(sess.run(wd) <= 0.01) - - def testCreateFCWithoutWD(self): - height, width = 3, 3 - with self.test_session(): - inputs = tf.random_uniform((5, height * width * 3), seed=1) - ops.fc(inputs, 32, weight_decay=0) - self.assertEquals( - tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), []) - - def testReuseFCWithWD(self): - height, width = 3, 3 - with self.test_session(): - inputs = tf.random_uniform((5, height * width * 3), seed=1) - ops.fc(inputs, 32, weight_decay=0.01, scope='fc') - self.assertEquals(len(variables.get_variables()), 2) - self.assertEquals( - len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) - ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True) - self.assertEquals(len(variables.get_variables()), 2) - self.assertEquals( - len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1) - - def testFCWithBatchNorm(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height * width * 3), seed=1) - with scopes.arg_scope([ops.fc], batch_norm_params={}): - net = ops.fc(images, 27) - net = ops.fc(net, 27) - self.assertEquals(len(variables.get_variables()), 8) - self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3) - 
self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')), 3) - - def testReuseFCWithBatchNorm(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height * width * 3), seed=1) - with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}): - net = ops.fc(images, 27, scope='fc1') - net = ops.fc(net, 27, scope='fc1', reuse=True) - self.assertEquals(len(variables.get_variables()), 4) - self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3) - - -class MaxPoolTest(tf.test.TestCase): - - def testCreateMaxPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, [3, 3]) - self.assertEquals(output.op.name, 'MaxPool/MaxPool') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - def testCreateSquareMaxPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, 3) - self.assertEquals(output.op.name, 'MaxPool/MaxPool') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - def testCreateMaxPoolWithScope(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, [3, 3], scope='pool1') - self.assertEquals(output.op.name, 'pool1/MaxPool') - - def testCreateMaxPoolSAME(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, [3, 3], padding='SAME') - self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) - - def testCreateMaxPoolStrideSAME(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, [3, 3], stride=1, padding='SAME') - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) - - def testGlobalMaxPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.max_pool(images, images.get_shape()[1:3], stride=1) - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - -class AvgPoolTest(tf.test.TestCase): - - def testCreateAvgPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, [3, 3]) - self.assertEquals(output.op.name, 'AvgPool/AvgPool') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - def testCreateSquareAvgPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, 3) - self.assertEquals(output.op.name, 'AvgPool/AvgPool') - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - def testCreateAvgPoolWithScope(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, [3, 3], scope='pool1') - self.assertEquals(output.op.name, 'pool1/AvgPool') - - def testCreateAvgPoolSAME(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, [3, 3], padding='SAME') - self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3]) - - def testCreateAvgPoolStrideSAME(self): - height, width = 3, 3 - with self.test_session(): - 
images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME') - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) - - def testGlobalAvgPool(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.avg_pool(images, images.get_shape()[1:3], stride=1) - self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3]) - - -class OneHotEncodingTest(tf.test.TestCase): - - def testOneHotEncodingCreate(self): - with self.test_session(): - labels = tf.constant([0, 1, 2]) - output = ops.one_hot_encoding(labels, num_classes=3) - self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense') - self.assertListEqual(output.get_shape().as_list(), [3, 3]) - - def testOneHotEncoding(self): - with self.test_session(): - labels = tf.constant([0, 1, 2]) - one_hot_labels = tf.constant([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) - output = ops.one_hot_encoding(labels, num_classes=3) - self.assertAllClose(output.eval(), one_hot_labels.eval()) - - -class DropoutTest(tf.test.TestCase): - - def testCreateDropout(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.dropout(images) - self.assertEquals(output.op.name, 'Dropout/dropout/mul') - output.get_shape().assert_is_compatible_with(images.get_shape()) - - def testCreateDropoutNoTraining(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1, name='images') - output = ops.dropout(images, is_training=False) - self.assertEquals(output, images) - - -class FlattenTest(tf.test.TestCase): - - def testFlatten4D(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1, name='images') - output = ops.flatten(images) - self.assertEquals(output.get_shape().num_elements(), - images.get_shape().num_elements()) - self.assertEqual(output.get_shape()[0], images.get_shape()[0]) - - def testFlatten3D(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width), seed=1, name='images') - output = ops.flatten(images) - self.assertEquals(output.get_shape().num_elements(), - images.get_shape().num_elements()) - self.assertEqual(output.get_shape()[0], images.get_shape()[0]) - - def testFlattenBatchSize(self): - height, width = 3, 3 - with self.test_session() as sess: - images = tf.random_uniform((5, height, width, 3), seed=1, name='images') - inputs = tf.placeholder(tf.int32, (None, height, width, 3)) - output = ops.flatten(inputs) - self.assertEquals(output.get_shape().as_list(), - [None, height * width * 3]) - output = sess.run(output, {inputs: images.eval()}) - self.assertEquals(output.size, - images.get_shape().num_elements()) - self.assertEqual(output.shape[0], images.get_shape()[0]) - - -class BatchNormTest(tf.test.TestCase): - - def testCreateOp(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - output = ops.batch_norm(images) - self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm')) - self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) - - def testCreateVariables(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images) - beta = variables.get_variables_by_name('beta')[0] - 
self.assertEquals(beta.op.name, 'BatchNorm/beta') - gamma = variables.get_variables_by_name('gamma') - self.assertEquals(gamma, []) - moving_mean = tf.moving_average_variables()[0] - moving_variance = tf.moving_average_variables()[1] - self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') - self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') - - def testCreateVariablesWithScale(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, scale=True) - beta = variables.get_variables_by_name('beta')[0] - gamma = variables.get_variables_by_name('gamma')[0] - self.assertEquals(beta.op.name, 'BatchNorm/beta') - self.assertEquals(gamma.op.name, 'BatchNorm/gamma') - moving_mean = tf.moving_average_variables()[0] - moving_variance = tf.moving_average_variables()[1] - self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') - self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') - - def testCreateVariablesWithoutCenterWithScale(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, center=False, scale=True) - beta = variables.get_variables_by_name('beta') - self.assertEquals(beta, []) - gamma = variables.get_variables_by_name('gamma')[0] - self.assertEquals(gamma.op.name, 'BatchNorm/gamma') - moving_mean = tf.moving_average_variables()[0] - moving_variance = tf.moving_average_variables()[1] - self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') - self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') - - def testCreateVariablesWithoutCenterWithoutScale(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, center=False, scale=False) - beta = variables.get_variables_by_name('beta') - self.assertEquals(beta, []) - gamma = variables.get_variables_by_name('gamma') - self.assertEquals(gamma, []) - moving_mean = tf.moving_average_variables()[0] - moving_variance = tf.moving_average_variables()[1] - self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') - self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') - - def testMovingAverageVariables(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, scale=True) - moving_mean = tf.moving_average_variables()[0] - moving_variance = tf.moving_average_variables()[1] - self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean') - self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance') - - def testUpdateOps(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images) - update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) - update_moving_mean = update_ops[0] - update_moving_variance = update_ops[1] - self.assertEquals(update_moving_mean.op.name, - 'BatchNorm/AssignMovingAvg') - self.assertEquals(update_moving_variance.op.name, - 'BatchNorm/AssignMovingAvg_1') - - def testReuseVariables(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, scale=True, scope='bn') - ops.batch_norm(images, scale=True, scope='bn', reuse=True) - beta = variables.get_variables_by_name('beta') - gamma = variables.get_variables_by_name('gamma') - 
self.assertEquals(len(beta), 1) - self.assertEquals(len(gamma), 1) - moving_vars = tf.get_collection('moving_vars') - self.assertEquals(len(moving_vars), 2) - - def testReuseUpdateOps(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - ops.batch_norm(images, scope='bn') - self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2) - ops.batch_norm(images, scope='bn', reuse=True) - self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4) - - def testCreateMovingVars(self): - height, width = 3, 3 - with self.test_session(): - images = tf.random_uniform((5, height, width, 3), seed=1) - _ = ops.batch_norm(images, moving_vars='moving_vars') - moving_mean = tf.get_collection('moving_vars', - 'BatchNorm/moving_mean') - self.assertEquals(len(moving_mean), 1) - self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean') - moving_variance = tf.get_collection('moving_vars', - 'BatchNorm/moving_variance') - self.assertEquals(len(moving_variance), 1) - self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance') - - def testComputeMovingVars(self): - height, width = 3, 3 - with self.test_session() as sess: - image_shape = (10, height, width, 3) - image_values = np.random.rand(*image_shape) - expected_mean = np.mean(image_values, axis=(0, 1, 2)) - expected_var = np.var(image_values, axis=(0, 1, 2)) - images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) - output = ops.batch_norm(images, decay=0.1) - update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) - with tf.control_dependencies(update_ops): - output = tf.identity(output) - # Initialize all variables - sess.run(tf.global_variables_initializer()) - moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] - moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] - mean, variance = sess.run([moving_mean, moving_variance]) - # After initialization moving_mean == 0 and moving_variance == 1. - self.assertAllClose(mean, [0] * 3) - self.assertAllClose(variance, [1] * 3) - for _ in range(10): - sess.run([output]) - mean = moving_mean.eval() - variance = moving_variance.eval() - # After 10 updates with decay 0.1 moving_mean == expected_mean and - # moving_variance == expected_var. - self.assertAllClose(mean, expected_mean) - self.assertAllClose(variance, expected_var) - - def testEvalMovingVars(self): - height, width = 3, 3 - with self.test_session() as sess: - image_shape = (10, height, width, 3) - image_values = np.random.rand(*image_shape) - expected_mean = np.mean(image_values, axis=(0, 1, 2)) - expected_var = np.var(image_values, axis=(0, 1, 2)) - images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) - output = ops.batch_norm(images, decay=0.1, is_training=False) - update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) - with tf.control_dependencies(update_ops): - output = tf.identity(output) - # Initialize all variables - sess.run(tf.global_variables_initializer()) - moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] - moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] - mean, variance = sess.run([moving_mean, moving_variance]) - # After initialization moving_mean == 0 and moving_variance == 1. - self.assertAllClose(mean, [0] * 3) - self.assertAllClose(variance, [1] * 3) - # Simulate assigment from saver restore. 
- init_assigns = [tf.assign(moving_mean, expected_mean), - tf.assign(moving_variance, expected_var)] - sess.run(init_assigns) - for _ in range(10): - sess.run([output], {images: np.random.rand(*image_shape)}) - mean = moving_mean.eval() - variance = moving_variance.eval() - # Although we feed different images, the moving_mean and moving_variance - # shouldn't change. - self.assertAllClose(mean, expected_mean) - self.assertAllClose(variance, expected_var) - - def testReuseVars(self): - height, width = 3, 3 - with self.test_session() as sess: - image_shape = (10, height, width, 3) - image_values = np.random.rand(*image_shape) - expected_mean = np.mean(image_values, axis=(0, 1, 2)) - expected_var = np.var(image_values, axis=(0, 1, 2)) - images = tf.constant(image_values, shape=image_shape, dtype=tf.float32) - output = ops.batch_norm(images, decay=0.1, is_training=False) - update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION) - with tf.control_dependencies(update_ops): - output = tf.identity(output) - # Initialize all variables - sess.run(tf.global_variables_initializer()) - moving_mean = variables.get_variables('BatchNorm/moving_mean')[0] - moving_variance = variables.get_variables('BatchNorm/moving_variance')[0] - mean, variance = sess.run([moving_mean, moving_variance]) - # After initialization moving_mean == 0 and moving_variance == 1. - self.assertAllClose(mean, [0] * 3) - self.assertAllClose(variance, [1] * 3) - # Simulate assigment from saver restore. - init_assigns = [tf.assign(moving_mean, expected_mean), - tf.assign(moving_variance, expected_var)] - sess.run(init_assigns) - for _ in range(10): - sess.run([output], {images: np.random.rand(*image_shape)}) - mean = moving_mean.eval() - variance = moving_variance.eval() - # Although we feed different images, the moving_mean and moving_variance - # shouldn't change. - self.assertAllClose(mean, expected_mean) - self.assertAllClose(variance, expected_var) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/inception/inception/slim/scopes.py b/research/inception/inception/slim/scopes.py deleted file mode 100644 index 2c2fb0a2e..000000000 --- a/research/inception/inception/slim/scopes.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Contains the new arg_scope used for TF-Slim ops. - - Allows one to define models much more compactly by eliminating boilerplate - code. This is accomplished through the use of argument scoping (arg_scope). 
-
-  Example of how to use scopes.arg_scope:
-
-  with scopes.arg_scope(ops.conv2d, padding='SAME',
-                        stddev=0.01, weight_decay=0.0005):
-    net = ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
-    net = ops.conv2d(net, 256, [5, 5], scope='conv2')
-
-  The first call to conv2d will overwrite padding:
-    ops.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
-               stddev=0.01, weight_decay=0.0005, scope='conv1')
-
-  The second call to conv2d will use the predefined args:
-    ops.conv2d(inputs, 256, [5, 5], padding='SAME',
-               stddev=0.01, weight_decay=0.0005, scope='conv2')
-
-  Example of how to reuse an arg_scope:
-  with scopes.arg_scope(ops.conv2d, padding='SAME',
-                        stddev=0.01, weight_decay=0.0005) as conv2d_arg_scope:
-    net = ops.conv2d(net, 256, [5, 5], scope='conv1')
-    ....
-
-  with scopes.arg_scope(conv2d_arg_scope):
-    net = ops.conv2d(net, 256, [5, 5], scope='conv2')
-
-  Example of how to use scopes.add_arg_scope:
-
-  @scopes.add_arg_scope
-  def conv2d(*args, **kwargs)
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import contextlib
-import functools
-
-from tensorflow.python.framework import ops
-
-_ARGSTACK_KEY = ("__arg_stack",)
-
-_DECORATED_OPS = set()
-
-
-def _get_arg_stack():
-  stack = ops.get_collection(_ARGSTACK_KEY)
-  if stack:
-    return stack[0]
-  else:
-    stack = [{}]
-    ops.add_to_collection(_ARGSTACK_KEY, stack)
-    return stack
-
-
-def _current_arg_scope():
-  stack = _get_arg_stack()
-  return stack[-1]
-
-
-def _add_op(op):
-  key_op = (op.__module__, op.__name__)
-  if key_op not in _DECORATED_OPS:
-    _DECORATED_OPS.add(key_op)
-
-
-@contextlib.contextmanager
-def arg_scope(list_ops_or_scope, **kwargs):
-  """Stores the default arguments for the given set of list_ops.
-
-  For usage, please see examples at top of the file.
-
-  Args:
-    list_ops_or_scope: List or tuple of operations to set argument scope for or
-      a dictionary containing the current scope. When list_ops_or_scope is a
-      dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
-      then every op in it needs to be decorated with @add_arg_scope to work.
-    **kwargs: keyword=value that will define the defaults for each op in
-      list_ops. All the ops need to accept the given set of arguments.
-
-  Yields:
-    the current_scope, which is a dictionary of {op: {arg: value}}
-  Raises:
-    TypeError: if list_ops is not a list or a tuple.
-    ValueError: if any op in list_ops has not been decorated with
-      @add_arg_scope.
-  """
-  if isinstance(list_ops_or_scope, dict):
-    # Assumes that list_ops_or_scope is a scope that is being reused.
-    if kwargs:
-      raise ValueError("When attempting to re-use a scope by supplying a "
-                       "dictionary, kwargs must be empty.")
-    current_scope = list_ops_or_scope.copy()
-    try:
-      _get_arg_stack().append(current_scope)
-      yield current_scope
-    finally:
-      _get_arg_stack().pop()
-  else:
-    # Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
-    if not isinstance(list_ops_or_scope, (list, tuple)):
-      raise TypeError("list_ops_or_scope must either be a list/tuple or "
-                      "reused scope (i.e. dict)")
dict)") - try: - current_scope = _current_arg_scope().copy() - for op in list_ops_or_scope: - key_op = (op.__module__, op.__name__) - if not has_arg_scope(op): - raise ValueError("%s is not decorated with @add_arg_scope", key_op) - if key_op in current_scope: - current_kwargs = current_scope[key_op].copy() - current_kwargs.update(kwargs) - current_scope[key_op] = current_kwargs - else: - current_scope[key_op] = kwargs.copy() - _get_arg_stack().append(current_scope) - yield current_scope - finally: - _get_arg_stack().pop() - - -def add_arg_scope(func): - """Decorates a function with args so it can be used within an arg_scope. - - Args: - func: function to decorate. - - Returns: - A tuple with the decorated function func_with_args(). - """ - @functools.wraps(func) - def func_with_args(*args, **kwargs): - current_scope = _current_arg_scope() - current_args = kwargs - key_func = (func.__module__, func.__name__) - if key_func in current_scope: - current_args = current_scope[key_func].copy() - current_args.update(kwargs) - return func(*args, **current_args) - _add_op(func) - return func_with_args - - -def has_arg_scope(func): - """Checks whether a func has been decorated with @add_arg_scope or not. - - Args: - func: function to check. - - Returns: - a boolean. - """ - key_op = (func.__module__, func.__name__) - return key_op in _DECORATED_OPS diff --git a/research/inception/inception/slim/scopes_test.py b/research/inception/inception/slim/scopes_test.py deleted file mode 100644 index cd349399e..000000000 --- a/research/inception/inception/slim/scopes_test.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests slim.scopes.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf -from inception.slim import scopes - - -@scopes.add_arg_scope -def func1(*args, **kwargs): - return (args, kwargs) - - -@scopes.add_arg_scope -def func2(*args, **kwargs): - return (args, kwargs) - - -class ArgScopeTest(tf.test.TestCase): - - def testEmptyArgScope(self): - with self.test_session(): - self.assertEqual(scopes._current_arg_scope(), {}) - - def testCurrentArgScope(self): - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - key_op = (func1.__module__, func1.__name__) - current_scope = {key_op: func1_kwargs.copy()} - with self.test_session(): - with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope: - self.assertDictEqual(scope, current_scope) - - def testCurrentArgScopeNested(self): - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - func2_kwargs = {'b': 2, 'd': [2]} - key = lambda f: (f.__module__, f.__name__) - current_scope = {key(func1): func1_kwargs.copy(), - key(func2): func2_kwargs.copy()} - with self.test_session(): - with scopes.arg_scope([func1], a=1, b=None, c=[1]): - with scopes.arg_scope([func2], b=2, d=[2]) as scope: - self.assertDictEqual(scope, current_scope) - - def testReuseArgScope(self): - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - key_op = (func1.__module__, func1.__name__) - current_scope = {key_op: func1_kwargs.copy()} - with self.test_session(): - with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1: - pass - with scopes.arg_scope(scope1) as scope: - self.assertDictEqual(scope, current_scope) - - def testReuseArgScopeNested(self): - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - func2_kwargs = {'b': 2, 'd': [2]} - key = lambda f: (f.__module__, f.__name__) - current_scope1 = {key(func1): func1_kwargs.copy()} - current_scope2 = {key(func1): func1_kwargs.copy(), - key(func2): func2_kwargs.copy()} - with self.test_session(): - with scopes.arg_scope([func1], a=1, b=None, c=[1]) as scope1: - with scopes.arg_scope([func2], b=2, d=[2]) as scope2: - pass - with scopes.arg_scope(scope1): - self.assertDictEqual(scopes._current_arg_scope(), current_scope1) - with scopes.arg_scope(scope2): - self.assertDictEqual(scopes._current_arg_scope(), current_scope2) - - def testSimpleArgScope(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - with self.test_session(): - with scopes.arg_scope([func1], a=1, b=None, c=[1]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testSimpleArgScopeWithTuple(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - with self.test_session(): - with scopes.arg_scope((func1,), a=1, b=None, c=[1]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testOverwriteArgScope(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': 2, 'c': [1]} - with scopes.arg_scope([func1], a=1, b=None, c=[1]): - args, kwargs = func1(0, b=2) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testNestedArgScope(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - with scopes.arg_scope([func1], a=1, b=None, c=[1]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - func1_kwargs['b'] = 2 - with 
scopes.arg_scope([func1], b=2): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testSharedArgScope(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - with scopes.arg_scope([func1, func2], a=1, b=None, c=[1]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - args, kwargs = func2(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testSharedArgScopeTuple(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - args, kwargs = func2(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - - def testPartiallySharedArgScope(self): - func1_args = (0,) - func1_kwargs = {'a': 1, 'b': None, 'c': [1]} - func2_args = (1,) - func2_kwargs = {'a': 1, 'b': None, 'd': [2]} - with scopes.arg_scope([func1, func2], a=1, b=None): - with scopes.arg_scope([func1], c=[1]), scopes.arg_scope([func2], d=[2]): - args, kwargs = func1(0) - self.assertTupleEqual(args, func1_args) - self.assertDictEqual(kwargs, func1_kwargs) - args, kwargs = func2(1) - self.assertTupleEqual(args, func2_args) - self.assertDictEqual(kwargs, func2_kwargs) - -if __name__ == '__main__': - tf.test.main() diff --git a/research/inception/inception/slim/slim.py b/research/inception/inception/slim/slim.py deleted file mode 100644 index b7a5c0f8c..000000000 --- a/research/inception/inception/slim/slim.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""TF-Slim grouped API. Please see README.md for details and usage.""" -# pylint: disable=unused-import - -# Collapse tf-slim into a single namespace. -from inception.slim import inception_model as inception -from inception.slim import losses -from inception.slim import ops -from inception.slim import scopes -from inception.slim import variables -from inception.slim.scopes import arg_scope diff --git a/research/inception/inception/slim/variables.py b/research/inception/inception/slim/variables.py deleted file mode 100644 index 1d967b79e..000000000 --- a/research/inception/inception/slim/variables.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Contains convenience wrappers for creating variables in TF-Slim.
-
-The variables module is typically used for defining model variables from the
-ops routines (see slim.ops). Such variables are used for training, evaluation
-and inference of models.
-
-All the variables created through this module are added to the
-MODEL_VARIABLES collection. If you create a model variable outside slim, it can
-be added with slim.variables.add_variable(external_variable, reuse).
-
-Usage:
-  weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
-  l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
-  weights = variables.variable('weights',
-                               shape=[100, 100],
-                               initializer=weights_initializer,
-                               regularizer=l2_regularizer,
-                               device='/cpu:0')
-
-  biases = variables.variable('biases',
-                              shape=[100],
-                              initializer=tf.zeros_initializer(),
-                              device='/cpu:0')
-
-  # More complex example.
-
-  net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
-  net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
-  with slim.arg_scope([variables.variable], restore=False):
-    net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
-
-  # Get all model variables from all the layers.
-  model_variables = slim.variables.get_variables()
-
-  # Get all model variables from a specific layer, e.g. 'conv1'.
-  conv1_variables = slim.variables.get_variables('conv1')
-
-  # Get all weights from all the layers.
-  weights = slim.variables.get_variables_by_name('weights')
-
-  # Get all biases from all the layers.
-  biases = slim.variables.get_variables_by_name('biases')
-
-  # Get all variables to restore.
-  # (i.e. only those created by 'conv1' and 'conv2')
-  variables_to_restore = slim.variables.get_variables_to_restore()
-
-************************************************
-* Initializing model variables from a checkpoint
-************************************************
-
-# Create some variables.
-v1 = slim.variables.variable(name="v1", ..., restore=False)
-v2 = slim.variables.variable(name="v2", ...)  # By default restore=True
-...
-# The list of variables to restore should only contain 'v2'.
-variables_to_restore = slim.variables.get_variables_to_restore()
-restorer = tf.train.Saver(variables_to_restore)
-with tf.Session() as sess:
-  # Restore variables from disk.
-  restorer.restore(sess, "/tmp/model.ckpt")
-  print("Model restored.")
-  # Do some work with the model
-  ...
-
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from inception.slim import scopes
-
-# Collection containing all the variables created using slim.variables
-MODEL_VARIABLES = '_model_variables_'
-
-# Collection containing the slim.variables that are created with restore=True.
-VARIABLES_TO_RESTORE = '_variables_to_restore_'
-
-
-def add_variable(var, restore=True):
-  """Adds a variable to the MODEL_VARIABLES collection.
-
-  Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
-
-  Args:
-    var: a variable.
-    restore: whether the variable should be added to the
-      VARIABLES_TO_RESTORE collection.
-
-  """
-  collections = [MODEL_VARIABLES]
-  if restore:
-    collections.append(VARIABLES_TO_RESTORE)
-  for collection in collections:
-    if var not in tf.get_collection(collection):
-      tf.add_to_collection(collection, var)
-
-
-def get_variables(scope=None, suffix=None):
-  """Gets the list of variables, filtered by scope and/or suffix.
-
-  Args:
-    scope: an optional scope for filtering the variables to return.
-    suffix: an optional suffix for filtering the variables to return.
-
-  Returns:
-    a copied list of variables with scope and suffix.
-  """
-  candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
-  if suffix is not None:
-    candidates = [var for var in candidates if var.op.name.endswith(suffix)]
-  return candidates
-
-
-def get_variables_to_restore():
-  """Gets the list of variables to restore.
-
-  Returns:
-    a copied list of variables.
-  """
-  return tf.get_collection(VARIABLES_TO_RESTORE)[:]
-
-
-def get_variables_by_name(given_name, scope=None):
-  """Gets the list of variables that were given that name.
-
-  Args:
-    given_name: name given to the variable without scope.
-    scope: an optional scope for filtering the variables to return.
-
-  Returns:
-    a copied list of variables with the given name and prefix.
-  """
-  return get_variables(scope=scope, suffix=given_name)
-
-
-def get_unique_variable(name):
-  """Gets the variable uniquely identified by that name.
-
-  Args:
-    name: a name that uniquely identifies the variable.
-
-  Returns:
-    a tensorflow variable.
-
-  Raises:
-    ValueError: if no variable uniquely identified by the name exists.
-  """
-  candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
-  if not candidates:
-    raise ValueError('Couldn\'t find variable %s' % name)
-
-  for candidate in candidates:
-    if candidate.op.name == name:
-      return candidate
-  raise ValueError('Variable %s does not uniquely identify a variable' % name)
-
-
-class VariableDeviceChooser(object):
-  """Slim device chooser for variables.
-
-  When using parameter servers, it assigns variables to them in a round-robin
-  fashion. When not using parameter servers, it allows GPU:0 or CPU:0
-  placement.
-  """
-
-  def __init__(self,
-               num_parameter_servers=0,
-               ps_device='/job:ps',
-               placement='CPU:0'):
-    """Initialize VariableDeviceChooser.
-
-    Args:
-      num_parameter_servers: number of parameter servers.
-      ps_device: string representing the parameter server device.
-      placement: string representing the placement of the variable, either
-        CPU:0 or GPU:0. When using parameter servers, this is forced to CPU:0.
-    """
-    self._num_ps = num_parameter_servers
-    self._ps_device = ps_device
-    self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
-    self._next_task_id = 0
-
-  def __call__(self, op):
-    device_string = ''
-    if self._num_ps > 0:
-      task_id = self._next_task_id
-      self._next_task_id = (self._next_task_id + 1) % self._num_ps
-      device_string = '%s/task:%d' % (self._ps_device, task_id)
-    device_string += '/%s' % self._placement
-    return device_string
-
-
-# TODO(sguada) Remove once get_variable is able to colocate op.devices.
-def variable_device(device, name):
-  """Fix the variable device to colocate its ops."""
-  if callable(device):
-    var_name = tf.get_variable_scope().name + '/' + name
-    var_def = tf.NodeDef(name=var_name, op='Variable')
-    device = device(var_def)
-  if device is None:
-    device = ''
-  return device
-
-
-@scopes.add_arg_scope
-def global_step(device=''):
-  """Returns the global step variable.
-
-  Args:
-    device: Optional device to place the variable. It can be a string or a
-      function that is called to get the device for the variable.
-
-  Returns:
-    the tensor representing the global step variable.
-  """
-  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
-  if global_step_ref:
-    return global_step_ref[0]
-  else:
-    collections = [
-        VARIABLES_TO_RESTORE,
-        tf.GraphKeys.GLOBAL_VARIABLES,
-        tf.GraphKeys.GLOBAL_STEP,
-    ]
-    # Get the device for the variable.
-    with tf.device(variable_device(device, 'global_step')):
-      return tf.get_variable('global_step', shape=[], dtype=tf.int64,
-                             initializer=tf.zeros_initializer(),
-                             trainable=False, collections=collections)
-
-
-@scopes.add_arg_scope
-def variable(name, shape=None, dtype=tf.float32, initializer=None,
-             regularizer=None, trainable=True, collections=None, device='',
-             restore=True):
-  """Gets an existing variable with these parameters or creates a new one.
-
-  It also adds itself to a group with its name.
-
-  Args:
-    name: the name of the new or existing variable.
-    shape: shape of the new or existing variable.
-    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
-    initializer: initializer for the variable if one is created.
-    regularizer: a (Tensor -> Tensor or None) function; the result of
-      applying it on a newly created variable will be added to the collection
-      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
-    trainable: If `True` also add the variable to the graph collection
-      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
-    collections: A list of collection names to which the Variable will be
-      added. Note that the variable is always also added to the
-      tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES collections.
-    device: Optional device to place the variable. It can be a string or a
-      function that is called to get the device for the variable.
-    restore: whether the variable should be added to the
-      VARIABLES_TO_RESTORE collection.
-
-  Returns:
-    The created or existing variable.
-  """
-  collections = list(collections or [])
-
-  # Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and
-  # MODEL_VARIABLES.
-  collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES]
-  # Add to VARIABLES_TO_RESTORE if necessary.
-  if restore:
-    collections.append(VARIABLES_TO_RESTORE)
-  # Remove duplicates.
-  collections = set(collections)
-  # Get the device for the variable.
-  with tf.device(variable_device(device, name)):
-    return tf.get_variable(name, shape=shape, dtype=dtype,
-                           initializer=initializer, regularizer=regularizer,
-                           trainable=trainable, collections=collections)
diff --git a/research/inception/inception/slim/variables_test.py b/research/inception/inception/slim/variables_test.py
deleted file mode 100644
index b8c1944df..000000000
--- a/research/inception/inception/slim/variables_test.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== -"""Tests for slim.variables.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from inception.slim import scopes -from inception.slim import variables - - -class VariablesTest(tf.test.TestCase): - - def testCreateVariable(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - self.assertEquals(a.op.name, 'A/a') - self.assertListEqual(a.get_shape().as_list(), [5]) - - def testGetVariables(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - with tf.variable_scope('B'): - b = variables.variable('a', [5]) - self.assertEquals([a, b], variables.get_variables()) - self.assertEquals([a], variables.get_variables('A')) - self.assertEquals([b], variables.get_variables('B')) - - def testGetVariablesSuffix(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - with tf.variable_scope('A'): - b = variables.variable('b', [5]) - self.assertEquals([a], variables.get_variables(suffix='a')) - self.assertEquals([b], variables.get_variables(suffix='b')) - - def testGetVariableWithSingleVar(self): - with self.test_session(): - with tf.variable_scope('parent'): - a = variables.variable('child', [5]) - self.assertEquals(a, variables.get_unique_variable('parent/child')) - - def testGetVariableWithDistractors(self): - with self.test_session(): - with tf.variable_scope('parent'): - a = variables.variable('child', [5]) - with tf.variable_scope('child'): - variables.variable('grandchild1', [7]) - variables.variable('grandchild2', [9]) - self.assertEquals(a, variables.get_unique_variable('parent/child')) - - def testGetVariableThrowsExceptionWithNoMatch(self): - var_name = 'cant_find_me' - with self.test_session(): - with self.assertRaises(ValueError): - variables.get_unique_variable(var_name) - - def testGetThrowsExceptionWithChildrenButNoMatch(self): - var_name = 'parent/child' - with self.test_session(): - with tf.variable_scope(var_name): - variables.variable('grandchild1', [7]) - variables.variable('grandchild2', [9]) - with self.assertRaises(ValueError): - variables.get_unique_variable(var_name) - - def testGetVariablesToRestore(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - with tf.variable_scope('B'): - b = variables.variable('a', [5]) - self.assertEquals([a, b], variables.get_variables_to_restore()) - - def testNoneGetVariablesToRestore(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5], restore=False) - with tf.variable_scope('B'): - b = variables.variable('a', [5], restore=False) - self.assertEquals([], variables.get_variables_to_restore()) - self.assertEquals([a, b], variables.get_variables()) - - def testGetMixedVariablesToRestore(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - b = variables.variable('b', [5], restore=False) - with tf.variable_scope('B'): - c = variables.variable('c', [5]) - d = variables.variable('d', [5], restore=False) - self.assertEquals([a, b, c, d], variables.get_variables()) - self.assertEquals([a, c], variables.get_variables_to_restore()) - - def testReuseVariable(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', []) - with tf.variable_scope('A', reuse=True): - 
b = variables.variable('a', []) - self.assertEquals(a, b) - self.assertListEqual([a], variables.get_variables()) - - def testVariableWithDevice(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [], device='cpu:0') - b = variables.variable('b', [], device='cpu:1') - self.assertDeviceEqual(a.device, 'cpu:0') - self.assertDeviceEqual(b.device, 'cpu:1') - - def testVariableWithDeviceFromScope(self): - with self.test_session(): - with tf.device('/cpu:0'): - a = variables.variable('a', []) - b = variables.variable('b', [], device='cpu:1') - self.assertDeviceEqual(a.device, 'cpu:0') - self.assertDeviceEqual(b.device, 'cpu:1') - - def testVariableWithDeviceFunction(self): - class DevFn(object): - - def __init__(self): - self.counter = -1 - - def __call__(self, op): - self.counter += 1 - return 'cpu:%d' % self.counter - - with self.test_session(): - with scopes.arg_scope([variables.variable], device=DevFn()): - a = variables.variable('a', []) - b = variables.variable('b', []) - c = variables.variable('c', [], device='cpu:12') - d = variables.variable('d', []) - with tf.device('cpu:99'): - e_init = tf.constant(12) - e = variables.variable('e', initializer=e_init) - self.assertDeviceEqual(a.device, 'cpu:0') - self.assertDeviceEqual(a.initial_value.device, 'cpu:0') - self.assertDeviceEqual(b.device, 'cpu:1') - self.assertDeviceEqual(b.initial_value.device, 'cpu:1') - self.assertDeviceEqual(c.device, 'cpu:12') - self.assertDeviceEqual(c.initial_value.device, 'cpu:12') - self.assertDeviceEqual(d.device, 'cpu:2') - self.assertDeviceEqual(d.initial_value.device, 'cpu:2') - self.assertDeviceEqual(e.device, 'cpu:3') - self.assertDeviceEqual(e.initial_value.device, 'cpu:99') - - def testVariableWithReplicaDeviceSetter(self): - with self.test_session(): - with tf.device(tf.train.replica_device_setter(ps_tasks=2)): - a = variables.variable('a', []) - b = variables.variable('b', []) - c = variables.variable('c', [], device='cpu:12') - d = variables.variable('d', []) - with tf.device('cpu:99'): - e_init = tf.constant(12) - e = variables.variable('e', initializer=e_init) - # The values below highlight how the replica_device_setter puts initial - # values on the worker job, and how it merges explicit devices. - self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') - self.assertDeviceEqual(a.initial_value.device, '/job:worker/cpu:0') - self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') - self.assertDeviceEqual(b.initial_value.device, '/job:worker/cpu:0') - self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12') - self.assertDeviceEqual(c.initial_value.device, '/job:worker/cpu:12') - self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0') - self.assertDeviceEqual(d.initial_value.device, '/job:worker/cpu:0') - self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0') - self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99') - - def testVariableWithVariableDeviceChooser(self): - - with tf.Graph().as_default(): - device_fn = variables.VariableDeviceChooser(num_parameter_servers=2) - with scopes.arg_scope([variables.variable], device=device_fn): - a = variables.variable('a', []) - b = variables.variable('b', []) - c = variables.variable('c', [], device='cpu:12') - d = variables.variable('d', []) - with tf.device('cpu:99'): - e_init = tf.constant(12) - e = variables.variable('e', initializer=e_init) - # The values below highlight how the VariableDeviceChooser puts initial - # values on the same device as the variable job. 
- self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') - self.assertDeviceEqual(a.initial_value.device, a.device) - self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') - self.assertDeviceEqual(b.initial_value.device, b.device) - self.assertDeviceEqual(c.device, '/cpu:12') - self.assertDeviceEqual(c.initial_value.device, c.device) - self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0') - self.assertDeviceEqual(d.initial_value.device, d.device) - self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0') - self.assertDeviceEqual(e.initial_value.device, '/cpu:99') - - def testVariableGPUPlacement(self): - - with tf.Graph().as_default(): - device_fn = variables.VariableDeviceChooser(placement='gpu:0') - with scopes.arg_scope([variables.variable], device=device_fn): - a = variables.variable('a', []) - b = variables.variable('b', []) - c = variables.variable('c', [], device='cpu:12') - d = variables.variable('d', []) - with tf.device('cpu:99'): - e_init = tf.constant(12) - e = variables.variable('e', initializer=e_init) - # The values below highlight how the VariableDeviceChooser puts initial - # values on the same device as the variable job. - self.assertDeviceEqual(a.device, '/gpu:0') - self.assertDeviceEqual(a.initial_value.device, a.device) - self.assertDeviceEqual(b.device, '/gpu:0') - self.assertDeviceEqual(b.initial_value.device, b.device) - self.assertDeviceEqual(c.device, '/cpu:12') - self.assertDeviceEqual(c.initial_value.device, c.device) - self.assertDeviceEqual(d.device, '/gpu:0') - self.assertDeviceEqual(d.initial_value.device, d.device) - self.assertDeviceEqual(e.device, '/gpu:0') - self.assertDeviceEqual(e.initial_value.device, '/cpu:99') - - def testVariableCollection(self): - with self.test_session(): - a = variables.variable('a', [], collections='A') - b = variables.variable('b', [], collections='B') - self.assertEquals(a, tf.get_collection('A')[0]) - self.assertEquals(b, tf.get_collection('B')[0]) - - def testVariableCollections(self): - with self.test_session(): - a = variables.variable('a', [], collections=['A', 'C']) - b = variables.variable('b', [], collections=['B', 'C']) - self.assertEquals(a, tf.get_collection('A')[0]) - self.assertEquals(b, tf.get_collection('B')[0]) - - def testVariableCollectionsWithArgScope(self): - with self.test_session(): - with scopes.arg_scope([variables.variable], collections='A'): - a = variables.variable('a', []) - b = variables.variable('b', []) - self.assertListEqual([a, b], tf.get_collection('A')) - - def testVariableCollectionsWithArgScopeNested(self): - with self.test_session(): - with scopes.arg_scope([variables.variable], collections='A'): - a = variables.variable('a', []) - with scopes.arg_scope([variables.variable], collections='B'): - b = variables.variable('b', []) - self.assertEquals(a, tf.get_collection('A')[0]) - self.assertEquals(b, tf.get_collection('B')[0]) - - def testVariableCollectionsWithArgScopeNonNested(self): - with self.test_session(): - with scopes.arg_scope([variables.variable], collections='A'): - a = variables.variable('a', []) - with scopes.arg_scope([variables.variable], collections='B'): - b = variables.variable('b', []) - variables.variable('c', []) - self.assertListEqual([a], tf.get_collection('A')) - self.assertListEqual([b], tf.get_collection('B')) - - def testVariableRestoreWithArgScopeNested(self): - with self.test_session(): - with scopes.arg_scope([variables.variable], restore=True): - a = variables.variable('a', []) - with scopes.arg_scope([variables.variable], - trainable=False, - 
collections=['A', 'B']): - b = variables.variable('b', []) - c = variables.variable('c', []) - self.assertListEqual([a, b, c], variables.get_variables_to_restore()) - self.assertListEqual([a, c], tf.trainable_variables()) - self.assertListEqual([b], tf.get_collection('A')) - self.assertListEqual([b], tf.get_collection('B')) - - -class GetVariablesByNameTest(tf.test.TestCase): - - def testGetVariableGivenNameScoped(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - b = variables.variable('b', [5]) - self.assertEquals([a], variables.get_variables_by_name('a')) - self.assertEquals([b], variables.get_variables_by_name('b')) - - def testGetVariablesByNameReturnsByValueWithScope(self): - with self.test_session(): - with tf.variable_scope('A'): - a = variables.variable('a', [5]) - matched_variables = variables.get_variables_by_name('a') - - # If variables.get_variables_by_name returns the list by reference, the - # following append should persist, and be returned, in subsequent calls - # to variables.get_variables_by_name('a'). - matched_variables.append(4) - - matched_variables = variables.get_variables_by_name('a') - self.assertEquals([a], matched_variables) - - def testGetVariablesByNameReturnsByValueWithoutScope(self): - with self.test_session(): - a = variables.variable('a', [5]) - matched_variables = variables.get_variables_by_name('a') - - # If variables.get_variables_by_name returns the list by reference, the - # following append should persist, and be returned, in subsequent calls - # to variables.get_variables_by_name('a'). - matched_variables.append(4) - - matched_variables = variables.get_variables_by_name('a') - self.assertEquals([a], matched_variables) - - -class GlobalStepTest(tf.test.TestCase): - - def testStable(self): - with tf.Graph().as_default(): - gs = variables.global_step() - gs2 = variables.global_step() - self.assertTrue(gs is gs2) - - def testDevice(self): - with tf.Graph().as_default(): - with scopes.arg_scope([variables.global_step], device='/gpu:0'): - gs = variables.global_step() - self.assertDeviceEqual(gs.device, '/gpu:0') - - def testDeviceFn(self): - class DevFn(object): - - def __init__(self): - self.counter = -1 - - def __call__(self, op): - self.counter += 1 - return '/cpu:%d' % self.counter - - with tf.Graph().as_default(): - with scopes.arg_scope([variables.global_step], device=DevFn()): - gs = variables.global_step() - gs2 = variables.global_step() - self.assertDeviceEqual(gs.device, '/cpu:0') - self.assertEquals(gs, gs2) - self.assertDeviceEqual(gs2.device, '/cpu:0') - - def testReplicaDeviceSetter(self): - device_fn = tf.train.replica_device_setter(2) - with tf.Graph().as_default(): - with scopes.arg_scope([variables.global_step], device=device_fn): - gs = variables.global_step() - gs2 = variables.global_step() - self.assertEquals(gs, gs2) - self.assertDeviceEqual(gs.device, '/job:ps/task:0') - self.assertDeviceEqual(gs.initial_value.device, '/job:ps/task:0') - self.assertDeviceEqual(gs2.device, '/job:ps/task:0') - self.assertDeviceEqual(gs2.initial_value.device, '/job:ps/task:0') - - def testVariableWithVariableDeviceChooser(self): - - with tf.Graph().as_default(): - device_fn = variables.VariableDeviceChooser() - with scopes.arg_scope([variables.global_step], device=device_fn): - gs = variables.global_step() - gs2 = variables.global_step() - self.assertEquals(gs, gs2) - self.assertDeviceEqual(gs.device, 'cpu:0') - self.assertDeviceEqual(gs.initial_value.device, gs.device) - 
self.assertDeviceEqual(gs2.device, 'cpu:0')
-      self.assertDeviceEqual(gs2.initial_value.device, gs2.device)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/keypointnet/CONTRIBUTING.md b/research/keypointnet/CONTRIBUTING.md
deleted file mode 100644
index 939e5341e..000000000
--- a/research/keypointnet/CONTRIBUTING.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# How to Contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution;
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult
-[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
-information on using pull requests.
-
-## Community Guidelines
-
-This project follows [Google's Open Source Community
-Guidelines](https://opensource.google.com/conduct/).
diff --git a/research/keypointnet/LICENSE b/research/keypointnet/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/research/keypointnet/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/research/keypointnet/README.md b/research/keypointnet/README.md deleted file mode 100644 index 8de88ca5a..000000000 --- a/research/keypointnet/README.md +++ /dev/null @@ -1,46 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# KeypointNet -This is an implementation of the keypoint network proposed in "Discovery of -Latent 3D Keypoints via End-to-end Geometric Reasoning -[[pdf](https://arxiv.org/pdf/1807.03146.pdf)]". Given a single 2D image of a -known class, this network can predict a set of 3D keypoints that are consistent -across viewing angles of the same object and across object instances. 
These
-keypoints and their detectors are discovered and learned automatically without
-keypoint location supervision [[demo](https://keypointnet.github.io)].
-
-## Datasets:
-  ShapeNet renderings for
-  [Cars](https://storage.googleapis.com/discovery-3dkeypoints-data/cars_with_keypoints.zip),
-  [Planes](https://storage.googleapis.com/discovery-3dkeypoints-data/planes_with_keypoints.zip),
-  [Chairs](https://storage.googleapis.com/discovery-3dkeypoints-data/chairs_with_keypoints.zip).
-
-  Each set contains:
-1. tfrecords
-2. train.txt, a list of tfrecords used for training.
-3. dev.txt, a list of tfrecords used for validation.
-4. test.txt, a list of tfrecords used for testing.
-5. projection.txt, storing the global 4x4 camera projection matrix.
-6. job.txt, storing ShapeNet's object IDs in each tfrecord.
-
-## Training:
-  Run `main.py --model_dir=MODEL_DIR --dset=DSET`
-
-  where MODEL_DIR is a folder for storing model checkpoints (see [tf.estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)), and DSET should point to the folder containing tfrecords (download above).
-
-## Inference:
-  Run `main.py --model_dir=MODEL_DIR --input=INPUT --predict`
-
-  where MODEL_DIR is the model checkpoint folder, and INPUT is a folder containing png or jpeg test images.
-  We trained the network with a total batch size of 256 (8 x 32 replicas). You may have to tune the learning rate if your batch size is different.
-
-## Code credit:
-  Supasorn Suwajanakorn
-
-## Contact:
-  supasorn@gmail.com, [snavely,tompson,mnorouzi]@google.com
-
-
-(This is not an officially supported Google product)
diff --git a/research/keypointnet/main.py b/research/keypointnet/main.py
deleted file mode 100644
index 04b301594..000000000
--- a/research/keypointnet/main.py
+++ /dev/null
@@ -1,697 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""KeypointNet.
-
-A reimplementation of 'Discovery of Latent 3D Keypoints via End-to-end
-Geometric Reasoning' keypoint network. Given a single 2D image of a known
-class, this network can predict a set of 3D keypoints that are consistent
-across viewing angles of the same object and across object instances. These
-keypoints and their detectors are discovered and learned automatically without
-keypoint location supervision.
-"""
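For quick reference, the training and inference commands the README describes can be scripted as below. This is a minimal sketch only: MODEL_DIR, DSET, and the image folder are placeholder paths, not values from the repository.

```python
# Hypothetical driver for the two main.py entry points described above.
import subprocess

MODEL_DIR = "/tmp/keypointnet_model"  # placeholder checkpoint folder
DSET = "/tmp/cars_with_keypoints"     # placeholder folder of tfrecords

# Training, per the README.
subprocess.run(
    ["python", "main.py", "--model_dir=" + MODEL_DIR, "--dset=" + DSET],
    check=True)

# Inference on a folder of png/jpeg images.
subprocess.run(
    ["python", "main.py", "--model_dir=" + MODEL_DIR,
     "--input=/tmp/test_images", "--predict"],
    check=True)
```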
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import matplotlib.pyplot as plt -import numpy as np -import os -from scipy import misc -import sys -import tensorflow as tf -import tensorflow.contrib.slim as slim -import utils - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_boolean("predict", False, "Running inference if true") -tf.app.flags.DEFINE_string( - "input", - "", - "Input folder containing images") -tf.app.flags.DEFINE_string("model_dir", None, "Estimator model_dir") -tf.app.flags.DEFINE_string( - "dset", - "", - "Path to the directory containing the dataset.") -tf.app.flags.DEFINE_integer("steps", 200000, "Training steps") -tf.app.flags.DEFINE_integer("batch_size", 8, "Size of mini-batch.") -tf.app.flags.DEFINE_string( - "hparams", "", - "A comma-separated list of `name=value` hyperparameter values. This flag " - "is used to override hyperparameter settings either when manually " - "selecting hyperparameters or when using Vizier.") -tf.app.flags.DEFINE_integer( - "sync_replicas", -1, - "If > 0, use SyncReplicasOptimizer and use this many replicas per sync.") - -# Fixed input size 128 x 128. -vw = vh = 128 - - -def create_input_fn(split, batch_size): - """Returns input_fn for tf.estimator.Estimator. - - Reads tfrecords and construts input_fn for either training or eval. All - tfrecords not in test.txt or dev.txt will be assigned to training set. - - Args: - split: A string indicating the split. Can be either 'train' or 'validation'. - batch_size: The batch size! - - Returns: - input_fn for tf.estimator.Estimator. - - Raises: - IOError: If test.txt or dev.txt are not found. - """ - - if (not os.path.exists(os.path.join(FLAGS.dset, "test.txt")) or - not os.path.exists(os.path.join(FLAGS.dset, "dev.txt"))): - raise IOError("test.txt or dev.txt not found") - - with open(os.path.join(FLAGS.dset, "test.txt"), "r") as f: - testset = [x.strip() for x in f.readlines()] - - with open(os.path.join(FLAGS.dset, "dev.txt"), "r") as f: - validset = [x.strip() for x in f.readlines()] - - files = os.listdir(FLAGS.dset) - filenames = [] - for f in files: - sp = os.path.splitext(f) - if sp[1] != ".tfrecord" or sp[0] in testset: - continue - - if ((split == "validation" and sp[0] in validset) or - (split == "train" and sp[0] not in validset)): - filenames.append(os.path.join(FLAGS.dset, f)) - - def input_fn(): - """input_fn for tf.estimator.Estimator.""" - - def parser(serialized_example): - """Parses a single tf.Example into image and label tensors.""" - fs = tf.parse_single_example( - serialized_example, - features={ - "img0": tf.FixedLenFeature([], tf.string), - "img1": tf.FixedLenFeature([], tf.string), - "mv0": tf.FixedLenFeature([16], tf.float32), - "mvi0": tf.FixedLenFeature([16], tf.float32), - "mv1": tf.FixedLenFeature([16], tf.float32), - "mvi1": tf.FixedLenFeature([16], tf.float32), - }) - - fs["img0"] = tf.div(tf.to_float(tf.image.decode_png(fs["img0"], 4)), 255) - fs["img1"] = tf.div(tf.to_float(tf.image.decode_png(fs["img1"], 4)), 255) - - fs["img0"].set_shape([vh, vw, 4]) - fs["img1"].set_shape([vh, vw, 4]) - - # fs["lr0"] = [fs["mv0"][0]] - # fs["lr1"] = [fs["mv1"][0]] - - fs["lr0"] = tf.convert_to_tensor([fs["mv0"][0]]) - fs["lr1"] = tf.convert_to_tensor([fs["mv1"][0]]) - - return fs - - np.random.shuffle(filenames) - dataset = tf.data.TFRecordDataset(filenames) - dataset = dataset.map(parser, num_parallel_calls=4) - dataset = dataset.shuffle(400).repeat().batch(batch_size) - dataset = 
-
-    return dataset.make_one_shot_iterator().get_next(), None
-
-  return input_fn
-
-
-class Transformer(object):
-  """A utility for projecting 3D points to 2D coordinates and vice versa.
-
-  3D points are represented in 4D-homogeneous world coordinates. The pixel
-  coordinates are represented in normalized device coordinates [-1, 1].
-  See https://learnopengl.com/Getting-started/Coordinate-Systems.
-  """
-
-  def __get_matrix(self, lines):
-    return np.array([[float(y) for y in x.strip().split(" ")] for x in lines])
-
-  def __read_projection_matrix(self, filename):
-    if not os.path.exists(filename):
-      filename = "/cns/vz-d/home/supasorn/datasets/cars/projection.txt"
-    with open(filename, "r") as f:
-      lines = f.readlines()
-    return self.__get_matrix(lines)
-
-  def __init__(self, w, h, dataset_dir):
-    self.w = w
-    self.h = h
-    p = self.__read_projection_matrix(dataset_dir + "projection.txt")
-
-    # Transpose of the inverse projection matrix.
-    self.pinv_t = tf.constant([[1.0 / p[0, 0], 0, 0,
-                                0], [0, 1.0 / p[1, 1], 0, 0], [0, 0, 1, 0],
-                               [0, 0, 0, 1]])
-    self.f = p[0, 0]
-
-  def project(self, xyzw):
-    """Projects homogeneous 3D coordinates to normalized device coordinates."""
-
-    z = xyzw[:, :, 2:3] + 1e-8
-    return tf.concat([-self.f * xyzw[:, :, :2] / z, z], axis=2)
-
-  def unproject(self, xyz):
-    """Unprojects normalized device coordinates with depth to 3D coordinates."""
-
-    z = xyz[:, :, 2:]
-    xy = -xyz * z
-
-    def batch_matmul(a, b):
-      return tf.reshape(
-          tf.matmul(tf.reshape(a, [-1, a.shape[2].value]), b),
-          [-1, a.shape[1].value, a.shape[2].value])
-
-    return batch_matmul(
-        tf.concat([xy[:, :, :2], z, tf.ones_like(z)], axis=2), self.pinv_t)
-
-
-def meshgrid(h):
-  """Returns a meshgrid ranging from [-1, 1] in x, y axes."""
-
-  r = np.arange(0.5, h, 1) / (h / 2) - 1
-  ranx, rany = tf.meshgrid(r, -r)
-  return tf.to_float(ranx), tf.to_float(rany)
-
-
-def estimate_rotation(xyz0, xyz1, pconf, noise):
-  """Estimates the rotation between two sets of keypoints.
-
-  The rotation is estimated by first subtracting the mean from each set of
-  keypoints and then computing the SVD of the covariance matrix.
-
-  Args:
-    xyz0: [batch, num_kp, 3] The first set of keypoints.
-    xyz1: [batch, num_kp, 3] The second set of keypoints.
-    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
-    noise: A number indicating the noise added to the keypoints.
-
-  Returns:
-    [batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
-  """
-
-  xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
-  xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)
-
-  pconf2 = tf.expand_dims(pconf, 2)
-  cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
-  cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
-
-  x = xyz0 - cen0
-  y = xyz1 - cen1
-
-  cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
-  _, u, v = tf.svd(cov, full_matrices=True)
-
-  d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
-  ud = tf.concat(
-      [u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
-      axis=2)
-  return tf.matmul(ud, v, transpose_b=True)
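As a reference for the weighted-SVD (Kabsch) step above, here is a single-example NumPy sketch that mirrors the same operations; the function name is illustrative and the noise term is omitted.

```python
import numpy as np

def estimate_rotation_np(xyz0, xyz1, w):
    """xyz0, xyz1: [num_kp, 3] point sets; w: [num_kp] weights summing to 1."""
    cen0 = (xyz0 * w[:, None]).sum(axis=0)  # weighted centroids
    cen1 = (xyz1 * w[:, None]).sum(axis=0)
    x, y = xyz0 - cen0, xyz1 - cen1
    cov = x.T @ np.diag(w) @ y              # [3, 3] weighted covariance
    u, _, vt = np.linalg.svd(cov)
    d = np.linalg.det(vt.T @ u.T)           # +1 or -1; guards against reflections
    u[:, -1] *= d                           # flip the last singular vector if needed
    return u @ vt                           # transposed rotation, as in the TF code
```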
-
-
-def relative_pose_loss(xyz0, xyz1, rot, pconf, noise):
-  """Computes the relative pose loss (chordal, angular).
-
-  Args:
-    xyz0: [batch, num_kp, 3] The first set of keypoints.
-    xyz1: [batch, num_kp, 3] The second set of keypoints.
-    rot: [batch, 4, 4] The ground-truth rotation matrices.
-    pconf: [batch, num_kp] The weights used to compute the rotation estimate.
-    noise: A number indicating the noise added to the keypoints.
-
-  Returns:
-    A tuple (chordal loss, angular loss).
-  """
-
-  r_transposed = estimate_rotation(xyz0, xyz1, pconf, noise)
-  rotation = rot[:, :3, :3]
-  frob_sqr = tf.reduce_sum(tf.square(r_transposed - rotation), axis=[1, 2])
-  frob = tf.sqrt(frob_sqr)
-
-  return tf.reduce_mean(frob_sqr), \
-      2.0 * tf.reduce_mean(tf.asin(tf.minimum(1.0, frob / (2 * math.sqrt(2)))))
-
-
-def separation_loss(xyz, delta):
-  """Computes the separation loss.
-
-  Args:
-    xyz: [batch, num_kp, 3] Input keypoints.
-    delta: A separation threshold. No cost is incurred if the squared
-      distance >= delta.
-
-  Returns:
-    The separation loss.
-  """
-
-  num_kp = tf.shape(xyz)[1]
-  t1 = tf.tile(xyz, [1, num_kp, 1])
-
-  t2 = tf.reshape(tf.tile(xyz, [1, 1, num_kp]), tf.shape(t1))
-  diffsq = tf.square(t1 - t2)
-
-  # -> [batch, num_kp ^ 2]
-  lensqr = tf.reduce_sum(diffsq, axis=2)
-
-  return (tf.reduce_sum(tf.maximum(-lensqr + delta, 0.0)) / tf.to_float(
-      num_kp * FLAGS.batch_size * 2))
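For intuition, a single-example NumPy sketch of the hinge that separation_loss applies; the name is illustrative, and as in the TF code the hinge acts on squared pairwise distances (self-pairs contribute delta each).

```python
import numpy as np

def separation_loss_np(xyz, delta):
    """xyz: [num_kp, 3] keypoints for one example."""
    diff = xyz[:, None, :] - xyz[None, :, :]  # [num_kp, num_kp, 3] pairwise diffs
    lensqr = (diff ** 2).sum(axis=2)          # squared pairwise distances
    num_kp = xyz.shape[0]
    # Pairs closer than delta (in squared distance) are penalized.
    return np.maximum(delta - lensqr, 0.0).sum() / (num_kp * 2)

print(separation_loss_np(np.random.rand(10, 3), delta=0.05))
```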
-
-
-def consistency_loss(uv0, uv1, pconf):
-  """Computes multi-view consistency loss between two sets of keypoints.
-
-  Args:
-    uv0: [batch, num_kp, 2] The first set of keypoint 2D coordinates.
-    uv1: [batch, num_kp, 2] The second set of keypoint 2D coordinates.
-    pconf: [batch, num_kp] The keypoint confidence weights.
-
-  Returns:
-    The consistency loss.
-  """
-
-  # [batch, num_kp, 2]
-  wd = tf.square(uv0 - uv1) * tf.expand_dims(pconf, 2)
-  wd = tf.reduce_sum(wd, axis=[1, 2])
-  return tf.reduce_mean(wd)
-
-
-def variance_loss(probmap, ranx, rany, uv):
-  """Computes the variance loss as part of Silhouette consistency.
-
-  Args:
-    probmap: [batch, num_kp, h, w] The distribution map of keypoint locations.
-    ranx: X-axis meshgrid.
-    rany: Y-axis meshgrid.
-    uv: [batch, num_kp, 2] Keypoint locations (in NDC).
-
-  Returns:
-    The variance loss.
-  """
-
-  ran = tf.stack([ranx, rany], axis=2)
-
-  sh = tf.shape(ran)
-  # [batch, num_kp, vh, vw, 2]
-  ran = tf.reshape(ran, [1, 1, sh[0], sh[1], 2])
-
-  sh = tf.shape(uv)
-  uv = tf.reshape(uv, [sh[0], sh[1], 1, 1, 2])
-
-  diff = tf.reduce_sum(tf.square(uv - ran), axis=4)
-  diff *= probmap
-
-  return tf.reduce_mean(tf.reduce_sum(diff, axis=[2, 3]))
-
-
-def dilated_cnn(images, num_filters, is_training):
-  """Constructs a base dilated convolutional network.
-
-  Args:
-    images: [batch, h, w, 3] Input RGB images.
-    num_filters: The number of filters for all layers.
-    is_training: True if this function is called during training.
-
-  Returns:
-    Output of this dilated CNN.
-  """
-
-  net = images
-
-  with slim.arg_scope(
-      [slim.conv2d, slim.fully_connected],
-      normalizer_fn=slim.batch_norm,
-      activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1),
-      normalizer_params={"is_training": is_training}):
-    for i, r in enumerate([1, 1, 2, 4, 8, 16, 1, 2, 4, 8, 16, 1]):
-      net = slim.conv2d(net, num_filters, [3, 3], rate=r, scope="dconv%d" % i)
-
-  return net
-
-
-def orientation_network(images, num_filters, is_training):
-  """Constructs a network that infers the orientation of an object.
-
-  Args:
-    images: [batch, h, w, 3] Input RGB images.
-    num_filters: The number of filters for all layers.
-    is_training: True if this function is called during training.
-
-  Returns:
-    Output of the orientation network.
-  """
-
-  with tf.variable_scope("OrientationNetwork"):
-    net = dilated_cnn(images, num_filters, is_training)
-
-    modules = 2
-    prob = slim.conv2d(net, 2, [3, 3], rate=1, activation_fn=None)
-    prob = tf.transpose(prob, [0, 3, 1, 2])
-
-    prob = tf.reshape(prob, [-1, modules, vh * vw])
-    prob = tf.nn.softmax(prob)
-    ranx, rany = meshgrid(vh)
-
-    prob = tf.reshape(prob, [-1, 2, vh, vw])
-
-    sx = tf.reduce_sum(prob * ranx, axis=[2, 3])
-    sy = tf.reduce_sum(prob * rany, axis=[2, 3])  # -> batch x modules
-
-    out_xy = tf.reshape(tf.stack([sx, sy], -1), [-1, modules, 2])
-
-  return out_xy
-
-
-def keypoint_network(rgba,
-                     num_filters,
-                     num_kp,
-                     is_training,
-                     lr_gt=None,
-                     anneal=1):
-  """Constructs our main keypoint network that predicts 3D keypoints.
-
-  Args:
-    rgba: [batch, h, w, 4] Input RGB images with alpha channel.
-    num_filters: The number of filters for all layers.
-    num_kp: The number of keypoints.
-    is_training: True if this function is called during training.
-    lr_gt: The ground-truth orientation flag used at the beginning of training;
-      we then linearly anneal in the prediction.
-    anneal: A number between [0, 1] where 1 means using the ground-truth
-      orientation and 0 means using our estimate.
-
-  Returns:
-    uv: [batch, num_kp, 2] 2D locations of keypoints.
-    z: [batch, num_kp] The depth of keypoints.
-    orient: [batch, 2, 2] Two 2D coordinates that correspond to [1, 0, 0] and
-      [-1, 0, 0] in object space.
-    sill: The Silhouette loss.
-    variance: The variance loss.
-    prob_viz: A visualization of all predicted keypoints.
-    prob_vizs: A list of visualizations of each keypoint.
-
-  """
-
-  images = rgba[:, :, :, :3]
-
-  # [batch, 2, 2]
-  orient = orientation_network(images, num_filters * 0.5, is_training)
-
-  # [batch, 1]
-  lr_estimated = tf.maximum(0.0, tf.sign(orient[:, 0, :1] - orient[:, 1, :1]))
-
-  if lr_gt is None:
-    lr = lr_estimated
-  else:
-    lr_gt = tf.maximum(0.0, tf.sign(lr_gt[:, :1]))
-    lr = tf.round(lr_gt * anneal + lr_estimated * (1 - anneal))
-
-  lrtiled = tf.tile(
-      tf.expand_dims(tf.expand_dims(lr, 1), 1),
-      [1, images.shape[1], images.shape[2], 1])
-
-  images = tf.concat([images, lrtiled], axis=3)
-
-  mask = rgba[:, :, :, 3]
-  mask = tf.cast(tf.greater(mask, tf.zeros_like(mask)), dtype=tf.float32)
-
-  net = dilated_cnn(images, num_filters, is_training)
-
-  # The probability distribution map.
-  prob = slim.conv2d(
-      net, num_kp, [3, 3], rate=1, scope="conv_xy", activation_fn=None)
-
-  # We added the fixed camera distance as a bias.
-  z = -30 + slim.conv2d(
-      net, num_kp, [3, 3], rate=1, scope="conv_z", activation_fn=None)
-
-  prob = tf.transpose(prob, [0, 3, 1, 2])
-  z = tf.transpose(z, [0, 3, 1, 2])
-
-  prob = tf.reshape(prob, [-1, num_kp, vh * vw])
-  prob = tf.nn.softmax(prob, name="softmax")
-
-  ranx, rany = meshgrid(vh)
-  prob = tf.reshape(prob, [-1, num_kp, vh, vw])
-
-  # These are for visualizing the distribution maps.
- prob_viz = tf.expand_dims(tf.reduce_sum(prob, 1), 3) - prob_vizs = [tf.expand_dims(prob[:, i, :, :], 3) for i in range(num_kp)] - - sx = tf.reduce_sum(prob * ranx, axis=[2, 3]) - sy = tf.reduce_sum(prob * rany, axis=[2, 3]) # -> batch x num_kp - - # [batch, num_kp] - sill = tf.reduce_sum(prob * tf.expand_dims(mask, 1), axis=[2, 3]) - sill = tf.reduce_mean(-tf.log(sill + 1e-12)) - - z = tf.reduce_sum(prob * z, axis=[2, 3]) - uv = tf.reshape(tf.stack([sx, sy], -1), [-1, num_kp, 2]) - - variance = variance_loss(prob, ranx, rany, uv) - - return uv, z, orient, sill, variance, prob_viz, prob_vizs - - -def model_fn(features, labels, mode, hparams): - """Returns model_fn for tf.estimator.Estimator.""" - - del labels - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - t = Transformer(vw, vh, FLAGS.dset) - - def func1(x): - return tf.transpose(tf.reshape(features[x], [-1, 4, 4]), [0, 2, 1]) - - mv = [func1("mv%d" % i) for i in range(2)] - mvi = [func1("mvi%d" % i) for i in range(2)] - - uvz = [None] * 2 - uvz_proj = [None] * 2 # uvz coordinates projected on to the other view. - viz = [None] * 2 - vizs = [None] * 2 - - loss_sill = 0 - loss_variance = 0 - loss_con = 0 - loss_sep = 0 - loss_lr = 0 - - for i in range(2): - with tf.variable_scope("KeypointNetwork", reuse=i > 0): - # anneal: 1 = using ground-truth, 0 = using our estimate orientation. - anneal = tf.to_float(hparams.lr_anneal_end - tf.train.get_global_step()) - anneal = tf.clip_by_value( - anneal / (hparams.lr_anneal_end - hparams.lr_anneal_start), 0.0, 1.0) - - uv, z, orient, sill, variance, viz[i], vizs[i] = keypoint_network( - features["img%d" % i], - hparams.num_filters, - hparams.num_kp, - is_training, - lr_gt=features["lr%d" % i], - anneal=anneal) - - # x-positive/negative axes (dominant direction). 
- xp_axis = tf.tile( - tf.constant([[[1.0, 0, 0, 1], [-1.0, 0, 0, 1]]]), - [tf.shape(orient)[0], 1, 1]) - - # [batch, 2, 4] = [batch, 2, 4] x [batch, 4, 4] - xp = tf.matmul(xp_axis, mv[i]) - - # [batch, 2, 3] - xp = t.project(xp) - - loss_lr += tf.losses.mean_squared_error(orient[:, :, :2], xp[:, :, :2]) - loss_variance += variance - loss_sill += sill - - uv = tf.reshape(uv, [-1, hparams.num_kp, 2]) - z = tf.reshape(z, [-1, hparams.num_kp, 1]) - - # [batch, num_kp, 3] - uvz[i] = tf.concat([uv, z], axis=2) - - world_coords = tf.matmul(t.unproject(uvz[i]), mvi[i]) - - # [batch, num_kp, 3] - uvz_proj[i] = t.project(tf.matmul(world_coords, mv[1 - i])) - - pconf = tf.ones( - [tf.shape(uv)[0], tf.shape(uv)[1]], dtype=tf.float32) / hparams.num_kp - - for i in range(2): - loss_con += consistency_loss(uvz_proj[i][:, :, :2], uvz[1 - i][:, :, :2], - pconf) - loss_sep += separation_loss( - t.unproject(uvz[i])[:, :, :3], hparams.sep_delta) - - chordal, angular = relative_pose_loss( - t.unproject(uvz[0])[:, :, :3], - t.unproject(uvz[1])[:, :, :3], tf.matmul(mvi[0], mv[1]), pconf, - hparams.noise) - - loss = ( - hparams.loss_pose * angular + - hparams.loss_con * loss_con + - hparams.loss_sep * loss_sep + - hparams.loss_sill * loss_sill + - hparams.loss_lr * loss_lr + - hparams.loss_variance * loss_variance - ) - - def touint8(img): - return tf.cast(img * 255.0, tf.uint8) - - with tf.variable_scope("output"): - tf.summary.image("0_img0", touint8(features["img0"][:, :, :, :3])) - tf.summary.image("1_combined", viz[0]) - for i in range(hparams.num_kp): - tf.summary.image("2_f%02d" % i, vizs[0][i]) - - with tf.variable_scope("stats"): - tf.summary.scalar("anneal", anneal) - tf.summary.scalar("closs", loss_con) - tf.summary.scalar("seploss", loss_sep) - tf.summary.scalar("angular", angular) - tf.summary.scalar("chordal", chordal) - tf.summary.scalar("lrloss", loss_lr) - tf.summary.scalar("sill", loss_sill) - tf.summary.scalar("vloss", loss_variance) - - return { - "loss": loss, - "predictions": { - "img0": features["img0"], - "img1": features["img1"], - "uvz0": uvz[0], - "uvz1": uvz[1] - }, - "eval_metric_ops": { - "closs": tf.metrics.mean(loss_con), - "angular_loss": tf.metrics.mean(angular), - "chordal_loss": tf.metrics.mean(chordal), - } - } - - -def predict(input_folder, hparams): - """Predicts keypoints on all images in input_folder.""" - - cols = plt.cm.get_cmap("rainbow")( - np.linspace(0, 1.0, hparams.num_kp))[:, :4] - - img = tf.placeholder(tf.float32, shape=(1, 128, 128, 4)) - - with tf.variable_scope("KeypointNetwork"): - ret = keypoint_network( - img, hparams.num_filters, hparams.num_kp, False) - - uv = tf.reshape(ret[0], [-1, hparams.num_kp, 2]) - z = tf.reshape(ret[1], [-1, hparams.num_kp, 1]) - uvz = tf.concat([uv, z], axis=2) - - sess = tf.Session() - saver = tf.train.Saver() - ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir) - - print("loading model: ", ckpt.model_checkpoint_path) - saver.restore(sess, ckpt.model_checkpoint_path) - - files = [x for x in os.listdir(input_folder) - if x[-3:] in ["jpg", "png"]] - - output_folder = os.path.join(input_folder, "output") - if not os.path.exists(output_folder): - os.mkdir(output_folder) - - for f in files: - orig = misc.imread(os.path.join(input_folder, f)).astype(float) / 255 - if orig.shape[2] == 3: - orig = np.concatenate((orig, np.ones_like(orig[:, :, :1])), axis=2) - - uv_ret = sess.run(uvz, feed_dict={img: np.expand_dims(orig, 0)}) - - utils.draw_ndc_points(orig, uv_ret.reshape(hparams.num_kp, 3), cols) - 
misc.imsave(os.path.join(output_folder, f), orig)
-
-
-def _default_hparams():
-  """Returns default or overridden user-specified hyperparameters."""
-
-  hparams = tf.contrib.training.HParams(
-      num_filters=64,  # Number of filters.
-      num_kp=10,  # Number of keypoints.
-
-      loss_pose=0.2,  # Pose loss.
-      loss_con=1.0,  # Multiview consistency loss.
-      loss_sep=1.0,  # Separation loss.
-      loss_sill=1.0,  # Silhouette loss.
-      loss_lr=1.0,  # Orientation loss.
-      loss_variance=0.5,  # Variance loss (part of silhouette loss).
-
-      sep_delta=0.05,  # Separation threshold.
-      noise=0.1,  # Noise added during estimating rotation.
-
-      learning_rate=1.0e-3,
-      lr_anneal_start=30000,  # When to start annealing the orientation prediction.
-      lr_anneal_end=60000,  # When to use the prediction completely.
-  )
-  if FLAGS.hparams:
-    hparams = hparams.parse(FLAGS.hparams)
-  return hparams
-
-
-def main(argv):
-  del argv
-
-  hparams = _default_hparams()
-
-  if FLAGS.predict:
-    predict(FLAGS.input, hparams)
-  else:
-    utils.train_and_eval(
-        model_dir=FLAGS.model_dir,
-        model_fn=model_fn,
-        input_fn=create_input_fn,
-        hparams=hparams,
-        steps=FLAGS.steps,
-        batch_size=FLAGS.batch_size,
-        save_checkpoints_secs=600,
-        eval_throttle_secs=1800,
-        eval_steps=5,
-        sync_replicas=FLAGS.sync_replicas,
-    )
-
-
-if __name__ == "__main__":
-  sys.excepthook = utils.colored_hook(
-      os.path.dirname(os.path.realpath(__file__)))
-  tf.app.run()
diff --git a/research/keypointnet/tools/gen_tfrecords.py b/research/keypointnet/tools/gen_tfrecords.py
deleted file mode 100644
index 2f973b7fe..000000000
--- a/research/keypointnet/tools/gen_tfrecords.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""An example script to generate a tfrecord file from a folder containing the
-renderings.
- -Example usage: - python gen_tfrecords.py --input=FOLDER --output=output.tfrecord - -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import os -from scipy import misc -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS -tf.app.flags.DEFINE_string("input", "", "Input folder containing images") -tf.app.flags.DEFINE_string("output", "", "Output tfrecord.") - - -def get_matrix(lines): - return np.array([[float(y) for y in x.strip().split(" ")] for x in lines]) - - -def read_model_view_matrices(filename): - with open(filename, "r") as f: - lines = f.readlines() - return get_matrix(lines[:4]), get_matrix(lines[4:]) - - -def bytes_feature(values): - return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) - - -def generate(): - with tf.python_io.TFRecordWriter(FLAGS.output) as tfrecord_writer: - with tf.Graph().as_default(): - im0 = tf.placeholder(dtype=tf.uint8) - im1 = tf.placeholder(dtype=tf.uint8) - encoded0 = tf.image.encode_png(im0) - encoded1 = tf.image.encode_png(im1) - - with tf.Session() as sess: - count = 0 - indir = FLAGS.input + "/" - while tf.gfile.Exists(indir + "%06d.txt" % count): - print("saving %06d" % count) - image0 = misc.imread(indir + "%06d.png" % (count * 2)) - image1 = misc.imread(indir + "%06d.png" % (count * 2 + 1)) - - mat0, mat1 = read_model_view_matrices(indir + "%06d.txt" % count) - - mati0 = np.linalg.inv(mat0).flatten() - mati1 = np.linalg.inv(mat1).flatten() - mat0 = mat0.flatten() - mat1 = mat1.flatten() - - st0, st1 = sess.run([encoded0, encoded1], - feed_dict={im0: image0, im1: image1}) - - example = tf.train.Example(features=tf.train.Features(feature={ - 'img0': bytes_feature(st0), - 'img1': bytes_feature(st1), - 'mv0': tf.train.Feature( - float_list=tf.train.FloatList(value=mat0)), - 'mvi0': tf.train.Feature( - float_list=tf.train.FloatList(value=mati0)), - 'mv1': tf.train.Feature( - float_list=tf.train.FloatList(value=mat1)), - 'mvi1': tf.train.Feature( - float_list=tf.train.FloatList(value=mati1)), - })) - - tfrecord_writer.write(example.SerializeToString()) - count += 1 - - -def main(argv): - del argv - generate() - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/keypointnet/tools/render.py b/research/keypointnet/tools/render.py deleted file mode 100644 index 3a8872675..000000000 --- a/research/keypointnet/tools/render.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================= -"""Script to render object views from ShapeNet obj models. 
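For reference, a record written by `generate()` above can be read back with a schema mirroring the writer: two PNG-encoded byte features and four modelview matrices stored flattened as 16 floats. A sketch of such a parser (TF1-style; feature names follow the writer code):

```python
# Reader-side schema matching the tf.train.Example layout written above.
import tensorflow as tf

FEATURES = {
    "img0": tf.FixedLenFeature([], tf.string),
    "img1": tf.FixedLenFeature([], tf.string),
    "mv0": tf.FixedLenFeature([16], tf.float32),
    "mvi0": tf.FixedLenFeature([16], tf.float32),
    "mv1": tf.FixedLenFeature([16], tf.float32),
    "mvi1": tf.FixedLenFeature([16], tf.float32),
}

def parse_example(serialized):
  f = tf.parse_single_example(serialized, FEATURES)
  img0 = tf.image.decode_png(f["img0"], channels=4)  # RGBA renders
  img1 = tf.image.decode_png(f["img1"], channels=4)
  return (img0, img1,
          tf.reshape(f["mv0"], [4, 4]), tf.reshape(f["mvi0"], [4, 4]))
```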
- -Example usage: - blender -b --python render.py -- -m model.obj -o output/ -s 128 -n 120 -fov 5 - -""" -from __future__ import print_function - -import argparse -import itertools -import json -from math import pi -import os -import random -import sys -from mathutils import Vector -import math -import mathutils -import time -import copy - -import bpy - -sys.path.append(os.path.dirname(__file__)) - -BG_LUMINANCE = 0 - - -def look_at(obj_camera, point): - loc_camera = obj_camera.location - direction = point - loc_camera - # point the cameras '-Z' and use its 'Y' as up - rot_quat = direction.to_track_quat('-Z', 'Y') - - obj_camera.rotation_euler = rot_quat.to_euler() - - -def roll_camera(obj_camera): - roll_rotate = mathutils.Euler( - (0, 0, random.random() * math.pi - math.pi * 0.5), 'XYZ') - obj_camera.rotation_euler = (obj_camera.rotation_euler.to_matrix() * - roll_rotate.to_matrix()).to_euler() - - -def norm(x): - return math.sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]) - - -def normalize(x): - n = norm(x) - x[0] /= n - x[1] /= n - x[2] /= n - - -def random_top_sphere(): - xyz = [random.normalvariate(0, 1) for x in range(3)] - normalize(xyz) - - if xyz[2] < 0: - xyz[2] *= -1 - return xyz - - -def perturb_sphere(loc, size): - while True: - xyz = [random.normalvariate(0, 1) for x in range(3)] - normalize(xyz) - - nloc = [loc[i] + xyz[i] * random.random() * size for i in range(3)] - normalize(nloc) - - if nloc[2] >= 0: - return nloc - - -def perturb(loc, size): - while True: - nloc = [loc[i] + random.random() * size * 2 - size for i in range(3)] - if nloc[2] >= 0: - return nloc - - bpy.ops.object.mode_set() - - -def delete_all_objects(): - bpy.ops.object.select_by_type(type="MESH") - bpy.ops.object.delete(use_global=False) - - -def set_scene(render_size, fov, alpha=False): - """Set up default scene properties.""" - delete_all_objects() - - cam = bpy.data.cameras["Camera"] - cam.angle = fov * pi / 180 - - light = bpy.data.objects["Lamp"] - light.location = (0, 0, 1) - look_at(light, Vector((0.0, 0, 0))) - bpy.data.lamps['Lamp'].type = "HEMI" - bpy.data.lamps['Lamp'].energy = 1 - bpy.data.lamps['Lamp'].use_specular = False - bpy.data.lamps['Lamp'].use_diffuse = True - - bpy.context.scene.world.horizon_color = ( - BG_LUMINANCE, BG_LUMINANCE, BG_LUMINANCE) - - bpy.context.scene.render.resolution_x = render_size - bpy.context.scene.render.resolution_y = render_size - bpy.context.scene.render.resolution_percentage = 100 - - bpy.context.scene.render.use_antialiasing = True - bpy.context.scene.render.antialiasing_samples = '5' - - -def get_modelview_matrix(): - cam = bpy.data.objects["Camera"] - bpy.context.scene.update() - - # when apply to object with CV coordinate i.e. 
to_blender * obj - # this gives object in blender coordinate - to_blender = mathutils.Matrix( - ((1., 0., 0., 0.), - (0., 0., -1., 0.), - (0., 1., 0., 0.), - (0., 0., 0., 1.))) - return cam.matrix_world.inverted() * to_blender - - -def print_matrix(f, mat): - for i in range(4): - for j in range(4): - f.write("%lf " % mat[i][j]) - f.write("\n") - - -def mul(loc, v): - return [loc[i] * v for i in range(3)] - - -def merge_all(): - bpy.ops.object.select_by_type(type="MESH") - bpy.context.scene.objects.active = bpy.context.selected_objects[0] - bpy.ops.object.join() - obj = bpy.context.scene.objects.active - bpy.ops.object.origin_set(type="ORIGIN_CENTER_OF_MASS") - return obj - - -def insert_frame(obj, frame_number): - obj.keyframe_insert(data_path="location", frame=frame_number) - obj.keyframe_insert(data_path="rotation_euler", frame=frame_number) - obj.keyframe_insert(data_path="scale", frame=frame_number) - - -def render(output_prefix): - bpy.context.scene.render.filepath = output_prefix - bpy.context.scene.render.image_settings.file_format = "PNG" - bpy.context.scene.render.alpha_mode = "TRANSPARENT" - bpy.context.scene.render.image_settings.color_mode = "RGBA" - bpy.ops.render.render(write_still=True, animation=True) - - -def render_obj( - obj_fn, save_dir, n, perturb_size, rotate=False, roll=False, scale=1.0): - - # Load object. - bpy.ops.import_scene.obj(filepath=obj_fn) - cur_obj = merge_all() - - scale = 2.0 / max(cur_obj.dimensions) * scale - cur_obj.scale = (scale, scale, scale) - # Using the center of mass as the origin doesn't really work, because Blender - # assumes the object is a solid shell. This seems to generate better-looking - # rotations. - - bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') - - # bpy.ops.mesh.primitive_cube_add(location=(0, 0, 1)) - # cube = bpy.data.objects["Cube"] - # cube.scale = (0.2, 0.2, 0.2) - - for polygon in cur_obj.data.polygons: - polygon.use_smooth = True - - bpy.ops.object.select_all(action="DESELECT") - - camera = bpy.data.objects["Camera"] - - # os.system("mkdir " + save_dir) - for i in range(n): - fo = open(save_dir + "/%06d.txt" % i, "w") - d = 30 - shift = 0.2 - if rotate: - t = 1.0 * i / (n-1) * 2 * math.pi - loc = [math.sin(t), math.cos(t), 1] - - normalize(loc) - camera.location = mul(loc, d) - look_at(camera, Vector((0.0, 0, 0))) - - print_matrix(fo, get_modelview_matrix()) - print_matrix(fo, get_modelview_matrix()) - - insert_frame(camera, 2 * i) - insert_frame(camera, 2 * i + 1) - - else: - loc = random_top_sphere() - - camera.location = mul(loc, d) - look_at(camera, Vector((0.0, 0, 0))) - - if roll: - roll_camera(camera) - camera.location = perturb(mul(loc, d), shift) - - print_matrix(fo, get_modelview_matrix()) - insert_frame(camera, 2 * i) - - if perturb_size > 0: - loc = perturb_sphere(loc, perturb_size) - else: - loc = random_top_sphere() - - camera.location = mul(loc, d) - look_at(camera, Vector((0.0, 0, 0))) - if roll: - roll_camera(camera) - camera.location = perturb(mul(loc, d), shift) - - print_matrix(fo, get_modelview_matrix()) - insert_frame(camera, 2 * i + 1) - - fo.close() - - # Create a bunch of views of the object - bpy.context.scene.frame_start = 0 - bpy.context.scene.frame_end = 2 * n - 1 - - stem = os.path.join(save_dir, '######') - render(stem) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('-m', '--model', dest='model', - required=True, - help='Path to model obj file.') - parser.add_argument('-o', '--output_dir', dest='output_dir', - required=True, - help='Where to 
output files.') - parser.add_argument('-s', '--output_size', dest='output_size', - required=True, - help='Width and height of output in pixels, e.g. 32x32.') - parser.add_argument('-n', '--num_frames', dest='n', type=int, - required=True, - help='Number of frames to generate per clip.') - - parser.add_argument('-scale', '--scale', dest='scale', type=float, - help='object scaling', default=1) - - parser.add_argument('-perturb', '--perturb', dest='perturb', type=float, - help='sphere perturbation', default=0) - - parser.add_argument('-rotate', '--rotate', dest='rotate', action='store_true', - help='render rotating test set') - - parser.add_argument('-roll', '--roll', dest='roll', action='store_true', - help='add roll') - - parser.add_argument( - '-fov', '--fov', dest='fov', type=float, required=True, - help='field of view') - - if '--' not in sys.argv: - parser.print_help() - exit(1) - - argv = sys.argv[sys.argv.index('--') + 1:] - args, _ = parser.parse_known_args(argv) - - random.seed(args.model + str(time.time()) + str(os.getpid())) - # random.seed(0) - - set_scene(int(args.output_size), args.fov) - render_obj( - args.model, args.output_dir, args.n, args.perturb, args.rotate, - args.roll, args.scale) - exit() - - -if __name__ == '__main__': - main() diff --git a/research/keypointnet/utils.py b/research/keypointnet/utils.py deleted file mode 100644 index 148b7a3ed..000000000 --- a/research/keypointnet/utils.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================= -"""Utility functions for KeypointNet. - -These are helper / tensorflow related functions. The actual implementation and -algorithm is in main.py. 
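One detail of the render.py entry point above worth calling out: when invoked as `blender -b --python render.py -- ...`, Blender consumes every argument before the literal `--`, so the script slices `sys.argv` itself before handing the rest to argparse. A small repro of that convention (the `argv` list here is illustrative):

```python
# How render.py recovers its own flags under `blender -b --python ... --`.
argv = ["blender", "-b", "--python", "render.py", "--",
        "-m", "model.obj", "-o", "out/", "-s", "128", "-n", "120",
        "-fov", "5"]
script_args = argv[argv.index("--") + 1:] if "--" in argv else []
print(script_args)  # ['-m', 'model.obj', '-o', 'out/', '-s', '128', ...]
```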
-
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import math
-import numpy as np
-import os
-import re
-import tensorflow as tf
-import tensorflow.contrib.slim as slim
-import time
-import traceback
-
-
-class TrainingHook(tf.train.SessionRunHook):
-  """A utility for displaying training information such as the loss, percent
-  completed, and estimated finish date and time."""
-
-  def __init__(self, steps):
-    self.steps = steps
-
-    self.last_time = time.time()
-    self.last_est = self.last_time
-
-    self.eta_interval = int(math.ceil(0.1 * self.steps))
-    self.current_interval = 0
-
-  def before_run(self, run_context):
-    graph = tf.get_default_graph()
-    return tf.train.SessionRunArgs(
-        {"loss": graph.get_collection("total_loss")[0]})
-
-  def after_run(self, run_context, run_values):
-    step = run_context.session.run(tf.train.get_global_step())
-    now = time.time()
-
-    if self.current_interval < self.eta_interval:
-      self.duration = now - self.last_est
-      self.current_interval += 1
-    if step % self.eta_interval == 0:
-      self.duration = now - self.last_est
-      self.last_est = now
-
-    eta_time = float(self.steps - step) / self.current_interval * \
-        self.duration
-    m, s = divmod(eta_time, 60)
-    h, m = divmod(m, 60)
-    eta = "%d:%02d:%02d" % (h, m, s)
-
-    print("%.2f%% (%d/%d): %.3e t %.3f @ %s (%s)" % (
-        step * 100.0 / self.steps,
-        step,
-        self.steps,
-        run_values.results["loss"],
-        now - self.last_time,
-        time.strftime("%a %d %H:%M:%S", time.localtime(time.time() + eta_time)),
-        eta))
-
-    self.last_time = now
-
-
-def standard_model_fn(
-    func, steps, run_config=None, sync_replicas=0, optimizer_fn=None):
-  """Creates a model_fn for tf.estimator.Estimator.
-
-  Args:
-    func: A model_fn with prototype model_fn(features, labels, mode, hparams).
-    steps: Training steps.
-    run_config: A tf.estimator.RunConfig (usually passed in from TF_CONFIG).
-    sync_replicas: The number of replicas used to compute gradients for
-      synchronous training.
-    optimizer_fn: The type of the optimizer. Defaults to Adam.
-
-  Returns:
-    model_fn for tf.estimator.Estimator.
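`TrainingHook` above leans on the `tf.train.SessionRunHook` contract: tensors requested via `SessionRunArgs` in `before_run` come back as `run_values.results` in `after_run`. A stripped-down sketch of the same pattern (TF1 API):

```python
import tensorflow as tf

class LossLoggerHook(tf.train.SessionRunHook):
  """Minimal sketch: fetch one tensor per step and read it back afterwards."""

  def before_run(self, run_context):
    # Ask the session to also evaluate the loss collected by the model_fn.
    loss = tf.get_default_graph().get_collection("total_loss")[0]
    return tf.train.SessionRunArgs({"loss": loss})

  def after_run(self, run_context, run_values):
    # The requested fetch arrives alongside the regular train-op results.
    print("loss: %.3e" % run_values.results["loss"])
```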
- """ - - def fn(features, labels, mode, params): - """Returns model_fn for tf.estimator.Estimator.""" - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - ret = func(features, labels, mode, params) - - tf.add_to_collection("total_loss", ret["loss"]) - train_op = None - - training_hooks = [] - if is_training: - training_hooks.append(TrainingHook(steps)) - - if optimizer_fn is None: - optimizer = tf.train.AdamOptimizer(params.learning_rate) - else: - optimizer = optimizer_fn - - if run_config is not None and run_config.num_worker_replicas > 1: - sr = sync_replicas - if sr <= 0: - sr = run_config.num_worker_replicas - - optimizer = tf.train.SyncReplicasOptimizer( - optimizer, - replicas_to_aggregate=sr, - total_num_replicas=run_config.num_worker_replicas) - - training_hooks.append( - optimizer.make_session_run_hook( - run_config.is_chief, num_tokens=run_config.num_worker_replicas)) - - optimizer = tf.contrib.estimator.clip_gradients_by_norm(optimizer, 5) - train_op = slim.learning.create_train_op(ret["loss"], optimizer) - - if "eval_metric_ops" not in ret: - ret["eval_metric_ops"] = {} - - return tf.estimator.EstimatorSpec( - mode=mode, - predictions=ret["predictions"], - loss=ret["loss"], - train_op=train_op, - eval_metric_ops=ret["eval_metric_ops"], - training_hooks=training_hooks) - return fn - - -def train_and_eval( - model_dir, - steps, - batch_size, - model_fn, - input_fn, - hparams, - keep_checkpoint_every_n_hours=0.5, - save_checkpoints_secs=180, - save_summary_steps=50, - eval_steps=20, - eval_start_delay_secs=10, - eval_throttle_secs=300, - sync_replicas=0): - """Trains and evaluates our model. Supports local and distributed training. - - Args: - model_dir: The output directory for trained parameters, checkpoints, etc. - steps: Training steps. - batch_size: Batch size. - model_fn: A func with prototype model_fn(features, labels, mode, hparams). - input_fn: A input function for the tf.estimator.Estimator. - hparams: tf.HParams containing a set of hyperparameters. - keep_checkpoint_every_n_hours: Number of hours between each checkpoint - to be saved. - save_checkpoints_secs: Save checkpoints every this many seconds. - save_summary_steps: Save summaries every this many steps. - eval_steps: Number of steps to evaluate model. - eval_start_delay_secs: Start evaluating after waiting for this many seconds. - eval_throttle_secs: Do not re-evaluate unless the last evaluation was - started at least this many seconds ago - sync_replicas: Number of synchronous replicas for distributed training. - - Returns: - None - """ - - run_config = tf.estimator.RunConfig( - keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, - save_checkpoints_secs=save_checkpoints_secs, - save_summary_steps=save_summary_steps) - - estimator = tf.estimator.Estimator( - model_dir=model_dir, - model_fn=standard_model_fn( - model_fn, - steps, - run_config, - sync_replicas=sync_replicas), - params=hparams, config=run_config) - - train_spec = tf.estimator.TrainSpec( - input_fn=input_fn(split="train", batch_size=batch_size), - max_steps=steps) - - eval_spec = tf.estimator.EvalSpec( - input_fn=input_fn(split="validation", batch_size=batch_size), - steps=eval_steps, - start_delay_secs=eval_start_delay_secs, - throttle_secs=eval_throttle_secs) - - tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) - - -def draw_circle(rgb, u, v, col, r): - """Draws a simple anti-aliasing circle in-place. - - Args: - rgb: Input image to be modified. - u: Horizontal coordinate. - v: Vertical coordinate. - col: Color. 
- r: Radius. - """ - - ir = int(math.ceil(r)) - for i in range(-ir-1, ir+2): - for j in range(-ir-1, ir+2): - nu = int(round(u + i)) - nv = int(round(v + j)) - if nu < 0 or nu >= rgb.shape[1] or nv < 0 or nv >= rgb.shape[0]: - continue - - du = abs(nu - u) - dv = abs(nv - v) - - # need sqrt to keep scale - t = math.sqrt(du * du + dv * dv) - math.sqrt(r * r) - if t < 0: - rgb[nv, nu, :] = col - else: - t = 1 - t - if t > 0: - # t = t ** 0.3 - rgb[nv, nu, :] = col * t + rgb[nv, nu, :] * (1-t) - - -def draw_ndc_points(rgb, xy, cols): - """Draws keypoints onto an input image. - - Args: - rgb: Input image to be modified. - xy: [n x 2] matrix of 2D locations. - cols: A list of colors for the keypoints. - """ - - vh, vw = rgb.shape[0], rgb.shape[1] - - for j in range(len(cols)): - x, y = xy[j, :2] - x = (min(max(x, -1), 1) * vw / 2 + vw / 2) - 0.5 - y = vh - 0.5 - (min(max(y, -1), 1) * vh / 2 + vh / 2) - - x = int(round(x)) - y = int(round(y)) - if x < 0 or y < 0 or x >= vw or y >= vh: - continue - - rad = 1.5 - rad *= rgb.shape[0] / 128.0 - draw_circle(rgb, x, y, np.array([0.0, 0.0, 0.0, 1.0]), rad * 1.5) - draw_circle(rgb, x, y, cols[j], rad) - - -def colored_hook(home_dir): - """Colorizes python's error message. - - Args: - home_dir: directory where code resides (to highlight your own files). - Returns: - The traceback hook. - """ - - def hook(type_, value, tb): - def colorize(text, color, own=0): - """Returns colorized text.""" - endcolor = "\x1b[0m" - codes = { - "green": "\x1b[0;32m", - "green_own": "\x1b[1;32;40m", - "red": "\x1b[0;31m", - "red_own": "\x1b[1;31m", - "yellow": "\x1b[0;33m", - "yellow_own": "\x1b[1;33m", - "black": "\x1b[0;90m", - "black_own": "\x1b[1;90m", - "cyan": "\033[1;36m", - } - return codes[color + ("_own" if own else "")] + text + endcolor - - for filename, line_num, func, text in traceback.extract_tb(tb): - basename = os.path.basename(filename) - own = (home_dir in filename) or ("/" not in filename) - - print(colorize("\"" + basename + '"', "green", own) + " in " + func) - print("%s: %s" % ( - colorize("%5d" % line_num, "red", own), - colorize(text, "yellow", own))) - print(" %s" % colorize(filename, "black", own)) - - print(colorize("%s: %s" % (type_.__name__, value), "cyan")) - return hook diff --git a/research/learned_optimizer/.gitignore b/research/learned_optimizer/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/learned_optimizer/BUILD b/research/learned_optimizer/BUILD deleted file mode 100644 index 629c9a06b..000000000 --- a/research/learned_optimizer/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -# Learning to Optimize Learning (LOL) - -package(default_visibility = ["//visibility:public"]) - -# Libraries -# ========= - -py_library( - name = "metaopt", - srcs = ["metaopt.py"], - deps = [ - "//learned_optimizer/problems:datasets", - "//learned_optimizer/problems:problem_generator", - ], -) - -# Binaries -# ======== -py_binary( - name = "metarun", - srcs = ["metarun.py"], - deps = [ - ":metaopt", - "//learned_optimizer/optimizer:coordinatewise_rnn", - "//learned_optimizer/optimizer:global_learning_rate", - "//learned_optimizer/optimizer:hierarchical_rnn", - "//learned_optimizer/optimizer:learning_rate_schedule", - "//learned_optimizer/optimizer:trainable_adam", - "//learned_optimizer/problems:problem_sets", - "//learned_optimizer/problems:problem_spec", - ], -) - diff --git a/research/learned_optimizer/README.md b/research/learned_optimizer/README.md deleted file mode 100644 index 6a32514f0..000000000 --- 
a/research/learned_optimizer/README.md +++ /dev/null @@ -1,47 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Learned Optimizer - -Code for [Learned Optimizers that Scale and Generalize](https://arxiv.org/abs/1703.04813). - -## Requirements - -* Bazel ([install](https://bazel.build/versions/master/docs/install.html)) -* TensorFlow >= v1.3 -* Python 2.7.x - -## Training a Learned Optimizer - -## Code Overview -In the top-level directory, ```metaopt.py``` contains the code to train and test a learned optimizer. ```metarun.py``` packages the actual training procedure into a -single file, defining and exposing many flags to tune the procedure, from selecting the optimizer type and problem set to more fine-grained hyperparameter settings. -There is no testing binary; testing can be done ad-hoc via ```metaopt.test_optimizer``` by passing an optimizer object and a directory with a checkpoint. - -The ```optimizer``` directory contains a base ```trainable_optimizer.py``` class and a number of extensions, including the ```hierarchical_rnn``` optimizer used in -the paper, a ```coordinatewise_rnn``` optimizer that more closely matches previous work, and a number of simpler optimizers to demonstrate the basic mechanics of -a learnable optimizer. - -The ```problems``` directory contains the code to build the problems that were used in the meta-training set. - -### Binaries -```metarun.py```: meta-training of a learned optimizer - -### Command-Line Flags -The flags most relevant to meta-training are defined in ```metarun.py```. The default values will meta-train a HierarchicalRNN optimizer with the hyperparameter -settings used in the paper. - -### Using a Learned Optimizer as a Black Box -The ```trainable_optimizer``` inherits from ```tf.train.Optimizer```, so a properly instantiated version can be used to train any model in any APIs that accept -this class. There are just 2 caveats: - -1. If using the Hierarchical RNN optimizer, the apply_gradients return type must be changed (see comments inline for what exactly must be removed) - -2. Care must be taken to restore the variables from the optimizer without overriding them. Optimizer variables should be loaded manually using a pretrained checkpoint -and a ```tf.train.Saver``` with only the optimizer variables. Then, when constructing the session, ensure that any automatic variable initialization does not -re-initialize the loaded optimizer variables. - -## Contact for Issues - -* Olga Wichrowska (@olganw), Niru Maheswaranathan (@nirum) diff --git a/research/learned_optimizer/metaopt.py b/research/learned_optimizer/metaopt.py deleted file mode 100644 index 62c06272d..000000000 --- a/research/learned_optimizer/metaopt.py +++ /dev/null @@ -1,639 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Helper utilities for training and testing optimizers.""" - -from collections import defaultdict -import random -import sys -import time - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from learned_optimizer.optimizer import trainable_optimizer -from learned_optimizer.optimizer import utils -from learned_optimizer.problems import datasets -from learned_optimizer.problems import problem_generator - -tf.app.flags.DEFINE_integer("ps_tasks", 0, - """Number of tasks in the ps job. - If 0 no ps job is used.""") -tf.app.flags.DEFINE_float("nan_l2_reg", 1e-2, - """Strength of l2-reg when NaNs are encountered.""") -tf.app.flags.DEFINE_float("l2_reg", 0., - """Lambda value for parameter regularization.""") -# Default is 0.9 -tf.app.flags.DEFINE_float("rms_decay", 0.9, - """Decay value for the RMSProp metaoptimizer.""") -# Default is 1e-10 -tf.app.flags.DEFINE_float("rms_epsilon", 1e-20, - """Epsilon value for the RMSProp metaoptimizer.""") -tf.app.flags.DEFINE_boolean("set_profiling", False, - """Enable memory usage and computation time """ - """tracing for tensorflow nodes (available in """ - """TensorBoard).""") -tf.app.flags.DEFINE_boolean("reset_rnn_params", True, - """Reset the parameters of the optimizer - from one meta-iteration to the next.""") - -FLAGS = tf.app.flags.FLAGS -OPTIMIZER_SCOPE = "LOL" -OPT_SUM_COLLECTION = "LOL_summaries" - - -def sigmoid_weights(n, slope=0.1, offset=5): - """Generates a sigmoid, scaled to sum to 1. - - This function is used to generate weights that serve to mask out - the early objective values of an optimization problem such that - initial variation in the objective is phased out (hence the sigmoid - starts at zero and ramps up to the maximum value, and the total - weight is normalized to sum to one) - - Args: - n: the number of samples - slope: slope of the sigmoid (Default: 0.1) - offset: threshold of the sigmoid (Default: 5) - - Returns: - No - """ - x = np.arange(n) - y = 1. / (1. + np.exp(-slope * (x-offset))) - y_normalized = y / np.sum(y) - return y_normalized - - -def sample_numiter(scale, min_steps=50): - """Samples a number of iterations from an exponential distribution. - - Args: - scale: parameter for the exponential distribution - min_steps: minimum number of steps to run (additive) - - Returns: - num_steps: An integer equal to a rounded sample from the exponential - distribution + the value of min_steps. - """ - return int(np.round(np.random.exponential(scale=scale)) + min_steps) - - -def train_optimizer(logdir, - optimizer_spec, - problems_and_data, - num_problems, - num_meta_iterations, - num_unroll_func, - num_partial_unroll_itrs_func, - learning_rate=1e-4, - gradient_clip=5., - is_chief=False, - select_random_problems=True, - callbacks=None, - obj_train_max_multiplier=-1, - out=sys.stdout): - """Trains the meta-parameters of this optimizer. - - Args: - logdir: a directory filepath for storing model checkpoints (must exist) - optimizer_spec: specification for an Optimizer (see utils.Spec) - problems_and_data: a list of tuples containing three elements: a problem - specification (see utils.Spec), a dataset (see datasets.Dataset), and - a batch_size (int) for generating a problem and corresponding dataset. If - the problem doesn't have data, set dataset to None. 
- num_problems: the number of problems to sample during meta-training - num_meta_iterations: the number of iterations (steps) to run the - meta-optimizer for on each subproblem. - num_unroll_func: called once per meta iteration and returns the number of - unrolls to do for that meta iteration. - num_partial_unroll_itrs_func: called once per unroll and returns the number - of iterations to do for that unroll. - learning_rate: learning rate of the RMSProp meta-optimizer (Default: 1e-4) - gradient_clip: value to clip gradients at (Default: 5.0) - is_chief: whether this is the chief task (Default: False) - select_random_problems: whether to select training problems randomly - (Default: True) - callbacks: a list of callback functions that is run after every random - problem draw - obj_train_max_multiplier: the maximum increase in the objective value over - a single training run. Ignored if < 0. - out: where to write output to, e.g. a file handle (Default: sys.stdout) - - Raises: - ValueError: If one of the subproblems has a negative objective value. - """ - - if select_random_problems: - # iterate over random draws of problem / dataset pairs - sampler = (random.choice(problems_and_data) for _ in range(num_problems)) - else: - # iterate over a random shuffle of problems, looping if necessary - num_repeats = (num_problems / len(problems_and_data)) + 1 - random.shuffle(problems_and_data) - sampler = (problems_and_data * num_repeats)[:num_problems] - - for problem_itr, (problem_spec, dataset, batch_size) in enumerate(sampler): - - # timer used to time how long it takes to initialize a problem - problem_start_time = time.time() - - # if dataset is None, use the EMPTY_DATASET - if dataset is None: - dataset = datasets.EMPTY_DATASET - batch_size = dataset.size - - # build a new graph for this problem - graph = tf.Graph() - real_device_setter = tf.train.replica_device_setter(FLAGS.ps_tasks) - - def custom_device_setter(op): - # Places the local variables onto the workers. 
- if trainable_optimizer.is_local_state_variable(op): - return "/job:worker" - else: - return real_device_setter(op) - - if real_device_setter: - device_setter = custom_device_setter - else: - device_setter = None - - with graph.as_default(), graph.device(device_setter): - - # initialize a problem - problem = problem_spec.build() - - # build the optimizer - opt = optimizer_spec.build() - - # get the meta-objective for training the optimizer - train_output = opt.train(problem, dataset) - - state_keys = opt.state_keys - for key, val in zip(state_keys, train_output.output_state[0]): - finite_val = utils.make_finite(val, replacement=tf.zeros_like(val)) - tf.summary.histogram("State/{}".format(key), finite_val, - collections=[OPT_SUM_COLLECTION]) - - tf.summary.scalar("MetaObjective", train_output.metaobj, - collections=[OPT_SUM_COLLECTION]) - - # Per-problem meta-objective - tf.summary.scalar(problem_spec.callable.__name__ + "_MetaObjective", - train_output.metaobj, - collections=[OPT_SUM_COLLECTION]) - - # create the meta-train_op - global_step = tf.Variable(0, name="global_step", trainable=False) - meta_parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, - scope=OPTIMIZER_SCOPE) - # parameter regularization - reg_l2 = FLAGS.l2_reg * sum([tf.reduce_sum(param ** 2) - for param in meta_parameters]) - - # compute the meta-gradients - meta_opt = tf.train.RMSPropOptimizer(learning_rate, decay=FLAGS.rms_decay, - use_locking=True, - epsilon=FLAGS.rms_epsilon) - grads_and_vars = meta_opt.compute_gradients(train_output.metaobj + reg_l2, - meta_parameters) - - # clip the gradients - clipped_grads_and_vars = [] - for grad, var in grads_and_vars: - clipped_grad = tf.clip_by_value( - utils.make_finite(grad, replacement=tf.zeros_like(var)), - -gradient_clip, gradient_clip) - clipped_grads_and_vars.append((clipped_grad, var)) - - # histogram summary of grads and vars - for grad, var in grads_and_vars: - tf.summary.histogram( - var.name + "_rawgrad", - utils.make_finite( - grad, replacement=tf.zeros_like(grad)), - collections=[OPT_SUM_COLLECTION]) - for grad, var in clipped_grads_and_vars: - tf.summary.histogram(var.name + "_var", var, - collections=[OPT_SUM_COLLECTION]) - tf.summary.histogram(var.name + "_grad", grad, - collections=[OPT_SUM_COLLECTION]) - - # builds the train and summary operations - train_op = meta_opt.apply_gradients(clipped_grads_and_vars, - global_step=global_step) - - # only grab summaries defined for LOL, not inside the problem - summary_op = tf.summary.merge_all(key=OPT_SUM_COLLECTION) - - # make sure the state gets propagated after the gradients and summaries - # were computed. 
- with tf.control_dependencies([train_op, summary_op]): - propagate_loop_state_ops = [] - for dest, src in zip( - train_output.init_loop_vars, train_output.output_loop_vars): - propagate_loop_state_ops.append(dest.assign(src)) - propagate_loop_state_op = tf.group(*propagate_loop_state_ops) - - # create the supervisor - sv = tf.train.Supervisor( - graph=graph, - is_chief=is_chief, - logdir=logdir, - summary_op=None, - save_model_secs=0, # we save checkpoints manually - global_step=global_step, - ) - - with sv.managed_session() as sess: - - init_time = time.time() - problem_start_time - out.write("--------- Problem #{} ---------\n".format(problem_itr)) - out.write("{callable.__name__}{args}{kwargs}\n".format( - **problem_spec.__dict__)) - out.write("Took {} seconds to initialize.\n".format(init_time)) - out.flush() - - # For profiling summaries - if FLAGS.set_profiling: - summary_writer = tf.summary.FileWriter(logdir, graph=sess.graph) - - # used to store information during training - metadata = defaultdict(list) - - for k in range(num_meta_iterations): - - if sv.should_stop(): - break - - problem.init_fn(sess) - - # set run options (for profiling) - full_trace_opt = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) - run_options = full_trace_opt if FLAGS.set_profiling else None - run_metadata = tf.RunMetadata() if FLAGS.set_profiling else None - - num_unrolls = num_unroll_func() - partial_unroll_iters = [ - num_partial_unroll_itrs_func() for _ in xrange(num_unrolls) - ] - total_num_iter = sum(partial_unroll_iters) - - objective_weights = [np.ones(num) / float(num) - for num in partial_unroll_iters] - db = dataset.batch_indices(total_num_iter, batch_size) - dataset_batches = [] - last_index = 0 - for num in partial_unroll_iters: - dataset_batches.append(db[last_index:last_index + num]) - last_index += num - - train_start_time = time.time() - - unroll_itr = 0 - additional_log_info = "" - - for unroll_itr in range(num_unrolls): - first_unroll = unroll_itr == 0 - if FLAGS.reset_rnn_params: - reset_state = first_unroll and k == 0 - else: - reset_state = first_unroll - - feed = { - train_output.obj_weights: objective_weights[unroll_itr], - train_output.batches: dataset_batches[unroll_itr], - train_output.first_unroll: first_unroll, - train_output.reset_state: reset_state, - } - - # run the train and summary ops - # when a "save_diagnostics" flag is turned on - fetches_list = [ - train_output.metaobj, - train_output.problem_objectives, - train_output.initial_obj, - summary_op, - clipped_grads_and_vars, - train_op - ] - if unroll_itr + 1 < num_unrolls: - fetches_list += [propagate_loop_state_op] - - fetched = sess.run(fetches_list, feed_dict=feed, - options=run_options, run_metadata=run_metadata) - meta_obj = fetched[0] - sub_obj = fetched[1] - init_obj = fetched[2] - summ = fetched[3] - meta_grads_and_params = fetched[4] - - # assert that the subproblem objectives are non-negative - # (this is so that we can rescale the objective by the initial value - # and not worry about rescaling by a negative value) - if np.any(sub_obj < 0): - raise ValueError( - "Training problem objectives must be nonnegative.") - # If the objective has increased more than we want, exit this - # training run and start over on another meta iteration. - if obj_train_max_multiplier > 0 and ( - sub_obj[-1] > (init_obj + - abs(init_obj) * (obj_train_max_multiplier - 1))): - msg = " Broke early at {} out of {} unrolls. 
".format( - unroll_itr + 1, num_unrolls) - additional_log_info += msg - break - - # only the chief task is allowed to write the summary - if is_chief: - sv.summary_computed(sess, summ) - - metadata["subproblem_objs"].append(sub_obj) - # store training metadata to pass to the callback - metadata["meta_objs"].append(meta_obj) - metadata["meta_grads_and_params"].append(meta_grads_and_params) - - optimization_time = time.time() - train_start_time - - if FLAGS.set_profiling: - summary_name = "%02d_iter%04d_%02d" % (FLAGS.task, problem_itr, k) - summary_writer.add_run_metadata(run_metadata, summary_name) - - metadata["global_step"].append(sess.run(global_step)) - metadata["runtimes"].append(optimization_time) - - # write a diagnostic message to the output - args = (k, meta_obj, optimization_time, - sum(partial_unroll_iters[:unroll_itr+1])) - out.write(" [{:02}] {}, {} seconds, {} iters ".format(*args)) - out.write("(unrolled {} steps)".format( - ", ".join([str(s) for s in partial_unroll_iters[:unroll_itr+1]]))) - out.write("{}\n".format(additional_log_info)) - out.flush() - - if FLAGS.set_profiling: - summary_writer.close() - - # force a checkpoint save before we load a new problem - # only the chief task has the save_path and can write the checkpoint - if is_chief: - sv.saver.save(sess, sv.save_path, global_step=global_step) - - # run the callbacks on the chief - if is_chief and callbacks is not None: - for callback in callbacks: - if hasattr(callback, "__call__"): - problem_name = problem_spec.callable.__name__ - callback(problem_name, problem_itr, logdir, metadata) - - -def test_optimizer(optimizer, - problem, - num_iter, - dataset=datasets.EMPTY_DATASET, - batch_size=None, - seed=None, - graph=None, - logdir=None, - record_every=None): - """Tests an optimization algorithm on a given problem. - - Args: - optimizer: Either a tf.train.Optimizer instance, or an Optimizer instance - inheriting from trainable_optimizer.py - problem: A Problem instance that defines an optimization problem to solve - num_iter: The number of iterations of the optimizer to run - dataset: The dataset to train the problem against - batch_size: The number of samples per batch. If None (default), the - batch size is set to the full batch (dataset.size) - seed: A random seed used for drawing the initial parameters, or a list of - numpy arrays used to explicitly initialize the parameters. - graph: The tensorflow graph to execute (if None, uses the default graph) - logdir: A directory containing model checkpoints. If given, then the - parameters of the optimizer are loaded from the latest checkpoint - in this folder. - record_every: if an integer, stores the parameters, objective, and gradient - every recored_every iterations. 
If None, nothing is stored - - Returns: - objective_values: A list of the objective values during optimization - parameters: The parameters obtained after training - records: A dictionary containing lists of the parameters and gradients - during optimization saved every record_every iterations (empty if - record_every is set to None) - """ - - if dataset is None: - dataset = datasets.EMPTY_DATASET - batch_size = dataset.size - else: - # default batch size is the entire dataset - batch_size = dataset.size if batch_size is None else batch_size - - graph = tf.get_default_graph() if graph is None else graph - with graph.as_default(): - - # define the parameters of the optimization problem - if isinstance(seed, (list, tuple)): - # seed is a list of arrays - params = problem_generator.init_fixed_variables(seed) - else: - # seed is an int or None - params = problem.init_variables(seed) - - data_placeholder = tf.placeholder(tf.float32) - labels_placeholder = tf.placeholder(tf.int32) - - # get the problem objective and gradient(s) - obj = problem.objective(params, data_placeholder, labels_placeholder) - gradients = problem.gradients(obj, params) - - vars_to_preinitialize = params - - with tf.Session(graph=graph) as sess: - # initialize the parameter scope variables; necessary for apply_gradients - sess.run(tf.variables_initializer(vars_to_preinitialize)) - coord = tf.train.Coordinator() - threads = tf.train.start_queue_runners(sess=sess, coord=coord) - - # create the train operation and training variables - try: - train_op, real_params = optimizer.apply_gradients(zip(gradients, params)) - obj = problem.objective(real_params, data_placeholder, labels_placeholder) - except TypeError: - # If all goes well, this exception should only be thrown when we are using - # a non-hrnn optimizer. 
- train_op = optimizer.apply_gradients(zip(gradients, params)) - - vars_to_restore = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, - scope=OPTIMIZER_SCOPE) - vars_to_initialize = list( - set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) - - set(vars_to_restore) - set(vars_to_preinitialize)) - # load or initialize optimizer variables - if logdir is not None: - restorer = tf.Saver(var_list=vars_to_restore) - ckpt = tf.train.latest_checkpoint(logdir) - restorer.restore(sess, ckpt) - else: - sess.run(tf.variables_initializer(vars_to_restore)) - # initialize all the other variables - sess.run(tf.variables_initializer(vars_to_initialize)) - - problem.init_fn(sess) - - # generate the minibatch indices - batch_inds = dataset.batch_indices(num_iter, batch_size) - - # run the train operation for n iterations and save the objectives - records = defaultdict(list) - objective_values = [] - for itr, batch in enumerate(batch_inds): - - # data to feed in - feed = {data_placeholder: dataset.data[batch], - labels_placeholder: dataset.labels[batch]} - full_feed = {data_placeholder: dataset.data, - labels_placeholder: dataset.labels} - - # record stuff - if record_every is not None and (itr % record_every) == 0: - def grad_value(g): - if isinstance(g, tf.IndexedSlices): - return g.values - else: - return g - - records_fetch = {} - for p in params: - for key in optimizer.get_slot_names(): - v = optimizer.get_slot(p, key) - records_fetch[p.name + "_" + key] = v - gav_fetch = [(grad_value(g), v) for g, v in zip(gradients, params)] - - _, gav_eval, records_eval = sess.run( - (obj, gav_fetch, records_fetch), feed_dict=feed) - full_obj_eval = sess.run([obj], feed_dict=full_feed) - - records["objective"].append(full_obj_eval) - records["grad_norm"].append([np.linalg.norm(g.ravel()) - for g, _ in gav_eval]) - records["param_norm"].append([np.linalg.norm(v.ravel()) - for _, v in gav_eval]) - records["grad"].append([g for g, _ in gav_eval]) - records["param"].append([v for _, v in gav_eval]) - records["iter"].append(itr) - - for k, v in records_eval.iteritems(): - records[k].append(v) - - # run the optimization train operation - objective_values.append(sess.run([train_op, obj], feed_dict=feed)[1]) - - # final parameters - parameters = [sess.run(p) for p in params] - coord.request_stop() - coord.join(threads) - - return objective_values, parameters, records - - -def run_wall_clock_test(optimizer, - problem, - num_steps, - dataset=datasets.EMPTY_DATASET, - seed=None, - logdir=None, - batch_size=None): - """Runs optimization with the given parameters and return average iter time. - - Args: - optimizer: The tf.train.Optimizer instance - problem: The problem to optimize (a problem_generator.Problem) - num_steps: The number of steps to run optimization for - dataset: The dataset to train the problem against - seed: The seed used for drawing the initial parameters, or a list of - numpy arrays used to explicitly initialize the parameters - logdir: A directory containing model checkpoints. If given, then the - parameters of the optimizer are loaded from the latest checkpoint - in this folder. - batch_size: The number of samples per batch. - - Returns: - The average time in seconds for a single optimization iteration. 
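A hypothetical call to this benchmark, for orientation (the `my_problem` object is illustrative, not defined in this file; note the body below returns the median, not the mean, of the per-iteration times):

```python
# Hypothetical benchmark call; my_problem stands in for a
# problem_generator.Problem instance built elsewhere.
import tensorflow as tf

sec_per_iter = run_wall_clock_test(
    optimizer=tf.train.AdamOptimizer(1e-3),
    problem=my_problem,
    num_steps=100)
print("seconds per iteration: %.4f" % sec_per_iter)
```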
- """ - if dataset is None: - dataset = datasets.EMPTY_DATASET - batch_size = dataset.size - else: - # default batch size is the entire dataset - batch_size = dataset.size if batch_size is None else batch_size - - # define the parameters of the optimization problem - if isinstance(seed, (list, tuple)): - # seed is a list of arrays - params = problem_generator.init_fixed_variables(seed) - else: - # seed is an int or None - params = problem.init_variables(seed) - - data_placeholder = tf.placeholder(tf.float32) - labels_placeholder = tf.placeholder(tf.int32) - - obj = problem.objective(params, data_placeholder, labels_placeholder) - gradients = problem.gradients(obj, params) - vars_to_preinitialize = params - - with tf.Session(graph=tf.get_default_graph()) as sess: - # initialize the parameter scope variables; necessary for apply_gradients - sess.run(tf.variables_initializer(vars_to_preinitialize)) - train_op = optimizer.apply_gradients(zip(gradients, params)) - if isinstance(train_op, tuple) or isinstance(train_op, list): - # LOL apply_gradients returns a tuple. Regular optimizers do not. - train_op = train_op[0] - vars_to_restore = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, - scope=OPTIMIZER_SCOPE) - vars_to_initialize = list( - set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)) - - set(vars_to_restore) - set(vars_to_preinitialize)) - # load or initialize optimizer variables - if logdir is not None: - restorer = tf.Saver(var_list=vars_to_restore) - ckpt = tf.train.latest_checkpoint(logdir) - restorer.restore(sess, ckpt) - else: - sess.run(tf.variables_initializer(vars_to_restore)) - # initialize all the other variables - sess.run(tf.variables_initializer(vars_to_initialize)) - - problem.init_fn(sess) - - # generate the minibatch indices - batch_inds = dataset.batch_indices(num_steps, batch_size) - - avg_iter_time = [] - for batch in batch_inds: - # data to feed in - feed = {data_placeholder: dataset.data[batch], - labels_placeholder: dataset.labels[batch]} - - # run the optimization train operation - start = time.time() - sess.run([train_op], feed_dict=feed) - avg_iter_time.append(time.time() - start) - - return np.median(np.array(avg_iter_time)) diff --git a/research/learned_optimizer/metarun.py b/research/learned_optimizer/metarun.py deleted file mode 100644 index 45a29623c..000000000 --- a/research/learned_optimizer/metarun.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Scripts for meta-optimization.""" - -from __future__ import print_function - -import os - -import tensorflow as tf - -import metaopt -from learned_optimizer.optimizer import coordinatewise_rnn -from learned_optimizer.optimizer import global_learning_rate -from learned_optimizer.optimizer import hierarchical_rnn -from learned_optimizer.optimizer import learning_rate_schedule -from learned_optimizer.optimizer import trainable_adam -from learned_optimizer.problems import problem_sets as ps -from learned_optimizer.problems import problem_spec - -tf.app.flags.DEFINE_string("train_dir", "/tmp/lol/", - """Directory to store parameters and results.""") - -tf.app.flags.DEFINE_integer("task", 0, - """Task id of the replica running the training.""") -tf.app.flags.DEFINE_integer("worker_tasks", 1, - """Number of tasks in the worker job.""") - -tf.app.flags.DEFINE_integer("num_problems", 1000, - """Number of sub-problems to run.""") -tf.app.flags.DEFINE_integer("num_meta_iterations", 5, - """Number of meta-iterations to optimize.""") -tf.app.flags.DEFINE_integer("num_unroll_scale", 40, - """The scale parameter of the exponential - distribution from which the number of partial - unrolls is drawn""") -tf.app.flags.DEFINE_integer("min_num_unrolls", 1, - """The minimum number of unrolls per problem.""") -tf.app.flags.DEFINE_integer("num_partial_unroll_itr_scale", 200, - """The scale parameter of the exponential - distribution from which the number of iterations - per unroll is drawn.""") -tf.app.flags.DEFINE_integer("min_num_itr_partial_unroll", 50, - """The minimum number of iterations for one - unroll.""") - -tf.app.flags.DEFINE_string("optimizer", "HierarchicalRNN", - """Which meta-optimizer to train.""") - -# CoordinatewiseRNN-specific flags -tf.app.flags.DEFINE_integer("cell_size", 20, - """Size of the RNN hidden state in each layer.""") -tf.app.flags.DEFINE_integer("num_cells", 2, - """Number of RNN layers.""") -tf.app.flags.DEFINE_string("cell_cls", "GRUCell", - """Type of RNN cell to use.""") - -# Metaoptimization parameters -tf.app.flags.DEFINE_float("meta_learning_rate", 1e-6, - """The learning rate for the meta-optimizer.""") -tf.app.flags.DEFINE_float("gradient_clip_level", 1e4, - """The level to clip gradients to.""") - -# Training set selection -tf.app.flags.DEFINE_boolean("include_quadratic_problems", False, - """Include non-noisy quadratic problems.""") -tf.app.flags.DEFINE_boolean("include_noisy_quadratic_problems", True, - """Include noisy quadratic problems.""") -tf.app.flags.DEFINE_boolean("include_large_quadratic_problems", True, - """Include very large quadratic problems.""") -tf.app.flags.DEFINE_boolean("include_bowl_problems", True, - """Include 2D bowl problems.""") -tf.app.flags.DEFINE_boolean("include_softmax_2_class_problems", True, - """Include 2-class logistic regression problems.""") -tf.app.flags.DEFINE_boolean("include_noisy_softmax_2_class_problems", True, - """Include noisy 2-class logistic regression - problems.""") -tf.app.flags.DEFINE_boolean("include_optimization_test_problems", True, - """Include non-noisy versions of classic - optimization test problems, e.g. Rosenbrock.""") -tf.app.flags.DEFINE_boolean("include_noisy_optimization_test_problems", True, - """Include gradient-noise versions of classic - optimization test problems, e.g. 
Rosenbrock""") -tf.app.flags.DEFINE_boolean("include_fully_connected_random_2_class_problems", - True, """Include MLP problems for 2 classes.""") -tf.app.flags.DEFINE_boolean("include_matmul_problems", True, - """Include matrix multiplication problems.""") -tf.app.flags.DEFINE_boolean("include_log_objective_problems", True, - """Include problems where the objective is the log - objective of another problem, e.g. Bowl.""") -tf.app.flags.DEFINE_boolean("include_rescale_problems", True, - """Include problems where the parameters are scaled - version of the original parameters.""") -tf.app.flags.DEFINE_boolean("include_norm_problems", True, - """Include problems where the objective is the - N-norm of another problem, e.g. Quadratic.""") -tf.app.flags.DEFINE_boolean("include_sum_problems", True, - """Include problems where the objective is the sum - of the objectives of the subproblems that make - up the problem parameters. Per-problem tensors - are still independent of each other.""") -tf.app.flags.DEFINE_boolean("include_sparse_gradient_problems", True, - """Include problems where the gradient is set to 0 - with some high probability.""") -tf.app.flags.DEFINE_boolean("include_sparse_softmax_problems", False, - """Include sparse softmax problems.""") -tf.app.flags.DEFINE_boolean("include_one_hot_sparse_softmax_problems", False, - """Include one-hot sparse softmax problems.""") -tf.app.flags.DEFINE_boolean("include_noisy_bowl_problems", True, - """Include noisy bowl problems.""") -tf.app.flags.DEFINE_boolean("include_noisy_norm_problems", True, - """Include noisy norm problems.""") -tf.app.flags.DEFINE_boolean("include_noisy_sum_problems", True, - """Include noisy sum problems.""") -tf.app.flags.DEFINE_boolean("include_sum_of_quadratics_problems", False, - """Include sum of quadratics problems.""") -tf.app.flags.DEFINE_boolean("include_projection_quadratic_problems", False, - """Include projection quadratic problems.""") -tf.app.flags.DEFINE_boolean("include_outward_snake_problems", False, - """Include outward snake problems.""") -tf.app.flags.DEFINE_boolean("include_dependency_chain_problems", False, - """Include dependency chain problems.""") -tf.app.flags.DEFINE_boolean("include_min_max_well_problems", False, - """Include min-max well problems.""") - -# Optimizer parameters: initialization and scale values -tf.app.flags.DEFINE_float("min_lr", 1e-6, - """The minimum initial learning rate.""") -tf.app.flags.DEFINE_float("max_lr", 1e-2, - """The maximum initial learning rate.""") - -# Optimizer parameters: small features. -tf.app.flags.DEFINE_boolean("zero_init_lr_weights", True, - """Whether to initialize the learning rate weights - to 0 rather than the scaled random initialization - used for other RNN variables.""") -tf.app.flags.DEFINE_boolean("use_relative_lr", True, - """Whether to use the relative learning rate as an - input during training. Can only be used if - learnable_decay is also True.""") -tf.app.flags.DEFINE_boolean("use_extreme_indicator", False, - """Whether to use the extreme indicator for learning - rates as an input during training. 
Can only be - used if learnable_decay is also True.""") -tf.app.flags.DEFINE_boolean("use_log_means_squared", True, - """Whether to track the log of the mean squared - grads instead of the means squared grads.""") -tf.app.flags.DEFINE_boolean("use_problem_lr_mean", True, - """Whether to use the mean over all learning rates - in the problem when calculating the relative - learning rate.""") - -# Optimizer parameters: major features -tf.app.flags.DEFINE_boolean("learnable_decay", True, - """Whether to learn weights that dynamically - modulate the input scale via RMS decay.""") -tf.app.flags.DEFINE_boolean("dynamic_output_scale", True, - """Whether to learn weights that dynamically - modulate the output scale.""") -tf.app.flags.DEFINE_boolean("use_log_objective", True, - """Whether to use the log of the scaled objective - rather than just the scaled obj for training.""") -tf.app.flags.DEFINE_boolean("use_attention", False, - """Whether to learn where to attend.""") -tf.app.flags.DEFINE_boolean("use_second_derivatives", True, - """Whether to use second derivatives.""") -tf.app.flags.DEFINE_integer("num_gradient_scales", 4, - """How many different timescales to keep for - gradient history. If > 1, also learns a scale - factor for gradient history.""") -tf.app.flags.DEFINE_float("max_log_lr", 33, - """The maximum log learning rate allowed.""") -tf.app.flags.DEFINE_float("objective_training_max_multiplier", -1, - """How much the objective can grow before training on - this problem / param pair is terminated. Sets a max - on the objective value when multiplied by the - initial objective. If <= 0, not used.""") -tf.app.flags.DEFINE_boolean("use_gradient_shortcut", True, - """Whether to add a learned affine projection of the - gradient to the update delta in addition to the - gradient function computed by the RNN.""") -tf.app.flags.DEFINE_boolean("use_lr_shortcut", False, - """Whether to add the difference between the current - learning rate and the desired learning rate to - the RNN input.""") -tf.app.flags.DEFINE_boolean("use_grad_products", True, - """Whether to use gradient products in the input to - the RNN. Only applicable when num_gradient_scales - > 1.""") -tf.app.flags.DEFINE_boolean("use_multiple_scale_decays", False, - """Whether to use many-timescale scale decays.""") -tf.app.flags.DEFINE_boolean("use_numerator_epsilon", False, - """Whether to use epsilon in the numerator of the - log objective.""") -tf.app.flags.DEFINE_boolean("learnable_inp_decay", True, - """Whether to learn input decay weight and bias.""") -tf.app.flags.DEFINE_boolean("learnable_rnn_init", True, - """Whether to learn RNN state initialization.""") - -FLAGS = tf.app.flags.FLAGS - -# The Size of the RNN hidden state in each layer: -# [PerParam, PerTensor, Global]. The length of this list must be 1, 2, or 3. -# If less than 3, the Global and/or PerTensor RNNs will not be created. - -HRNN_CELL_SIZES = [10, 20, 20] - - - -def register_optimizers(): - opts = {} - opts["CoordinatewiseRNN"] = coordinatewise_rnn.CoordinatewiseRNN - opts["GlobalLearningRate"] = global_learning_rate.GlobalLearningRate - opts["HierarchicalRNN"] = hierarchical_rnn.HierarchicalRNN - opts["LearningRateSchedule"] = learning_rate_schedule.LearningRateSchedule - opts["TrainableAdam"] = trainable_adam.TrainableAdam - return opts - - -def main(unused_argv): - """Runs the main script.""" - - opts = register_optimizers() - - # Choose a set of problems to optimize. 
By default this includes quadratics, - # 2-dimensional bowls, 2-class softmax problems, and non-noisy optimization - # test problems (e.g. Rosenbrock, Beale) - problems_and_data = [] - - if FLAGS.include_sparse_softmax_problems: - problems_and_data.extend(ps.sparse_softmax_2_class_sparse_problems()) - - if FLAGS.include_one_hot_sparse_softmax_problems: - problems_and_data.extend( - ps.one_hot_sparse_softmax_2_class_sparse_problems()) - - if FLAGS.include_quadratic_problems: - problems_and_data.extend(ps.quadratic_problems()) - - if FLAGS.include_noisy_quadratic_problems: - problems_and_data.extend(ps.quadratic_problems_noisy()) - - if FLAGS.include_large_quadratic_problems: - problems_and_data.extend(ps.quadratic_problems_large()) - - if FLAGS.include_bowl_problems: - problems_and_data.extend(ps.bowl_problems()) - - if FLAGS.include_noisy_bowl_problems: - problems_and_data.extend(ps.bowl_problems_noisy()) - - if FLAGS.include_softmax_2_class_problems: - problems_and_data.extend(ps.softmax_2_class_problems()) - - if FLAGS.include_noisy_softmax_2_class_problems: - problems_and_data.extend(ps.softmax_2_class_problems_noisy()) - - if FLAGS.include_optimization_test_problems: - problems_and_data.extend(ps.optimization_test_problems()) - - if FLAGS.include_noisy_optimization_test_problems: - problems_and_data.extend(ps.optimization_test_problems_noisy()) - - if FLAGS.include_fully_connected_random_2_class_problems: - problems_and_data.extend(ps.fully_connected_random_2_class_problems()) - - if FLAGS.include_matmul_problems: - problems_and_data.extend(ps.matmul_problems()) - - if FLAGS.include_log_objective_problems: - problems_and_data.extend(ps.log_objective_problems()) - - if FLAGS.include_rescale_problems: - problems_and_data.extend(ps.rescale_problems()) - - if FLAGS.include_norm_problems: - problems_and_data.extend(ps.norm_problems()) - - if FLAGS.include_noisy_norm_problems: - problems_and_data.extend(ps.norm_problems_noisy()) - - if FLAGS.include_sum_problems: - problems_and_data.extend(ps.sum_problems()) - - if FLAGS.include_noisy_sum_problems: - problems_and_data.extend(ps.sum_problems_noisy()) - - if FLAGS.include_sparse_gradient_problems: - problems_and_data.extend(ps.sparse_gradient_problems()) - if FLAGS.include_fully_connected_random_2_class_problems: - problems_and_data.extend(ps.sparse_gradient_problems_mlp()) - - if FLAGS.include_min_max_well_problems: - problems_and_data.extend(ps.min_max_well_problems()) - - if FLAGS.include_sum_of_quadratics_problems: - problems_and_data.extend(ps.sum_of_quadratics_problems()) - - if FLAGS.include_projection_quadratic_problems: - problems_and_data.extend(ps.projection_quadratic_problems()) - - if FLAGS.include_outward_snake_problems: - problems_and_data.extend(ps.outward_snake_problems()) - - if FLAGS.include_dependency_chain_problems: - problems_and_data.extend(ps.dependency_chain_problems()) - - # log directory - logdir = os.path.join(FLAGS.train_dir, - "{}_{}_{}_{}".format(FLAGS.optimizer, - FLAGS.cell_cls, - FLAGS.cell_size, - FLAGS.num_cells)) - - # get the optimizer class and arguments - optimizer_cls = opts[FLAGS.optimizer] - - assert len(HRNN_CELL_SIZES) in [1, 2, 3] - optimizer_args = (HRNN_CELL_SIZES,) - - optimizer_kwargs = { - "init_lr_range": (FLAGS.min_lr, FLAGS.max_lr), - "learnable_decay": FLAGS.learnable_decay, - "dynamic_output_scale": FLAGS.dynamic_output_scale, - "cell_cls": getattr(tf.contrib.rnn, FLAGS.cell_cls), - "use_attention": FLAGS.use_attention, - "use_log_objective": FLAGS.use_log_objective, - 
"num_gradient_scales": FLAGS.num_gradient_scales, - "zero_init_lr_weights": FLAGS.zero_init_lr_weights, - "use_log_means_squared": FLAGS.use_log_means_squared, - "use_relative_lr": FLAGS.use_relative_lr, - "use_extreme_indicator": FLAGS.use_extreme_indicator, - "max_log_lr": FLAGS.max_log_lr, - "obj_train_max_multiplier": FLAGS.objective_training_max_multiplier, - "use_problem_lr_mean": FLAGS.use_problem_lr_mean, - "use_gradient_shortcut": FLAGS.use_gradient_shortcut, - "use_second_derivatives": FLAGS.use_second_derivatives, - "use_lr_shortcut": FLAGS.use_lr_shortcut, - "use_grad_products": FLAGS.use_grad_products, - "use_multiple_scale_decays": FLAGS.use_multiple_scale_decays, - "use_numerator_epsilon": FLAGS.use_numerator_epsilon, - "learnable_inp_decay": FLAGS.learnable_inp_decay, - "learnable_rnn_init": FLAGS.learnable_rnn_init, - } - optimizer_spec = problem_spec.Spec( - optimizer_cls, optimizer_args, optimizer_kwargs) - - # make log directory - tf.gfile.MakeDirs(logdir) - - is_chief = FLAGS.task == 0 - # if this is a distributed run, make the chief run through problems in order - select_random_problems = FLAGS.worker_tasks == 1 or not is_chief - - def num_unrolls(): - return metaopt.sample_numiter(FLAGS.num_unroll_scale, FLAGS.min_num_unrolls) - - def num_partial_unroll_itrs(): - return metaopt.sample_numiter(FLAGS.num_partial_unroll_itr_scale, - FLAGS.min_num_itr_partial_unroll) - - # run it - metaopt.train_optimizer( - logdir, - optimizer_spec, - problems_and_data, - FLAGS.num_problems, - FLAGS.num_meta_iterations, - num_unrolls, - num_partial_unroll_itrs, - learning_rate=FLAGS.meta_learning_rate, - gradient_clip=FLAGS.gradient_clip_level, - is_chief=is_chief, - select_random_problems=select_random_problems, - obj_train_max_multiplier=FLAGS.objective_training_max_multiplier, - callbacks=[]) - - return 0 - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/learned_optimizer/optimizer/BUILD b/research/learned_optimizer/optimizer/BUILD deleted file mode 100644 index 8953e7592..000000000 --- a/research/learned_optimizer/optimizer/BUILD +++ /dev/null @@ -1,69 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -# Libraries -# ========= -py_library( - name = "coordinatewise_rnn", - srcs = ["coordinatewise_rnn.py"], - deps = [ - ":trainable_optimizer", - ":utils", - ], -) - -py_library( - name = "global_learning_rate", - srcs = ["global_learning_rate.py"], - deps = [ - ":trainable_optimizer", - ], -) - -py_library( - name = "hierarchical_rnn", - srcs = ["hierarchical_rnn.py"], - deps = [ - ":rnn_cells", - ":trainable_optimizer", - ":utils", - ], -) - -py_library( - name = "learning_rate_schedule", - srcs = ["learning_rate_schedule.py"], - deps = [ - ":trainable_optimizer", - ], -) - -py_library( - name = "rnn_cells", - srcs = ["rnn_cells.py"], - deps = [ - ":utils", - ], -) - -py_library( - name = "trainable_adam", - srcs = ["trainable_adam.py"], - deps = [ - ":trainable_optimizer", - ":utils", - ], -) - -py_library( - name = "trainable_optimizer", - srcs = ["trainable_optimizer.py"], - deps = [ - ], -) - -py_library( - name = "utils", - srcs = ["utils.py"], - deps = [ - ], -) diff --git a/research/learned_optimizer/optimizer/coordinatewise_rnn.py b/research/learned_optimizer/optimizer/coordinatewise_rnn.py deleted file mode 100644 index 3d699504b..000000000 --- a/research/learned_optimizer/optimizer/coordinatewise_rnn.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Collection of trainable optimizers for meta-optimization.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - -import numpy as np -import tensorflow as tf - -from learned_optimizer.optimizer import utils -from learned_optimizer.optimizer import trainable_optimizer as opt - - -# Default was 1e-3 -tf.app.flags.DEFINE_float("crnn_rnn_readout_scale", 0.5, - """The initialization scale for the RNN readouts.""") -tf.app.flags.DEFINE_float("crnn_default_decay_var_init", 2.2, - """The default initializer value for any decay/ - momentum style variables and constants. - sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""") - -FLAGS = tf.flags.FLAGS - - -class CoordinatewiseRNN(opt.TrainableOptimizer): - """RNN that operates on each coordinate of the problem independently.""" - - def __init__(self, - cell_sizes, - cell_cls, - init_lr_range=(1., 1.), - dynamic_output_scale=True, - learnable_decay=True, - zero_init_lr_weights=False, - **kwargs): - """Initializes the RNN per-parameter optimizer. - - Args: - cell_sizes: List of hidden state sizes for each RNN cell in the network - cell_cls: tf.contrib.rnn class for specifying the RNN cell type - init_lr_range: the range in which to initialize the learning rates. - dynamic_output_scale: whether to learn weights that dynamically modulate - the output scale (default: True) - learnable_decay: whether to learn weights that dynamically modulate the - input scale via RMS style decay (default: True) - zero_init_lr_weights: whether to initialize the lr weights to zero - **kwargs: args passed to TrainableOptimizer's constructor - - Raises: - ValueError: If the init lr range is not of length 2. - ValueError: If the init lr range is not a valid range (min > max). 
- """ - if len(init_lr_range) != 2: - raise ValueError( - "Initial LR range must be len 2, was {}".format(len(init_lr_range))) - if init_lr_range[0] > init_lr_range[1]: - raise ValueError("Initial LR range min is greater than max.") - self.init_lr_range = init_lr_range - - self.zero_init_lr_weights = zero_init_lr_weights - self.reuse_vars = False - - # create the RNN cell - with tf.variable_scope(opt.OPTIMIZER_SCOPE): - self.component_cells = [cell_cls(sz) for sz in cell_sizes] - self.cell = tf.contrib.rnn.MultiRNNCell(self.component_cells) - - # random normal initialization scaled by the output size - scale_factor = FLAGS.crnn_rnn_readout_scale / math.sqrt(cell_sizes[-1]) - scaled_init = tf.random_normal_initializer(0., scale_factor) - - # weights for projecting the hidden state to a parameter update - self.update_weights = tf.get_variable("update_weights", - shape=(cell_sizes[-1], 1), - initializer=scaled_init) - - self._initialize_decay(learnable_decay, (cell_sizes[-1], 1), scaled_init) - - self._initialize_lr(dynamic_output_scale, (cell_sizes[-1], 1), - scaled_init) - - state_size = sum([sum(state_size) for state_size in self.cell.state_size]) - self._init_vector = tf.get_variable( - "init_vector", shape=[1, state_size], - initializer=tf.random_uniform_initializer(-1., 1.)) - - state_keys = ["rms", "rnn", "learning_rate", "decay"] - super(CoordinatewiseRNN, self).__init__("cRNN", state_keys, **kwargs) - - def _initialize_decay( - self, learnable_decay, weights_tensor_shape, scaled_init): - """Initializes the decay weights and bias variables or tensors. - - Args: - learnable_decay: Whether to use learnable decay. - weights_tensor_shape: The shape the weight tensor should take. - scaled_init: The scaled initialization for the weights tensor. - """ - if learnable_decay: - - # weights for projecting the hidden state to the RMS decay term - self.decay_weights = tf.get_variable("decay_weights", - shape=weights_tensor_shape, - initializer=scaled_init) - self.decay_bias = tf.get_variable( - "decay_bias", shape=(1,), - initializer=tf.constant_initializer( - FLAGS.crnn_default_decay_var_init)) - else: - self.decay_weights = tf.zeros_like(self.update_weights) - self.decay_bias = tf.constant(FLAGS.crnn_default_decay_var_init) - - def _initialize_lr( - self, dynamic_output_scale, weights_tensor_shape, scaled_init): - """Initializes the learning rate weights and bias variables or tensors. - - Args: - dynamic_output_scale: Whether to use a dynamic output scale. - weights_tensor_shape: The shape the weight tensor should take. - scaled_init: The scaled initialization for the weights tensor. - """ - if dynamic_output_scale: - zero_init = tf.constant_initializer(0.) 
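-      # With zero-initialized weights (and the always-zero-initialized bias),
-      # the downstream LR multiplier 2 * sigmoid(project(rnn_output)) starts
-      # at exactly 1, so the learned optimizer initially leaves each
-      # per-parameter learning rate unchanged.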
- wt_init = zero_init if self.zero_init_lr_weights else scaled_init - self.lr_weights = tf.get_variable("learning_rate_weights", - shape=weights_tensor_shape, - initializer=wt_init) - self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,), - initializer=zero_init) - else: - self.lr_weights = tf.zeros_like(self.update_weights) - self.lr_bias = tf.zeros([1, 1]) - - def _initialize_state(self, var): - """Return a dictionary mapping names of state variables to their values.""" - vectorized_shape = [var.get_shape().num_elements(), 1] - - min_lr = self.init_lr_range[0] - max_lr = self.init_lr_range[1] - if min_lr == max_lr: - init_lr = tf.constant(min_lr, shape=vectorized_shape) - else: - actual_vals = tf.random_uniform(vectorized_shape, - np.log(min_lr), - np.log(max_lr)) - init_lr = tf.exp(actual_vals) - - ones = tf.ones(vectorized_shape) - rnn_init = ones * self._init_vector - - return { - "rms": tf.ones(vectorized_shape), - "learning_rate": init_lr, - "rnn": rnn_init, - "decay": tf.ones(vectorized_shape), - } - - def _compute_update(self, param, grad, state): - """Update parameters given the gradient and state. - - Args: - param: tensor of parameters - grad: tensor of gradients with the same shape as param - state: a dictionary containing any state for the optimizer - - Returns: - updated_param: updated parameters - updated_state: updated state variables in a dictionary - """ - - with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope: - - if self.reuse_vars: - scope.reuse_variables() - else: - self.reuse_vars = True - - param_shape = tf.shape(param) - - (grad_values, decay_state, rms_state, rnn_state, learning_rate_state, - grad_indices) = self._extract_gradients_and_internal_state( - grad, state, param_shape) - - # Vectorize and scale the gradients. - grad_scaled, rms = utils.rms_scaling(grad_values, decay_state, rms_state) - - # Apply the RNN update. - rnn_state_tuples = self._unpack_rnn_state_into_tuples(rnn_state) - rnn_output, rnn_state_tuples = self.cell(grad_scaled, rnn_state_tuples) - rnn_state = self._pack_tuples_into_rnn_state(rnn_state_tuples) - - # Compute the update direction (a linear projection of the RNN output). - delta = utils.project(rnn_output, self.update_weights) - - # The updated decay is an affine projection of the hidden state - decay = utils.project(rnn_output, self.decay_weights, - bias=self.decay_bias, activation=tf.nn.sigmoid) - - # Compute the change in learning rate (an affine projection of the RNN - # state, passed through a 2x sigmoid, so the change is bounded). - learning_rate_change = 2. * utils.project(rnn_output, self.lr_weights, - bias=self.lr_bias, - activation=tf.nn.sigmoid) - - # Update the learning rate. - new_learning_rate = learning_rate_change * learning_rate_state - - # Apply the update to the parameters. - update = tf.reshape(new_learning_rate * delta, tf.shape(grad_values)) - - if isinstance(grad, tf.IndexedSlices): - update = utils.stack_tensor(update, grad_indices, param, - param_shape[:1]) - rms = utils.update_slices(rms, grad_indices, state["rms"], param_shape) - new_learning_rate = utils.update_slices(new_learning_rate, grad_indices, - state["learning_rate"], - param_shape) - rnn_state = utils.update_slices(rnn_state, grad_indices, state["rnn"], - param_shape) - decay = utils.update_slices(decay, grad_indices, state["decay"], - param_shape) - - new_param = param - update - - # Collect the update and new state. 
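-      # "rms" and "decay" drive the RMS-style input scaling on the next step,
-      # "rnn" holds the packed per-coordinate hidden state, and
-      # "learning_rate" is the base rate the next update will rescale.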
- new_state = { - "rms": rms, - "learning_rate": new_learning_rate, - "rnn": rnn_state, - "decay": decay, - } - - return new_param, new_state - - def _extract_gradients_and_internal_state(self, grad, state, param_shape): - """Extracts the gradients and relevant internal state. - - If the gradient is sparse, extracts the appropriate slices from the state. - - Args: - grad: The current gradient. - state: The current state. - param_shape: The shape of the parameter (used if gradient is sparse). - - Returns: - grad_values: The gradient value tensor. - decay_state: The current decay state. - rms_state: The current rms state. - rnn_state: The current state of the internal rnns. - learning_rate_state: The current learning rate state. - grad_indices: The indices for the gradient tensor, if sparse. - None otherwise. - """ - if isinstance(grad, tf.IndexedSlices): - grad_indices, grad_values = utils.accumulate_sparse_gradients(grad) - decay_state = utils.slice_tensor(state["decay"], grad_indices, - param_shape) - rms_state = utils.slice_tensor(state["rms"], grad_indices, param_shape) - rnn_state = utils.slice_tensor(state["rnn"], grad_indices, param_shape) - learning_rate_state = utils.slice_tensor(state["learning_rate"], - grad_indices, param_shape) - decay_state.set_shape([None, 1]) - rms_state.set_shape([None, 1]) - else: - grad_values = grad - grad_indices = None - - decay_state = state["decay"] - rms_state = state["rms"] - rnn_state = state["rnn"] - learning_rate_state = state["learning_rate"] - return (grad_values, decay_state, rms_state, rnn_state, learning_rate_state, - grad_indices) - - def _unpack_rnn_state_into_tuples(self, rnn_state): - """Creates state tuples from the rnn state vector.""" - rnn_state_tuples = [] - cur_state_pos = 0 - for cell in self.component_cells: - total_state_size = sum(cell.state_size) - cur_state = tf.slice(rnn_state, [0, cur_state_pos], - [-1, total_state_size]) - cur_state_tuple = tf.split(value=cur_state, num_or_size_splits=2, - axis=1) - rnn_state_tuples.append(cur_state_tuple) - cur_state_pos += total_state_size - return rnn_state_tuples - - def _pack_tuples_into_rnn_state(self, rnn_state_tuples): - """Creates a single state vector concatenated along column axis.""" - rnn_state = None - for new_state_tuple in rnn_state_tuples: - new_c, new_h = new_state_tuple - if rnn_state is None: - rnn_state = tf.concat([new_c, new_h], axis=1) - else: - rnn_state = tf.concat([rnn_state, tf.concat([new_c, new_h], 1)], axis=1) - return rnn_state - diff --git a/research/learned_optimizer/optimizer/global_learning_rate.py b/research/learned_optimizer/optimizer/global_learning_rate.py deleted file mode 100644 index bcf102fff..000000000 --- a/research/learned_optimizer/optimizer/global_learning_rate.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""A trainable optimizer that learns a single global learning rate.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from learned_optimizer.optimizer import trainable_optimizer - - -class GlobalLearningRate(trainable_optimizer.TrainableOptimizer): - """Optimizes for a single global learning rate.""" - - def __init__(self, initial_rate=1e-3, **kwargs): - """Initializes the global learning rate.""" - with tf.variable_scope(trainable_optimizer.OPTIMIZER_SCOPE): - initializer = tf.constant_initializer(initial_rate) - self.learning_rate = tf.get_variable("global_learning_rate", shape=(), - initializer=initializer) - super(GlobalLearningRate, self).__init__("GLR", [], **kwargs) - - def _compute_update(self, param, grad, state): - return param - tf.scalar_mul(self.learning_rate, grad), state - diff --git a/research/learned_optimizer/optimizer/hierarchical_rnn.py b/research/learned_optimizer/optimizer/hierarchical_rnn.py deleted file mode 100644 index 953b72b5d..000000000 --- a/research/learned_optimizer/optimizer/hierarchical_rnn.py +++ /dev/null @@ -1,792 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Collection of trainable optimizers for meta-optimization.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - -import numpy as np -import tensorflow as tf - -from tensorflow.python.ops import state_ops -from learned_optimizer.optimizer import rnn_cells -from learned_optimizer.optimizer import trainable_optimizer as opt -from learned_optimizer.optimizer import utils - -# Default was 0.1 -tf.app.flags.DEFINE_float("biasgrucell_scale", 0.5, - """The scale for the internal BiasGRUCell vars.""") -# Default was 0 -tf.app.flags.DEFINE_float("biasgrucell_gate_bias_init", 2.2, - """The bias for the internal BiasGRUCell reset and - update gate variables.""") -# Default was 1e-3 -tf.app.flags.DEFINE_float("hrnn_rnn_readout_scale", 0.5, - """The initialization scale for the RNN readouts.""") -tf.app.flags.DEFINE_float("hrnn_default_decay_var_init", 2.2, - """The default initializer value for any decay/ - momentum style variables and constants. - sigmoid(2.2) ~ 0.9, sigmoid(-2.2) ~ 0.01.""") -# Default was 2.2 -tf.app.flags.DEFINE_float("scale_decay_bias_init", 3.2, - """The initialization for the scale decay bias. 
This - is the initial bias for the timescale for the - exponential avg of the mean square gradients.""") -tf.app.flags.DEFINE_float("learning_rate_momentum_logit_init", 3.2, - """Initialization for the learning rate momentum.""") -# Default was 0.1 -tf.app.flags.DEFINE_float("hrnn_affine_scale", 0.5, - """The initialization scale for the weight matrix of - the bias variables in layer0 and 1 of the hrnn.""") - -FLAGS = tf.flags.FLAGS - - -class HierarchicalRNN(opt.TrainableOptimizer): - """3 level hierarchical RNN. - - Optionally uses second order gradient information and has decoupled evaluation - and update locations. - """ - - def __init__(self, level_sizes, init_lr_range=(1e-6, 1e-2), - learnable_decay=True, dynamic_output_scale=True, - use_attention=False, use_log_objective=True, - num_gradient_scales=4, zero_init_lr_weights=True, - use_log_means_squared=True, use_relative_lr=True, - use_extreme_indicator=False, max_log_lr=33, - obj_train_max_multiplier=-1, use_problem_lr_mean=False, - use_gradient_shortcut=False, use_lr_shortcut=False, - use_grad_products=False, use_multiple_scale_decays=False, - learnable_inp_decay=True, learnable_rnn_init=True, - random_seed=None, **kwargs): - """Initializes the RNN per-parameter optimizer. - - The hierarchy consists of up to three levels: - Level 0: per parameter RNN - Level 1: per tensor RNN - Level 2: global RNN - - Args: - level_sizes: list or tuple with 1, 2, or 3 integers, the number of units - in each RNN in the hierarchy (level0, level1, level2). - length 1: only coordinatewise rnn's will be used - length 2: coordinatewise and tensor-level rnn's will be used - length 3: a single global-level rnn will be used in addition to - coordinatewise and tensor-level - init_lr_range: the range in which to initialize the learning rates - learnable_decay: whether to learn weights that dynamically modulate the - input scale via RMS style decay - dynamic_output_scale: whether to learn weights that dynamically modulate - the output scale - use_attention: whether to use attention to train the optimizer - use_log_objective: whether to train on the log of the objective - num_gradient_scales: the number of scales to use for gradient history - zero_init_lr_weights: whether to initialize the lr weights to zero - use_log_means_squared: whether to track the log of the means_squared, - used as a measure of signal vs. noise in gradient. - use_relative_lr: whether to use the relative learning rate as an - input during training (requires learnable_decay=True) - use_extreme_indicator: whether to use the extreme indicator for learning - rates as an input during training (requires learnable_decay=True) - max_log_lr: the maximum log learning rate allowed during train or test - obj_train_max_multiplier: max objective increase during a training run - use_problem_lr_mean: whether to use the mean over all learning rates in - the problem when calculating the relative learning rate as opposed to - the per-tensor mean - use_gradient_shortcut: Whether to add a learned affine projection of the - gradient to the update delta in addition to the gradient function - computed by the RNN - use_lr_shortcut: Whether to add as input the difference between the log lr - and the desired log lr (1e-3) - use_grad_products: Whether to use gradient products in the rnn input. - Only applicable if num_gradient_scales > 1 - use_multiple_scale_decays: Whether to use multiple scales for the scale - decay, as with input decay - learnable_inp_decay: Whether to learn the input decay weights and bias. 
- learnable_rnn_init: Whether to learn the RNN state initialization. - random_seed: Random seed for random variable initializers. (Default: None) - **kwargs: args passed to TrainableOptimizer's constructor - - Raises: - ValueError: If level_sizes is not a length 1, 2, or 3 list. - ValueError: If there are any non-integer sizes in level_sizes. - ValueError: If the init lr range is not of length 2. - ValueError: If the init lr range is not a valid range (min > max). - """ - if len(level_sizes) not in [1, 2, 3]: - raise ValueError("HierarchicalRNN only supports 1, 2, or 3 levels in the " - "hierarchy, but {} were requested.".format( - len(level_sizes))) - if any(not isinstance(level, int) for level in level_sizes): - raise ValueError("Level sizes must be integer values, were {}".format( - level_sizes)) - if len(init_lr_range) != 2: - raise ValueError( - "Initial LR range must be len 2, was {}".format(len(init_lr_range))) - if init_lr_range[0] > init_lr_range[1]: - raise ValueError("Initial LR range min is greater than max.") - - self.learnable_decay = learnable_decay - self.dynamic_output_scale = dynamic_output_scale - self.use_attention = use_attention - self.use_log_objective = use_log_objective - self.num_gradient_scales = num_gradient_scales - self.zero_init_lr_weights = zero_init_lr_weights - self.use_log_means_squared = use_log_means_squared - self.use_relative_lr = use_relative_lr - self.use_extreme_indicator = use_extreme_indicator - self.max_log_lr = max_log_lr - self.use_problem_lr_mean = use_problem_lr_mean - self.use_gradient_shortcut = use_gradient_shortcut - self.use_lr_shortcut = use_lr_shortcut - self.use_grad_products = use_grad_products - self.use_multiple_scale_decays = use_multiple_scale_decays - self.learnable_inp_decay = learnable_inp_decay - self.learnable_rnn_init = learnable_rnn_init - - self.random_seed = random_seed - - self.num_layers = len(level_sizes) - self.init_lr_range = init_lr_range - - self.reuse_vars = None - self.reuse_global_state = None - self.cells = [] - self.init_vectors = [] - - with tf.variable_scope(opt.OPTIMIZER_SCOPE): - - self._initialize_rnn_cells(level_sizes) - - # get the cell size for the per-parameter RNN (Level 0) - cell_size = level_sizes[0] - - # Random normal initialization scaled by the output size. This is the - # scale for the RNN *readouts*. RNN internal weight scale is set in the - # BiasGRUCell call. 
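-      # Dividing by sqrt(cell_size) keeps the variance of the readout
-      # projection roughly independent of the hidden size, since each readout
-      # is a sum of cell_size terms.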
- scale_factor = FLAGS.hrnn_rnn_readout_scale / math.sqrt(cell_size) - scaled_init = tf.random_normal_initializer(0., scale_factor, - seed=self.random_seed) - - # weights for projecting the hidden state to a parameter update - self.update_weights = tf.get_variable("update_weights", - shape=(cell_size, 1), - initializer=scaled_init) - - if self.use_attention: - # weights for projecting the hidden state to the location at which the - # gradient is attended - self.attention_weights = tf.get_variable( - "attention_weights", - initializer=self.update_weights.initialized_value()) - - # weights for projecting the hidden state to the RMS decay term - self._initialize_scale_decay((cell_size, 1), scaled_init) - self._initialize_input_decay((cell_size, 1), scaled_init) - - self._initialize_lr((cell_size, 1), scaled_init) - - state_keys = ["parameter", "layer", "scl_decay", "inp_decay", "true_param"] - - if self.dynamic_output_scale: - state_keys.append("log_learning_rate") - - for i in range(self.num_gradient_scales): - state_keys.append("grad_accum{}".format(i + 1)) - state_keys.append("ms{}".format(i + 1)) - - super(HierarchicalRNN, self).__init__( - "hRNN", state_keys, use_attention=use_attention, - use_log_objective=use_log_objective, - obj_train_max_multiplier=obj_train_max_multiplier, **kwargs) - - def _initialize_rnn_cells(self, level_sizes): - """Initializes the RNN cells to use in the hierarchical RNN.""" - - # RNN Cell layers (0 -> lowest, 1 -> middle, 2 -> global) - for level in range(self.num_layers): - scope = "Level{}_RNN".format(level) - with tf.variable_scope(scope): - hcell = rnn_cells.BiasGRUCell( - level_sizes[level], - scale=FLAGS.biasgrucell_scale, - gate_bias_init=FLAGS.biasgrucell_gate_bias_init, - random_seed=self.random_seed) - self.cells.append(hcell) - if self.learnable_rnn_init: - self.init_vectors.append(tf.Variable( - tf.random_uniform([1, hcell.state_size], -1., 1., - seed=self.random_seed), - name="init_vector")) - else: - self.init_vectors.append( - tf.random_uniform([1, hcell.state_size], -1., 1., - seed=self.random_seed)) - - def _initialize_scale_decay(self, weights_tensor_shape, scaled_init): - """Initializes the scale decay weights and bias variables or tensors. - - Args: - weights_tensor_shape: The shape the weight tensor should take. - scaled_init: The scaled initialization for the weights tensor. - """ - if self.learnable_decay: - self.scl_decay_weights = tf.get_variable("scl_decay_weights", - shape=weights_tensor_shape, - initializer=scaled_init) - scl_decay_bias_init = tf.constant_initializer( - FLAGS.scale_decay_bias_init) - self.scl_decay_bias = tf.get_variable("scl_decay_bias", - shape=(1,), - initializer=scl_decay_bias_init) - else: - self.scl_decay_weights = tf.zeros_like(self.update_weights) - self.scl_decay_bias = tf.log(0.93 / (1. - 0.93)) - - def _initialize_input_decay(self, weights_tensor_shape, scaled_init): - """Initializes the input scale decay weights and bias variables or tensors. - - Args: - weights_tensor_shape: The shape the weight tensor should take. - scaled_init: The scaled initialization for the weights tensor. 
- """ - if (self.learnable_decay and self.num_gradient_scales > 1 and - self.learnable_inp_decay): - self.inp_decay_weights = tf.get_variable("inp_decay_weights", - shape=weights_tensor_shape, - initializer=scaled_init) - inp_decay_bias_init = tf.constant_initializer( - FLAGS.hrnn_default_decay_var_init) - self.inp_decay_bias = tf.get_variable("inp_decay_bias", - shape=(1,), - initializer=inp_decay_bias_init) - else: - self.inp_decay_weights = tf.zeros_like(self.update_weights) - self.inp_decay_bias = tf.log(0.89 / (1. - 0.89)) - - def _initialize_lr(self, weights_tensor_shape, scaled_init): - """Initializes the learning rate weights and bias variables or tensors. - - Args: - weights_tensor_shape: The shape the weight tensor should take. - scaled_init: The scaled initialization for the weights tensor. - """ - if self.dynamic_output_scale: - zero_init = tf.constant_initializer(0.) - wt_init = zero_init if self.zero_init_lr_weights else scaled_init - self.lr_weights = tf.get_variable("learning_rate_weights", - shape=weights_tensor_shape, - initializer=wt_init) - self.lr_bias = tf.get_variable("learning_rate_bias", shape=(1,), - initializer=zero_init) - else: - self.lr_weights = tf.zeros_like(self.update_weights) - self.lr_bias = tf.zeros([1, 1]) - - def _initialize_state(self, var): - """Return a dictionary mapping names of state variables to their values.""" - var_vectorized = tf.reshape(var, [-1, 1]) - ndim = var_vectorized.get_shape().as_list()[0] - - state = { - # parameter init tensor is [var_ndim x layer0_cell_size] - "parameter": tf.ones([ndim, 1]) * self.init_vectors[0], - "scl_decay": tf.zeros_like(var_vectorized), - "inp_decay": tf.zeros_like(var_vectorized), - "true_param": var, - } - - if self.num_layers > 1: - # layer init tensor is [1 x layer1_cell_size] - state["layer"] = tf.ones([1, 1]) * self.init_vectors[1] - - if self.dynamic_output_scale: - min_lr = self.init_lr_range[0] - max_lr = self.init_lr_range[1] - if min_lr == max_lr: - log_init_lr = tf.log(min_lr * tf.ones_like(var_vectorized)) - else: - # Use a random offset to increase the likelihood that the average of the - # LRs for this variable is different from the LRs for other variables. - actual_vals = tf.random_uniform(var_vectorized.get_shape().as_list(), - np.log(min_lr) / 2., - np.log(max_lr) / 2., - seed=self.random_seed) - offset = tf.random_uniform((), np.log(min_lr) / 2., np.log(max_lr) / 2., - seed=self.random_seed) - log_init_lr = actual_vals + offset - # Clip the log learning rate to the flag at the top end, and to - # (log(min int32) - 1) at the bottom - clipped = tf.clip_by_value(log_init_lr, -33, self.max_log_lr) - state["log_learning_rate"] = clipped - - for i in range(self.num_gradient_scales): - state["grad_accum{}".format(i + 1)] = tf.zeros_like(var_vectorized) - state["ms{}".format(i + 1)] = tf.zeros_like(var_vectorized) - - return state - - def _initialize_global_state(self): - if self.num_layers < 3: - return [] - rnn_global_init = tf.ones([1, 1]) * self.init_vectors[2] - return [rnn_global_init] - - def _compute_updates(self, params, grads, states, global_state): - # Store the updated parameters and states. - updated_params = [] - updated_attention = [] - updated_states = [] - - with tf.variable_scope(opt.OPTIMIZER_SCOPE): - - mean_log_lr = self._compute_mean_log_lr(states) - - # Iterate over the layers. 
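-      # Each iteration handles one (parameter, gradient, state) triple: scale
-      # the gradients, run the per-parameter and per-tensor RNNs, then project
-      # the new hidden state into an update step and updated state.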
- for param, grad_unflat, state in zip(params, grads, states): - - with tf.variable_scope("PerTensor", reuse=self.reuse_vars): - self.reuse_vars = True - grad = tf.reshape(grad_unflat, [-1, 1]) - - # Create the RNN input. We will optionally extend it with additional - # features such as curvature and gradient signal vs. noise. - (grads_scaled, mean_squared_gradients, - grads_accum) = self._compute_scaled_and_ms_grads(grad, state) - rnn_input = [g for g in grads_scaled] - - self._extend_rnn_input(rnn_input, state, grads_scaled, - mean_squared_gradients, mean_log_lr) - - # Concatenate any features we've collected. - rnn_input_tensor = tf.concat(rnn_input, 1) - - layer_state, new_param_state = self._update_rnn_cells( - state, global_state, rnn_input_tensor, - len(rnn_input) != len(grads_scaled)) - - (scl_decay, inp_decay, new_log_lr, update_step, lr_attend, - attention_delta) = self._compute_rnn_state_projections( - state, new_param_state, grads_scaled) - - # Apply updates and store state variables. - if self.use_attention: - truth = state["true_param"] - updated_param = truth - update_step - attention_step = tf.reshape(lr_attend * attention_delta, - truth.get_shape()) - updated_attention.append(truth - attention_step) - else: - updated_param = param - update_step - updated_attention.append(updated_param) - updated_params.append(updated_param) - - # Collect the new state. - new_state = { - "parameter": new_param_state, - "scl_decay": scl_decay, - "inp_decay": inp_decay, - "true_param": updated_param, - } - if layer_state is not None: - new_state["layer"] = layer_state - - if self.dynamic_output_scale: - new_state["log_learning_rate"] = new_log_lr - - for i in range(self.num_gradient_scales): - new_state["grad_accum{}".format(i + 1)] = grads_accum[i] - new_state["ms{}".format(i + 1)] = mean_squared_gradients[i] - updated_states.append(new_state) - - updated_global_state = self._compute_updated_global_state([layer_state], - global_state) - - return (updated_params, updated_states, [updated_global_state], - updated_attention) - - def _compute_mean_log_lr(self, states): - """Computes the mean log learning rate across all variables.""" - if self.use_problem_lr_mean and self.use_relative_lr: - - sum_log_lr = 0. - count_log_lr = 0. - for state in states: - sum_log_lr += tf.reduce_sum(state["log_learning_rate"]) - # Note: get_shape().num_elements()=num elements in the original tensor. - count_log_lr += state["log_learning_rate"].get_shape().num_elements() - return sum_log_lr / count_log_lr - - def _compute_scaled_and_ms_grads(self, grad, state): - """Computes the scaled gradient and the mean squared gradients. - - Gradients are also accumulated across different timescales if appropriate. - - Args: - grad: The gradient tensor for this layer. - state: The optimizer state for this layer. - - Returns: - The scaled gradients, mean squared gradients, and accumulated gradients. - """ - input_decays = [state["inp_decay"]] - scale_decays = [state["scl_decay"]] - if self.use_multiple_scale_decays and self.num_gradient_scales > 1: - for i in range(self.num_gradient_scales - 1): - scale_decays.append(tf.sqrt(scale_decays[i])) - - for i in range(self.num_gradient_scales - 1): - # Each accumulator on twice the timescale of the one before. 
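-        # (sqrt halves the decay in log space: e.g. a decay of 0.9, an
-        # effective averaging window of ~10 steps, becomes sqrt(0.9) ~ 0.949,
-        # a window of ~20 steps.)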
- input_decays.append(tf.sqrt(input_decays[i])) - grads_accum = [] - grads_scaled = [] - mean_squared_gradients = [] - - # populate the scaled gradients and associated mean_squared values - if self.num_gradient_scales > 0: - for i, decay in enumerate(input_decays): - if self.num_gradient_scales == 1: - # We don't accumulate if no scales, just take the current gradient. - grad_accum = grad - else: - # The state vars are 1-indexed. - old_accum = state["grad_accum{}".format(i + 1)] - grad_accum = grad * (1. - decay) + old_accum * decay - - grads_accum.append(grad_accum) - - sd = scale_decays[i if self.use_multiple_scale_decays else 0] - grad_scaled, ms = utils.rms_scaling(grad_accum, sd, - state["ms{}".format(i + 1)], - update_ms=True) - grads_scaled.append(grad_scaled) - mean_squared_gradients.append(ms) - - return grads_scaled, mean_squared_gradients, grads_accum - - def _extend_rnn_input(self, rnn_input, state, grads_scaled, - mean_squared_gradients, mean_log_lr): - """Computes additional rnn inputs and adds them to the rnn_input list.""" - if self.num_gradient_scales > 1 and self.use_grad_products: - # This gives a measure of curvature relative to input averaging - # lengthscale and to the learning rate - grad_products = [a * b for a, b in - zip(grads_scaled[:-1], grads_scaled[1:])] - rnn_input.extend([g for g in grad_products]) - - if self.use_log_means_squared: - log_means_squared = [tf.log(ms + 1e-16) - for ms in mean_squared_gradients] - - avg = tf.reduce_mean(log_means_squared, axis=0) - # This gives a measure of the signal vs. noise contribution to the - # gradient, at the current averaging lengthscale. If all the noise - # is averaged out, and if updates are small, these will be 0. - mean_log_means_squared = [m - avg for m in log_means_squared] - - rnn_input.extend([m for m in mean_log_means_squared]) - - if self.use_relative_lr or self.use_extreme_indicator: - if not self.dynamic_output_scale: - raise Exception("Relative LR and Extreme Indicator features " - "require dynamic_output_scale to be set to True.") - log_lr_vec = tf.reshape(state["log_learning_rate"], [-1, 1]) - if self.use_relative_lr: - if self.use_problem_lr_mean: - # Learning rate of this dimension vs. rest of target problem. - relative_lr = log_lr_vec - mean_log_lr - else: - # Learning rate of this dimension vs. rest of tensor. - relative_lr = log_lr_vec - tf.reduce_mean(log_lr_vec) - rnn_input.append(relative_lr) - if self.use_extreme_indicator: - # Indicator of extremely large or extremely small learning rate. - extreme_indicator = (tf.nn.relu(log_lr_vec - tf.log(1.)) - - tf.nn.relu(tf.log(1e-6) - log_lr_vec)) - rnn_input.append(extreme_indicator) - - if self.use_lr_shortcut: - log_lr_vec = tf.reshape(state["log_learning_rate"], [-1, 1]) - rnn_input.append(log_lr_vec - tf.log(1e-3)) - - def _update_rnn_cells(self, state, global_state, rnn_input_tensor, - use_additional_features): - """Updates the component RNN cells with the given state and tensor. - - Args: - state: The current state of the optimizer. - global_state: The current global RNN state. - rnn_input_tensor: The input tensor to the RNN. - use_additional_features: Whether the rnn input tensor contains additional - features beyond the scaled gradients (affects whether the rnn input - tensor is used as input to the RNN.) - - Returns: - layer_state: The new state of the per-tensor RNN. - new_param_state: The new state of the per-parameter RNN. 
- """ - # lowest level (per parameter) - # input -> gradient for this parameter - # bias -> output from the layer RNN - with tf.variable_scope("Layer0_RNN"): - total_bias = None - if self.num_layers > 1: - sz = 3 * self.cells[0].state_size # size of the concatenated bias - param_bias = utils.affine([state["layer"]], sz, - scope="Param/Affine", - scale=FLAGS.hrnn_affine_scale, - random_seed=self.random_seed) - total_bias = param_bias - if self.num_layers == 3: - global_bias = utils.affine(global_state, sz, - scope="Global/Affine", - scale=FLAGS.hrnn_affine_scale, - random_seed=self.random_seed) - total_bias += global_bias - - new_param_state, _ = self.cells[0]( - rnn_input_tensor, state["parameter"], bias=total_bias) - - if self.num_layers > 1: - # middle level (per layer) - # input -> average hidden state from each parameter in this layer - # bias -> output from the RNN at the global level - with tf.variable_scope("Layer1_RNN"): - if not use_additional_features: - # Restore old behavior and only add the mean of the new params. - layer_input = tf.reduce_mean(new_param_state, 0, keep_dims=True) - else: - layer_input = tf.reduce_mean( - tf.concat((new_param_state, rnn_input_tensor), 1), 0, - keep_dims=True) - if self.num_layers == 3: - sz = 3 * self.cells[1].state_size - layer_bias = utils.affine(global_state, sz, - scale=FLAGS.hrnn_affine_scale, - random_seed=self.random_seed) - layer_state, _ = self.cells[1]( - layer_input, state["layer"], bias=layer_bias) - else: - layer_state, _ = self.cells[1](layer_input, state["layer"]) - else: - layer_state = None - - return layer_state, new_param_state - - def _compute_rnn_state_projections(self, state, new_param_state, - grads_scaled): - """Computes the RNN state-based updates to parameters and update steps.""" - # Compute the update direction (a linear projection of the RNN output). - update_weights = self.update_weights - - update_delta = utils.project(new_param_state, update_weights) - if self.use_gradient_shortcut: - # Include an affine projection of just the direction of the gradient - # so that RNN hidden states are freed up to store more complex - # functions of the gradient and other parameters. - grads_scaled_tensor = tf.concat([g for g in grads_scaled], 1) - update_delta += utils.affine(grads_scaled_tensor, 1, - scope="GradsToDelta", - include_bias=False, - vec_mean=1. / len(grads_scaled), - random_seed=self.random_seed) - if self.dynamic_output_scale: - denom = tf.sqrt(tf.reduce_mean(update_delta ** 2) + 1e-16) - - update_delta /= denom - - if self.use_attention: - attention_weights = self.attention_weights - attention_delta = utils.project(new_param_state, - attention_weights) - if self.use_gradient_shortcut: - attention_delta += utils.affine(grads_scaled_tensor, 1, - scope="GradsToAttnDelta", - include_bias=False, - vec_mean=1. / len(grads_scaled), - random_seed=self.random_seed) - if self.dynamic_output_scale: - attention_delta /= tf.sqrt( - tf.reduce_mean(attention_delta ** 2) + 1e-16) - else: - attention_delta = None - - # The updated decay is an affine projection of the hidden state. - scl_decay = utils.project(new_param_state, self.scl_decay_weights, - bias=self.scl_decay_bias, - activation=tf.nn.sigmoid) - # This is only used if learnable_decay and num_gradient_scales > 1 - inp_decay = utils.project(new_param_state, self.inp_decay_weights, - bias=self.inp_decay_bias, - activation=tf.nn.sigmoid) - - # Also update the learning rate. 
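-    # The learning rate is tracked in log space and smoothed with a learned
-    # momentum term when dynamic_output_scale is on; see
-    # _compute_new_learning_rate below.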
-      lr_param, lr_attend, new_log_lr = self._compute_new_learning_rate(
-          state, new_param_state)
-
-      update_step = tf.reshape(lr_param * update_delta,
-                               state["true_param"].get_shape())
-
-      return (scl_decay, inp_decay, new_log_lr, update_step, lr_attend,
-              attention_delta)
-
-  def _compute_new_learning_rate(self, state, new_param_state):
-    if self.dynamic_output_scale:
-      # Compute the change in learning rate (an affine projection of the
-      # RNN state, passed through a sigmoid or log depending on flags).
-      # Update the learning rate, w/ momentum.
-      lr_change = utils.project(new_param_state, self.lr_weights,
-                                bias=self.lr_bias)
-      step_log_lr = state["log_learning_rate"] + lr_change
-
-      # Clip the log learning rate to the flag at the top end, and to
-      # (log(min int32) - 1) at the bottom.
-
-      # Check out this hack: we want to be able to compute the gradient
-      # of the downstream result w.r.t lr weights and bias, even if the
-      # value of step_log_lr is outside the clip range. So we clip,
-      # subtract off step_log_lr, and wrap all that in a stop_gradient so
-      # TF never tries to take the gradient of the clip... or the
-      # subtraction. Then we add BACK step_log_lr so that downstream still
-      # receives the clipped value. But the GRADIENT of step_log_lr will
-      # be the gradient of the unclipped value, which we added back in
-      # after stop_gradients.
-      step_log_lr += tf.stop_gradient(
-          tf.clip_by_value(step_log_lr, -33, self.max_log_lr)
-          - step_log_lr)
-
-      lr_momentum_logit = tf.get_variable(
-          "learning_rate_momentum_logit",
-          initializer=FLAGS.learning_rate_momentum_logit_init)
-      lrm = tf.nn.sigmoid(lr_momentum_logit)
-      new_log_lr = (lrm * state["log_learning_rate"] +
-                    (1. - lrm) * step_log_lr)
-      param_stepsize_offset = tf.get_variable("param_stepsize_offset",
-                                              initializer=-1.)
-      lr_param = tf.exp(step_log_lr + param_stepsize_offset)
-      lr_attend = tf.exp(step_log_lr) if self.use_attention else lr_param
-    else:
-      # Dynamic output scale is off; the LR multiplier is a bounded (0, 2)
-      # sigmoid projection that starts at 1 with zero-initialized weights.
-      lr_param = 2. * utils.project(new_param_state, self.lr_weights,
-                                    bias=self.lr_bias,
-                                    activation=tf.nn.sigmoid)
-      new_log_lr = None
-      lr_attend = lr_param
-
-    return lr_param, lr_attend, new_log_lr
-
-  def _compute_updated_global_state(self, layer_states, global_state):
-    """Computes the new global state given the layer states and the old state.
-
-    Args:
-      layer_states: The current layer states.
-      global_state: The old global state.
-
-    Returns:
-      The updated global state.
-    """
-    updated_global_state = []
-    if self.num_layers == 3:
-      # highest (global) layer
-      # input -> average hidden state from each layer-specific RNN
-      # bias -> None
-      with tf.variable_scope("Layer2_RNN", reuse=self.reuse_global_state):
-        self.reuse_global_state = True
-        global_input = tf.reduce_mean(tf.concat(layer_states, 0), 0,
-                                      keep_dims=True)
-        updated_global_state, _ = self.cells[2](global_input, global_state[0])
-    return updated_global_state
-
-  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
-    """Overrides the tf.train.Optimizer interface for applying gradients."""
-
-    # Pull out the variables.
-    grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
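-    # The checks below mirror tf.train.Optimizer.apply_gradients: every
-    # gradient must be a Tensor, IndexedSlices, or None, and every target
-    # must be a tf.Variable.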
- for g, v in grads_and_vars: - if not isinstance(g, (tf.Tensor, tf.IndexedSlices, type(None))): - raise TypeError( - "Gradient must be a Tensor, IndexedSlices, or None: %s" % g) - if not isinstance(v, tf.Variable): - raise TypeError( - "Variable must be a tf.Variable: %s" % v) - if g is not None: - self._assert_valid_dtypes([g, v]) - var_list = [v for g, v in grads_and_vars if g is not None] - if not var_list: - raise ValueError("No gradients provided for any variable: %s" % - (grads_and_vars,)) - - # Create slots for the variables. - with tf.control_dependencies(None): - self._create_slots(var_list) - - # Store update ops in this list. - with tf.op_scope([], name, self._name) as name: - - # Prepare the global state. - with tf.variable_scope(self._name, reuse=self.reuse_global_state): - gs = self._initialize_global_state() - if gs: - global_state = [tf.get_variable("global_state", initializer=gs[0])] - else: - global_state = [] - - # Get the states for each variable in the list. - states = [{key: self.get_slot(var, key) for key in self.get_slot_names()} - for var in var_list] - - # Compute updated values. - grads, params = zip(*grads_and_vars) - args = (params, grads, states, global_state) - updates = self._compute_updates(*args) - new_params, new_states, new_global_state, new_attention = updates - # Assign op for new global state. - update_ops = [tf.assign(gs, ngs) - for gs, ngs in zip(global_state, new_global_state)] - - # Create the assign ops for the params and state variables. - args = (params, states, new_params, new_attention, new_states) - for var, state, new_var, new_var_attend, new_state in zip(*args): - # Assign updates to the state variables. - state_assign_ops = [tf.assign(state_var, new_state[key]) - for key, state_var in state.items()] - - # Update the parameter. - with tf.control_dependencies(state_assign_ops): - if self.use_attention: - # Assign to the attended location, rather than the actual location - # so that the gradients are computed where attention is. - param_update_op = var.assign(new_var_attend) - else: - param_update_op = var.assign(new_var) - - with tf.name_scope("update_" + var.op.name): #, tf.colocate_with(var): - update_ops.append(param_update_op) - - real_params = [self.get_slot(var, "true_param") for var in var_list] - - if global_step is None: - # NOTE: if using the optimizer in a non-test-optimizer setting (e.g. - # on Inception), remove the real_params return value. Otherwise - # the code will throw an error. - return self._finish(update_ops, name), real_params - else: - with tf.control_dependencies([self._finish(update_ops, "update")]): - return state_ops.assign_add(global_step, 1, name=name).op, real_params diff --git a/research/learned_optimizer/optimizer/learning_rate_schedule.py b/research/learned_optimizer/optimizer/learning_rate_schedule.py deleted file mode 100644 index 53db8addd..000000000 --- a/research/learned_optimizer/optimizer/learning_rate_schedule.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A trainable optimizer that learns a learning rate schedule.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from learned_optimizer.optimizer import trainable_optimizer - - -class LearningRateSchedule(trainable_optimizer.TrainableOptimizer): - """Learns a learning rate schedule over a fixed number of iterations.""" - - def __init__(self, initial_rate=0.0, n_steps=1000, **kwargs): - """Initializes the learning rates.""" - self.max_index = tf.constant(n_steps-1, dtype=tf.int32) - - with tf.variable_scope(trainable_optimizer.OPTIMIZER_SCOPE): - initializer = tf.constant_initializer(initial_rate) - self.learning_rates = tf.get_variable("learning_rates", - shape=([n_steps,]), - initializer=initializer) - - super(LearningRateSchedule, self).__init__("LRS", ["itr"], **kwargs) - - def _initialize_state(self, var): - """Return a dictionary mapping names of state variables to their values.""" - return { - "itr": tf.constant(0, dtype=tf.int32), - } - - def _compute_update(self, param, grad, state): - """Compute updates of parameters.""" - - # get the learning rate at the current index, if the index - # is greater than the number of available learning rates, - # use the last one - index = tf.minimum(state["itr"], self.max_index) - learning_rate = tf.gather(self.learning_rates, index) - - # update the parameters: parameter - learning_rate * gradient - updated_param = param - tf.scalar_mul(learning_rate, grad) - - return updated_param, {"itr": state["itr"] + 1} diff --git a/research/learned_optimizer/optimizer/rnn_cells.py b/research/learned_optimizer/optimizer/rnn_cells.py deleted file mode 100644 index 3d68de04c..000000000 --- a/research/learned_optimizer/optimizer/rnn_cells.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Custom RNN cells for hierarchical RNNs.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from learned_optimizer.optimizer import utils - - -class BiasGRUCell(tf.contrib.rnn.RNNCell): - """GRU cell (cf. 
http://arxiv.org/abs/1406.1078) with an additional bias.""" - - def __init__(self, num_units, activation=tf.tanh, scale=0.1, - gate_bias_init=0., random_seed=None): - self._num_units = num_units - self._activation = activation - self._scale = scale - self._gate_bias_init = gate_bias_init - self._random_seed = random_seed - - @property - def state_size(self): - return self._num_units - - @property - def output_size(self): - return self._num_units - - def __call__(self, inputs, state, bias=None): - # Split the injected bias vector into a bias for the r, u, and c updates. - if bias is None: - bias = tf.zeros((1, 3)) - - r_bias, u_bias, c_bias = tf.split(bias, 3, 1) - - with tf.variable_scope(type(self).__name__): # "BiasGRUCell" - with tf.variable_scope("gates"): # Reset gate and update gate. - proj = utils.affine([inputs, state], 2 * self._num_units, - scale=self._scale, bias_init=self._gate_bias_init, - random_seed=self._random_seed) - r_lin, u_lin = tf.split(proj, 2, 1) - r, u = tf.nn.sigmoid(r_lin + r_bias), tf.nn.sigmoid(u_lin + u_bias) - - with tf.variable_scope("candidate"): - proj = utils.affine([inputs, r * state], self._num_units, - scale=self._scale, random_seed=self._random_seed) - c = self._activation(proj + c_bias) - - new_h = u * state + (1 - u) * c - - return new_h, new_h diff --git a/research/learned_optimizer/optimizer/trainable_adam.py b/research/learned_optimizer/optimizer/trainable_adam.py deleted file mode 100644 index 638217f1b..000000000 --- a/research/learned_optimizer/optimizer/trainable_adam.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""A trainable ADAM optimizer that learns its internal variables.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from learned_optimizer.optimizer import trainable_optimizer as opt -from learned_optimizer.optimizer import utils - - -class TrainableAdam(opt.TrainableOptimizer): - """Adam optimizer with learnable scalar parameters. - - See Kingma et. al., 2014 for algorithm (http://arxiv.org/abs/1412.6980). - """ - - def __init__(self, - learning_rate=1e-3, - beta1=0.9, - beta2=0.999, - epsilon=1e-8, - **kwargs): - """Initializes the TrainableAdam optimizer with the given initial values. - - Args: - learning_rate: The learning rate (default: 1e-3). - beta1: The exponential decay rate for the 1st moment estimates. - beta2: The exponential decay rate for the 2nd moment estimates. - epsilon: A small constant for numerical stability. - **kwargs: Any additional keyword arguments for TrainableOptimizer. - - Raises: - ValueError: if the learning rate or epsilon is not positive - ValueError: if beta1 or beta2 is not in (0, 1). 
- """ - if learning_rate <= 0: - raise ValueError("Learning rate must be positive.") - if epsilon <= 0: - raise ValueError("Epsilon must be positive.") - if not 0 < beta1 < 1 or not 0 < beta2 < 1: - raise ValueError("Beta values must be between 0 and 1, exclusive.") - - self._reuse_vars = False - - with tf.variable_scope(opt.OPTIMIZER_SCOPE): - def inv_sigmoid(x): - return np.log(x / (1.0 - x)) - - self.log_learning_rate = tf.get_variable( - "log_learning_rate", - shape=[], - initializer=tf.constant_initializer(np.log(learning_rate))) - self.beta1_logit = tf.get_variable( - "beta1_logit", - shape=[], - initializer=tf.constant_initializer(inv_sigmoid(beta1))) - self.beta2_logit = tf.get_variable( - "beta2_logit", - shape=[], - initializer=tf.constant_initializer(inv_sigmoid(beta2))) - self.log_epsilon = tf.get_variable( - "log_epsilon", - shape=[], - initializer=tf.constant_initializer(np.log(epsilon))) - - # Key names are derived from Algorithm 1 described in - # https://arxiv.org/pdf/1412.6980.pdf - state_keys = ["m", "v", "t"] - super(TrainableAdam, self).__init__("Adam", state_keys, **kwargs) - - def _initialize_state(self, var): - """Returns a dictionary mapping names of state variables to their values.""" - vectorized_shape = var.get_shape().num_elements(), 1 - - return {key: tf.zeros(vectorized_shape) for key in self.state_keys} - - def _compute_update(self, param, grad, state): - """Calculates the new internal state and parameters. - - If the gradient is sparse, updates the appropriate slices in the internal - state and stacks the update tensor. - - Args: - param: A tensor of parameters. - grad: A tensor of gradients with the same shape as param. - state: A dictionary containing any state for the optimizer. - - Returns: - updated_param: The updated parameters. - updated_state: The updated state variables in a dictionary. - """ - - with tf.variable_scope(opt.OPTIMIZER_SCOPE) as scope: - - if self._reuse_vars: - scope.reuse_variables() - else: - self._reuse_vars = True - - (grad_values, first_moment, second_moment, timestep, grad_indices - ) = self._extract_gradients_and_internal_state( - grad, state, tf.shape(param)) - - beta1 = tf.nn.sigmoid(self.beta1_logit) - beta2 = tf.nn.sigmoid(self.beta2_logit) - epsilon = tf.exp(self.log_epsilon) + 1e-10 - learning_rate = tf.exp(self.log_learning_rate) - - old_grad_shape = tf.shape(grad_values) - grad_values = tf.reshape(grad_values, [-1, 1]) - - new_timestep = timestep + 1 - new_first_moment = self._update_adam_estimate( - first_moment, grad_values, beta1) - new_second_moment = self._debias_adam_estimate( - second_moment, tf.square(grad_values), beta2) - - debiased_first_moment = self._debias_adam_estimate( - new_first_moment, beta1, new_timestep) - debiased_second_moment = self._debias_adam_estimate( - new_second_moment, beta2, new_timestep) - - # Propagating through the square root of 0 is very bad for stability. 
- update = (learning_rate * debiased_first_moment / - (tf.sqrt(debiased_second_moment + 1e-10) + epsilon)) - - update = tf.reshape(update, old_grad_shape) - - if grad_indices is not None: - param_shape = tf.shape(param) - update = utils.stack_tensor( - update, grad_indices, param, param_shape[:1]) - new_first_moment = utils.update_slices( - new_first_moment, grad_indices, state["m"], param_shape) - new_second_moment = utils.update_slices( - new_second_moment, grad_indices, state["v"], param_shape) - new_timestep = utils.update_slices( - new_timestep, grad_indices, state["t"], param_shape) - - new_param = param - update - - # collect the update and new state - new_state = { - "m": new_first_moment, - "v": new_second_moment, - "t": new_timestep - } - - return new_param, new_state - - def _update_adam_estimate(self, estimate, value, beta): - """Returns a beta-weighted average of estimate and value.""" - return (beta * estimate) + ((1 - beta) * value) - - def _debias_adam_estimate(self, estimate, beta, t_step): - """Returns a debiased estimate based on beta and the timestep.""" - return estimate / (1 - tf.pow(beta, t_step)) - - def _extract_gradients_and_internal_state(self, grad, state, param_shape): - """Extracts the gradients and relevant internal state. - - If the gradient is sparse, extracts the appropriate slices from the state. - - Args: - grad: The current gradient. - state: The current state. - param_shape: The shape of the parameter (used if gradient is sparse). - - Returns: - grad_values: The gradient value tensor. - first_moment: The first moment tensor (internal state). - second_moment: The second moment tensor (internal state). - timestep: The current timestep (internal state). - grad_indices: The indices for the gradient tensor, if sparse. - None otherwise. - """ - grad_values = grad - grad_indices = None - first_moment = state["m"] - second_moment = state["v"] - timestep = state["t"] - - if isinstance(grad, tf.IndexedSlices): - grad_indices, grad_values = utils.accumulate_sparse_gradients(grad) - first_moment = utils.slice_tensor( - first_moment, grad_indices, param_shape) - second_moment = utils.slice_tensor( - second_moment, grad_indices, param_shape) - timestep = utils.slice_tensor(timestep, grad_indices, param_shape) - - return grad_values, first_moment, second_moment, timestep, grad_indices - diff --git a/research/learned_optimizer/optimizer/trainable_optimizer.py b/research/learned_optimizer/optimizer/trainable_optimizer.py deleted file mode 100644 index 955112a9d..000000000 --- a/research/learned_optimizer/optimizer/trainable_optimizer.py +++ /dev/null @@ -1,574 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""A base class definition for trainable optimizers."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import itertools
-
-import tensorflow as tf
-
-from tensorflow.python.framework import tensor_shape
-
-OPTIMIZER_SCOPE = "LOL"
-_LOCAL_VARIABLE_PREFIX = "local_state_"
-_LOCAL_STATE_VARIABLE_COLLECTION = "local_state_collection"
-EPSILON = 1e-6
-
-
-class TrainableOptimizer(tf.train.Optimizer):
- """Base class for trainable optimizers.
-
- A trainable optimizer is an optimizer that has parameters that can themselves
- be learned (meta-optimized).
-
- Subclasses must implement:
- _compute_update(self, param, grad, state)
- """
-
- def __init__(self, name, state_keys, use_attention=False,
- use_log_objective=False, obj_train_max_multiplier=-1,
- use_second_derivatives=True, use_numerator_epsilon=False,
- **kwargs):
- """Initializes the optimizer with the given name and settings.
-
- Args:
- name: The name string for this optimizer.
- state_keys: The names of any required state variables (list)
- use_attention: Whether this optimizer uses attention (Default: False)
- use_log_objective: Whether this optimizer uses the logarithm of the
- objective when computing the loss (Default: False)
- obj_train_max_multiplier: The maximum multiplier for the increase in the
- objective before meta-training is stopped. If <= 0, meta-training is
- not stopped early. (Default: -1)
- use_second_derivatives: Whether this optimizer uses second derivatives in
- meta-training. This should be set to False if some second derivatives
- in the meta-training problem set are not defined in TensorFlow.
- (Default: True)
- use_numerator_epsilon: Whether to use epsilon in the numerator when
- scaling the problem objective during meta-training. (Default: False)
- **kwargs: Any additional keyword arguments.
- """
- self.use_second_derivatives = use_second_derivatives
- self.state_keys = sorted(state_keys)
- self.use_attention = use_attention
- self.use_log_objective = use_log_objective
- self.obj_train_max_multiplier = obj_train_max_multiplier
- self.use_numerator_epsilon = use_numerator_epsilon
-
- use_locking = False
- super(TrainableOptimizer, self).__init__(use_locking, name)
-
- def _create_slots(self, var_list):
- """Creates all slots needed by the variables.
-
- Args:
- var_list: A list of `Variable` objects.
- """
- for var in var_list:
- init_states = self._initialize_state(var)
- for slot_name in sorted(init_states):
- slot_var_name = "{}_{}".format(self.get_name(), slot_name)
- value = init_states[slot_name]
- self._get_or_make_slot(var, value, slot_name, slot_var_name)
-
- def _initialize_state(self, var):
- """Initializes any state required for this variable.
-
- Args:
- var: a tensor containing parameters to be optimized
-
- Returns:
- state: a dictionary mapping state keys to initial state values (tensors)
- """
- return {}
-
- def _initialize_global_state(self):
- """Initializes any global state values."""
- return []
-
- def _apply_common(self, grad, var):
- """Applies the optimizer updates to the variables.
-
- Note: this should only get called via _apply_dense or _apply_sparse when
- using the optimizer via optimizer.minimize or optimizer.apply_gradients.
- During meta-training, the optimizer.train function should be used to
- construct an optimization path that is differentiable.
-
- Args:
- grad: A tensor representing the gradient.
- var: A tf.Variable with the same shape as grad.
-
- Returns:
- update_op: A tensorflow op that assigns new values to the variable, and
- also defines dependencies that update the state variables for the
- optimizer.
- """
- state = {key: self.get_slot(var, key) for key in self.get_slot_names()}
- new_var, new_state = self._compute_update(var, grad, state)
- state_assign_ops = [tf.assign(state_var, new_state[key])
- for key, state_var in state.items()]
- with tf.control_dependencies(state_assign_ops):
- update_op = var.assign(new_var)
-
- return update_op
-
- def _apply_dense(self, grad, var):
- """Adds ops to apply dense gradients to 'var'."""
- return self._apply_common(grad, var)
-
- def _apply_sparse(self, grad, var):
- """Adds ops to apply sparse gradients to 'var'."""
- return self._apply_common(grad, var)
-
- def _compute_update(self, param, grad, state):
- """Computes the update step for optimization.
-
- Args:
- param: A tensor of parameters to optimize.
- grad: The gradient tensor of the objective with respect to the parameters.
- (It has the same shape as param.)
- state: A dictionary containing any extra state required by the optimizer.
-
- Returns:
- updated_params: The updated parameters.
- updated_state: The dictionary of updated state variable(s).
- """
- raise NotImplementedError
-
- def _compute_updates(self, params, grads, states, global_state):
- """Maps the compute update functions for each parameter.
-
- This function can be overridden by a subclass if the subclass wants to
- combine information across the different parameters in the list.
-
- Args:
- params: A list of parameter tensors.
- grads: A list of gradients corresponding to each parameter.
- states: A list of state variables corresponding to each parameter.
- global_state: A list of global state variables for the problem.
-
- Returns:
- new_params: The updated parameters.
- new_states: The updated states.
- new_global_state: The updated global state.
- attention_params: A list of attention parameters. This is the same as
- new_params if the optimizer does not use attention.
- """
- # Zip up the arguments to _compute_update.
- args = zip(params, grads, states)
-
- # Call compute_update on each set of parameter/gradient/state args.
- new_params, new_states = zip(*list(
- itertools.starmap(self._compute_update, args)))
-
- # Global state is unused in the basic case, just pass it through.
- return list(new_params), list(new_states), global_state, list(new_params)
-
- def train(self, problem, dataset):
- """Creates graph operations to train the optimizer.
-
- Args:
- problem: A problem_generator.Problem instance to train on.
- dataset: A datasets.Dataset tuple to use when training.
-
- Returns:
- meta_objective: A tensorflow operation for computing the meta-objective
- obj_weights: A tensor placeholder for feeding in the objective weights
- obj_values: The subproblem objective values during optimization
- batches: The batch indexes tensor for overriding with feed_dict
- first_unroll: A placeholder signifying if this is a first unroll
- (this will propagate the gradients slightly differently).
- reset_state: A placeholder signifying that the RNN state should be reset.
- output_state: The final state of the optimizer
- init_loop_vars_to_override: Local variables that can be assigned to
- propagate the optimizer and problem state for unrolling
- final_loop_vals: Final values of the loop variables that can be
- assigned to init_loop_vars_to_override.
- """ - - # Placeholder for the objective weights - obj_weights = tf.placeholder(tf.float32) - num_iter = tf.shape(obj_weights)[0] - - # Unpack the dataset and generate the minibatches for training - data, labels = dataset - # Convert the ndarrays to tensors so we can pass them back in via feed_dict - data = tf.constant(data) - labels = tf.constant(labels) - batches = tf.placeholder(tf.int32) - first_unroll = tf.placeholder_with_default(False, []) - reset_state = tf.placeholder_with_default(False, []) - - training_output = collections.namedtuple("TrainingOutput", - ["metaobj", - "obj_weights", - "problem_objectives", - "initial_obj", - "batches", - "first_unroll", - "reset_state", - "output_state", - "init_loop_vars", - "output_loop_vars"]) - - def loop_body(itr, obj_accum, params, attend_params, flattened_states, - global_state, all_obj, unused_init_obj, data, - labels, batches): - """Body of the meta-training while loop for optimizing a sub-problem. - - Args: - itr: The current meta-training iteration. - obj_accum: The accumulated objective over all training steps so far. - params: The parameters of the sub-problem. - attend_params: The parameters of the sub-problems at the attended - location. - flattened_states: The states of the trainable optimizer, sorted and - flattened into a list (since a while loop can't handle nested lists - or dictionaries). - global_state: The global state of the optimizer. - all_obj: The list of all objective values in the training process. - unused_init_obj: The initial objective (unused here, but needed in the - variable list because it's used in a stopping condition in the - loop_cond.) - data: The data for this problem. - labels: The labels corresponding to the data. - batches: The batch indexes needed for shuffled minibatch creation. - - Returns: - itr: The updated meta-training iteration. - obj_accum: The updated accumulated objective. - params: The new parameters of the sub-problem. - attend_params: The new parameters of the sub-problems at the attended - location. - flattened_states: The new states of the trainable optimizer. - global_state: The updated global state. - all_obj: The updates list of all objective values. - unused_init_obj: The initial objective. - data: The data for this problem. - labels: The labels corresponding to the data. - batches: The batch indexes needed for shuffled minibatch creation. - """ - batch_indices = tf.gather(batches, itr) - batch_data = tf.gather(data, batch_indices) - batch_labels = tf.gather(labels, batch_indices) - - # Compute the objective over the entire dataset (full batch). 
- obj = problem.objective(params, data, labels) - - # Compute the gradients on just the current batch - if self.use_attention: - current_obj = problem.objective(attend_params, batch_data, batch_labels) - grads = problem.gradients(current_obj, attend_params) - else: - current_obj = problem.objective(params, batch_data, batch_labels) - grads = problem.gradients(current_obj, params) - - if not self.use_second_derivatives: - new_grads = [] - for grad in grads: - if isinstance(grad, tf.IndexedSlices): - new_grads.append( - tf.IndexedSlices(tf.stop_gradient(grad.values), grad.indices)) - else: - new_grads.append(tf.stop_gradient(grad)) - grads = new_grads - - # store the objective value for the entire problem at each iteration - all_obj = tf.concat([all_obj, tf.reshape(obj, (1,))], 0) - - # accumulate the weighted objective for the entire dataset - acc = tf.gather(obj_weights, itr) * obj - - obj_accum = tf.add(obj_accum, acc) - # Set the shape to keep the shape invariant for obj_accum. Without this, - # the graph builder thinks the tensor shape is unknown on the 2nd iter. - obj_accum.set_shape([]) - - # convert flattened_states to dictionaries - dict_states = [dict(zip(self.state_keys, flat_state)) - for flat_state in flattened_states] - - # compute the new parameters and states - args = (params, grads, dict_states, global_state) - updates = self._compute_updates(*args) - new_params, new_states, new_global_state, new_attend_params = updates - - # flatten the states - new_flattened_states = map(flatten_and_sort, new_states) - - return [itr + 1, obj_accum, new_params, new_attend_params, - new_flattened_states, new_global_state, all_obj, unused_init_obj, - data, labels, batches] - - def loop_cond(itr, obj_accum, unused_params, unused_attend_params, - unused_flattened_states, unused_global_state, all_obj, - init_obj, *args): - """Termination conditions of the sub-problem optimization loop.""" - del args # unused - - cond1 = tf.less(itr, num_iter) # We've run < num_iter times - cond2 = tf.is_finite(obj_accum) # The objective is still finite - - if self.obj_train_max_multiplier > 0: - current_obj = tf.gather(all_obj, itr) - # Account for negative init_obj too - max_diff = (self.obj_train_max_multiplier - 1) * tf.abs(init_obj) - max_obj = init_obj + max_diff - # The objective is a reasonable multiplier of the original objective - cond3 = tf.less(current_obj, max_obj) - - return tf.logical_and(tf.logical_and(cond1, cond2), cond3, - name="training_loop_cond") - else: - return tf.logical_and(cond1, cond2, name="training_loop_cond") - - init = self._initialize_training_loop_parameters( - problem, data, labels, batches, first_unroll, reset_state) - loop_vars, invariants, initial_obj, init_loop_vars_to_override = init - - loop_output = tf.while_loop(loop_cond, loop_body, loop_vars, - swap_memory=True, shape_invariants=invariants) - meta_obj, problem_objectives = loop_output[1], loop_output[6] - - # The meta objective is normalized by the initial objective at the start of - # the series of partial unrolls. 
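- # As a sketch of what scale_objective (defined below) computes: with
- # use_log_objective it returns mean(log((obj_t + eps) / (obj_0 + eps)))
- # (or mean(log(obj_t / (obj_0 + eps) + eps)) without the numerator
- # epsilon); otherwise it returns total_obj / (obj_0 + eps).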
- scaled_meta_objective = self.scale_objective(
- meta_obj, problem_objectives, initial_obj)
-
- final_loop_vals = (
- [initial_obj] + loop_output[2] + loop_output[3] + loop_output[5])
- final_loop_vals.extend(itertools.chain(*loop_output[4]))
-
- return training_output(scaled_meta_objective,
- obj_weights,
- problem_objectives,
- initial_obj,
- batches,
- first_unroll,
- reset_state,
- loop_output[4],
- init_loop_vars_to_override,
- final_loop_vals)
-
- def _initialize_training_loop_parameters(
- self, problem, data, labels, batches, first_unroll, reset_state):
- """Initializes the vars and params needed for the training process.
-
- Args:
- problem: The problem being optimized.
- data: The data for the problem.
- labels: The corresponding labels for the data.
- batches: The indexes needed to create shuffled batches of the data.
- first_unroll: Whether this is the first unroll in a partial unrolling.
- reset_state: Whether RNN state variables should be reset.
-
- Returns:
- loop_vars: The while loop variables for training.
- invariants: The corresponding variable shapes (required by while loop).
- initial_obj: The initial objective (used later for scaling).
- init_loop_vars_to_override: The loop vars that can be overridden when
- performing training via partial unrolls.
- """
- # Extract these separately so we don't have to make inter-variable
- # dependencies.
- initial_tensors = problem.init_tensors()
-
- return_initial_tensor_values = first_unroll
- initial_params_vars, initial_params = local_state_variables(
- initial_tensors, return_initial_tensor_values)
- initial_attend_params_vars, initial_attend_params = local_state_variables(
- initial_tensors, return_initial_tensor_values)
- # Recalculate the initial objective on each partial unroll using the new
- # initial_params; initial_obj holds the value from the very first unroll.
- initial_obj_init = problem.objective(initial_params, data, labels)
- return_initial_obj_init = first_unroll
- [initial_obj_var], [initial_obj] = local_state_variables(
- [initial_obj_init], return_initial_obj_init)
-
- # Initialize the loop variables.
- initial_itr = tf.constant(0, dtype=tf.int32)
- initial_meta_obj = tf.constant(0, dtype=tf.float32)
- # N.B. the use of initial_obj_init here rather than initial_obj
- initial_problem_objectives = tf.reshape(initial_obj_init, (1,))
-
- # Initialize the extra state.
- initial_state_vars = []
- initial_state = []
- state_shapes = []
- return_initial_state_values = reset_state
- for param in initial_tensors:
- param_state_vars, param_state = local_state_variables(
- flatten_and_sort(self._initialize_state(param)),
- return_initial_state_values)
-
- initial_state_vars.append(param_state_vars)
- initial_state.append(param_state)
- state_shapes.append([f.get_shape() for f in param_state])
-
- # Initialize any global (problem-level) state.
- initial_global_state_vars, initial_global_state = local_state_variables(
- self._initialize_global_state(), return_initial_state_values)
-
- global_shapes = []
- for item in initial_global_state:
- global_shapes.append(item.get_shape())
-
- # Build the list of loop variables:
- loop_vars = [
- initial_itr,
- initial_meta_obj,
- initial_params, # Local variables.
- initial_attend_params, # Local variables.
- initial_state, # Local variables.
- initial_global_state, # Local variables.
- initial_problem_objectives,
- initial_obj, # Local variable.
- data,
- labels,
- batches,
- ]
-
- invariants = [
- initial_itr.get_shape(),
- initial_meta_obj.get_shape(),
- [t.get_shape() for t in initial_params],
- [t.get_shape() for t in initial_attend_params],
- state_shapes,
- global_shapes,
- tensor_shape.TensorShape([None]), # The problem objectives list grows
- initial_obj.get_shape(),
- tensor_shape.unknown_shape(), # Placeholder shapes are unknown
- tensor_shape.unknown_shape(),
- tensor_shape.unknown_shape(),
- ]
-
- # Initialize local variables that we will override with final tensors at the
- # next iter.
- init_loop_vars_to_override = (
- [initial_obj_var] + initial_params_vars + initial_attend_params_vars +
- initial_global_state_vars)
- init_loop_vars_to_override.extend(itertools.chain(*initial_state_vars))
-
- return loop_vars, invariants, initial_obj, init_loop_vars_to_override
-
- def scale_objective(self, total_obj, all_objs, initial_obj,
- obj_scale_eps=1e-6):
- """Normalizes the objective based on the initial objective value.
-
- Args:
- total_obj: The total accumulated objective over the training run.
- all_objs: A list of all the individual objectives over the training run.
- initial_obj: The initial objective value.
- obj_scale_eps: The epsilon value to use in computations for stability.
-
- Returns:
- The scaled objective as a single value.
- """
- if self.use_log_objective:
- if self.use_numerator_epsilon:
- scaled_problem_obj = ((all_objs + obj_scale_eps) /
- (initial_obj + obj_scale_eps))
- log_scaled_problem_obj = tf.log(scaled_problem_obj)
- else:
- scaled_problem_obj = all_objs / (initial_obj + obj_scale_eps)
- log_scaled_problem_obj = tf.log(scaled_problem_obj + obj_scale_eps)
- return tf.reduce_mean(log_scaled_problem_obj)
- else:
- return total_obj / (initial_obj + obj_scale_eps)
-
-
-def local_state_variables(init_values, return_init_values):
- """Creates local variables initialized from init_values.
-
- This will create local variables from a list of init_values. Each variable
- will be named based on the value's shape and dtype.
-
- As a convenience, a boolean tensor allows you to return the value of either
- the created local variable or the original init value.
-
- Args:
- init_values: iterable of tensors
- return_init_values: boolean tensor
-
- Returns:
- local_vars: list of the created local variables.
- vals: if return_init_values is true, then this returns the values of
- init_values. Otherwise it returns the values of the local_vars.
- """
- if not init_values:
- return [], []
-
- # This generates a harmless warning when saving the metagraph.
- variable_use_count = tf.get_collection_ref(_LOCAL_STATE_VARIABLE_COLLECTION)
- if not variable_use_count:
- variable_use_count.append(collections.defaultdict(int))
- variable_use_count = variable_use_count[0]
-
- local_vars = []
- with tf.variable_scope(OPTIMIZER_SCOPE):
- # We can't use the init_value as an initializer as init_value may
- # itself depend on some problem variables. This would produce
- # inter-variable initialization order dependence, which TensorFlow
- # does not make easy.
- for init_value in init_values:
- name = create_local_state_variable_name(init_value)
- unique_name = name + "_" + str(variable_use_count[name])
- variable_use_count[name] += 1
- # The overarching idea here is to be able to reuse variables between
- # different sessions on the same TensorFlow master without errors. By
- # uniquifying based on the type and name we mirror the checks made inside
- # TensorFlow, while still allowing some memory reuse.
Ultimately this is a
- # hack due to the broken Session.reset().
- local_vars.append(
- tf.get_local_variable(
- unique_name,
- initializer=tf.zeros(
- init_value.get_shape(), dtype=init_value.dtype)))
-
- # It makes things a lot simpler if we use the init_value the first
- # iteration, instead of the variable itself. It allows us to propagate
- # gradients through it as well as simplifying initialization. The variable
- # ends up assigned to after the first iteration.
- vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars)
- if len(init_values) == 1:
- # tf.cond extracts elements from singleton lists.
- vals = [vals]
- return local_vars, vals
-
-
-def create_local_state_variable_name(tensor):
- """Creates a name for the variable based on its type and shape."""
- if not tensor.get_shape().is_fully_defined():
- raise ValueError("Need a fully specified shape to create a local variable.")
-
- return (_LOCAL_VARIABLE_PREFIX + "_".join(
- map(str, tensor.get_shape().as_list())) + "_" + tensor.dtype.name)
-
-
-def is_local_state_variable(op):
- """Returns whether this op is a local state variable created for training."""
- return op.node_def.op in ["Variable", "VariableV2"] and op.name.startswith(
- OPTIMIZER_SCOPE + "/" + _LOCAL_VARIABLE_PREFIX)
-
-
-def flatten_and_sort(dictionary):
- """Flattens a dictionary into a list of values sorted by the keys."""
- return [dictionary[k] for k in sorted(dictionary.keys())]
diff --git a/research/learned_optimizer/optimizer/utils.py b/research/learned_optimizer/optimizer/utils.py
deleted file mode 100644
index 58744f4cb..000000000
--- a/research/learned_optimizer/optimizer/utils.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# Copyright 2017 Google, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Utilities and helper functions."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-
-def make_finite(t, replacement):
- """Replaces non-finite tensor values with the replacement value."""
- return tf.where(tf.is_finite(t), t, replacement)
-
-
-def asinh(x):
- """Computes the inverse hyperbolic sine function (in tensorflow)."""
- return tf.log(x + tf.sqrt(1. + x ** 2))
-
-
-def affine(inputs, output_size, scope="Affine", scale=0.1, vec_mean=0.,
- include_bias=True, bias_init=0., random_seed=None):
- """Computes an affine function of the inputs.
-
- Creates or recalls TensorFlow variables "Matrix" and "Bias"
- to generate an affine operation on the input.
-
- If the inputs are a list of tensors, they are concatenated together.
-
- Initial weights for the matrix are drawn from a Gaussian with zero
- mean and standard deviation that is the given scale divided by the
- square root of the input dimension. Initial weights for the bias are
- set to zero.
-
- Args:
- inputs: List of tensors with shape (batch_size, input_size)
- output_size: Size (dimension) of the output
- scope: Variable scope for these parameters (default: "Affine")
- scale: Initial weight scale for the matrix parameters (default: 0.1);
- this constant is divided by the sqrt of the input size to get the
- std. deviation of the initial weights
- vec_mean: The mean for the random initializer
- include_bias: Whether to include the bias term
- bias_init: The initial bias value (default: 0.)
- random_seed: Random seed for random initializers. (Default: None)
-
- Returns:
- output: Tensor with shape (batch_size, output_size)
- """
-
- # Concatenate the input arguments.
- x = tf.concat(inputs, 1)
-
- with tf.variable_scope(scope):
- input_size = x.get_shape().as_list()[1]
-
- sigma = scale / np.sqrt(input_size)
- rand_init = tf.random_normal_initializer(mean=vec_mean, stddev=sigma,
- seed=random_seed)
-
- matrix = tf.get_variable("Matrix", [input_size, output_size],
- dtype=tf.float32, initializer=rand_init)
-
- if include_bias:
- bias = tf.get_variable("Bias", [output_size], dtype=tf.float32,
- initializer=tf.constant_initializer(bias_init,
- tf.float32))
- else:
- bias = 0.
- output = tf.matmul(x, matrix) + bias
-
- return output
-
-
-def project(inputs, weights, bias=0., activation=tf.identity):
- """Computes an affine or linear projection of the inputs.
-
- Projects the inputs onto the given weight vector and (optionally)
- adds a bias and passes the result through an activation function.
-
- Args:
- inputs: matrix of inputs with shape [batch_size, dim]
- weights: weight matrix with shape [dim, output_dim]
- bias: bias vector with shape [output_dim] (default: 0)
- activation: nonlinear activation function (default: tf.identity)
-
- Returns:
- outputs: an op which computes activation(inputs @ weights + bias)
- """
- return activation(tf.matmul(inputs, weights) + bias)
-
-
-def new_mean_squared(grad_vec, decay, ms):
- """Calculates the new accumulated mean squared of the gradient.
-
- Args:
- grad_vec: the vector for the current gradient
- decay: the decay term
- ms: the previous mean_squared value
-
- Returns:
- the new mean_squared value
- """
- decay_size = decay.get_shape().num_elements()
- decay_check_ops = [
- tf.assert_less_equal(decay, 1., summarize=decay_size),
- tf.assert_greater_equal(decay, 0., summarize=decay_size)]
-
- with tf.control_dependencies(decay_check_ops):
- grad_squared = tf.square(grad_vec)
-
- # If the previous mean_squared is the 0 vector, don't use the decay and just
- # return the full grad_squared. This should only happen on the first timestep.
- decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
- lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)
-
- # Update the running average of squared gradients.
- epsilon = 1e-12
- return (1. - decay) * (grad_squared + epsilon) + decay * ms
-
-
-def rms_scaling(gradient, decay, ms, update_ms=True):
- """Vectorizes and scales a tensor of gradients.
-
- Args:
- gradient: the current gradient
- decay: the current decay value.
- ms: the previous mean squared value
- update_ms: Whether to update the mean squared value (default: True)
-
- Returns:
- The scaled gradient and the new ms value if update_ms is True,
- the old ms value otherwise.
- """
-
- # Vectorize the gradients and compute the squared gradients.
- grad_vec = tf.reshape(gradient, [-1, 1])
-
- if update_ms:
- ms = new_mean_squared(grad_vec, decay, ms)
-
- # Scale the current gradients by the RMS, squashed by the asinh function.
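- # That is, scaled_gradient = asinh(g / sqrt(ms + 1e-16)): approximately
- # g / rms(g) for small inputs, while large magnitudes are compressed
- # logarithmically, since asinh(x) ~ sign(x) * log(2|x|) for large |x|.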
- scaled_gradient = asinh(grad_vec / tf.sqrt(ms + 1e-16))
-
- return scaled_gradient, ms
-
-
-def accumulate_sparse_gradients(grad):
- """Accumulates repeated indices of a sparse gradient update.
-
- Args:
- grad: a tf.IndexedSlices gradient
-
- Returns:
- grad_indices: unique indices
- grad_values: gradient values corresponding to the indices
- """
-
- grad_indices, grad_segments = tf.unique(grad.indices)
- grad_values = tf.unsorted_segment_sum(grad.values, grad_segments,
- tf.shape(grad_indices)[0])
- return grad_indices, grad_values
-
-
-def slice_tensor(dense_tensor, indices, head_dims):
- """Extracts slices from a partially flattened dense tensor.
-
- indices is assumed to index into the first dimension of head_dims.
- dense_tensor is assumed to have a shape [D_0, D_1, ...] such that
- prod(head_dims) == D_0. This function will extract slices along the
- first dimension of head_dims.
-
- Example:
-
- Consider a tensor with shape head_dims = [100, 2] and a dense_tensor with
- shape [200, 3]. Note that the first dimension of dense_tensor equals the
- product of head_dims. This function will reshape dense_tensor such that
- its shape is now [100, 2, 3] (i.e. the first dimension became head_dims)
- and then slice it along the first dimension. After slicing, the slices will
- have their initial dimensions flattened just as they were in dense_tensor
- (e.g. if there are 4 indices, the return value will have a shape of [4, 3]).
-
- Args:
- dense_tensor: a N-D dense tensor. Shape: [D_0, D_1, ...]
- indices: a 1-D integer tensor. Shape: [K]
- head_dims: True dimensions of the dense_tensor's first dimension.
-
- Returns:
- Extracted slices. Shape [K, D_1, ...]
- """
-
- tail_dims = tf.shape(dense_tensor)[1:]
- dense_tensor = tf.reshape(dense_tensor,
- tf.concat([head_dims, tail_dims], 0))
-
- slices = tf.gather(dense_tensor, indices)
- # NOTE(siege): This kills the shape annotation.
- return tf.reshape(slices, tf.concat([[-1], tail_dims], 0))
-
-
-def stack_tensor(slices, indices, dense_tensor, head_dims):
- """Reconstitutes a tensor from slices and corresponding indices.
-
- This is an inverse operation to slice_tensor. Missing slices are set to 0.
-
- Args:
- slices: a tensor. Shape [K, D_1, ...]
- indices: a 1-D integer tensor. Shape: [K]
- dense_tensor: the original tensor the slices were taken
- from. Shape: [D_0, D_1, ...]
- head_dims: True dimensions of the dense_tensor's first dimension.
-
- Returns:
- Reconstituted tensor. Shape: [D_0, D_1, ...]
- """
- # NOTE(siege): This cast shouldn't be necessary.
- indices = tf.cast(indices, tf.int32)
-
- tail_dims = tf.shape(dense_tensor)[1:]
- dense_shape = tf.concat([head_dims, tail_dims], 0)
-
- slices = tf.reshape(slices, tf.concat([[-1], dense_shape[1:]], 0))
- indices = tf.expand_dims(indices, -1)
-
- return tf.reshape(tf.scatter_nd(indices, slices, dense_shape),
- tf.shape(dense_tensor))
-
-
-def update_slices(slices, indices, dense_tensor, head_dims):
- """Reconstitutes a tensor from slices and corresponding indices.
-
- Like stack_tensor, but instead of setting missing slices to 0, sets them to
- what they were in the original tensor. The return value is reshaped to be
- the same as dense_tensor.
-
- Args:
- slices: a tensor. Shape [K, D_1, ...]
- indices: a 1-D integer tensor. Shape: [K]
- dense_tensor: the original tensor the slices were taken
- from. Shape: [D_0, D_1, ...]
- head_dims: True dimensions of the dense_tensor's first dimension.
-
- Returns:
- Reconstituted tensor. Shape: [D_0, D_1, ...]
- """ - # NOTE(siege): This cast shouldn't be necessary. - indices = tf.cast(indices, tf.int32) - - tail_dims = tf.shape(dense_tensor)[1:] - dense_shape = tf.concat([head_dims, tail_dims], 0) - - update_mask_vals = tf.fill(tf.shape(indices), 1) - reshaped_indices = tf.expand_dims(indices, -1) - update_mask = tf.equal( - tf.scatter_nd(reshaped_indices, update_mask_vals, head_dims[:1]), 1) - - reshaped_dense_slices = tf.reshape( - stack_tensor(slices, indices, dense_tensor, head_dims), dense_shape) - reshaped_dense_tensor = tf.reshape(dense_tensor, dense_shape) - - return tf.reshape( - tf.where(update_mask, reshaped_dense_slices, reshaped_dense_tensor), - tf.shape(dense_tensor)) diff --git a/research/learned_optimizer/problems/BUILD b/research/learned_optimizer/problems/BUILD deleted file mode 100644 index c70461882..000000000 --- a/research/learned_optimizer/problems/BUILD +++ /dev/null @@ -1,43 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -# Libraries -# ===== - -py_library( - name = "datasets", - srcs = ["datasets.py"], - deps = [ - ], -) - -py_library( - name = "model_adapter", - srcs = ["model_adapter.py"], - deps = [ - ":problem_generator", - ], -) - -py_library( - name = "problem_generator", - srcs = ["problem_generator.py"], - deps = [ - ":problem_spec", - ], -) - -py_library( - name = "problem_sets", - srcs = ["problem_sets.py"], - deps = [ - ":datasets", - ":model_adapter", - ":problem_generator", - ], -) - -py_library( - name = "problem_spec", - srcs = ["problem_spec.py"], - deps = [], -) diff --git a/research/learned_optimizer/problems/datasets.py b/research/learned_optimizer/problems/datasets.py deleted file mode 100644 index edf3df653..000000000 --- a/research/learned_optimizer/problems/datasets.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Functions to generate or load datasets for supervised learning.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple - -import numpy as np -from sklearn.datasets import make_classification - -MAX_SEED = 4294967295 - - -class Dataset(namedtuple("Dataset", "data labels")): - """Helper class for managing a supervised learning dataset. - - Args: - data: an array of type float32 with N samples, each of which is the set - of features for that sample. (Shape (N, D_i), where N is the number of - samples and D_i is the number of features for that sample.) - labels: an array of type int32 or int64 with N elements, indicating the - class label for the corresponding set of features in data. - """ - # Since this is an immutable object, we don't need to reserve slots. 
- __slots__ = () - - @property - def size(self): - """Dataset size (number of samples).""" - return len(self.data) - - def batch_indices(self, num_batches, batch_size): - """Creates indices of shuffled minibatches. - - Args: - num_batches: the number of batches to generate - batch_size: the size of each batch - - Returns: - batch_indices: a list of minibatch indices, arranged so that the dataset - is randomly shuffled. - - Raises: - ValueError: if the data and labels have different lengths - """ - if len(self.data) != len(self.labels): - raise ValueError("Labels and data must have the same number of samples.") - - batch_indices = [] - - # Follows logic in mnist.py to ensure we cover the entire dataset. - index_in_epoch = 0 - dataset_size = len(self.data) - dataset_indices = np.arange(dataset_size) - np.random.shuffle(dataset_indices) - - for _ in range(num_batches): - start = index_in_epoch - index_in_epoch += batch_size - if index_in_epoch > dataset_size: - - # Finished epoch, reshuffle. - np.random.shuffle(dataset_indices) - - # Start next epoch. - start = 0 - index_in_epoch = batch_size - - end = index_in_epoch - batch_indices.append(dataset_indices[start:end].tolist()) - - return batch_indices - - -def noisy_parity_class(n_samples, - n_classes=2, - n_context_ids=5, - noise_prob=0.25, - random_seed=None): - """Returns a randomly generated sparse-to-sparse dataset. - - The label is a parity class of a set of context classes. - - Args: - n_samples: number of samples (data points) - n_classes: number of class labels (default: 2) - n_context_ids: how many classes to take the parity of (default: 5). - noise_prob: how often to corrupt the label (default: 0.25) - random_seed: seed used for drawing the random data (default: None) - Returns: - dataset: A Dataset namedtuple containing the generated data and labels - """ - np.random.seed(random_seed) - x = np.random.randint(0, n_classes, [n_samples, n_context_ids]) - noise = np.random.binomial(1, noise_prob, [n_samples]) - y = (np.sum(x, 1) + noise) % n_classes - return Dataset(x.astype("float32"), y.astype("int32")) - - -def random(n_features, n_samples, n_classes=2, sep=1.0, random_seed=None): - """Returns a randomly generated classification dataset. - - Args: - n_features: number of features (dependent variables) - n_samples: number of samples (data points) - n_classes: number of class labels (default: 2) - sep: separation of the two classes, a higher value corresponds to - an easier classification problem (default: 1.0) - random_seed: seed used for drawing the random data (default: None) - - Returns: - dataset: A Dataset namedtuple containing the generated data and labels - """ - # Generate the problem data. - x, y = make_classification(n_samples=n_samples, - n_features=n_features, - n_informative=n_features, - n_redundant=0, - n_classes=n_classes, - class_sep=sep, - random_state=random_seed) - - return Dataset(x.astype("float32"), y.astype("int32")) - - -def random_binary(n_features, n_samples, random_seed=None): - """Returns a randomly generated dataset of binary values. 
- - Args: - n_features: number of features (dependent variables) - n_samples: number of samples (data points) - random_seed: seed used for drawing the random data (default: None) - - Returns: - dataset: A Dataset namedtuple containing the generated data and labels - """ - random_seed = (np.random.randint(MAX_SEED) if random_seed is None - else random_seed) - np.random.seed(random_seed) - - x = np.random.randint(2, size=(n_samples, n_features)) - y = np.zeros((n_samples, 1)) - - return Dataset(x.astype("float32"), y.astype("int32")) - - -def random_symmetric(n_features, n_samples, random_seed=None): - """Returns a randomly generated dataset of values and their negatives. - - Args: - n_features: number of features (dependent variables) - n_samples: number of samples (data points) - random_seed: seed used for drawing the random data (default: None) - - Returns: - dataset: A Dataset namedtuple containing the generated data and labels - """ - random_seed = (np.random.randint(MAX_SEED) if random_seed is None - else random_seed) - np.random.seed(random_seed) - - x1 = np.random.normal(size=(int(n_samples/2), n_features)) - x = np.concatenate((x1, -x1), axis=0) - y = np.zeros((n_samples, 1)) - - return Dataset(x.astype("float32"), y.astype("int32")) - - -def random_mlp(n_features, n_samples, random_seed=None, n_layers=6, width=20): - """Returns a generated output of an MLP with random weights. - - Args: - n_features: number of features (dependent variables) - n_samples: number of samples (data points) - random_seed: seed used for drawing the random data (default: None) - n_layers: number of layers in random MLP - width: width of the layers in random MLP - - Returns: - dataset: A Dataset namedtuple containing the generated data and labels - """ - random_seed = (np.random.randint(MAX_SEED) if random_seed is None - else random_seed) - np.random.seed(random_seed) - - x = np.random.normal(size=(n_samples, n_features)) - y = x - n_in = n_features - scale_factor = np.sqrt(2.) / np.sqrt(n_features) - for _ in range(n_layers): - weights = np.random.normal(size=(n_in, width)) * scale_factor - y = np.dot(y, weights).clip(min=0) - n_in = width - - y = y[:, 0] - y[y > 0] = 1 - - return Dataset(x.astype("float32"), y.astype("int32")) - - -EMPTY_DATASET = Dataset(np.array([], dtype="float32"), - np.array([], dtype="int32")) diff --git a/research/learned_optimizer/problems/model_adapter.py b/research/learned_optimizer/problems/model_adapter.py deleted file mode 100644 index 845599236..000000000 --- a/research/learned_optimizer/problems/model_adapter.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Implementation of the ModelAdapter class."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import mock
-import tensorflow as tf
-
-from learned_optimizer.problems import problem_generator as pg
-
-
-class ModelAdapter(pg.Problem):
- """Adapts TensorFlow models/graphs into a form suitable for meta-training.
-
- This class adapts an existing TensorFlow graph into a form suitable for
- meta-training a learned optimizer.
- """
-
- def __init__(self, make_loss_and_init_fn):
- """Wraps a model in the Problem interface.
-
- The make_loss_and_init_fn argument is a callable that returns a tuple of
- two other callables as follows.
-
- The first will construct most of the graph and return the problem loss. It
- is essential that this graph contains the totality of the model's variables,
- but none of its queues.
-
- The second will construct the model initialization graph given a list
- of parameters and return a callable that is passed an instance of
- tf.Session and should initialize the model's parameters.
-
- An example value of this argument would look like this:
-
- ```python
- def make_loss_and_init_fn():
- inputs = queued_reader()
-
- def make_loss():
- return create_model_with_variables(inputs)
-
- def make_init_fn(parameters):
- saver = tf.train.Saver(parameters)
- def init_fn(sess):
- saver.restore(sess, ...)
- return init_fn
-
- return make_loss, make_init_fn
- ```
-
- Args:
- make_loss_and_init_fn: a callable, as described above
- """
- make_loss_fn, make_init_fn = make_loss_and_init_fn()
-
- self.make_loss_fn = make_loss_fn
- self.parameters, self.constants = _get_variables(make_loss_fn)
-
- if make_init_fn is not None:
- init_fn = make_init_fn(self.parameters + self.constants)
- else:
- init_op = tf.initialize_variables(self.parameters + self.constants)
- init_fn = lambda sess: sess.run(init_op)
-
- tf.logging.info("ModelAdapter parameters: %s",
- [op.name for op in self.parameters])
- tf.logging.info("ModelAdapter constants: %s",
- [op.name for op in self.constants])
-
- super(ModelAdapter, self).__init__(
- [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
-
- def init_tensors(self, seed=None):
- """Returns a list of tensors with the given shape."""
- return self.parameters
-
- def init_variables(self, seed=None):
- """Returns a list of variables with the given shape."""
- # NOTE(siege): This is awkward, as these are not set as trainable.
- return self.parameters
-
- def objective(self, parameters, data=None, labels=None):
- """Computes the objective given a list of parameters.
-
- Args:
- parameters: The parameters to optimize (as a list of tensors)
- data: An optional batch of data for calculating objectives
- labels: An optional batch of corresponding labels
-
- Returns:
- A scalar tensor representing the objective value
- """
- # We need to set up a mapping based on the original parameter names, because
- # the parameters passed can be arbitrary tensors.
- parameter_mapping = {
- old_p.name: p
- for old_p, p in zip(self.parameters, parameters)
- }
-
- with tf.variable_scope(tf.get_variable_scope(), reuse=True):
- return _make_with_custom_variables(self.make_loss_fn, parameter_mapping)
-
-
-def _get_variables(func):
- """Calls func, returning any variables created.
-
- The created variables are modified to not be trainable, and are placed into
- the LOCAL_VARIABLES collection.
-
- Args:
- func: Function to be called.
-
- Returns:
- A tuple (variables, constants) where the first element is a list of
- trainable variables and the second is the non-trainable variables.
- """
- variables = []
- constants = []
-
- # We need to create these variables like normal, so grab the original
- # constructor before we mock it.
- original_init = tf.Variable.__init__
-
- def custom_init(self, *args, **kwargs):
- trainable = kwargs.get("trainable", True)
- kwargs["trainable"] = False
- # Making these variables local keeps them out of the optimizer's checkpoints
- # somehow.
- kwargs["collections"] = [tf.GraphKeys.LOCAL_VARIABLES]
- original_init(self, *args, **kwargs)
- if trainable:
- variables.append(self)
- else:
- constants.append(self)
-
- # This name-scope is just a nicety for TensorBoard.
- with tf.name_scope("unused_graph"):
- with mock.patch.object(tf.Variable, "__init__", custom_init):
- func()
-
- return variables, constants
-
-
-def _make_with_custom_variables(func, variable_mapping):
- """Calls func and replaces the value of some variables created in it.
-
- Args:
- func: Function to be called.
- variable_mapping: A mapping of variable name to the replacement tensor or
- tf.Variable.
-
- Returns:
- The return value of func is returned.
- """
- original_value = tf.Variable.value
-
- def custom_value(self):
- if self.name in variable_mapping:
- replacement = variable_mapping[self.name]
- tf.logging.info("Replaced %s with %s" % (self.name, replacement))
-
- # The value() method needs to return a tensor, so we need to call value
- # on the replacement. This has to be done manually like this, otherwise
- # we'll get an infinite loop.
- if isinstance(replacement, tf.Variable):
- replacement = original_value(replacement)
-
- return replacement
- else:
- return original_value(self)
-
- with mock.patch.object(tf.Variable, "value", custom_value):
- with mock.patch.object(tf.Variable, "_AsTensor", custom_value):
- return func()
diff --git a/research/learned_optimizer/problems/problem_generator.py b/research/learned_optimizer/problems/problem_generator.py
deleted file mode 100644
index abe1008fa..000000000
--- a/research/learned_optimizer/problems/problem_generator.py
+++ /dev/null
@@ -1,1016 +0,0 @@
-# Copyright 2017 Google, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Generates toy optimization problems.
-
-This module contains a base class, Problem, that defines a minimal interface
-for optimization problems, and a few specific problem types that subclass it.
-
-Test functions for optimization: http://www.sfu.ca/~ssurjano/optimization.html
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-from learned_optimizer.problems import problem_spec as prob_spec
-
-tf.app.flags.DEFINE_float("l2_reg_scale", 1e-3,
- """Scaling factor for parameter value regularization
- in softmax classifier problems.""")
-FLAGS = tf.app.flags.FLAGS
-
-EPSILON = 1e-6
-MAX_SEED = 4294967295
-PARAMETER_SCOPE = "parameters"
-
-_Spec = prob_spec.Spec
-
-
-class Problem(object):
- """Base class for optimization problems.
-
- This defines an interface for optimization problems, including objective and
- gradients functions and a feed_generator function that yields data to pass to
- feed_dict in tensorflow.
-
- Subclasses of Problem must (at the minimum) override the objective method,
- which computes the objective/loss/cost to minimize, and specify the desired
- shape of the parameters in a list in the param_shapes attribute.
- """
-
- def __init__(self, param_shapes, random_seed, noise_stdev, init_fn=None):
- """Initializes the problem and a global random seed for it.
-
- Args:
- param_shapes: A list of tuples defining the expected shapes of the
- parameters for this problem
- random_seed: Either an integer or None (in which case the seed is
- randomly drawn)
- noise_stdev: Strength (standard deviation) of added gradient noise
- init_fn: A function taking a tf.Session object that is used to
- initialize the problem's variables.
-
- Raises:
- ValueError: If the random_seed is not an integer and not None
- """
- if random_seed is not None and not isinstance(random_seed, int):
- raise ValueError("random_seed must be an integer or None")
-
- # Pick a random seed.
- self.random_seed = (np.random.randint(MAX_SEED) if random_seed is None
- else random_seed)
-
- # Store the noise level.
- self.noise_stdev = noise_stdev
-
- # Set the random seed to ensure any random data in the problem is the same.
- np.random.seed(self.random_seed)
-
- # Store the parameter shapes.
- self.param_shapes = param_shapes
-
- if init_fn is not None:
- self.init_fn = init_fn
- else:
- self.init_fn = lambda _: None
-
- def init_tensors(self, seed=None):
- """Returns a list of tensors with the given shape."""
- return [tf.random_normal(shape, seed=seed) for shape in self.param_shapes]
-
- def init_variables(self, seed=None):
- """Returns a list of variables with the given shape."""
- with tf.variable_scope(PARAMETER_SCOPE):
- params = [tf.Variable(param) for param in self.init_tensors(seed)]
- return params
-
- def objective(self, parameters, data=None, labels=None):
- """Computes the objective given a list of parameters.
-
- Args:
- parameters: The parameters to optimize (as a list of tensors)
- data: An optional batch of data for calculating objectives
- labels: An optional batch of corresponding labels
-
- Returns:
- A scalar tensor representing the objective value
- """
- raise NotImplementedError
-
- def gradients(self, objective, parameters):
- """Computes gradients of the objective with respect to the parameters.
-
- Args:
- objective: The objective op (e.g.
output of self.objective()) - parameters: A list of tensors (the parameters to optimize) - - Returns: - A list of tensors representing the gradient for each parameter, - returned in the same order as the given list - """ - grads = tf.gradients(objective, list(parameters)) - noisy_grads = [] - - for grad in grads: - if isinstance(grad, tf.IndexedSlices): - noise = self.noise_stdev * tf.random_normal(tf.shape(grad.values)) - new_grad = tf.IndexedSlices(grad.values + noise, grad.indices) - else: - new_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape()) - noisy_grads.append(new_grad) - - return noisy_grads - - -class Quadratic(Problem): - """Optimizes a random quadratic function. - - The objective is: f(x) = (1/2) ||Wx - y||_2^2 - where W is a random Gaussian matrix and y is a random Gaussian vector. - """ - - def __init__(self, ndim, random_seed=None, noise_stdev=0.0): - """Initializes a random quadratic problem.""" - param_shapes = [(ndim, 1)] - super(Quadratic, self).__init__(param_shapes, random_seed, noise_stdev) - - # Generate a random problem instance. - self.w = np.random.randn(ndim, ndim).astype("float32") - self.y = np.random.randn(ndim, 1).astype("float32") - - def objective(self, params, data=None, labels=None): - """Quadratic objective (see base class for details).""" - return tf.nn.l2_loss(tf.matmul(self.w, params[0]) - self.y) - - -class SoftmaxClassifier(Problem): - """Helper functions for supervised softmax classification problems.""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_normal(shape, seed=seed) * 1.2 / np.sqrt(shape[0]) - for shape in self.param_shapes] - - def inference(self, params, data): - """Computes logits given parameters and data. - - Args: - params: List of parameter tensors or variables - data: Batch of features with samples along the first dimension - - Returns: - logits: Un-normalized logits with shape (num_samples, num_classes) - """ - raise NotImplementedError - - def objective(self, params, data, labels): - """Computes the softmax cross entropy. - - Args: - params: List of parameter tensors or variables - data: Batch of features with samples along the first dimension - labels: Vector of labels with the same number of samples as the data - - Returns: - loss: Softmax cross entropy loss averaged over the samples in the batch - - Raises: - ValueError: If the objective is to be computed over >2 classes, because - this operation is broken in tensorflow at the moment. - """ - # Forward pass. - logits = self.inference(params, data) - - # Compute the loss. - l2reg = [tf.reduce_sum(param ** 2) for param in params] - if int(logits.get_shape()[1]) == 2: - labels = tf.cast(labels, tf.float32) - losses = tf.nn.sigmoid_cross_entropy_with_logits( - labels=labels, logits=logits[:, 0]) - else: - raise ValueError("Unable to compute softmax cross entropy for more than" - " 2 classes.") - - return tf.reduce_mean(losses) + tf.reduce_mean(l2reg) * FLAGS.l2_reg_scale - - def argmax(self, logits): - """Samples the most likely class label given the logits. - - Args: - logits: Un-normalized logits with shape (num_samples, num_classes) - - Returns: - predictions: Predicted class labels, has shape (num_samples,) - """ - return tf.cast(tf.argmax(tf.nn.softmax(logits), 1), tf.int32) - - def accuracy(self, params, data, labels): - """Computes the accuracy (fraction of correct classifications). 
-
-    Args:
-      params: List of parameter tensors or variables
-      data: Batch of features with samples along the first dimension
-      labels: Vector of labels with the same number of samples as the data
-
-    Returns:
-      accuracy: Fraction of correct classifications across the batch
-    """
-    predictions = self.argmax(self.inference(params, data))
-    return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32))
-
-
-class SoftmaxRegression(SoftmaxClassifier):
-  """Builds a softmax regression problem."""
-
-  def __init__(self, n_features, n_classes, activation=tf.identity,
-               random_seed=None, noise_stdev=0.0):
-    self.activation = activation
-    self.n_features = n_features
-    param_shapes = [(n_features, n_classes), (n_classes,)]
-    super(SoftmaxRegression, self).__init__(param_shapes,
-                                            random_seed,
-                                            noise_stdev)
-
-  def inference(self, params, data):
-    features = tf.reshape(data, (-1, self.n_features))
-    return tf.matmul(features, params[0]) + params[1]
-
-
-class SparseSoftmaxRegression(SoftmaxClassifier):
-  """Builds a sparse input softmax regression problem."""
-
-  def __init__(self,
-               n_features,
-               n_classes,
-               activation=tf.identity,
-               random_seed=None,
-               noise_stdev=0.0):
-    self.activation = activation
-    self.n_features = n_features
-    param_shapes = [(n_classes, n_features), (n_features, n_classes), (
-        n_classes,)]
-    super(SparseSoftmaxRegression, self).__init__(param_shapes, random_seed,
-                                                  noise_stdev)
-
-  def inference(self, params, data):
-    all_embeddings, softmax_weights, softmax_bias = params
-    embeddings = tf.nn.embedding_lookup(all_embeddings, tf.cast(data, tf.int32))
-    embeddings = tf.reduce_sum(embeddings, 1)
-    return tf.matmul(embeddings, softmax_weights) + softmax_bias
-
-
-class OneHotSparseSoftmaxRegression(SoftmaxClassifier):
-  """Builds a sparse input softmax regression problem.
-
-  This is identical to SparseSoftmaxRegression, but without using embedding
-  ops.
-  """
-
-  def __init__(self,
-               n_features,
-               n_classes,
-               activation=tf.identity,
-               random_seed=None,
-               noise_stdev=0.0):
-    self.activation = activation
-    self.n_features = n_features
-    self.n_classes = n_classes
-    param_shapes = [(n_classes, n_features), (n_features, n_classes), (
-        n_classes,)]
-    super(OneHotSparseSoftmaxRegression, self).__init__(param_shapes,
-                                                        random_seed,
-                                                        noise_stdev)
-
-  def inference(self, params, data):
-    all_embeddings, softmax_weights, softmax_bias = params
-    num_ids = tf.shape(data)[1]
-    one_hot_embeddings = tf.one_hot(tf.cast(data, tf.int32), self.n_classes)
-    one_hot_embeddings = tf.reshape(one_hot_embeddings, [-1, self.n_classes])
-    embeddings = tf.matmul(one_hot_embeddings, all_embeddings)
-    embeddings = tf.reshape(embeddings, [-1, num_ids, self.n_features])
-    embeddings = tf.reduce_sum(embeddings, 1)
-    return tf.matmul(embeddings, softmax_weights) + softmax_bias
-
-
-class FullyConnected(SoftmaxClassifier):
-  """Builds a multi-layer perceptron classifier."""
-
-  def __init__(self, n_features, n_classes, hidden_sizes=(32, 64),
-               activation=tf.nn.sigmoid, random_seed=None, noise_stdev=0.0):
-    """Initializes a multi-layer perceptron classification problem."""
-    # Store the number of features and activation function.
-    self.n_features = n_features
-    self.activation = activation
-
-    # Define the network as a list of weight + bias shapes for each layer.
-    param_shapes = []
-    for ix, sz in enumerate(hidden_sizes + (n_classes,)):
-
-      # The previous layer's size (n_features if input).
-      prev_size = n_features if ix == 0 else hidden_sizes[ix - 1]
-
-      # Weight shape for this layer.
- param_shapes.append((prev_size, sz)) - - # Bias shape for this layer. - param_shapes.append((sz,)) - - super(FullyConnected, self).__init__(param_shapes, random_seed, noise_stdev) - - def inference(self, params, data): - # Flatten the features into a vector. - features = tf.reshape(data, (-1, self.n_features)) - - # Pass the data through the network. - preactivations = tf.matmul(features, params[0]) + params[1] - - for layer in range(2, len(self.param_shapes), 2): - net = self.activation(preactivations) - preactivations = tf.matmul(net, params[layer]) + params[layer + 1] - - return preactivations - - def accuracy(self, params, data, labels): - """Computes the accuracy (fraction of correct classifications). - - Args: - params: List of parameter tensors or variables - data: Batch of features with samples along the first dimension - labels: Vector of labels with the same number of samples as the data - - Returns: - accuracy: Fraction of correct classifications across the batch - """ - predictions = self.argmax(self.activation(self.inference(params, data))) - return tf.contrib.metrics.accuracy(predictions, tf.cast(labels, tf.int32)) - - -class ConvNet(SoftmaxClassifier): - """Builds an N-layer convnet for image classification.""" - - def __init__(self, - image_shape, - n_classes, - filter_list, - activation=tf.nn.relu, - random_seed=None, - noise_stdev=0.0): - # Number of channels, number of pixels in x- and y- dimensions. - n_channels, px, py = image_shape - - # Store the activation. - self.activation = activation - - param_shapes = [] - input_size = n_channels - for fltr in filter_list: - # Add conv2d filters. - param_shapes.append((fltr[0], fltr[1], input_size, fltr[2])) - input_size = fltr[2] - - # Number of units in the final (dense) layer. - self.affine_size = input_size * px * py - - param_shapes.append((self.affine_size, n_classes)) # affine weights - param_shapes.append((n_classes,)) # affine bias - - super(ConvNet, self).__init__(param_shapes, random_seed, noise_stdev) - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_normal(shape, mean=0., stddev=0.01, seed=seed) - for shape in self.param_shapes] - - def inference(self, params, data): - - # Unpack. - w_conv_list = params[:-2] - output_w, output_b = params[-2:] - - conv_input = data - for w_conv in w_conv_list: - layer = tf.nn.conv2d(conv_input, w_conv, strides=[1] * 4, padding="SAME") - output = self.activation(layer) - conv_input = output - - # Flatten. - flattened = tf.reshape(conv_input, (-1, self.affine_size)) - - # Fully connected layer. - return tf.matmul(flattened, output_w) + output_b - - -class Bowl(Problem): - """A 2D quadratic bowl.""" - - def __init__(self, condition_number, angle=0.0, - random_seed=None, noise_stdev=0.0): - assert condition_number > 0, "Condition number must be positive." - - # Define parameter shapes. - param_shapes = [(2, 1)] - super(Bowl, self).__init__(param_shapes, random_seed, noise_stdev) - - self.condition_number = condition_number - self.angle = angle - self._build_matrix(condition_number, angle) - - def _build_matrix(self, condition_number, angle): - """Builds the Hessian matrix.""" - hessian = np.array([[condition_number, 0.], [0., 1.]], dtype="float32") - - # Build the rotation matrix. - rotation_matrix = np.array([ - [np.cos(angle), -np.sin(angle)], - [np.sin(angle), np.cos(angle)] - ]) - - # The objective is 0.5 * || Ax ||_2^2 - # where the data matrix (A) is: sqrt(Hessian).dot(rotation_matrix). 
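-    # Assumption for the reader (not in the original comment): the Hessian of
-    # that objective is A^T A = R^T H R, which shares the eigenvalues
-    # {condition_number, 1} of H, so for condition_number >= 1 the problem's
-    # condition number is exactly `condition_number`.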
- self.matrix = np.sqrt(hessian).dot(rotation_matrix) - - def objective(self, params, data=None, labels=None): - mtx = tf.constant(self.matrix, dtype=tf.float32) - return tf.nn.l2_loss(tf.matmul(mtx, params[0])) - - def surface(self, xlim=5, ylim=5, n=50): - xm, ym = _mesh(xlim, ylim, n) - pts = np.vstack([xm.ravel(), ym.ravel()]) - zm = 0.5 * np.linalg.norm(self.matrix.dot(pts), axis=0) ** 2 - return xm, ym, zm.reshape(n, n) - - -class Problem2D(Problem): - - def __init__(self, random_seed=None, noise_stdev=0.0): - param_shapes = [(2,)] - super(Problem2D, self).__init__(param_shapes, random_seed, noise_stdev) - - def surface(self, n=50, xlim=5, ylim=5): - """Computes the objective surface over a 2d mesh.""" - - # Create a mesh over the given coordinate ranges. - xm, ym = _mesh(xlim, ylim, n) - - with tf.Graph().as_default(), tf.Session() as sess: - - # Ops to compute the objective at every (x, y) point. - x = tf.placeholder(tf.float32, shape=xm.shape) - y = tf.placeholder(tf.float32, shape=ym.shape) - obj = self.objective([[x, y]]) - - # Run the computation. - zm = sess.run(obj, feed_dict={x: xm, y: ym}) - - return xm, ym, zm - - -class Rosenbrock(Problem2D): - """See https://en.wikipedia.org/wiki/Rosenbrock_function. - - This function has a single global minima at [1, 1] - The objective value at this point is zero. - """ - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-5., maxval=10., seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = (1 - x)**2 + 100 * (y - x**2)**2 - return tf.squeeze(obj) - - -def make_rosenbrock_loss_and_init(device=None): - """A variable-backed version of Rosenbrock problem. - - See the Rosenbrock class for details. - - Args: - device: Where to place the ops of this problem. - - Returns: - A tuple of two callables, first of which creates the loss and the second - creates the parameter initializer function. - """ - def make_rosenbrock_loss(): - with tf.name_scope("optimizee"): - with tf.device(device): - x = tf.get_variable("x", [1]) - y = tf.get_variable("y", [1]) - c = tf.get_variable( - "c", [1], - initializer=tf.constant_initializer(100.0), - trainable=False) - obj = (1 - x)**2 + c * (y - x**2)**2 - return tf.squeeze(obj) - - def make_init_fn(parameters): - with tf.device(device): - init_op = tf.variables_initializer(parameters) - def init_fn(sess): - tf.logging.info("Initializing model parameters.") - sess.run(init_op) - return init_fn - - return make_rosenbrock_loss, make_init_fn - - -class Saddle(Problem2D): - """Loss surface around a saddle point.""" - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = x ** 2 - y ** 2 - return tf.squeeze(obj) - - -class LogSumExp(Problem2D): - """2D function defined by the log of the sum of exponentials.""" - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = tf.log(tf.exp(x + 3. * y - 0.1) + - tf.exp(x - 3. 
* y - 0.1) + - tf.exp(-x - 0.1) + 1.0) - return tf.squeeze(obj) - - -class Ackley(Problem2D): - """Ackley's function (contains many local minima).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-32.768, maxval=32.768, seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = (-20 * tf.exp(-0.2 * tf.sqrt(0.5 * (x ** 2 + y ** 2))) - - tf.exp(0.5 * (tf.cos(2 * np.pi * x) + tf.cos(2 * np.pi * y))) + - tf.exp(1.0) + 20.) - return tf.squeeze(obj) - - -class Beale(Problem2D): - """Beale function (a multimodal function with sharp peaks).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-4.5, maxval=4.5, seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = ((1.5 - x + x * y) ** 2 + - (2.25 - x + x * y ** 2) ** 2 + - (2.625 - x + x * y ** 3) ** 2) - return tf.squeeze(obj) - - -class Booth(Problem2D): - """Booth's function (has a long valley along one dimension).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-10., maxval=10., seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = (x + 2 * y - 7) ** 2 + (2 * x + y - 5) ** 2 - return tf.squeeze(obj) - - -class StyblinskiTang(Problem2D): - """Styblinski-Tang function (a bumpy function in two dimensions).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-5., maxval=5., seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - params = tf.split(params[0], 2, axis=0) - obj = 0.5 * tf.reduce_sum([x ** 4 - 16 * x ** 2 + 5 * x - for x in params], 0) + 80. - return tf.squeeze(obj) - - -class Matyas(Problem2D): - """Matyas function (a function with a single global minimum in a valley).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=-10, maxval=10, seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - obj = 0.26 * (x ** 2 + y ** 2) - 0.48 * x * y - return tf.squeeze(obj) - - -class Branin(Problem2D): - """Branin function (a function with three global minima).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - x1 = tf.random_uniform((1,), minval=-5., maxval=10., - seed=seed) - x2 = tf.random_uniform((1,), minval=0., maxval=15., - seed=seed) - return [tf.concat([x1, x2], 0)] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - - # Define some constants. - a = 1. - b = 5.1 / (4. * np.pi ** 2) - c = 5 / np.pi - r = 6. - s = 10. - t = 1 / (8. * np.pi) - - # Evaluate the function. 
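-    # In full: f(x, y) = a*(y - b*x^2 + c*x - r)^2 + s*(1 - t)*cos(x) + s.
-    # The three global minima lie at (-pi, 12.275), (pi, 2.275) and
-    # (9.42478, 2.475), each with value ~0.398.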
- obj = a * (y - b * x ** 2 + c * x - r) ** 2 + s * (1 - t) * tf.cos(x) + s - return tf.squeeze(obj) - - -class Michalewicz(Problem2D): - """Michalewicz function (has steep ridges and valleys).""" - - def init_tensors(self, seed=None): - """Returns a list of tensors with the given shape.""" - return [tf.random_uniform(shape, minval=0., maxval=np.pi, seed=seed) - for shape in self.param_shapes] - - def objective(self, params, data=None, labels=None): - x, y = tf.split(params[0], 2, axis=0) - m = 5 # Defines how steep the ridges are (larger m => steeper ridges). - obj = 2. - (tf.sin(x) * tf.sin(x ** 2 / np.pi) ** (2 * m) + - tf.sin(y) * tf.sin(2 * y ** 2 / np.pi) ** (2 * m)) - return tf.squeeze(obj) - - -class Rescale(Problem): - """Takes an existing problem, and rescales all the parameters.""" - - def __init__(self, problem_spec, scale=10., noise_stdev=0.0): - self.problem = problem_spec.build() - self.param_shapes = self.problem.param_shapes - self.scale = scale - - super(Rescale, self).__init__(self.param_shapes, random_seed=None, - noise_stdev=noise_stdev) - - def init_tensors(self, seed=None): - params_raw = self.problem.init_tensors(seed=seed) - params = [t * self.scale for t in params_raw] - return params - - def objective(self, params, data=None, labels=None): - params_raw = [t/self.scale for t in params] - - problem_obj = self.problem.objective(params_raw, data, labels) - return problem_obj - - -class SumTask(Problem): - """Takes a list of problems and modifies the objective to be their sum.""" - - def __init__(self, problem_specs, noise_stdev=0.0): - self.problems = [ps.build() for ps in problem_specs] - self.param_shapes = [] - for prob in self.problems: - self.param_shapes += prob.param_shapes - - super(SumTask, self).__init__(self.param_shapes, random_seed=None, - noise_stdev=noise_stdev) - - def init_tensors(self, seed=None): - tensors = [] - for prob in self.problems: - tensors += prob.init_tensors(seed=seed) - return tensors - - def objective(self, params, data=None, labels=None): - obj = 0. - index = 0 - for prob in self.problems: - num_params = len(prob.param_shapes) - obj += prob.objective(params[index:index + num_params]) - index += num_params - return obj - - -class IsotropicQuadratic(Problem): - """An isotropic quadratic problem.""" - - def objective(self, params, data=None, labels=None): - return sum([tf.reduce_sum(param ** 2) for param in params]) - - -class Norm(Problem): - """Takes an existing problem and modifies the objective to be its N-norm.""" - - def __init__(self, ndim, random_seed=None, noise_stdev=0.0, norm_power=2.): - param_shapes = [(ndim, 1)] - super(Norm, self).__init__(param_shapes, random_seed, noise_stdev) - - # Generate a random problem instance. - self.w = np.random.randn(ndim, ndim).astype("float32") - self.y = np.random.randn(ndim, 1).astype("float32") - self.norm_power = norm_power - - def objective(self, params, data=None, labels=None): - diff = tf.matmul(self.w, params[0]) - self.y - exp = 1. 
/ self.norm_power
-    loss = tf.reduce_sum((tf.abs(diff) + EPSILON) ** self.norm_power) ** exp
-    return loss
-
-
-class LogObjective(Problem):
-  """Takes an existing problem and modifies the objective to be its log."""
-
-  def __init__(self, problem_spec):
-    self.problem = problem_spec.build()
-    self.param_shapes = self.problem.param_shapes
-
-    super(LogObjective, self).__init__(self.param_shapes,
-                                       random_seed=None,
-                                       noise_stdev=0.0)
-
-  def objective(self, params, data=None, labels=None):
-    problem_obj = self.problem.objective(params, data, labels)
-    return tf.log(problem_obj + EPSILON) - tf.log(EPSILON)
-
-
-class SparseProblem(Problem):
-  """Takes a problem and sets gradients to 0 with the given probability."""
-
-  def __init__(self,
-               problem_spec,
-               zero_probability=0.99,
-               random_seed=None,
-               noise_stdev=0.0):
-    self.problem = problem_spec.build()
-    self.param_shapes = self.problem.param_shapes
-    self.zero_prob = zero_probability
-
-    super(SparseProblem, self).__init__(self.param_shapes,
-                                        random_seed=random_seed,
-                                        noise_stdev=noise_stdev)
-
-  def objective(self, parameters, data=None, labels=None):
-    return self.problem.objective(parameters, data, labels)
-
-  def gradients(self, objective, parameters):
-    grads = tf.gradients(objective, list(parameters))
-
-    new_grads = []
-    for grad in grads:
-      mask = tf.greater(self.zero_prob, tf.random_uniform(grad.get_shape()))
-      zero_grad = tf.zeros_like(grad, dtype=tf.float32)
-      noisy_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape())
-      new_grads.append(tf.where(mask, zero_grad, noisy_grad))
-    return new_grads
-
-
-class DependencyChain(Problem):
-  """A problem in which parameters must be optimized in order.
-
-  A sequence of parameters which all need to be brought to 0, but where each
-  parameter in the sequence can't be brought to 0 until the preceding one
-  has been. This should take a long time to optimize, with steady
-  (or accelerating) progress throughout the entire process.
-  """
-
-  def __init__(self, ndim, random_seed=None, noise_stdev=0.):
-    param_shapes = [(ndim + 1,)]
-    self.ndim = ndim
-    super(DependencyChain, self).__init__(
-        param_shapes, random_seed, noise_stdev)
-
-  def objective(self, params, data=None, labels=None):
-    terms = params[0][0]**2 + params[0][1:]**2 / (params[0][:-1]**2 + EPSILON)
-    return tf.reduce_sum(terms)
-
-
-class MinMaxWell(Problem):
-  """Problem with global min when both the min and max (absolute) params are 1.
-
-  The gradient for all but two parameters (the min and max) is zero. This
-  should therefore encourage the optimizer to behave sensibly even when
-  parameters have zero gradients, as is common, e.g., for some deep neural
-  nets.
-  """
-
-  def __init__(self, ndim, random_seed=None, noise_stdev=0.):
-    param_shapes = [(ndim,)]
-    self.ndim = ndim
-    super(MinMaxWell, self).__init__(param_shapes, random_seed, noise_stdev)
-
-  def objective(self, params, data=None, labels=None):
-    params_sqr = params[0]**2
-    min_sqr = tf.reduce_min(params_sqr)
-    max_sqr = tf.reduce_max(params_sqr)
-    epsilon = 1e-12
-
-    return max_sqr + 1./min_sqr - 2. + epsilon
-
-
-class OutwardSnake(Problem):
-  """A winding path out to infinity.
-
-  Ideal step length stays constant along the entire path.
-  """
-
-  def __init__(self, ndim, random_seed=None, noise_stdev=0.):
-    param_shapes = [(ndim,)]
-    self.ndim = ndim
-    super(OutwardSnake, self).__init__(param_shapes, random_seed, noise_stdev)
-
-  def objective(self, params, data, labels=None):
-    radius = tf.sqrt(tf.reduce_sum(params[0]**2))
-    rad_loss = tf.reduce_sum(1.
/ (radius + 1e-6) * data[:, 0]) - - sin_dist = params[0][1:] - tf.cos(params[0][:-1]) * np.pi - sin_loss = tf.reduce_sum((sin_dist * data[:, 1:])**2) - - return rad_loss + sin_loss - - -class ProjectionQuadratic(Problem): - """Dataset consists of different directions to probe. Global min is at 0.""" - - def __init__(self, ndim, random_seed=None, noise_stdev=0.): - param_shapes = [(1, ndim)] - super(ProjectionQuadratic, self).__init__( - param_shapes, random_seed, noise_stdev) - - def objective(self, params, data, labels=None): - return tf.reduce_sum((params[0] * data)**2) - - -class SumOfQuadratics(Problem): - - def __init__(self, ndim, random_seed=None, noise_stdev=0.): - param_shapes = [(1, ndim)] - super(SumOfQuadratics, self).__init__( - param_shapes, random_seed, noise_stdev) - - def objective(self, params, data, labels=None): - epsilon = 1e-12 - # Assume dataset is designed so that the global minimum is at params=0. - # Subtract loss at params=0, so that global minimum has objective value - # epsilon (added to avoid floating point issues). - return (tf.reduce_sum((params[0] - data)**2) - tf.reduce_sum(data**2) + - epsilon) - - -class MatMulAlgorithm(Problem): - """A 6-th order polynomial optimization problem. - - This problem is parametrized by n and k. A solution to this problem with - objective value exactly zero defines a matrix multiplication algorithm of - n x n matrices using k multiplications between matrices. When applied - recursively, such an algorithm has complexity O(n^(log_n(k))). - - Given n, it is not known in general which values of k in [n^2, n^3] have a - solution. There is always a solution with k = n^3 (this is the naive - algorithm). - - In the special case n = 2, it is known that there are solutions for k = {7, 8} - but not for k <= 6. For n = 3, it is known that there are exact solutions for - 23 <= k <= 27, and there are asymptotic solutions for k = {21, 22}, but the - other cases are unknown. - - For a given n and k, if one solution exists then infinitely many solutions - exist due to permutation and scaling symmetries in the parameters. - - This is a very hard problem for some values of n and k (e.g. n = 3, k = 21), - but very easy for other values (e.g. n = 2, k = 7). - - For a given n and k, the specific formulation of this problem is as follows. - Let theta_a, theta_b, theta_c be parameter matrices with respective dimensions - [n**2, k], [n**2, k], [k, n**2]. Then for any matrices a, b with shape [n, n], - we can form the matrix c with shape [n, n] via the operation: - ((vec(a) * theta_a) .* (vec(b) * theta_b)) * theta_c = vec(c), (#) - where vec(x) is the operator that flattens a matrix with shape [n, n] into a - row vector with shape [1, n**2], * denotes matrix multiplication and .* - denotes elementwise multiplication. - - This operation, parameterized by theta_a, theta_b, theta_c, is a matrix - multiplication algorithm iff c = a*b for all [n, n] matrices a and b. But - actually it suffices to verify all combinations of one-hot matrices a and b, - of which there are n**4 such combinations. This gives a batch of n**4 matrix - triplets (a, b, c) such that equation (#) must hold for each triplet. We solve - for theta_a, theta_b, theta_c by minimizing the sum of squares of errors - across this batch. - - Finally, theta_c can be computed from theta_a and theta_b. Therefore it - suffices to learn theta_a and theta_b, from which theta_c and therefore the - objective value can be computed. 
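-
-  For example, with n = 2 and k = 7, theta_a and theta_b each have shape
-  [4, 7] and theta_c has shape [7, 4]. Any setting of these parameters with
-  exactly zero loss is a 2 x 2 matrix multiplication algorithm that uses 7
-  scalar multiplications, such as Strassen's algorithm, and applying it
-  recursively gives complexity O(n^(log_2(7))) ~= O(n^2.807).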
- """ - - def __init__(self, n, k): - assert isinstance(n, int), "n must be an integer" - assert isinstance(k, int), "k must be an integer" - assert n >= 2, "Must have n >= 2" - assert k >= n**2 and k <= n**3, "Must have n**2 <= k <= n**3" - - param_shapes = [(n**2, k), (n**2, k)] # theta_a, theta_b - super(MatMulAlgorithm, self).__init__( - param_shapes, random_seed=None, noise_stdev=0.0) - - self.n = n - self.k = k - - # Build a batch of all combinations of one-hot matrices a, b, and their - # respective products c. Correctness on this batch is a necessary and - # sufficient condition for the algorithm to be valid. The number of matrices - # in {a, b, c}_3d is n**4 and each matrix is n x n. - onehots = np.identity(n**2).reshape(n**2, n, n) - a_3d = np.repeat(onehots, n**2, axis=0) - b_3d = np.tile(onehots, [n**2, 1, 1]) - c_3d = np.matmul(a_3d, b_3d) - - # Convert the batch to 2D Tensors. - self.a = tf.constant(a_3d.reshape(n**4, n**2), tf.float32, name="a") - self.b = tf.constant(b_3d.reshape(n**4, n**2), tf.float32, name="b") - self.c = tf.constant(c_3d.reshape(n**4, n**2), tf.float32, name="c") - - def init_tensors(self, seed=None): - # Initialize params such that the columns of theta_a and theta_b have L2 - # norm 1. - def _param_initializer(shape, seed=None): - x = tf.random_normal(shape, dtype=tf.float32, seed=seed) - return tf.transpose(tf.nn.l2_normalize(tf.transpose(x), 1)) - - return [_param_initializer(shape, seed) for shape in self.param_shapes] - - def objective(self, parameters, data=None, labels=None): - theta_a = parameters[0] - theta_b = parameters[1] - - # Compute theta_c from theta_a and theta_b. - p = tf.matmul(self.a, theta_a) * tf.matmul(self.b, theta_b) - p_trans = tf.transpose(p, name="p_trans") - p_inv = tf.matmul( - tf.matrix_inverse(tf.matmul(p_trans, p)), p_trans, name="p_inv") - theta_c = tf.matmul(p_inv, self.c, name="theta_c") - - # Compute the "predicted" value of c. - c_hat = tf.matmul(p, theta_c, name="c_hat") - - # Compute the loss (sum of squared errors). - loss = tf.reduce_sum((c_hat - self.c)**2, name="loss") - - return loss - - -def matmul_problem_sequence(n, k_min, k_max): - """Helper to generate a sequence of matrix multiplication problems.""" - return [(_Spec(MatMulAlgorithm, (n, k), {}), None, None) - for k in range(k_min, k_max + 1)] - - -def init_fixed_variables(arrays): - with tf.variable_scope(PARAMETER_SCOPE): - params = [tf.Variable(arr.astype("float32")) for arr in arrays] - return params - - -def _mesh(xlim, ylim, n): - """Creates a 2D meshgrid covering the given ranges. - - Args: - xlim: int that defines the desired x-range (-xlim, xlim) - ylim: int that defines the desired y-range (-ylim, ylim) - n: number of points in each dimension of the mesh - - Returns: - xm: 2D array of x-values in the mesh - ym: 2D array of y-values in the mesh - """ - return np.meshgrid(np.linspace(-xlim, xlim, n), - np.linspace(-ylim, ylim, n)) diff --git a/research/learned_optimizer/problems/problem_sets.py b/research/learned_optimizer/problems/problem_sets.py deleted file mode 100644 index eaf9273b8..000000000 --- a/research/learned_optimizer/problems/problem_sets.py +++ /dev/null @@ -1,561 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Groups of problems of different types for optimizer training.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from learned_optimizer.problems import datasets -from learned_optimizer.problems import model_adapter -from learned_optimizer.problems import problem_generator as pg -from learned_optimizer.problems import problem_spec - -_Spec = problem_spec.Spec - - -def quadratic_problems(): - return [ - (_Spec(pg.Quadratic, (20,), {}), None, None), - (_Spec(pg.Quadratic, (25,), {}), None, None), - (_Spec(pg.Quadratic, (50,), {}), None, None), - (_Spec(pg.Quadratic, (100,), {}), None, None), - ] - - -# Note: this group contains one non-noisy problem for historical reasons. The -# original training set before the refactor included this set of quadratics. -def quadratic_problems_noisy(): - return [ - (_Spec(pg.Quadratic, (20,), {"noise_stdev": 0.5}), None, None), - (_Spec(pg.Quadratic, (25,), {"noise_stdev": 0.0}), None, None), - (_Spec(pg.Quadratic, (50,), {"noise_stdev": 1.0}), None, None), - (_Spec(pg.Quadratic, (100,), {"noise_stdev": 2.0}), None, None), - ] - - -def quadratic_problems_large(): - return [ - (_Spec(pg.Quadratic, (784,), {}), None, None), - (_Spec(pg.Quadratic, (1024,), {}), None, None), - (_Spec(pg.Quadratic, (2048,), {}), None, None), - ] - - -def bowl_problems(): - return [ - (_Spec(pg.Bowl, (0.1,), {"noise_stdev": 0.0}), None, None), - (_Spec(pg.Bowl, (1.0,), {"noise_stdev": 0.0}), None, None), - (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.0}), None, None), - (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.0, "angle": np.pi / 4.}), - None, None), - ] - - -def bowl_problems_noisy(): - return [ - (_Spec(pg.Bowl, (0.1,), {"noise_stdev": 0.1}), None, None), - (_Spec(pg.Bowl, (1.0,), {"noise_stdev": 0.1}), None, None), - (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.1}), None, None), - (_Spec(pg.Bowl, (5.0,), {"noise_stdev": 0.1, "angle": np.pi / 4.}), - None, None), - ] - - -def sparse_softmax_2_class_sparse_problems(): - return [(_Spec(pg.SparseSoftmaxRegression, (5, 2), {"noise_stdev": 0.0}), - datasets.noisy_parity_class(5, random_seed=123), 23),] - - -def one_hot_sparse_softmax_2_class_sparse_problems(): - return [ - (_Spec(pg.OneHotSparseSoftmaxRegression, (5, 2), {"noise_stdev": 0.0}), - datasets.noisy_parity_class(5, random_seed=123), 23), - ] - - -def softmax_2_class_problems(): - return [ - (_Spec(pg.SoftmaxRegression, (10, 2), {}), datasets.random( - 10, 1000, random_seed=123, sep=2.0), 100), - (_Spec(pg.SoftmaxRegression, (100, 2), {}), datasets.random( - 100, 1000, random_seed=123), 50), - (_Spec(pg.SoftmaxRegression, (200, 2), {}), datasets.random( - 200, 1000, random_seed=123, sep=1.5), 20), - (_Spec(pg.SoftmaxRegression, (256, 2), {}), datasets.random( - 256, 1000, random_seed=123, sep=1.5), 100), - ] - - -def softmax_2_class_problems_noisy(): - return [ - (_Spec(pg.SoftmaxRegression, (10, 2), {"noise_stdev": 0.5}), - datasets.random(10, 1000, random_seed=123, 
sep=2.0), 100), - (_Spec(pg.SoftmaxRegression, (100, 2), {"noise_stdev": 0.1}), - datasets.random(100, 1000, random_seed=123), 50), - (_Spec(pg.SoftmaxRegression, (200, 2), {"noise_stdev": 0.1}), - datasets.random(200, 1000, random_seed=123, sep=1.5), 20), - (_Spec(pg.SoftmaxRegression, (256, 2), {"noise_stdev": 0.5}), - datasets.random(256, 1000, random_seed=123, sep=1.5), 100), - ] - - -def optimization_test_problems(): - return [ - (_Spec(pg.Ackley, (), {}), None, None), - (_Spec(pg.Beale, (), {}), None, None), - (_Spec(pg.Booth, (), {}), None, None), - (_Spec(pg.Branin, (), {}), None, None), - (_Spec(pg.LogSumExp, (), {}), None, None), - (_Spec(pg.Matyas, (), {}), None, None), - (_Spec(pg.Michalewicz, (), {}), None, None), - (_Spec(pg.Rosenbrock, (), {}), None, None), - (_Spec(pg.StyblinskiTang, (), {}), None, None), - ] - - -def optimization_test_problems_noisy(): - return [ - (_Spec(pg.Ackley, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Beale, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Booth, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Branin, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.LogSumExp, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Matyas, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Michalewicz, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.Rosenbrock, (), {"noise_stdev": 1.}), None, None), - (_Spec(pg.StyblinskiTang, (), {"noise_stdev": 1.}), None, None), - ] - - -def fully_connected_random_2_class_problems(): - return [ - (_Spec(pg.FullyConnected, (8, 2), - {"hidden_sizes": (8, 5,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(8, 1000), 10), - (_Spec(pg.FullyConnected, (12, 2), - {"hidden_sizes": (8, 5, 3), "activation": tf.nn.sigmoid}), - datasets.random_mlp(12, 1000), 200), - (_Spec(pg.FullyConnected, (5, 2), - {"hidden_sizes": (4, 4, 4, 4,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(5, 1000), 100), - (_Spec(pg.FullyConnected, (11, 2), - {"hidden_sizes": (4, 5, 6,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(11, 1000), 64), - (_Spec(pg.FullyConnected, (9, 2), - {"hidden_sizes": (8,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(9, 1000), 128), - (_Spec(pg.FullyConnected, (7, 2), - {"hidden_sizes": (8, 5,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(7, 1000), 16), - (_Spec(pg.FullyConnected, (8, 2), - {"hidden_sizes": (32, 64,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(8, 1000), 10), - (_Spec(pg.FullyConnected, (12, 2), - {"hidden_sizes": (16, 8, 3), "activation": tf.nn.sigmoid}), - datasets.random_mlp(12, 1000), 200), - (_Spec(pg.FullyConnected, (5, 2), - {"hidden_sizes": (8, 8, 8, 8,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(5, 1000), 100), - (_Spec(pg.FullyConnected, (11, 2), - {"hidden_sizes": (10, 12, 12,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(11, 1000), 64), - (_Spec(pg.FullyConnected, (9, 2), - {"hidden_sizes": (32,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(9, 1000), 128), - (_Spec(pg.FullyConnected, (7, 2), - {"hidden_sizes": (32, 64,), "activation": tf.nn.sigmoid}), - datasets.random_mlp(7, 1000), 16), - ] - - -def matmul_problems(): - return sum([ - pg.matmul_problem_sequence(2, 5, 8), - pg.matmul_problem_sequence(3, 19, 24)], []) - - -def log_objective_problems(): - return [ - (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (20,), {})], {}), - None, None), - (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (50,), {})], {}), - None, None), - (_Spec(pg.LogObjective, [_Spec(pg.Quadratic, (100,), {})], {}), - None, None), - 
(_Spec(pg.LogObjective, [_Spec(pg.Bowl, (0.1,), {})], {}), None, None), - (_Spec(pg.LogObjective, [_Spec(pg.Bowl, (1.0,), {})], {}), None, None), - (_Spec(pg.LogObjective, [_Spec(pg.Bowl, (5.0,), {})], {}), None, None), - ] - - -def sparse_gradient_problems(): - return [ - (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (20,), {})], {}), - None, None), - (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (50,), {})], {}), - None, None), - (_Spec(pg.SparseProblem, [_Spec(pg.Quadratic, (100,), {})], {}), - None, None), - (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (0.1,), {})], {}), None, None), - (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (1.0,), {})], {}), None, None), - (_Spec(pg.SparseProblem, [_Spec(pg.Bowl, (5.0,), {})], {}), None, None), - ] - - -def sparse_gradient_problems_mlp(): - return [ - (_Spec(pg.SparseProblem, [ - _Spec(pg.FullyConnected, (8, 2), { - "hidden_sizes": (8, 5,), - "activation": tf.nn.sigmoid - }) - ], {}), datasets.random_mlp(8, 1000), 10), - (_Spec(pg.SparseProblem, [ - _Spec(pg.FullyConnected, (12, 2), { - "hidden_sizes": (8, 5, 3), - "activation": tf.nn.sigmoid - }) - ], {}), datasets.random_mlp(12, 1000), 200), - (_Spec(pg.SparseProblem, [ - _Spec(pg.FullyConnected, (5, 2), { - "hidden_sizes": (4, 4, 4, 4,), - "activation": tf.nn.sigmoid - }) - ], {}), datasets.random_mlp(5, 1000), 100), - ] - - -def rescale_problems(): - return [ - (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 2.5})], - {"scale": 0.123}), None, None), - (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 1.5})], - {"scale": 8}), None, None), - (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 2.})], - {"scale": 50}), None, None), - (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 3.})], - {"scale": 200}), None, None), - (_Spec(pg.Rescale, [_Spec(pg.Norm, (18,), {"norm_power": 1.})], - {"scale": 1000}), None, None), - (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (20,), {})], {"scale": 0.1}), - None, None), - (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (25,), {})], {"scale": 10.}), - None, None), - (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (50,), {})], {"scale": 350.}), - None, None), - (_Spec(pg.Rescale, [_Spec(pg.Quadratic, (100,), {})], {"scale": 132}), - None, None), - ] - - -def norm_problems(): - return [ - # < 1 Norm causes NaN gradients early in training. - (_Spec(pg.Norm, (27,), {"norm_power": 1.}), None, None), - (_Spec(pg.Norm, (25,), {"norm_power": 2.}), None, None), - (_Spec(pg.Norm, (22,), {"norm_power": 3.}), None, None), - ] - - -def norm_problems_noisy(): - return [ - # < 1 Norm causes NaN gradients early in training. 
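-      # With norm_power p < 1, the gradient p * |x|**(p - 1) grows without
-      # bound as |x| -> 0 (the EPSILON in Norm merely caps it at a very large
-      # value), so only powers >= 1 are used here.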
- (_Spec(pg.Norm, (19,), {"noise_stdev": .1, "norm_power": 1.}), - None, None), - (_Spec(pg.Norm, (26,), {"noise_stdev": .1, "norm_power": 2.}), - None, None), - (_Spec(pg.Norm, (23,), {"noise_stdev": .1, "norm_power": 3.}), - None, None), - ] - - -def sum_problems(): - return [ - (_Spec(pg.SumTask, [[ - _Spec(pg.Quadratic, (11,), {}), - _Spec(pg.Quadratic, (3,), {}), - _Spec(pg.Quadratic, (9,), {}), - _Spec(pg.Quadratic, (7,), {}), - _Spec(pg.Quadratic, (5,), {}), - _Spec(pg.Quadratic, (13,), {}), - _Spec(pg.Quadratic, (12,), {}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Norm, (18,), {"norm_power": 3}), - _Spec(pg.Quadratic, (25,), {}), - _Spec(pg.Rosenbrock, (), {}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Rosenbrock, (), {}), - _Spec(pg.LogSumExp, (), {}), - _Spec(pg.Ackley, (), {}), - _Spec(pg.Beale, (), {}), - _Spec(pg.Booth, (), {}), - _Spec(pg.StyblinskiTang, (), {}), - _Spec(pg.Matyas, (), {}), - _Spec(pg.Branin, (), {}), - _Spec(pg.Michalewicz, (), {}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Rosenbrock, (), {}), - _Spec(pg.LogSumExp, (), {}), - _Spec(pg.Ackley, (), {}), - _Spec(pg.Beale, (), {}), - _Spec(pg.Booth, (), {}), - _Spec(pg.StyblinskiTang, (), {}), - _Spec(pg.Matyas, (), {}), - _Spec(pg.Branin, (), {}), - _Spec(pg.Michalewicz, (), {}), - _Spec(pg.Quadratic, (5,), {}), - _Spec(pg.Quadratic, (13,), {}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Quadratic, (11,), {}), - _Spec(pg.Quadratic, (3,), {}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Rosenbrock, (), {}), - _Spec(pg.LogSumExp, (), {}), - _Spec(pg.Ackley, (), {}) - ]], {}), None, None), - ] - - -def sum_problems_noisy(): - return [ - (_Spec(pg.SumTask, [[ - _Spec(pg.Quadratic, (11,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (3,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (9,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (7,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (5,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (13,), {"noise_stdev": 0.1}), - _Spec(pg.Quadratic, (12,), {"noise_stdev": 0.1}) - ]], {}), None, None), - (_Spec(pg.SumTask, [[ - _Spec(pg.Rosenbrock, (), {}), - _Spec(pg.LogSumExp, (), {}), - _Spec(pg.Ackley, (), {}), - _Spec(pg.Beale, (), {}), - _Spec(pg.Booth, (), {}), - _Spec(pg.StyblinskiTang, (), {}), - _Spec(pg.Matyas, (), {}), - _Spec(pg.Branin, (), {}), - _Spec(pg.Michalewicz, (), {}), - _Spec(pg.Quadratic, (5,), {}), - _Spec(pg.Quadratic, (13,), {"noise_stdev": 0.5}) - ]], {}), None, None), - ] - - -def dependency_chain_problems(): - return [ - (_Spec(pg.DependencyChain, (20,), {}), datasets.random_binary( - 20, 1000), 100), - (_Spec(pg.DependencyChain, (12,), {}), datasets.random_binary( - 12, 200), 10), - (_Spec(pg.DependencyChain, (56,), {}), datasets.random_binary( - 56, 5000), 100), - (_Spec(pg.DependencyChain, (64,), {}), datasets.random_binary( - 64, 1000), 50), - (_Spec(pg.DependencyChain, (13,), {}), datasets.random_binary( - 13, 10000), 50), - (_Spec(pg.DependencyChain, (20,), {}), datasets.random_binary( - 20, 1000), 128), - (_Spec(pg.DependencyChain, (12,), {}), datasets.random_binary( - 12, 300), 16), - (_Spec(pg.DependencyChain, (56,), {}), datasets.random_binary( - 56, 5000), 128), - (_Spec(pg.DependencyChain, (64,), {}), datasets.random_binary( - 64, 1000), 64), - (_Spec(pg.DependencyChain, (13,), {}), datasets.random_binary( - 13, 10000), 32), - ] - - -def outward_snake_problems(): - return [ - (_Spec(pg.OutwardSnake, (20,), {}), datasets.random_binary( - 20, 1000), 100), - 
(_Spec(pg.OutwardSnake, (12,), {}), datasets.random_binary( - 12, 200), 10), - (_Spec(pg.OutwardSnake, (56,), {}), datasets.random_binary( - 56, 5000), 100), - (_Spec(pg.OutwardSnake, (64,), {}), datasets.random_binary( - 64, 1000), 50), - (_Spec(pg.OutwardSnake, (13,), {}), datasets.random_binary( - 13, 10000), 50), - (_Spec(pg.OutwardSnake, (20,), {}), datasets.random_binary( - 20, 1000), 128), - (_Spec(pg.OutwardSnake, (12,), {}), datasets.random_binary( - 12, 300), 16), - (_Spec(pg.OutwardSnake, (56,), {}), datasets.random_binary( - 56, 5000), 128), - (_Spec(pg.OutwardSnake, (64,), {}), datasets.random_binary( - 64, 1000), 64), - (_Spec(pg.OutwardSnake, (13,), {}), datasets.random_binary( - 13, 10000), 32), - ] - - -def min_max_well_problems(): - return [ - (_Spec(pg.MinMaxWell, (20,), {}), None, None), - (_Spec(pg.MinMaxWell, (12,), {}), None, None), - (_Spec(pg.MinMaxWell, (56,), {}), None, None), - (_Spec(pg.MinMaxWell, (64,), {}), None, None), - (_Spec(pg.MinMaxWell, (13,), {}), None, None), - ] - - -def sum_of_quadratics_problems(): - return [ - (_Spec(pg.SumOfQuadratics, (20,), {}), - datasets.random_symmetric(20, 1000), 100), - (_Spec(pg.SumOfQuadratics, (12,), {}), - datasets.random_symmetric(12, 100), 10), - (_Spec(pg.SumOfQuadratics, (56,), {}), - datasets.random_symmetric(56, 5000), 100), - (_Spec(pg.SumOfQuadratics, (64,), {}), - datasets.random_symmetric(64, 1000), 50), - (_Spec(pg.SumOfQuadratics, (13,), {}), - datasets.random_symmetric(13, 10000), 50), - (_Spec(pg.SumOfQuadratics, (20,), {}), - datasets.random_symmetric(20, 1000), 128), - (_Spec(pg.SumOfQuadratics, (12,), {}), - datasets.random_symmetric(12, 100), 16), - (_Spec(pg.SumOfQuadratics, (56,), {}), - datasets.random_symmetric(56, 5000), 128), - (_Spec(pg.SumOfQuadratics, (64,), {}), - datasets.random_symmetric(64, 1000), 64), - (_Spec(pg.SumOfQuadratics, (13,), {}), - datasets.random_symmetric(13, 10000), 32), - ] - - -def projection_quadratic_problems(): - return [ - (_Spec(pg.ProjectionQuadratic, (20,), {}), - datasets.random_symmetric(20, 1000), 100), - (_Spec(pg.ProjectionQuadratic, (12,), {}), - datasets.random_symmetric(12, 100), 10), - (_Spec(pg.ProjectionQuadratic, (56,), {}), - datasets.random_symmetric(56, 5000), 100), - (_Spec(pg.ProjectionQuadratic, (64,), {}), - datasets.random_symmetric(64, 1000), 50), - (_Spec(pg.ProjectionQuadratic, (13,), {}), - datasets.random_symmetric(13, 10000), 50), - (_Spec(pg.ProjectionQuadratic, (20,), {}), - datasets.random_symmetric(20, 1000), 128), - (_Spec(pg.ProjectionQuadratic, (12,), {}), - datasets.random_symmetric(12, 100), 16), - (_Spec(pg.ProjectionQuadratic, (56,), {}), - datasets.random_symmetric(56, 5000), 128), - (_Spec(pg.ProjectionQuadratic, (64,), {}), - datasets.random_symmetric(64, 1000), 64), - (_Spec(pg.ProjectionQuadratic, (13,), {}), - datasets.random_symmetric(13, 10000), 32), - ] - - -def adapter_rosenbrock_local(): - return [(_Spec(model_adapter.ModelAdapter, - (pg.make_rosenbrock_loss_and_init,), {}), None, None),] - - -def adapter_rosenbrock_worker(): - return [(_Spec(model_adapter.ModelAdapter, - (pg.make_rosenbrock_loss_and_init,), - {"device": "/job:worker"}), None, None),] - - -def _test_problem_mlp_scaled_init_small(): - return [ - np.random.randn(10, 32) * np.sqrt(2./10), - np.random.randn(32,) * 0.1, - np.random.randn(32, 64) * np.sqrt(2./32.), - np.random.randn(64,) * 0.1, - np.random.randn(64, 2) * np.sqrt(2./64.), - np.random.randn(2,) * 0.1 - ] - - -def _test_problem_mlp_scaled_init_large(): - return [ - np.random.randn(20, 32) * 
np.sqrt(2./20), - np.random.randn(32,) * 0.1, - np.random.randn(32, 64) * np.sqrt(2./32.), - np.random.randn(64,) * 0.1, - np.random.randn(64, 10) * np.sqrt(2./64.), - np.random.randn(10,) * 0.1 - ] - - -def _test_problem_mlp_scaled_init_mnist(): - return [ - np.random.randn(784, 64) * np.sqrt(2./784.), - np.random.randn(64,) * 0.1, - np.random.randn(64, 10) * np.sqrt(2./ 64.), - np.random.randn(10,) * 0.1, - ] - - -# Wrap this construction in a function to avoid UnparsedFlagAccessError -def test_problems(): - """Test problems for visualizations.""" - # Unlike the training problem sets, these test problems are made up of - # length-5 tuples. The final items in the tuple are the name of the problem - # and the initialization random_seed for testing consistency. - tp = [ - (_Spec(pg.Quadratic, (20,), {"random_seed": 1234}), None, None, - "quad_problem", 5678), - (_Spec(pg.Quadratic, (20,), {"noise_stdev": 1.0, "random_seed": 1234}), - None, None, "quad_problem_noise", 5678), - (_Spec(pg.Rosenbrock, (), {"random_seed": 1234}), None, None, - "rosenbrock", 5678), - (_Spec(pg.Rosenbrock, (), {"random_seed": 1234, "noise_stdev": 1.0}), - None, None, "rosenbrock_noise", 5678), - (_Spec(pg.SoftmaxRegression, (10, 2), {}), datasets.random( - 10, 10000, random_seed=1234), 100, "softmax", 5678), - (_Spec(pg.SoftmaxRegression, (10, 2), {"noise_stdev": 1.0}), - datasets.random(10, 10000, random_seed=1234), 100, "softmax_noise", - 5678), - (_Spec(pg.FullyConnected, (10, 2), {}), datasets.random( - 10, 10000, random_seed=1234), 100, "mlp_small", - _test_problem_mlp_scaled_init_small()), - (_Spec(pg.FullyConnected, (20, 10), {}), datasets.random( - 20, 10000, n_classes=10, random_seed=1234), 100, "mlp_large", - _test_problem_mlp_scaled_init_large()), - (_Spec(pg.FullyConnected, (784, 10), - {"hidden_sizes": (64,), "activation": tf.nn.sigmoid}), - datasets.mnist(), 64, "mlp_mnist_sigmoid", - _test_problem_mlp_scaled_init_mnist()), - (_Spec(pg.FullyConnected, (784, 10), - {"hidden_sizes": (64,), "activation": tf.nn.relu}), - datasets.mnist(), 64, "mlp_mnist_relu", - _test_problem_mlp_scaled_init_mnist()), - (_Spec(pg.ConvNet, ((1, 28, 28), 10, [(3, 3, 8), (5, 5, 8)]), - {"activation": tf.nn.sigmoid}), datasets.mnist(), 64, - "convnet_mnist_sigmoid", None), - (_Spec(pg.ConvNet, ((1, 28, 28), 10, [(3, 3, 8), (5, 5, 8)]), - {"activation": tf.nn.relu}), datasets.mnist(), 64, - "convnet_mnist_relu", None), - ] - return tp diff --git a/research/learned_optimizer/problems/problem_spec.py b/research/learned_optimizer/problems/problem_spec.py deleted file mode 100644 index e30c47b27..000000000 --- a/research/learned_optimizer/problems/problem_spec.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2017 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Wrapper around a training problem.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple - - -class Spec(namedtuple("Spec", "callable args kwargs")): - """Syntactic sugar for keeping track of a function/class + args.""" - - # Since this is an immutable object, we don't need to reserve slots. - __slots__ = () - - def build(self): - """Returns the output of the callable.""" - return self.callable(*self.args, **self.kwargs) diff --git a/research/learning_to_remember_rare_events/README.md b/research/learning_to_remember_rare_events/README.md deleted file mode 100644 index 2eeadea78..000000000 --- a/research/learning_to_remember_rare_events/README.md +++ /dev/null @@ -1,61 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - ---- - -Code for the Memory Module as described -in "Learning to Remember Rare Events" by -Lukasz Kaiser, Ofir Nachum, Aurko Roy, and Samy Bengio -published as a conference paper at ICLR 2017. - -Requirements: -* TensorFlow (see tensorflow.org for how to install) -* Some basic command-line utilities (git, unzip). - -Description: - -The general memory module is located in memory.py. -Some code is provided to see the memory module in -action on the standard Omniglot dataset. -Download and setup the dataset using data_utils.py -and then run the training script train.py -(see example commands below). - -Note that the structure and parameters of the model -are optimized for the data preparation as provided. - -Quick Start: - -First download and set-up Omniglot data by running - -``` -python data_utils.py -``` - -Then run the training script: - -``` -python train.py --memory_size=8192 \ - --batch_size=16 --validation_length=50 \ - --episode_width=5 --episode_length=30 -``` - -The first validation batch may look like this (although it is noisy): -``` -0-shot: 0.040, 1-shot: 0.404, 2-shot: 0.516, 3-shot: 0.604, - 4-shot: 0.656, 5-shot: 0.684 -``` -At step 500 you may see something like this: -``` -0-shot: 0.036, 1-shot: 0.836, 2-shot: 0.900, 3-shot: 0.940, - 4-shot: 0.944, 5-shot: 0.916 -``` -At step 4000 you may see something like this: -``` -0-shot: 0.044, 1-shot: 0.960, 2-shot: 1.000, 3-shot: 0.988, - 4-shot: 0.972, 5-shot: 0.992 -``` - -Maintained by Ofir Nachum (ofirnachum) and -Lukasz Kaiser (lukaszkaiser). diff --git a/research/learning_to_remember_rare_events/data_utils.py b/research/learning_to_remember_rare_events/data_utils.py deleted file mode 100644 index 03d5dafb2..000000000 --- a/research/learning_to_remember_rare_events/data_utils.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -"""Data loading and other utilities. - -Use this file to first copy over and pre-process the Omniglot dataset. -Simply call - python data_utils.py -""" - -import logging -import os -import subprocess -from six.moves import cPickle as pickle - -import numpy as np -from scipy.misc import imresize -from scipy.misc import imrotate -from scipy.ndimage import imread -from six.moves import xrange -import tensorflow as tf - - -MAIN_DIR = '' -REPO_LOCATION = 'https://github.com/brendenlake/omniglot.git' -REPO_DIR = os.path.join(MAIN_DIR, 'omniglot') -DATA_DIR = os.path.join(REPO_DIR, 'python') -TRAIN_DIR = os.path.join(DATA_DIR, 'images_background') -TEST_DIR = os.path.join(DATA_DIR, 'images_evaluation') -DATA_FILE_FORMAT = os.path.join(MAIN_DIR, '%s_omni.pkl') - -TRAIN_ROTATIONS = True # augment training data with rotations -TEST_ROTATIONS = False # augment testing data with rotations -IMAGE_ORIGINAL_SIZE = 105 -IMAGE_NEW_SIZE = 28 - - -def get_data(): - """Get data in form suitable for episodic training. - - Returns: - Train and test data as dictionaries mapping - label to list of examples. - """ - with tf.gfile.GFile(DATA_FILE_FORMAT % 'train', 'rb') as f: - processed_train_data = pickle.load(f) - with tf.gfile.GFile(DATA_FILE_FORMAT % 'test', 'rb') as f: - processed_test_data = pickle.load(f) - - train_data = {} - test_data = {} - - for data, processed_data in zip([train_data, test_data], - [processed_train_data, processed_test_data]): - for image, label in zip(processed_data['images'], - processed_data['labels']): - if label not in data: - data[label] = [] - data[label].append(image.reshape([-1]).astype('float32')) - - intersection = set(train_data.keys()) & set(test_data.keys()) - assert not intersection, 'Train and test data intersect.' - ok_num_examples = [len(ll) == 20 for _, ll in train_data.items()] - assert all(ok_num_examples), 'Bad number of examples in train data.' - ok_num_examples = [len(ll) == 20 for _, ll in test_data.items()] - assert all(ok_num_examples), 'Bad number of examples in test data.' 
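-  # At this point each dict maps label -> list of exactly 20 examples, and
-  # each example is a flattened 28x28 image, i.e. a float32 vector of
-  # length 784.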
-
-  logging.info('Number of labels in train data: %d.', len(train_data))
-  logging.info('Number of labels in test data: %d.', len(test_data))
-
-  return train_data, test_data
-
-
-def crawl_directory(directory, augment_with_rotations=False,
-                    first_label=0):
-  """Crawls the data directory and returns images, labels and file paths."""
-  label_idx = first_label
-  images = []
-  labels = []
-  info = []
-
-  # traverse root directory
-  for root, _, files in os.walk(directory):
-    logging.info('Reading files from %s', root)
-    fileflag = 0
-    for file_name in files:
-      full_file_name = os.path.join(root, file_name)
-      img = imread(full_file_name, flatten=True)
-      for i, angle in enumerate([0, 90, 180, 270]):
-        if not augment_with_rotations and i > 0:
-          break
-
-        images.append(imrotate(img, angle))
-        labels.append(label_idx + i)
-        info.append(full_file_name)
-
-      fileflag = 1
-
-    if fileflag:
-      label_idx += 4 if augment_with_rotations else 1
-
-  return images, labels, info
-
-
-def resize_images(images, new_width, new_height):
-  """Resize images to new dimensions."""
-  resized_images = np.zeros([images.shape[0], new_width, new_height],
-                            dtype=np.float32)
-
-  for i in range(images.shape[0]):
-    resized_images[i, :, :] = imresize(images[i, :, :],
-                                       [new_width, new_height],
-                                       interp='bilinear',
-                                       mode=None)
-  return resized_images
-
-
-def write_datafiles(directory, write_file,
-                    resize=True, rotate=False,
-                    new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE,
-                    first_label=0):
-  """Load and preprocess images from a directory and write them to a file.
-
-  Args:
-    directory: Directory of alphabet sub-directories.
-    write_file: Filename to write to.
-    resize: Whether to resize the images.
-    rotate: Whether to augment the dataset with rotations.
-    new_width: New resize width.
-    new_height: New resize height.
-    first_label: Label to start with.
-
-  Returns:
-    Number of new labels created.
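-
-  Note: when rotate=True, crawl_directory stores four rotated copies (0, 90,
-  180 and 270 degrees) of every image under four distinct labels, so the
-  returned label count is four times the number of character classes.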
- """ - - # these are the default sizes for Omniglot: - imgwidth = IMAGE_ORIGINAL_SIZE - imgheight = IMAGE_ORIGINAL_SIZE - - logging.info('Reading the data.') - images, labels, info = crawl_directory(directory, - augment_with_rotations=rotate, - first_label=first_label) - - images_np = np.zeros([len(images), imgwidth, imgheight], dtype=np.bool) - labels_np = np.zeros([len(labels)], dtype=np.uint32) - for i in xrange(len(images)): - images_np[i, :, :] = images[i] - labels_np[i] = labels[i] - - if resize: - logging.info('Resizing images.') - resized_images = resize_images(images_np, new_width, new_height) - - logging.info('Writing resized data in float32 format.') - data = {'images': resized_images, - 'labels': labels_np, - 'info': info} - with tf.gfile.GFile(write_file, 'w') as f: - pickle.dump(data, f) - else: - logging.info('Writing original sized data in boolean format.') - data = {'images': images_np, - 'labels': labels_np, - 'info': info} - with tf.gfile.GFile(write_file, 'w') as f: - pickle.dump(data, f) - - return len(np.unique(labels_np)) - - -def maybe_download_data(): - """Download Omniglot repo if it does not exist.""" - if os.path.exists(REPO_DIR): - logging.info('It appears that Git repo already exists.') - else: - logging.info('It appears that Git repo does not exist.') - logging.info('Cloning now.') - - subprocess.check_output('git clone %s' % REPO_LOCATION, shell=True) - - if os.path.exists(TRAIN_DIR): - logging.info('It appears that train data has already been unzipped.') - else: - logging.info('It appears that train data has not been unzipped.') - logging.info('Unzipping now.') - - subprocess.check_output('unzip %s.zip -d %s' % (TRAIN_DIR, DATA_DIR), - shell=True) - - if os.path.exists(TEST_DIR): - logging.info('It appears that test data has already been unzipped.') - else: - logging.info('It appears that test data has not been unzipped.') - logging.info('Unzipping now.') - - subprocess.check_output('unzip %s.zip -d %s' % (TEST_DIR, DATA_DIR), - shell=True) - - -def preprocess_omniglot(): - """Download and prepare raw Omniglot data. - - Downloads the data from GitHub if it does not exist. - Then load the images, augment with rotations if desired. - Resize the images and write them to a pickle file. - """ - - maybe_download_data() - - directory = TRAIN_DIR - write_file = DATA_FILE_FORMAT % 'train' - num_labels = write_datafiles( - directory, write_file, resize=True, rotate=TRAIN_ROTATIONS, - new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE) - - directory = TEST_DIR - write_file = DATA_FILE_FORMAT % 'test' - write_datafiles(directory, write_file, resize=True, rotate=TEST_ROTATIONS, - new_width=IMAGE_NEW_SIZE, new_height=IMAGE_NEW_SIZE, - first_label=num_labels) - - -def main(unused_argv): - logging.basicConfig(level=logging.INFO) - preprocess_omniglot() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/learning_to_remember_rare_events/memory.py b/research/learning_to_remember_rare_events/memory.py deleted file mode 100644 index 2f40ff57f..000000000 --- a/research/learning_to_remember_rare_events/memory.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -"""Memory module for storing "nearest neighbors". - -Implements a key-value memory for generalized one-shot learning -as described in the paper -"Learning to Remember Rare Events" -by Lukasz Kaiser, Ofir Nachum, Aurko Roy, Samy Bengio, -published as a conference paper at ICLR 2017. -""" - -import numpy as np -from six.moves import xrange -import tensorflow as tf - - -class Memory(object): - """Memory module.""" - - def __init__(self, key_dim, memory_size, vocab_size, - choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0, - var_cache_device='', nn_device=''): - self.key_dim = key_dim - self.memory_size = memory_size - self.vocab_size = vocab_size - self.choose_k = min(choose_k, memory_size) - self.alpha = alpha - self.correct_in_top = correct_in_top - self.age_noise = age_noise - self.var_cache_device = var_cache_device # Variables are cached here. - self.nn_device = nn_device # Device to perform nearest neighbour matmul. - - caching_device = var_cache_device if var_cache_device else None - self.update_memory = tf.constant(True) # Can be fed "false" if needed. - self.mem_keys = tf.get_variable( - 'memkeys', [self.memory_size, self.key_dim], trainable=False, - initializer=tf.random_uniform_initializer(-0.0, 0.0), - caching_device=caching_device) - self.mem_vals = tf.get_variable( - 'memvals', [self.memory_size], dtype=tf.int32, trainable=False, - initializer=tf.constant_initializer(0, tf.int32), - caching_device=caching_device) - self.mem_age = tf.get_variable( - 'memage', [self.memory_size], dtype=tf.float32, trainable=False, - initializer=tf.constant_initializer(0.0), caching_device=caching_device) - self.recent_idx = tf.get_variable( - 'recent_idx', [self.vocab_size], dtype=tf.int32, trainable=False, - initializer=tf.constant_initializer(0, tf.int32)) - - # variable for projecting query vector into memory key - self.query_proj = tf.get_variable( - 'memory_query_proj', [self.key_dim, self.key_dim], dtype=tf.float32, - initializer=tf.truncated_normal_initializer(0, 0.01), - caching_device=caching_device) - - def get(self): - return self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx - - def set(self, k, v, a, r=None): - return tf.group( - self.mem_keys.assign(k), - self.mem_vals.assign(v), - self.mem_age.assign(a), - (self.recent_idx.assign(r) if r is not None else tf.group())) - - def clear(self): - return tf.variables_initializer([self.mem_keys, self.mem_vals, self.mem_age, - self.recent_idx]) - - def get_hint_pool_idxs(self, normalized_query): - """Get small set of idxs to compute nearest neighbor queries on. - - This is an expensive look-up on the whole memory that is used to - avoid more expensive operations later on. - - Args: - normalized_query: A Tensor of shape [None, key_dim]. - - Returns: - A Tensor of shape [None, choose_k] of indices in memory - that are closest to the queries. 
- - """ - # look up in large memory, no gradients - with tf.device(self.nn_device): - similarities = tf.matmul(tf.stop_gradient(normalized_query), - self.mem_keys, transpose_b=True, name='nn_mmul') - _, hint_pool_idxs = tf.nn.top_k( - tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk') - return hint_pool_idxs - - def make_update_op(self, upd_idxs, upd_keys, upd_vals, - batch_size, use_recent_idx, intended_output): - """Function that creates all the update ops.""" - mem_age_incr = self.mem_age.assign_add(tf.ones([self.memory_size], - dtype=tf.float32)) - with tf.control_dependencies([mem_age_incr]): - mem_age_upd = tf.scatter_update( - self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32)) - - mem_key_upd = tf.scatter_update( - self.mem_keys, upd_idxs, upd_keys) - mem_val_upd = tf.scatter_update( - self.mem_vals, upd_idxs, upd_vals) - - if use_recent_idx: - recent_idx_upd = tf.scatter_update( - self.recent_idx, intended_output, upd_idxs) - else: - recent_idx_upd = tf.group() - - return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd) - - def query(self, query_vec, intended_output, use_recent_idx=True): - """Queries memory for nearest neighbor. - - Args: - query_vec: A batch of vectors to query (embedding of input to model). - intended_output: The values that would be the correct output of the - memory. - use_recent_idx: Whether to always insert at least one instance of a - correct memory fetch. - - Returns: - A tuple (result, mask, teacher_loss). - result: The result of the memory look up. - mask: The affinity of the query to the result. - teacher_loss: The loss for training the memory module. - """ - - batch_size = tf.shape(query_vec)[0] - output_given = intended_output is not None - - # prepare query for memory lookup - query_vec = tf.matmul(query_vec, self.query_proj) - normalized_query = tf.nn.l2_normalize(query_vec, dim=1) - - hint_pool_idxs = self.get_hint_pool_idxs(normalized_query) - - if output_given and use_recent_idx: # add at least one correct memory - most_recent_hint_idx = tf.gather(self.recent_idx, intended_output) - hint_pool_idxs = tf.concat( - axis=1, - values=[hint_pool_idxs, tf.expand_dims(most_recent_hint_idx, 1)]) - choose_k = tf.shape(hint_pool_idxs)[1] - - with tf.device(self.var_cache_device): - # create small memory and look up with gradients - my_mem_keys = tf.stop_gradient(tf.gather(self.mem_keys, hint_pool_idxs, - name='my_mem_keys_gather')) - similarities = tf.matmul(tf.expand_dims(normalized_query, 1), - my_mem_keys, adjoint_b=True, name='batch_mmul') - hint_pool_sims = tf.squeeze(similarities, [1], name='hint_pool_sims') - hint_pool_mem_vals = tf.gather(self.mem_vals, hint_pool_idxs, - name='hint_pool_mem_vals') - # Calculate softmax mask on the top-k if requested. - # Softmax temperature. Say we have K elements at dist x and one at (x+a). - # Softmax of the last is e^tm(x+a)/Ke^tm*x + e^tm(x+a) = e^tm*a/K+e^tm*a. - # To make that 20% we'd need to have e^tm*a ~= 0.2K, so tm = log(0.2K)/a. 
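Read the comment above with parentheses restored: the softmax weight assigned to the closer key is e^(tm*a) / (K + e^(tm*a)). Below is a self-contained NumPy check of this temperature heuristic, using the module's defaults choose_k=256 and alpha=0.1; the base similarity x is an illustrative assumption, not a value from the code.

```python
import numpy as np

# Defaults from Memory.__init__ above; x is an arbitrary base similarity.
choose_k, alpha = 256, 0.1
softmax_temp = max(1.0, np.log(0.2 * choose_k) / alpha)  # ~39.4

# K keys at similarity x, plus one closer key at x + alpha.
x = 0.5
sims = np.array([x] * choose_k + [x + alpha])
weights = np.exp(softmax_temp * sims)
probs = weights / weights.sum()

# The closer key receives e^(tm*a) / (K + e^(tm*a)) = 51.2 / 307.2 ~ 0.167
# of the mass, i.e. on the order of the ~20% the heuristic aims for.
print(softmax_temp, probs[-1])
```

With these defaults the closest key carries far more mass than any single other key in the pool, without the mask collapsing to a hard argmax.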
- softmax_temp = max(1.0, np.log(0.2 * self.choose_k) / self.alpha) - mask = tf.nn.softmax(hint_pool_sims[:, :choose_k - 1] * softmax_temp) - - # prepare returned values - nearest_neighbor = tf.to_int32( - tf.argmax(hint_pool_sims[:, :choose_k - 1], 1)) - - no_teacher_idxs = tf.gather( - tf.reshape(hint_pool_idxs, [-1]), - nearest_neighbor + choose_k * tf.range(batch_size)) - - with tf.device(self.var_cache_device): - result = tf.gather(self.mem_vals, tf.reshape(no_teacher_idxs, [-1])) - - if not output_given: - teacher_loss = None - return result, mask, teacher_loss - - # prepare hints from the teacher on hint pool - teacher_hints = tf.to_float( - tf.abs(tf.expand_dims(intended_output, 1) - hint_pool_mem_vals)) - teacher_hints = 1.0 - tf.minimum(1.0, teacher_hints) - - teacher_vals, teacher_hint_idxs = tf.nn.top_k( - hint_pool_sims * teacher_hints, k=1) - neg_teacher_vals, _ = tf.nn.top_k( - hint_pool_sims * (1 - teacher_hints), k=1) - - # bring back idxs to full memory - teacher_idxs = tf.gather( - tf.reshape(hint_pool_idxs, [-1]), - teacher_hint_idxs[:, 0] + choose_k * tf.range(batch_size)) - - # zero-out teacher_vals if there are no hints - teacher_vals *= ( - 1 - tf.to_float(tf.equal(0.0, tf.reduce_sum(teacher_hints, 1)))) - - # we'll determine whether to do an update to memory based on whether - # memory was queried correctly - sliced_hints = tf.slice(teacher_hints, [0, 0], [-1, self.correct_in_top]) - incorrect_memory_lookup = tf.equal(0.0, tf.reduce_sum(sliced_hints, 1)) - - # loss based on triplet loss - teacher_loss = (tf.nn.relu(neg_teacher_vals - teacher_vals + self.alpha) - - self.alpha) - - # prepare memory updates - update_keys = normalized_query - update_vals = intended_output - - fetched_idxs = teacher_idxs # correctly fetched from memory - with tf.device(self.var_cache_device): - fetched_keys = tf.gather(self.mem_keys, fetched_idxs, name='fetched_keys') - fetched_vals = tf.gather(self.mem_vals, fetched_idxs, name='fetched_vals') - - # do memory updates here - fetched_keys_upd = update_keys + fetched_keys # Momentum-like update - fetched_keys_upd = tf.nn.l2_normalize(fetched_keys_upd, dim=1) - # Randomize age a bit, e.g., to select different ones in parallel workers. - mem_age_with_noise = self.mem_age + tf.random_uniform( - [self.memory_size], - self.age_noise, self.age_noise) - - _, oldest_idxs = tf.nn.top_k(mem_age_with_noise, k=batch_size, sorted=False) - - with tf.control_dependencies([result]): - upd_idxs = tf.where(incorrect_memory_lookup, - oldest_idxs, - fetched_idxs) - # upd_idxs = tf.Print(upd_idxs, [upd_idxs], "UPD IDX", summarize=8) - upd_keys = tf.where(incorrect_memory_lookup, - update_keys, - fetched_keys_upd) - upd_vals = tf.where(incorrect_memory_lookup, - update_vals, - fetched_vals) - - def make_update_op(): - return self.make_update_op(upd_idxs, upd_keys, upd_vals, - batch_size, use_recent_idx, intended_output) - - update_op = tf.cond(self.update_memory, make_update_op, tf.no_op) - - with tf.control_dependencies([update_op]): - result = tf.identity(result) - mask = tf.identity(mask) - teacher_loss = tf.identity(teacher_loss) - - return result, mask, tf.reduce_mean(teacher_loss) - - -class LSHMemory(Memory): - """Memory employing locality sensitive hashing. - - Note: Not fully tested. 
- """ - - def __init__(self, key_dim, memory_size, vocab_size, - choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0, - var_cache_device='', nn_device='', - num_hashes=None, num_libraries=None): - super(LSHMemory, self).__init__( - key_dim, memory_size, vocab_size, - choose_k=choose_k, alpha=alpha, correct_in_top=1, age_noise=age_noise, - var_cache_device=var_cache_device, nn_device=nn_device) - - self.num_libraries = num_libraries or int(self.choose_k ** 0.5) - self.num_per_hash_slot = max(1, self.choose_k // self.num_libraries) - self.num_hashes = (num_hashes or - int(np.log2(self.memory_size / self.num_per_hash_slot))) - self.num_hashes = min(max(self.num_hashes, 1), 20) - self.num_hash_slots = 2 ** self.num_hashes - - # hashing vectors - self.hash_vecs = [ - tf.get_variable( - 'hash_vecs%d' % i, [self.num_hashes, self.key_dim], - dtype=tf.float32, trainable=False, - initializer=tf.truncated_normal_initializer(0, 1)) - for i in xrange(self.num_libraries)] - - # map representing which hash slots map to which mem keys - self.hash_slots = [ - tf.get_variable( - 'hash_slots%d' % i, [self.num_hash_slots, self.num_per_hash_slot], - dtype=tf.int32, trainable=False, - initializer=tf.random_uniform_initializer(maxval=self.memory_size, - dtype=tf.int32)) - for i in xrange(self.num_libraries)] - - def get(self): # not implemented - return self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx - - def set(self, k, v, a, r=None): # not implemented - return tf.group( - self.mem_keys.assign(k), - self.mem_vals.assign(v), - self.mem_age.assign(a), - (self.recent_idx.assign(r) if r is not None else tf.group())) - - def clear(self): - return tf.variables_initializer([self.mem_keys, self.mem_vals, self.mem_age, - self.recent_idx] + self.hash_slots) - - def get_hash_slots(self, query): - """Gets hashed-to buckets for batch of queries. - - Args: - query: 2-d Tensor of query vectors. - - Returns: - A list of hashed-to buckets for each hash function. - """ - - binary_hash = [ - tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0) - for i in xrange(self.num_libraries)] - hash_slot_idxs = [ - tf.reduce_sum( - tf.to_int32(binary_hash[i]) * - tf.constant([[2 ** i for i in xrange(self.num_hashes)]], - dtype=tf.int32), 1) - for i in xrange(self.num_libraries)] - return hash_slot_idxs - - def get_hint_pool_idxs(self, normalized_query): - """Get small set of idxs to compute nearest neighbor queries on. - - This is an expensive look-up on the whole memory that is used to - avoid more expensive operations later on. - - Args: - normalized_query: A Tensor of shape [None, key_dim]. - - Returns: - A Tensor of shape [None, choose_k] of indices in memory - that are closest to the queries. 
- - """ - # get hash of query vecs - hash_slot_idxs = self.get_hash_slots(normalized_query) - - # grab mem idxs in the hash slots - hint_pool_idxs = [ - tf.maximum(tf.minimum( - tf.gather(self.hash_slots[i], idxs), - self.memory_size - 1), 0) - for i, idxs in enumerate(hash_slot_idxs)] - - return tf.concat(axis=1, values=hint_pool_idxs) - - def make_update_op(self, upd_idxs, upd_keys, upd_vals, - batch_size, use_recent_idx, intended_output): - """Function that creates all the update ops.""" - base_update_op = super(LSHMemory, self).make_update_op( - upd_idxs, upd_keys, upd_vals, - batch_size, use_recent_idx, intended_output) - - # compute hash slots to be updated - hash_slot_idxs = self.get_hash_slots(upd_keys) - - # make updates - update_ops = [] - with tf.control_dependencies([base_update_op]): - for i, slot_idxs in enumerate(hash_slot_idxs): - # for each slot, choose which entry to replace - entry_idx = tf.random_uniform([batch_size], - maxval=self.num_per_hash_slot, - dtype=tf.int32) - entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot, - dtype=tf.int32) - entry_add = (tf.expand_dims(upd_idxs, 1) * - tf.one_hot(entry_idx, self.num_per_hash_slot, - dtype=tf.int32)) - - mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul) - with tf.control_dependencies([mul_op]): - add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add) - update_ops.append(add_op) - - return tf.group(*update_ops) diff --git a/research/learning_to_remember_rare_events/model.py b/research/learning_to_remember_rare_events/model.py deleted file mode 100644 index 7a6b46004..000000000 --- a/research/learning_to_remember_rare_events/model.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -"""Model using memory component. - -The model embeds images using a standard CNN architecture. -These embeddings are used as keys to the memory component, -which returns nearest neighbors. -""" - -import tensorflow as tf - -import memory - -FLAGS = tf.flags.FLAGS - - -class BasicClassifier(object): - - def __init__(self, output_dim): - self.output_dim = output_dim - - def core_builder(self, memory_val, x, y): - del x, y - y_pred = memory_val - loss = 0.0 - - return loss, y_pred - - -class LeNet(object): - """Standard CNN architecture.""" - - def __init__(self, image_size, num_channels, hidden_dim): - self.image_size = image_size - self.num_channels = num_channels - self.hidden_dim = hidden_dim - self.matrix_init = tf.truncated_normal_initializer(stddev=0.1) - self.vector_init = tf.constant_initializer(0.0) - - def core_builder(self, x): - """Embeds x using standard CNN architecture. - - Args: - x: Batch of images as a 2-d Tensor [batch_size, -1]. - - Returns: - A 2-d Tensor [batch_size, hidden_dim] of embedded images. 
- """ - - ch1 = 32 * 2 # number of channels in 1st layer - ch2 = 64 * 2 # number of channels in 2nd layer - conv1_weights = tf.get_variable('conv1_w', - [3, 3, self.num_channels, ch1], - initializer=self.matrix_init) - conv1_biases = tf.get_variable('conv1_b', [ch1], - initializer=self.vector_init) - conv1a_weights = tf.get_variable('conv1a_w', - [3, 3, ch1, ch1], - initializer=self.matrix_init) - conv1a_biases = tf.get_variable('conv1a_b', [ch1], - initializer=self.vector_init) - - conv2_weights = tf.get_variable('conv2_w', [3, 3, ch1, ch2], - initializer=self.matrix_init) - conv2_biases = tf.get_variable('conv2_b', [ch2], - initializer=self.vector_init) - conv2a_weights = tf.get_variable('conv2a_w', [3, 3, ch2, ch2], - initializer=self.matrix_init) - conv2a_biases = tf.get_variable('conv2a_b', [ch2], - initializer=self.vector_init) - - # fully connected - fc1_weights = tf.get_variable( - 'fc1_w', [self.image_size // 4 * self.image_size // 4 * ch2, - self.hidden_dim], initializer=self.matrix_init) - fc1_biases = tf.get_variable('fc1_b', [self.hidden_dim], - initializer=self.vector_init) - - # define model - x = tf.reshape(x, - [-1, self.image_size, self.image_size, self.num_channels]) - batch_size = tf.shape(x)[0] - - conv1 = tf.nn.conv2d(x, conv1_weights, - strides=[1, 1, 1, 1], padding='SAME') - relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) - conv1 = tf.nn.conv2d(relu1, conv1a_weights, - strides=[1, 1, 1, 1], padding='SAME') - relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1a_biases)) - - pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], padding='SAME') - - conv2 = tf.nn.conv2d(pool1, conv2_weights, - strides=[1, 1, 1, 1], padding='SAME') - relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) - conv2 = tf.nn.conv2d(relu2, conv2a_weights, - strides=[1, 1, 1, 1], padding='SAME') - relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2a_biases)) - - pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], padding='SAME') - - reshape = tf.reshape(pool2, [batch_size, -1]) - hidden = tf.matmul(reshape, fc1_weights) + fc1_biases - - return hidden - - -class Model(object): - """Model for coordinating between CNN embedder and Memory module.""" - - def __init__(self, input_dim, output_dim, rep_dim, memory_size, vocab_size, - learning_rate=0.0001, use_lsh=False): - self.input_dim = input_dim - self.output_dim = output_dim - self.rep_dim = rep_dim - self.memory_size = memory_size - self.vocab_size = vocab_size - self.learning_rate = learning_rate - self.use_lsh = use_lsh - - self.embedder = self.get_embedder() - self.memory = self.get_memory() - self.classifier = self.get_classifier() - - self.global_step = tf.train.get_or_create_global_step() - - def get_embedder(self): - return LeNet(int(self.input_dim ** 0.5), 1, self.rep_dim) - - def get_memory(self): - cls = memory.LSHMemory if self.use_lsh else memory.Memory - return cls(self.rep_dim, self.memory_size, self.vocab_size) - - def get_classifier(self): - return BasicClassifier(self.output_dim) - - def core_builder(self, x, y, keep_prob, use_recent_idx=True): - embeddings = self.embedder.core_builder(x) - if keep_prob < 1.0: - embeddings = tf.nn.dropout(embeddings, keep_prob) - memory_val, _, teacher_loss = self.memory.query( - embeddings, y, use_recent_idx=use_recent_idx) - loss, y_pred = self.classifier.core_builder(memory_val, x, y) - - return loss + teacher_loss, y_pred - - def train(self, x, y): - loss, _ = self.core_builder(x, y, keep_prob=0.3) - gradient_ops = self.training_ops(loss) - return 
loss, gradient_ops - - def eval(self, x, y): - _, y_preds = self.core_builder(x, y, keep_prob=1.0, - use_recent_idx=False) - return y_preds - - def get_xy_placeholders(self): - return (tf.placeholder(tf.float32, [None, self.input_dim]), - tf.placeholder(tf.int32, [None])) - - def setup(self): - """Sets up all components of the computation graph.""" - - self.x, self.y = self.get_xy_placeholders() - - # This context creates variables - with tf.variable_scope('core', reuse=None): - self.loss, self.gradient_ops = self.train(self.x, self.y) - # And this one re-uses them (thus the `reuse=True`) - with tf.variable_scope('core', reuse=True): - self.y_preds = self.eval(self.x, self.y) - - def training_ops(self, loss): - opt = self.get_optimizer() - params = tf.trainable_variables() - gradients = tf.gradients(loss, params) - clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0) - return opt.apply_gradients(zip(clipped_gradients, params), - global_step=self.global_step) - - def get_optimizer(self): - return tf.train.AdamOptimizer(learning_rate=self.learning_rate, - epsilon=1e-4) - - def one_step(self, sess, x, y): - outputs = [self.loss, self.gradient_ops] - return sess.run(outputs, feed_dict={self.x: x, self.y: y}) - - def episode_step(self, sess, x, y, clear_memory=False): - """Performs training steps on episodic input. - - Args: - sess: A Tensorflow Session. - x: A list of batches of images defining the episode. - y: A list of batches of labels corresponding to x. - clear_memory: Whether to clear the memory before the episode. - - Returns: - List of losses the same length as the episode. - """ - - outputs = [self.loss, self.gradient_ops] - - if clear_memory: - self.clear_memory(sess) - - losses = [] - for xx, yy in zip(x, y): - out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy}) - loss = out[0] - losses.append(loss) - - return losses - - def predict(self, sess, x, y=None): - """Predict the labels on a single batch of examples. - - Args: - sess: A Tensorflow Session. - x: A batch of images. - y: The labels for the images in x. - This allows for updating the memory. - - Returns: - Predicted y. - """ - - # Storing current memory state to restore it after prediction - mem_keys, mem_vals, mem_age, _ = self.memory.get() - cur_memory = ( - tf.identity(mem_keys), - tf.identity(mem_vals), - tf.identity(mem_age), - None, - ) - - outputs = [self.y_preds] - if y is None: - ret = sess.run(outputs, feed_dict={self.x: x}) - else: - ret = sess.run(outputs, feed_dict={self.x: x, self.y: y}) - - # Restoring memory state - self.memory.set(*cur_memory) - - return ret - - def episode_predict(self, sess, x, y, clear_memory=False): - """Predict the labels on an episode of examples. - - Args: - sess: A Tensorflow Session. - x: A list of batches of images. - y: A list of labels for the images in x. - This allows for updating the memory. - clear_memory: Whether to clear the memory before the episode. - - Returns: - List of predicted y. 
- """ - - # Storing current memory state to restore it after prediction - mem_keys, mem_vals, mem_age, _ = self.memory.get() - cur_memory = ( - tf.identity(mem_keys), - tf.identity(mem_vals), - tf.identity(mem_age), - None, - ) - - if clear_memory: - self.clear_memory(sess) - - outputs = [self.y_preds] - y_preds = [] - for xx, yy in zip(x, y): - out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy}) - y_pred = out[0] - y_preds.append(y_pred) - - # Restoring memory state - self.memory.set(*cur_memory) - - return y_preds - - def clear_memory(self, sess): - sess.run([self.memory.clear()]) diff --git a/research/learning_to_remember_rare_events/train.py b/research/learning_to_remember_rare_events/train.py deleted file mode 100644 index c5c6d06b5..000000000 --- a/research/learning_to_remember_rare_events/train.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ============================================================================== -r"""Script for training model. - -Simple command to get up and running: - python train.py --memory_size=8192 \ - --batch_size=16 --validation_length=50 \ - --episode_width=5 --episode_length=30 -""" - -import logging -import os -import random - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import data_utils -import model - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_integer('rep_dim', 128, - 'dimension of keys to use in memory') -tf.flags.DEFINE_integer('episode_length', 100, 'length of episode') -tf.flags.DEFINE_integer('episode_width', 5, - 'number of distinct labels in a single episode') -tf.flags.DEFINE_integer('memory_size', None, 'number of slots in memory. 
' - 'Leave as None to default to episode length') -tf.flags.DEFINE_integer('batch_size', 16, 'batch size') -tf.flags.DEFINE_integer('num_episodes', 100000, 'number of training episodes') -tf.flags.DEFINE_integer('validation_frequency', 20, - 'every so many training episodes, ' - 'assess validation accuracy') -tf.flags.DEFINE_integer('validation_length', 10, - 'number of episodes to use to compute ' - 'validation accuracy') -tf.flags.DEFINE_integer('seed', 888, 'random seed for training sampling') -tf.flags.DEFINE_string('save_dir', '', 'directory to save model to') -tf.flags.DEFINE_bool('use_lsh', False, - 'use locality-sensitive hashing ' - '(NOTE: not fully tested)') - - -class Trainer(object): - """Class that takes care of training, validating, and checkpointing model.""" - - def __init__(self, train_data, valid_data, input_dim, output_dim=None): - self.train_data = train_data - self.valid_data = valid_data - self.input_dim = input_dim - - self.rep_dim = FLAGS.rep_dim - self.episode_length = FLAGS.episode_length - self.episode_width = FLAGS.episode_width - self.batch_size = FLAGS.batch_size - self.memory_size = (self.episode_length * self.batch_size - if FLAGS.memory_size is None else FLAGS.memory_size) - self.use_lsh = FLAGS.use_lsh - - self.output_dim = (output_dim if output_dim is not None - else self.episode_width) - - def get_model(self): - # vocab size is the number of distinct values that - # could go into the memory key-value storage - vocab_size = self.episode_width * self.batch_size - return model.Model( - self.input_dim, self.output_dim, self.rep_dim, self.memory_size, - vocab_size, use_lsh=self.use_lsh) - - def sample_episode_batch(self, data, - episode_length, episode_width, batch_size): - """Generates a random batch for training or validation. - - Structures each element of the batch as an 'episode'. - Each episode contains episode_length examples and - episode_width distinct labels. - - Args: - data: A dictionary mapping label to list of examples. - episode_length: Number of examples in each episode. - episode_width: Distinct number of labels in each episode. - batch_size: Batch size (number of episodes). - - Returns: - A tuple (x, y) where x is a list of batches of examples - with size episode_length and y is a list of batches of labels. 
- """ - - episodes_x = [[] for _ in xrange(episode_length)] - episodes_y = [[] for _ in xrange(episode_length)] - assert len(data) >= episode_width - keys = data.keys() - for b in xrange(batch_size): - episode_labels = random.sample(keys, episode_width) - remainder = episode_length % episode_width - remainders = [0] * (episode_width - remainder) + [1] * remainder - episode_x = [ - random.sample(data[lab], - r + (episode_length - remainder) // episode_width) - for lab, r in zip(episode_labels, remainders)] - episode = sum([[(x, i, ii) for ii, x in enumerate(xx)] - for i, xx in enumerate(episode_x)], []) - random.shuffle(episode) - # Arrange episode so that each distinct label is seen before moving to - # 2nd showing - episode.sort(key=lambda elem: elem[2]) - assert len(episode) == episode_length - for i in xrange(episode_length): - episodes_x[i].append(episode[i][0]) - episodes_y[i].append(episode[i][1] + b * episode_width) - - return ([np.array(xx).astype('float32') for xx in episodes_x], - [np.array(yy).astype('int32') for yy in episodes_y]) - - def compute_correct(self, ys, y_preds): - return np.mean(np.equal(y_preds, np.array(ys))) - - def individual_compute_correct(self, y, y_pred): - return y_pred == y - - def run(self): - """Performs training. - - Trains a model using episodic training. - Every so often, runs some evaluations on validation data. - """ - - train_data, valid_data = self.train_data, self.valid_data - input_dim, output_dim = self.input_dim, self.output_dim - rep_dim, episode_length = self.rep_dim, self.episode_length - episode_width, memory_size = self.episode_width, self.memory_size - batch_size = self.batch_size - - train_size = len(train_data) - valid_size = len(valid_data) - logging.info('train_size (number of labels) %d', train_size) - logging.info('valid_size (number of labels) %d', valid_size) - logging.info('input_dim %d', input_dim) - logging.info('output_dim %d', output_dim) - logging.info('rep_dim %d', rep_dim) - logging.info('episode_length %d', episode_length) - logging.info('episode_width %d', episode_width) - logging.info('memory_size %d', memory_size) - logging.info('batch_size %d', batch_size) - - assert all(len(v) >= float(episode_length) / episode_width - for v in train_data.values()) - assert all(len(v) >= float(episode_length) / episode_width - for v in valid_data.values()) - - output_dim = episode_width - self.model = self.get_model() - self.model.setup() - - sess = tf.Session() - sess.run(tf.global_variables_initializer()) - - saver = tf.train.Saver(max_to_keep=10) - ckpt = None - if FLAGS.save_dir: - ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir) - if ckpt and ckpt.model_checkpoint_path: - logging.info('restoring from %s', ckpt.model_checkpoint_path) - saver.restore(sess, ckpt.model_checkpoint_path) - - logging.info('starting now') - losses = [] - random.seed(FLAGS.seed) - np.random.seed(FLAGS.seed) - for i in xrange(FLAGS.num_episodes): - x, y = self.sample_episode_batch( - train_data, episode_length, episode_width, batch_size) - outputs = self.model.episode_step(sess, x, y, clear_memory=True) - loss = outputs - losses.append(loss) - - if i % FLAGS.validation_frequency == 0: - logging.info('episode batch %d, avg train loss %f', - i, np.mean(losses)) - losses = [] - - # validation - correct = [] - num_shots = episode_length // episode_width - correct_by_shot = dict((k, []) for k in xrange(num_shots)) - for _ in xrange(FLAGS.validation_length): - x, y = self.sample_episode_batch( - valid_data, episode_length, episode_width, 1) - outputs = 
self.model.episode_predict( - sess, x, y, clear_memory=True) - y_preds = outputs - correct.append(self.compute_correct(np.array(y), y_preds)) - - # compute per-shot accuracies - seen_counts = [0] * episode_width - # loop over episode steps - for yy, yy_preds in zip(y, y_preds): - # loop over batch examples - yyy, yyy_preds = int(yy[0]), int(yy_preds[0]) - count = seen_counts[yyy % episode_width] - if count in correct_by_shot: - correct_by_shot[count].append( - self.individual_compute_correct(yyy, yyy_preds)) - seen_counts[yyy % episode_width] = count + 1 - - logging.info('validation overall accuracy %f', np.mean(correct)) - logging.info('%d-shot: %.3f, ' * num_shots, - *sum([[k, np.mean(correct_by_shot[k])] - for k in xrange(num_shots)], [])) - - if saver and FLAGS.save_dir: - saved_file = saver.save(sess, - os.path.join(FLAGS.save_dir, 'model.ckpt'), - global_step=self.model.global_step) - logging.info('saved model to %s', saved_file) - - -def main(unused_argv): - train_data, valid_data = data_utils.get_data() - trainer = Trainer(train_data, valid_data, data_utils.IMAGE_NEW_SIZE ** 2) - trainer.run() - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO) - tf.app.run() diff --git a/research/learning_unsupervised_learning/.gitignore b/research/learning_unsupervised_learning/.gitignore deleted file mode 100644 index 0d20b6487..000000000 --- a/research/learning_unsupervised_learning/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.pyc diff --git a/research/learning_unsupervised_learning/README.md b/research/learning_unsupervised_learning/README.md deleted file mode 100644 index 0e38717f5..000000000 --- a/research/learning_unsupervised_learning/README.md +++ /dev/null @@ -1,40 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Learning Unsupervised Learning Rules -This repository contains code and weights for the learned update rule -presented in "Learning Unsupervised Learning Rules." At this time, this -code can not meta-train the update rule. - -### Structure -`run_eval.py` contains the main training loop. This constructs an op -that runs one iteration of the learned update rule and assigns the -results to variables. Additionally, it loads the weights from our -pre-trained model. - -The base model and the update rule architecture definition can be found in -`architectures/more_local_weight_update.py`. For a complete description -of the model, see our [paper](https://arxiv.org/abs/1804.00222). - -### Dependencies -[absl]([https://github.com/abseil/abseil-py), [tensorflow](https://tensorflow.org), [sonnet](https://github.com/deepmind/sonnet) - -### Usage - -First, download the [pre-trained optimizer model weights](https://storage.googleapis.com/learning_unsupervised_learning/200_tf_graph.zip) and extract it. - -```bash -# move to the folder above this folder -cd path_to/research/learning_unsupervised_learning/../ - -# launch the eval script -python -m learning_unsupervised_learning.run_eval \ ---train_log_dir="/tmp/learning_unsupervised_learning" \ ---checkpoint_dir="/path/to/downloaded/model/tf_graph_data.ckpt" -``` - -### Contact -Luke Metz, Niru Maheswaranathan, Github: @lukemetz, @nirum. 
Email: {lmetz, nirum}@google.com - - diff --git a/research/learning_unsupervised_learning/__init__.py b/research/learning_unsupervised_learning/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/learning_unsupervised_learning/architectures/__init__.py b/research/learning_unsupervised_learning/architectures/__init__.py deleted file mode 100644 index af9545f26..000000000 --- a/research/learning_unsupervised_learning/architectures/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -import more_local_weight_update diff --git a/research/learning_unsupervised_learning/architectures/common.py b/research/learning_unsupervised_learning/architectures/common.py deleted file mode 100644 index 43a2d4f89..000000000 --- a/research/learning_unsupervised_learning/architectures/common.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import sonnet as snt
-import tensorflow as tf
-import numpy as np
-import collections
-from learning_unsupervised_learning import utils
-
-from tensorflow.python.util import nest
-
-from learning_unsupervised_learning import variable_replace
-
-
-class LinearBatchNorm(snt.AbstractModule):
- """Module that applies a Linear layer, then BatchNorm, then an activation function."""
- def __init__(self, size, activation_fn=tf.nn.relu, name="LinearBatchNorm"):
- self.size = size
- self.activation_fn = activation_fn
- super(LinearBatchNorm, self).__init__(name=name)
-
- def _build(self, x):
- x = tf.to_float(x)
- initializers={"w": tf.truncated_normal_initializer(stddev=0.01)}
- lin = snt.Linear(self.size, use_bias=False, initializers=initializers)
- z = lin(x)
-
- scale = tf.constant(1., dtype=tf.float32)
- offset = tf.get_variable(
- "b",
- shape=[1, z.shape.as_list()[1]],
- initializer=tf.truncated_normal_initializer(stddev=0.1),
- dtype=tf.float32
- )
-
- mean, var = tf.nn.moments(z, [0], keep_dims=True)
- z = ((z - mean) * tf.rsqrt(var + 1e-6)) * scale + offset
-
- x_p = self.activation_fn(z)
-
- return z, x_p
-
- # This needs to work by string name, sadly, due to how the variable replace
- # works; it would be needed even if the custom getter approach were used.
- # This is verbose, but it should at least be clear as to what is going on.
- # TODO(lmetz) a better way to do this (the next 3 functions:
- # _raw_name, w(), b() )
- def _raw_name(self, var_name):
- """Return just the name of the variable, not the scopes (e.g. 'a/b/w:0' -> 'w')."""
- return var_name.split("/")[-1].split(":")[0]
-
-
- @property
- def w(self):
- var_list = snt.get_variables_in_module(self)
- w = [x for x in var_list if self._raw_name(x.name) == "w"]
- assert len(w) == 1
- return w[0]
-
- @property
- def b(self):
- var_list = snt.get_variables_in_module(self)
- b = [x for x in var_list if self._raw_name(x.name) == "b"]
- assert len(b) == 1
- return b[0]
-
-
-
-class Linear(snt.AbstractModule):
- def __init__(self, size, use_bias=True, init_const_mag=True):
- self.size = size
- self.use_bias = use_bias
- self.init_const_mag = init_const_mag
- super(Linear, self).__init__(name="commonLinear")
-
- def _build(self, x):
- if self.init_const_mag:
- initializers={"w": tf.truncated_normal_initializer(stddev=0.01)}
- else:
- initializers={}
- lin = snt.Linear(self.size, use_bias=self.use_bias, initializers=initializers)
- z = lin(x)
- return z
-
- # This needs to work by string name, sadly, due to how the variable replace
- # works; it would be needed even if the custom getter approach were used.
- # This is verbose, but it should at least be clear as to what is going on.
- # TODO(lmetz) a better way to do this (the next 3 functions: - # _raw_name, w(), b() ) - def _raw_name(self, var_name): - """Return just the name of the variable, not the scopes.""" - return var_name.split("/")[-1].split(":")[0] - - @property - def w(self): - var_list = snt.get_variables_in_module(self) - if self.use_bias: - assert len(var_list) == 2, "Found not 2 but %d" % len(var_list) - else: - assert len(var_list) == 1, "Found not 1 but %d" % len(var_list) - w = [x for x in var_list if self._raw_name(x.name) == "w"] - assert len(w) == 1 - return w[0] - - @property - def b(self): - var_list = snt.get_variables_in_module(self) - assert len(var_list) == 2, "Found not 2 but %d" % len(var_list) - b = [x for x in var_list if self._raw_name(x.name) == "b"] - assert len(b) == 1 - return b[0] - - -def transformer_at_state(base_model, new_variables): - """Get the base_model that has been transformed to use the variables - in final_state. - Args: - base_model: snt.Module - Goes from batch to features - new_variables: list - New list of variables to use - Returns: - func: callable of same api as base_model. - """ - assert not variable_replace.in_variable_replace_scope() - - def _feature_transformer(input_data): - """Feature transformer at the end of training.""" - initial_variables = base_model.get_variables() - replacement = collections.OrderedDict( - utils.eqzip(initial_variables, new_variables)) - with variable_replace.variable_replace(replacement): - features = base_model(input_data) - return features - - return _feature_transformer diff --git a/research/learning_unsupervised_learning/architectures/more_local_weight_update.py b/research/learning_unsupervised_learning/architectures/more_local_weight_update.py deleted file mode 100644 index 117549af0..000000000 --- a/research/learning_unsupervised_learning/architectures/more_local_weight_update.py +++ /dev/null @@ -1,861 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import numpy as np -import sonnet as snt -import tensorflow as tf - -from learning_unsupervised_learning.architectures import common -from learning_unsupervised_learning import optimizers -from learning_unsupervised_learning import utils -from learning_unsupervised_learning import summary_utils - -OptState = collections.namedtuple('OptState', - ['variables', 'opt_state', 'index']) - -BaseModelOutputs = collections.namedtuple( - 'BaseModelOutputs', ['xs', 'zs', 'mods', 'batch', 'backward_mods']) - - -class GradChannelReadout(snt.AbstractModule): - """Perform a linear readout and reshape from input 3 tensor.""" - - def __init__(self, - num_grad_channels, - device, - perm=(2, 0, 1), - name='GradChannelReadout'): - """Args: - - num_grad_channels: int - number of channels to readout to. 
- device: str or callable
- device to place weights on.
- perm: list or tuple
- transpose applied.
- """
-
- self.num_grad_channels = num_grad_channels
- self.device = device
- self.perm = perm
- super(GradChannelReadout, self).__init__(name=name)
-
- def _build(self, h):
- with tf.device(self.device):
- mod = snt.Linear(self.num_grad_channels)
- ret = snt.BatchApply(mod)(h)
- # return as [num_grad_channels] x [bs] x [num units]
- return tf.transpose(ret, perm=self.perm)
-
-
-def get_weight_stats(x, axis):
- """Compute weight statistics over the given axis.
-
- Args:
- x: tf.Tensor
- a batch of activations.
- axis: int
- axis to perform statistics over.
- Returns:
- list of tf.Tensor
- the statistics, each reshaped to a 3-D tensor (empty list if x is None).
- """
- if x is None:
- return []
-
- stats = []
- l1 = tf.reduce_mean(tf.abs(x), axis=axis)
- l2 = tf.sqrt(tf.reduce_mean(x**2, axis=axis) + 1e-6)
-
- mean, var = tf.nn.moments(x, [axis])
- stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)])
-
- stats = [tf.reshape(s, [-1, 1, 1]) for s in stats]
-
- return stats
-
-
-class AddUnitBatchStatistics(snt.AbstractModule):
- """Compute some number of statistics over units and concat them on."""
-
- def __init__(self, name='AddUnitBatchStatistics'):
- super(AddUnitBatchStatistics, self).__init__(name=name)
-
- def _build(self, x):
- # [channel, bs, 1]
- output = x
- for d in [0, 1]:
- stats = []
- l1 = tf.reduce_mean(tf.abs(x), axis=d, keepdims=True)
- l2 = tf.sqrt(tf.reduce_mean(x**2, axis=d, keepdims=True) + 1e-6)
-
- mean, var = tf.nn.moments(x, [d], keepdims=True)
- stats.extend([l1, l2, mean, tf.sqrt(var + 1e-8)])
-
- to_add = tf.concat(stats, axis=2) # [channels/1, units/1, stats]
- output += snt.BatchApply(snt.Linear(x.shape.as_list()[2]))(to_add)
- return output
-
-
-class ConcatUnitConv(snt.AbstractModule):
- """Do a small number of convolutions over units and concat / add them on."""
-
- def __init__(self, add=True):
- self.add = add
- super(ConcatUnitConv, self).__init__(name='ConcatUnitConv')
-
- def _build(self, x):
- # x is [units, bs, 1]
- net = tf.transpose(x, [1, 0, 2]) # now [bs x units x 1]
- channels = x.shape.as_list()[2]
- mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
- net = mod(net)
- net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
- net = tf.nn.relu(net)
- mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
- net = mod(net)
- net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
- net = tf.nn.relu(net)
- to_concat = tf.transpose(net, [1, 0, 2])
- if self.add:
- return x + to_concat
- else:
- return tf.concat([x, to_concat], 2)
-
-
-class MoreLocalWeightUpdateProcess(snt.AbstractModule):
-
- def __init__(
- self,
- remote_device,
- local_device,
- top_delta_size=64,
- top_delta_layers=2,
- compute_h_size=64,
- compute_h_layers=1,
- delta_dim=32,
- num_grad_channels=4,
- normalize_epsilon=1.,
- ):
- self.local_device = local_device
- self.remote_device = remote_device
- self.top_delta_size = top_delta_size
- self.top_delta_layers = top_delta_layers
- self.compute_h_size = compute_h_size
- self.compute_h_layers = compute_h_layers
- self.delta_dim = delta_dim
- self.num_grad_channels = num_grad_channels
- self.normalize_epsilon = normalize_epsilon
-
- with tf.device(local_device):
- self.opt = optimizers.UnrollableGradientDescentRollingOptimizer(
- learning_rate=1e-4)
-
- # lazily initialized for readouts
- self.readout_mods = {}
-
- super(MoreLocalWeightUpdateProcess,
- self).__init__(name='MoreLocalWeightUpdateProcess')
-
- with tf.device(remote_device):
- self()
-
- def normalize(self,
change_w, normalize_epsilon=None): - if normalize_epsilon is None: - normalize_epsilon = self.normalize_epsilon - - # normalize the weights per receptive-field, rather than per-matrix - var = tf.reduce_mean(tf.square(change_w), axis=0, keepdims=True) - change_w = (change_w) / tf.sqrt(normalize_epsilon + var) - return change_w - - def _build(self): - pass - - @snt.reuse_variables - def compute_top_delta(self, z): - """ parameterization of topD. This converts the top level activation - to an error signal. - Args: - z: tf.Tensor - batch of final layer post activations - Returns - delta: tf.Tensor - the error signal - """ - s_idx = 0 - with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device): - # typically this takes [BS, length, input_channels], - # We are applying this such that we convolve over the batch dimension. - act = tf.expand_dims(tf.transpose(z, [1, 0]), 2) # [channels, BS, 1] - - mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5]) - act = mod(act) - - act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) - act = tf.nn.relu(act) - - bs = act.shape.as_list()[0] - act = tf.transpose(act, [2, 1, 0]) - act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act) - act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) - act = tf.nn.relu(act) - act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act) - act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) - act = tf.nn.relu(act) - act = tf.transpose(act, [2, 1, 0]) - - prev_act = act - for i in range(self.top_delta_layers): - mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3]) - act = mod(act) - - act = snt.BatchNorm(axis=[0, 1])(act, is_training=False) - act = tf.nn.relu(act) - - prev_act = act - - mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3]) - act = mod(act) - - # [bs, feature_channels, delta_channels] - act = tf.transpose(act, [1, 0, 2]) - return act - - @snt.reuse_variables - def compute_h(self, - x, - z, - d, - bias, - W_bot, - W_top, - compute_perc=1.0, - compute_units=None): - """z = [BS, n_units] a = [BS, n_units] b = [BS, n_units] d = [BS, n_units, delta_channels] - - """ - - s_idx = 0 - if compute_perc != 1.0: - assert compute_units is None - - with tf.device(self.remote_device): - inp_feat = [x, z] - inp_feat = [tf.transpose(f, [1, 0]) for f in inp_feat] - - units = x.shape.as_list()[1] - bs = x.shape.as_list()[0] - - # add unit ID, to help the network differentiate units - id_theta = tf.linspace(0., (4) * np.pi, units) - assert bs is not None - id_theta_bs = tf.reshape(id_theta, [-1, 1]) * tf.ones([1, bs]) - inp_feat += [tf.sin(id_theta_bs), tf.cos(id_theta_bs)] - - # list of [units, BS, 1] - inp_feat = [tf.expand_dims(f, 2) for f in inp_feat] - - d_trans = tf.transpose(d, [1, 0, 2]) - - if compute_perc != 1.0: - compute_units = int(compute_perc * inp_feat.shape.as_list()[0]) - - # add weight matrix statistics, both from above and below - w_stats_bot = get_weight_stats(W_bot, 0) - w_stats_top = get_weight_stats(W_top, 1) - w_stats = w_stats_bot + w_stats_top - if W_bot is None or W_top is None: - # if it's an edge layer (top or bottom), just duplicate the stats for - # the weight matrix that does exist - w_stats = w_stats + w_stats - w_stats = [tf.ones([1, x.shape[0], 1]) * ww for ww in w_stats] - # w_stats is a list, with entries with shape UNITS x 1 x channels - - if compute_units is None: - inp_feat_in = inp_feat - d_trans_in = d_trans - w_stats_in = w_stats - bias_in = tf.transpose(bias) - else: - # only run on a subset of the 
activations. - mask = tf.random_uniform( - minval=0, - maxval=1, - dtype=tf.float32, - shape=inp_feat[0].shape.as_list()[0:1]) - _, ind = tf.nn.top_k(mask, k=compute_units) - ind = tf.reshape(ind, [-1, 1]) - - inp_feat_in = [tf.gather_nd(xx, ind) for xx in inp_feat] - w_stats_in = [tf.gather_nd(xx, ind) for xx in w_stats] - d_trans_in = tf.gather_nd(d_trans, ind) - bias_in = tf.gather_nd(tf.transpose(bias), ind) - - w_stats_in = tf.concat(w_stats_in, 2) - w_stats_in_norm = w_stats_in * tf.rsqrt( - tf.reduce_mean(w_stats_in**2) + 1e-6) - - act = tf.concat(inp_feat_in + [d_trans_in], 2) - act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) - - bias_dense = tf.reshape(bias_in, [-1, 1, 1]) * tf.ones([1, bs, 1]) - act = tf.concat([w_stats_in_norm, bias_dense, act], 2) - - mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3]) - act = mod(act) - - act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) - act = tf.nn.relu(act) - - act2 = ConcatUnitConv()(act) - act = act2 - - prev_act = act - for i in range(self.compute_h_layers): - mod = snt.Conv1D(output_channels=self.compute_h_size, kernel_shape=[3]) - act = mod(act) - - act = snt.BatchNorm(axis=[0, 1])(act, is_training=True) - act = tf.nn.relu(act) - - act = ConcatUnitConv()(act) - - prev_act = act - - h = act - if compute_units is not None: - shape = inp_feat[0].shape.as_list()[:1] + h.shape.as_list()[1:] - h = tf.scatter_nd(ind, h, shape=shape) - - h = tf.transpose(h, [1, 0, 2]) # [bs, units, channels] - - return h - - ## wrappers to allow forward and backward to have different variables - @snt.reuse_variables - def merge_change_w_forward(self, change_w_terms, global_prefix='', prefix=''): - return self.merge_change_w( - change_w_terms, global_prefix=global_prefix, prefix=prefix) - - @snt.reuse_variables - def merge_change_w_backward(self, change_w_terms, global_prefix='', - prefix=''): - return self.merge_change_w( - change_w_terms, global_prefix=global_prefix, prefix=prefix) - - def merge_change_w(self, change_w_terms, global_prefix='', prefix=''): - with tf.device( - self.remote_device), tf.name_scope(global_prefix + '_merge_change_w'): - w_base = change_w_terms['w_base'] - - for kk in sorted(change_w_terms.keys()): - name = global_prefix + 'change_w_plane_%s' % kk - delta_w = change_w_terms[kk] - mean, var = tf.nn.moments(delta_w, [0, 1]) - root_mean_square = tf.sqrt(tf.reduce_mean(delta_w**2) + 1e-6) - - for kk in sorted(change_w_terms.keys()): - change_w_terms[kk] = self.normalize(change_w_terms[kk]) - - initializers = { - 'w': tf.constant_initializer(0.1), - 'b': tf.zeros_initializer() - } - mod = snt.Linear( - 1, - name=global_prefix + '_weight_readout_coeffs', - initializers=initializers) - - change_w_terms_list = [ - change_w_terms[kk] for kk in sorted(change_w_terms.keys()) - ] - stack_terms = tf.stack(change_w_terms_list, axis=-1) - change_w = tf.squeeze( - snt.BatchApply(mod)(stack_terms), axis=-1) / len(change_w_terms) - - # only allow perpendicular updates, or updates which grow length. don't - # allow length to decay towards zero. - ip = tf.reduce_mean(change_w * w_base) - # zero out any updates that shrink length - ip = tf.nn.relu(ip) - change_w -= w_base * ip - change_w /= tf.sqrt(len(change_w_terms) * 1.) - - change_w = self.normalize(change_w) - - # encourage the receptive field to not collapse to 0 - change_w -= w_base / 7. 
# This is an arbitrary scale choice - - return tf.identity(change_w) - - @snt.reuse_variables - def bias_readout(self, h): - with tf.device(self.remote_device): - mod = snt.Linear(1, name='bias_readout') - ret = snt.BatchApply(mod)(h) - return tf.squeeze(ret, 2) - - @snt.reuse_variables - def next_delta(self, z, h, d): - with tf.device(self.remote_device): - return d * tf.expand_dims(tf.nn.sigmoid(z), 2) + self.to_delta_size(h) - - @utils.create_variables_in_class_scope - def get_readout_mod(self, name): - if name not in self.readout_mods: - self.readout_mods[name] = GradChannelReadout( - self.num_grad_channels, device=self.remote_device, name=name) - - return self.readout_mods[name] - - @utils.create_variables_in_class_scope - def low_rank_readout(self, name, h1, h2, psd=False): - BS = h1.shape.as_list()[0] - r_t = self.get_readout_mod(name + '_top')(h1) - if psd: - r_b = r_t - else: - r_b = self.get_readout_mod(name + '_bottom')(h2) - return tf.reduce_mean(tf.matmul(r_b, r_t, transpose_a=True), axis=0) / BS - - @snt.reuse_variables - def to_delta_size(self, h): - with tf.device(self.remote_device): - mod = snt.Linear(self.delta_dim) - return snt.BatchApply(mod)(h) - - @snt.reuse_variables - def initial_state(self, variables): - """The inner optimization state. - - Args: - variables: list of tf.Variable - list of variables to get the initial state of. - Returns: - opt_state: OptState - """ - - with tf.device(self.local_device): - initial_opt_state = self.opt.get_state(variables) - - return OptState( - variables=variables, opt_state=initial_opt_state, index=tf.constant(0)) - - @snt.reuse_variables - def compute_next_state(self, grads, learning_rate, cur_state, - cur_transformer): - - summaries = [] - with tf.device(self.local_device): - with tf.control_dependencies(summaries): - new_vars, new_state = self.opt.compute_updates( - cur_state.variables, grads, learning_rate, cur_state.opt_state) - pass - - return OptState( - variables=tuple(new_vars), - opt_state=new_state, - index=cur_state.index + 1) - - def assign_state(self, base_model, next_state): - var_ups = [ - v.assign(nv) for v, nv in utils.eqzip(base_model.get_variables(), - next_state.variables) - ] - - opt_ups = self.opt.assign_state(next_state.opt_state) - - return tf.group(opt_ups, *var_ups) - - def local_variables(self): - return list(self.opt.get_variables()) - - def remote_variables(self): - train = list( - snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) - train += list( - snt.get_variables_in_module(self, - tf.GraphKeys.MOVING_AVERAGE_VARIABLES)) - return train - - -class MoreLocalWeightUpdateWLearner(snt.AbstractModule): - """The BaseModel that the UnsupervisedUpdateRule acts on. - """ - - def __init__(self, - remote_device, - local_device, - inner_size=128, - output_size=32, - n_layers=4, - shuffle_input=True, - activation_fn=tf.nn.relu, - identical_updates=True, - **kwargs): - self.local_device = local_device - self.remote_device = remote_device - self.inner_size = inner_size - self.n_layers = n_layers - self.shuffle_input = shuffle_input - self.activation_fn = activation_fn - self.identical_updates = identical_updates - - self.output_size = output_size - if output_size == None: - self.output_size = inner_size - - self.shuffle_ind = None - - super(MoreLocalWeightUpdateWLearner, self).__init__( - name='LocalWeightUpdateWLearner', **kwargs) - - @snt.reuse_variables - def get_shuffle_ind(self, size): - if self.shuffle_ind is None: - # put the shuffle in tf memory to make the eval jobs - # re-entrant. 
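The scatter_nd call just below builds the inverse of the stored permutation: scattering tf.range(size) to the shuffled positions yields indices that undo the shuffle. A minimal NumPy sketch of the same bookkeeping (the size here is a made-up example; the real code keeps both index vectors in TF variables so eval jobs stay re-entrant):

```python
import numpy as np

size = 8  # hypothetical feature count
shuffle_ind = np.random.permutation(size)

# Inverse permutation via scatter, mirroring the tf.scatter_nd call below.
unshuffle_ind = np.empty(size, dtype=np.int64)
unshuffle_ind[shuffle_ind] = np.arange(size)

x = np.arange(size)
assert np.array_equal(x[shuffle_ind][unshuffle_ind], x)  # shuffle, then undo
```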
- shuffle_ind_val = np.random.permutation(size)
- shuffle_ind = tf.get_variable(
- name='shuffle_ind', dtype=tf.int64, initializer=shuffle_ind_val)
- unshuffle_ind = tf.scatter_nd(
- tf.reshape(shuffle_ind, [-1, 1]), tf.range(size), [size])
-
- return shuffle_ind, unshuffle_ind
-
- def _build(self, batch):
- image = batch.image
- x0 = snt.BatchFlatten()(image)
- if self.shuffle_input:
- size = x0.shape.as_list()[1]
- shuffle_ind, unshuffle_ind = self.get_shuffle_ind(size)
- x0 = tf.gather(x0, shuffle_ind, axis=1)
-
- xs = [x0]
- mods = []
- zs = []
- init = {}
-
- for i in range(self.n_layers):
- mod = common.LinearBatchNorm(
- self.inner_size, activation_fn=self.activation_fn)
- z, x = mod(xs[i])
- xs.append(x)
- zs.append(z)
- mods.append(mod)
-
- mod = common.LinearBatchNorm(
- self.output_size, activation_fn=self.activation_fn)
- z, x = mod(xs[-1])
- mods.append(mod)
-
- xs.append(x)
- zs.append(z)
-
- embedding_x = xs[-1]
-
- # make a random set of backward mods
- backward_mods = []
- for i, (x, x_p1) in enumerate(zip(xs[0:-1], xs[1:])):
- m = common.LinearBatchNorm(
- x_p1.shape.as_list()[1], activation_fn=tf.identity)
- _ = m(x)
- backward_mods.append(m)
-
- shape = image.shape.as_list()[1:4]
-
- for mods_p, prefix in [(mods, 'forward'), (backward_mods, 'backward')]:
- if self.shuffle_input:
- unshuf_w = tf.gather(mods_p[0].w, unshuffle_ind, axis=0)
- else:
- unshuf_w = mods_p[0].w
- img = summary_utils.first_layer_weight_image(unshuf_w, shape)
- tf.summary.image(prefix + '_w0_receptive_field', img)
-
- for i, m in enumerate(mods_p[0:]):
- img = summary_utils.inner_layer_weight_image(m.w)
- tf.summary.image(prefix + '_w%d' % (i + 1), img)
-
- img = summary_utils.sorted_images(image, batch.label_onehot)
- tf.summary.image('inputs', img)
-
- # log out pre-activations and activations
- for all_vis, base_name in [(xs, 'x'), (zs, 'z')]:
- for i, x_vis in enumerate(all_vis):
- img = summary_utils.activation_image(x_vis, batch.label_onehot)
- tf.summary.image('%s%d' % (base_name, i), img)
-
- embedding_x = tf.identity(embedding_x)
-
- outputs = BaseModelOutputs(
- xs=xs, zs=zs, mods=mods, batch=batch, backward_mods=backward_mods)
-
- return embedding_x, outputs
-
- def compute_next_h_d(self, meta_opt, w_bot, w_top, bias, x, z, d, backward_w):
- """Propagate error back down the network while computing the hidden state.
- """
- if z is None:
- z = x
-
- h = meta_opt.compute_h(x, z, d, bias, w_bot,
- w_top) # [bs x 60 x h_channels]
-
- # compute the next d
- delta = meta_opt.next_delta(z, h, d)
-
- if backward_w is not None:
-
- def delta_matmul(w, delta):
- d = tf.transpose(delta, [0, 2, 1]) # [bs x delta_channels x n_units]
- d = snt.BatchApply(lambda x: tf.matmul(x, w, transpose_b=True))(d)
- d = tf.transpose(d, [0, 2, 1])
- return d
-
- # replace the "backward pass" with a random matrix.
- d = delta_matmul(backward_w, delta) # [bs x 60 x delta_channels]
- var = tf.reduce_mean(tf.square(d), [2], keepdims=True)
- d = d * tf.rsqrt(1e-6 + var)
-
- return h, d
-
- def weight_change_for_layer(self, meta_opt, l_idx, w_base, b_base, upper_h,
- lower_h, upper_x, lower_x, prefix, include_bias):
- """Compute the change in weights for each layer.
- This computes something roughly analogous to a gradient.
- """ - reduce_upper_h = upper_h - reduce_lower_h = lower_h - - BS = lower_x.shape.as_list()[0] - - change_w_terms = dict() - - # initial weight value normalized - # normalize the weights per receptive-field, rather than per-matrix - weight_scale = tf.rsqrt( - tf.reduce_mean(w_base**2, axis=0, keepdims=True) + 1e-6) - w_base *= weight_scale - - change_w_terms['w_base'] = w_base - - # this will act to decay larger weights towards zero - change_w_terms['large_decay'] = w_base**2 * tf.sign(w_base) - - # term based on activations - ux0 = upper_x - tf.reduce_mean(upper_x, axis=0, keepdims=True) - uxs0 = ux0 * tf.rsqrt(tf.reduce_mean(ux0**2, axis=0, keepdims=True) + 1e-6) - change_U = tf.matmul(uxs0, uxs0, transpose_a=True) / BS - change_U /= tf.sqrt(float(change_U.shape.as_list()[0])) - - cw = tf.matmul(w_base, change_U) - cw_scale = tf.rsqrt(tf.reduce_mean(cw**2 + 1e-8)) - cw *= cw_scale - change_w_terms['decorr_x'] = cw - - # hebbian term - lx0 = lower_x - tf.reduce_mean(lower_x, axis=0, keepdims=True) - lxs0 = lx0 * tf.rsqrt(tf.reduce_mean(lx0**2, axis=0, keepdims=True) + 1e-6) - cw = tf.matmul(lxs0, uxs0, transpose_a=True) / BS - change_w_terms['hebb'] = -cw - - # 0th order term - w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_0', upper_h, - lower_h) - change_w_terms['0_order'] = w_term - - # # rbf term (weight update scaled by distance from 0) - w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_rbf', - reduce_upper_h, reduce_lower_h) - change_w_terms['rbf'] = tf.exp(-w_base**2) * w_term - - # 1st order term (weight dependent update to weights) - w_term = meta_opt.low_rank_readout(prefix + 'weight_readout_1', - reduce_upper_h, reduce_lower_h) - change_w_terms['1_order'] = w_base * w_term - - # more terms based on single layer readouts. - for update_type in ['lin', 'sqr']: - for h_source, h_source_name in [(reduce_upper_h, 'upper'), - (reduce_lower_h, 'lower')]: - structures = ['symm'] - if update_type == 'lin' and h_source_name == 'upper': - structures += ['psd'] - for structure in structures: - name = update_type + '_' + h_source_name + '_' + structure - if structure == 'symm': - change_U = meta_opt.low_rank_readout(prefix + name, h_source, - h_source) - change_U = (change_U + tf.transpose(change_U)) / tf.sqrt(2.) - change_U = tf.matrix_set_diag(change_U, - tf.zeros( - [change_U.shape.as_list()[0]])) - elif structure == 'psd': - change_U = meta_opt.low_rank_readout( - prefix + name, h_source, None, psd=True) - else: - assert False - change_U /= tf.sqrt(float(change_U.shape.as_list()[0])) - - if update_type == 'lin': - sign_multiplier = tf.ones_like(w_base) - w_base_l = w_base - elif update_type == 'sqr': - sign_multiplier = tf.sign(w_base) - w_base_l = tf.sqrt(1. + w_base**2) - 1. 
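# A note on the arithmetic of the 'sqr' branch above (a sketch): the map
# w -> tf.sqrt(1. + w**2) - 1. behaves like w**2 / 2 near zero and like
# |w| - 1 for large |w| (e.g. w = 0.1 gives ~0.005, w = 3.0 gives ~2.16),
# so the squared term stays smooth at zero while growing only linearly.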
- - if h_source_name == 'upper': - cw = tf.matmul(w_base_l, change_U) # [N^l-1 x N^l] - elif h_source_name == 'lower': - cw = tf.matmul(change_U, w_base_l) - change_w_terms[name] = cw * sign_multiplier - - - if prefix == 'forward': - change_w = meta_opt.merge_change_w_forward( - change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx) - elif prefix == 'backward': - change_w = meta_opt.merge_change_w_backward( - change_w_terms, global_prefix=prefix, prefix='l%d' % l_idx) - else: - assert (False) - - if not include_bias: - return change_w - - change_b = tf.reduce_mean(meta_opt.bias_readout(upper_h), [0]) - - # force nonlinearities to be exercised -- biases can't all be increased without bound - change_b_mean = tf.reduce_mean(change_b) - offset = -tf.nn.relu(-change_b_mean) - change_b -= offset - - var = tf.reduce_mean(tf.square(change_b), [0], keepdims=True) - change_b = (change_b) / tf.sqrt(0.5 + var) - return change_w, change_b - - def compute_next_state(self, outputs, meta_opt, previous_state): - zs = outputs.zs - xs = outputs.xs - batch = outputs.batch - mods = outputs.mods - backward_mods = outputs.backward_mods - variables = self.get_variables() - - rev_mods = mods[::-1] - rev_backward_mods = backward_mods[::-1] - rev_xs = xs[::-1] - rev_zs = zs[::-1] + [None] - - to_top = xs[-1] - - # variables that change in the loop - hs = [] - d = meta_opt.compute_top_delta(to_top) # [bs x 32 x delta_channels] - - iterator = utils.eqzip(rev_backward_mods + [None], rev_mods + [None], - [None] + rev_mods, rev_xs, rev_zs) - for (backward_mod, lower_mod, upper_mod, x, z) in iterator: - w_bot = None - if not lower_mod is None: - w_bot = previous_state.variables[variables.index(lower_mod.w)] - w_top = None - if not upper_mod is None: - w_top = previous_state.variables[variables.index(upper_mod.w)] - backward_w = None - if backward_mod is not None: - backward_w = previous_state.variables[variables.index(backward_mod.w)] - if lower_mod is not None: - bias = previous_state.variables[variables.index(lower_mod.b)] - else: - bias = tf.zeros([x.shape[1]]) - - h, d = self.compute_next_h_d( - meta_opt=meta_opt, - w_bot=w_bot, - w_top=w_top, - bias=bias, - backward_w=backward_w, - x=x, - z=z, - d=d) - hs.append(h) - - w_forward_var_idx = [variables.index(mod.w) for mod in rev_mods] - w_backward_var_idx = [variables.index(mod.w) for mod in rev_backward_mods] - b_var_idx = [variables.index(mod.b) for mod in rev_mods] - - # storage location for outputs of below loop - grads = [None for _ in previous_state.variables] - - # over-ride learning rate for perturbation variables - learning_rate = [None for _ in previous_state.variables] - - # This is a map -- no state is shared cross loop - for l_idx, w_forward_idx, w_backward_idx, b_idx, upper_h, lower_h, lower_x, upper_x in utils.eqzip( - range(len(w_forward_var_idx)), w_forward_var_idx, w_backward_var_idx, - b_var_idx, hs[:-1], hs[1:], xs[::-1][1:], xs[::-1][:-1]): - - b_base = previous_state.variables[b_idx] - change_w_forward, change_b = self.weight_change_for_layer( - meta_opt=meta_opt, - l_idx=l_idx, - w_base=previous_state.variables[w_forward_idx], - b_base=b_base, - upper_h=upper_h, - lower_h=lower_h, - upper_x=upper_x, - lower_x=lower_x, - prefix='forward', - include_bias=True) - - if self.identical_updates: - change_w_backward = change_w_forward - else: - change_w_backward = self.weight_change_for_layer( - meta_opt=meta_opt, - l_idx=l_idx, - w_base=previous_state.variables[w_backward_idx], - b_base=b_base, - upper_h=upper_h, - lower_h=lower_h, - 
upper_x=upper_x, - lower_x=lower_x, - prefix='backward', - include_bias=False) - - grads[w_forward_idx] = change_w_forward - - grads[w_backward_idx] = change_w_backward - - grads[b_idx] = change_b - - cur_transformer = common.transformer_at_state(self, - previous_state.variables) - next_state = meta_opt.compute_next_state( - grads, - learning_rate=learning_rate, - cur_state=previous_state, - cur_transformer=lambda x: cur_transformer(x)[0]) - return next_state - - def initial_state(self, meta_opt): - return meta_opt.initial_state(self.get_variables()) diff --git a/research/learning_unsupervised_learning/datasets/__init__.py b/research/learning_unsupervised_learning/datasets/__init__.py deleted file mode 100644 index 9949cd96c..000000000 --- a/research/learning_unsupervised_learning/datasets/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import mnist diff --git a/research/learning_unsupervised_learning/datasets/common.py b/research/learning_unsupervised_learning/datasets/common.py deleted file mode 100644 index 11f65ceab..000000000 --- a/research/learning_unsupervised_learning/datasets/common.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -import tensorflow as tf -import numpy as np - -ImageLabelOnehot = collections.namedtuple('ImageLabelOnehot', - ['image', 'label', 'label_onehot']) -ImageLabelOnehotRegression = collections.namedtuple( - "ImageLabelOnehotRegression", - ["image", "label", "label_onehot", "regression_target"]) diff --git a/research/learning_unsupervised_learning/datasets/mnist.py b/research/learning_unsupervised_learning/datasets/mnist.py deleted file mode 100644 index 6ee595d99..000000000 --- a/research/learning_unsupervised_learning/datasets/mnist.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -import sonnet as snt -import tensorflow as tf -from tensorflow.python.keras.datasets import mnist -from learning_unsupervised_learning.datasets import common - -class Mnist(snt.AbstractModule): - def __init__(self, device, batch_size=128, name="Mnist"): - self.device = device - self.batch_size = batch_size - - self._make_dataset() - self.iterator = None - - super(Mnist, self).__init__(name=name) - - def _make_dataset(self): - (x_train, y_train), (x_test, y_test) = mnist.load_data() - - x_train = x_train.reshape(60000, 784) - x_test = x_test.reshape(10000, 784) - - dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) - dataset = dataset.repeat() - dataset = dataset.shuffle(self.batch_size * 3) - dataset = dataset.batch(self.batch_size) - def _map_fn(image, label): - image = tf.to_float(image) / 255. - label.set_shape([self.batch_size]) - label = tf.cast(label, dtype=tf.int32) - label_onehot = tf.one_hot(label, 10) - image = tf.reshape(image, [self.batch_size, 28, 28, 1]) - return common.ImageLabelOnehot( - image=image, label=label, label_onehot=label_onehot) - - self.dataset = dataset.map(_map_fn) - - def _build(self): - if self.iterator is None: - self.iterator = self.dataset.make_one_shot_iterator() - batch = self.iterator.get_next() - [b.set_shape([self.batch_size] + b.shape.as_list()[1:]) for b in batch] - return batch - - -class TinyMnist(Mnist): - def __init__(self, *args, **kwargs): - kwargs.setdefault("name", "TinyMnist") - super(TinyMnist, self).__init__(*args, **kwargs) - - def _make_dataset(self): - super(TinyMnist, self)._make_dataset() - - def _map_fn(batch): - new_img = tf.image.resize_images(batch.image, [14, 14]) - return common.ImageLabelOnehot( - image=new_img, label=batch.label, label_onehot=batch.label_onehot) - - self.dataset = self.dataset.map(_map_fn) diff --git a/research/learning_unsupervised_learning/evaluation.py b/research/learning_unsupervised_learning/evaluation.py deleted file mode 100644 index 2ec40e99a..000000000 --- a/research/learning_unsupervised_learning/evaluation.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -"""Evaluation job. - -This sits on the side and performs evaluation on a saved model. -This is a separate process for ease of use and stability of numbers. 
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from learning_unsupervised_learning import utils - - -def construct_evaluation_graph(theta_process_fn=None, - w_learner_fn=None, - dataset_fn=None, - meta_objectives=None, - ): - """Construct the evaluation graph. - """ - if meta_objectives is None: - meta_objectives = [] - - tf.train.create_global_step() - - local_device = "" - remote_device = "" - - meta_opt = theta_process_fn( - remote_device=remote_device, local_device=local_device) - - base_model = w_learner_fn( - remote_device=remote_device, local_device=local_device) - - train_dataset = dataset_fn(device=local_device) - - # construct variables - x, outputs = base_model(train_dataset()) - initial_state = base_model.initial_state(meta_opt, max_steps=10) - next_state = base_model.compute_next_state(outputs, meta_opt, initial_state) - with utils.state_barrier_context(next_state): - train_one_step_op = meta_opt.assign_state(base_model, next_state) - - meta_objs = [] - for meta_obj_fn in meta_objectives: - meta_obj = meta_obj_fn(local_device="", remote_device="") - meta_objs.append(meta_obj) - J = meta_obj(train_dataset, lambda x: base_model(x)[0]) - tf.summary.scalar(str(meta_obj.__class__.__name__)+"_J", tf.reduce_mean(J)) - - # TODO(lmetz) this is kinda error prone. - # We should share the construction of the global variables across train and - # make sure both sets of savable variables are the same - checkpoint_vars = meta_opt.remote_variables() + [tf.train.get_global_step()] - for meta_obj in meta_objs: - checkpoint_vars.extend(meta_obj.remote_variables()) - - return checkpoint_vars, train_one_step_op, (base_model, train_dataset) diff --git a/research/learning_unsupervised_learning/meta_objective/__init__.py b/research/learning_unsupervised_learning/meta_objective/__init__.py deleted file mode 100644 index 54c46145e..000000000 --- a/research/learning_unsupervised_learning/meta_objective/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -import sklearn -import linear_regression diff --git a/research/learning_unsupervised_learning/meta_objective/linear_regression.py b/research/learning_unsupervised_learning/meta_objective/linear_regression.py deleted file mode 100644 index b49fc2529..000000000 --- a/research/learning_unsupervised_learning/meta_objective/linear_regression.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - - -"""Closed form linear regression. - -Can be differentiated through. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import numpy as np -import sonnet as snt -import tensorflow as tf - -from learning_unsupervised_learning import utils -from learning_unsupervised_learning import variable_replace - - -def solve_ridge(x, y, ridge_factor): - with tf.name_scope("solve_ridge"): - # Added a column of ones to the end of the feature matrix for bias - A = tf.concat([x, tf.ones((x.shape.as_list()[0], 1))], axis=1) - - # Analytic solution for the ridge regression loss - inv_target = tf.matmul(A, A, transpose_a=True) - np_diag_penalty = ridge_factor * np.ones( - A.shape.as_list()[1], dtype="float32") - # Remove penalty on bias component of weights - np_diag_penalty[-1] = 0. - diag_penalty = tf.constant(np_diag_penalty) - inv_target += tf.diag(diag_penalty) - - inv = tf.matrix_inverse(inv_target) - w = tf.matmul(inv, tf.matmul(A, y, transpose_a=True)) - return w - - -class LinearRegressionMetaObjective(snt.AbstractModule): - """A meta objective based on training Ridge Regression with analytic solution. - - This is used to evaluate the performance of a given feature set trained in - some other manner. - """ - - def __init__(self, - local_device=None, - remote_device=None, - zero_one_labels=True, - normalize_y_hat=True, - normalize_act=False, - averages=1, - ridge_factor=0.1, - center_y=True, - hinge_loss=False, - samples_per_class=10, - test_train_scalar=1.0, - ): - self._local_device = local_device - self._remote_device = remote_device - self.zero_one_labels = zero_one_labels - self.normalize_y_hat = normalize_y_hat - self.normalize_act = normalize_act - self.ridge_factor = ridge_factor - self.averages = averages - self.samples_per_class = samples_per_class - self.center_y=center_y - self.test_train_scalar=test_train_scalar - self.hinge_loss = hinge_loss - - self.dataset_map = {} - - super(LinearRegressionMetaObjective, - self).__init__(name="LinearRegressionMetaObjective") - - def _build(self, dataset, feature_transformer): - if self.samples_per_class is not None: - if dataset not in self.dataset_map: - # datasets are outside of frames from while loops - with tf.control_dependencies(None): - self.dataset_map[dataset] = utils.sample_n_per_class( - dataset, self.samples_per_class) - - dataset = self.dataset_map[dataset] - - stats = collections.defaultdict(list) - losses = [] - # TODO(lmetz) move this to ingraph control flow? 
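# A numpy restatement of the closed form used by solve_ridge above, as an
# illustrative sketch: w = (A^T A + diag(penalty))^{-1} A^T y, where A is x
# with a ones column appended for the bias and penalty[-1] = 0 leaves the
# bias unpenalized. Unlike the TF version, np.linalg.solve avoids forming
# an explicit matrix inverse.
import numpy as np
def np_solve_ridge(x, y, ridge_factor):
  A = np.concatenate([x, np.ones((x.shape[0], 1), dtype=x.dtype)], axis=1)
  penalty = ridge_factor * np.ones(A.shape[1], dtype=x.dtype)
  penalty[-1] = 0.  # no penalty on the bias row
  return np.linalg.solve(A.T.dot(A) + np.diag(penalty), A.T.dot(y))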
- for _ in xrange(self.averages): - loss, stat = self._build_once(dataset, feature_transformer) - losses.append(loss) - for k, v in stat.items(): - stats[k].append(v) - stats = {k: tf.add_n(v) / float(len(v)) for k, v in stats.items()} - - summary_updates = [] - for k, v in stats.items(): - tf.summary.scalar(k, v) - - with tf.control_dependencies(summary_updates): - return tf.add_n(losses) / float(len(losses)) - - def _build_once(self, dataset, feature_transformer): - with tf.device(self._local_device): - batch = dataset() - num_classes = batch.label_onehot.shape.as_list()[1] - - regression_mod = snt.Linear(num_classes) - - if self.normalize_act: - - def normalize_transformer(x): - unnorm_x = feature_transformer(x) - return tf.nn.l2_normalize(unnorm_x, 0) - - feature_transformer_wrap = normalize_transformer - else: - feature_transformer_wrap = feature_transformer - - # construct the variables of the right shape in the sonnet module by - # calling a forward pass through the regressor. - with utils.assert_no_new_variables(): - dummy_features = feature_transformer_wrap(batch) - regression_mod(dummy_features) - reg_w = regression_mod.w - reg_b = regression_mod.b - - batch_test = dataset() - all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0), [batch, batch_test]) - #all_batch = tf.concat([batch, batch_test], 0) - # Grab a new batch of data from the dataset. - features = feature_transformer_wrap(all_batch) - features, features_test = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0), features) - - def center_y(y): - y -= tf.reduce_mean(y) - y *= tf.rsqrt(tf.reduce_mean(tf.reduce_sum(y**2, axis=[1], keep_dims=True))) - return y - def get_y_vec(batch): - y_pieces = [] - if hasattr(batch, "label_onehot"): - if self.zero_one_labels: - y_pieces += [batch.label_onehot] - else: - y_pieces += [2. * batch.label_onehot - 1.] - if hasattr(batch, "regression_target"): - y_pieces += [batch.regression_target] - y = tf.concat(y_pieces, 1) - if self.center_y: - y = center_y(y) - return y - - y_train = get_y_vec(batch) - - w = solve_ridge(features, y_train, self.ridge_factor) - - # Generate features from another batch to evaluate loss on the validation - # set. This provide a less overfit signal to the learned optimizer. - y_test = get_y_vec(batch_test) - - def compute_logit(features): - # We have updated the classifier mod in previous steps, we need to - # substitute out those variables to get new values. 
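# In dense terms the substituted readout below computes
# logits = features.dot(w[:-1]) + w[-1]: the first rows of the ridge
# solution act as the weight matrix and the last row, which corresponds to
# the appended ones column, acts as the bias (a sketch of the intent).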
- replacement = collections.OrderedDict([(reg_w, w[:-1]), (reg_b, w[-1])]) - with variable_replace.variable_replace(replacement): - logits = regression_mod(features) - - return logits - - batch_size = y_train.shape.as_list()[0] - - logit_train = compute_logit(features) - logit_test_unnorm = compute_logit(features_test) - if self.normalize_y_hat: - logit_test = logit_test_unnorm / tf.sqrt( - tf.reduce_sum(logit_test_unnorm**2, axis=[1], keep_dims=True)) - else: - logit_test = logit_test_unnorm - - stats = {} - - if self.hinge_loss: - # slightly closer to the true classification loss - # any distance smaller than 1 is guaranteed to map to the correct class - mse_test = tf.reduce_sum(tf.nn.relu(tf.reduce_sum(tf.square(logit_test - y_test), axis=1)-1.)) / batch_size - else: - mse_test = tf.reduce_sum(tf.square(logit_test - y_test)) / batch_size - - stats["mse_test"] = mse_test - - mse_train = tf.reduce_sum(tf.square(logit_train - y_train)) / batch_size - stats["mse_train"] = mse_train - - is_correct_test = tf.equal(tf.argmax(logit_test, 1), tf.argmax(y_test, 1)) - accuracy_test = tf.reduce_mean(tf.cast(is_correct_test, tf.float32)) - stats["accuracy_test"] = accuracy_test - - def test_confusion_fn(): - test_confusion = tf.confusion_matrix(tf.argmax(y_test, 1), tf.argmax(logit_test, 1)) - test_confusion = tf.to_float(test_confusion) / tf.constant((logit_test.shape.as_list()[0] / float(logit_test.shape.as_list()[1])), dtype=tf.float32) - test_confusion = tf.expand_dims(tf.expand_dims(test_confusion, 0), 3) - return test_confusion - tf.summary.image("test_confusion", test_confusion_fn()) - - def train_confusion_fn(): - train_confusion = tf.confusion_matrix(tf.argmax(y_train, 1), tf.argmax(logit_train, 1)) - train_confusion = tf.to_float(train_confusion) / tf.constant((logit_train.shape.as_list()[0] / float(logit_train.shape.as_list()[1])), dtype=tf.float32) - train_confusion = tf.expand_dims(tf.expand_dims(train_confusion, 0), 3) - return train_confusion - tf.summary.image("train_confusion", train_confusion_fn()) - - is_correct = tf.equal(tf.argmax(logit_train, 1), tf.argmax(y_train, 1)) - accuracy_train = tf.reduce_mean(tf.cast(is_correct, tf.float32)) - stats["accuracy_train"] = accuracy_train - - reg = self.ridge_factor * tf.reduce_sum(tf.square(w[:-1])) / batch_size - stats["ridge_component"] = reg - - stats["total_loss"] = mse_test + reg - - loss_to_train_at = (reg+ mse_test) * self.test_train_scalar + (mse_train + reg)*(1 - self.test_train_scalar) - - loss_to_train_at = tf.identity(loss_to_train_at) - - # Minimizing the test loss should not require regurization because the - # metaobjective is solved for the training loss - return loss_to_train_at, stats - - def local_variables(self): - """List of variables that need to be updated for each evaluation. - - These variables should not be stored on a parameter server and - should be reset every computation of a meta_objective loss. - - Returns: - vars: list of tf.Variable - """ - return list( - snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) - - def remote_variables(self): - return [] diff --git a/research/learning_unsupervised_learning/meta_objective/sklearn.py b/research/learning_unsupervised_learning/meta_objective/sklearn.py deleted file mode 100644 index 4f1f2d591..000000000 --- a/research/learning_unsupervised_learning/meta_objective/sklearn.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -""" - -Can NOT be differentiated through. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import numpy as np -import sonnet as snt -import tensorflow as tf -from tensorflow.python.framework import function - -from learning_unsupervised_learning import utils - -from learning_unsupervised_learning.meta_objective import utils as meta_obj_utils - -from sklearn import svm -from sklearn import linear_model - - -def build_fit(device, model_fn, num_classes, probs=True): - - def _py_fit_predict(trX, trY, teX): - assert len(np.unique(trY)) == num_classes - model = model_fn() - model.fit(trX, trY) - trP = model.predict(trX) - teP = model.predict(teX) - if probs: - teP_probs = model.predict_log_proba(teX) - return trP.astype(np.int64), teP.astype(np.int64), teP_probs.astype( - np.float32) - else: - teP = model.predict(teX) - return trP.astype(np.int64), teP.astype(np.int64) - - def return_fn(trX, trY, teX): - with tf.device(device): - with tf.device("/cpu:0"): - if probs: - return tf.py_func( - _py_fit_predict, - [tf.identity(trX), - tf.identity(trY), - tf.identity(teX)], [tf.int64, tf.int64, tf.float32]) - else: - return tf.py_func( - _py_fit_predict, - [tf.identity(trX), - tf.identity(trY), - tf.identity(teX)], [tf.int64, tf.int64]) - - return return_fn - - -class SKLearn(meta_obj_utils.MultiTrialMetaObjective): - - def __init__( - self, - local_device=None, - remote_device=None, - averages=1, - samples_per_class=10, - probs=False, - stddev=0.01, - n_samples=10, - name="SKLearn", - ): - self._local_device = local_device - self._remote_device = remote_device - self.name = name - self.probs = probs - self.n_samples = n_samples - self.stddev = stddev - - super(SKLearn, self).__init__( - name=name, samples_per_class=samples_per_class, averages=averages) - - def _get_model(self): - raise NotImplemented() - - def _build_once(self, dataset, feature_transformer): - with tf.device(self._local_device): - tr_batch = dataset() - te_batch = dataset() - num_classes = tr_batch.label_onehot.shape.as_list()[1] - all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0), - [tr_batch, te_batch]) - features = feature_transformer(all_batch) - trX, teX = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0), - features) - trY = tf.to_int64(tr_batch.label) - trY_onehot = tf.to_int32(tr_batch.label_onehot) - teY = tf.to_int64(te_batch.label) - teY_shape = teY.shape.as_list() - - def blackbox((trX, trY, teX, teY)): - trY = tf.to_int32(tf.rint(trY)) - teY = tf.to_int32(tf.rint(teY)) - tf_fn = build_fit( - self._local_device, - self._get_model, - num_classes=num_classes, - probs=self.probs) - if self.probs: - trP, teP, teP_probs = tf_fn(trX, trY, teX) - else: - trP, teP = tf_fn(trX, trY, teX) - - teY.set_shape(teY_shape) - if self.probs: - onehot = tf.one_hot(teY, num_classes) - crossent = 
-tf.reduce_sum(onehot * teP_probs, [1]) - return tf.reduce_mean(crossent) - else: - # use error rate as the loss if no surrogate is avalible. - return 1 - tf.reduce_mean( - tf.to_float(tf.equal(teY, tf.to_int32(teP)))) - - test_loss = blackbox((trX, tf.to_float(trY), teX, tf.to_float(teY))) - - stats = {} - - tf_fn = build_fit( - self._local_device, - self._get_model, - num_classes=num_classes, - probs=self.probs) - if self.probs: - trP, teP, teP_probs = tf_fn(trX, trY, teX) - else: - trP, teP = tf_fn(trX, trY, teX) - stats["%s/accuracy_train" % self.name] = tf.reduce_mean( - tf.to_float(tf.equal(tf.to_int32(trY), tf.to_int32(trP)))) - stats["%s/accuracy_test" % self.name] = tf.reduce_mean( - tf.to_float(tf.equal(tf.to_int32(teY), tf.to_int32(teP)))) - stats["%s/test_loss" % self.name] = test_loss - return test_loss, stats - - -class LogisticRegression(SKLearn): - - def __init__(self, C=1.0, name="LogisticRegression", probs=True, **kwargs): - self.C = C - super(LogisticRegression, self).__init__(name=name, probs=probs, **kwargs) - - def _get_model(self): - return linear_model.LogisticRegression(C=self.C) diff --git a/research/learning_unsupervised_learning/meta_objective/utils.py b/research/learning_unsupervised_learning/meta_objective/utils.py deleted file mode 100644 index a29197d1d..000000000 --- a/research/learning_unsupervised_learning/meta_objective/utils.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import numpy as np -import sonnet as snt -import tensorflow as tf - -from learning_unsupervised_learning import optimizers -from learning_unsupervised_learning import utils -from learning_unsupervised_learning import summary_utils -from learning_unsupervised_learning import variable_replace - -class MultiTrialMetaObjective(snt.AbstractModule): - def __init__(self, samples_per_class, averages, **kwargs): - self.samples_per_class = samples_per_class - self.averages = averages - self.dataset_map = {} - - super(MultiTrialMetaObjective, - self).__init__(**kwargs) - - def _build(self, dataset, feature_transformer): - if self.samples_per_class is not None: - if dataset not in self.dataset_map: - # datasets are outside of frames from while loops - with tf.control_dependencies(None): - self.dataset_map[dataset] = utils.sample_n_per_class( - dataset, self.samples_per_class) - - dataset = self.dataset_map[dataset] - - stats = collections.defaultdict(list) - losses = [] - # TODO(lmetz) move this to ingraph control flow? 
- for _ in xrange(self.averages): - loss, stat = self._build_once(dataset, feature_transformer) - losses.append(loss) - for k, v in stat.items(): - stats[k].append(v) - stats = {k: tf.add_n(v) / float(len(v)) for k, v in stats.items()} - - for k, v in stats.items(): - tf.summary.scalar(k, v) - - return tf.add_n(losses) / float(len(losses)) - - def local_variables(self): - """List of variables that need to be updated for each evaluation. - - These variables should not be stored on a parameter server and - should be reset every computation of a meta_objective loss. - - Returns: - vars: list of tf.Variable - """ - return list( - snt.get_variables_in_module(self, tf.GraphKeys.TRAINABLE_VARIABLES)) - - def remote_variables(self): - return [] diff --git a/research/learning_unsupervised_learning/optimizers.py b/research/learning_unsupervised_learning/optimizers.py deleted file mode 100644 index 02c6106b1..000000000 --- a/research/learning_unsupervised_learning/optimizers.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - - -"""Optimizers for use in unrolled optimization. - -These optimizers contain a compute_updates function and its own ability to keep -track of internal state. -These functions can be used with a tf.while_loop to perform multiple training -steps per sess.run. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import abc -import collections -import tensorflow as tf -import sonnet as snt - -from learning_unsupervised_learning import utils - -from tensorflow.python.framework import ops -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import resource_variable_ops -from tensorflow.python.training import optimizer -from tensorflow.python.training import training_ops - - -class UnrollableOptimizer(snt.AbstractModule): - """Interface for optimizers that can be used in unrolled computation. - apply_gradients is derrived from compute_update and assign_state. - """ - - def __init__(self, *args, **kwargs): - super(UnrollableOptimizer, self).__init__(*args, **kwargs) - self() - - @abc.abstractmethod - def compute_updates(self, xs, gs, state=None): - """Compute next step updates for a given variable list and state. - - Args: - xs: list of tensors - The "variables" to perform an update on. - Note these must match the same order for which get_state was originally - called. - gs: list of tensors - Gradients of `xs` with respect to some loss. - state: Any - Optimizer specific state to keep track of accumulators such as momentum - terms - """ - raise NotImplementedError() - - def _build(self): - pass - - @abc.abstractmethod - def get_state(self, var_list): - """Get the state value associated with a list of tf.Variables. 
- - This state is commonly going to be a NamedTuple that contains some - mapping between variables and the state associated with those variables. - This state could be a moving momentum variable tracked by the optimizer. - - Args: - var_list: list of tf.Variable - Returns: - state: Any - Optimizer specific state - """ - raise NotImplementedError() - - def assign_state(self, state): - """Assigns the state to the optimizers internal variables. - - Args: - state: Any - Returns: - op: tf.Operation - The operation that performs the assignment. - """ - raise NotImplementedError() - - def apply_gradients(self, grad_vars): - gradients, variables = zip(*grad_vars) - state = self.get_state(variables) - new_vars, new_state = self.compute_updates(variables, gradients, state) - assign_op = self.assign_state(new_state) - op = utils.assign_variables(variables, new_vars) - return tf.group(assign_op, op, name="apply_gradients") - - -class UnrollableGradientDescentRollingOptimizer(UnrollableOptimizer): - - def __init__(self, - learning_rate, - name="UnrollableGradientDescentRollingOptimizer"): - self.learning_rate = learning_rate - super(UnrollableGradientDescentRollingOptimizer, self).__init__(name=name) - - - def compute_updates(self, xs, gs, learning_rates, state): - new_vars = [] - for x, g, lr in utils.eqzip(xs, gs, learning_rates): - if lr is None: - lr = self.learning_rate - if g is not None: - new_vars.append((x * (1 - lr) - g * lr)) - else: - new_vars.append(x) - return new_vars, state - - def get_state(self, var_list): - return tf.constant(0.0) - - def assign_state(self, state, var_list=None): - return tf.no_op() diff --git a/research/learning_unsupervised_learning/run_eval.py b/research/learning_unsupervised_learning/run_eval.py deleted file mode 100644 index dcb2529dd..000000000 --- a/research/learning_unsupervised_learning/run_eval.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -""" Script that iteratively applies the unsupervised update rule and evaluates the - -meta-objective performance. 
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import flags -from absl import app - -from learning_unsupervised_learning import evaluation -from learning_unsupervised_learning import datasets -from learning_unsupervised_learning import architectures -from learning_unsupervised_learning import summary_utils -from learning_unsupervised_learning import meta_objective - -import tensorflow as tf -import sonnet as snt - -from tensorflow.contrib.framework.python.framework import checkpoint_utils - -flags.DEFINE_string("checkpoint_dir", None, "Dir to load pretrained update rule from") -flags.DEFINE_string("train_log_dir", None, "Training log directory") - -FLAGS = flags.FLAGS - - -def train(train_log_dir, checkpoint_dir, eval_every_n_steps=10, num_steps=3000): - dataset_fn = datasets.mnist.TinyMnist - w_learner_fn = architectures.more_local_weight_update.MoreLocalWeightUpdateWLearner - theta_process_fn = architectures.more_local_weight_update.MoreLocalWeightUpdateProcess - - meta_objectives = [] - meta_objectives.append( - meta_objective.linear_regression.LinearRegressionMetaObjective) - meta_objectives.append(meta_objective.sklearn.LogisticRegression) - - checkpoint_vars, train_one_step_op, ( - base_model, dataset) = evaluation.construct_evaluation_graph( - theta_process_fn=theta_process_fn, - w_learner_fn=w_learner_fn, - dataset_fn=dataset_fn, - meta_objectives=meta_objectives) - batch = dataset() - pre_logit, outputs = base_model(batch) - - global_step = tf.train.get_or_create_global_step() - var_list = list( - snt.get_variables_in_module(base_model, tf.GraphKeys.TRAINABLE_VARIABLES)) - - tf.logging.info("all vars") - for v in tf.all_variables(): - tf.logging.info(" %s" % str(v)) - global_step = tf.train.get_global_step() - accumulate_global_step = global_step.assign_add(1) - reset_global_step = global_step.assign(0) - - train_op = tf.group( - train_one_step_op, accumulate_global_step, name="train_op") - - summary_op = tf.summary.merge_all() - - file_writer = summary_utils.LoggingFileWriter(train_log_dir, regexes=[".*"]) - if checkpoint_dir: - str_var_list = checkpoint_utils.list_variables(checkpoint_dir) - name_to_v_map = {v.op.name: v for v in tf.all_variables()} - var_list = [ - name_to_v_map[vn] for vn, _ in str_var_list if vn in name_to_v_map - ] - saver = tf.train.Saver(var_list) - missed_variables = [ - v.op.name for v in set( - snt.get_variables_in_scope("LocalWeightUpdateProcess", - tf.GraphKeys.GLOBAL_VARIABLES)) - - set(var_list) - ] - assert len(missed_variables) == 0, "Missed a theta variable." - - hooks = [] - - with tf.train.SingularMonitoredSession(master="", hooks=hooks) as sess: - - # global step should be restored from the evals job checkpoint or zero for fresh. 
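# A typical invocation of this script might look like the following
# (both paths are hypothetical placeholders):
#   python run_eval.py --checkpoint_dir=/tmp/meta_train_ckpt \
#     --train_log_dir=/tmp/eval_logs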
- step = sess.run(global_step) - - if step == 0 and checkpoint_dir: - tf.logging.info("force restore") - saver.restore(sess, checkpoint_dir) - tf.logging.info("force restore done") - sess.run(reset_global_step) - step = sess.run(global_step) - - while step < num_steps: - if step % eval_every_n_steps == 0: - s, _, step = sess.run([summary_op, train_op, global_step]) - file_writer.add_summary(s, step) - else: - _, step = sess.run([train_op, global_step]) - - -def main(argv): - train(FLAGS.train_log_dir, FLAGS.checkpoint_dir) - - -if __name__ == "__main__": - app.run(main) diff --git a/research/learning_unsupervised_learning/summary_utils.py b/research/learning_unsupervised_learning/summary_utils.py deleted file mode 100644 index d5c0fdd91..000000000 --- a/research/learning_unsupervised_learning/summary_utils.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - - -import collections -import functools -import threading -import tensorflow as tf -import matplotlib -import numpy as np -import time -import re -import math -matplotlib.use("Agg") - -import matplotlib.pyplot as plt -import scipy.signal - -from tensorflow.python.util import tf_should_use -from tensorflow.contrib.summary import summary_ops -from tensorflow.python.ops import summary_op_util -from tensorflow.contrib.summary import gen_summary_ops - -_DEBUG_DISABLE_SUMMARIES=False - -class LoggingFileWriter(tf.summary.FileWriter): - """A FileWriter that also logs things out. - - This is entirely for ease of debugging / not having to open up Tensorboard - a lot. - """ - - def __init__(self, logdir, regexes=[], **kwargs): - self.regexes = regexes - super(LoggingFileWriter, self).__init__(logdir, **kwargs) - - def add_summary(self, summary, global_step): - if type(summary) != tf.Summary: - summary_p = tf.Summary() - summary_p.ParseFromString(summary) - summary = summary_p - for s in summary.value: - for exists in [re.match(p, s.tag) for p in self.regexes]: - if exists is not None: - tf.logging.info("%d ] %s : %f", global_step, s.tag, s.simple_value) - break - super(LoggingFileWriter, self).add_summary(summary, global_step) - - -def image_grid(images, max_grid_size=4, border=1): - """Given images and N, return first N^2 images as an NxN image grid. 
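  For example (illustrative): a batch of 10 images is padded up to the next
  perfect square, 16, and with max_grid_size=4 rendered as a 4x4 grid whose
  last 6 tiles are blank padding.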
- - Args: - images: a `Tensor` of size [batch_size, height, width, channels] - max_grid_size: Maximum image grid height/width - - Returns: - Single image batch, of dim [1, h*n, w*n, c] - """ - batch_size = images.shape.as_list()[0] - to_pad = int((np.ceil(np.sqrt(batch_size)))**2 - batch_size) - images = tf.pad(images, [[0, to_pad], [0, border], [0, border], [0, 0]]) - - batch_size = images.shape.as_list()[0] - grid_size = min(int(np.sqrt(batch_size)), max_grid_size) - assert images.shape.as_list()[0] >= grid_size * grid_size - - # If we have a depth channel - if images.shape.as_list()[-1] == 4: - images = images[:grid_size * grid_size, :, :, 0:3] - depth = tf.image.grayscale_to_rgb(images[:grid_size * grid_size, :, :, 3:4]) - - images = tf.reshape(images, [-1, images.shape.as_list()[2], 3]) - split = tf.split(images, grid_size, axis=0) - depth = tf.reshape(depth, [-1, images.shape.as_list()[2], 3]) - depth_split = tf.split(depth, grid_size, axis=0) - grid = tf.concat(split + depth_split, 1) - return tf.expand_dims(grid, 0) - else: - images = images[:grid_size * grid_size, :, :, :] - images = tf.reshape( - images, [-1, images.shape.as_list()[2], - images.shape.as_list()[3]]) - split = tf.split(value=images, num_or_size_splits=grid_size, axis=0) - grid = tf.concat(split, 1) - return tf.expand_dims(grid, 0) - - -def first_layer_weight_image(weight, shape): - weight_image = tf.reshape(weight, - shape + [tf.identity(weight).shape.as_list()[1]]) - # [winx, winy, wout] - mean, var = tf.nn.moments(weight_image, [0,1,2], keep_dims=True) - #mean, var = tf.nn.moments(weight_image, [0,1], keep_dims=True) - weight_image = (weight_image - mean) / tf.sqrt(var + 1e-5) - weight_image = (weight_image + 1.0) / 2.0 - weight_image = tf.clip_by_value(weight_image, 0, 1) - weight_image = tf.transpose(weight_image, (3, 0, 1, 2)) - grid = image_grid(weight_image, max_grid_size=10) - return grid - -def inner_layer_weight_image(weight): - """Visualize a weight matrix of an inner layer. - Add padding to make it square, then visualize as a gray scale image - """ - weight = tf.identity(weight) # turn into a tensor - weight = weight / (tf.reduce_max(tf.abs(weight), [0], keep_dims=True)) - weight = tf.reshape(weight, [1]+weight.shape.as_list() + [1]) - return weight - - -def activation_image(activations, label_onehot): - """Make a row sorted by class for each activation. 
Put a black line around the activations.""" - labels = tf.argmax(label_onehot, axis=1) - _, n_classes = label_onehot.shape.as_list() - mean, var = tf.nn.moments(activations, [0, 1]) - activations = (activations - mean)/tf.sqrt(var+1e-5) - - activations = tf.clip_by_value(activations, -1, 1) - activations = (activations + 1.0) / 2.0 # shift to [0, 1] - - canvas = [] - for i in xrange(n_classes): - inds = tf.where(tf.equal(labels, i)) - - def _gather(): - return tf.squeeze(tf.gather(activations, inds), 1) - - def _empty(): - return tf.zeros([0, activations.shape.as_list()[1]], dtype=tf.float32) - - assert inds.shape.as_list()[0] is None - x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather) - canvas.append(x) - canvas.append(tf.zeros([1, activations.shape.as_list()[1]])) - canvas = tf.concat(canvas, 0) - canvas = tf.reshape(canvas, [1, activations.shape.as_list()[0]+n_classes, canvas.shape.as_list()[1], 1]) - return canvas - - -def sorted_images(images, label_onehot): - # images is [bs, x, y, c] - labels = tf.argmax(label_onehot, axis=1) - _, n_classes = label_onehot.shape.as_list() - to_stack = [] - for i in xrange(n_classes): - inds = tf.where(tf.equal(labels, i)) - - def _gather(): - return tf.squeeze(tf.gather(images, inds), 1) - - def _empty(): - return tf.zeros([0] + images.shape.as_list()[1:], dtype=tf.float32) - - assert inds.shape.as_list()[0] is None - x = tf.cond(tf.equal(tf.shape(inds)[0], 0), _empty, _gather) - to_stack.append(x) - # pad / trim all up to 10. - padded = [] - for t in to_stack: - n_found = tf.shape(t)[0] - pad = tf.pad(t[0:10], tf.stack([tf.stack([0,tf.maximum(0, 10-n_found)]), [0,0], [0,0], [0,0]])) - padded.append(pad) - - xs = [tf.concat(tf.split(p, 10), axis=1) for p in padded] - ys = tf.concat(xs, axis=2) - ys = tf.cast(tf.clip_by_value(ys, 0., 1.) * 255., tf.uint8) - return ys diff --git a/research/learning_unsupervised_learning/utils.py b/research/learning_unsupervised_learning/utils.py deleted file mode 100644 index ca56ca931..000000000 --- a/research/learning_unsupervised_learning/utils.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utilities. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import contextlib -import tensorflow as tf -import sonnet as snt -import itertools -import functools - -from tensorflow.core.framework import node_def_pb2 -from tensorflow.python.framework import device as pydev -from tensorflow.python.framework import errors -from tensorflow.python.ops import variable_scope as variable_scope_ops -from sonnet.python.modules import util as snt_util - -from tensorflow.python.util import nest - - -def eqzip(*args): - """Zip but raises error if lengths don't match. 
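  For example (illustrative): eqzip([1, 2], ['a', 'b']) returns
  [(1, 'a'), (2, 'b')], while eqzip([1], ['a', 'b']) raises ValueError.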
- - Args: - *args: list of lists or tuples - Returns: - list: the result of zip - Raises: - ValueError: when the lengths don't match - """ - - sizes = [len(x) for x in args] - if not all([sizes[0] == x for x in sizes]): - raise ValueError("Lists are of different sizes. \n %s"%str(sizes)) - return zip(*args) - - -@contextlib.contextmanager -def assert_no_new_variables(): - """Ensure that no tf.Variables are constructed inside the context. - - Yields: - None - Raises: - ValueError: if there is a variable created. - """ - num_vars = len(tf.global_variables()) - old_variables = tf.global_variables() - yield - if len(tf.global_variables()) != num_vars: - new_vars = set(tf.global_variables()) - set(old_variables) - tf.logging.error("NEW VARIABLES CREATED") - tf.logging.error(10*"=") - for v in new_vars: - tf.logging.error(v) - - raise ValueError("Variables created inside an " - "assert_no_new_variables context") - if old_variables != tf.global_variables(): - raise ValueError("Variables somehow changed inside an " - "assert_no_new_variables context." - "This means something modified the tf.global_variables()") - - -def get_variables_in_modules(module_list): - var_list = [] - for m in module_list: - var_list.extend(snt.get_variables_in_module(m)) - return var_list - - -def state_barrier_context(state): - """Return a context manager that prevents interior ops from running - unless the whole state has been computed. - - This is to prevent assign race conditions. - """ - tensors = [x for x in nest.flatten(state) if type(x) == tf.Tensor] - tarray = [x.flow for x in nest.flatten(state) if hasattr(x, "flow")] - return tf.control_dependencies(tensors + tarray) - - -def _identity_fn(tf_entity): - if hasattr(tf_entity, "identity"): - return tf_entity.identity() - else: - return tf.identity(tf_entity) - - -def state_barrier_result(state): - """Return the same state, but with a control dependency to prevent it from - being partially computed - """ - with state_barrier_context(state): - return nest.map_structure(_identity_fn, state) - - -def train_iterator(num_iterations): - """Iterator that returns an index of the current step. - This iterator runs forever if num_iterations is None - otherwise it runs for some fixed amount of steps. - """ - if num_iterations is None: - return itertools.count() - else: - return xrange(num_iterations) - - -def print_op(op, msg): - """Print a string and return an op wrapped in a control dependency to make - sure it ran.""" - print_op = tf.Print(tf.constant(0), [tf.constant(0)], msg) - return tf.group(op, print_op) - - -class MultiQueueRunner(tf.train.QueueRunner): - """A QueueRunner with multiple queues """ - def __init__(self, queues, enqueue_ops): - close_op = tf.group(* [q.close() for q in queues]) - cancel_op = tf.group( - * [q.close(cancel_pending_enqueues=True) for q in queues]) - queue_closed_exception_types = (errors.OutOfRangeError,) - - enqueue_op = tf.group(*enqueue_ops, name="multi_enqueue") - - super(MultiQueueRunner, self).__init__( - queues[0], - enqueue_ops=[enqueue_op], - close_op=close_op, - cancel_op=cancel_op, - queue_closed_exception_types=queue_closed_exception_types) - - -# This function is not elegant, but I tried so many other ways to get this to -# work and this is the only one that ended up not incuring significant overhead -# or obscure tensorflow bugs. -def sample_n_per_class(dataset, samples_per_class): - """Create a new callable / dataset object that returns batches of each with - samples_per_class per label. 
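  For example (illustrative): with 10 classes and samples_per_class=2, each
  call to the returned function dequeues a shuffled batch of 20 examples,
  exactly two per label.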
- - Args: - dataset: fn - samples_per_class: int - Returns: - function, [] -> batch where batch is the same type as the return of - dataset(). - """ - - with tf.control_dependencies(None), tf.name_scope(None): - with tf.name_scope("queue_runner/sample_n_per_class"): - batch = dataset() - num_classes = batch.label_onehot.shape.as_list()[1] - batch_size = num_classes * samples_per_class - - flatten = nest.flatten(batch) - queues = [] - enqueue_ops = [] - capacity = samples_per_class * 20 - for i in xrange(num_classes): - queue = tf.FIFOQueue( - capacity=capacity, - shapes=[f.shape.as_list()[1:] for f in flatten], - dtypes=[f.dtype for f in flatten]) - queues.append(queue) - - idx = tf.where(tf.equal(batch.label, i)) - sub_batch = [] - to_enqueue = [] - for elem in batch: - new_e = tf.gather(elem, idx) - new_e = tf.squeeze(new_e, 1) - to_enqueue.append(new_e) - - remaining = (capacity - queue.size()) - to_add = tf.minimum(tf.shape(idx)[0], remaining) - - def _enqueue(): - return queue.enqueue_many([t[:to_add] for t in to_enqueue]) - - enqueue_op = tf.cond( - tf.equal(to_add, 0), tf.no_op, _enqueue) - enqueue_ops.append(enqueue_op) - - # This has caused many deadlocks / issues. This is some logging to at least - # shed light to what is going on. - print_lam = lambda: tf.Print(tf.constant(0.0), [q.size() for q in queues], "MultiQueueRunner queues status. Has capacity %d"%capacity) - some_percent_of_time = tf.less(tf.random_uniform([]), 0.0005) - maybe_print = tf.cond(some_percent_of_time, print_lam, lambda: tf.constant(0.0)) - with tf.control_dependencies([maybe_print]): - enqueue_ops = [tf.group(e) for e in enqueue_ops] - qr = MultiQueueRunner(queues=queues, enqueue_ops=enqueue_ops) - tf.train.add_queue_runner(qr) - - def dequeue_batch(): - with tf.name_scope("sample_n_per_batch/dequeue/"): - entries = [] - for q in queues: - entries.append(q.dequeue_many(samples_per_class)) - - flat_batch = [tf.concat(x, 0) for x in zip(*entries)] - idx = tf.random_shuffle(tf.range(batch_size)) - flat_batch = [tf.gather(f, idx, axis=0) for f in flat_batch] - return nest.pack_sequence_as(batch, flat_batch) - - return dequeue_batch - -def structure_map_multi(func, values): - all_values = [nest.flatten(v) for v in values] - rets = [] - for pair in zip(*all_values): - rets.append(func(pair)) - return nest.pack_sequence_as(values[0], rets) - -def structure_map_split(func, value): - vv = nest.flatten(value) - rets = [] - for v in vv: - rets.append(func(v)) - return [nest.pack_sequence_as(value, r) for r in zip(*rets)] - -def assign_variables(targets, values): - return tf.group(*[t.assign(v) for t,v in eqzip(targets, values)], - name="assign_variables") - - -def create_variables_in_class_scope(method): - """Force the variables constructed in this class to live in the sonnet module. - Wraps a method on a sonnet module. - - For example the following will create two different variables. 
- ``` - class Mod(snt.AbstractModule): - @create_variables_in_class_scope - def dynamic_thing(self, input, name): - return snt.Linear(name)(input) - mod.dynamic_thing(x, name="module_nameA") - mod.dynamic_thing(x, name="module_nameB") - # reuse - mod.dynamic_thing(y, name="module_nameA") - ``` - """ - @functools.wraps(method) - def wrapper(obj, *args, **kwargs): - def default_context_manager(reuse=None): - variable_scope = obj.variable_scope - return tf.variable_scope(variable_scope, reuse=reuse) - - variable_scope_context_manager = getattr(obj, "_enter_variable_scope", - default_context_manager) - graph = tf.get_default_graph() - - # Temporarily enter the variable scope to capture it - with variable_scope_context_manager() as tmp_variable_scope: - variable_scope = tmp_variable_scope - - with variable_scope_ops._pure_variable_scope( - variable_scope, reuse=tf.AUTO_REUSE) as pure_variable_scope: - - name_scope = variable_scope.original_name_scope - if name_scope[-1] != "/": - name_scope += "/" - - with tf.name_scope(name_scope): - sub_scope = snt_util.to_snake_case(method.__name__) - with tf.name_scope(sub_scope) as scope: - out_ops = method(obj, *args, **kwargs) - return out_ops - - return wrapper - diff --git a/research/learning_unsupervised_learning/variable_replace.py b/research/learning_unsupervised_learning/variable_replace.py deleted file mode 100644 index ebfbeadc8..000000000 --- a/research/learning_unsupervised_learning/variable_replace.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -from __future__ import absolute_import -from __future__ import division - -import tensorflow as tf -from contextlib import contextmanager - -from tensorflow.python.ops import variable_scope - -# sanity global state to ensure non recursive. -_is_variable_replacing = [False] - -def in_variable_replace_scope(): - return _is_variable_replacing[0] - -@contextmanager -def variable_replace(replacements, no_new=True): - """ A context manager that replaces variables. - - This is a context manager that replaces all calls to - get_variable with the variable in replacements. - This function does not support recursive application. - - Args: - replacements: dict - dictionary mapping a variable to replace (the key), with - the variable one wants to replace this variable with (the value). - no_new: bool - raise an error if variables were created. - This is for sanity checking. - Raises: - ValueError: if a new variable or not all the replacements are used. - """ - # TODO(lmetz) This function is a bit scary, as it relies on monkey patching - # the call to get_variable. Ideally this can be done with variable_scope's - # custom_getter attribute, but when initially writing this that was not - # avalible. 
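# A hypothetical sketch of the same substitution via the supported
# custom_getter API (untested, for illustration only):
#   def _getter(orig_getter, *args, **kwargs):
#     var = orig_getter(*args, **kwargs)
#     return replacements.get(var, var)
#   with tf.variable_scope(tf.get_variable_scope(), custom_getter=_getter):
#     ...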
- - replacements = {k: v for k, v in replacements.items() if not k == v} - - init_vars = tf.trainable_variables() - old_get_variable = variable_scope.get_variable - old_tf_get_variable = tf.get_variable - - names_replace = {} - has_replaced_names = [] - tf.logging.vlog(2, "Trying to replace") - for k, v in replacements.items(): - tf.logging.vlog(2, k.name + " >> " + v.name) - tf.logging.vlog(2, "===") - - for k, v in replacements.items(): - strip_name = k.name.replace("/read:0", "") - strip_name = strip_name.replace(":0", "") - names_replace[strip_name] = v - # TODO(lmetz) is there a cleaner way to do this? - def new_get_variable(name, *args, **kwargs): - #print "Monkeypatch get variable run with name:", name - n = tf.get_variable_scope().name + "/" + name - #print "Monkeypatch get variable run with name:", n - if n in names_replace: - has_replaced_names.append(n) - return names_replace[n] - else: - return old_get_variable(name, *args, **kwargs) - - # perform the monkey patch - if _is_variable_replacing[0] == True: - raise ValueError("No recursive calling to variable replace allowed.") - - variable_scope.get_variable = new_get_variable - tf.get_variable = new_get_variable - - _is_variable_replacing[0] = True - - yield - - if set(has_replaced_names) != set(names_replace.keys()): - print "Didn't use all replacements" - print "replaced variables that are not requested??" - print "===" - for n in list(set(has_replaced_names) - set(names_replace.keys())): - print n - print "Missed replacing variables" - print "===" - for n in list(set(names_replace.keys()) - set(has_replaced_names)): - print n, "==>", names_replace[n].name - raise ValueError("Fix this -- see stderr") - - # undo the monkey patch - tf.get_variable = old_tf_get_variable - variable_scope.get_variable = old_get_variable - - _is_variable_replacing[0] = False - - final_vars = tf.trainable_variables() - assert set(init_vars) == set(final_vars), "trainable variables changed" diff --git a/research/lexnet_nc/README.md b/research/lexnet_nc/README.md deleted file mode 100644 index 4ecb5d398..000000000 --- a/research/lexnet_nc/README.md +++ /dev/null @@ -1,215 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# LexNET for Noun Compound Relation Classification - -This is a [Tensorflow](http://www.tensorflow.org/) implementation of the LexNET -algorithm for classifying relationships, specifically applied to classifying the -relationships that hold between noun compounds: - -* *olive oil* is oil that is *made from* olives -* *cooking oil* which is oil that is *used for* cooking -* *motor oil* is oil that is *contained in* a motor - -The model is a supervised classifier that predicts the relationship that holds -between the constituents of a two-word noun compound using: - -1. A neural "paraphrase" of each syntactic dependency path that connects the - constituents in a large corpus. For example, given a sentence like *This fine - oil is made from first-press olives*, the dependency path is something like - `oil from POBJ> olive`. -2. The distributional information provided by the individual words; i.e., the - word embeddings of the two consituents. -3. The distributional signal provided by the compound itself; i.e., the - embedding of the noun compound in context. 
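-
-(For *olive oil*, say, signal (1) contributes corpus paths such as `oil from
-POBJ> olive`, signal (2) contributes the word embeddings of *olive* and *oil*,
-and signal (3) contributes an embedding of *olive oil* itself as a single
-unit.)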
-
-The model includes several variants: the *path-based model* uses (1) alone, the
-*distributional model* uses (2) alone, and the *integrated model* uses (1) and
-(2). The *distributional-nc model* and the *integrated-nc* model each add (3).
-
-Training a model requires the following:
-
-1. A collection of noun compounds that have been labeled using a *relation
-   inventory*. The inventory describes the specific relationships that you'd
-   like the model to differentiate (e.g. *part of* versus *composed of* versus
-   *purpose*), and generally may consist of tens of classes. You can download
-   the dataset used in the paper from
-   [here](https://vered1986.github.io/papers/Tratz2011_Dataset.tar.gz).
-2. A collection of word embeddings: the path-based model uses the word
-   embeddings as part of the path representation, and the distributional models
-   use the word embeddings directly as prediction features.
-3. The path-based model requires a collection of syntactic dependency parses
-   that connect the constituents for each noun compound. To generate these,
-   you'll need a corpus from which to train this data; we used Wikipedia and the
-   [LDC GigaWord5](https://catalog.ldc.upenn.edu/LDC2011T07) corpora.
-
-# Contents
-
-The following source code is included here:
-
-* `learn_path_embeddings.py` is a script that trains and evaluates a path-based
-  model to predict a noun-compound relationship given labeled noun-compounds and
-  dependency parse paths.
-* `learn_classifier.py` is a script that trains and evaluates a classifier based
-  on any combination of paths, word embeddings, and noun-compound embeddings.
-* `get_indicative_paths.py` is a script that generates the most indicative
-  syntactic dependency paths for a particular relationship.
-
-Also included are utilities for preparing data for training:
-
-* `text_embeddings_to_binary.py` converts a text file containing word embeddings
-  into a binary file that is quicker to load.
-* `extract_paths.py` finds all the dependency paths that connect words in a
-  corpus.
-* `sorted_paths_to_examples.py` processes the output of `extract_paths.py` to
-  produce summarized training data.
-
-This code (in particular, the utilities used to prepare the data) differs from
-the code that was used to prepare data for the paper. Notably, the paper used a
-proprietary dependency parser, whereas the code here uses spaCy.
-
-# Dependencies
-
-* [TensorFlow](http://www.tensorflow.org/): see detailed installation
-  instructions at that site.
-* [SciKit Learn](http://scikit-learn.org/): you can probably just install this
-  with `pip install sklearn`.
-* [SpaCy](https://spacy.io/): `pip install spacy` ought to do the trick, along
-  with the English model.
-
-# Creating the Model
-
-This section describes the steps necessary to create and evaluate the model
-described in the paper.
-
-## Generate Path Data
-
-To begin, you need three text files:
-
-1. **Corpus**. This file should contain natural language sentences, written with
-   one sentence per line. For purposes of exposition, we'll assume that you
-   have English Wikipedia serialized this way in `${HOME}/data/wiki.txt`.
-2. **Labeled Noun Compound Pairs**. This file contains (modifier, head, label)
-   tuples, tab-separated, with one per line. The *label* represents the
-   relationship between the head and the modifier; e.g., if `purpose` is one of
-   your labels, you could include the line `tooth paste purpose`
-   (tab-separated).
-3. **Word Embeddings**.
We used the
-   [GloVe](https://nlp.stanford.edu/projects/glove/) word embeddings; in
-   particular the 6B token, 300d variant. We'll assume you have this file as
-   `${HOME}/data/glove.6B.300d.txt`.
-
-We first processed the embeddings from their text format into something that we
-can load a little bit more quickly:
-
-    ./text_embeddings_to_binary.py \
-      --input ${HOME}/data/glove.6B.300d.txt \
-      --output_vocab ${HOME}/data/vocab.txt \
-      --output_npy ${HOME}/data/glove.6B.300d.npy
-
-Next, we'll extract all the dependency parse paths connecting our labeled pairs
-from the corpus. This process takes a *looooong* time, but is trivially
-parallelized using map-reduce if you have access to that technology.
-
-    ./extract_paths.py \
-      --corpus ${HOME}/data/wiki.txt \
-      --labeled_pairs ${HOME}/data/labeled-pairs.tsv \
-      --output ${HOME}/data/paths.tsv
-
-The file it produces (`paths.tsv`) is a tab-separated file that contains the
-modifier, the head, the label, the encoded path, and the sentence from which the
-path was drawn. (This last is mostly for sanity checking.) A sample row might
-look something like this (where newlines would actually be tab characters):
-
-    navy
-    captain
-    owner_emp_use
-    /PROPN/dobj/>::enter/VERB/ROOT/^::follow/VERB/advcl/<::in/ADP/prep/<::footstep/NOUN/pobj/<::of/ADP/prep/<::father/NOUN/pobj/<::bover/PROPN/appos/<::/PROPN/compound/<
-    He entered the Royal Navy following in the footsteps of his father Captain John Bover and two of his elder brothers as volunteer aboard HMS Perseus
-
-This file must be sorted as follows:
-
-    sort -k1,3 -t$'\t' paths.tsv > sorted.paths.tsv
-
-In particular, rows with the same modifier, head, and label must appear
-contiguously.
-
-We next create a file that contains all the relation labels from our original
-labeled pairs:
-
-    awk 'BEGIN {FS="\t"} {print $3}' < ${HOME}/data/labeled-pairs.tsv \
-      | sort -u > ${HOME}/data/relations.txt
-
-With these in hand, we're ready to produce the train, validation, and test data:
-
-    ./sorted_paths_to_examples.py \
-      --input ${HOME}/data/sorted.paths.tsv \
-      --vocab ${HOME}/data/vocab.txt \
-      --relations ${HOME}/data/relations.txt \
-      --splits ${HOME}/data/splits.txt \
-      --output_dir ${HOME}/data
-
-Here, `splits.txt` is a file that indicates which "split" (train, test, or
-validation) you want the pair to appear in. It should be a tab-separated file
-which contains the modifier, head, and the dataset (`train`, `test`, or `val`)
-into which the pair should be placed; e.g.:
-
-    tooth paste train
-    banana seat test
-
-The program will produce a separate file for each dataset split in the directory
-specified by `--output_dir`. Each file contains `tf.train.Example` protocol
-buffers encoded using the `TFRecord` file format.
-
-## Create Path Embeddings
-
-Now we're ready to train the path embeddings using `learn_path_embeddings.py`:
-
-    ./learn_path_embeddings.py \
-      --train ${HOME}/data/train.tfrecs.gz \
-      --val ${HOME}/data/val.tfrecs.gz \
-      --test ${HOME}/data/test.tfrecs.gz \
-      --embeddings ${HOME}/data/glove.6B.300d.npy \
-      --relations ${HOME}/data/relations.txt \
-      --output_dir ${HOME}/data/path-embeddings \
-      --logdir /tmp/learn_path_embeddings
-
-The path embeddings will be placed at the location specified by `--output_dir`.
-
-## Train classifiers
-
-Train classifiers and evaluate on the validation and test data using the
-`learn_classifier.py` script. This shell script fragment will iterate through
-each dataset, split, corpus, and model type to train and evaluate classifiers.
-
-    LOGDIR=/tmp/learn_classifier
-    for DATASET in tratz/fine_grained tratz/coarse_grained ; do
-      for SPLIT in random lexical_head lexical_mod lexical_full ; do
-        for CORPUS in wiki_gigawords ; do
-          for MODEL in dist dist-nc path integrated integrated-nc ; do
-            # Filename for the log that will contain the classifier results.
-            LOGFILE=$(echo "${DATASET}.${SPLIT}.${CORPUS}.${MODEL}.log" | sed -e "s,/,.,g")
-            python learn_classifier.py \
-              --dataset_dir ~/lexnet/datasets \
-              --dataset "${DATASET}" \
-              --corpus "${SPLIT}/${CORPUS}" \
-              --embeddings_base_path ~/lexnet/embeddings \
-              --logdir ${LOGDIR} \
-              --input "${MODEL}" > "${LOGDIR}/${LOGFILE}"
-          done
-        done
-      done
-    done
-
-The log file will contain the final performance (precision, recall, F1) on the
-train, dev, and test sets, and will include a confusion matrix for each.
-
-# Contact
-
-If you have any questions, issues, or suggestions, feel free to contact either
-@vered1986 or @waterson.
-
-If you use this code for any published research, please include the following citation:
-
-Olive Oil Is Made of Olives, Baby Oil Is Made for Babies: Interpreting Noun Compounds Using Paraphrases in a Neural Model.
-Vered Shwartz and Chris Waterson. NAACL 2018. [link](https://arxiv.org/pdf/1803.08073.pdf).
diff --git a/research/lexnet_nc/extract_paths.py b/research/lexnet_nc/extract_paths.py
deleted file mode 100755
index 833eec2c1..000000000
--- a/research/lexnet_nc/extract_paths.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017, 2018 Google, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import itertools
-import sys
-
-import spacy
-import tensorflow as tf
-
-tf.flags.DEFINE_string('corpus', '', 'Filename of corpus')
-tf.flags.DEFINE_string('labeled_pairs', '', 'Filename of labeled pairs')
-tf.flags.DEFINE_string('output', '', 'Filename of output file')
-FLAGS = tf.flags.FLAGS
-
-
-def get_path(mod_token, head_token):
-  """Returns the path between a modifier token and a head token."""
-  # Compute the path from the root to each token.
-  mod_ancestors = list(reversed(list(mod_token.ancestors)))
-  head_ancestors = list(reversed(list(head_token.ancestors)))
-
-  # If the paths don't start at the same place (odd!) then there is no path at
-  # all.
-  if (not mod_ancestors or not head_ancestors
-      or mod_ancestors[0] != head_ancestors[0]):
-    return None
-
-  # Eject elements from the common path until we reach the first differing
-  # ancestor.
-  ix = 1
-  while (ix < len(mod_ancestors) and ix < len(head_ancestors)
-         and mod_ancestors[ix] == head_ancestors[ix]):
-    ix += 1
-
-  # Construct the path. TODO: add "satellites", possibly honor sentence
-  # ordering between modifier and head rather than just always traversing from
-  # the modifier to the head?
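-  # Each edge below is encoded as lemma/POS/dep-label/direction, and the edges
-  # are joined with '::'. The modifier and head lemmas themselves are elided,
-  # so their edges begin with an empty lemma field (e.g. '/PROPN/compound/<'),
-  # as in the sample row shown in the README.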
- path = ['/'.join(('', mod_token.pos_, mod_token.dep_, '>'))] - - path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '>')) - for tok in reversed(mod_ancestors[ix:])] - - root_token = mod_ancestors[ix - 1] - path += ['/'.join((root_token.lemma_, root_token.pos_, root_token.dep_, '^'))] - - path += ['/'.join((tok.lemma_, tok.pos_, tok.dep_, '<')) - for tok in head_ancestors[ix:]] - - path += ['/'.join(('', head_token.pos_, head_token.dep_, '<'))] - - return '::'.join(path) - - -def main(_): - nlp = spacy.load('en_core_web_sm') - - # Grab the set of labeled pairs for which we wish to collect paths. - with tf.gfile.GFile(FLAGS.labeled_pairs) as fh: - parts = (l.decode('utf-8').split('\t') for l in fh.read().splitlines()) - labeled_pairs = {(mod, head): rel for mod, head, rel in parts} - - # Create a mapping from each head to the modifiers that are used with it. - mods_for_head = { - head: set(hm[1] for hm in head_mods) - for head, head_mods in itertools.groupby( - sorted((head, mod) for (mod, head) in labeled_pairs.iterkeys()), - lambda (head, mod): head)} - - # Collect all the heads that we know about. - heads = set(mods_for_head.keys()) - - # For each sentence that contains a (head, modifier) pair that's in our set, - # emit the dependency path that connects the pair. - out_fh = sys.stdout if not FLAGS.output else tf.gfile.GFile(FLAGS.output, 'w') - in_fh = sys.stdin if not FLAGS.corpus else tf.gfile.GFile(FLAGS.corpus) - - num_paths = 0 - for line, sen in enumerate(in_fh, start=1): - if line % 100 == 0: - print('\rProcessing line %d: %d paths' % (line, num_paths), - end='', file=sys.stderr) - - sen = sen.decode('utf-8').strip() - doc = nlp(sen) - - for head_token in doc: - head_text = head_token.text.lower() - if head_text in heads: - mods = mods_for_head[head_text] - for mod_token in doc: - mod_text = mod_token.text.lower() - if mod_text in mods: - path = get_path(mod_token, head_token) - if path: - label = labeled_pairs[(mod_text, head_text)] - line = '\t'.join((mod_text, head_text, label, path, sen)) - print(line.encode('utf-8'), file=out_fh) - num_paths += 1 - - out_fh.close() - -if __name__ == '__main__': - tf.app.run() diff --git a/research/lexnet_nc/get_indicative_paths.py b/research/lexnet_nc/get_indicative_paths.py deleted file mode 100755 index f8b34cca2..000000000 --- a/research/lexnet_nc/get_indicative_paths.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Extracts paths that are indicative of each relation.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import tensorflow as tf - -from . import path_model -from . 
import lexnet_common - -tf.flags.DEFINE_string( - 'dataset_dir', 'datasets', - 'Dataset base directory') - -tf.flags.DEFINE_string( - 'dataset', - 'tratz/fine_grained', - 'Subdirectory containing the corpus directories: ' - 'subdirectory of dataset_dir') - -tf.flags.DEFINE_string( - 'corpus', 'random/wiki', - 'Subdirectory containing the corpus and split: ' - 'subdirectory of dataset_dir/dataset') - -tf.flags.DEFINE_string( - 'embeddings_base_path', 'embeddings', - 'Embeddings base directory') - -tf.flags.DEFINE_string( - 'logdir', 'logdir', - 'Directory of model output files') - -tf.flags.DEFINE_integer( - 'top_k', 20, 'Number of top paths to extract') - -tf.flags.DEFINE_float( - 'threshold', 0.8, 'Threshold above which to consider paths as indicative') - -FLAGS = tf.flags.FLAGS - - -def main(_): - hparams = path_model.PathBasedModel.default_hparams() - - # First things first. Load the path data. - path_embeddings_file = 'path_embeddings/{dataset}/{corpus}'.format( - dataset=FLAGS.dataset, - corpus=FLAGS.corpus) - - path_dim = (hparams.lemma_dim + hparams.pos_dim + - hparams.dep_dim + hparams.dir_dim) - - path_embeddings, path_to_index = path_model.load_path_embeddings( - os.path.join(FLAGS.embeddings_base_path, path_embeddings_file), - path_dim) - - # Load and count the classes so we can correctly instantiate the model. - classes_filename = os.path.join( - FLAGS.dataset_dir, FLAGS.dataset, 'classes.txt') - - with open(classes_filename) as f_in: - classes = f_in.read().splitlines() - - hparams.num_classes = len(classes) - - # We need the word embeddings to instantiate the model, too. - print('Loading word embeddings...') - lemma_embeddings = lexnet_common.load_word_embeddings( - FLAGS.embeddings_base_path, hparams.lemma_embeddings_file) - - # Instantiate the model. - with tf.Graph().as_default(): - with tf.variable_scope('lexnet'): - instance = tf.placeholder(dtype=tf.string) - model = path_model.PathBasedModel( - hparams, lemma_embeddings, instance) - - with tf.Session() as session: - model_dir = '{logdir}/results/{dataset}/path/{corpus}'.format( - logdir=FLAGS.logdir, - dataset=FLAGS.dataset, - corpus=FLAGS.corpus) - - saver = tf.train.Saver() - saver.restore(session, os.path.join(model_dir, 'best.ckpt')) - - path_model.get_indicative_paths( - model, session, path_to_index, path_embeddings, classes, - model_dir, FLAGS.top_k, FLAGS.threshold) - -if __name__ == '__main__': - tf.app.run() diff --git a/research/lexnet_nc/learn_classifier.py b/research/lexnet_nc/learn_classifier.py deleted file mode 100755 index ec2840295..000000000 --- a/research/lexnet_nc/learn_classifier.py +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-"""Trains the integrated LexNET classifier."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-
-import lexnet_common
-import lexnet_model
-import path_model
-from sklearn import metrics
-import tensorflow as tf
-
-tf.flags.DEFINE_string(
-    'dataset_dir', 'datasets',
-    'Dataset base directory')
-
-tf.flags.DEFINE_string(
-    'dataset', 'tratz/fine_grained',
-    'Subdirectory containing the corpus directories: '
-    'subdirectory of dataset_dir')
-
-tf.flags.DEFINE_string(
-    'corpus', 'wiki/random',
-    'Subdirectory containing the corpus and split: '
-    'subdirectory of dataset_dir/dataset')
-
-tf.flags.DEFINE_string(
-    'embeddings_base_path', 'embeddings',
-    'Embeddings base directory')
-
-tf.flags.DEFINE_string(
-    'logdir', 'logdir',
-    'Directory of model output files')
-
-tf.flags.DEFINE_string('hparams', '', 'Hyper-parameters')
-
-tf.flags.DEFINE_string(
-    'input', 'integrated',
-    'The model (dist/dist-nc/path/integrated/integrated-nc)')
-
-FLAGS = tf.flags.FLAGS
-
-
-def main(_):
-  # Pick up any one-off hyper-parameters.
-  hparams = lexnet_model.LexNETModel.default_hparams()
-  hparams.corpus = FLAGS.corpus
-  hparams.input = FLAGS.input
-  hparams.path_embeddings_file = 'path_embeddings/%s/%s' % (
-      FLAGS.dataset, FLAGS.corpus)
-
-  input_dir = hparams.input if hparams.input != 'path' else 'path_classifier'
-
-  # Set the number of classes
-  classes_filename = os.path.join(
-      FLAGS.dataset_dir, FLAGS.dataset, 'classes.txt')
-  with open(classes_filename) as f_in:
-    classes = f_in.read().splitlines()
-
-  hparams.num_classes = len(classes)
-  print('Model will predict into %d classes' % hparams.num_classes)
-
-  # Get the datasets
-  train_set, val_set, test_set = (
-      os.path.join(
-          FLAGS.dataset_dir, FLAGS.dataset, FLAGS.corpus,
-          filename + '.tfrecs.gz')
-      for filename in ['train', 'val', 'test'])
-
-  print('Running with hyper-parameters: {}'.format(hparams))
-
-  # Load the instances
-  print('Loading instances...')
-  opts = tf.python_io.TFRecordOptions(
-      compression_type=tf.python_io.TFRecordCompressionType.GZIP)
-  train_instances = list(tf.python_io.tf_record_iterator(train_set, opts))
-  val_instances = list(tf.python_io.tf_record_iterator(val_set, opts))
-  test_instances = list(tf.python_io.tf_record_iterator(test_set, opts))
-
-  # Load the word embeddings
-  print('Loading word embeddings...')
-  relata_embeddings, path_embeddings, nc_embeddings, path_to_index = (
-      None, None, None, None)
-  if hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']:
-    relata_embeddings = lexnet_common.load_word_embeddings(
-        FLAGS.embeddings_base_path, hparams.relata_embeddings_file)
-
-  if hparams.input in ['path', 'integrated', 'integrated-nc']:
-    path_embeddings, path_to_index = path_model.load_path_embeddings(
-        os.path.join(FLAGS.embeddings_base_path, hparams.path_embeddings_file),
-        hparams.path_dim)
-
-  if hparams.input in ['dist-nc', 'integrated-nc']:
-    nc_embeddings = lexnet_common.load_word_embeddings(
-        FLAGS.embeddings_base_path, hparams.nc_embeddings_file)
-
-  # Define the graph and the model
-  with tf.Graph().as_default():
-    model = lexnet_model.LexNETModel(
-        hparams, relata_embeddings, path_embeddings,
-        nc_embeddings, path_to_index)
-
-    # Initialize a session and start training
-    session = tf.Session()
-    session.run(tf.global_variables_initializer())
-
-    # Initialize the path mapping
-    if hparams.input in ['path', 'integrated', 
'integrated-nc']:
-      session.run(tf.tables_initializer())
-      session.run(model.initialize_path_op, {
-          model.path_initial_value_t: path_embeddings
-      })
-
-    # Initialize the NC embeddings
-    if hparams.input in ['dist-nc', 'integrated-nc']:
-      session.run(model.initialize_nc_op, {
-          model.nc_initial_value_t: nc_embeddings
-      })
-
-    # Load the labels
-    print('Loading labels...')
-    train_labels = model.load_labels(session, train_instances)
-    val_labels = model.load_labels(session, val_instances)
-    test_labels = model.load_labels(session, test_instances)
-
-    save_path = '{logdir}/results/{dataset}/{input}/{corpus}'.format(
-        logdir=FLAGS.logdir, dataset=FLAGS.dataset,
-        corpus=model.hparams.corpus, input=input_dir)
-
-    if not os.path.exists(save_path):
-      os.makedirs(save_path)
-
-    # Train the model
-    print('Training the model...')
-    model.fit(session, train_instances, epoch_completed,
-              val_instances, val_labels, save_path)
-
-    # Print the best performance on the validation set
-    print('Best performance on the validation set: F1=%.3f' %
-          epoch_completed.best_f1)
-
-    # Evaluate on the train, validation, and test sets
-    lexnet_common.full_evaluation(model, session, train_instances, train_labels,
-                                  'Train', classes)
-    lexnet_common.full_evaluation(model, session, val_instances, val_labels,
-                                  'Validation', classes)
-    test_predictions = lexnet_common.full_evaluation(
-        model, session, test_instances, test_labels, 'Test', classes)
-
-    # Write the test predictions to a file
-    predictions_file = os.path.join(save_path, 'test_predictions.tsv')
-    print('Saving test predictions to %s' % predictions_file)
-    test_pairs = model.load_pairs(session, test_instances)
-    lexnet_common.write_predictions(test_pairs, test_labels, test_predictions,
-                                    classes, predictions_file)
-
-
-def epoch_completed(model, session, epoch, epoch_loss,
-                    val_instances, val_labels, save_path):
-  """Runs every time an epoch completes.
-
-  Prints the performance on the validation set, and updates the saved model if
-  its performance is better than the previous best. If the performance dropped
-  sharply, tells the training to stop.
-
-  Args:
-    model: The currently trained LexNET model.
-    session: The current TensorFlow session.
-    epoch: The epoch number.
-    epoch_loss: The current epoch loss.
-    val_instances: The validation set instances (evaluation between epochs).
-    val_labels: The validation set labels (for evaluation between epochs).
-    save_path: Where to save the model.
-
-  Returns:
-    whether the training should stop.
-  """
-  stop_training = False
-
-  # Evaluate on the validation set
-  val_pred = model.predict(session, val_instances)
-  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
-      val_labels, val_pred, average='weighted')
-  print(
-      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
-          epoch + 1, model.hparams.num_epochs, epoch_loss,
-          precision, recall, f1))
-
-  # If the F1 is much smaller than the previous one, stop training. Else, if
-  # it's bigger, save the model.
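-  # (The 0.08 drop in F1 is a fixed early-stopping tolerance; the same margin
-  # is used in learn_path_embeddings.py.)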
- if f1 < epoch_completed.best_f1 - 0.08: - stop_training = True - - if f1 > epoch_completed.best_f1: - saver = tf.train.Saver() - checkpoint_filename = os.path.join(save_path, 'best.ckpt') - print('Saving model in: %s' % checkpoint_filename) - saver.save(session, checkpoint_filename) - print('Model saved in file: %s' % checkpoint_filename) - epoch_completed.best_f1 = f1 - - return stop_training - -epoch_completed.best_f1 = 0 - -if __name__ == '__main__': - tf.app.run(main) diff --git a/research/lexnet_nc/learn_path_embeddings.py b/research/lexnet_nc/learn_path_embeddings.py deleted file mode 100755 index 480378f4a..000000000 --- a/research/lexnet_nc/learn_path_embeddings.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Trains the LexNET path-based model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import lexnet_common -import path_model -from sklearn import metrics -import tensorflow as tf - -tf.flags.DEFINE_string('train', '', 'training dataset, tfrecs') -tf.flags.DEFINE_string('val', '', 'validation dataset, tfrecs') -tf.flags.DEFINE_string('test', '', 'test dataset, tfrecs') -tf.flags.DEFINE_string('embeddings', '', 'embeddings, npy') -tf.flags.DEFINE_string('relations', '', 'file containing relation labels') -tf.flags.DEFINE_string('output_dir', '', 'output directory for path embeddings') -tf.flags.DEFINE_string('logdir', '', 'directory for model training') -FLAGS = tf.flags.FLAGS - - -def main(_): - # Pick up any one-off hyper-parameters. 
-  hparams = path_model.PathBasedModel.default_hparams()
-
-  with open(FLAGS.relations) as fh:
-    relations = fh.read().splitlines()
-
-  hparams.num_classes = len(relations)
-  print('Model will predict into %d classes' % hparams.num_classes)
-
-  print('Running with hyper-parameters: {}'.format(hparams))
-
-  # Load the instances
-  print('Loading instances...')
-  opts = tf.python_io.TFRecordOptions(
-      compression_type=tf.python_io.TFRecordCompressionType.GZIP)
-
-  train_instances = list(tf.python_io.tf_record_iterator(FLAGS.train, opts))
-  val_instances = list(tf.python_io.tf_record_iterator(FLAGS.val, opts))
-  test_instances = list(tf.python_io.tf_record_iterator(FLAGS.test, opts))
-
-  # Load the word embeddings
-  print('Loading word embeddings...')
-  lemma_embeddings = lexnet_common.load_word_embeddings(FLAGS.embeddings)
-
-  # Define the graph and the model
-  with tf.Graph().as_default():
-    with tf.variable_scope('lexnet'):
-      options = tf.python_io.TFRecordOptions(
-          compression_type=tf.python_io.TFRecordCompressionType.GZIP)
-      reader = tf.TFRecordReader(options=options)
-      _, train_instance = reader.read(
-          tf.train.string_input_producer([FLAGS.train]))
-      shuffled_train_instance = tf.train.shuffle_batch(
-          [train_instance],
-          batch_size=1,
-          num_threads=1,
-          capacity=len(train_instances),
-          min_after_dequeue=100,
-      )[0]
-
-      train_model = path_model.PathBasedModel(
-          hparams, lemma_embeddings, shuffled_train_instance)
-
-    with tf.variable_scope('lexnet', reuse=True):
-      val_instance = tf.placeholder(dtype=tf.string)
-      val_model = path_model.PathBasedModel(
-          hparams, lemma_embeddings, val_instance)
-
-    # Initialize a session and start training
-    best_model_saver = tf.train.Saver()
-    f1_t = tf.placeholder(tf.float32)
-    best_f1_t = tf.Variable(0.0, trainable=False, name='best_f1')
-    assign_best_f1_op = tf.assign(best_f1_t, f1_t)
-
-    supervisor = tf.train.Supervisor(
-        logdir=FLAGS.logdir,
-        global_step=train_model.global_step)
-
-    with supervisor.managed_session() as session:
-      # Load the labels
-      print('Loading labels...')
-      val_labels = train_model.load_labels(session, val_instances)
-
-      # Train the model
-      print('Training the model...')
-
-      while True:
-        step = session.run(train_model.global_step)
-        epoch = (step + len(train_instances) - 1) // len(train_instances)
-        if epoch > hparams.num_epochs:
-          break
-
-        print('Starting epoch %d (step %d)...' % (1 + epoch, step))
-
-        epoch_loss = train_model.run_one_epoch(session, len(train_instances))
-
-        best_f1 = session.run(best_f1_t)
-        f1 = epoch_completed(val_model, session, epoch, epoch_loss,
-                             val_instances, val_labels, best_model_saver,
-                             FLAGS.logdir, best_f1)
-
-        if f1 > best_f1:
-          session.run(assign_best_f1_op, {f1_t: f1})
-
-        if f1 < best_f1 - 0.08:
-          tf.logging.info('Stopping training after %d epochs.\n' % epoch)
-          break
-
-      # Print the best performance on the validation set
-      best_f1 = session.run(best_f1_t)
-      print('Best performance on the validation set: F1=%.3f' % best_f1)
-
-      # Save the path embeddings
-      print('Computing the path embeddings...')
-      instances = train_instances + val_instances + test_instances
-      path_index, path_vectors = path_model.compute_path_embeddings(
-          val_model, session, instances)
-
-      if not os.path.exists(FLAGS.output_dir):
-        os.makedirs(FLAGS.output_dir)
-
-      path_model.save_path_embeddings(
-          val_model, path_vectors, path_index, FLAGS.output_dir)
-
-
-def epoch_completed(model, session, epoch, epoch_loss,
-                    val_instances, val_labels, saver, save_path, best_f1):
-  """Runs every time an epoch completes.
-
-  Prints the performance on the validation set, and saves the model if its
-  performance is better than the previous best. The caller decides, based on
-  the returned F1, whether to stop training.
-
-  Args:
-    model: The currently trained path-based model.
-    session: The current TensorFlow session.
-    epoch: The epoch number.
-    epoch_loss: The current epoch loss.
-    val_instances: The validation set instances (evaluation between epochs).
-    val_labels: The validation set labels (for evaluation between epochs).
-    saver: tf.Saver object
-    save_path: Where to save the model.
-    best_f1: the best F1 achieved so far.
-
-  Returns:
-    The F1 achieved on the validation set.
-  """
-  # Evaluate on the validation set
-  val_pred = model.predict(session, val_instances)
-  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
-      val_labels, val_pred, average='weighted')
-  print(
-      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
-          epoch + 1, model.hparams.num_epochs, epoch_loss,
-          precision, recall, f1))
-
-  if f1 > best_f1:
-    save_filename = os.path.join(save_path, 'best.ckpt')
-    print('Saving model in: %s' % save_filename)
-    saver.save(session, save_filename)
-    print('Model saved in file: %s' % save_filename)
-
-  return f1
-
-
-if __name__ == '__main__':
-  tf.app.run(main)
diff --git a/research/lexnet_nc/lexnet_common.py b/research/lexnet_nc/lexnet_common.py
deleted file mode 100644
index a2e8a104d..000000000
--- a/research/lexnet_nc/lexnet_common.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2017, 2018 Google, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Common stuff used with LexNET."""
-# pylint: disable=bad-whitespace
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-import numpy as np
-from sklearn import metrics
-import tensorflow as tf
-
-# Part of speech tags used in the paths.
-POSTAGS = [
-    'PAD', 'VERB', 'CONJ', 'NOUN', 'PUNCT',
-    'ADP', 'ADJ', 'DET', 'ADV', 'PART',
-    'NUM', 'X', 'INTJ', 'SYM',
-]
-
-POSTAG_TO_ID = {tag: tid for tid, tag in enumerate(POSTAGS)}
-
-# Dependency labels used in the paths.
-DEPLABELS = [
-    'PAD', 'UNK', 'ROOT', 'abbrev', 'acomp', 'advcl',
-    'advmod', 'agent', 'amod', 'appos', 'attr', 'aux',
-    'auxpass', 'cc', 'ccomp', 'complm', 'conj', 'cop',
-    'csubj', 'csubjpass', 'dep', 'det', 'dobj', 'expl',
-    'infmod', 'iobj', 'mark', 'mwe', 'nc', 'neg',
-    'nn', 'npadvmod', 'nsubj', 'nsubjpass', 'num', 'number',
-    'p', 'parataxis', 'partmod', 'pcomp', 'pobj', 'poss',
-    'preconj', 'predet', 'prep', 'prepc', 'prt', 'ps',
-    'purpcl', 'quantmod', 'rcmod', 'ref', 'rel', 'suffix',
-    'title', 'tmod', 'xcomp', 'xsubj',
-]
-
-DEPLABEL_TO_ID = {label: lid for lid, label in enumerate(DEPLABELS)}
-
-# Direction codes used in the paths.
-DIRS = '_^V<>'
-DIR_TO_ID = {dir: did for did, dir in enumerate(DIRS)}
-
-
-def load_word_embeddings(embeddings_base_path, embeddings_file=None):
-  """Loads pretrained word embeddings from a binary file and returns the matrix.
-
-  Adds the <PAD>, <UNK>, <X>, and <Y> tokens to the beginning of the vocab.
-
-  Args:
-    embeddings_base_path: filename of the binary NPY data, or a base directory
-      when embeddings_file is also given.
-    embeddings_file: optional filename of the NPY data under the base path.
-
-  Returns:
-    The word embeddings matrix
-  """
-  # Callers pass either a single full filename or (base_path, filename).
-  embedding_filename = (
-      os.path.join(embeddings_base_path, embeddings_file)
-      if embeddings_file else embeddings_base_path)
-  embeddings = np.load(embedding_filename)
-  dim = embeddings.shape[1]
-
-  # Four initially random vectors for the special tokens: <PAD>, <UNK>, <X>, <Y>
-  special_embeddings = np.random.normal(0, 0.1, (4, dim))
-  embeddings = np.vstack((special_embeddings, embeddings))
-  embeddings = embeddings.astype(np.float32)
-
-  return embeddings
-
-
-def full_evaluation(model, session, instances, labels, set_name, classes):
-  """Prints a full evaluation on the current set.
-
-  Performance (recall, precision and F1), classification report (per
-  class performance), and confusion matrix.
-
-  Args:
-    model: The currently trained path-based model.
-    session: The current TensorFlow session.
-    instances: The current set instances.
-    labels: The current set labels.
-    set_name: The current set name (train/validation/test).
-    classes: The class label names.
-
-  Returns:
-    The model's prediction for the given instances.
-  """
-
-  # Predict the labels
-  pred = model.predict(session, instances)
-
-  # Print the performance
-  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
-      labels, pred, average='weighted')
-
-  print('%s set: Precision: %.3f, Recall: %.3f, F1: %.3f' % (
-      set_name, precision, recall, f1))
-
-  # Print a classification report
-  print('%s classification report:' % set_name)
-  print(metrics.classification_report(labels, pred, target_names=classes))
-
-  # Print the confusion matrix
-  print('%s confusion matrix:' % set_name)
-  cm = metrics.confusion_matrix(labels, pred, labels=range(len(classes)))
-  cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] * 100
-  print_cm(cm, labels=classes)
-  return pred
-
-
-def print_cm(cm, labels):
-  """Pretty print for confusion matrices.
-
-  From: https://gist.github.com/zachguo/10296432.
-
-  Args:
-    cm: The confusion matrix.
-    labels: The class names.
-  """
-  columnwidth = 10
-  empty_cell = ' ' * columnwidth
-  short_labels = [label[:12].rjust(10, ' ') for label in labels]
-
-  # Print header
-  header = empty_cell + ' '
-  header += ''.join([' %{0}s '.format(columnwidth) % label
-                     for label in short_labels])
-
-  print(header)
-
-  # Print rows
-  for i, label1 in enumerate(short_labels):
-    row = '%{0}s '.format(columnwidth) % label1[:10]
-    for j in range(len(short_labels)):
-      value = int(cm[i, j]) if not np.isnan(cm[i, j]) else 0
-      cell = ' %{0}d '.format(10) % value
-      row += cell + ' '
-    print(row)
-
-
-def load_all_labels(records):
-  """Reads TensorFlow examples from a RecordReader and returns only the labels.
-
-  Args:
-    records: a record list with TensorFlow examples.
-
-  Returns:
-    The labels
-  """
-  curr_features = tf.parse_example(records, {
-      'rel_id': tf.FixedLenFeature([1], dtype=tf.int64),
-  })
-
-  labels = tf.squeeze(curr_features['rel_id'], [-1])
-  return labels
-
-
-def load_all_pairs(records):
-  """Reads TensorFlow examples from a RecordReader and returns the word pairs.
-
-  Args:
-    records: a record list with TensorFlow examples.
- - Returns: - The word pairs - """ - curr_features = tf.parse_example(records, { - 'pair': tf.FixedLenFeature([1], dtype=tf.string) - }) - - word_pairs = curr_features['pair'] - return word_pairs - - -def write_predictions(pairs, labels, predictions, classes, predictions_file): - """Write the predictions to a file. - - Args: - pairs: the word pairs (list of tuple of two strings). - labels: the gold-standard labels for these pairs (array of rel ID). - predictions: the predicted labels for these pairs (array of rel ID). - classes: a list of relation names. - predictions_file: where to save the predictions. - """ - with open(predictions_file, 'w') as f_out: - for pair, label, pred in zip(pairs, labels, predictions): - w1, w2 = pair - f_out.write('\t'.join([w1, w2, classes[label], classes[pred]]) + '\n') diff --git a/research/lexnet_nc/lexnet_model.py b/research/lexnet_nc/lexnet_model.py deleted file mode 100644 index b0f16b030..000000000 --- a/research/lexnet_nc/lexnet_model.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""The integrated LexNET model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import lexnet_common -import numpy as np -import tensorflow as tf -from six.moves import xrange - - -class LexNETModel(object): - """The LexNET model for classifying relationships between noun compounds.""" - - @classmethod - def default_hparams(cls): - """Returns the default hyper-parameters.""" - return tf.contrib.training.HParams( - batch_size=10, - num_classes=37, - num_epochs=30, - input_keep_prob=0.9, - input='integrated', # dist/ dist-nc/ path/ integrated/ integrated-nc - learn_relata=False, - corpus='wiki_gigawords', - random_seed=133, # zero means no random seed - relata_embeddings_file='glove/glove.6B.300d.bin', - nc_embeddings_file='nc_glove/vecs.6B.300d.bin', - path_embeddings_file='path_embeddings/tratz/fine_grained/wiki', - hidden_layers=1, - path_dim=60) - - def __init__(self, hparams, relata_embeddings, path_embeddings, nc_embeddings, - path_to_index): - """Initialize the LexNET classifier. - - Args: - hparams: the hyper-parameters. - relata_embeddings: word embeddings for the distributional component. - path_embeddings: embeddings for the paths. - nc_embeddings: noun compound embeddings. - path_to_index: a mapping from string path to an index in the path - embeddings matrix. 
- """ - self.hparams = hparams - - self.path_embeddings = path_embeddings - self.relata_embeddings = relata_embeddings - self.nc_embeddings = nc_embeddings - - self.vocab_size, self.relata_dim = 0, 0 - self.path_to_index = None - self.path_dim = 0 - - # Set the random seed - if hparams.random_seed > 0: - tf.set_random_seed(hparams.random_seed) - - # Get the vocabulary size and relata dim - if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: - self.vocab_size, self.relata_dim = self.relata_embeddings.shape - - # Create the mapping from string path to an index in the embeddings matrix - if self.hparams.input in ['path', 'integrated', 'integrated-nc']: - self.path_to_index = tf.contrib.lookup.HashTable( - tf.contrib.lookup.KeyValueTensorInitializer( - tf.constant(path_to_index.keys()), - tf.constant(path_to_index.values()), - key_dtype=tf.string, value_dtype=tf.int32), 0) - - self.path_dim = self.path_embeddings.shape[1] - - # Create the network - self.__create_computation_graph__() - - def __create_computation_graph__(self): - """Initialize the model and define the graph.""" - network_input = 0 - - # Define the network inputs - # Distributional x and y - if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: - network_input += 2 * self.relata_dim - self.relata_lookup = tf.get_variable( - 'relata_lookup', - initializer=self.relata_embeddings, - dtype=tf.float32, - trainable=self.hparams.learn_relata) - - # Path-based - if self.hparams.input in ['path', 'integrated', 'integrated-nc']: - network_input += self.path_dim - - self.path_initial_value_t = tf.placeholder(tf.float32, None) - - self.path_lookup = tf.get_variable( - name='path_lookup', - dtype=tf.float32, - trainable=False, - shape=self.path_embeddings.shape) - - self.initialize_path_op = tf.assign( - self.path_lookup, self.path_initial_value_t, validate_shape=False) - - # Distributional noun compound - if self.hparams.input in ['dist-nc', 'integrated-nc']: - network_input += self.relata_dim - - self.nc_initial_value_t = tf.placeholder(tf.float32, None) - - self.nc_lookup = tf.get_variable( - name='nc_lookup', - dtype=tf.float32, - trainable=False, - shape=self.nc_embeddings.shape) - - self.initialize_nc_op = tf.assign( - self.nc_lookup, self.nc_initial_value_t, validate_shape=False) - - hidden_dim = network_input // 2 - - # Define the MLP - if self.hparams.hidden_layers == 0: - self.weights1 = tf.get_variable( - 'W1', - shape=[network_input, self.hparams.num_classes], - dtype=tf.float32) - self.bias1 = tf.get_variable( - 'b1', - shape=[self.hparams.num_classes], - dtype=tf.float32) - - elif self.hparams.hidden_layers == 1: - - self.weights1 = tf.get_variable( - 'W1', - shape=[network_input, hidden_dim], - dtype=tf.float32) - self.bias1 = tf.get_variable( - 'b1', - shape=[hidden_dim], - dtype=tf.float32) - - self.weights2 = tf.get_variable( - 'W2', - shape=[hidden_dim, self.hparams.num_classes], - dtype=tf.float32) - self.bias2 = tf.get_variable( - 'b2', - shape=[self.hparams.num_classes], - dtype=tf.float32) - - else: - raise ValueError('Only 0 or 1 hidden layers are supported') - - # Define the variables - self.instances = tf.placeholder(dtype=tf.string, - shape=[self.hparams.batch_size]) - - (self.x_embedding_id, - self.y_embedding_id, - self.nc_embedding_id, - self.path_embedding_id, - self.path_counts, - self.labels) = parse_tensorflow_examples( - self.instances, self.hparams.batch_size, self.path_to_index) - - # Create the MLP - self.__mlp__() - - self.instances_to_load = 
tf.placeholder(dtype=tf.string, shape=[None])
-    self.labels_to_load = lexnet_common.load_all_labels(self.instances_to_load)
-    self.pairs_to_load = lexnet_common.load_all_pairs(self.instances_to_load)
-
-  def load_labels(self, session, instances):
-    """Loads the labels for these instances.
-
-    Args:
-      session: The current TensorFlow session.
-      instances: The instances for which to load the labels.
-
-    Returns:
-      the labels of these instances.
-    """
-    return session.run(self.labels_to_load,
-                       feed_dict={self.instances_to_load: instances})
-
-  def load_pairs(self, session, instances):
-    """Loads the word pairs for these instances.
-
-    Args:
-      session: The current TensorFlow session.
-      instances: The instances for which to load the word pairs.
-
-    Returns:
-      the word pairs of these instances.
-    """
-    word_pairs = session.run(self.pairs_to_load,
-                             feed_dict={self.instances_to_load: instances})
-    return [pair[0].split('::') for pair in word_pairs]
-
-  def __train_single_batch__(self, session, batch_instances):
-    """Train a single batch.
-
-    Args:
-      session: The current TensorFlow session.
-      batch_instances: TensorFlow examples containing the training instances
-
-    Returns:
-      The cost for the current batch.
-    """
-    cost, _ = session.run([self.cost, self.train_op],
-                          feed_dict={self.instances: batch_instances})
-
-    return cost
-
-  def fit(self, session, inputs, on_epoch_completed, val_instances, val_labels,
-          save_path):
-    """Train the model.
-
-    Args:
-      session: The current TensorFlow session.
-      inputs: the training instances (serialized tf.train.Example strings).
-      on_epoch_completed: A method to call after each epoch.
-      val_instances: The validation set instances (evaluation between epochs).
-      val_labels: The validation set labels (for evaluation between epochs).
-      save_path: Where to save the model.
-    """
-    for epoch in range(self.hparams.num_epochs):
-
-      losses = []
-      epoch_indices = list(np.random.permutation(len(inputs)))
-
-      # If the number of instances isn't divisible by batch_size, pad the
-      # epoch out to a multiple of batch_size by duplicating randomly-chosen
-      # training examples.
-      mod = len(epoch_indices) % self.hparams.batch_size
-      if mod > 0:
-        padding = self.hparams.batch_size - mod
-        epoch_indices.extend(
-            np.random.randint(0, high=len(inputs), size=padding))
-
-      # Define the batches
-      n_batches = len(epoch_indices) // self.hparams.batch_size
-
-      for minibatch in range(n_batches):
-
-        batch_indices = epoch_indices[minibatch * self.hparams.batch_size:(
-            minibatch + 1) * self.hparams.batch_size]
-        batch_instances = [inputs[i] for i in batch_indices]
-
-        loss = self.__train_single_batch__(session, batch_instances)
-        losses.append(loss)
-
-      epoch_loss = np.nanmean(losses)
-
-      if on_epoch_completed:
-        should_stop = on_epoch_completed(self, session, epoch, epoch_loss,
-                                         val_instances, val_labels, save_path)
-        if should_stop:
-          print('Stopping training after %d epochs.' % epoch)
-          return
-
-  def predict(self, session, inputs):
-    """Predict the classification of the test set.
-
-    Args:
-      session: The current TensorFlow session.
-      inputs: the test paths, x, y and/or nc vectors
-
-    Returns:
-      The test predictions.
-    """
-    predictions, _ = zip(*self.predict_with_score(session, inputs))
-    return np.array(predictions)
-
-  def predict_with_score(self, session, inputs):
-    """Predict the classification of the test set.
-
-    Args:
-      session: The current TensorFlow session.
-      inputs: the test paths, x, y and/or nc vectors
-
-    Returns:
-      The test predictions along with their scores.
- """ - test_pred = [0] * len(inputs) - - for chunk in xrange(0, len(test_pred), self.hparams.batch_size): - - # Initialize the variables with the current batch data - batch_indices = list( - range(chunk, min(chunk + self.hparams.batch_size, len(test_pred)))) - - # If the batch is too small, add a few other examples - if len(batch_indices) < self.hparams.batch_size: - batch_indices += [0] * (self.hparams.batch_size-len(batch_indices)) - - batch_instances = [inputs[i] for i in batch_indices] - - predictions, scores = session.run( - [self.predictions, self.scores], - feed_dict={self.instances: batch_instances}) - - for index_in_batch, index_in_dataset in enumerate(batch_indices): - prediction = predictions[index_in_batch] - score = scores[index_in_batch][prediction] - test_pred[index_in_dataset] = (prediction, score) - - return test_pred - - def __mlp__(self): - """Performs the MLP operations. - - Returns: the prediction object to be computed in a Session - """ - # Define the operations - - # Network input - vec_inputs = [] - - # Distributional component - if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']: - for emb_id in [self.x_embedding_id, self.y_embedding_id]: - vec_inputs.append(tf.nn.embedding_lookup(self.relata_lookup, emb_id)) - - # Noun compound component - if self.hparams.input in ['dist-nc', 'integrated-nc']: - vec = tf.nn.embedding_lookup(self.nc_lookup, self.nc_embedding_id) - vec_inputs.append(vec) - - # Path-based component - if self.hparams.input in ['path', 'integrated', 'integrated-nc']: - - # Get the current paths for each batch instance - self.path_embeddings = tf.nn.embedding_lookup(self.path_lookup, - self.path_embedding_id) - - # self.path_embeddings is of shape - # [batch_size, max_path_per_instance, output_dim] - # We need to multiply it by path counts - # ([batch_size, max_path_per_instance]). - # Start by duplicating path_counts along the output_dim axis. - self.path_freq = tf.tile(tf.expand_dims(self.path_counts, -1), - [1, 1, self.path_dim]) - - # Compute the averaged path vector for each instance. - # First, multiply the path embeddings and frequencies element-wise. - self.weighted = tf.multiply(self.path_freq, self.path_embeddings) - - # Second, take the sum to get a tensor of shape [batch_size, output_dim]. - self.pair_path_embeddings = tf.reduce_sum(self.weighted, 1) - - # Finally, divide by the total number of paths. - # The number of paths for each pair has a shape [batch_size, 1], - # We duplicate it output_dim times along the second axis. - self.num_paths = tf.clip_by_value( - tf.reduce_sum(self.path_counts, 1), 1, np.inf) - self.num_paths = tf.tile(tf.expand_dims(self.num_paths, -1), - [1, self.path_dim]) - - # And finally, divide pair_path_embeddings by num_paths element-wise. 
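-    # (At this point pair_path_embeddings and num_paths are both of shape
-    # [batch_size, path_dim], so the division below yields each instance's
-    # count-weighted average path vector.)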
- self.pair_path_embeddings = tf.div( - self.pair_path_embeddings, self.num_paths) - vec_inputs.append(self.pair_path_embeddings) - - # Concatenate the inputs and feed to the MLP - self.input_vec = tf.nn.dropout( - tf.concat(vec_inputs, 1), - keep_prob=self.hparams.input_keep_prob) - - h = tf.matmul(self.input_vec, self.weights1) - self.output = h - - if self.hparams.hidden_layers == 1: - self.output = tf.matmul(tf.nn.tanh(h), self.weights2) - - self.scores = self.output - self.predictions = tf.argmax(self.scores, axis=1) - - # Define the loss function and the optimization algorithm - self.cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits( - logits=self.scores, labels=self.labels) - self.cost = tf.reduce_sum(self.cross_entropies, name='cost') - self.global_step = tf.Variable(0, name='global_step', trainable=False) - self.optimizer = tf.train.AdamOptimizer() - self.train_op = self.optimizer.minimize( - self.cost, global_step=self.global_step) - - -def parse_tensorflow_examples(record, batch_size, path_to_index): - """Reads TensorFlow examples from a RecordReader. - - Args: - record: a record with TensorFlow examples. - batch_size: the number of instances in a minibatch - path_to_index: mapping from string path to index in the embeddings matrix. - - Returns: - The word embeddings IDs, paths and counts - """ - features = tf.parse_example( - record, { - 'x_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), - 'y_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), - 'nc_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64), - 'reprs': tf.FixedLenSequenceFeature( - shape=(), dtype=tf.string, allow_missing=True), - 'counts': tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'rel_id': tf.FixedLenFeature([1], dtype=tf.int64) - }) - - x_embedding_id = tf.squeeze(features['x_embedding_id'], [-1]) - y_embedding_id = tf.squeeze(features['y_embedding_id'], [-1]) - nc_embedding_id = tf.squeeze(features['nc_embedding_id'], [-1]) - labels = tf.squeeze(features['rel_id'], [-1]) - path_counts = tf.to_float(tf.reshape(features['counts'], [batch_size, -1])) - - path_embedding_id = None - if path_to_index: - path_embedding_id = path_to_index.lookup(features['reprs']) - - return ( - x_embedding_id, y_embedding_id, nc_embedding_id, - path_embedding_id, path_counts, labels) diff --git a/research/lexnet_nc/path_model.py b/research/lexnet_nc/path_model.py deleted file mode 100644 index c28384177..000000000 --- a/research/lexnet_nc/path_model.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""LexNET Path-based Model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import itertools -import os - -import lexnet_common -import numpy as np -import tensorflow as tf - - -class PathBasedModel(object): - """The LexNET path-based model for classifying semantic relations.""" - - @classmethod - def default_hparams(cls): - """Returns the default hyper-parameters.""" - return tf.contrib.training.HParams( - max_path_len=8, - num_classes=37, - num_epochs=30, - input_keep_prob=0.9, - learning_rate=0.001, - learn_lemmas=False, - random_seed=133, # zero means no random seed - lemma_embeddings_file='glove/glove.6B.50d.bin', - num_pos=len(lexnet_common.POSTAGS), - num_dep=len(lexnet_common.DEPLABELS), - num_directions=len(lexnet_common.DIRS), - lemma_dim=50, - pos_dim=4, - dep_dim=5, - dir_dim=1) - - def __init__(self, hparams, lemma_embeddings, instance): - """Initialize the LexNET classifier. - - Args: - hparams: the hyper-parameters. - lemma_embeddings: word embeddings for the path-based component. - instance: string tensor containing the input instance - """ - self.hparams = hparams - self.lemma_embeddings = lemma_embeddings - self.instance = instance - self.vocab_size, self.lemma_dim = self.lemma_embeddings.shape - - # Set the random seed - if hparams.random_seed > 0: - tf.set_random_seed(hparams.random_seed) - - # Create the network - self.__create_computation_graph__() - - def __create_computation_graph__(self): - """Initialize the model and define the graph.""" - self.lstm_input_dim = sum([self.hparams.lemma_dim, self.hparams.pos_dim, - self.hparams.dep_dim, self.hparams.dir_dim]) - self.lstm_output_dim = self.lstm_input_dim - - network_input = self.lstm_output_dim - self.lemma_lookup = tf.get_variable( - 'lemma_lookup', - initializer=self.lemma_embeddings, - dtype=tf.float32, - trainable=self.hparams.learn_lemmas) - self.pos_lookup = tf.get_variable( - 'pos_lookup', - shape=[self.hparams.num_pos, self.hparams.pos_dim], - dtype=tf.float32) - self.dep_lookup = tf.get_variable( - 'dep_lookup', - shape=[self.hparams.num_dep, self.hparams.dep_dim], - dtype=tf.float32) - self.dir_lookup = tf.get_variable( - 'dir_lookup', - shape=[self.hparams.num_directions, self.hparams.dir_dim], - dtype=tf.float32) - - self.weights1 = tf.get_variable( - 'W1', - shape=[network_input, self.hparams.num_classes], - dtype=tf.float32) - self.bias1 = tf.get_variable( - 'b1', - shape=[self.hparams.num_classes], - dtype=tf.float32) - - # Define the variables - (self.batch_paths, - self.path_counts, - self.seq_lengths, - self.path_strings, - self.batch_labels) = _parse_tensorflow_example( - self.instance, self.hparams.max_path_len, self.hparams.input_keep_prob) - - # Create the LSTM - self.__lstm__() - - # Create the MLP - self.__mlp__() - - self.instances_to_load = tf.placeholder(dtype=tf.string, shape=[None]) - self.labels_to_load = lexnet_common.load_all_labels(self.instances_to_load) - - def load_labels(self, session, batch_instances): - """Loads the labels of the current instances. - - Args: - session: the current TensorFlow session. - batch_instances: the dataset instances. - - Returns: - the labels. - """ - return session.run(self.labels_to_load, - feed_dict={self.instances_to_load: batch_instances}) - - def run_one_epoch(self, session, num_steps): - """Train the model. - - Args: - session: The current TensorFlow session. 
- num_steps: The number of steps in each epoch. - - Returns: - The mean loss for the epoch. - - Raises: - ArithmeticError: if the loss becomes non-finite. - """ - losses = [] - - for step in range(num_steps): - curr_loss, _ = session.run([self.cost, self.train_op]) - if not np.isfinite(curr_loss): - raise ArithmeticError('nan loss at step %d' % step) - - losses.append(curr_loss) - - return np.mean(losses) - - def predict(self, session, inputs): - """Predict the classification of the test set. - - Args: - session: The current TensorFlow session. - inputs: the train paths, x, y and/or nc vectors - - Returns: - The test predictions. - """ - predictions, _ = zip(*self.predict_with_score(session, inputs)) - return np.array(predictions) - - def predict_with_score(self, session, inputs): - """Predict the classification of the test set. - - Args: - session: The current TensorFlow session. - inputs: the test paths, x, y and/or nc vectors - - Returns: - The test predictions along with their scores. - """ - test_pred = [0] * len(inputs) - - for index, instance in enumerate(inputs): - - prediction, scores = session.run( - [self.predictions, self.scores], - feed_dict={self.instance: instance}) - - test_pred[index] = (prediction, scores[prediction]) - - return test_pred - - def __mlp__(self): - """Performs the MLP operations. - - Returns: the prediction object to be computed in a Session - """ - # Feed the paths to the MLP: path_embeddings is - # [num_batch_paths, output_dim], and when we multiply it by W - # ([output_dim, num_classes]), we get a matrix of class distributions: - # [num_batch_paths, num_classes]. - self.distributions = tf.matmul(self.path_embeddings, self.weights1) - - # Now, compute weighted average on the class distributions, using the path - # frequency as weights. - - # First, reshape path_freq to the same shape of distributions - self.path_freq = tf.tile(tf.expand_dims(self.path_counts, -1), - [1, self.hparams.num_classes]) - - # Second, multiply the distributions and frequencies element-wise. - self.weighted = tf.multiply(self.path_freq, self.distributions) - - # Finally, take the average to get a tensor of shape [1, num_classes]. - self.weighted_sum = tf.reduce_sum(self.weighted, 0) - self.num_paths = tf.clip_by_value(tf.reduce_sum(self.path_counts), - 1, np.inf) - self.num_paths = tf.tile(tf.expand_dims(self.num_paths, -1), - [self.hparams.num_classes]) - self.scores = tf.div(self.weighted_sum, self.num_paths) - self.predictions = tf.argmax(self.scores) - - # Define the loss function and the optimization algorithm - self.cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits( - logits=self.scores, labels=tf.reduce_mean(self.batch_labels)) - self.cost = tf.reduce_sum(self.cross_entropies, name='cost') - self.global_step = tf.Variable(0, name='global_step', trainable=False) - self.optimizer = tf.train.AdamOptimizer() - self.train_op = self.optimizer.minimize(self.cost, - global_step=self.global_step) - - def __lstm__(self): - """Defines the LSTM operations. - - Returns: - A matrix of path embeddings. 
- """ - lookup_tables = [self.lemma_lookup, self.pos_lookup, - self.dep_lookup, self.dir_lookup] - - # Split the edges to components: list of 4 tensors - # [num_batch_paths, max_path_len, 1] - self.edge_components = tf.split(self.batch_paths, 4, axis=2) - - # Look up the components embeddings and concatenate them back together - self.path_matrix = tf.concat([ - tf.squeeze(tf.nn.embedding_lookup(lookup_table, component), 2) - for lookup_table, component in - zip(lookup_tables, self.edge_components) - ], axis=2) - - self.sequence_lengths = tf.reshape(self.seq_lengths, [-1]) - - # Define the LSTM. - # The input is [num_batch_paths, max_path_len, input_dim]. - lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.lstm_output_dim) - - # The output is [num_batch_paths, max_path_len, output_dim]. - self.lstm_outputs, _ = tf.nn.dynamic_rnn( - lstm_cell, self.path_matrix, dtype=tf.float32, - sequence_length=self.sequence_lengths) - - # Slice the last *relevant* output for each instance -> - # [num_batch_paths, output_dim] - self.path_embeddings = _extract_last_relevant(self.lstm_outputs, - self.sequence_lengths) - - -def _parse_tensorflow_example(record, max_path_len, input_keep_prob): - """Reads TensorFlow examples from a RecordReader. - - Args: - record: a record with TensorFlow example. - max_path_len: the maximum path length. - input_keep_prob: 1 - the word dropout probability - - Returns: - The paths and counts - """ - features = tf.parse_single_example(record, { - 'lemmas': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'postags': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'deplabels': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'dirs': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'counts': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'pathlens': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.int64, allow_missing=True), - 'reprs': - tf.FixedLenSequenceFeature( - shape=(), dtype=tf.string, allow_missing=True), - 'rel_id': - tf.FixedLenFeature([], dtype=tf.int64) - }) - - path_counts = tf.to_float(features['counts']) - seq_lengths = features['pathlens'] - - # Concatenate the edge components to create a path tensor: - # [max_paths_per_ins, max_path_length, 4] - lemmas = _word_dropout( - tf.reshape(features['lemmas'], [-1, max_path_len]), input_keep_prob) - - paths = tf.stack( - [lemmas] + [ - tf.reshape(features[f], [-1, max_path_len]) - for f in ('postags', 'deplabels', 'dirs') - ], - axis=-1) - - path_strings = features['reprs'] - - # Add an empty path to pairs with no paths - paths = tf.cond( - tf.shape(paths)[0] > 0, - lambda: paths, - lambda: tf.zeros([1, max_path_len, 4], dtype=tf.int64)) - - # Paths are left-padded. We reverse them to make them right-padded. - #paths = tf.reverse(paths, axis=[1]) - - path_counts = tf.cond( - tf.shape(path_counts)[0] > 0, - lambda: path_counts, - lambda: tf.constant([1.0], dtype=tf.float32)) - - seq_lengths = tf.cond( - tf.shape(seq_lengths)[0] > 0, - lambda: seq_lengths, - lambda: tf.constant([1], dtype=tf.int64)) - - # Duplicate the label for each path - labels = tf.ones_like(path_counts, dtype=tf.int64) * features['rel_id'] - - return paths, path_counts, seq_lengths, path_strings, labels - - -def _extract_last_relevant(output, seq_lengths): - """Get the last relevant LSTM output cell for each batch instance. 
-
-  Args:
-    output: the LSTM outputs - a tensor with shape
-            [num_paths, max_path_len, output_dim]
-    seq_lengths: the sequences length per instance
-
-  Returns:
-    The last relevant LSTM output cell for each batch instance.
-  """
-  max_length = int(output.get_shape()[1])
-  path_lengths = tf.clip_by_value(seq_lengths - 1, 0, max_length)
-  relevant = tf.reduce_sum(tf.multiply(output, tf.expand_dims(
-      tf.one_hot(path_lengths, max_length), -1)), 1)
-  return relevant
-
-
-def _word_dropout(words, input_keep_prob):
-  """Drops words with probability 1 - input_keep_prob.
-
-  Args:
-    words: a list of lemmas from the paths.
-    input_keep_prob: the probability to keep the word.
-
-  Returns:
-    The revised list where some of the words are <UNK>ed.
-  """
-  # Create the mask: (-1) to drop, 1 to keep
-  prob = tf.random_uniform(tf.shape(words), 0, 1)
-  condition = tf.less(prob, (1 - input_keep_prob))
-  mask = tf.where(condition,
-                  tf.negative(tf.ones_like(words)), tf.ones_like(words))
-
-  # We need to keep zeros (<PAD>), and change other numbers to 1 (<UNK>)
-  # if their mask is -1. First, we multiply the mask and the words.
-  # Zeros will stay zeros, and words to drop will become negative.
-  # Then, we change negative values to 1.
-  masked_words = tf.multiply(mask, words)
-  condition = tf.less(masked_words, 0)
-  dropped_words = tf.where(condition, tf.ones_like(words), words)
-  return dropped_words
-
-
-def compute_path_embeddings(model, session, instances):
-  """Compute the path embeddings for all the distinct paths.
-
-  Args:
-    model: The trained path-based model.
-    session: The current TensorFlow session.
-    instances: All the train, test and validation instances.
-
-  Returns:
-    The path to ID index and the path embeddings.
-  """
-  # Get an index for each distinct path
-  path_index = collections.defaultdict(itertools.count(0).next)
-  path_vectors = {}
-
-  for instance in instances:
-    curr_path_embeddings, curr_path_strings = session.run(
-        [model.path_embeddings, model.path_strings],
-        feed_dict={model.instance: instance})
-
-    for i, path in enumerate(curr_path_strings):
-      if not path:
-        continue
-
-      # Set a new/existing index for the path
-      index = path_index[path]
-
-      # Save its vector
-      path_vectors[index] = curr_path_embeddings[i, :]
-
-  print('Number of distinct paths: %d' % len(path_index))
-  return path_index, path_vectors
-
-
-def save_path_embeddings(model, path_vectors, path_index, embeddings_base_path):
-  """Saves the path embeddings.
-
-  Args:
-    model: The trained path-based model.
-    path_vectors: The path embeddings.
-    path_index: A map from path to ID.
-    embeddings_base_path: The base directory where the embeddings are.
-  """
-  index_range = range(max(path_index.values()) + 1)
-  path_matrix = [path_vectors[i] for i in index_range]
-  path_matrix = np.vstack(path_matrix)
-
-  # Save the path embeddings
-  path_vector_filename = os.path.join(
-      embeddings_base_path, '%d_path_vectors' % model.lstm_output_dim)
-  with open(path_vector_filename, 'w') as f_out:
-    np.save(f_out, path_matrix)
-
-  index_to_path = {i: p for p, i in path_index.iteritems()}
-  path_vocab = [index_to_path[i] for i in index_range]
-
-  # Save the path vocabulary
-  path_vocab_filename = os.path.join(
-      embeddings_base_path, '%d_path_vocab' % model.lstm_output_dim)
-  with open(path_vocab_filename, 'w') as f_out:
-    f_out.write('\n'.join(path_vocab))
-    f_out.write('\n')
-
-  print('Saved path embeddings.')
-
-
-def load_path_embeddings(path_embeddings_dir, path_dim):
-  """Loads pretrained path embeddings from a binary file and returns the matrix.
- - Args: - path_embeddings_dir: The directory for the path embeddings. - path_dim: The dimension of the path embeddings, used as prefix to the - path_vocab and path_vectors files. - - Returns: - The path embeddings matrix and the path_to_index dictionary. - """ - prefix = path_embeddings_dir + '/%d' % path_dim + '_' - with open(prefix + 'path_vocab') as f_in: - vocab = f_in.read().splitlines() - - vocab_size = len(vocab) - embedding_file = prefix + 'path_vectors' - - print('Embedding file "%s" has %d paths' % (embedding_file, vocab_size)) - - with open(embedding_file) as f_in: - embeddings = np.load(f_in) - - path_to_index = {p: i for i, p in enumerate(vocab)} - return embeddings, path_to_index - - -def get_indicative_paths(model, session, path_index, path_vectors, classes, - save_dir, k=20, threshold=0.8): - """Gets the most indicative paths for each class. - - Args: - model: The trained path-based model. - session: The current TensorFlow session. - path_index: A map from path to ID. - path_vectors: The path embeddings. - classes: The class label names. - save_dir: Where to save the paths. - k: The k for top-k paths. - threshold: The threshold above which to consider paths as indicative. - """ - # Define graph variables for this operation - p_path_embedding = tf.placeholder(dtype=tf.float32, - shape=[1, model.lstm_output_dim]) - p_distributions = tf.nn.softmax(tf.matmul(p_path_embedding, model.weights1)) - - # Treat each path as a pair instance with a single path, and get the - # relation distribution for it. Then, take the top paths for each relation. - - # This dictionary contains a relation as a key, and the value is a list of - # tuples of path index and score. A relation r will contain (p, s) if the - # path p is classified to r with a confidence of s. - prediction_per_relation = collections.defaultdict(list) - - index_to_path = {i: p for p, i in path_index.iteritems()} - - # Predict all the paths - for index in range(len(path_index)): - curr_path_vector = path_vectors[index] - - distribution = session.run(p_distributions, - feed_dict={ - p_path_embedding: np.reshape( - curr_path_vector, - [1, model.lstm_output_dim])}) - - distribution = distribution[0, :] - prediction = np.argmax(distribution) - prediction_per_relation[prediction].append( - (index, distribution[prediction])) - - if index % 10000 == 0: - print('Classified %d/%d (%3.2f%%) of the paths' % ( - index, len(path_index), 100 * index / len(path_index))) - - # Retrieve k-best scoring paths for each relation - for relation_index, relation in enumerate(classes): - curr_paths = sorted(prediction_per_relation[relation_index], - key=lambda item: item[1], reverse=True) - above_t = [(p, s) for (p, s) in curr_paths if s >= threshold] - top_k = curr_paths[k+1] - relation_paths = above_t if len(above_t) > len(top_k) else top_k - - paths_filename = os.path.join(save_dir, '%s.paths' % relation) - with open(paths_filename, 'w') as f_out: - for index, score in relation_paths: - print('\t'.join([index_to_path[index], str(score)]), file=f_out) diff --git a/research/lexnet_nc/sorted_paths_to_examples.py b/research/lexnet_nc/sorted_paths_to_examples.py deleted file mode 100755 index c21d25d71..000000000 --- a/research/lexnet_nc/sorted_paths_to_examples.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Takes as input a sorted, tab-separated file of paths to produce tf.Examples."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import itertools
-import os
-import sys
-import tensorflow as tf
-
-import lexnet_common
-
-tf.flags.DEFINE_string('input', '', 'tab-separated input data')
-tf.flags.DEFINE_string('vocab', '', 'a text file containing lemma vocabulary')
-tf.flags.DEFINE_string('relations', '', 'a text file containing the relations')
-tf.flags.DEFINE_string('output_dir', '', 'output directory')
-tf.flags.DEFINE_string('splits', '', 'text file enumerating splits')
-tf.flags.DEFINE_string('default_split', '', 'default split for unlabeled pairs')
-tf.flags.DEFINE_string('compression', 'GZIP', 'compression for output records')
-tf.flags.DEFINE_integer('max_paths', 100, 'maximum number of paths per record')
-tf.flags.DEFINE_integer('max_pathlen', 8, 'maximum path length')
-FLAGS = tf.flags.FLAGS
-
-
-def _int64_features(value):
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
-
-
-def _bytes_features(value):
-  value = [v.encode('utf-8') if isinstance(v, unicode) else v for v in value]
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
-
-
-class CreateExampleFn(object):
-
-  def __init__(self):
-    # Read the vocabulary. N.B. that 0 = PAD, 1 = UNK, 2 = <X>, 3 = <Y>, hence
-    # the enumeration starting at 4.
-    with tf.gfile.GFile(FLAGS.vocab) as fh:
-      self.vocab = {w: ix for ix, w in enumerate(fh.read().splitlines(), start=4)}
-
-    self.vocab.update({'<PAD>': 0, '<UNK>': 1, '<X>': 2, '<Y>': 3})
-
-    # Read the relations.
-    with tf.gfile.GFile(FLAGS.relations) as fh:
-      self.relations = {r: ix for ix, r in enumerate(fh.read().splitlines())}
-
-    # Some hackery to map from SpaCy postags to Google's.
-    lexnet_common.POSTAG_TO_ID['PROPN'] = lexnet_common.POSTAG_TO_ID['NOUN']
-    lexnet_common.POSTAG_TO_ID['PRON'] = lexnet_common.POSTAG_TO_ID['NOUN']
-    lexnet_common.POSTAG_TO_ID['CCONJ'] = lexnet_common.POSTAG_TO_ID['CONJ']
-    #lexnet_common.DEPLABEL_TO_ID['relcl'] = lexnet_common.DEPLABEL_TO_ID['rel']
-    #lexnet_common.DEPLABEL_TO_ID['compound'] = lexnet_common.DEPLABEL_TO_ID['xcomp']
-    #lexnet_common.DEPLABEL_TO_ID['oprd'] = lexnet_common.DEPLABEL_TO_ID['UNK']
-
-  def __call__(self, mod, head, rel, raw_paths):
-    # Drop any really long paths.
-    paths = []
-    counts = []
-    for raw, count in raw_paths.most_common(FLAGS.max_paths):
-      path = raw.split('::')
-      if len(path) <= FLAGS.max_pathlen:
-        paths.append(path)
-        counts.append(count)
-
-    if not paths:
-      return None
-
-    # Compute the true length.
-    pathlens = [len(path) for path in paths]
-
-    # Pad each path out to max_pathlen so the LSTM can eat it.
-    paths = (
-        itertools.islice(
-            itertools.chain(path, itertools.repeat('/PAD/PAD/_')),
-            FLAGS.max_pathlen)
-        for path in paths)
-
-    # Split the lemma, POS, dependency label, and direction each into a
-    # separate feature.
- lemmas, postags, deplabels, dirs = zip( - *(part.split('/') for part in itertools.chain(*paths))) - - lemmas = [self.vocab.get(lemma, 1) for lemma in lemmas] - postags = [lexnet_common.POSTAG_TO_ID[pos] for pos in postags] - deplabels = [lexnet_common.DEPLABEL_TO_ID.get(dep, 1) for dep in deplabels] - dirs = [lexnet_common.DIR_TO_ID.get(d, 0) for d in dirs] - - return tf.train.Example(features=tf.train.Features(feature={ - 'pair': _bytes_features(['::'.join((mod, head))]), - 'rel': _bytes_features([rel]), - 'rel_id': _int64_features([self.relations[rel]]), - 'reprs': _bytes_features(raw_paths), - 'pathlens': _int64_features(pathlens), - 'counts': _int64_features(counts), - 'lemmas': _int64_features(lemmas), - 'dirs': _int64_features(dirs), - 'deplabels': _int64_features(deplabels), - 'postags': _int64_features(postags), - 'x_embedding_id': _int64_features([self.vocab[mod]]), - 'y_embedding_id': _int64_features([self.vocab[head]]), - })) - - -def main(_): - # Read the splits file, if there is one. - assignments = {} - if FLAGS.splits: - with tf.gfile.GFile(FLAGS.splits) as fh: - parts = (line.split('\t') for line in fh.read().splitlines()) - assignments = {(mod, head): split for mod, head, split in parts} - - splits = set(assignments.itervalues()) - if FLAGS.default_split: - default_split = FLAGS.default_split - splits.add(FLAGS.default_split) - elif splits: - default_split = iter(splits).next() - else: - print('Please specify --splits, --default_split, or both', file=sys.stderr) - return 1 - - last_mod, last_head, last_label = None, None, None - raw_paths = collections.Counter() - - # Keep track of pairs we've seen to ensure that we don't get unsorted data. - seen_labeled_pairs = set() - - # Set up output compression - compression_type = getattr( - tf.python_io.TFRecordCompressionType, FLAGS.compression) - options = tf.python_io.TFRecordOptions(compression_type=compression_type) - - writers = { - split: tf.python_io.TFRecordWriter( - os.path.join(FLAGS.output_dir, '%s.tfrecs.gz' % split), - options=options) - for split in splits} - - create_example = CreateExampleFn() - - in_fh = sys.stdin if not FLAGS.input else tf.gfile.GFile(FLAGS.input) - for lineno, line in enumerate(in_fh, start=1): - if lineno % 100 == 0: - print('\rProcessed %d lines...' 
% lineno, end='', file=sys.stderr) - - parts = line.decode('utf-8').strip().split('\t') - if len(parts) != 5: - print('Skipping line %d: %d columns (expected 5)' % ( - lineno, len(parts)), file=sys.stderr) - - continue - - mod, head, label, raw_path, source = parts - if mod == last_mod and head == last_head and label == last_label: - raw_paths.update([raw_path]) - continue - - if last_mod and last_head and last_label and raw_paths: - if (last_mod, last_head, last_label) in seen_labeled_pairs: - print('It looks like the input data is not sorted; ignoring extra ' - 'record for (%s::%s, %s) at line %d' % ( - last_mod, last_head, last_label, lineno)) - else: - ex = create_example(last_mod, last_head, last_label, raw_paths) - if ex: - split = assignments.get((last_mod, last_head), default_split) - writers[split].write(ex.SerializeToString()) - - seen_labeled_pairs.add((last_mod, last_head, last_label)) - - last_mod, last_head, last_label = mod, head, label - raw_paths = collections.Counter() - - if last_mod and last_head and last_label and raw_paths: - ex = create_example(last_mod, last_head, last_label, raw_paths) - if ex: - split = assignments.get((last_mod, last_head), default_split) - writers[split].write(ex.SerializeToString()) - - for writer in writers.itervalues(): - writer.close() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/lexnet_nc/text_embeddings_to_binary.py b/research/lexnet_nc/text_embeddings_to_binary.py deleted file mode 100755 index 8226a7654..000000000 --- a/research/lexnet_nc/text_embeddings_to_binary.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -# Copyright 2017, 2018 Google, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Converts a text embedding file into a binary format for quicker loading.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -tf.flags.DEFINE_string('input', '', 'text file containing embeddings') -tf.flags.DEFINE_string('output_vocab', '', 'output file for vocabulary') -tf.flags.DEFINE_string('output_npy', '', 'output file for binary') -FLAGS = tf.flags.FLAGS - -def main(_): - vecs = [] - vocab = [] - with tf.gfile.GFile(FLAGS.input) as fh: - for line in fh: - parts = line.strip().split() - vocab.append(parts[0]) - vecs.append([float(x) for x in parts[1:]]) - - with tf.gfile.GFile(FLAGS.output_vocab, 'w') as fh: - fh.write('\n'.join(vocab)) - fh.write('\n') - - vecs = np.array(vecs, dtype=np.float32) - np.save(FLAGS.output_npy, vecs, allow_pickle=False) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/lm_1b/BUILD b/research/lm_1b/BUILD deleted file mode 100644 index ca5bc1f6c..000000000 --- a/research/lm_1b/BUILD +++ /dev/null @@ -1,27 +0,0 @@ -package(default_visibility = [":internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = [ - "//lm_1b/...", - ], -) - -py_library( - name = "data_utils", - srcs = ["data_utils.py"], -) - -py_binary( - name = "lm_1b_eval", - srcs = [ - "lm_1b_eval.py", - ], - deps = [ - ":data_utils", - ], -) diff --git a/research/lm_1b/README.md b/research/lm_1b/README.md deleted file mode 100644 index f48afbfe2..000000000 --- a/research/lm_1b/README.md +++ /dev/null @@ -1,198 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -Language Model on One Billion Word Benchmark - -Authors: - -Oriol Vinyals (vinyals@google.com, github: OriolVinyals), -Xin Pan - -Paper Authors: - -Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, Yonghui Wu - -TL;DR - -This is a pretrained model on One Billion Word Benchmark. -If you use this model in your publication, please cite the original paper: - -@article{jozefowicz2016exploring, - title={Exploring the Limits of Language Modeling}, - author={Jozefowicz, Rafal and Vinyals, Oriol and Schuster, Mike - and Shazeer, Noam and Wu, Yonghui}, - journal={arXiv preprint arXiv:1602.02410}, - year={2016} -} - -Introduction - -In this release, we open source a model trained on the One Billion Word -Benchmark (http://arxiv.org/abs/1312.3005), a large language corpus in English -which was released in 2013. This dataset contains about one billion words, and -has a vocabulary size of about 800K words. It contains mostly news data. Since -sentences in the training set are shuffled, models can ignore the context and -focus on sentence level language modeling. - -In the original release and subsequent work, people have used the same test set -to train models on this dataset as a standard benchmark for language modeling. 
-Recently, we wrote an article (http://arxiv.org/abs/1602.02410) describing a -model hybrid between character CNN, a large and deep LSTM, and a specific -Softmax architecture which allowed us to train the best model on this dataset -thus far, almost halving the best perplexity previously obtained by others. - -Code Release - -The open-sourced components include: - -* TensorFlow GraphDef proto buffer text file. -* TensorFlow pre-trained checkpoint shards. -* Code used to evaluate the pre-trained model. -* Vocabulary file. -* Test set from LM-1B evaluation. - -The code supports 4 evaluation modes: - -* Given provided dataset, calculate the model's perplexity. -* Given a prefix sentence, predict the next words. -* Dump the softmax embedding, character-level CNN word embeddings. -* Give a sentence, dump the embedding from the LSTM state. - -Results - -Model | Test Perplexity | Number of Params [billions] -------|-----------------|---------------------------- -Sigmoid-RNN-2048 [Blackout] | 68.3 | 4.1 -Interpolated KN 5-gram, 1.1B n-grams [chelba2013one] | 67.6 | 1.76 -Sparse Non-Negative Matrix LM [shazeer2015sparse] | 52.9 | 33 -RNN-1024 + MaxEnt 9-gram features [chelba2013one] | 51.3 | 20 -LSTM-512-512 | 54.1 | 0.82 -LSTM-1024-512 | 48.2 | 0.82 -LSTM-2048-512 | 43.7 | 0.83 -LSTM-8192-2048 (No Dropout) | 37.9 | 3.3 -LSTM-8192-2048 (50\% Dropout) | 32.2 | 3.3 -2-Layer LSTM-8192-1024 (BIG LSTM) | 30.6 | 1.8 -(THIS RELEASE) BIG LSTM+CNN Inputs | 30.0 | 1.04 - -How To Run - -Prerequisites: - -* Install TensorFlow. -* Install Bazel. -* Download the data files: - * Model GraphDef file: - [link](http://download.tensorflow.org/models/LM_LSTM_CNN/graph-2016-09-10.pbtxt) - * Model Checkpoint sharded file: - [1](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-base) - [2](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-char-embedding) - [3](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-lstm) - [4](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax0) - [5](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax1) - [6](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax2) - [7](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax3) - [8](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax4) - [9](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax5) - [10](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax6) - [11](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax7) - [12](http://download.tensorflow.org/models/LM_LSTM_CNN/all_shards-2016-09-10/ckpt-softmax8) - * Vocabulary file: - [link](http://download.tensorflow.org/models/LM_LSTM_CNN/vocab-2016-09-10.txt) - * test dataset: link - [link](http://download.tensorflow.org/models/LM_LSTM_CNN/test/news.en.heldout-00000-of-00050) -* It is recommended to run on a modern desktop instead of a laptop. - -```shell -# 1. Clone the code to your workspace. -# 2. Download the data to your workspace. -# 3. Create an empty WORKSPACE file in your workspace. -# 4. Create an empty output directory in your workspace. 
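# For example, steps 3 and 4 amount to running the following from the
# workspace root:
#   $ touch WORKSPACE && mkdir output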
-# Example directory structure below: -$ ls -R -.: -data lm_1b output WORKSPACE - -./data: -ckpt-base ckpt-lstm ckpt-softmax1 ckpt-softmax3 ckpt-softmax5 -ckpt-softmax7 graph-2016-09-10.pbtxt vocab-2016-09-10.txt -ckpt-char-embedding ckpt-softmax0 ckpt-softmax2 ckpt-softmax4 ckpt-softmax6 -ckpt-softmax8 news.en.heldout-00000-of-00050 - -./lm_1b: -BUILD data_utils.py lm_1b_eval.py README.md - -./output: - -# Build the codes. -$ bazel build -c opt lm_1b/... -# Run sample mode: -$ bazel-bin/lm_1b/lm_1b_eval --mode sample \ - --prefix "I love that I" \ - --pbtxt data/graph-2016-09-10.pbtxt \ - --vocab_file data/vocab-2016-09-10.txt \ - --ckpt 'data/ckpt-*' -...(omitted some TensorFlow output) -I love -I love that -I love that I -I love that I find -I love that I find that -I love that I find that amazing -...(omitted) - -# Run eval mode: -$ bazel-bin/lm_1b/lm_1b_eval --mode eval \ - --pbtxt data/graph-2016-09-10.pbtxt \ - --vocab_file data/vocab-2016-09-10.txt \ - --input_data data/news.en.heldout-00000-of-00050 \ - --ckpt 'data/ckpt-*' -...(omitted some TensorFlow output) -Loaded step 14108582. -# perplexity is high initially because words without context are harder to -# predict. -Eval Step: 0, Average Perplexity: 2045.512297. -Eval Step: 1, Average Perplexity: 229.478699. -Eval Step: 2, Average Perplexity: 208.116787. -Eval Step: 3, Average Perplexity: 338.870601. -Eval Step: 4, Average Perplexity: 228.950107. -Eval Step: 5, Average Perplexity: 197.685857. -Eval Step: 6, Average Perplexity: 156.287063. -Eval Step: 7, Average Perplexity: 124.866189. -Eval Step: 8, Average Perplexity: 147.204975. -Eval Step: 9, Average Perplexity: 90.124864. -Eval Step: 10, Average Perplexity: 59.897914. -Eval Step: 11, Average Perplexity: 42.591137. -...(omitted) -Eval Step: 4529, Average Perplexity: 29.243668. -Eval Step: 4530, Average Perplexity: 29.302362. -Eval Step: 4531, Average Perplexity: 29.285674. -...(omitted. At convergence, it should be around 30.) - -# Run dump_emb mode: -$ bazel-bin/lm_1b/lm_1b_eval --mode dump_emb \ - --pbtxt data/graph-2016-09-10.pbtxt \ - --vocab_file data/vocab-2016-09-10.txt \ - --ckpt 'data/ckpt-*' \ - --save_dir output -...(omitted some TensorFlow output) -Finished softmax weights -Finished word embedding 0/793471 -Finished word embedding 1/793471 -Finished word embedding 2/793471 -...(omitted) -$ ls output/ -embeddings_softmax.npy ... - -# Run dump_lstm_emb mode: -$ bazel-bin/lm_1b/lm_1b_eval --mode dump_lstm_emb \ - --pbtxt data/graph-2016-09-10.pbtxt \ - --vocab_file data/vocab-2016-09-10.txt \ - --ckpt 'data/ckpt-*' \ - --sentence "I love who I am ." \ - --save_dir output -$ ls output/ -lstm_emb_step_0.npy lstm_emb_step_2.npy lstm_emb_step_4.npy -lstm_emb_step_6.npy lstm_emb_step_1.npy lstm_emb_step_3.npy -lstm_emb_step_5.npy -``` diff --git a/research/lm_1b/data_utils.py b/research/lm_1b/data_utils.py deleted file mode 100644 index ad8d3391e..000000000 --- a/research/lm_1b/data_utils.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""A library for loading 1B word benchmark dataset."""
-
-import random
-
-import numpy as np
-import tensorflow as tf
-
-
-class Vocabulary(object):
-  """Class that holds a vocabulary for the dataset."""
-
-  def __init__(self, filename):
-    """Initialize vocabulary.
-
-    Args:
-      filename: Vocabulary file name.
-    """
-
-    self._id_to_word = []
-    self._word_to_id = {}
-    self._unk = -1
-    self._bos = -1
-    self._eos = -1
-
-    with tf.gfile.Open(filename) as f:
-      idx = 0
-      for line in f:
-        word_name = line.strip()
-        if word_name == '<S>':
-          self._bos = idx
-        elif word_name == '</S>':
-          self._eos = idx
-        elif word_name == '<UNK>':
-          self._unk = idx
-        if word_name == '!!!MAXTERMID':
-          continue
-
-        self._id_to_word.append(word_name)
-        self._word_to_id[word_name] = idx
-        idx += 1
-
-  @property
-  def bos(self):
-    return self._bos
-
-  @property
-  def eos(self):
-    return self._eos
-
-  @property
-  def unk(self):
-    return self._unk
-
-  @property
-  def size(self):
-    return len(self._id_to_word)
-
-  def word_to_id(self, word):
-    if word in self._word_to_id:
-      return self._word_to_id[word]
-    return self.unk
-
-  def id_to_word(self, cur_id):
-    if cur_id < self.size:
-      return self._id_to_word[cur_id]
-    return 'ERROR'
-
-  def decode(self, cur_ids):
-    """Convert a list of ids to a sentence, with space inserted."""
-    return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
-
-  def encode(self, sentence):
-    """Convert a sentence to a list of ids, with special tokens added."""
-    word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()]
-    return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)
-
-
-class CharsVocabulary(Vocabulary):
-  """Vocabulary containing character-level information."""
-
-  def __init__(self, filename, max_word_length):
-    super(CharsVocabulary, self).__init__(filename)
-    self._max_word_length = max_word_length
-    chars_set = set()
-
-    for word in self._id_to_word:
-      chars_set |= set(word)
-
-    free_ids = []
-    for i in range(256):
-      if chr(i) in chars_set:
-        continue
-      free_ids.append(chr(i))
-
-    if len(free_ids) < 5:
-      raise ValueError('Not enough free char ids: %d' % len(free_ids))
-
-    self.bos_char = free_ids[0]  # <begin sentence>
-    self.eos_char = free_ids[1]  # <end sentence>
-    self.bow_char = free_ids[2]  # <begin word>
-    self.eow_char = free_ids[3]  # <end word>
-    self.pad_char = free_ids[4]  # <padding>
-
-    chars_set |= {self.bos_char, self.eos_char, self.bow_char, self.eow_char,
-                  self.pad_char}
-
-    self._char_set = chars_set
-    num_words = len(self._id_to_word)
-
-    self._word_char_ids = np.zeros([num_words, max_word_length], dtype=np.int32)
-
-    self.bos_chars = self._convert_word_to_char_ids(self.bos_char)
-    self.eos_chars = self._convert_word_to_char_ids(self.eos_char)
-
-    for i, word in enumerate(self._id_to_word):
-      self._word_char_ids[i] = self._convert_word_to_char_ids(word)
-
-  @property
-  def word_char_ids(self):
-    return self._word_char_ids
-
-  @property
-  def max_word_length(self):
-    return self._max_word_length
-
-  def _convert_word_to_char_ids(self, word):
-    code = np.zeros([self.max_word_length], dtype=np.int32)
-    code[:] = ord(self.pad_char)
-
-    if len(word) > self.max_word_length - 2:
-      word = word[:self.max_word_length-2]
-    cur_word = self.bow_char + word + self.eow_char
-    for j in range(len(cur_word)):
-      code[j] = ord(cur_word[j])
-    return code
-
-  def word_to_char_ids(self, word):
-    if word in self._word_to_id:
-      return
self._word_char_ids[self._word_to_id[word]] - else: - return self._convert_word_to_char_ids(word) - - def encode_chars(self, sentence): - chars_ids = [self.word_to_char_ids(cur_word) - for cur_word in sentence.split()] - return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars]) - - -def get_batch(generator, batch_size, num_steps, max_word_length, pad=False): - """Read batches of input.""" - cur_stream = [None] * batch_size - - inputs = np.zeros([batch_size, num_steps], np.int32) - char_inputs = np.zeros([batch_size, num_steps, max_word_length], np.int32) - global_word_ids = np.zeros([batch_size, num_steps], np.int32) - targets = np.zeros([batch_size, num_steps], np.int32) - weights = np.ones([batch_size, num_steps], np.float32) - - no_more_data = False - while True: - inputs[:] = 0 - char_inputs[:] = 0 - global_word_ids[:] = 0 - targets[:] = 0 - weights[:] = 0.0 - - for i in range(batch_size): - cur_pos = 0 - - while cur_pos < num_steps: - if cur_stream[i] is None or len(cur_stream[i][0]) <= 1: - try: - cur_stream[i] = list(generator.next()) - except StopIteration: - # No more data, exhaust current streams and quit - no_more_data = True - break - - how_many = min(len(cur_stream[i][0]) - 1, num_steps - cur_pos) - next_pos = cur_pos + how_many - - inputs[i, cur_pos:next_pos] = cur_stream[i][0][:how_many] - char_inputs[i, cur_pos:next_pos] = cur_stream[i][1][:how_many] - global_word_ids[i, cur_pos:next_pos] = cur_stream[i][2][:how_many] - targets[i, cur_pos:next_pos] = cur_stream[i][0][1:how_many+1] - weights[i, cur_pos:next_pos] = 1.0 - - cur_pos = next_pos - cur_stream[i][0] = cur_stream[i][0][how_many:] - cur_stream[i][1] = cur_stream[i][1][how_many:] - cur_stream[i][2] = cur_stream[i][2][how_many:] - - if pad: - break - - if no_more_data and np.sum(weights) == 0: - # There is no more data and this is an empty batch. Done! - break - yield inputs, char_inputs, global_word_ids, targets, weights - - -class LM1BDataset(object): - """Utility class for 1B word benchmark dataset. - - The current implementation reads the data from the tokenized text files. - """ - - def __init__(self, filepattern, vocab): - """Initialize LM1BDataset reader. - - Args: - filepattern: Dataset file pattern. - vocab: Vocabulary. - """ - self._vocab = vocab - self._all_shards = tf.gfile.Glob(filepattern) - tf.logging.info('Found %d shards at %s', len(self._all_shards), filepattern) - - def _load_random_shard(self): - """Randomly select a file and read it.""" - return self._load_shard(random.choice(self._all_shards)) - - def _load_shard(self, shard_name): - """Read one file and convert to ids. - - Args: - shard_name: file path. - - Returns: - list of (id, char_id, global_word_id) tuples. 
- """ - tf.logging.info('Loading data from: %s', shard_name) - with tf.gfile.Open(shard_name) as f: - sentences = f.readlines() - chars_ids = [self.vocab.encode_chars(sentence) for sentence in sentences] - ids = [self.vocab.encode(sentence) for sentence in sentences] - - global_word_ids = [] - current_idx = 0 - for word_ids in ids: - current_size = len(word_ids) - 1 # without symbol - cur_ids = np.arange(current_idx, current_idx + current_size) - global_word_ids.append(cur_ids) - current_idx += current_size - - tf.logging.info('Loaded %d words.', current_idx) - tf.logging.info('Finished loading') - return zip(ids, chars_ids, global_word_ids) - - def _get_sentence(self, forever=True): - while True: - ids = self._load_random_shard() - for current_ids in ids: - yield current_ids - if not forever: - break - - def get_batch(self, batch_size, num_steps, pad=False, forever=True): - return get_batch(self._get_sentence(forever), batch_size, num_steps, - self.vocab.max_word_length, pad=pad) - - @property - def vocab(self): - return self._vocab diff --git a/research/lm_1b/lm_1b_eval.py b/research/lm_1b/lm_1b_eval.py deleted file mode 100644 index ce8634757..000000000 --- a/research/lm_1b/lm_1b_eval.py +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Eval pre-trained 1 billion word language model. -""" -import os -import sys - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from google.protobuf import text_format -import data_utils - -FLAGS = tf.flags.FLAGS -# General flags. -tf.flags.DEFINE_string('mode', 'eval', - 'One of [sample, eval, dump_emb, dump_lstm_emb]. ' - '"sample" mode samples future word predictions, using ' - 'FLAGS.prefix as prefix (prefix could be left empty). ' - '"eval" mode calculates perplexity of the ' - 'FLAGS.input_data. ' - '"dump_emb" mode dumps word and softmax embeddings to ' - 'FLAGS.save_dir. embeddings are dumped in the same ' - 'order as words in vocabulary. All words in vocabulary ' - 'are dumped.' - 'dump_lstm_emb dumps lstm embeddings of FLAGS.sentence ' - 'to FLAGS.save_dir.') -tf.flags.DEFINE_string('pbtxt', '', - 'GraphDef proto text file used to construct model ' - 'structure.') -tf.flags.DEFINE_string('ckpt', '', - 'Checkpoint directory used to fill model values.') -tf.flags.DEFINE_string('vocab_file', '', 'Vocabulary file.') -tf.flags.DEFINE_string('save_dir', '', - 'Used for "dump_emb" mode to save word embeddings.') -# sample mode flags. -tf.flags.DEFINE_string('prefix', '', - 'Used for "sample" mode to predict next words.') -tf.flags.DEFINE_integer('max_sample_words', 100, - 'Sampling stops either when is met or this number ' - 'of steps has passed.') -tf.flags.DEFINE_integer('num_samples', 3, - 'Number of samples to generate for the prefix.') -# dump_lstm_emb mode flags. 
-tf.flags.DEFINE_string('sentence', '',
-                       'Used as input for "dump_lstm_emb" mode.')
-# eval mode flags.
-tf.flags.DEFINE_string('input_data', '',
-                       'Input data files for eval model.')
-tf.flags.DEFINE_integer('max_eval_steps', 1000000,
-                        'Maximum number of steps to run "eval" mode.')
-
-
-# For saving demo resources, use batch size 1 and step 1.
-BATCH_SIZE = 1
-NUM_TIMESTEPS = 1
-MAX_WORD_LEN = 50
-
-
-def _LoadModel(gd_file, ckpt_file):
-  """Load the model from GraphDef and Checkpoint.
-
-  Args:
-    gd_file: GraphDef proto text file.
-    ckpt_file: TensorFlow Checkpoint file.
-
-  Returns:
-    TensorFlow session and tensors dict.
-  """
-  with tf.Graph().as_default():
-    sys.stderr.write('Recovering graph.\n')
-    with tf.gfile.FastGFile(gd_file, 'r') as f:
-      s = f.read().decode()
-      gd = tf.GraphDef()
-      text_format.Merge(s, gd)
-
-    tf.logging.info('Recovering Graph %s', gd_file)
-    t = {}
-    [t['states_init'], t['lstm/lstm_0/control_dependency'],
-     t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'],
-     t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'],
-     t['targets_in'], t['target_weights_in'], t['char_inputs_in'],
-     t['all_embs'], t['softmax_weights'], t['global_step']
-    ] = tf.import_graph_def(gd, {}, ['states_init',
-                                     'lstm/lstm_0/control_dependency:0',
-                                     'lstm/lstm_1/control_dependency:0',
-                                     'softmax_out:0',
-                                     'class_ids_out:0',
-                                     'class_weights_out:0',
-                                     'log_perplexity_out:0',
-                                     'inputs_in:0',
-                                     'targets_in:0',
-                                     'target_weights_in:0',
-                                     'char_inputs_in:0',
-                                     'all_embs_out:0',
-                                     'Reshape_3:0',
-                                     'global_step:0'], name='')
-
-    sys.stderr.write('Recovering checkpoint %s\n' % ckpt_file)
-    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
-    sess.run('save/restore_all', {'save/Const:0': ckpt_file})
-    sess.run(t['states_init'])
-
-  return sess, t
-
-
-def _EvalModel(dataset):
-  """Evaluate model perplexity using provided dataset.
-
-  Args:
-    dataset: LM1BDataset object.
-  """
-  sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt)
-
-  current_step = t['global_step'].eval(session=sess)
-  sys.stderr.write('Loaded step %d.\n' % current_step)
-
-  data_gen = dataset.get_batch(BATCH_SIZE, NUM_TIMESTEPS, forever=False)
-  sum_num = 0.0
-  sum_den = 0.0
-  perplexity = 0.0
-  for i, (inputs, char_inputs, _, targets, weights) in enumerate(data_gen):
-    input_dict = {t['inputs_in']: inputs,
-                  t['targets_in']: targets,
-                  t['target_weights_in']: weights}
-    if 'char_inputs_in' in t:
-      input_dict[t['char_inputs_in']] = char_inputs
-    log_perp = sess.run(t['log_perplexity_out'], feed_dict=input_dict)
-
-    if np.isnan(log_perp):
-      sys.stderr.write('log_perplexity is Nan.\n')
-    else:
-      sum_num += log_perp * weights.mean()
-      sum_den += weights.mean()
-    if sum_den > 0:
-      perplexity = np.exp(sum_num / sum_den)
-
-    sys.stderr.write('Eval Step: %d, Average Perplexity: %f.\n' %
-                     (i, perplexity))
-
-    if i > FLAGS.max_eval_steps:
-      break
-
-
-def _SampleSoftmax(softmax):
-  return min(np.sum(np.cumsum(softmax) < np.random.rand()), len(softmax) - 1)
-
-
-def _SampleModel(prefix_words, vocab):
-  """Predict next words using the given prefix words.
-
-  Args:
-    prefix_words: Prefix words.
-    vocab: Vocabulary. Contains max word char id length and converts between
-      words and ids.
- """ - targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32) - - sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt) - - if prefix_words.find('') != 0: - prefix_words = ' ' + prefix_words - - prefix = [vocab.word_to_id(w) for w in prefix_words.split()] - prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()] - for _ in xrange(FLAGS.num_samples): - inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - char_ids_inputs = np.zeros( - [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) - samples = prefix[:] - char_ids_samples = prefix_char_ids[:] - sent = '' - while True: - inputs[0, 0] = samples[0] - char_ids_inputs[0, 0, :] = char_ids_samples[0] - samples = samples[1:] - char_ids_samples = char_ids_samples[1:] - - softmax = sess.run(t['softmax_out'], - feed_dict={t['char_inputs_in']: char_ids_inputs, - t['inputs_in']: inputs, - t['targets_in']: targets, - t['target_weights_in']: weights}) - - sample = _SampleSoftmax(softmax[0]) - sample_char_ids = vocab.word_to_char_ids(vocab.id_to_word(sample)) - - if not samples: - samples = [sample] - char_ids_samples = [sample_char_ids] - sent += vocab.id_to_word(samples[0]) + ' ' - sys.stderr.write('%s\n' % sent) - - if (vocab.id_to_word(samples[0]) == '' or - len(sent) > FLAGS.max_sample_words): - break - - -def _DumpEmb(vocab): - """Dump the softmax weights and word embeddings to files. - - Args: - vocab: Vocabulary. Contains vocabulary size and converts word to ids. - """ - assert FLAGS.save_dir, 'Must specify FLAGS.save_dir for dump_emb.' - inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32) - - sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt) - - softmax_weights = sess.run(t['softmax_weights']) - fname = FLAGS.save_dir + '/embeddings_softmax.npy' - with tf.gfile.Open(fname, mode='w') as f: - np.save(f, softmax_weights) - sys.stderr.write('Finished softmax weights\n') - - all_embs = np.zeros([vocab.size, 1024]) - for i in xrange(vocab.size): - input_dict = {t['inputs_in']: inputs, - t['targets_in']: targets, - t['target_weights_in']: weights} - if 'char_inputs_in' in t: - input_dict[t['char_inputs_in']] = ( - vocab.word_char_ids[i].reshape([-1, 1, MAX_WORD_LEN])) - embs = sess.run(t['all_embs'], input_dict) - all_embs[i, :] = embs - sys.stderr.write('Finished word embedding %d/%d\n' % (i, vocab.size)) - - fname = FLAGS.save_dir + '/embeddings_char_cnn.npy' - with tf.gfile.Open(fname, mode='w') as f: - np.save(f, all_embs) - sys.stderr.write('Embedding file saved\n') - - -def _DumpSentenceEmbedding(sentence, vocab): - """Predict next words using the given prefix words. - - Args: - sentence: Sentence words. - vocab: Vocabulary. Contains max word chard id length and converts between - words and ids. 
- """ - targets = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - weights = np.ones([BATCH_SIZE, NUM_TIMESTEPS], np.float32) - - sess, t = _LoadModel(FLAGS.pbtxt, FLAGS.ckpt) - - if sentence.find('') != 0: - sentence = ' ' + sentence - - word_ids = [vocab.word_to_id(w) for w in sentence.split()] - char_ids = [vocab.word_to_char_ids(w) for w in sentence.split()] - - inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) - char_ids_inputs = np.zeros( - [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) - for i in xrange(len(word_ids)): - inputs[0, 0] = word_ids[i] - char_ids_inputs[0, 0, :] = char_ids[i] - - # Add 'lstm/lstm_0/control_dependency' if you want to dump previous layer - # LSTM. - lstm_emb = sess.run(t['lstm/lstm_1/control_dependency'], - feed_dict={t['char_inputs_in']: char_ids_inputs, - t['inputs_in']: inputs, - t['targets_in']: targets, - t['target_weights_in']: weights}) - - fname = os.path.join(FLAGS.save_dir, 'lstm_emb_step_%d.npy' % i) - with tf.gfile.Open(fname, mode='w') as f: - np.save(f, lstm_emb) - sys.stderr.write('LSTM embedding step %d file saved\n' % i) - - -def main(unused_argv): - vocab = data_utils.CharsVocabulary(FLAGS.vocab_file, MAX_WORD_LEN) - - if FLAGS.mode == 'eval': - dataset = data_utils.LM1BDataset(FLAGS.input_data, vocab) - _EvalModel(dataset) - elif FLAGS.mode == 'sample': - _SampleModel(FLAGS.prefix, vocab) - elif FLAGS.mode == 'dump_emb': - _DumpEmb(vocab) - elif FLAGS.mode == 'dump_lstm_emb': - _DumpSentenceEmbedding(FLAGS.sentence, vocab) - else: - raise Exception('Mode not supported.') - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/lm_commonsense/README.md b/research/lm_commonsense/README.md deleted file mode 100644 index 78c8f53ca..000000000 --- a/research/lm_commonsense/README.md +++ /dev/null @@ -1,170 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# A Simple Method for Commonsense Reasoning - -This repository contains code to reproduce results from [*A Simple Method for Commonsense Reasoning*](https://arxiv.org/abs/1806.02847). - -Authors and contact: - -* Trieu H. Trinh (thtrieu@google.com, github: thtrieu) -* Quoc V. Le (qvl@google.com) - -## TL;DR - -Commonsense reasoning is a long-standing challenge for deep learning. For example, -it is difficult to use neural networks to tackle the Winograd Schema dataset - a difficult subset of Pronoun Disambiguation problems. In this work, we use language models to score substitued sentences to decide the correct reference of the ambiguous pronoun (see Figure below for an example). - -![Figure 1. Overview of our method.](method.jpg) - -This simple unsupervised method achieves new state-of-the-art (*as of June 1st, 2018*) results on both benchmark PDP-60 and WSC-273 (See Table below), without using rule-based reasoning nor expensive annotated knowledge bases. 
- -| Commonsense-reasoning test | Previous best result | Ours | -| ----------------------------|:----------------------:|:-----:| -| Pronoun Disambiguation | 66.7% | 70% | -| Winograd Schema Challenge | 52.8% | 63.7% | - - - -## Citation - -If you use our released models below in your publication, please cite the original paper: - -@article{TBD} - - -## Requirements -* Python >=2.6 -* Tensorflow >= v1.4 -* Numpy >= 1.12.1 - -## Details of this release - -The open-sourced components include: - -* Test sets from Pronoun Disambiguation Problem (PDP-60) and Winograd Schema Challenges (WSC-273). -* Tensorflow metagraph and checkpoints of 14 language models (See Appendix A in the paper). -* A vocabulary file. -* Code to reproduce results from the original paper. - -## How to run - -### 1. Download data files - -Download all files from the [Google Cloud Storage of this project](https://console.cloud.google.com/storage/browser/commonsense-reasoning/). The easiest way is to install and use `gsutil cp` command-line tool (See [install gsutil](https://cloud.google.com/storage/docs/gsutil_install)). - - -```shell -# Download everything from the project gs://commonsense-reasoning -$ gsutil cp -R gs://commonsense-reasoning/* . -Copying gs://commonsense-reasoning/reproduce/vocab.txt... -Copying gs://commonsense-reasoning/reproduce/commonsense_test/pdp60.json... -Copying gs://commonsense-reasoning/reproduce/commonsense_test/wsc273.json... - -...(omitted) -``` - -All downloaded content should be in `./reproduce/`. This includes two tests `pdp60.json` and `wsc273.json`, a vocabulary file `vocab.txt` and checkpoints for all 14 language models, each includes three files (`.data`, `.index` and `.meta`). All checkpoint names start with `ckpt-best` since they are saved at the best perplexity on a hold-out text corpus. - -```shell -# Check for the content -$ ls reproduce/* -reproduce/vocab.txt - -reproduce/commonsense_test: -pdp60.json wsc273.json - -reproduce/lm01: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm02: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm03: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm04: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm05: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm06: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm07: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm08: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm09: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm10: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm11: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm12: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm13: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta - -reproduce/lm14: -ckpt-best.data-00000-of-00001 ckpt-best.index ckpt-best.meta -``` - -### 2. Run evaluation code - -To reproduce results from the paper, simply run `eval.py` script. - -```shell -$ python eval.py --data_dir=reproduce - -Restored from ./reproduce/lm01 -Reset RNN states. 
-Processing patch (1, 1) / (2, 4) -Probs for -[['Then' 'Dad' 'figured' ..., 'man' "'s" 'board-bill'] - ['Then' 'Dad' 'figured' ..., 'man' "'s" 'board-bill'] - ['Always' 'before' ',' ..., 'now' ',' 'for'] - ..., - ['Mark' 'was' 'close' ..., 'promising' 'him' ','] - ['Mark' 'was' 'close' ..., 'promising' 'him' ','] - ['Mark' 'was' 'close' ..., 'promising' 'him' ',']] -= -[[ 1.64250596e-05 1.77780055e-06 4.14267970e-06 ..., 1.87315454e-03 - 1.57723188e-01 6.31845817e-02] - [ 1.64250596e-05 1.77780055e-06 4.14267970e-06 ..., 1.87315454e-03 - 1.57723188e-01 6.31845817e-02] - [ 1.28243030e-07 3.80435935e-03 1.12383246e-01 ..., 9.67682712e-03 - 2.17407525e-01 1.08243264e-01] - ..., - [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 - 3.37066874e-02 7.89367408e-02] - [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 - 3.37066874e-02 7.89367408e-02] - [ 1.15557734e-04 2.92792241e-03 3.46455898e-04 ..., 2.72328052e-05 - 3.37066874e-02 7.89367408e-02]] -Processing patch (1, 2) / (2, 4) - -...(omitted) - -Accuracy of 1 LM(s) on pdp60 = 0.6 - -...(omitted) - -Accuracy of 5 LM(s) on pdp60 = 0.7 - -...(omitted) - -Accuracy of 10 LM(s) on wsc273 = 0.615 - -...(omitted) - -Accuracy of 14 LM(s) on wsc273 = 0.637 -``` diff --git a/research/lm_commonsense/eval.py b/research/lm_commonsense/eval.py deleted file mode 100644 index e5b7ff98b..000000000 --- a/research/lm_commonsense/eval.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import pickle as pkl -import numpy as np -import tensorflow as tf -import utils - -tf.app.flags.DEFINE_string( - 'data_dir', 'reproduce', - 'Path to directory containing data and model checkpoints.') - - -FLAGS = tf.app.flags.FLAGS - - -class EnsembleLM(object): - """Ensemble of language models.""" - - def __init__(self, test_data_name='wsc273'): - vocab_file = os.path.join(FLAGS.data_dir, 'vocab.txt') - self.vocab = utils.CharsVocabulary(vocab_file, 50) - assert test_data_name in ['pdp60', 'wsc273'], ( - 'Test data must be pdp60 or wsc273, got {}'.format(test_data_name)) - self.test_data_name = test_data_name - - test_data = utils.parse_commonsense_reasoning_test(test_data_name) - self.question_ids, self.sentences, self.labels = test_data - self.all_probs = [] # aggregate single-model prediction here. - - def add_single_model(self, model_name='lm1'): - """Add a single model into the current ensemble.""" - # Create single LM - single_lm = SingleRecurrentLanguageModel(self.vocab, model_name) - - # Add the single LM prediction. 
-    probs = single_lm.assign_probs(self.sentences, self.test_data_name)
-    self.all_probs.append(probs)
-    print('Done adding {}'.format(model_name))
-
-  def evaluate(self):
-    """Evaluate the current ensemble."""
-    # Attach word probabilities and correctness label to each substitution
-    ensembled_probs = sum(self.all_probs) / len(self.all_probs)
-    scorings = []
-    for i, sentence in enumerate(self.sentences):
-      correctness = self.labels[i]
-      word_probs = ensembled_probs[i, :len(sentence)]
-      joint_prob = np.prod(word_probs, dtype=np.float64)
-
-      scorings.append(dict(
-          correctness=correctness,
-          sentence=sentence,
-          joint_prob=joint_prob,
-          word_probs=word_probs))
-    scoring_mode = 'full' if self.test_data_name == 'pdp60' else 'partial'
-    return utils.compare_substitutions(
-        self.question_ids, scorings, scoring_mode)
-
-
-class SingleRecurrentLanguageModel(object):
-  """Single Recurrent Language Model."""
-
-  def __init__(self, vocab, model_name='lm01'):
-    self.vocab = vocab
-    self.log_dir = os.path.join(FLAGS.data_dir, model_name)
-
-  def reset(self):
-    self.sess.run(self.tensors['states_init'])
-
-  def _score(self, word_patch):
-    """Score a matrix of shape (batch_size, num_timesteps+1) of str tokens."""
-    word_ids = np.array(
-        [[self.vocab.word_to_id(word) for word in row]
-         for row in word_patch])
-    char_ids = np.array(
-        [[self.vocab.word_to_char_ids(word) for word in row]
-         for row in word_patch])
-    print('Probs for \n{}\n='.format(np.array(word_patch)[:, 1:]))
-
-    input_ids, target_ids = word_ids[:, :-1], word_ids[:, 1:]
-    input_char_ids = char_ids[:, :-1, :]
-
-    softmax = self.sess.run(self.tensors['softmax_out'], feed_dict={
-        self.tensors['inputs_in']: input_ids,
-        self.tensors['char_inputs_in']: input_char_ids
-    })
-
-    batch_size, num_timesteps = self.shape
-    softmax = softmax.reshape((num_timesteps, batch_size, -1))
-    softmax = np.transpose(softmax, [1, 0, 2])
-    probs = np.array([[softmax[row, col, target_ids[row, col]]
-                       for col in range(num_timesteps)]
-                      for row in range(batch_size)])
-    print(probs)
-    return probs
-
-  def _score_patches(self, word_patches):
-    """Score a 2D matrix of word_patches and stitch results together."""
-    batch_size, num_timesteps = self.shape
-    nrow, ncol = len(word_patches), len(word_patches[0])
-    max_len = num_timesteps * ncol
-    probs = np.zeros([0, max_len])  # accumulate results into this.
-
-    # Loop through the 2D matrix of word_patches and score each.
-    for i, row in enumerate(word_patches):
-      print('Reset RNN states.')
-      self.reset()  # reset states before processing each row.
-      row_probs = np.zeros([batch_size, 0])
-      for j, word_patch in enumerate(row):
-        print('Processing patch '
-              '({}, {}) / ({}, {})'.format(i+1, j+1, nrow, ncol))
-        patch_probs = (self._score(word_patch) if word_patch else
-                       np.zeros([batch_size, num_timesteps]))
-        row_probs = np.concatenate([row_probs, patch_probs], 1)
-      probs = np.concatenate([probs, row_probs], 0)
-    return probs
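The reshape and transpose in `_score` above are the easy part to get wrong: the restored LM emits one softmax row per (timestep, batch) pair, which must be regrouped to (batch, time, vocab) before target probabilities can be gathered. A minimal NumPy sketch of the same bookkeeping, on made-up shapes and values:

```python
import numpy as np

# Toy stand-ins for the LM's (batch_size, num_timesteps, vocab_size).
batch_size, num_timesteps, vocab_size = 2, 3, 5
rng = np.random.RandomState(0)

# One softmax row per (timestep, batch) pair, flattened, as sess.run returns.
softmax = rng.dirichlet(np.ones(vocab_size), size=num_timesteps * batch_size)

# The same two steps as in _score: group by timestep, then put batch first.
softmax = softmax.reshape((num_timesteps, batch_size, -1))
softmax = np.transpose(softmax, [1, 0, 2])  # now (batch, time, vocab)

# Gather the probability of each target word, exactly as _score does.
target_ids = rng.randint(vocab_size, size=(batch_size, num_timesteps))
probs = np.array([[softmax[row, col, target_ids[row, col]]
                   for col in range(num_timesteps)]
                  for row in range(batch_size)])
print(probs.shape)  # (2, 3): one probability per target position
```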
-
-  def assign_probs(self, sentences, test_data_name='wsc273'):
-    """Compute and return this LM's word probabilities on a test set."""
-
-    probs_cache = os.path.join(self.log_dir, '{}.probs'.format(test_data_name))
-    if os.path.exists(probs_cache):
-      print('Reading cached result from {}'.format(probs_cache))
-      with tf.gfile.Open(probs_cache, 'r') as f:
-        probs = pkl.load(f)
-    else:
-      tf.reset_default_graph()
-      self.sess = tf.Session()
-      # Build the graph.
-      saver = tf.train.import_meta_graph(
-          os.path.join(self.log_dir, 'ckpt-best.meta'))
-      saver.restore(self.sess, os.path.join(self.log_dir, 'ckpt-best'))
-      print('Restored from {}'.format(self.log_dir))
-      graph = tf.get_default_graph()
-      self.tensors = dict(
-          inputs_in=graph.get_tensor_by_name('test_inputs_in:0'),
-          char_inputs_in=graph.get_tensor_by_name('test_char_inputs_in:0'),
-          softmax_out=graph.get_tensor_by_name('SotaRNN_1/softmax_out:0'),
-          states_init=graph.get_operation_by_name('SotaRNN_1/states_init'))
-      self.shape = self.tensors['inputs_in'].shape.as_list()
-
-      # Cut sentences into patches of a shape processable by the LM.
-      batch_size, num_timesteps = self.shape
-      word_patches = utils.cut_to_patches(sentences, batch_size, num_timesteps)
-      probs = self._score_patches(word_patches)
-
-      # Cache the probs since they are expensive to evaluate.
-      with tf.gfile.Open(probs_cache, 'w') as f:
-        pkl.dump(probs, f)
-    return probs
-
-
-def evaluate_ensemble(test_data_name, number_of_lms):
-  ensemble = EnsembleLM(test_data_name)
-  model_list = ['lm{:02d}'.format(i+1) for i in range(number_of_lms)]
-  for model_name in model_list:
-    ensemble.add_single_model(model_name)
-  accuracy = ensemble.evaluate()
-  print('Accuracy of {} LM(s) on {} = {}'.format(
-      number_of_lms, test_data_name, accuracy))
-
-
-def main(_):
-  evaluate_ensemble('pdp60', 1)  # 60%
-  evaluate_ensemble('pdp60', 5)  # 70%
-  evaluate_ensemble('wsc273', 10)  # 61.5%
-  evaluate_ensemble('wsc273', 14)  # 63.7%
-
-
-if __name__ == '__main__':
-  tf.app.run(main)
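Because `assign_probs` caches each model's word probabilities on disk, a single model can be re-inspected without rebuilding its graph. A minimal sketch, assuming a previous run has written the cache (the path and sentence length below are illustrative):

```python
import pickle as pkl

import numpy as np
import tensorflow as tf

# Written by SingleRecurrentLanguageModel.assign_probs on an earlier run.
probs_cache = 'reproduce/lm01/wsc273.probs'

with tf.gfile.Open(probs_cache, 'r') as f:
  probs = pkl.load(f)

# One padded row per candidate substitution; EnsembleLM.evaluate truncates
# each row to its sentence length before taking the product.
sentence_length = 10  # illustrative; use len(sentence) for a real test item.
joint_prob = np.prod(probs[0][:sentence_length], dtype=np.float64)
print(joint_prob)
```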
diff --git a/research/lm_commonsense/method.jpg b/research/lm_commonsense/method.jpg
deleted file mode 100644
index ee8a5506fccca3cbb67f7bda0ccef78303cb228b..0000000000000000000000000000000000000000
GIT binary patch
[69059 bytes of base85-encoded JPEG data omitted]
z)SA-|xt?fOwcZxTIHR>+|B)q+K_Q$F4o+N!Wz7Z_8cmN`69om&C^^Ygpw7}2q1ZNS zUl`N1)WzCa^-TzoYH04H`w39H>i!lWKk9uz{Ir8tlzQkEq5$I-{Y5HVTGu%dr0iC#P_p( zEZi^&D#>x9`dTe865;neSiXg$+nR%*k(0*$8s*@wc+%X?+1CHfgj#6QPWt;YMEhly>?g znj_IKuI**3x28q?yu|ZTFZIMLDY9Oo=VKzmbM0L`hfCfJ+!7llabJk&%d=S*(Hx(6 znMq5xegI}_>xomM13A;_we2|QvP@%1nwyz6(gCB{vM$5!2Qeti$Cf^x7c8`OyAA*# zNW*<%z1c9b9}w5Mfjwx+3WBU#J^-`>2YWDS>#zdix3vx-^2-xEXos7fAzP&uRh8?J zyS;f3m(P_y6`weHt~N;H&Mg+%r$w+2rg#teRp=#51j2CB4eG>+)!%BmZUw{$e~^|e z!RdQ@$g=#f@)gj5oAf<8r4=3AF8pPEqB|HnQ=+qTVZErGz6%&Jl0Mz z*Dl2kS7Z7q9N@0<+dYcYrOWKiVs5W;?CUBU{ad>yhmx3w_F1!%ju^>))IPb9Ccmov z$@~2sMhS(i2Ap&Oc(msD0fXHX!=G1JFH^rP5z zWqjJ*lH2O&e_xUivo}Q0%US+A^Kan2PCe(ED42ZPEqrHo}x@`FLi-6ozaYPk(V98TEb7 zRkfUng7G_AbD}4P=9hGRzi3qBW-Zh0e!ZkS$zpg;;SWnXBw(JDbBEEgKhm&TXeh)tR~kf-Eez|>%bfi(IvN6~MaQ7Pq9jY=WSu&4ej_@dG9 zjBcv)xIg43*c%rX7C{j<-1*Uo5rq-5;p^4pDH`_3P>S13C9> zQ+VH<+FYYwH?M{9y&1bn8Q4n!D8*+DO@ZPGHR&b{JdR3Ekk};K3>nobDESMt-ZuT` zUzTzIUvFpl>UWYVu=-JO*|T+iO3y!ovPVJ|%2yQy%62k_!`JlbwH@bk;>Vflnn%(l z>OqGJP^HFW*3w01G2(+(sqGwIw{$%M1@|0U0vavXlcs-;GtwL@DI(})u7hI-;21F6 z*f>;5NTl#5$9l>ZFPb({69uxJCFx9-N)OO+VBfOjYGj-tV5j~2G@UzJy>0ADIK%8hlbV`#{qdU~+JQ*MCHnP&^Po&|#;~G{5@)fN?h?W!+ zF>8$A)^&XX1?1)8pmEnN%_@4Dzd)~>L}WI_m;?O%i57#^V^Z&~iEnQ{k|igWB9&L9 z$B1v*mgj=NL8}Z+@9*49n|3U}dH-R0vO7NmnVd*_&of|Fho+T#2TCy^%`{bxC>JZ= z(9c<%OL-9cj?JE$PXt$V`x4UT&?EUT&@2eB+yatkd7a#I5abUlsx^lJV-s8LQ#ph< zp_+hU`4*O3|FmtfV&#rWf(bFrY-=4lVSP3K2E_-PbH@m8aWJIXjw2QZo%C~7^|m3jG~=ZYa% zl)8%iuymP)C~Hi(jE{~#nb-F6C%Tqz=H7lu6W4t|R6YI8N`5lXBa`bpk|ImMcbH_ZCPbReRxxWez`{b#$gFOCvS%(VvNF|^q#G0i zC!CmG4N^0+29LQ0$geC4X*T=htc{a##?joQ>HDEYeN?ss`rSWbSdSrU7W*)0-nh`e zN(Z&4-n>?tV<$VxK)q^y+W5_fVPGvH63VNewhv%Rfdv%zG|%}fHmtZJI|c^yk;%lW za)e%@{rgR!S9_Z@*)ml^XtyO{&)kPLQ5dk8qWwCTfN^aDDq`#!&m>>!oVjTlfGFO+ zqADlwm5UPef{5){?*KR@tJdtxj?2Ah3#5A(U!*&+ijPq71b>oiZ@-FA=or6!flZ5w z$)hVR)D)PJ?>ipSu5e6y%Qspf6{rd}U(~P*1l&&){-I>hwdTW#4x8&o2oWzX$!_+0 z+esCvl#5;TqR$iZKp>Nl3I0*4HywBpsfs8+qblXOX}|FPdE|GpNLKF9GM50ViM_qI zEM|-9?4>iwy8I*YrX?wCuy%DX7%N^0{f@)}3%9?U;r}kFs3AQ}xV&E2Z^essD`a7! 
zm6Q1Lv;#_2IVMeGo3^>oZ?!jD%{4Ab%Ru%+(;4)g#K&$E@P<&fxI*XcKzkJjz(oDX zy`y33KSzPikhnTypdQkm-pyz&TEQu}IQSw|bM&gRvfmOxGX&+UXvQ5I!1on53#a0x z95m4~2dA)Ne_m2$B`8 zji)vT9WBD|kCW7&op4$A-)lmwk{TrAIzO%Nz*UV5nHZlvwS@pjiufbUwOuHbxs_#d zeFDX;WHFL7N7Y8JVPtytx;&oAJ3=}MEoZ=bmgT7e`I{ySC5Egjxjmis5@Yc3(9xJY zdYxE?+6-t&ZKF;aCG5?7={Bk&rhhesr?JZ-Nn)@#>7!5%|1Xey>*Qma_L>RYidC0+HM;`Pg?yNRNpIIRMij3^?cXrD z?!4{to6FAQQ~D2gt4B}oFp`l@xy^#_G~sD;>g#;=s_+sG@S1Klzg ztVEH5-T4@)ZTeJ~M|KAff5v>1Kj!y5mTp;BV&3YqrknEe*NhewUAc>$V7PjHM@4`; z{KcZrRp;d^WdU4Y76f&;kXPAO2LuOLEyagv*ld83BB(d!bgdj+92-7Px@qs^34J^C zMez`Z<>GMQf=FlL%^lKLj(@tel}avZ1Vb1sUU>V>bYK`8%5#SpUY{1I6`LA;x{wX)= me': - self._bos = idx - elif word_name == '': - self._eos = idx - elif word_name == '': - self._unk = idx - if word_name == '!!!MAXTERMID': - continue - - self._id_to_word.append(word_name) - self._word_to_id[word_name] = idx - idx += 1 - - @property - def bos(self): - return self._bos - - @property - def eos(self): - return self._eos - - @property - def unk(self): - return self._unk - - @property - def size(self): - return len(self._id_to_word) - - def word_to_id(self, word): - if word in self._word_to_id: - return self._word_to_id[word] - else: - if word.lower() in self._word_to_id: - return self._word_to_id[word.lower()] - return self.unk - - def id_to_word(self, cur_id): - if cur_id < self.size: - return self._id_to_word[int(cur_id)] - return '' - - def decode(self, cur_ids): - return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids]) - - def encode(self, sentence): - word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()] - return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32) - - -class CharsVocabulary(Vocabulary): - """Vocabulary containing character-level information.""" - - def __init__(self, filename, max_word_length): - super(CharsVocabulary, self).__init__(filename) - - self._max_word_length = max_word_length - chars_set = set() - - for word in self._id_to_word: - chars_set |= set(word) - - free_ids = [] - for i in range(256): - if chr(i) in chars_set: - continue - free_ids.append(chr(i)) - - if len(free_ids) < 5: - raise ValueError('Not enough free char ids: %d' % len(free_ids)) - - self.bos_char = free_ids[0] # - self.eos_char = free_ids[1] # - self.bow_char = free_ids[2] # - self.eow_char = free_ids[3] # - self.pad_char = free_ids[4] # - - chars_set |= {self.bos_char, self.eos_char, self.bow_char, self.eow_char, - self.pad_char} - - self._char_set = chars_set - num_words = len(self._id_to_word) - - self._word_char_ids = np.zeros([num_words, max_word_length], dtype=np.int32) - - self.bos_chars = self._convert_word_to_char_ids(self.bos_char) - self.eos_chars = self._convert_word_to_char_ids(self.eos_char) - - for i, word in enumerate(self._id_to_word): - if i == self.bos: - self._word_char_ids[i] = self.bos_chars - elif i == self.eos: - self._word_char_ids[i] = self.eos_chars - else: - self._word_char_ids[i] = self._convert_word_to_char_ids(word) - - @property - def max_word_length(self): - return self._max_word_length - - def _convert_word_to_char_ids(self, word): - code = np.zeros([self.max_word_length], dtype=np.int32) - code[:] = ord(self.pad_char) - - if len(word) > self.max_word_length - 2: - word = word[:self.max_word_length-2] - cur_word = self.bow_char + word + self.eow_char - for j in range(len(cur_word)): - code[j] = ord(cur_word[j]) - return code - - def word_to_char_ids(self, word): - if word in self._word_to_id: - return self._word_char_ids[self._word_to_id[word]] - else: - return 
self._convert_word_to_char_ids(word) - - def encode_chars(self, sentence): - chars_ids = [self.word_to_char_ids(cur_word) - for cur_word in sentence.split()] - return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars]) - - -_SPECIAL_CHAR_MAP = { - '\xe2\x80\x98': '\'', - '\xe2\x80\x99': '\'', - '\xe2\x80\x9c': '"', - '\xe2\x80\x9d': '"', - '\xe2\x80\x93': '-', - '\xe2\x80\x94': '-', - '\xe2\x88\x92': '-', - '\xce\x84': '\'', - '\xc2\xb4': '\'', - '`': '\'' -} - -_START_SPECIAL_CHARS = ['.', ',', '?', '!', ';', ':', '[', ']', '\'', '+', '/', - '\xc2\xa3', '$', '~', '*', '%', '{', '}', '#', '&', '-', - '"', '(', ')', '='] + list(_SPECIAL_CHAR_MAP.keys()) -_SPECIAL_CHARS = _START_SPECIAL_CHARS + [ - '\'s', '\'m', '\'t', '\'re', '\'d', '\'ve', '\'ll'] - - -def tokenize(sentence): - """Tokenize a sentence.""" - sentence = str(sentence) - words = sentence.strip().split() - tokenized = [] # return this - - for word in words: - if word.lower() in ['mr.', 'ms.']: - tokenized.append(word) - continue - - # Split special chars at the start of word - will_split = True - while will_split: - will_split = False - for char in _START_SPECIAL_CHARS: - if word.startswith(char): - tokenized.append(char) - word = word[len(char):] - will_split = True - - # Split special chars at the end of word - special_end_tokens = [] - will_split = True - while will_split: - will_split = False - for char in _SPECIAL_CHARS: - if word.endswith(char): - special_end_tokens = [char] + special_end_tokens - word = word[:-len(char)] - will_split = True - - if word: - tokenized.append(word) - tokenized += special_end_tokens - - # Add necessary end of sentence token. - if tokenized[-1] not in ['.', '!', '?']: - tokenized += ['.'] - return tokenized - - -def parse_commonsense_reasoning_test(test_data_name): - """Read JSON test data.""" - with tf.gfile.Open(os.path.join( - FLAGS.data_dir, 'commonsense_test', - '{}.json'.format(test_data_name)), 'r') as f: - data = json.load(f) - - question_ids = [d['question_id'] for d in data] - sentences = [tokenize(d['substitution']) for d in data] - labels = [d['correctness'] for d in data] - - return question_ids, sentences, labels - - -PAD = '' - - -def cut_to_patches(sentences, batch_size, num_timesteps): - """Cut sentences into patches of shape (batch_size, num_timesteps). - - Args: - sentences: a list of sentences, each sentence is a list of str token. - batch_size: batch size - num_timesteps: number of backprop step - - Returns: - patches: A 2D matrix, - each entry is a matrix of shape (batch_size, num_timesteps). - """ - preprocessed = [['']+sentence+[''] for sentence in sentences] - max_len = max([len(sent) for sent in preprocessed]) - - # Pad to shape [height, width] - # where height is a multiple of batch_size - # and width is a multiple of num_timesteps - nrow = int(np.ceil(len(preprocessed) * 1.0 / batch_size)) - ncol = int(np.ceil(max_len * 1.0 / num_timesteps)) - height, width = nrow * batch_size, ncol * num_timesteps + 1 - preprocessed = [sent + [PAD] * (width - len(sent)) for sent in preprocessed] - preprocessed += [[PAD] * width] * (height - len(preprocessed)) - - # Cut preprocessed into patches of shape [batch_size, num_timesteps] - patches = [] - for row in range(nrow): - patches.append([]) - for col in range(ncol): - patch = [sent[col * num_timesteps: - (col+1) * num_timesteps + 1] - for sent in preprocessed[row * batch_size: - (row+1) * batch_size]] - if np.all(np.array(patch)[:, 1:] == PAD): - patch = None # no need to process this patch. 
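To make the splitting rules concrete, here is what `tokenize` produces for a small input, assuming the module above is importable as `utils`; the trailing period is appended by the end-of-sentence rule:

```python
from utils import tokenize

print(tokenize("I like the cat's color"))
# ['I', 'like', 'the', 'cat', "'s", 'color', '.']
```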
- patches[-1].append(patch) - return patches - - -def _substitution_mask(sent1, sent2): - """Binary mask identifying substituted part in two sentences. - - Example sentence and their mask: - First sentence = "I like the cat 's color" - 0 0 0 1 0 0 - Second sentence = "I like the yellow dog 's color" - 0 0 0 1 1 0 0 - - Args: - sent1: first sentence - sent2: second sentence - - Returns: - mask1: mask for first sentence - mask2: mask for second sentence - """ - mask1_start, mask2_start = [], [] - while sent1[0] == sent2[0]: - sent1 = sent1[1:] - sent2 = sent2[1:] - mask1_start.append(0.) - mask2_start.append(0.) - - mask1_end, mask2_end = [], [] - while sent1[-1] == sent2[-1]: - if (len(sent1) == 1) or (len(sent2) == 1): - break - sent1 = sent1[:-1] - sent2 = sent2[:-1] - mask1_end = [0.] + mask1_end - mask2_end = [0.] + mask2_end - - assert sent1 or sent2, 'Two sentences are identical.' - return (mask1_start + [1.] * len(sent1) + mask1_end, - mask2_start + [1.] * len(sent2) + mask2_end) - - -def _convert_to_partial(scoring1, scoring2): - """Convert full scoring into partial scoring.""" - mask1, mask2 = _substitution_mask( - scoring1['sentence'], scoring2['sentence']) - - def _partial_score(scoring, mask): - word_probs = [max(_) for _ in zip(scoring['word_probs'], mask)] - scoring.update(word_probs=word_probs, - joint_prob=np.prod(word_probs)) - - _partial_score(scoring1, mask1) - _partial_score(scoring2, mask2) - - -def compare_substitutions(question_ids, scorings, mode='full'): - """Return accuracy by comparing two consecutive scorings.""" - prediction_correctness = [] - # Compare two consecutive substitutions - for i in range(len(scorings) // 2): - scoring1, scoring2 = scorings[2*i: 2*i+2] - if mode == 'partial': # fix joint prob into partial prob - _convert_to_partial(scoring1, scoring2) - - prediction_correctness.append( - (scoring2['joint_prob'] > scoring1['joint_prob']) == - scoring2['correctness']) - - # Two consecutive substitutions always belong to the same question - question_ids = [qid for i, qid in enumerate(question_ids) if i % 2 == 0] - assert len(question_ids) == len(prediction_correctness) - num_questions = len(set(question_ids)) - - # Question is correctly answered only if - # all predictions of the same question_id is correct - num_correct_answer = 0 - previous_qid = None - correctly_answered = False - for predict, qid in zip(prediction_correctness, question_ids): - if qid != previous_qid: - previous_qid = qid - num_correct_answer += int(correctly_answered) - correctly_answered = True - correctly_answered = correctly_answered and predict - num_correct_answer += int(correctly_answered) - - return num_correct_answer / num_questions diff --git a/research/maskgan/README.md b/research/maskgan/README.md deleted file mode 100644 index 10ee8a4c4..000000000 --- a/research/maskgan/README.md +++ /dev/null @@ -1,111 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# MaskGAN: Better Text Generation via Filling in the ______ - -Code for [*MaskGAN: Better Text Generation via Filling in the -______*](https://arxiv.org/abs/1801.07736) published at ICLR 2018. - -## Requirements - -* TensorFlow >= v1.5 - -## Instructions - -Warning: The open-source version of this code is still in the process of being -tested. 
tested. Pretraining may not work correctly.
-
-For training on PTB:
-
-1. Follow the instructions in the [TensorFlow RNN Language Model Tutorial](https://www.tensorflow.org/tutorials/sequences/recurrent) to train a language model on the PTB dataset.
-Copy the PTB data downloaded for that tutorial to the folder "/tmp/ptb". It should contain the following three files: ptb.train.txt, ptb.test.txt, ptb.valid.txt.
-Make a folder /tmp/pretrain-lm and copy the checkpoints from the tutorial into it.
-
-
-2. Run MaskGAN in MLE pretraining mode. If step 1 was not run*, set
-`language_model_ckpt_dir` to empty.
-
-```bash
-python train_mask_gan.py \
-  --data_dir='/tmp/ptb' \
-  --batch_size=20 \
-  --sequence_length=20 \
-  --base_directory='/tmp/maskGAN' \
-  --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,dis_num_layers=2,gen_learning_rate=0.00074876,dis_learning_rate=5e-4,baseline_decay=0.99,dis_train_iterations=1,gen_learning_rate_decay=0.95" \
-  --mode='TRAIN' \
-  --max_steps=100000 \
-  --language_model_ckpt_dir=/tmp/pretrain-lm/ \
-  --generator_model='seq2seq_vd' \
-  --discriminator_model='rnn_zaremba' \
-  --is_present_rate=0.5 \
-  --summaries_every=10 \
-  --print_every=250 \
-  --max_num_to_print=3 \
-  --gen_training_strategy=cross_entropy \
-  --seq2seq_share_embedding
-```
-
-3. Run MaskGAN in GAN mode. If step 2 was not run, set `maskgan_ckpt` to empty.
-
-```bash
-python train_mask_gan.py \
-  --data_dir='/tmp/ptb' \
-  --batch_size=128 \
-  --sequence_length=20 \
-  --base_directory='/tmp/maskGAN' \
-  --mask_strategy=contiguous \
-  --maskgan_ckpt='/tmp/maskGAN' \
-  --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,dis_num_layers=2,gen_learning_rate=0.000038877,gen_learning_rate_decay=1.0,gen_full_learning_rate_steps=2000000,gen_vd_keep_prob=0.33971,rl_discount_rate=0.89072,dis_learning_rate=5e-4,baseline_decay=0.99,dis_train_iterations=2,dis_pretrain_learning_rate=0.005,critic_learning_rate=5.1761e-7,dis_vd_keep_prob=0.71940" \
-  --mode='TRAIN' \
-  --max_steps=100000 \
-  --generator_model='seq2seq_vd' \
-  --discriminator_model='seq2seq_vd' \
-  --is_present_rate=0.5 \
-  --summaries_every=250 \
-  --print_every=250 \
-  --max_num_to_print=3 \
-  --gen_training_strategy='reinforce' \
-  --seq2seq_share_embedding=true \
-  --baseline_method=critic \
-  --attention_option=luong
-```
-
-4. Generate samples:
-
-```bash
-python generate_samples.py \
-  --data_dir /tmp/ptb/ \
-  --data_set=ptb \
-  --batch_size=256 \
-  --sequence_length=20 \
-  --base_directory /tmp/imdbsample/ \
-  --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2,gen_vd_keep_prob=0.33971" \
-  --generator_model=seq2seq_vd \
-  --discriminator_model=seq2seq_vd \
-  --is_present_rate=0.0 \
-  --maskgan_ckpt=/tmp/maskGAN \
-  --seq2seq_share_embedding=True \
-  --dis_share_embedding=True \
-  --attention_option=luong \
-  --mask_strategy=contiguous \
-  --baseline_method=critic \
-  --number_epochs=4
-```
-
-
-* While trying to run Step 2, the following error may appear:
-
-      NotFoundError (see above for traceback): Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error:
-
-      Key critic/rnn/biases not found in checkpoint
-      [[node save/RestoreV2 (defined at train_mask_gan.py:431) ]]
-
-  This is an issue with the seq2seq model because it uses the attention mechanism.
-  The issue arises if you saved the model with an earlier TensorFlow version and restore it with a recent one (saver.restore was updated):
-  the naming convention for LSTM parameters changed (e.g. cell_0/basic_lstm_cell/weights became cell_0/basic_lstm_cell/kernel),
-  which is why old checkpoints cannot be restored with a recent TF. The script below renames the variables so restoring works as expected:
-  https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/tools/checkpoint_convert.py
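  As a rough sketch of how that conversion is typically invoked (the checkpoint
  paths below are hypothetical, and the script's flags may differ across
  TensorFlow versions, so check its `--help` first):

```bash
# Rewrite old-style RNN variable names (e.g. .../weights -> .../kernel).
python checkpoint_convert.py \
  /tmp/maskGAN/model.ckpt-XXXX \
  /tmp/maskGAN-converted/model.ckpt-XXXX
```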
-
-## Contact for Issues
-
-* Liam Fedus, @liamb315
-* Andrew M. Dai, @a-dai
diff --git a/research/maskgan/data/__init__.py b/research/maskgan/data/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/maskgan/data/imdb_loader.py b/research/maskgan/data/imdb_loader.py
deleted file mode 100644
index 8169b3336..000000000
--- a/research/maskgan/data/imdb_loader.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""IMDB data loader and helpers."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-# Dependency imports
-import numpy as np
-
-import tensorflow as tf
-
-FLAGS = tf.app.flags.FLAGS
-tf.app.flags.DEFINE_boolean('prefix_label', True,
-                            'Whether to prepend the class label to each '
-                            'sequence.')
-
-np.set_printoptions(precision=3)
-np.set_printoptions(suppress=True)
-
-EOS_INDEX = 88892
-
-
-def _read_words(filename, use_prefix=True):
-  all_words = []
-  sequence_example = tf.train.SequenceExample()
-  for r in tf.python_io.tf_record_iterator(filename):
-    sequence_example.ParseFromString(r)
-
-    if FLAGS.prefix_label and use_prefix:
-      label = sequence_example.context.feature['class'].int64_list.value[0]
-      review_words = [EOS_INDEX + 1 + label]
-    else:
-      review_words = []
-    review_words.extend([
-        f.int64_list.value[0]
-        for f in sequence_example.feature_lists.feature_list['token_id'].feature
-    ])
-    all_words.append(review_words)
-  return all_words
-
-
-def build_vocab(vocab_file):
-  word_to_id = {}
-
-  with tf.gfile.GFile(vocab_file, 'r') as f:
-    index = 0
-    for word in f:
-      word_to_id[word.strip()] = index
-      index += 1
-    word_to_id['<eos>'] = EOS_INDEX
-
-  return word_to_id
-
-
-def imdb_raw_data(data_path=None):
-  """Load IMDB raw data from data directory "data_path".
-
-  Reads IMDB tf record files containing integer ids,
-  and performs mini-batching of the inputs.
-
-  Args:
-    data_path: string path to the directory containing the IMDB tfrecord
-      files (train_lm.tfrecords and test_lm.tfrecords).
-
-  Returns:
-    tuple (train_data, valid_data)
-    where each of the data objects can be passed to imdb_iterator.
- """ - - train_path = os.path.join(data_path, 'train_lm.tfrecords') - valid_path = os.path.join(data_path, 'test_lm.tfrecords') - - train_data = _read_words(train_path) - valid_data = _read_words(valid_path) - return train_data, valid_data - - -def imdb_iterator(raw_data, batch_size, num_steps, epoch_size_override=None): - """Iterate on the raw IMDB data. - - This generates batch_size pointers into the raw IMDB data, and allows - minibatch iteration along these pointers. - - Args: - raw_data: one of the raw data outputs from imdb_raw_data. - batch_size: int, the batch size. - num_steps: int, the number of unrolls. - - Yields: - Pairs of the batched data, each a matrix of shape [batch_size, num_steps]. - The second element of the tuple is the same data time-shifted to the - right by one. The third is a set of weights with 1 indicating a word was - present and 0 not. - - Raises: - ValueError: if batch_size or num_steps are too high. - """ - del epoch_size_override - data_len = len(raw_data) - num_batches = data_len // batch_size - 1 - - for batch in range(num_batches): - x = np.zeros([batch_size, num_steps], dtype=np.int32) - y = np.zeros([batch_size, num_steps], dtype=np.int32) - w = np.zeros([batch_size, num_steps], dtype=np.float) - - for i in range(batch_size): - data_index = batch * batch_size + i - example = raw_data[data_index] - - if len(example) > num_steps: - final_x = example[:num_steps] - final_y = example[1:(num_steps + 1)] - w[i] = 1 - - else: - to_fill_in = num_steps - len(example) - final_x = example + [EOS_INDEX] * to_fill_in - final_y = final_x[1:] + [EOS_INDEX] - w[i] = [1] * len(example) + [0] * to_fill_in - - x[i] = final_x - y[i] = final_y - - yield (x, y, w) diff --git a/research/maskgan/data/ptb_loader.py b/research/maskgan/data/ptb_loader.py deleted file mode 100644 index 43105952a..000000000 --- a/research/maskgan/data/ptb_loader.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/research/maskgan/data/ptb_loader.py b/research/maskgan/data/ptb_loader.py
deleted file mode 100644
index 43105952a..000000000
--- a/research/maskgan/data/ptb_loader.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""PTB data loader and helpers."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import os
-# Dependency imports
-import numpy as np
-
-import tensorflow as tf
-
-EOS_INDEX = 0
-
-
-def _read_words(filename):
-  with tf.gfile.GFile(filename, "r") as f:
-    return f.read().decode("utf-8").replace("\n", "<eos>").split()
-
-
-def build_vocab(filename):
-  data = _read_words(filename)
-
-  counter = collections.Counter(data)
-  count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
-
-  words, _ = list(zip(*count_pairs))
-  word_to_id = dict(zip(words, range(len(words))))
-  print("<eos>:", word_to_id["<eos>"])
-  global EOS_INDEX
-  EOS_INDEX = word_to_id["<eos>"]
-
-  return word_to_id
-
-
-def _file_to_word_ids(filename, word_to_id):
-  data = _read_words(filename)
-  return [word_to_id[word] for word in data if word in word_to_id]
-
-
-def ptb_raw_data(data_path=None):
-  """Load PTB raw data from data directory "data_path".
-
-  Reads PTB text files, converts strings to integer ids,
-  and performs mini-batching of the inputs.
-
-  The PTB dataset comes from Tomas Mikolov's webpage:
-  http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
-
-  Args:
-    data_path: string path to the directory where simple-examples.tgz has
-      been extracted.
-
-  Returns:
-    tuple (train_data, valid_data, test_data, vocabulary)
-    where each of the data objects can be passed to PTBIterator.
-  """
-
-  train_path = os.path.join(data_path, "ptb.train.txt")
-  valid_path = os.path.join(data_path, "ptb.valid.txt")
-  test_path = os.path.join(data_path, "ptb.test.txt")
-
-  word_to_id = build_vocab(train_path)
-  train_data = _file_to_word_ids(train_path, word_to_id)
-  valid_data = _file_to_word_ids(valid_path, word_to_id)
-  test_data = _file_to_word_ids(test_path, word_to_id)
-  vocabulary = len(word_to_id)
-  return train_data, valid_data, test_data, vocabulary
-
-
-def ptb_iterator(raw_data, batch_size, num_steps, epoch_size_override=None):
-  """Iterate on the raw PTB data.
-
-  This generates batch_size pointers into the raw PTB data, and allows
-  minibatch iteration along these pointers.
-
-  Args:
-    raw_data: one of the raw data outputs from ptb_raw_data.
-    batch_size: int, the batch size.
-    num_steps: int, the number of unrolls.
-
-  Yields:
-    Pairs of the batched data, each a matrix of shape [batch_size, num_steps].
-    The second element of the tuple is the same data time-shifted to the
-    right by one.
-
-  Raises:
-    ValueError: if batch_size or num_steps are too high.
- """ - raw_data = np.array(raw_data, dtype=np.int32) - - data_len = len(raw_data) - batch_len = data_len // batch_size - data = np.full([batch_size, batch_len], EOS_INDEX, dtype=np.int32) - for i in range(batch_size): - data[i] = raw_data[batch_len * i:batch_len * (i + 1)] - - if epoch_size_override: - epoch_size = epoch_size_override - else: - epoch_size = (batch_len - 1) // num_steps - - if epoch_size == 0: - raise ValueError("epoch_size == 0, decrease batch_size or num_steps") - - # print("Number of batches per epoch: %d" % epoch_size) - for i in range(epoch_size): - x = data[:, i * num_steps:(i + 1) * num_steps] - y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1] - w = np.ones_like(x) - yield (x, y, w) diff --git a/research/maskgan/generate_samples.py b/research/maskgan/generate_samples.py deleted file mode 100644 index d4215ebc7..000000000 --- a/research/maskgan/generate_samples.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Generate samples from the MaskGAN. - -Launch command: - python generate_samples.py - --data_dir=/tmp/data/imdb --data_set=imdb - --batch_size=256 --sequence_length=20 --base_directory=/tmp/imdb - --hparams="gen_rnn_size=650,dis_rnn_size=650,gen_num_layers=2, - gen_vd_keep_prob=1.0" --generator_model=seq2seq_vd - --discriminator_model=seq2seq_vd --is_present_rate=0.5 - --maskgan_ckpt=/tmp/model.ckpt-45494 - --seq2seq_share_embedding=True --dis_share_embedding=True - --attention_option=luong --mask_strategy=contiguous --baseline_method=critic - --number_epochs=4 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from functools import partial -import os -# Dependency imports - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import train_mask_gan -from data import imdb_loader -from data import ptb_loader - -# Data. -from model_utils import helper -from model_utils import model_utils - -SAMPLE_TRAIN = 'TRAIN' -SAMPLE_VALIDATION = 'VALIDATION' - -## Sample Generation. -## Binary and setup FLAGS. 
-tf.app.flags.DEFINE_enum('sample_mode', 'TRAIN', - [SAMPLE_TRAIN, SAMPLE_VALIDATION], - 'Dataset to sample from.') -tf.app.flags.DEFINE_string('output_path', '/tmp', 'Model output directory.') -tf.app.flags.DEFINE_boolean( - 'output_masked_logs', False, - 'Whether to display for human evaluation (show masking).') -tf.app.flags.DEFINE_integer('number_epochs', 1, - 'The number of epochs to produce.') - -FLAGS = tf.app.flags.FLAGS - - -def get_iterator(data): - """Return the data iterator.""" - if FLAGS.data_set == 'ptb': - iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size, - FLAGS.sequence_length, - FLAGS.epoch_size_override) - elif FLAGS.data_set == 'imdb': - iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size, - FLAGS.sequence_length) - return iterator - - -def convert_to_human_readable(id_to_word, arr, p, max_num_to_print): - """Convert a np.array of indices into words using id_to_word dictionary. - Return max_num_to_print results. - """ - - assert arr.ndim == 2 - - samples = [] - for sequence_id in xrange(min(len(arr), max_num_to_print)): - sample = [] - for i, index in enumerate(arr[sequence_id, :]): - if p[sequence_id, i] == 1: - sample.append(str(id_to_word[index])) - else: - sample.append('*' + str(id_to_word[index])) - buffer_str = ' '.join(sample) - samples.append(buffer_str) - return samples - - -def write_unmasked_log(log, id_to_word, sequence_eval): - """Helper function for logging evaluated sequences without mask.""" - indices_arr = np.asarray(sequence_eval) - samples = helper.convert_to_human_readable(id_to_word, indices_arr, - FLAGS.batch_size) - for sample in samples: - log.write(sample + '\n') - log.flush() - return samples - - -def write_masked_log(log, id_to_word, sequence_eval, present_eval): - indices_arr = np.asarray(sequence_eval) - samples = convert_to_human_readable(id_to_word, indices_arr, present_eval, - FLAGS.batch_size) - for sample in samples: - log.write(sample + '\n') - log.flush() - return samples - - -def generate_logs(sess, model, log, id_to_word, feed): - """Impute Sequences using the model for a particular feed and send it to - logs. - """ - # Impute Sequences. - [p, inputs_eval, sequence_eval] = sess.run( - [model.present, model.inputs, model.fake_sequence], feed_dict=feed) - - # Add the 0th time-step for coherence. - first_token = np.expand_dims(inputs_eval[:, 0], axis=1) - sequence_eval = np.concatenate((first_token, sequence_eval), axis=1) - - # 0th token always present. - p = np.concatenate((np.ones((FLAGS.batch_size, 1)), p), axis=1) - - if FLAGS.output_masked_logs: - samples = write_masked_log(log, id_to_word, sequence_eval, p) - else: - samples = write_unmasked_log(log, id_to_word, sequence_eval) - return samples - - -def generate_samples(hparams, data, id_to_word, log_dir, output_file): - """"Generate samples. - - Args: - hparams: Hyperparameters for the MaskGAN. - data: Data to evaluate. - id_to_word: Dictionary of indices to words. - log_dir: Log directory. - output_file: Output file for the samples. - """ - # Boolean indicating operational mode. - is_training = False - - # Set a random seed to keep fixed mask. - np.random.seed(0) - - with tf.Graph().as_default(): - # Construct the model. - model = train_mask_gan.create_MaskGAN(hparams, is_training) - - ## Retrieve the initial savers. - init_savers = model_utils.retrieve_init_savers(hparams) - - ## Initial saver function to supervisor. - init_fn = partial(model_utils.init_fn, init_savers) - - is_chief = FLAGS.task == 0 - - # Create the supervisor. 
It will take care of initialization, summaries,
-    # checkpoints, and recovery.
-    sv = tf.train.Supervisor(
-        logdir=log_dir,
-        is_chief=is_chief,
-        saver=model.saver,
-        global_step=model.global_step,
-        recovery_wait_secs=30,
-        summary_op=None,
-        init_fn=init_fn)
-
-    # Get an initialized, and possibly recovered session. Launch the
-    # services: Checkpointing, Summaries, step counting.
-    #
-    # When multiple replicas of this program are running the services are
-    # only launched by the 'chief' replica.
-    with sv.managed_session(
-        FLAGS.master, start_standard_services=False) as sess:
-
-      # Generator statefulness over the epoch.
-      [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run(
-          [model.eval_initial_state, model.fake_gen_initial_state])
-
-      for n in xrange(FLAGS.number_epochs):
-        print('Epoch number: %d' % n)
-        # print('Percent done: %.2f' % float(n) / float(FLAGS.number_epochs))
-        iterator = get_iterator(data)
-        for x, y, _ in iterator:
-          if FLAGS.eval_language_model:
-            is_present_rate = 0.
-          else:
-            is_present_rate = FLAGS.is_present_rate
-          tf.logging.info(
-              'Evaluating on is_present_rate=%.3f.' % is_present_rate)
-
-          model_utils.assign_percent_real(sess, model.percent_real_update,
-                                          model.new_rate, is_present_rate)
-
-          # Randomly mask out tokens.
-          p = model_utils.generate_mask()
-
-          eval_feed = {model.inputs: x, model.targets: y, model.present: p}
-
-          if FLAGS.data_set == 'ptb':
-            # Statefulness for *evaluation* Generator.
-            for i, (c, h) in enumerate(model.eval_initial_state):
-              eval_feed[c] = gen_initial_state_eval[i].c
-              eval_feed[h] = gen_initial_state_eval[i].h
-
-            # Statefulness for the Generator.
-            for i, (c, h) in enumerate(model.fake_gen_initial_state):
-              eval_feed[c] = fake_gen_initial_state_eval[i].c
-              eval_feed[h] = fake_gen_initial_state_eval[i].h
-
-          [gen_initial_state_eval, fake_gen_initial_state_eval, _] = sess.run(
-              [
-                  model.eval_final_state, model.fake_gen_final_state,
-                  model.global_step
-              ],
-              feed_dict=eval_feed)
-
-          generate_logs(sess, model, output_file, id_to_word, eval_feed)
-      output_file.close()
-      print('Closing output_file.')
-      return
-
-
-def main(_):
-  hparams = train_mask_gan.create_hparams()
-  log_dir = FLAGS.base_directory
-
-  tf.gfile.MakeDirs(FLAGS.output_path)
-  output_file = tf.gfile.GFile(
-      os.path.join(FLAGS.output_path, 'reviews.txt'), mode='w')
-
-  # Load data set.
-  if FLAGS.data_set == 'ptb':
-    raw_data = ptb_loader.ptb_raw_data(FLAGS.data_dir)
-    train_data, valid_data, _, _ = raw_data
-  elif FLAGS.data_set == 'imdb':
-    raw_data = imdb_loader.imdb_raw_data(FLAGS.data_dir)
-    train_data, valid_data = raw_data
-  else:
-    raise NotImplementedError
-
-  # Generating more data on train set.
-  if FLAGS.sample_mode == SAMPLE_TRAIN:
-    data_set = train_data
-  elif FLAGS.sample_mode == SAMPLE_VALIDATION:
-    data_set = valid_data
-  else:
-    raise NotImplementedError
-
-  # Dictionary and reverse dictionary.
- if FLAGS.data_set == 'ptb': - word_to_id = ptb_loader.build_vocab( - os.path.join(FLAGS.data_dir, 'ptb.train.txt')) - elif FLAGS.data_set == 'imdb': - word_to_id = imdb_loader.build_vocab( - os.path.join(FLAGS.data_dir, 'vocab.txt')) - id_to_word = {v: k for k, v in word_to_id.iteritems()} - - FLAGS.vocab_size = len(id_to_word) - print('Vocab size: %d' % FLAGS.vocab_size) - - generate_samples(hparams, data_set, id_to_word, log_dir, output_file) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/maskgan/losses/__init__.py b/research/maskgan/losses/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/maskgan/losses/losses.py b/research/maskgan/losses/losses.py deleted file mode 100644 index 38d0e7b4d..000000000 --- a/research/maskgan/losses/losses.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Losses for Generator and Discriminator.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -def discriminator_loss(predictions, labels, missing_tokens): - """Discriminator loss based on predictions and labels. - - Args: - predictions: Discriminator linear predictions Tensor of shape [batch_size, - sequence_length] - labels: Labels for predictions, Tensor of shape [batch_size, - sequence_length] - missing_tokens: Indicator for the missing tokens. Evaluate the loss only - on the tokens that were missing. - - Returns: - loss: Scalar tf.float32 loss. - - """ - loss = tf.losses.sigmoid_cross_entropy(labels, - predictions, - weights=missing_tokens) - loss = tf.Print( - loss, [loss, labels, missing_tokens], - message='loss, labels, missing_tokens', - summarize=25, - first_n=25) - return loss - - -def cross_entropy_loss_matrix(gen_labels, gen_logits): - """Computes the cross entropy loss for G. - - Args: - gen_labels: Labels for the correct token. - gen_logits: Generator logits. - - Returns: - loss_matrix: Loss matrix of shape [batch_size, sequence_length]. - """ - cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits( - labels=gen_labels, logits=gen_logits) - return cross_entropy_loss - - -def GAN_loss_matrix(dis_predictions): - """Computes the cross entropy loss for G. - - Args: - dis_predictions: Discriminator predictions. - - Returns: - loss_matrix: Loss matrix of shape [batch_size, sequence_length]. - """ - eps = tf.constant(1e-7, tf.float32) - gan_loss_matrix = -tf.log(dis_predictions + eps) - return gan_loss_matrix - - -def generator_GAN_loss(predictions): - """Generator GAN loss based on Discriminator predictions.""" - return -tf.log(tf.reduce_mean(predictions)) - - -def generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions, - is_real_input): - """Computes the masked-loss for G. 
This will be a blend of cross-entropy
-  loss where the true label is known and GAN loss where the true label has been
-  masked.
-
-  Args:
-    gen_logits: Generator logits.
-    gen_labels: Labels for the correct token.
-    dis_predictions: Discriminator predictions.
-    is_real_input: Tensor indicating whether the label is present.
-
-  Returns:
-    loss: Scalar tf.float32 total loss.
-  """
-  cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      labels=gen_labels, logits=gen_logits)
-  gan_loss = -tf.log(dis_predictions)
-  loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
-  return tf.reduce_mean(loss_matrix)
-
-
-def wasserstein_generator_loss(gen_logits, gen_labels, dis_values,
-                               is_real_input):
-  """Computes the masked-loss for G. This will be a blend of cross-entropy
-  loss where the true label is known and GAN loss where the true label is
-  missing.
-
-  Args:
-    gen_logits: Generator logits.
-    gen_labels: Labels for the correct token.
-    dis_values: Discriminator values Tensor of shape [batch_size,
-      sequence_length].
-    is_real_input: Tensor indicating whether the label is present.
-
-  Returns:
-    loss: Scalar tf.float32 total loss.
-  """
-  cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      labels=gen_labels, logits=gen_logits)
-  # Maximize the dis_values (minimize the negative).
-  gan_loss = -dis_values
-  loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
-  loss = tf.reduce_mean(loss_matrix)
-  return loss
-
-
-def wasserstein_discriminator_loss(real_values, fake_values):
-  """Wasserstein discriminator loss.
-
-  Args:
-    real_values: Value given by the Wasserstein Discriminator to real data.
-    fake_values: Value given by the Wasserstein Discriminator to fake data.
-
-  Returns:
-    loss: Scalar tf.float32 loss.
-  """
-  real_avg = tf.reduce_mean(real_values)
-  fake_avg = tf.reduce_mean(fake_values)
-
-  wasserstein_loss = real_avg - fake_avg
-  return wasserstein_loss
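# A quick numeric check of the sign convention above (made-up values, not from
# the original file): if the critic scores real tokens [0.9, 0.7, 0.5] and fake
# tokens [0.1, 0.2, 0.3], then wasserstein_loss = 0.7 - 0.2 = 0.5; training the
# Discriminator to widen exactly this gap is what separates real from imputed
# tokens.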
-
-
-def wasserstein_discriminator_loss_intrabatch(values, is_real_input):
-  """Wasserstein discriminator loss. This is an odd variant where the value
-  difference is between the real tokens and the fake tokens within a single
-  batch.
-
-  Args:
-    values: Value given by the Wasserstein Discriminator of shape [batch_size,
-      sequence_length] to an imputed batch (real and fake).
-    is_real_input: tf.bool Tensor of shape [batch_size, sequence_length]. If
-      true, it indicates that the label is known.
-
-  Returns:
-    wasserstein_loss: Scalar tf.float32 loss.
-  """
-  zero_tensor = tf.constant(0., dtype=tf.float32, shape=[])
-
-  present = tf.cast(is_real_input, tf.float32)
-  missing = tf.cast(1 - present, tf.float32)
-
-  # Counts for real and fake tokens.
-  real_count = tf.reduce_sum(present)
-  fake_count = tf.reduce_sum(missing)
-
-  # Averages for real and fake token values.
-  real = tf.multiply(values, present)
-  fake = tf.multiply(values, missing)
-  real_avg = tf.reduce_sum(real) / real_count
-  fake_avg = tf.reduce_sum(fake) / fake_count
-
-  # If there are no real or fake entries in the batch, we assign an average
-  # value of zero.
-  real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg)
-  fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg)
-
-  wasserstein_loss = real_avg - fake_avg
-  return wasserstein_loss
diff --git a/research/maskgan/model_utils/__init__.py b/research/maskgan/model_utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/maskgan/model_utils/helper.py b/research/maskgan/model_utils/helper.py
deleted file mode 100644
index 36115b484..000000000
--- a/research/maskgan/model_utils/helper.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Random helper functions for converting between indices and one-hot encodings
-as well as printing/logging helpers.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-
-def variable_summaries(var, name):
-  """Attach a lot of summaries to a Tensor."""
-  mean = tf.reduce_mean(var)
-  tf.summary.scalar('mean/' + name, mean)
-  with tf.name_scope('stddev'):
-    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
-  tf.summary.scalar('stddev/' + name, stddev)
-  tf.summary.scalar('max/' + name, tf.reduce_max(var))
-  tf.summary.scalar('min/' + name, tf.reduce_min(var))
-  tf.summary.histogram(name, var)
-
-
-def zip_seq_pred_crossent(id_to_word, sequences, predictions, cross_entropy):
-  """Zip together the sequences, predictions, cross entropy."""
-  indices = convert_to_indices(sequences)
-
-  batch_of_metrics = []
-
-  for ind_batch, pred_batch, crossent_batch in zip(indices, predictions,
-                                                   cross_entropy):
-    metrics = []
-
-    for index, pred, crossent in zip(ind_batch, pred_batch, crossent_batch):
-      metrics.append([str(id_to_word[index]), pred, crossent])
-
-    batch_of_metrics.append(metrics)
-  return batch_of_metrics
-
-
-def print_and_log(log, id_to_word, sequence_eval, max_num_to_print=5):
-  """Helper function for printing and logging evaluated sequences."""
-  indices_eval = convert_to_indices(sequence_eval)
-  indices_arr = np.asarray(indices_eval)
-  samples = convert_to_human_readable(id_to_word, indices_arr, max_num_to_print)
-
-  for i, sample in enumerate(samples):
-    print('Sample', i, '. ', sample)
-    log.write('\nSample ' + str(i) + '. ' + sample)
-  log.write('\n')
-  print('\n')
-  log.flush()
-
-
-def convert_to_human_readable(id_to_word, arr, max_num_to_print):
-  """Convert a np.array of indices into words using id_to_word dictionary.
-  Return max_num_to_print results.
- """ - assert arr.ndim == 2 - - samples = [] - for sequence_id in xrange(min(len(arr), max_num_to_print)): - buffer_str = ' '.join( - [str(id_to_word[index]) for index in arr[sequence_id, :]]) - samples.append(buffer_str) - return samples - - -def index_to_vocab_array(indices, vocab_size, sequence_length): - """Convert the indices into an array with vocab_size one-hot encoding.""" - - # Extract properties of the indices. - num_batches = len(indices) - shape = list(indices.shape) - shape.append(vocab_size) - - # Construct the vocab_size array. - new_arr = np.zeros(shape) - - for n in xrange(num_batches): - indices_batch = indices[n] - new_arr_batch = new_arr[n] - - # We map all indices greater than the vocabulary size to an unknown - # character. - indices_batch = np.where(indices_batch < vocab_size, indices_batch, - vocab_size - 1) - - # Convert indices to vocab_size dimensions. - new_arr_batch[np.arange(sequence_length), indices_batch] = 1 - return new_arr - - -def convert_to_indices(sequences): - """Convert a list of size [batch_size, sequence_length, vocab_size] to - a list of size [batch_size, sequence_length] where the vocab element is - denoted by the index. - """ - batch_of_indices = [] - - for sequence in sequences: - indices = [] - for embedding in sequence: - indices.append(np.argmax(embedding)) - batch_of_indices.append(indices) - return batch_of_indices - - -def convert_and_zip(id_to_word, sequences, predictions): - """Helper function for printing or logging. Retrieves list of sequences - and predictions and zips them together. - """ - indices = convert_to_indices(sequences) - - batch_of_indices_predictions = [] - - for index_batch, pred_batch in zip(indices, predictions): - indices_predictions = [] - - for index, pred in zip(index_batch, pred_batch): - indices_predictions.append([str(id_to_word[index]), pred]) - batch_of_indices_predictions.append(indices_predictions) - return batch_of_indices_predictions - - -def recursive_length(item): - """Recursively determine the total number of elements in nested list.""" - if type(item) == list: - return sum(recursive_length(subitem) for subitem in item) - else: - return 1. - - -def percent_correct(real_sequence, fake_sequences): - """Determine the percent of tokens correctly generated within a batch.""" - identical = 0. - for fake_sequence in fake_sequences: - for real, fake in zip(real_sequence, fake_sequence): - if real == fake: - identical += 1. - return identical / recursive_length(fake_sequences) diff --git a/research/maskgan/model_utils/model_construction.py b/research/maskgan/model_utils/model_construction.py deleted file mode 100644 index 8dfa1df34..000000000 --- a/research/maskgan/model_utils/model_construction.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Model construction."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-
-import tensorflow as tf
-from models import bidirectional
-from models import bidirectional_vd
-from models import bidirectional_zaremba
-from models import cnn
-from models import critic_vd
-from models import feedforward
-from models import rnn
-from models import rnn_nas
-from models import rnn_vd
-from models import rnn_zaremba
-from models import seq2seq
-from models import seq2seq_nas
-from models import seq2seq_vd
-from models import seq2seq_zaremba
-
-FLAGS = tf.app.flags.FLAGS
-
-
-# TODO(adai): IMDB labels placeholder to model.
-def create_generator(hparams,
-                     inputs,
-                     targets,
-                     present,
-                     is_training,
-                     is_validating,
-                     reuse=None):
-  """Create the Generator model specified by the FLAGS and hparams.
-
-  Args:
-    hparams: Hyperparameters for the MaskGAN.
-    inputs: tf.int32 Tensor of the sequence input of shape [batch_size,
-      sequence_length].
-    targets: tf.int32 Tensor of the sequence target of shape [batch_size,
-      sequence_length].
-    present: tf.bool Tensor indicating the presence or absence of the token
-      of shape [batch_size, sequence_length].
-    is_training: Whether the model is training.
-    is_validating: Whether the model is being run in validation mode for
-      calculating the perplexity.
-    reuse (Optional): Whether to reuse the model.
-
-  Returns:
-    Tuple of the (sequence, logits, log_probs, initial_state, final_state,
-    encoder_states) of the Generator. Sequence and logits have shape
-    [batch_size, sequence_length, vocab_size]. The log_probs will have shape
-    [batch_size, sequence_length] and corresponds to the log probability of
-    selecting the words.
-  """
-  # Only the seq2seq_vd generator produces encoder states.
-  encoder_states = None
-  if FLAGS.generator_model == 'rnn':
-    (sequence, logits, log_probs, initial_state, final_state) = rnn.generator(
-        hparams,
-        inputs,
-        targets,
-        present,
-        is_training=is_training,
-        is_validating=is_validating,
-        reuse=reuse)
-  elif FLAGS.generator_model == 'rnn_zaremba':
-    (sequence, logits, log_probs, initial_state,
-     final_state) = rnn_zaremba.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  elif FLAGS.generator_model == 'seq2seq':
-    (sequence, logits, log_probs, initial_state,
-     final_state) = seq2seq.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  elif FLAGS.generator_model == 'seq2seq_zaremba':
-    (sequence, logits, log_probs, initial_state,
-     final_state) = seq2seq_zaremba.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  elif FLAGS.generator_model == 'rnn_nas':
-    (sequence, logits, log_probs, initial_state,
-     final_state) = rnn_nas.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  elif FLAGS.generator_model == 'seq2seq_nas':
-    (sequence, logits, log_probs, initial_state,
-     final_state) = seq2seq_nas.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  elif FLAGS.generator_model == 'seq2seq_vd':
-    (sequence, logits, log_probs, initial_state, final_state,
-     encoder_states) = seq2seq_vd.generator(
-         hparams,
-         inputs,
-         targets,
-         present,
-         is_training=is_training,
-         is_validating=is_validating,
-         reuse=reuse)
-  else:
-    raise NotImplementedError
-  return (sequence, logits, log_probs, initial_state,
final_state, - encoder_states) - - -def create_discriminator(hparams, - sequence, - is_training, - reuse=None, - initial_state=None, - inputs=None, - present=None): - """Create the Discriminator model specified by the FLAGS and hparams. - - Args: - hparams: Hyperparameters for the MaskGAN. - sequence: tf.int32 Tensor sequence of shape [batch_size, sequence_length] - is_training: Whether the model is training. - reuse (Optional): Whether to reuse the model. - - Returns: - predictions: tf.float32 Tensor of predictions of shape [batch_size, - sequence_length] - """ - if FLAGS.discriminator_model == 'cnn': - predictions = cnn.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'fnn': - predictions = feedforward.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'rnn': - predictions = rnn.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'bidirectional': - predictions = bidirectional.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'bidirectional_zaremba': - predictions = bidirectional_zaremba.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'seq2seq_vd': - predictions = seq2seq_vd.discriminator( - hparams, - inputs, - present, - sequence, - is_training=is_training, - reuse=reuse) - elif FLAGS.discriminator_model == 'rnn_zaremba': - predictions = rnn_zaremba.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'rnn_nas': - predictions = rnn_nas.discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - elif FLAGS.discriminator_model == 'rnn_vd': - predictions = rnn_vd.discriminator( - hparams, - sequence, - is_training=is_training, - reuse=reuse, - initial_state=initial_state) - elif FLAGS.discriminator_model == 'bidirectional_vd': - predictions = bidirectional_vd.discriminator( - hparams, - sequence, - is_training=is_training, - reuse=reuse, - initial_state=initial_state) - else: - raise NotImplementedError - return predictions - - -def create_critic(hparams, sequence, is_training, reuse=None): - """Create the Critic model specified by the FLAGS and hparams. - - Args: - hparams: Hyperparameters for the MaskGAN. - sequence: tf.int32 Tensor sequence of shape [batch_size, sequence_length] - is_training: Whether the model is training. - reuse (Optional): Whether to reuse the model. - - Returns: - values: tf.float32 Tensor of predictions of shape [batch_size, - sequence_length] - """ - if FLAGS.baseline_method == 'critic': - if FLAGS.discriminator_model == 'seq2seq_vd': - values = critic_vd.critic_seq2seq_vd_derivative( - hparams, sequence, is_training, reuse=reuse) - else: - raise NotImplementedError - else: - raise NotImplementedError - return values diff --git a/research/maskgan/model_utils/model_losses.py b/research/maskgan/model_utils/model_losses.py deleted file mode 100644 index c8f337dc4..000000000 --- a/research/maskgan/model_utils/model_losses.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Model loss construction."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-# Useful for REINFORCE baseline.
-from losses import losses
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def create_dis_loss(fake_predictions, real_predictions, targets_present):
-  """Compute Discriminator loss across real/fake."""
-
-  missing = tf.cast(targets_present, tf.int32)
-  missing = 1 - missing
-  missing = tf.cast(missing, tf.bool)
-
-  real_labels = tf.ones([FLAGS.batch_size, FLAGS.sequence_length])
-  dis_loss_real = tf.losses.sigmoid_cross_entropy(
-      real_labels, real_predictions, weights=missing)
-  dis_loss_fake = tf.losses.sigmoid_cross_entropy(
-      targets_present, fake_predictions, weights=missing)
-
-  dis_loss = (dis_loss_fake + dis_loss_real) / 2.
-  return dis_loss, dis_loss_fake, dis_loss_real
-
-
-def create_critic_loss(cumulative_rewards, estimated_values, present):
-  """Compute Critic loss in estimating the value function. This should be an
-  estimate only for the missing elements."""
-  missing = tf.cast(present, tf.int32)
-  missing = 1 - missing
-  missing = tf.cast(missing, tf.bool)
-
-  loss = tf.losses.mean_squared_error(
-      labels=cumulative_rewards, predictions=estimated_values, weights=missing)
-  return loss
-
-
-def create_masked_cross_entropy_loss(targets, present, logits):
-  """Calculate the cross entropy loss matrices for the masked tokens."""
-  cross_entropy_losses = losses.cross_entropy_loss_matrix(targets, logits)
-
-  # Zeros matrix.
-  zeros_losses = tf.zeros(
-      shape=[FLAGS.batch_size, FLAGS.sequence_length], dtype=tf.float32)
-
-  missing_ce_loss = tf.where(present, zeros_losses, cross_entropy_losses)
-
-  return missing_ce_loss
-
-
-def calculate_reinforce_objective(hparams,
-                                  log_probs,
-                                  dis_predictions,
-                                  present,
-                                  estimated_values=None):
-  """Calculate the REINFORCE objectives. The REINFORCE objective should
-  only be on the tokens that were missing. Specifically, the final Generator
-  reward should be based on the Discriminator predictions on missing tokens.
-  The log probabilities should be only for missing tokens and the baseline
-  should be calculated only on the missing tokens.
-
-  For this model, the reward we optimize is the log of the *conditional*
-  probability the Discriminator assigns to the distribution. Specifically, for
-  a Discriminator D which outputs probability of real, given the past context,
-
-    r_t = log D(x_t|x_0,x_1,...x_{t-1})
-
-  And the policy for Generator G is the log-probability of taking action x_t
-  given the past context.
-
-  Args:
-    hparams: MaskGAN hyperparameters.
-    log_probs: tf.float32 Tensor of log probabilities of the tokens selected by
-      the Generator. Shape [batch_size, sequence_length].
-    dis_predictions: tf.float32 Tensor of the predictions from the
-      Discriminator. Shape [batch_size, sequence_length].
-    present: tf.bool Tensor indicating which tokens are present.
Shape - [batch_size, sequence_length]. - estimated_values: tf.float32 Tensor of estimated state values of tokens. - Shape [batch_size, sequence_length] - - Returns: - final_gen_objective: Final REINFORCE objective for the sequence. - rewards: tf.float32 Tensor of rewards for sequence of shape [batch_size, - sequence_length] - advantages: tf.float32 Tensor of advantages for sequence of shape - [batch_size, sequence_length] - baselines: tf.float32 Tensor of baselines for sequence of shape - [batch_size, sequence_length] - maintain_averages_op: ExponentialMovingAverage apply average op to - maintain the baseline. - """ - # Final Generator objective. - final_gen_objective = 0. - gamma = hparams.rl_discount_rate - eps = 1e-7 - - # Generator rewards are log-probabilities. - eps = tf.constant(1e-7, tf.float32) - dis_predictions = tf.nn.sigmoid(dis_predictions) - rewards = tf.log(dis_predictions + eps) - - # Apply only for missing elements. - zeros = tf.zeros_like(present, dtype=tf.float32) - log_probs = tf.where(present, zeros, log_probs) - rewards = tf.where(present, zeros, rewards) - - # Unstack Tensors into lists. - rewards_list = tf.unstack(rewards, axis=1) - log_probs_list = tf.unstack(log_probs, axis=1) - missing = 1. - tf.cast(present, tf.float32) - missing_list = tf.unstack(missing, axis=1) - - # Cumulative Discounted Returns. The true value function V*(s). - cumulative_rewards = [] - for t in xrange(FLAGS.sequence_length): - cum_value = tf.zeros(shape=[FLAGS.batch_size]) - for s in xrange(t, FLAGS.sequence_length): - cum_value += missing_list[s] * np.power(gamma, (s - t)) * rewards_list[s] - cumulative_rewards.append(cum_value) - cumulative_rewards = tf.stack(cumulative_rewards, axis=1) - - ## REINFORCE with different baselines. - # We create a separate critic functionality for the Discriminator. This - # will need to operate unidirectionally and it may take in the past context. - if FLAGS.baseline_method == 'critic': - - # Critic loss calculated from the estimated value function \hat{V}(s) - # versus the true value function V*(s). - critic_loss = create_critic_loss(cumulative_rewards, estimated_values, - present) - - # Baselines are coming from the critic's estimated state values. - baselines = tf.unstack(estimated_values, axis=1) - - ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). - advantages = [] - for t in xrange(FLAGS.sequence_length): - log_probability = log_probs_list[t] - cum_advantage = tf.zeros(shape=[FLAGS.batch_size]) - - for s in xrange(t, FLAGS.sequence_length): - cum_advantage += missing_list[s] * np.power(gamma, - (s - t)) * rewards_list[s] - cum_advantage -= baselines[t] - # Clip advantages. - cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping, - FLAGS.advantage_clipping) - advantages.append(missing_list[t] * cum_advantage) - final_gen_objective += tf.multiply( - log_probability, missing_list[t] * tf.stop_gradient(cum_advantage)) - - maintain_averages_op = None - baselines = tf.stack(baselines, axis=1) - advantages = tf.stack(advantages, axis=1) - - # Split the batch into half. Use half for MC estimates for REINFORCE. - # Use the other half to establish a baseline. - elif FLAGS.baseline_method == 'dis_batch': - # TODO(liamfedus): Recheck. - [rewards_half, baseline_half] = tf.split( - rewards, num_or_size_splits=2, axis=0) - [log_probs_half, _] = tf.split(log_probs, num_or_size_splits=2, axis=0) - [reward_present_half, baseline_present_half] = tf.split( - present, num_or_size_splits=2, axis=0) - - # Unstack to lists. 
- baseline_list = tf.unstack(baseline_half, axis=1) - baseline_missing = 1. - tf.cast(baseline_present_half, tf.float32) - baseline_missing_list = tf.unstack(baseline_missing, axis=1) - - baselines = [] - for t in xrange(FLAGS.sequence_length): - # Calculate baseline only for missing tokens. - num_missing = tf.reduce_sum(baseline_missing_list[t]) - - avg_baseline = tf.reduce_sum( - baseline_missing_list[t] * baseline_list[t], keep_dims=True) / ( - num_missing + eps) - baseline = tf.tile(avg_baseline, multiples=[FLAGS.batch_size / 2]) - baselines.append(baseline) - - # Unstack to lists. - rewards_list = tf.unstack(rewards_half, axis=1) - log_probs_list = tf.unstack(log_probs_half, axis=1) - reward_missing = 1. - tf.cast(reward_present_half, tf.float32) - reward_missing_list = tf.unstack(reward_missing, axis=1) - - ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). - advantages = [] - for t in xrange(FLAGS.sequence_length): - log_probability = log_probs_list[t] - cum_advantage = tf.zeros(shape=[FLAGS.batch_size / 2]) - - for s in xrange(t, FLAGS.sequence_length): - cum_advantage += reward_missing_list[s] * np.power(gamma, (s - t)) * ( - rewards_list[s] - baselines[s]) - # Clip advantages. - cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping, - FLAGS.advantage_clipping) - advantages.append(reward_missing_list[t] * cum_advantage) - final_gen_objective += tf.multiply( - log_probability, - reward_missing_list[t] * tf.stop_gradient(cum_advantage)) - - # Cumulative Discounted Returns. The true value function V*(s). - cumulative_rewards = [] - for t in xrange(FLAGS.sequence_length): - cum_value = tf.zeros(shape=[FLAGS.batch_size / 2]) - for s in xrange(t, FLAGS.sequence_length): - cum_value += reward_missing_list[s] * np.power(gamma, ( - s - t)) * rewards_list[s] - cumulative_rewards.append(cum_value) - cumulative_rewards = tf.stack(cumulative_rewards, axis=1) - - rewards = rewards_half - critic_loss = None - maintain_averages_op = None - baselines = tf.stack(baselines, axis=1) - advantages = tf.stack(advantages, axis=1) - - # Exponential Moving Average baseline. - elif FLAGS.baseline_method == 'ema': - # TODO(liamfedus): Recheck. - # Lists of rewards and Log probabilities of the actions taken only for - # missing tokens. - ema = tf.train.ExponentialMovingAverage(decay=hparams.baseline_decay) - maintain_averages_op = ema.apply(rewards_list) - - baselines = [] - for r in rewards_list: - baselines.append(ema.average(r)) - - ## Calculate the Advantages, A(s,a) = Q(s,a) - \hat{V}(s). - advantages = [] - for t in xrange(FLAGS.sequence_length): - log_probability = log_probs_list[t] - - # Calculate the forward advantage only on the missing tokens. - cum_advantage = tf.zeros(shape=[FLAGS.batch_size]) - for s in xrange(t, FLAGS.sequence_length): - cum_advantage += missing_list[s] * np.power(gamma, (s - t)) * ( - rewards_list[s] - baselines[s]) - # Clip advantages. 
-      cum_advantage = tf.clip_by_value(cum_advantage, -FLAGS.advantage_clipping,
-                                       FLAGS.advantage_clipping)
-      advantages.append(missing_list[t] * cum_advantage)
-      final_gen_objective += tf.multiply(
-          log_probability, missing_list[t] * tf.stop_gradient(cum_advantage))
-
-    critic_loss = None
-    baselines = tf.stack(baselines, axis=1)
-    advantages = tf.stack(advantages, axis=1)
-
-  elif FLAGS.baseline_method is None:
-    num_missing = tf.reduce_sum(missing)
-    final_gen_objective += tf.reduce_sum(rewards) / (num_missing + eps)
-    baselines = tf.zeros_like(rewards)
-    critic_loss = None
-    maintain_averages_op = None
-    advantages = cumulative_rewards
-
-  else:
-    raise NotImplementedError
-
-  return [
-      final_gen_objective, log_probs, rewards, advantages, baselines,
-      maintain_averages_op, critic_loss, cumulative_rewards
-  ]
-
-
-def calculate_log_perplexity(logits, targets, present):
-  """Calculate the average log perplexity per *missing* token.
-
-  Args:
-    logits: tf.float32 Tensor of the logits of shape [batch_size,
-      sequence_length, vocab_size].
-    targets: tf.int32 Tensor of the sequence target of shape [batch_size,
-      sequence_length].
-    present: tf.bool Tensor indicating the presence or absence of the token
-      of shape [batch_size, sequence_length].
-
-  Returns:
-    avg_log_perplexity: Scalar indicating the average log perplexity per
-      missing token in the batch.
-  """
-  # logits = tf.Print(logits, [logits], message='logits:', summarize=50)
-  # targets = tf.Print(targets, [targets], message='targets:', summarize=50)
-  eps = 1e-12
-  logits = tf.reshape(logits, [-1, FLAGS.vocab_size])
-
-  # Only calculate log-perplexity on missing tokens.
-  weights = tf.cast(present, tf.float32)
-  weights = 1. - weights
-  weights = tf.reshape(weights, [-1])
-  num_missing = tf.reduce_sum(weights)
-
-  log_perplexity = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
-      [logits], [tf.reshape(targets, [-1])], [weights])
-
-  avg_log_perplexity = tf.reduce_sum(log_perplexity) / (num_missing + eps)
-  return avg_log_perplexity
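The discounted-return loops in calculate_reinforce_objective above implement
Q_t = sum_{s >= t} gamma^(s-t) * m_s * r_s, where m_s masks out present
tokens. A small NumPy check with made-up numbers:

    import numpy as np

    gamma = 0.9
    rewards = np.array([0.0, -1.2, -0.7])
    missing = np.array([0.0, 1.0, 1.0])  # reward flows only through masked slots
    returns = [sum(missing[s] * gamma ** (s - t) * rewards[s]
                   for s in range(t, 3))
               for t in range(3)]
    # returns[0] == 0.9 * -1.2 + 0.81 * -0.7 == -1.647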
-# ==============================================================================
-
-"""Model optimization."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-
-import tensorflow as tf
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def create_dis_pretrain_op(hparams, dis_loss, global_step):
-  """Create a train op for pretraining the discriminator."""
-  with tf.name_scope('pretrain_discriminator'):
-    optimizer = tf.train.AdamOptimizer(hparams.dis_pretrain_learning_rate)
-    dis_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('dis')
-    ]
-    if FLAGS.dis_update_share_embedding and FLAGS.dis_share_embedding:
-      shared_embedding = [
-          v for v in tf.trainable_variables()
-          if v.op.name == 'gen/decoder/rnn/embedding'
-      ][0]
-      dis_vars.append(shared_embedding)
-    dis_grads = tf.gradients(dis_loss, dis_vars)
-    dis_grads_clipped, _ = tf.clip_by_global_norm(dis_grads,
-                                                  FLAGS.grad_clipping)
-    dis_pretrain_op = optimizer.apply_gradients(
-        zip(dis_grads_clipped, dis_vars), global_step=global_step)
-    return dis_pretrain_op
-
-
-def create_gen_pretrain_op(hparams, cross_entropy_loss, global_step):
-  """Create a train op for pretraining the generator."""
-  with tf.name_scope('pretrain_generator'):
-    optimizer = tf.train.AdamOptimizer(hparams.gen_pretrain_learning_rate)
-    gen_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('gen')
-    ]
-    gen_grads = tf.gradients(cross_entropy_loss, gen_vars)
-    gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads,
-                                                  FLAGS.grad_clipping)
-    gen_pretrain_op = optimizer.apply_gradients(
-        zip(gen_grads_clipped, gen_vars), global_step=global_step)
-    return gen_pretrain_op
-
-
-def create_gen_train_op(hparams, learning_rate, gen_loss, global_step, mode):
-  """Create Generator train op."""
-  del hparams
-  with tf.name_scope('train_generator'):
-    if FLAGS.generator_optimizer == 'sgd':
-      gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-    elif FLAGS.generator_optimizer == 'adam':
-      gen_optimizer = tf.train.AdamOptimizer(learning_rate)
-    else:
-      raise NotImplementedError
-    gen_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('gen')
-    ]
-    print('Optimizing Generator vars.')
-    for v in gen_vars:
-      print(v)
-    if mode == 'MINIMIZE':
-      gen_grads = tf.gradients(gen_loss, gen_vars)
-    elif mode == 'MAXIMIZE':
-      gen_grads = tf.gradients(-gen_loss, gen_vars)
-    else:
-      raise ValueError("Must be one of 'MINIMIZE' or 'MAXIMIZE'")
-    gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads,
-                                                  FLAGS.grad_clipping)
-    gen_train_op = gen_optimizer.apply_gradients(
-        zip(gen_grads_clipped, gen_vars), global_step=global_step)
-    return gen_train_op, gen_grads_clipped, gen_vars
-
-
-def create_reinforce_gen_train_op(hparams, learning_rate, final_gen_reward,
-                                  averages_op, global_step):
-  """Create the Generator train_op when using REINFORCE.
-
-  Args:
-    hparams: MaskGAN hyperparameters.
-    learning_rate: tf.Variable scalar learning rate.
-    final_gen_reward: Scalar final REINFORCE reward for the sequence.
-    averages_op: ExponentialMovingAverage apply average op to
-      maintain the baseline.
-    global_step: global_step tf.Variable.
-
-  Returns:
-    gen_train_op: Generator training op.
-  """
-  del hparams
-  with tf.name_scope('train_generator'):
-    if FLAGS.generator_optimizer == 'sgd':
-      gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-    elif FLAGS.generator_optimizer == 'adam':
-      gen_optimizer = tf.train.AdamOptimizer(learning_rate)
-    else:
-      raise NotImplementedError
-    gen_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('gen')
-    ]
-    print('\nOptimizing Generator vars:')
-    for v in gen_vars:
-      print(v)
-
-    # Maximize reward.
-    gen_grads = tf.gradients(-final_gen_reward, gen_vars)
-    gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads,
-                                                  FLAGS.grad_clipping)
-    maximize_op = gen_optimizer.apply_gradients(
-        zip(gen_grads_clipped, gen_vars), global_step=global_step)
-
-    # Group maintain averages op.
-    if averages_op:
-      gen_train_op = tf.group(maximize_op, averages_op)
-    else:
-      gen_train_op = maximize_op
-
-    return [gen_train_op, gen_grads, gen_vars]
-
-
-def create_dis_train_op(hparams, dis_loss, global_step):
-  """Create Discriminator train op."""
-  with tf.name_scope('train_discriminator'):
-    dis_optimizer = tf.train.AdamOptimizer(hparams.dis_learning_rate)
-    dis_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('dis')
-    ]
-    if FLAGS.dis_update_share_embedding and FLAGS.dis_share_embedding:
-      shared_embedding = [
-          v for v in tf.trainable_variables()
-          if v.op.name == 'gen/decoder/rnn/embedding'
-      ][0]
-      dis_vars.append(shared_embedding)
-    print('\nOptimizing Discriminator vars:')
-    for v in dis_vars:
-      print(v)
-    dis_grads = tf.gradients(dis_loss, dis_vars)
-    dis_grads_clipped, _ = tf.clip_by_global_norm(dis_grads,
-                                                  FLAGS.grad_clipping)
-    dis_train_op = dis_optimizer.apply_gradients(
-        zip(dis_grads_clipped, dis_vars), global_step=global_step)
-    return dis_train_op, dis_grads_clipped, dis_vars
-
-
-def create_critic_train_op(hparams, critic_loss, global_step):
-  """Create Critic train op."""
-  with tf.name_scope('train_critic'):
-    critic_optimizer = tf.train.AdamOptimizer(hparams.critic_learning_rate)
-    output_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('critic')
-    ]
-
-    if FLAGS.critic_update_dis_vars:
-      if FLAGS.discriminator_model == 'bidirectional_vd':
-        critic_vars = [
-            v for v in tf.trainable_variables()
-            if v.op.name.startswith('dis/rnn')
-        ]
-      elif FLAGS.discriminator_model == 'seq2seq_vd':
-        critic_vars = [
-            v for v in tf.trainable_variables()
-            if v.op.name.startswith('dis/decoder/rnn/multi_rnn_cell')
-        ]
-      critic_vars.extend(output_vars)
-    else:
-      critic_vars = output_vars
-    print('\nOptimizing Critic vars:')
-    for v in critic_vars:
-      print(v)
-    critic_grads = tf.gradients(critic_loss, critic_vars)
-    critic_grads_clipped, _ = tf.clip_by_global_norm(critic_grads,
-                                                     FLAGS.grad_clipping)
-    critic_train_op = critic_optimizer.apply_gradients(
-        zip(critic_grads_clipped, critic_vars), global_step=global_step)
-    return critic_train_op, critic_grads_clipped, critic_vars
diff --git a/research/maskgan/model_utils/model_utils.py b/research/maskgan/model_utils/model_utils.py
deleted file mode 100644
index 0e3183582..000000000
--- a/research/maskgan/model_utils/model_utils.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
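All of the `*_train_op` builders above share one TF1 recipe: select trainable variables by scope prefix, clip the gradients by global norm, and apply them with a global step. A stripped-down sketch of that shared pattern follows; the function and argument names here are ours, not the module's.

```
import tensorflow as tf

def make_clipped_train_op(loss, scope_prefix, learning_rate, global_step,
                          grad_clipping=5.0):
  """Minimal sketch of the shared gradient-clipping pattern."""
  optimizer = tf.train.AdamOptimizer(learning_rate)
  # Select only variables under the given scope, e.g. 'gen', 'dis', 'critic'.
  train_vars = [v for v in tf.trainable_variables()
                if v.op.name.startswith(scope_prefix)]
  grads = tf.gradients(loss, train_vars)
  grads_clipped, _ = tf.clip_by_global_norm(grads, grad_clipping)
  return optimizer.apply_gradients(
      zip(grads_clipped, train_vars), global_step=global_step)
```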
-
-"""Model utilities."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-# Dependency imports
-import numpy as np
-
-import tensorflow as tf
-from model_utils import variable_mapping
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def generate_mask():
-  """Generate the mask to be fed into the model."""
-  if FLAGS.mask_strategy == 'random':
-    p = np.random.choice(
-        [True, False],
-        size=[FLAGS.batch_size, FLAGS.sequence_length],
-        p=[FLAGS.is_present_rate, 1. - FLAGS.is_present_rate])
-
-  elif FLAGS.mask_strategy == 'contiguous':
-    masked_length = int((1 - FLAGS.is_present_rate) * FLAGS.sequence_length) - 1
-    # Determine location to start masking.
-    start_mask = np.random.randint(
-        1, FLAGS.sequence_length - masked_length + 1, size=FLAGS.batch_size)
-    p = np.full([FLAGS.batch_size, FLAGS.sequence_length], True, dtype=bool)
-
-    # Create contiguous masked section to be False.
-    for i, index in enumerate(start_mask):
-      p[i, index:index + masked_length] = False
-
-  else:
-    raise NotImplementedError
-
-  return p
-
-
-def assign_percent_real(session, percent_real_update, new_rate, current_rate):
-  """Run an assign op to load the current rate of real tokens into a
-  TensorFlow variable.
-
-  Args:
-    session: Current tf.Session.
-    percent_real_update: tf.assign operation.
-    new_rate: tf.placeholder for the new rate.
-    current_rate: Percent of tokens that are currently real. Fake tokens
-      are the ones being imputed by the Generator.
-  """
-  session.run(percent_real_update, feed_dict={new_rate: current_rate})
-
-
-def assign_learning_rate(session, lr_update, lr_placeholder, new_lr):
-  """Run an assign op to load a new learning rate into a TensorFlow variable.
-
-  Args:
-    session: Current tf.Session.
-    lr_update: tf.assign operation.
-    lr_placeholder: tf.placeholder for the new learning rate.
-    new_lr: New learning rate to use.
-  """
-  session.run(lr_update, feed_dict={lr_placeholder: new_lr})
-
-
-def clip_weights(variables, c_lower, c_upper):
-  """Clip a list of weights to be within a certain range.
-
-  Args:
-    variables: List of tf.Variable weights.
-    c_lower: Lower bound for weights.
-    c_upper: Upper bound for weights.
-  """
-  clip_ops = []
-
-  for var in variables:
-    clipped_var = tf.clip_by_value(var, c_lower, c_upper)
-
-    clip_ops.append(tf.assign(var, clipped_var))
-  return tf.group(*clip_ops)
-
-
-def retrieve_init_savers(hparams):
-  """Retrieve a dictionary of all the initial savers for the models.
-
-  Args:
-    hparams: MaskGAN hyperparameters.
-  """
-  ## Dictionary of init savers.
-  init_savers = {}
-
-  ## Load Generator weights from MaskGAN checkpoint.
-  if FLAGS.maskgan_ckpt:
-    gen_vars = [
-        v for v in tf.trainable_variables() if v.op.name.startswith('gen')
-    ]
-    init_saver = tf.train.Saver(var_list=gen_vars)
-    init_savers['init_saver'] = init_saver
-
-    ## Load the Discriminator weights from the MaskGAN checkpoint if
-    # the weights are compatible.
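To see what `generate_mask` above produces, here is a NumPy-only demonstration of both strategies, with toy values standing in for the FLAGS:

```
import numpy as np

batch_size, sequence_length, is_present_rate = 2, 8, 0.75

# 'random': each position is kept (True) with probability is_present_rate.
random_mask = np.random.choice(
    [True, False], size=[batch_size, sequence_length],
    p=[is_present_rate, 1. - is_present_rate])

# 'contiguous': one fixed-length False run per row.
masked_length = int((1 - is_present_rate) * sequence_length) - 1
start_mask = np.random.randint(
    1, sequence_length - masked_length + 1, size=batch_size)
contiguous_mask = np.full([batch_size, sequence_length], True, dtype=bool)
for i, index in enumerate(start_mask):
  contiguous_mask[i, index:index + masked_length] = False

print(random_mask)
print(contiguous_mask)
```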
- if FLAGS.discriminator_model == 'seq2seq_vd': - dis_variable_maps = variable_mapping.dis_seq2seq_vd(hparams) - dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) - init_savers['dis_init_saver'] = dis_init_saver - - ## Load weights from language model checkpoint. - if FLAGS.language_model_ckpt_dir: - if FLAGS.maskgan_ckpt is None: - ## Generator Variables/Savers. - if FLAGS.generator_model == 'rnn_nas': - gen_variable_maps = variable_mapping.rnn_nas(hparams, model='gen') - gen_init_saver = tf.train.Saver(var_list=gen_variable_maps) - init_savers['gen_init_saver'] = gen_init_saver - - elif FLAGS.generator_model == 'seq2seq_nas': - # Encoder. - gen_encoder_variable_maps = variable_mapping.gen_encoder_seq2seq_nas( - hparams) - gen_encoder_init_saver = tf.train.Saver( - var_list=gen_encoder_variable_maps) - # Decoder. - gen_decoder_variable_maps = variable_mapping.gen_decoder_seq2seq_nas( - hparams) - gen_decoder_init_saver = tf.train.Saver( - var_list=gen_decoder_variable_maps) - init_savers['gen_encoder_init_saver'] = gen_encoder_init_saver - init_savers['gen_decoder_init_saver'] = gen_decoder_init_saver - - # seq2seq_vd derived from the same code base as seq2seq_zaremba. - elif (FLAGS.generator_model == 'seq2seq_zaremba' or - FLAGS.generator_model == 'seq2seq_vd'): - # Encoder. - gen_encoder_variable_maps = variable_mapping.gen_encoder_seq2seq( - hparams) - gen_encoder_init_saver = tf.train.Saver( - var_list=gen_encoder_variable_maps) - # Decoder. - gen_decoder_variable_maps = variable_mapping.gen_decoder_seq2seq( - hparams) - gen_decoder_init_saver = tf.train.Saver( - var_list=gen_decoder_variable_maps) - init_savers['gen_encoder_init_saver'] = gen_encoder_init_saver - init_savers['gen_decoder_init_saver'] = gen_decoder_init_saver - - else: - raise NotImplementedError - - ## Discriminator Variables/Savers. - if FLAGS.discriminator_model == 'rnn_nas': - dis_variable_maps = variable_mapping.rnn_nas(hparams, model='dis') - dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) - init_savers['dis_init_saver'] = dis_init_saver - - # rnn_vd derived from the same code base as rnn_zaremba. - elif (FLAGS.discriminator_model == 'rnn_zaremba' or - FLAGS.discriminator_model == 'rnn_vd'): - dis_variable_maps = variable_mapping.rnn_zaremba(hparams, model='dis') - dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) - init_savers['dis_init_saver'] = dis_init_saver - - elif (FLAGS.discriminator_model == 'bidirectional_zaremba' or - FLAGS.discriminator_model == 'bidirectional_vd'): - dis_fwd_variable_maps = variable_mapping.dis_fwd_bidirectional(hparams) - dis_bwd_variable_maps = variable_mapping.dis_bwd_bidirectional(hparams) - # Savers for the forward/backward Discriminator components. - dis_fwd_init_saver = tf.train.Saver(var_list=dis_fwd_variable_maps) - dis_bwd_init_saver = tf.train.Saver(var_list=dis_bwd_variable_maps) - init_savers['dis_fwd_init_saver'] = dis_fwd_init_saver - init_savers['dis_bwd_init_saver'] = dis_bwd_init_saver - - elif FLAGS.discriminator_model == 'cnn': - dis_variable_maps = variable_mapping.cnn() - dis_init_saver = tf.train.Saver(var_list=dis_variable_maps) - init_savers['dis_init_saver'] = dis_init_saver - - elif FLAGS.discriminator_model == 'seq2seq_vd': - # Encoder. - dis_encoder_variable_maps = variable_mapping.dis_encoder_seq2seq(hparams) - dis_encoder_init_saver = tf.train.Saver( - var_list=dis_encoder_variable_maps) - # Decoder. 
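The init savers above all lean on the same TF1 mechanism: `tf.train.Saver(var_list=...)`, where `var_list` may be a dictionary mapping checkpoint variable names to in-graph variables. A sketch with made-up names and a hypothetical checkpoint path:

```
import tensorflow as tf

# Hypothetical in-graph variable, stored in the checkpoint as 'Model/embedding'.
embedding = tf.get_variable('embedding', shape=[100, 32])

variable_mapping = {'Model/embedding': embedding}  # ckpt name -> variable
init_saver = tf.train.Saver(var_list=variable_mapping)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # init_saver.restore(sess, '/tmp/language_model.ckpt')  # hypothetical path
```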
- dis_decoder_variable_maps = variable_mapping.dis_decoder_seq2seq(hparams) - dis_decoder_init_saver = tf.train.Saver( - var_list=dis_decoder_variable_maps) - init_savers['dis_encoder_init_saver'] = dis_encoder_init_saver - init_savers['dis_decoder_init_saver'] = dis_decoder_init_saver - - return init_savers - - -def init_fn(init_savers, sess): - """The init_fn to be passed to the Supervisor. - - Args: - init_savers: Dictionary of init_savers. 'init_saver_name': init_saver. - sess: tf.Session. - """ - ## Load Generator weights from MaskGAN checkpoint. - if FLAGS.maskgan_ckpt: - print('Restoring Generator from %s.' % FLAGS.maskgan_ckpt) - tf.logging.info('Restoring Generator from %s.' % FLAGS.maskgan_ckpt) - print('Asserting Generator is a seq2seq-variant.') - tf.logging.info('Asserting Generator is a seq2seq-variant.') - assert FLAGS.generator_model.startswith('seq2seq') - init_saver = init_savers['init_saver'] - init_saver.restore(sess, FLAGS.maskgan_ckpt) - - ## Load the Discriminator weights from the MaskGAN checkpoint if - # the weights are compatible. - if FLAGS.discriminator_model == 'seq2seq_vd': - print('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt) - tf.logging.info('Restoring Discriminator from %s.' % FLAGS.maskgan_ckpt) - dis_init_saver = init_savers['dis_init_saver'] - dis_init_saver.restore(sess, FLAGS.maskgan_ckpt) - - ## Load weights from language model checkpoint. - if FLAGS.language_model_ckpt_dir: - if FLAGS.maskgan_ckpt is None: - ## Generator Models. - if FLAGS.generator_model == 'rnn_nas': - load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) - print('Restoring Generator from %s.' % load_ckpt) - tf.logging.info('Restoring Generator from %s.' % load_ckpt) - gen_init_saver = init_savers['gen_init_saver'] - gen_init_saver.restore(sess, load_ckpt) - - elif FLAGS.generator_model.startswith('seq2seq'): - load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) - print('Restoring Generator from %s.' % load_ckpt) - tf.logging.info('Restoring Generator from %s.' % load_ckpt) - gen_encoder_init_saver = init_savers['gen_encoder_init_saver'] - gen_decoder_init_saver = init_savers['gen_decoder_init_saver'] - gen_encoder_init_saver.restore(sess, load_ckpt) - gen_decoder_init_saver.restore(sess, load_ckpt) - - ## Discriminator Models. - if (FLAGS.discriminator_model == 'rnn_nas' or - FLAGS.discriminator_model == 'rnn_zaremba' or - FLAGS.discriminator_model == 'rnn_vd' or - FLAGS.discriminator_model == 'cnn'): - load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) - print('Restoring Discriminator from %s.' % load_ckpt) - tf.logging.info('Restoring Discriminator from %s.' % load_ckpt) - dis_init_saver = init_savers['dis_init_saver'] - dis_init_saver.restore(sess, load_ckpt) - - elif (FLAGS.discriminator_model == 'bidirectional_zaremba' or - FLAGS.discriminator_model == 'bidirectional_vd'): - assert FLAGS.language_model_ckpt_dir_reversed is not None, ( - 'Need a reversed directory to fill in the backward components.') - load_fwd_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) - load_bwd_ckpt = tf.train.latest_checkpoint( - FLAGS.language_model_ckpt_dir_reversed) - print('Restoring Discriminator from %s and %s.' % (load_fwd_ckpt, - load_bwd_ckpt)) - tf.logging.info('Restoring Discriminator from %s and %s.' 
% - (load_fwd_ckpt, load_bwd_ckpt)) - dis_fwd_init_saver = init_savers['dis_fwd_init_saver'] - dis_bwd_init_saver = init_savers['dis_bwd_init_saver'] - dis_fwd_init_saver.restore(sess, load_fwd_ckpt) - dis_bwd_init_saver.restore(sess, load_bwd_ckpt) - - elif FLAGS.discriminator_model == 'seq2seq_vd': - load_ckpt = tf.train.latest_checkpoint(FLAGS.language_model_ckpt_dir) - print('Restoring Discriminator from %s.' % load_ckpt) - tf.logging.info('Restoring Discriminator from %s.' % load_ckpt) - dis_encoder_init_saver = init_savers['dis_encoder_init_saver'] - dis_decoder_init_saver = init_savers['dis_decoder_init_saver'] - dis_encoder_init_saver.restore(sess, load_ckpt) - dis_decoder_init_saver.restore(sess, load_ckpt) - - else: - return diff --git a/research/maskgan/model_utils/n_gram.py b/research/maskgan/model_utils/n_gram.py deleted file mode 100644 index b889dde84..000000000 --- a/research/maskgan/model_utils/n_gram.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""We calculate n-Grams from the training text. We will use this as an -evaluation metric.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange - - -def hash_function(input_tuple): - """Hash function for a tuple.""" - return hash(input_tuple) - - -def find_all_ngrams(dataset, n): - """Generate a list of all ngrams.""" - return zip(*[dataset[i:] for i in xrange(n)]) - - -def construct_ngrams_dict(ngrams_list): - """Construct a ngram dictionary which maps an ngram tuple to the number - of times it appears in the text.""" - counts = {} - - for t in ngrams_list: - key = hash_function(t) - if key in counts: - counts[key] += 1 - else: - counts[key] = 1 - return counts - - -def percent_unique_ngrams_in_train(train_ngrams_dict, gen_ngrams_dict): - """Compute the percent of ngrams generated by the model that are - present in the training text and are unique.""" - - # *Total* number of n-grams produced by the generator. - total_ngrams_produced = 0 - - for _, value in gen_ngrams_dict.iteritems(): - total_ngrams_produced += value - - # The unique ngrams in the training set. - unique_ngrams_in_train = 0. - - for key, _ in gen_ngrams_dict.iteritems(): - if key in train_ngrams_dict: - unique_ngrams_in_train += 1 - return float(unique_ngrams_in_train) / float(total_ngrams_produced) diff --git a/research/maskgan/model_utils/variable_mapping.py b/research/maskgan/model_utils/variable_mapping.py deleted file mode 100644 index 0301b9697..000000000 --- a/research/maskgan/model_utils/variable_mapping.py +++ /dev/null @@ -1,745 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
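The three n-gram helpers above compose into a single overlap metric; a quick pure-Python walkthrough on a toy token list (`iteritems` above is Python 2; `items`/`get` below is the Python 3 spelling):

```
train_tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat']
gen_tokens = ['the', 'cat', 'ran', 'on', 'the', 'mat']

def find_all_ngrams(dataset, n):
  return list(zip(*[dataset[i:] for i in range(n)]))

def construct_ngrams_dict(ngrams_list):
  counts = {}
  for t in ngrams_list:
    counts[hash(t)] = counts.get(hash(t), 0) + 1
  return counts

train_dict = construct_ngrams_dict(find_all_ngrams(train_tokens, 2))
gen_dict = construct_ngrams_dict(find_all_ngrams(gen_tokens, 2))

total_produced = sum(gen_dict.values())
in_train = sum(1 for key in gen_dict if key in train_dict)
print(float(in_train) / total_produced)  # 0.6: 3 of 5 generated bigrams
```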
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def rnn_nas(hparams, model): - assert model == 'gen' or model == 'dis' - - # This logic is only valid for rnn_zaremba - if model == 'gen': - assert FLAGS.generator_model == 'rnn_nas' - assert hparams.gen_num_layers == 2 - - if model == 'dis': - assert FLAGS.discriminator_model == 'rnn_nas' - assert hparams.dis_num_layers == 2 - - # Output variables only for the Generator. Discriminator output biases - # will begin randomly initialized. - if model == 'gen': - softmax_b = [ - v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b' - ][0] - - # Common elements to Generator and Discriminator. - embedding = [ - v for v in tf.trainable_variables() - if v.op.name == str(model) + '/rnn/embedding' - ][0] - lstm_w_0 = [ - v for v in tf.trainable_variables() - if v.op.name == - str(model) + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat' - ][0] - lstm_b_0 = [ - v for v in tf.trainable_variables() - if v.op.name == str(model) + - '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat' - ][0] - lstm_w_1 = [ - v for v in tf.trainable_variables() - if v.op.name == - str(model) + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat' - ][0] - lstm_b_1 = [ - v for v in tf.trainable_variables() - if v.op.name == str(model) + - '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat' - ][0] - - # Dictionary mapping. - if model == 'gen': - variable_mapping = { - 'Model/embeddings/input_embedding': - embedding, - 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': - lstm_w_0, - 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': - lstm_b_0, - 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': - lstm_w_1, - 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': - lstm_b_1, - 'Model/softmax_b': - softmax_b - } - else: - variable_mapping = { - 'Model/embeddings/input_embedding': - embedding, - 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat': - lstm_w_0, - 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat': - lstm_b_0, - 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat': - lstm_w_1, - 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat': - lstm_b_1 - } - - return variable_mapping - - -def cnn(): - """Variable mapping for the CNN embedding. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_var. - """ - # This logic is only valid for cnn - assert FLAGS.discriminator_model == 'cnn' - - # Retrieve CNN embedding. - embedding = [ - v for v in tf.trainable_variables() if v.op.name == 'dis/embedding' - ][0] - - # Variable mapping. - variable_mapping = {'Model/embedding': embedding} - - return variable_mapping - - -def rnn_zaremba(hparams, model): - """Returns the PTB Variable name to MaskGAN Variable dictionary mapping. 
This - is a highly restrictive function just for testing. This will need to be - generalized. - - Args: - hparams: Hyperparameters for the MaskGAN. - model: Model type, one of ['gen', 'dis']. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_var. - """ - assert model == 'gen' or model == 'dis' - - # This logic is only valid for rnn_zaremba - if model == 'gen': - assert FLAGS.generator_model == 'rnn_zaremba' - assert hparams.gen_num_layers == 2 - - if model == 'dis': - assert (FLAGS.discriminator_model == 'rnn_zaremba' or - FLAGS.discriminator_model == 'rnn_vd') - assert hparams.dis_num_layers == 2 - - # Output variables only for the Generator. Discriminator output weights - # and biases will begin randomly initialized. - if model == 'gen': - softmax_w = [ - v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_w' - ][0] - softmax_b = [ - v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b' - ][0] - - # Common elements to Generator and Discriminator. - if not FLAGS.dis_share_embedding or model != 'dis': - embedding = [ - v for v in tf.trainable_variables() - if v.op.name == str(model) + '/rnn/embedding' - ][0] - lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == str(model) + - '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == str(model) + - '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == str(model) + - '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == str(model) + - '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - # Dictionary mapping. - if model == 'gen': - variable_mapping = { - 'Model/embedding': embedding, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1, - 'Model/softmax_w': softmax_w, - 'Model/softmax_b': softmax_b - } - else: - if FLAGS.dis_share_embedding: - variable_mapping = { - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1 - } - else: - variable_mapping = { - 'Model/embedding': embedding, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1 - } - - return variable_mapping - - -def gen_encoder_seq2seq_nas(hparams): - """Returns the NAS Variable name to MaskGAN Variable - dictionary mapping. This is a highly restrictive function just for testing. - This is for the *unidirecitional* seq2seq_nas encoder. - - Args: - hparams: Hyperparameters for the MaskGAN. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. - """ - assert FLAGS.generator_model == 'seq2seq_nas' - assert hparams.gen_num_layers == 2 - ## Encoder forward variables. 
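The mapping builders in this file repeat the same list-comprehension lookup dozens of times; a small helper (ours, not part of the original module) makes the pattern and its failure mode explicit:

```
import tensorflow as tf

def get_trainable_by_name(name):
  """Return the unique trainable variable whose op name matches exactly."""
  matches = [v for v in tf.trainable_variables() if v.op.name == name]
  if len(matches) != 1:
    raise ValueError('Expected exactly one variable named %r, found %d' %
                     (name, len(matches)))
  return matches[0]

# e.g. embedding = get_trainable_by_name('gen/encoder/rnn/embedding')
```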
-
-  if not FLAGS.seq2seq_share_embedding:
-    encoder_embedding = [
-        v for v in tf.trainable_variables()
-        if v.op.name == 'gen/encoder/rnn/embedding'
-    ][0]
-  encoder_lstm_w_0 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
-  ][0]
-  encoder_lstm_b_0 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
-  ][0]
-  encoder_lstm_w_1 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
-  ][0]
-  encoder_lstm_b_1 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
-  ][0]
-
-  if not FLAGS.seq2seq_share_embedding:
-    variable_mapping = {
-        'Model/embeddings/input_embedding':
-            encoder_embedding,
-        'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
-            encoder_lstm_w_0,
-        'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
-            encoder_lstm_b_0,
-        'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
-            encoder_lstm_w_1,
-        'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
-            encoder_lstm_b_1
-    }
-  else:
-    variable_mapping = {
-        'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
-            encoder_lstm_w_0,
-        'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
-            encoder_lstm_b_0,
-        'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
-            encoder_lstm_w_1,
-        'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
-            encoder_lstm_b_1
-    }
-  return variable_mapping
-
-
-def gen_decoder_seq2seq_nas(hparams):
-  assert FLAGS.generator_model == 'seq2seq_nas'
-  assert hparams.gen_num_layers == 2
-
-  decoder_embedding = [
-      v for v in tf.trainable_variables()
-      if v.op.name == 'gen/decoder/rnn/embedding'
-  ][0]
-  decoder_lstm_w_0 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
-  ][0]
-  decoder_lstm_b_0 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
-  ][0]
-  decoder_lstm_w_1 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
-  ][0]
-  decoder_lstm_b_1 = [
-      v for v in tf.trainable_variables()
-      if v.op.name ==
-      'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
-  ][0]
-
-  decoder_softmax_b = [
-      v for v in tf.trainable_variables()
-      if v.op.name == 'gen/decoder/rnn/softmax_b'
-  ][0]
-
-  variable_mapping = {
-      'Model/embeddings/input_embedding':
-          decoder_embedding,
-      'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
-          decoder_lstm_w_0,
-      'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
-          decoder_lstm_b_0,
-      'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
-          decoder_lstm_w_1,
-      'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
-          decoder_lstm_b_1,
-      'Model/softmax_b':
-          decoder_softmax_b
-  }
-
-  return variable_mapping
-
-
-def gen_encoder_seq2seq(hparams):
-  """Returns the PTB Variable name to MaskGAN Variable
-  dictionary mapping. This is a highly restrictive function just for testing.
-  This is for the *unidirectional* seq2seq_zaremba encoder.
-
-  Args:
-    hparams: Hyperparameters for the MaskGAN.
- - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. - """ - assert (FLAGS.generator_model == 'seq2seq_zaremba' or - FLAGS.generator_model == 'seq2seq_vd') - assert hparams.gen_num_layers == 2 - - ## Encoder forward variables. - if not FLAGS.seq2seq_share_embedding: - encoder_embedding = [ - v for v in tf.trainable_variables() - if v.op.name == 'gen/encoder/rnn/embedding' - ][0] - encoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - encoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - if FLAGS.data_set == 'ptb': - model_str = 'Model' - else: - model_str = 'model' - - if not FLAGS.seq2seq_share_embedding: - variable_mapping = { - str(model_str) + '/embedding': - encoder_embedding, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - encoder_lstm_w_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - encoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - encoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - encoder_lstm_b_1 - } - else: - variable_mapping = { - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - encoder_lstm_w_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - encoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - encoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - encoder_lstm_b_1 - } - return variable_mapping - - -def gen_decoder_seq2seq(hparams): - assert (FLAGS.generator_model == 'seq2seq_zaremba' or - FLAGS.generator_model == 'seq2seq_vd') - assert hparams.gen_num_layers == 2 - - decoder_embedding = [ - v for v in tf.trainable_variables() - if v.op.name == 'gen/decoder/rnn/embedding' - ][0] - decoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - decoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - decoder_softmax_b = [ - v for v in tf.trainable_variables() - if v.op.name == 'gen/decoder/rnn/softmax_b' - ][0] - - if FLAGS.data_set == 'ptb': - model_str = 'Model' - else: - model_str = 'model' - - variable_mapping = { - str(model_str) + '/embedding': - decoder_embedding, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - decoder_lstm_w_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - decoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - decoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - decoder_lstm_b_1, - str(model_str) + 
'/softmax_b': - decoder_softmax_b - } - return variable_mapping - - -def dis_fwd_bidirectional(hparams): - """Returns the *forward* PTB Variable name to MaskGAN Variable dictionary - mapping. This is a highly restrictive function just for testing. This is for - the bidirectional_zaremba discriminator. - - Args: - FLAGS: Flags for the model. - hparams: Hyperparameters for the MaskGAN. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. - """ - assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or - FLAGS.discriminator_model == 'bidirectional_vd') - assert hparams.dis_num_layers == 2 - - # Forward Discriminator Elements. - if not FLAGS.dis_share_embedding: - embedding = [ - v for v in tf.trainable_variables() if v.op.name == 'dis/embedding' - ][0] - fw_lstm_w_0 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - fw_lstm_b_0 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - fw_lstm_w_1 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - fw_lstm_b_1 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - if FLAGS.dis_share_embedding: - variable_mapping = { - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1 - } - else: - variable_mapping = { - 'Model/embedding': embedding, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1 - } - return variable_mapping - - -def dis_bwd_bidirectional(hparams): - """Returns the *backward* PTB Variable name to MaskGAN Variable dictionary - mapping. This is a highly restrictive function just for testing. This is for - the bidirectional_zaremba discriminator. - - Args: - hparams: Hyperparameters for the MaskGAN. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. - """ - assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or - FLAGS.discriminator_model == 'bidirectional_vd') - assert hparams.dis_num_layers == 2 - - # Backward Discriminator Elements. 
- bw_lstm_w_0 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - bw_lstm_b_0 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - bw_lstm_w_1 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - bw_lstm_b_1 = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - variable_mapping = { - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': bw_lstm_w_0, - 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': bw_lstm_b_0, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': bw_lstm_w_1, - 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': bw_lstm_b_1 - } - return variable_mapping - - -def dis_encoder_seq2seq(hparams): - """Returns the PTB Variable name to MaskGAN Variable - dictionary mapping. - - Args: - hparams: Hyperparameters for the MaskGAN. - - Returns: - variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself. - """ - assert FLAGS.discriminator_model == 'seq2seq_vd' - assert hparams.dis_num_layers == 2 - - ## Encoder forward variables. - encoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - encoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - if FLAGS.data_set == 'ptb': - model_str = 'Model' - else: - model_str = 'model' - - variable_mapping = { - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - encoder_lstm_w_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - encoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - encoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - encoder_lstm_b_1 - } - return variable_mapping - - -def dis_decoder_seq2seq(hparams): - assert FLAGS.discriminator_model == 'seq2seq_vd' - assert hparams.dis_num_layers == 2 - - if not FLAGS.dis_share_embedding: - decoder_embedding = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/decoder/rnn/embedding' - ][0] - decoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - decoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - if FLAGS.data_set == 'ptb': - model_str = 'Model' - else: - model_str = 'model' - - if not FLAGS.dis_share_embedding: - variable_mapping = { - str(model_str) + '/embedding': - decoder_embedding, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - decoder_lstm_w_0, - str(model_str) + 
'/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - decoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - decoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - decoder_lstm_b_1 - } - else: - variable_mapping = { - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - decoder_lstm_w_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - decoder_lstm_b_0, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - decoder_lstm_w_1, - str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - decoder_lstm_b_1, - } - return variable_mapping - - -def dis_seq2seq_vd(hparams): - assert FLAGS.discriminator_model == 'seq2seq_vd' - assert hparams.dis_num_layers == 2 - - if not FLAGS.dis_share_embedding: - decoder_embedding = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/decoder/rnn/embedding' - ][0] - - ## Encoder variables. - encoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - encoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - encoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - ## Attention. - if FLAGS.attention_option is not None: - decoder_attention_keys = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/decoder/attention_keys/weights' - ][0] - decoder_attention_construct_weights = [ - v for v in tf.trainable_variables() - if v.op.name == 'dis/decoder/rnn/attention_construct/weights' - ][0] - - ## Decoder. - decoder_lstm_w_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_0 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias' - ][0] - decoder_lstm_w_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel' - ][0] - decoder_lstm_b_1 = [ - v for v in tf.trainable_variables() if v.op.name == - 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias' - ][0] - - # Standard variable mappings. - variable_mapping = { - 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - encoder_lstm_w_0, - 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - encoder_lstm_b_0, - 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - encoder_lstm_w_1, - 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - encoder_lstm_b_1, - 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': - decoder_lstm_w_0, - 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias': - decoder_lstm_b_0, - 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': - decoder_lstm_w_1, - 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias': - decoder_lstm_b_1 - } - - # Optional variable mappings. 
- if not FLAGS.dis_share_embedding: - variable_mapping['gen/decoder/rnn/embedding'] = decoder_embedding - if FLAGS.attention_option is not None: - variable_mapping[ - 'gen/decoder/attention_keys/weights'] = decoder_attention_keys - variable_mapping[ - 'gen/decoder/rnn/attention_construct/weights'] = decoder_attention_construct_weights - - return variable_mapping diff --git a/research/maskgan/models/__init__.py b/research/maskgan/models/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/maskgan/models/attention_utils.py b/research/maskgan/models/attention_utils.py deleted file mode 100644 index 4bd9e41dd..000000000 --- a/research/maskgan/models/attention_utils.py +++ /dev/null @@ -1,477 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Attention-based decoder functions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf -from tensorflow.python.framework import function - -__all__ = [ - "prepare_attention", "attention_decoder_fn_train", - "attention_decoder_fn_inference" -] - - -def attention_decoder_fn_train(encoder_state, - attention_keys, - attention_values, - attention_score_fn, - attention_construct_fn, - name=None): - """Attentional decoder function for `dynamic_rnn_decoder` during training. - - The `attention_decoder_fn_train` is a training function for an - attention-based sequence-to-sequence model. It should be used when - `dynamic_rnn_decoder` is in the training mode. - - The `attention_decoder_fn_train` is called with a set of the user arguments - and returns the `decoder_fn`, which can be passed to the - `dynamic_rnn_decoder`, such that - - ``` - dynamic_fn_train = attention_decoder_fn_train(encoder_state) - outputs_train, state_train = dynamic_rnn_decoder( - decoder_fn=dynamic_fn_train, ...) - ``` - - Further usage can be found in the `kernel_tests/seq2seq_test.py`. - - Args: - encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. - attention_keys: to be compared with target states. - attention_values: to be used to construct context vectors. - attention_score_fn: to compute similarity between key and target states. - attention_construct_fn: to build attention states. - name: (default: `None`) NameScope for the decoder function; - defaults to "simple_decoder_fn_train" - - Returns: - A decoder function with the required interface of `dynamic_rnn_decoder` - intended for training. - """ - with tf.name_scope(name, "attention_decoder_fn_train", [ - encoder_state, attention_keys, attention_values, attention_score_fn, - attention_construct_fn - ]): - pass - - def decoder_fn(time, cell_state, cell_input, cell_output, context_state): - """Decoder function used in the `dynamic_rnn_decoder` for training. - - Args: - time: positive integer constant reflecting the current timestep. 
- cell_state: state of RNNCell. - cell_input: input provided by `dynamic_rnn_decoder`. - cell_output: output of RNNCell. - context_state: context state provided by `dynamic_rnn_decoder`. - - Returns: - A tuple (done, next state, next input, emit output, next context state) - where: - - done: `None`, which is used by the `dynamic_rnn_decoder` to indicate - that `sequence_lengths` in `dynamic_rnn_decoder` should be used. - - next state: `cell_state`, this decoder function does not modify the - given state. - - next input: `cell_input`, this decoder function does not modify the - given input. The input could be modified when applying e.g. attention. - - emit output: `cell_output`, this decoder function does not modify the - given output. - - next context state: `context_state`, this decoder function does not - modify the given context state. The context state could be modified when - applying e.g. beam search. - """ - with tf.name_scope( - name, "attention_decoder_fn_train", - [time, cell_state, cell_input, cell_output, context_state]): - if cell_state is None: # first call, return encoder_state - cell_state = encoder_state - - # init attention - attention = _init_attention(encoder_state) - else: - # construct attention - attention = attention_construct_fn(cell_output, attention_keys, - attention_values) - cell_output = attention - - # combine cell_input and attention - next_input = tf.concat([cell_input, attention], 1) - - return (None, cell_state, next_input, cell_output, context_state) - - return decoder_fn - - -def attention_decoder_fn_inference(output_fn, - encoder_state, - attention_keys, - attention_values, - attention_score_fn, - attention_construct_fn, - embeddings, - start_of_sequence_id, - end_of_sequence_id, - maximum_length, - num_decoder_symbols, - dtype=tf.int32, - name=None): - """Attentional decoder function for `dynamic_rnn_decoder` during inference. - - The `attention_decoder_fn_inference` is a simple inference function for a - sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is - in the inference mode. - - The `attention_decoder_fn_inference` is called with user arguments - and returns the `decoder_fn`, which can be passed to the - `dynamic_rnn_decoder`, such that - - ``` - dynamic_fn_inference = attention_decoder_fn_inference(...) - outputs_inference, state_inference = dynamic_rnn_decoder( - decoder_fn=dynamic_fn_inference, ...) - ``` - - Further usage can be found in the `kernel_tests/seq2seq_test.py`. - - Args: - output_fn: An output function to project your `cell_output` onto class - logits. - - An example of an output function; - - ``` - tf.variable_scope("decoder") as varscope - output_fn = lambda x: tf.contrib.layers.linear(x, num_decoder_symbols, - scope=varscope) - - outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...) - logits_train = output_fn(outputs_train) - - varscope.reuse_variables() - logits_inference, state_inference = seq2seq.dynamic_rnn_decoder( - output_fn=output_fn, ...) - ``` - - If `None` is supplied it will act as an identity function, which - might be wanted when using the RNNCell `OutputProjectionWrapper`. - - encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. - attention_keys: to be compared with target states. - attention_values: to be used to construct context vectors. - attention_score_fn: to compute similarity between key and target states. - attention_construct_fn: to build attention states. 
-    embeddings: The embeddings matrix used for the decoder sized
-      `[num_decoder_symbols, embedding_size]`.
-    start_of_sequence_id: The start of sequence ID in the decoder embeddings.
-    end_of_sequence_id: The end of sequence ID in the decoder embeddings.
-    maximum_length: The maximum allowed number of time steps to decode.
-    num_decoder_symbols: The number of classes to decode at each time step.
-    dtype: (default: `tf.int32`) The default data type to use when
-      handling integer objects.
-    name: (default: `None`) NameScope for the decoder function;
-      defaults to "attention_decoder_fn_inference"
-
-  Returns:
-    A decoder function with the required interface of `dynamic_rnn_decoder`
-    intended for inference.
-  """
-  with tf.name_scope(name, "attention_decoder_fn_inference", [
-      output_fn, encoder_state, attention_keys, attention_values,
-      attention_score_fn, attention_construct_fn, embeddings,
-      start_of_sequence_id, end_of_sequence_id, maximum_length,
-      num_decoder_symbols, dtype
-  ]):
-    start_of_sequence_id = tf.convert_to_tensor(start_of_sequence_id, dtype)
-    end_of_sequence_id = tf.convert_to_tensor(end_of_sequence_id, dtype)
-    maximum_length = tf.convert_to_tensor(maximum_length, dtype)
-    num_decoder_symbols = tf.convert_to_tensor(num_decoder_symbols, dtype)
-    encoder_info = tf.contrib.framework.nest.flatten(encoder_state)[0]
-    batch_size = encoder_info.get_shape()[0].value
-    if output_fn is None:
-      output_fn = lambda x: x
-    if batch_size is None:
-      batch_size = tf.shape(encoder_info)[0]
-
-  def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
-    """Decoder function used in the `dynamic_rnn_decoder` for inference.
-
-    The main difference between this decoder function and the `decoder_fn` in
-    `attention_decoder_fn_train` is how `next_cell_input` is calculated. In
-    this decoder function we calculate the next input by applying an argmax
-    across the feature dimension of the output from the decoder. This is a
-    greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
-    use beam-search instead.
-
-    Args:
-      time: positive integer constant reflecting the current timestep.
-      cell_state: state of RNNCell.
-      cell_input: input provided by `dynamic_rnn_decoder`.
-      cell_output: output of RNNCell.
-      context_state: context state provided by `dynamic_rnn_decoder`.
-
-    Returns:
-      A tuple (done, next state, next input, emit output, next context state)
-      where:
-
-      done: A boolean vector to indicate which sentences have reached an
-      `end_of_sequence_id`. This is used for early stopping by the
-      `dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
-      all elements as `true` is returned.
-
-      next state: `cell_state`, this decoder function does not modify the
-      given state.
-
-      next input: The embedding from argmax of the `cell_output` is used as
-      `next_input`.
-
-      emit output: If `output_fn is None` the supplied `cell_output` is
-      returned, else the `output_fn` is used to update the `cell_output`
-      before calculating `next_input` and returning `cell_output`.
-
-      next context state: `context_state`, this decoder function does not
-      modify the given context state. The context state could be modified when
-      applying e.g. beam search.
-
-    Raises:
-      ValueError: if cell_input is not None.
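The inference `decoder_fn` above implements greedy search: project the cell output to logits, take the argmax, embed it as the next input, and mark a sequence done once it emits the end-of-sequence id. A compact TF1 sketch of just that step, on toy shapes:

```
import tensorflow as tf

batch_size, vocab_size, embed_dim = 4, 10, 8
end_of_sequence_id = 2

logits = tf.random_normal([batch_size, vocab_size])      # output_fn(cell_output)
embeddings = tf.random_normal([vocab_size, embed_dim])

next_input_id = tf.cast(tf.argmax(logits, 1), tf.int32)  # greedy choice
done = tf.equal(next_input_id, end_of_sequence_id)       # early-stop mask
next_input = tf.gather(embeddings, next_input_id)        # embed for next step

with tf.Session() as sess:
  ids, finished = sess.run([next_input_id, done])
  print(ids, finished)
```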
- - """ - with tf.name_scope( - name, "attention_decoder_fn_inference", - [time, cell_state, cell_input, cell_output, context_state]): - if cell_input is not None: - raise ValueError( - "Expected cell_input to be None, but saw: %s" % cell_input) - if cell_output is None: - # invariant that this is time == 0 - next_input_id = tf.ones( - [ - batch_size, - ], dtype=dtype) * ( - start_of_sequence_id) - done = tf.zeros( - [ - batch_size, - ], dtype=tf.bool) - cell_state = encoder_state - cell_output = tf.zeros([num_decoder_symbols], dtype=tf.float32) - cell_input = tf.gather(embeddings, next_input_id) - - # init attention - attention = _init_attention(encoder_state) - else: - # construct attention - attention = attention_construct_fn(cell_output, attention_keys, - attention_values) - cell_output = attention - - # argmax decoder - cell_output = output_fn(cell_output) # logits - next_input_id = tf.cast(tf.argmax(cell_output, 1), dtype=dtype) - done = tf.equal(next_input_id, end_of_sequence_id) - cell_input = tf.gather(embeddings, next_input_id) - - # combine cell_input and attention - next_input = tf.concat([cell_input, attention], 1) - - # if time > maxlen, return all true vector - done = tf.cond( - tf.greater(time, maximum_length), - lambda: tf.ones([ - batch_size,], dtype=tf.bool), lambda: done) - return (done, cell_state, next_input, cell_output, context_state) - - return decoder_fn - - -## Helper functions ## -def prepare_attention(attention_states, attention_option, num_units, - reuse=None): - """Prepare keys/values/functions for attention. - - Args: - attention_states: hidden states to attend over. - attention_option: how to compute attention, either "luong" or "bahdanau". - num_units: hidden state dimension. - reuse: whether to reuse variable scope. - - Returns: - attention_keys: to be compared with target states. - attention_values: to be used to construct context vectors. - attention_score_fn: to compute similarity between key and target states. - attention_construct_fn: to build attention states. - """ - # Prepare attention keys / values from attention_states - with tf.variable_scope("attention_keys", reuse=reuse) as scope: - attention_keys = tf.contrib.layers.linear( - attention_states, num_units, biases_initializer=None, scope=scope) - attention_values = attention_states - - # Attention score function - attention_score_fn = _create_attention_score_fn("attention_score", num_units, - attention_option, reuse) - # Attention construction function - attention_construct_fn = _create_attention_construct_fn( - "attention_construct", num_units, attention_score_fn, reuse) - - return (attention_keys, attention_values, attention_score_fn, - attention_construct_fn) - - -def _init_attention(encoder_state): - """Initialize attention. Handling both LSTM and GRU. - - Args: - encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`. - - Returns: - attn: initial zero attention vector. - """ - - # Multi- vs single-layer - # TODO(thangluong): is this the best way to check? - if isinstance(encoder_state, tuple): - top_state = encoder_state[-1] - else: - top_state = encoder_state - - # LSTM vs GRU - if isinstance(top_state, tf.contrib.rnn.LSTMStateTuple): - attn = tf.zeros_like(top_state.h) - else: - attn = tf.zeros_like(top_state) - - return attn - - -def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse): - """Function to compute attention vectors. - - Args: - name: to label variables. - num_units: hidden state dimension. 
- attention_score_fn: to compute similarity between key and target states. - reuse: whether to reuse variable scope. - - Returns: - attention_construct_fn: to build attention states. - """ - - def construct_fn(attention_query, attention_keys, attention_values): - with tf.variable_scope(name, reuse=reuse) as scope: - context = attention_score_fn(attention_query, attention_keys, - attention_values) - concat_input = tf.concat([attention_query, context], 1) - attention = tf.contrib.layers.linear( - concat_input, num_units, biases_initializer=None, scope=scope) - return attention - - return construct_fn - - -# keys: [batch_size, attention_length, attn_size] -# query: [batch_size, 1, attn_size] -# return weights [batch_size, attention_length] -@function.Defun(func_name="attn_add_fun", noinline=True) -def _attn_add_fun(v, keys, query): - return tf.reduce_sum(v * tf.tanh(keys + query), [2]) - - -@function.Defun(func_name="attn_mul_fun", noinline=True) -def _attn_mul_fun(keys, query): - return tf.reduce_sum(keys * query, [2]) - - -def _create_attention_score_fn(name, - num_units, - attention_option, - reuse, - dtype=tf.float32): - """Different ways to compute attention scores. - - Args: - name: to label variables. - num_units: hidden state dimension. - attention_option: how to compute attention, either "luong" or "bahdanau". - "bahdanau": additive (Bahdanau et al., ICLR'2015) - "luong": multiplicative (Luong et al., EMNLP'2015) - reuse: whether to reuse variable scope. - dtype: (default: `tf.float32`) data type to use. - - Returns: - attention_score_fn: to compute similarity between key and target states. - """ - with tf.variable_scope(name, reuse=reuse): - if attention_option == "bahdanau": - query_w = tf.get_variable("attnW", [num_units, num_units], dtype=dtype) - score_v = tf.get_variable("attnV", [num_units], dtype=dtype) - - def attention_score_fn(query, keys, values): - """Put attention masks on attention_values using attention_keys and query. - - Args: - query: A Tensor of shape [batch_size, num_units]. - keys: A Tensor of shape [batch_size, attention_length, num_units]. - values: A Tensor of shape [batch_size, attention_length, num_units]. - - Returns: - context_vector: A Tensor of shape [batch_size, num_units]. - - Raises: - ValueError: if attention_option is neither "luong" or "bahdanau". - - - """ - if attention_option == "bahdanau": - # transform query - query = tf.matmul(query, query_w) - - # reshape query: [batch_size, 1, num_units] - query = tf.reshape(query, [-1, 1, num_units]) - - # attn_fun - scores = _attn_add_fun(score_v, keys, query) - elif attention_option == "luong": - # reshape query: [batch_size, 1, num_units] - query = tf.reshape(query, [-1, 1, num_units]) - - # attn_fun - scores = _attn_mul_fun(keys, query) - else: - raise ValueError("Unknown attention option %s!" % attention_option) - - # Compute alignment weights - # scores: [batch_size, length] - # alignments: [batch_size, length] - # TODO(thangluong): not normalize over padding positions. - alignments = tf.nn.softmax(scores) - - # Now calculate the attention-weighted vector. 
- alignments = tf.expand_dims(alignments, 2) - context_vector = tf.reduce_sum(alignments * values, [1]) - context_vector.set_shape([None, num_units]) - - return context_vector - - return attention_score_fn diff --git a/research/maskgan/models/bidirectional.py b/research/maskgan/models/bidirectional.py deleted file mode 100644 index 1e6b3fe45..000000000 --- a/research/maskgan/models/bidirectional.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple bidirectional model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -# ZoneoutWrapper. -from regularization import zoneout - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the bidirectional Discriminator graph.""" - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('dis', reuse=reuse): - cell_fwd = tf.contrib.rnn.LayerNormBasicLSTMCell( - hparams.dis_rnn_size, forget_bias=1.0, reuse=reuse) - cell_bwd = tf.contrib.rnn.LayerNormBasicLSTMCell( - hparams.dis_rnn_size, forget_bias=1.0, reuse=reuse) - if FLAGS.zoneout_drop_prob > 0.0: - cell_fwd = zoneout.ZoneoutWrapper( - cell_fwd, - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - cell_bwd = zoneout.ZoneoutWrapper( - cell_bwd, - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - - state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) - state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) - - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - rnn_inputs = tf.unstack(rnn_inputs, axis=1) - - with tf.variable_scope('rnn') as vs: - outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( - cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) - - # Prediction is linear output for Discriminator. - predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) - - predictions = tf.transpose(predictions, [1, 0, 2]) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/bidirectional_vd.py b/research/maskgan/models/bidirectional_vd.py deleted file mode 100644 index 469af9da5..000000000 --- a/research/maskgan/models/bidirectional_vd.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
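Recapping the two scoring options implemented by `_attn_add_fun` and
`_attn_mul_fun` above, here is a self-contained NumPy sketch (illustrative
names, not the deleted module's API) of how a context vector is formed from
either score:

import numpy as np

def softmax(x):
  e = np.exp(x - x.max(axis=-1, keepdims=True))
  return e / e.sum(axis=-1, keepdims=True)

batch_size, attention_length, num_units = 2, 4, 3
keys = np.random.randn(batch_size, attention_length, num_units)
values = np.random.randn(batch_size, attention_length, num_units)
query = np.random.randn(batch_size, 1, num_units)   # reshaped as in the code
v = np.random.randn(num_units)

# "bahdanau": additive scoring, sum(v * tanh(keys + query)) over units.
bahdanau_scores = np.sum(v * np.tanh(keys + query), axis=2)
# "luong": multiplicative scoring, sum(keys * query) over units.
luong_scores = np.sum(keys * query, axis=2)

alignments = softmax(bahdanau_scores)                        # [batch, length]
context_vector = np.sum(alignments[:, :, None] * values, 1)  # [batch, num_units]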
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple bidirectional model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf -from regularization import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, - sequence, - is_training, - reuse=None, - initial_state=None): - """Define the Discriminator graph.""" - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('dis', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.dis_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, - hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) - - cell_fwd = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - cell_bwd = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - # print initial_state - # print cell_fwd.zero_state(FLAGS.batch_size, tf.float32) - if initial_state: - state_fwd = [[tf.identity(x) for x in inner_initial_state] - for inner_initial_state in initial_state] - state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) - else: - state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) - state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.dis_vd_keep_prob, - 2 * hparams.dis_rnn_size) - - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - rnn_inputs = tf.unstack(rnn_inputs, axis=1) - - with tf.variable_scope('rnn') as vs: - outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( - cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) - - if is_training: - outputs *= output_mask - - # Prediction is linear output for Discriminator. 
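The `make_mask` helper defined further down in this file implements inverted
dropout. A small NumPy sketch of the same trick, with hypothetical argument
names:

import numpy as np

def make_mask(keep_prob, batch_size, units):
  # floor(keep_prob + U[0, 1)) is 1 with probability keep_prob and 0
  # otherwise; dividing by keep_prob rescales the survivors so the
  # expected activation is unchanged (inverted dropout).
  random_tensor = keep_prob + np.random.uniform(size=(batch_size, units))
  return np.floor(random_tensor) / keep_prob

mask = make_mask(0.75, batch_size=4, units=8)   # entries are 0 or 1/0.75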
- predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) - predictions = tf.transpose(predictions, [1, 0, 2]) - - if FLAGS.baseline_method == 'critic': - with tf.variable_scope('critic', reuse=reuse) as critic_scope: - values = tf.contrib.layers.linear(outputs, 1, scope=critic_scope) - values = tf.transpose(values, [1, 0, 2]) - - return tf.squeeze(predictions, axis=2), tf.squeeze(values, axis=2) - - else: - return tf.squeeze(predictions, axis=2), None diff --git a/research/maskgan/models/bidirectional_zaremba.py b/research/maskgan/models/bidirectional_zaremba.py deleted file mode 100644 index b0683d7cc..000000000 --- a/research/maskgan/models/bidirectional_zaremba.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple bidirectional model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the bidirectional Discriminator graph.""" - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('dis', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and FLAGS.keep_prob < 1: - - def attn_cell(): - return tf.contrib.rnn.DropoutWrapper( - lstm_cell(), output_keep_prob=FLAGS.keep_prob) - - cell_fwd = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - cell_bwd = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32) - state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32) - - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - rnn_inputs = tf.unstack(rnn_inputs, axis=1) - - with tf.variable_scope('rnn') as vs: - outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn( - cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs) - - # Prediction is linear output for Discriminator. 
- predictions = tf.contrib.layers.linear(outputs, 1, scope=vs) - - predictions = tf.transpose(predictions, [1, 0, 2]) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/cnn.py b/research/maskgan/models/cnn.py deleted file mode 100644 index ca682debf..000000000 --- a/research/maskgan/models/cnn.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple CNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the Discriminator graph.""" - del is_training - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - "If you wish to share Discriminator/Generator embeddings, they must be" - " same dimension.") - with tf.variable_scope("gen/rnn", reuse=True): - embedding = tf.get_variable("embedding", - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20] - - with tf.variable_scope("dis", reuse=reuse): - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable("embedding", - [FLAGS.vocab_size, hparams.dis_rnn_size]) - cnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - # Create a convolution layer for each filter size - conv_outputs = [] - for filter_size in dis_filter_sizes: - with tf.variable_scope("conv-%s" % filter_size): - # Convolution Layer - filter_shape = [ - filter_size, hparams.dis_rnn_size, hparams.dis_num_filters - ] - W = tf.get_variable( - name="W", initializer=tf.truncated_normal(filter_shape, stddev=0.1)) - b = tf.get_variable( - name="b", - initializer=tf.constant(0.1, shape=[hparams.dis_num_filters])) - conv = tf.nn.conv1d( - cnn_inputs, W, stride=1, padding="SAME", name="conv") - - # Apply nonlinearity - h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") - - conv_outputs.append(h) - - # Combine all the pooled features - dis_num_filters_total = hparams.dis_num_filters * len(dis_filter_sizes) - - h_conv = tf.concat(conv_outputs, axis=2) - h_conv_flat = tf.reshape(h_conv, [-1, dis_num_filters_total]) - - # Add dropout - with tf.variable_scope("dropout"): - h_drop = tf.nn.dropout(h_conv_flat, FLAGS.keep_prob) - - with tf.variable_scope("fully_connected"): - fc = tf.contrib.layers.fully_connected( - h_drop, num_outputs=dis_num_filters_total / 2) - - # Final (unnormalized) scores and predictions - with tf.variable_scope("output"): - W = tf.get_variable( - "W", - shape=[dis_num_filters_total / 2, 1], - initializer=tf.contrib.layers.xavier_initializer()) - b = tf.get_variable(name="b", initializer=tf.constant(0.1, shape=[1])) - predictions = tf.nn.xw_plus_b(fc, W, b, name="predictions") - predictions = tf.reshape( - predictions, 
shape=[FLAGS.batch_size, FLAGS.sequence_length]) - return predictions diff --git a/research/maskgan/models/critic_vd.py b/research/maskgan/models/critic_vd.py deleted file mode 100644 index ede8b7bb7..000000000 --- a/research/maskgan/models/critic_vd.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Critic model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf -from regularization import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def critic_seq2seq_vd_derivative(hparams, sequence, is_training, reuse=None): - """Define the Critic graph which is derived from the seq2seq_vd - Discriminator. This will be initialized with the same parameters as the - language model and will share the forward RNN components with the - Discriminator. This estimates the V(s_t), where the state - s_t = x_0,...,x_t-1. - """ - assert FLAGS.discriminator_model == 'seq2seq_vd' - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - else: - with tf.variable_scope('dis/decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - with tf.variable_scope( - 'dis/decoder/rnn/multi_rnn_cell', reuse=True) as dis_scope: - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=True) - - attn_cell = lstm_cell - if is_training and hparams.dis_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, - hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) - - cell_critic = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - with tf.variable_scope('critic', reuse=reuse): - state_dis = cell_critic.zero_state(FLAGS.batch_size, tf.float32) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) - - with tf.variable_scope('rnn') as vs: - values = [] - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - if t == 0: - rnn_in = tf.zeros_like(rnn_inputs[:, 0]) - else: - rnn_in = rnn_inputs[:, t - 1] - rnn_out, state_dis = cell_critic(rnn_in, state_dis, scope=dis_scope) - - if is_training: - rnn_out *= output_mask - - # Prediction is linear output for Discriminator. - value = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - - values.append(value) - values = tf.stack(values, axis=1) - return tf.squeeze(values, axis=2) diff --git a/research/maskgan/models/evaluation_utils.py b/research/maskgan/models/evaluation_utils.py deleted file mode 100644 index fc2a3a16f..000000000 --- a/research/maskgan/models/evaluation_utils.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Evaluation utilities.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import Counter -# Dependency imports -import numpy as np -from scipy.special import expit - -import tensorflow as tf - -from model_utils import helper -from model_utils import n_gram - -FLAGS = tf.app.flags.FLAGS - - -def print_and_log_losses(log, step, is_present_rate, avg_dis_loss, - avg_gen_loss): - """Prints and logs losses to the log file. - - Args: - log: GFile for logs. - step: Global step. - is_present_rate: Current masking rate. - avg_dis_loss: List of Discriminator losses. - avg_gen_loss: List of Generator losses. - """ - print('global_step: %d' % step) - print(' is_present_rate: %.3f' % is_present_rate) - print(' D train loss: %.5f' % np.mean(avg_dis_loss)) - print(' G train loss: %.5f' % np.mean(avg_gen_loss)) - log.write('\nglobal_step: %d\n' % step) - log.write((' is_present_rate: %.3f\n' % is_present_rate)) - log.write(' D train loss: %.5f\n' % np.mean(avg_dis_loss)) - log.write(' G train loss: %.5f\n' % np.mean(avg_gen_loss)) - - -def print_and_log(log, id_to_word, sequence_eval, max_num_to_print=5): - """Helper function for printing and logging evaluated sequences.""" - indices_arr = np.asarray(sequence_eval) - samples = helper.convert_to_human_readable(id_to_word, indices_arr, - max_num_to_print) - - for i, sample in enumerate(samples): - print('Sample', i, '. ', sample) - log.write('\nSample ' + str(i) + '. 
' + sample) - log.write('\n') - print('\n') - log.flush() - return samples - - -def zip_seq_pred_crossent(id_to_word, sequences, predictions, cross_entropy): - """Zip together the sequences, predictions, cross entropy.""" - indices = np.asarray(sequences) - - batch_of_metrics = [] - - for ind_batch, pred_batch, crossent_batch in zip(indices, predictions, - cross_entropy): - metrics = [] - - for index, pred, crossent in zip(ind_batch, pred_batch, crossent_batch): - metrics.append([str(id_to_word[index]), pred, crossent]) - - batch_of_metrics.append(metrics) - return batch_of_metrics - - -def zip_metrics(indices, *args): - """Zip together the indices matrices with the provided metrics matrices.""" - batch_of_metrics = [] - for metrics_batch in zip(indices, *args): - - metrics = [] - for m in zip(*metrics_batch): - metrics.append(m) - batch_of_metrics.append(metrics) - return batch_of_metrics - - -def print_formatted(present, id_to_word, log, batch_of_tuples): - """Print and log metrics.""" - num_cols = len(batch_of_tuples[0][0]) - repeat_float_format = '{:<12.3f} ' - repeat_str_format = '{:<13}' - - format_str = ''.join( - ['[{:<1}] {:<20}', - str(repeat_float_format * (num_cols - 1))]) - - # TODO(liamfedus): Generalize the logging. This is sloppy. - header_format_str = ''.join( - ['[{:<1}] {:<20}', - str(repeat_str_format * (num_cols - 1))]) - header_str = header_format_str.format('p', 'Word', 'p(real)', 'log-perp', - 'log(p(a))', 'r', 'R=V*(s)', 'b=V(s)', - 'A(a,s)') - - for i, batch in enumerate(batch_of_tuples): - print(' Sample: %d' % i) - log.write(' Sample %d.\n' % i) - print(' ', header_str) - log.write(' ' + str(header_str) + '\n') - - for j, t in enumerate(batch): - t = list(t) - t[0] = id_to_word[t[0]] - buffer_str = format_str.format(int(present[i][j]), *t) - print(' ', buffer_str) - log.write(' ' + str(buffer_str) + '\n') - log.flush() - - -def generate_RL_logs(sess, model, log, id_to_word, feed): - """Generate complete logs while running with REINFORCE.""" - # Impute Sequences. - [ - p, - fake_sequence_eval, - fake_predictions_eval, - _, - fake_cross_entropy_losses_eval, - _, - fake_log_probs_eval, - fake_rewards_eval, - fake_baselines_eval, - cumulative_rewards_eval, - fake_advantages_eval, - ] = sess.run( - [ - model.present, - model.fake_sequence, - model.fake_predictions, - model.real_predictions, - model.fake_cross_entropy_losses, - model.fake_logits, - model.fake_log_probs, - model.fake_rewards, - model.fake_baselines, - model.cumulative_rewards, - model.fake_advantages, - ], - feed_dict=feed) - - indices = np.asarray(fake_sequence_eval) - - # Convert Discriminator linear layer to probability. - fake_prob_eval = expit(fake_predictions_eval) - - # Add metrics. - fake_tuples = zip_metrics(indices, fake_prob_eval, - fake_cross_entropy_losses_eval, fake_log_probs_eval, - fake_rewards_eval, cumulative_rewards_eval, - fake_baselines_eval, fake_advantages_eval) - - # real_tuples = zip_metrics(indices, ) - - # Print forward sequences. - tuples_to_print = fake_tuples[:FLAGS.max_num_to_print] - print_formatted(p, id_to_word, log, tuples_to_print) - - print('Samples') - log.write('Samples\n') - samples = print_and_log(log, id_to_word, fake_sequence_eval, - FLAGS.max_num_to_print) - return samples - - -def generate_logs(sess, model, log, id_to_word, feed): - """Impute Sequences using the model for a particular feed and send it to - logs.""" - # Impute Sequences. 
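The logging helpers here squash the Discriminator's raw linear predictions
through `scipy.special.expit`. A tiny sketch confirming that `expit` is
exactly the logistic sigmoid (the input values are made up):

import numpy as np
from scipy.special import expit

fake_predictions_eval = np.array([[-2.0, 0.0, 3.0]])   # raw linear scores
fake_prob_eval = expit(fake_predictions_eval)          # p(real) in (0, 1)
assert np.allclose(fake_prob_eval,
                   1.0 / (1.0 + np.exp(-fake_predictions_eval)))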
- [ - p, sequence_eval, fake_predictions_eval, fake_cross_entropy_losses_eval, - fake_logits_eval - ] = sess.run( - [ - model.present, model.fake_sequence, model.fake_predictions, - model.fake_cross_entropy_losses, model.fake_logits - ], - feed_dict=feed) - - # Convert Discriminator linear layer to probability. - fake_prob_eval = expit(fake_predictions_eval) - - # Forward Masked Tuples. - fake_tuples = zip_seq_pred_crossent(id_to_word, sequence_eval, fake_prob_eval, - fake_cross_entropy_losses_eval) - - tuples_to_print = fake_tuples[:FLAGS.max_num_to_print] - - if FLAGS.print_verbose: - print('fake_logits_eval') - print(fake_logits_eval) - - for i, batch in enumerate(tuples_to_print): - print(' Sample %d.' % i) - log.write(' Sample %d.\n' % i) - for j, pred in enumerate(batch): - buffer_str = ('[{:<1}] {:<20} {:<7.3f} {:<7.3f}').format( - int(p[i][j]), pred[0], pred[1], pred[2]) - print(' ', buffer_str) - log.write(' ' + str(buffer_str) + '\n') - log.flush() - - print('Samples') - log.write('Samples\n') - samples = print_and_log(log, id_to_word, sequence_eval, - FLAGS.max_num_to_print) - return samples - - -def create_merged_ngram_dictionaries(indices, n): - """Generate a single dictionary for the full batch. - - Args: - indices: List of lists of indices. - n: Degree of n-grams. - - Returns: - Dictionary of hashed(n-gram tuples) to counts in the batch of indices. - """ - ngram_dicts = [] - - for ind in indices: - ngrams = n_gram.find_all_ngrams(ind, n=n) - ngram_counts = n_gram.construct_ngrams_dict(ngrams) - ngram_dicts.append(ngram_counts) - - merged_gen_dict = Counter() - for ngram_dict in ngram_dicts: - merged_gen_dict += Counter(ngram_dict) - return merged_gen_dict - - -def sequence_ngram_evaluation(sess, sequence, log, feed, data_ngram_count, n): - """Calculates the percent of ngrams produced in the sequence is present in - data_ngram_count. - - Args: - sess: tf.Session. - sequence: Sequence Tensor from the MaskGAN model. - log: gFile log. - feed: Feed to evaluate. - data_ngram_count: Dictionary of hashed(n-gram tuples) to counts in the - data_set. - - Returns: - avg_percent_captured: Percent of produced ngrams that appear in the - data_ngram_count. - """ - del log - # Impute sequence. - [sequence_eval] = sess.run([sequence], feed_dict=feed) - indices = sequence_eval - - # Retrieve the counts across the batch of indices. - gen_ngram_counts = create_merged_ngram_dictionaries( - indices, n=n) - return n_gram.percent_unique_ngrams_in_train(data_ngram_count, - gen_ngram_counts) diff --git a/research/maskgan/models/feedforward.py b/research/maskgan/models/feedforward.py deleted file mode 100644 index d48a517d6..000000000 --- a/research/maskgan/models/feedforward.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
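`create_merged_ngram_dictionaries` above merges per-example n-gram counts
with `collections.Counter`. A runnable toy sketch, with `find_all_ngrams`
re-implemented here as a hypothetical stand-in for `n_gram.find_all_ngrams`:

from collections import Counter

def find_all_ngrams(ind, n):
  # Hypothetical stand-in for n_gram.find_all_ngrams.
  return [tuple(ind[i:i + n]) for i in range(len(ind) - n + 1)]

indices = [[1, 2, 3, 2, 3], [2, 3, 4]]   # a toy batch of token ids
merged_gen_dict = Counter()
for ind in indices:
  merged_gen_dict += Counter(find_all_ngrams(ind, n=2))
# merged_gen_dict == Counter({(2, 3): 3, (1, 2): 1, (3, 2): 1, (3, 4): 1})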
-# ============================================================================== - -"""Simple FNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the Discriminator graph.""" - del is_training - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - "If you wish to share Discriminator/Generator embeddings, they must be" - " same dimension.") - with tf.variable_scope("gen/rnn", reuse=True): - embedding = tf.get_variable("embedding", - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope("dis", reuse=reuse): - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable("embedding", - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - embeddings = tf.nn.embedding_lookup(embedding, sequence) - - # Input matrices. - W = tf.get_variable( - "W", - initializer=tf.truncated_normal( - shape=[3 * hparams.dis_embedding_dim, hparams.dis_hidden_dim], - stddev=0.1)) - b = tf.get_variable( - "b", initializer=tf.constant(0.1, shape=[hparams.dis_hidden_dim])) - - # Output matrices. - W_out = tf.get_variable( - "W_out", - initializer=tf.truncated_normal( - shape=[hparams.dis_hidden_dim, 1], stddev=0.1)) - b_out = tf.get_variable("b_out", initializer=tf.constant(0.1, shape=[1])) - - predictions = [] - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - inp = embeddings[:, t] - - if t > 0: - past_inp = tf.unstack(embeddings[:, 0:t], axis=1) - avg_past_inp = tf.add_n(past_inp) / len(past_inp) - else: - avg_past_inp = tf.zeros_like(inp) - - if t < FLAGS.sequence_length: - future_inp = tf.unstack(embeddings[:, t:], axis=1) - avg_future_inp = tf.add_n(future_inp) / len(future_inp) - else: - avg_future_inp = tf.zeros_like(inp) - - # Cumulative input. - concat_inp = tf.concat([avg_past_inp, inp, avg_future_inp], axis=1) - - # Hidden activations. - hidden = tf.nn.relu(tf.nn.xw_plus_b(concat_inp, W, b, name="scores")) - - # Add dropout - with tf.variable_scope("dropout"): - hidden = tf.nn.dropout(hidden, FLAGS.keep_prob) - - # Output. - output = tf.nn.xw_plus_b(hidden, W_out, b_out, name="output") - - predictions.append(output) - predictions = tf.stack(predictions, axis=1) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/rnn.py b/research/maskgan/models/rnn.py deleted file mode 100644 index 40b3a7aa3..000000000 --- a/research/maskgan/models/rnn.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
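The feedforward Discriminator above scores token `t` from the concatenation
of the averaged past embeddings, the current embedding, and the averaged
current-and-future embeddings. A NumPy sketch of that input construction
(shapes are illustrative):

import numpy as np

embeddings = np.random.randn(2, 5, 4)   # [batch, sequence_length, emb_dim]
t = 2
inp = embeddings[:, t]
# Mean of the past embeddings (zeros at t == 0) and of the current-plus-
# future ones, mirroring the add_n(...) / len(...) averages in the loop.
avg_past_inp = embeddings[:, :t].mean(axis=1) if t > 0 else np.zeros_like(inp)
avg_future_inp = embeddings[:, t:].mean(axis=1)
concat_inp = np.concatenate([avg_past_inp, inp, avg_future_inp], axis=1)
# concat_inp.shape == (2, 12), i.e. [batch, 3 * emb_dim]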
-# ============================================================================== - -"""Simple RNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf - -# ZoneoutWrapper. -from regularization import zoneout - -FLAGS = tf.app.flags.FLAGS - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph. - - G will now impute tokens that have been masked from the input seqeunce. - """ - tf.logging.warning( - 'Undirectional generative model is not a useful model for this MaskGAN ' - 'because future context is needed. Use only for debugging purposes.') - init_scale = 0.05 - initializer = tf.random_uniform_initializer(-init_scale, init_scale) - - with tf.variable_scope('gen', reuse=reuse, initializer=initializer): - - def lstm_cell(): - return tf.contrib.rnn.LayerNormBasicLSTMCell( - hparams.gen_rnn_size, reuse=reuse) - - attn_cell = lstm_cell - if FLAGS.zoneout_drop_prob > 0.0: - - def attn_cell(): - return zoneout.ZoneoutWrapper( - lstm_cell(), - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - - cell_gen = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - softmax_w = tf.get_variable('softmax_w', - [hparams.gen_rnn_size, FLAGS.vocab_size]) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the model is the first token to provide context. The - # model will then predict token t > 0. - if t == 0: - # Always provide the real input at t = 0. - state_gen = initial_state - rnn_inp = rnn_inputs[:, t] - - # If the target at the last time-step was present, read in the real. - # If the target at the last time-step was not present, read in the fake. - else: - real_rnn_inp = rnn_inputs[:, t] - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - - # Use teacher forcing. - if (is_training and - FLAGS.gen_training_strategy == 'cross_entropy') or is_validating: - rnn_inp = real_rnn_inp - else: - # Note that targets_t-1 == inputs_(t) - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - logit = tf.matmul(rnn_out, softmax_w) + softmax_b - - # Real sample. - real = targets[:, t] - - # Fake sample. - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - # Output for Generator will either be generated or the target. - # If present: Return real. - # If not present: Return fake. - output = tf.where(targets_present[:, t], real, fake) - - # Append to lists. - sequence.append(output) - logits.append(logit) - log_probs.append(log_prob) - - # Produce the RNN state had the model operated only - # over real data. - real_state_gen = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - rnn_inp = rnn_inputs[:, t] - - # RNN. 
- rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) - - final_state = real_state_gen - - return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( - log_probs, axis=1), initial_state, final_state) - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the Discriminator graph. - - Args: - hparams: Hyperparameters for the MaskGAN. - FLAGS: Current flags. - sequence: [FLAGS.batch_size, FLAGS.sequence_length] - is_training: - reuse - - Returns: - predictions: - """ - tf.logging.warning( - 'Undirectional Discriminative model is not a useful model for this ' - 'MaskGAN because future context is needed. Use only for debugging ' - 'purposes.') - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('dis', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.LayerNormBasicLSTMCell( - hparams.dis_rnn_size, reuse=reuse) - - attn_cell = lstm_cell - if FLAGS.zoneout_drop_prob > 0.0: - - def attn_cell(): - return zoneout.ZoneoutWrapper( - lstm_cell(), - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - - cell_dis = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn') as vs: - predictions = [] - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = rnn_inputs[:, t] - rnn_out, state_dis = cell_dis(rnn_in, state_dis) - - # Prediction is linear output for Discriminator. - pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - - predictions.append(pred) - predictions = tf.stack(predictions, axis=1) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/rnn_nas.py b/research/maskgan/models/rnn_nas.py deleted file mode 100644 index 618ace2f8..000000000 --- a/research/maskgan/models/rnn_nas.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple RNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -from six.moves import xrange -import tensorflow as tf - -# NAS Code.. 
-from nas_utils import configs -from nas_utils import custom_cell -from nas_utils import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def get_config(): - return configs.AlienConfig2() - - -LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph. - - G will now impute tokens that have been masked from the input seqeunce. - """ - tf.logging.info( - 'Undirectional generative model is not a useful model for this MaskGAN ' - 'because future context is needed. Use only for debugging purposes.') - config = get_config() - config.keep_prob = [hparams.gen_nas_keep_prob_0, hparams.gen_nas_keep_prob_1] - configs.print_config(config) - - init_scale = config.init_scale - initializer = tf.random_uniform_initializer(-init_scale, init_scale) - - with tf.variable_scope('gen', reuse=reuse, initializer=initializer): - # Neural architecture search cell. - cell = custom_cell.Alien(config.hidden_size) - - if is_training: - [h2h_masks, _, _, - output_mask] = variational_dropout.generate_variational_dropout_masks( - hparams, config.keep_prob) - else: - output_mask = None - - cell_gen = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) - initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - softmax_w = tf.matrix_transpose(embedding) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the model is the first token to provide context. The - # model will then predict token t > 0. - if t == 0: - # Always provide the real input at t = 0. - state_gen = initial_state - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or (is_training and - FLAGS.gen_training_strategy == 'cross_entropy'): - rnn_inp = real_rnn_inp - else: - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - if is_training: - state_gen = list(state_gen) - for layer_num, per_layer_state in enumerate(state_gen): - per_layer_state = LSTMTuple( - per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) - state_gen[layer_num] = per_layer_state - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - - if is_training: - rnn_out = output_mask * rnn_out - - logit = tf.matmul(rnn_out, softmax_w) + softmax_b - - # Real sample. - real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - # Output for Generator will either be generated or the input. - # - # If present: Return real. - # If not present: Return fake. - output = tf.where(targets_present[:, t], real, fake) - - # Add to lists. 
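The `tf.where(targets_present[:, t], real, fake)` selection used throughout
these generators is a per-example switch between ground truth and samples; a
minimal NumPy illustration with made-up token ids:

import numpy as np

targets_present = np.array([True, False, True])  # mask at step t
real = np.array([7, 8, 9])                       # ground-truth token ids
fake = np.array([1, 2, 3])                       # sampled token ids
output = np.where(targets_present, real, fake)   # -> array([7, 2, 9])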
- sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - - # Produce the RNN state had the model operated only - # over real data. - real_state_gen = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - rnn_inp = rnn_inputs[:, t] - - # RNN. - rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) - - final_state = real_state_gen - - return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( - log_probs, axis=1), initial_state, final_state) - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the Discriminator graph.""" - tf.logging.info( - 'Undirectional Discriminative model is not a useful model for this ' - 'MaskGAN because future context is needed. Use only for debugging ' - 'purposes.') - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - config = get_config() - config.keep_prob = [hparams.dis_nas_keep_prob_0, hparams.dis_nas_keep_prob_1] - configs.print_config(config) - - with tf.variable_scope('dis', reuse=reuse): - # Neural architecture search cell. - cell = custom_cell.Alien(config.hidden_size) - - if is_training: - [h2h_masks, _, _, - output_mask] = variational_dropout.generate_variational_dropout_masks( - hparams, config.keep_prob) - else: - output_mask = None - - cell_dis = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) - state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn') as vs: - predictions = [] - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = rnn_inputs[:, t] - - if is_training: - state_dis = list(state_dis) - for layer_num, per_layer_state in enumerate(state_dis): - per_layer_state = LSTMTuple( - per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) - state_dis[layer_num] = per_layer_state - - # RNN. - rnn_out, state_dis = cell_dis(rnn_in, state_dis) - - if is_training: - rnn_out = output_mask * rnn_out - - # Prediction is linear output for Discriminator. - pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - - predictions.append(pred) - predictions = tf.stack(predictions, axis=1) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/rnn_vd.py b/research/maskgan/models/rnn_vd.py deleted file mode 100644 index 428f1a54b..000000000 --- a/research/maskgan/models/rnn_vd.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple RNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf -from regularization import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def discriminator(hparams, - sequence, - is_training, - reuse=None, - initial_state=None): - """Define the Discriminator graph.""" - tf.logging.info( - 'Undirectional Discriminative model is not a useful model for this ' - 'MaskGAN because future context is needed. Use only for debugging ' - 'purposes.') - sequence = tf.cast(sequence, tf.int32) - - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('dis', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.dis_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, - hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) - - cell_dis = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - if initial_state: - state_dis = [[tf.identity(x) for x in inner_initial_state] - for inner_initial_state in initial_state] - else: - state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) - - with tf.variable_scope('rnn') as vs: - predictions, rnn_outs = [], [] - - if not FLAGS.dis_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = rnn_inputs[:, t] - rnn_out, state_dis = cell_dis(rnn_in, state_dis) - - if is_training: - rnn_out *= output_mask - - # Prediction is linear output for Discriminator. 
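What makes the dropout in these `*_vd` models "variational" is that the mask
is sampled once per sequence and reused at every time step, rather than
resampled per step. A NumPy sketch of that reuse, with hypothetical sizes:

import numpy as np

keep_prob, batch_size, units, sequence_length = 0.8, 2, 4, 3
# One mask for the whole sequence; applying it at every step is the
# distinguishing feature of variational dropout.
output_mask = (np.floor(keep_prob +
                        np.random.uniform(size=(batch_size, units)))
               / keep_prob)
rnn_outs = [np.random.randn(batch_size, units) for _ in range(sequence_length)]
dropped = [rnn_out * output_mask for rnn_out in rnn_outs]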
- pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - predictions.append(pred) - rnn_outs.append(rnn_out) - - predictions = tf.stack(predictions, axis=1) - - if FLAGS.baseline_method == 'critic': - with tf.variable_scope('critic', reuse=reuse) as critic_scope: - rnn_outs = tf.stack(rnn_outs, axis=1) - values = tf.contrib.layers.linear(rnn_outs, 1, scope=critic_scope) - return tf.squeeze(predictions, axis=2), tf.squeeze(values, axis=2) - - else: - return tf.squeeze(predictions, axis=2), None diff --git a/research/maskgan/models/rnn_zaremba.py b/research/maskgan/models/rnn_zaremba.py deleted file mode 100644 index 9369c77fb..000000000 --- a/research/maskgan/models/rnn_zaremba.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple RNN model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph. - - G will now impute tokens that have been masked from the input seqeunce. - """ - tf.logging.warning( - 'Undirectional generative model is not a useful model for this MaskGAN ' - 'because future context is needed. Use only for debugging purposes.') - init_scale = 0.05 - initializer = tf.random_uniform_initializer(-init_scale, init_scale) - with tf.variable_scope('gen', reuse=reuse, initializer=initializer): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and FLAGS.keep_prob < 1: - - def attn_cell(): - return tf.contrib.rnn.DropoutWrapper( - lstm_cell(), output_keep_prob=FLAGS.keep_prob) - - cell_gen = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - softmax_w = tf.get_variable('softmax_w', - [hparams.gen_rnn_size, FLAGS.vocab_size]) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - fake = None - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the model is the first token to provide context. The - # model will then predict token t > 0. - if t == 0: - # Always provide the real input at t = 0. 
- state_gen = initial_state - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or (is_training and - FLAGS.gen_training_strategy == 'cross_entropy'): - rnn_inp = real_rnn_inp - else: - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - logit = tf.matmul(rnn_out, softmax_w) + softmax_b - - # Real sample. - real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - # Output for Generator will either be generated or the input. - # - # If present: Return real. - # If not present: Return fake. - output = tf.where(targets_present[:, t], real, fake) - - # Add to lists. - sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - - # Produce the RNN state had the model operated only - # over real data. - real_state_gen = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - rnn_inp = rnn_inputs[:, t] - - # RNN. - rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen) - - final_state = real_state_gen - - return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( - log_probs, axis=1), initial_state, final_state) - - -def discriminator(hparams, sequence, is_training, reuse=None): - """Define the Discriminator graph.""" - tf.logging.warning( - 'Undirectional Discriminative model is not a useful model for this ' - 'MaskGAN because future context is needed. Use only for debugging ' - 'purposes.') - sequence = tf.cast(sequence, tf.int32) - - with tf.variable_scope('dis', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell(hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and FLAGS.keep_prob < 1: - - def attn_cell(): - return tf.contrib.rnn.DropoutWrapper( - lstm_cell(), output_keep_prob=FLAGS.keep_prob) - - cell_dis = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn') as vs: - predictions = [] - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = rnn_inputs[:, t] - rnn_out, state_dis = cell_dis(rnn_in, state_dis) - - # Prediction is linear output for Discriminator. - pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - - predictions.append(pred) - predictions = tf.stack(predictions, axis=1) - return tf.squeeze(predictions, axis=2) diff --git a/research/maskgan/models/rollout.py b/research/maskgan/models/rollout.py deleted file mode 100644 index 6919af2e3..000000000 --- a/research/maskgan/models/rollout.py +++ /dev/null @@ -1,384 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
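The generators above draw the fake token from a Categorical over the logits
and keep its log-probability. A NumPy sketch using the Gumbel-max trick as a
stand-in for `tf.contrib.distributions.Categorical.sample`:

import numpy as np

def sample_categorical(logits):
  # Gumbel-max trick: argmax(logits + Gumbel noise) is distributed as
  # Categorical(softmax(logits)).
  u = np.random.uniform(size=logits.shape)
  gumbel = -np.log(-np.log(u + 1e-20))
  return np.argmax(logits + gumbel, axis=-1)

logits = np.random.randn(2, 5)                    # [batch, vocab]
fake = sample_categorical(logits)
log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
log_prob = np.take_along_axis(log_probs, fake[:, None], axis=-1)[:, 0]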
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Rollout RNN model definitions which call rnn_zaremba code.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -from six.moves import xrange -import tensorflow as tf - -from losses import losses -from model_utils import helper -from model_utils import model_construction -from model_utils import model_losses -from model_utils import model_optimization - -FLAGS = tf.app.flags.FLAGS - - -def create_rollout_MaskGAN(hparams, is_training): - """Create the MaskGAN model. - - Args: - hparams: Hyperparameters for the MaskGAN. - is_training: Boolean indicating operational mode (train/inference). - evaluated with a teacher forcing regime. - - Return: - model: Namedtuple for specifying the MaskGAN.""" - global_step = tf.Variable(0, name='global_step', trainable=False) - - new_learning_rate = tf.placeholder(tf.float32, [], name='new_learning_rate') - learning_rate = tf.Variable(0.0, name='learning_rate', trainable=False) - learning_rate_update = tf.assign(learning_rate, new_learning_rate) - - new_rate = tf.placeholder(tf.float32, [], name='new_rate') - percent_real_var = tf.Variable(0.0, trainable=False) - percent_real_update = tf.assign(percent_real_var, new_rate) - - ## Placeholders. - inputs = tf.placeholder( - tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length]) - present = tf.placeholder( - tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length]) - inv_present = tf.placeholder( - tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - ## Rollout Generator. - fwd_gen_rollouts = rollout_generator( - hparams, inputs, present, is_training=is_training, is_validating=False) - inv_gen_rollouts = rollout_generator( - hparams, - inputs, - inv_present, - is_training=is_training, - is_validating=False, - reuse=True) - - ## Rollout Discriminator. - fwd_dis_rollouts = rollout_discriminator( - hparams, fwd_gen_rollouts, is_training=is_training) - inv_dis_rollouts = rollout_discriminator( - hparams, inv_gen_rollouts, is_training=is_training, reuse=True) - - ## Discriminator Loss. - [dis_loss, dis_loss_pred, dis_loss_inv_pred] = rollout_discriminator_loss( - fwd_dis_rollouts, present, inv_dis_rollouts, inv_present) - - ## Average log-perplexity for only missing words. However, to do this, - # the logits are still computed using teacher forcing, that is, the ground - # truth tokens are fed in at each time point to be valid. - # TODO(liamfedus): Fix the naming convention. - with tf.variable_scope('gen_rollout'): - _, fwd_eval_logits, _ = model_construction.create_generator( - hparams, - inputs, - present, - is_training=False, - is_validating=True, - reuse=True) - - avg_log_perplexity = model_losses.calculate_log_perplexity( - fwd_eval_logits, inputs, present) - - ## Generator Loss. - # 1. Cross Entropy losses on missing tokens. 
- [fwd_cross_entropy_losses, - inv_cross_entropy_losses] = rollout_masked_cross_entropy_loss( - inputs, present, inv_present, fwd_gen_rollouts, inv_gen_rollouts) - - # 2. GAN losses on missing tokens. - [fwd_RL_loss, - fwd_RL_statistics, fwd_averages_op] = rollout_reinforce_objective( - hparams, fwd_gen_rollouts, fwd_dis_rollouts, present) - [inv_RL_loss, - inv_RL_statistics, inv_averages_op] = rollout_reinforce_objective( - hparams, inv_gen_rollouts, inv_dis_rollouts, inv_present) - - # TODO(liamfedus): Generalize this to use all logs. - [fwd_sequence, fwd_logits, fwd_log_probs] = fwd_gen_rollouts[-1] - [inv_sequence, inv_logits, inv_log_probs] = inv_gen_rollouts[-1] - - # TODO(liamfedus): Generalize this to use all logs. - fwd_predictions = fwd_dis_rollouts[-1] - inv_predictions = inv_dis_rollouts[-1] - - # TODO(liamfedus): Generalize this to use all logs. - [fwd_log_probs, fwd_rewards, fwd_advantages, - fwd_baselines] = fwd_RL_statistics[-1] - [inv_log_probs, inv_rewards, inv_advantages, - inv_baselines] = inv_RL_statistics[-1] - - ## Pre-training. - if FLAGS.gen_pretrain_steps: - # TODO(liamfedus): Rewrite this. - fwd_cross_entropy_loss = tf.reduce_mean(fwd_cross_entropy_losses) - gen_pretrain_op = model_optimization.create_gen_pretrain_op( - hparams, fwd_cross_entropy_loss, global_step) - else: - gen_pretrain_op = tf.no_op('gen_pretrain_no_op') - if FLAGS.dis_pretrain_steps: - dis_pretrain_op = model_optimization.create_dis_pretrain_op( - hparams, dis_loss, global_step) - else: - dis_pretrain_op = tf.no_op('dis_pretrain_no_op') - - ## Generator Train Op. - # 1. Cross-Entropy. - if FLAGS.gen_training_strategy == 'cross_entropy': - gen_loss = tf.reduce_mean( - fwd_cross_entropy_losses + inv_cross_entropy_losses) / 2. - [gen_train_op, gen_grads, - gen_vars] = model_optimization.create_gen_train_op( - hparams, learning_rate, gen_loss, global_step, mode='MINIMIZE') - - # 2. GAN (REINFORCE) - elif FLAGS.gen_training_strategy == 'reinforce': - gen_loss = (fwd_RL_loss + inv_RL_loss) / 2. - [gen_train_op, gen_grads, - gen_vars] = model_optimization.create_reinforce_gen_train_op( - hparams, learning_rate, gen_loss, fwd_averages_op, inv_averages_op, - global_step) - - else: - raise NotImplementedError - - ## Discriminator Train Op. - dis_train_op, dis_grads, dis_vars = model_optimization.create_dis_train_op( - hparams, dis_loss, global_step) - - ## Summaries. 
- with tf.name_scope('general'): - tf.summary.scalar('percent_real', percent_real_var) - tf.summary.scalar('learning_rate', learning_rate) - - with tf.name_scope('generator_losses'): - tf.summary.scalar('gen_loss', tf.reduce_mean(gen_loss)) - tf.summary.scalar('gen_loss_fwd_cross_entropy', - tf.reduce_mean(fwd_cross_entropy_losses)) - tf.summary.scalar('gen_loss_inv_cross_entropy', - tf.reduce_mean(inv_cross_entropy_losses)) - - with tf.name_scope('REINFORCE'): - with tf.name_scope('objective'): - tf.summary.scalar('fwd_RL_loss', tf.reduce_mean(fwd_RL_loss)) - tf.summary.scalar('inv_RL_loss', tf.reduce_mean(inv_RL_loss)) - - with tf.name_scope('rewards'): - helper.variable_summaries(fwd_rewards, 'fwd_rewards') - helper.variable_summaries(inv_rewards, 'inv_rewards') - - with tf.name_scope('advantages'): - helper.variable_summaries(fwd_advantages, 'fwd_advantages') - helper.variable_summaries(inv_advantages, 'inv_advantages') - - with tf.name_scope('baselines'): - helper.variable_summaries(fwd_baselines, 'fwd_baselines') - helper.variable_summaries(inv_baselines, 'inv_baselines') - - with tf.name_scope('log_probs'): - helper.variable_summaries(fwd_log_probs, 'fwd_log_probs') - helper.variable_summaries(inv_log_probs, 'inv_log_probs') - - with tf.name_scope('discriminator_losses'): - tf.summary.scalar('dis_loss', dis_loss) - tf.summary.scalar('dis_loss_fwd_sequence', dis_loss_pred) - tf.summary.scalar('dis_loss_inv_sequence', dis_loss_inv_pred) - - with tf.name_scope('logits'): - helper.variable_summaries(fwd_logits, 'fwd_logits') - helper.variable_summaries(inv_logits, 'inv_logits') - - for v, g in zip(gen_vars, gen_grads): - helper.variable_summaries(v, v.op.name) - helper.variable_summaries(g, 'grad/' + v.op.name) - - for v, g in zip(dis_vars, dis_grads): - helper.variable_summaries(v, v.op.name) - helper.variable_summaries(g, 'grad/' + v.op.name) - - merge_summaries_op = tf.summary.merge_all() - - # Model saver. - saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep=5) - - # Named tuple that captures elements of the MaskGAN model. - Model = collections.namedtuple('Model', [ - 'inputs', 'present', 'inv_present', 'percent_real_update', 'new_rate', - 'fwd_sequence', 'fwd_logits', 'fwd_rewards', 'fwd_advantages', - 'fwd_log_probs', 'fwd_predictions', 'fwd_cross_entropy_losses', - 'inv_sequence', 'inv_logits', 'inv_rewards', 'inv_advantages', - 'inv_log_probs', 'inv_predictions', 'inv_cross_entropy_losses', - 'avg_log_perplexity', 'dis_loss', 'gen_loss', 'dis_train_op', - 'gen_train_op', 'gen_pretrain_op', 'dis_pretrain_op', - 'merge_summaries_op', 'global_step', 'new_learning_rate', - 'learning_rate_update', 'saver' - ]) - - model = Model( - inputs, present, inv_present, percent_real_update, new_rate, fwd_sequence, - fwd_logits, fwd_rewards, fwd_advantages, fwd_log_probs, fwd_predictions, - fwd_cross_entropy_losses, inv_sequence, inv_logits, inv_rewards, - inv_advantages, inv_log_probs, inv_predictions, inv_cross_entropy_losses, - avg_log_perplexity, dis_loss, gen_loss, dis_train_op, gen_train_op, - gen_pretrain_op, dis_pretrain_op, merge_summaries_op, global_step, - new_learning_rate, learning_rate_update, saver) - return model - - -def rollout_generator(hparams, - inputs, - input_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph which does rollouts. - - G will now impute tokens that have been masked from the input seqeunce. 
- """ - rollouts = [] - - with tf.variable_scope('gen_rollout'): - for n in xrange(FLAGS.num_rollouts): - if n > 0: - # TODO(liamfedus): Why is it necessary here to manually set reuse? - reuse = True - tf.get_variable_scope().reuse_variables() - - [sequence, logits, log_probs] = model_construction.create_generator( - hparams, - inputs, - input_present, - is_training, - is_validating, - reuse=reuse) - - rollouts.append([sequence, logits, log_probs]) - - # Length assertion. - assert len(rollouts) == FLAGS.num_rollouts - - return rollouts - - -def rollout_discriminator(hparams, gen_rollouts, is_training, reuse=None): - """Define the Discriminator graph which does rollouts. - - G will now impute tokens that have been masked from the input seqeunce. - """ - rollout_predictions = [] - - with tf.variable_scope('dis_rollout'): - for n, rollout in enumerate(gen_rollouts): - if n > 0: - # TODO(liamfedus): Why is it necessary here to manually set reuse? - reuse = True - tf.get_variable_scope().reuse_variables() - - [sequence, _, _] = rollout - - predictions = model_construction.create_discriminator( - hparams, sequence, is_training=is_training, reuse=reuse) - - # Predictions for each rollout. - rollout_predictions.append(predictions) - - # Length assertion. - assert len(rollout_predictions) == FLAGS.num_rollouts - - return rollout_predictions - - -def rollout_reinforce_objective(hparams, gen_rollouts, dis_rollouts, present): - cumulative_gen_objective = 0. - cumulative_averages_op = [] - cumulative_statistics = [] - - assert len(gen_rollouts) == len(dis_rollouts) - - for gen_rollout, dis_rollout in zip(gen_rollouts, dis_rollouts): - [_, _, log_probs] = gen_rollout - dis_predictions = dis_rollout - - [ - final_gen_objective, log_probs, rewards, advantages, baselines, - maintain_averages_op - ] = model_losses.calculate_reinforce_objective(hparams, log_probs, - dis_predictions, present) - - # Accumulate results. - cumulative_gen_objective += final_gen_objective - cumulative_averages_op.append(maintain_averages_op) - cumulative_statistics.append([log_probs, rewards, advantages, baselines]) - - # Group all the averaging operations. - cumulative_averages_op = tf.group(*cumulative_averages_op) - cumulative_gen_objective /= FLAGS.num_rollouts - [log_probs, rewards, advantages, baselines] = cumulative_statistics[-1] - - # Length assertion. 
- assert len(cumulative_statistics) == FLAGS.num_rollouts - - return [ - cumulative_gen_objective, cumulative_statistics, cumulative_averages_op - ] - - -def rollout_masked_cross_entropy_loss(inputs, present, inv_present, - fwd_rollouts, inv_rollouts): - cumulative_fwd_cross_entropy_losses = tf.zeros( - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - cumulative_inv_cross_entropy_losses = tf.zeros( - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - for fwd_rollout, inv_rollout in zip(fwd_rollouts, inv_rollouts): - [_, fwd_logits, _] = fwd_rollout - [_, inv_logits, _] = inv_rollout - - [fwd_cross_entropy_losses, - inv_cross_entropy_losses] = model_losses.create_masked_cross_entropy_loss( - inputs, present, inv_present, fwd_logits, inv_logits) - - cumulative_fwd_cross_entropy_losses = tf.add( - cumulative_fwd_cross_entropy_losses, fwd_cross_entropy_losses) - cumulative_inv_cross_entropy_losses = tf.add( - cumulative_inv_cross_entropy_losses, inv_cross_entropy_losses) - - return [ - cumulative_fwd_cross_entropy_losses, cumulative_inv_cross_entropy_losses - ] - - -def rollout_discriminator_loss(fwd_rollouts, present, inv_rollouts, - inv_present): - - dis_loss = 0 - dis_loss_pred = 0 - dis_loss_inv_pred = 0 - - for fwd_predictions, inv_predictions in zip(fwd_rollouts, inv_rollouts): - dis_loss_pred += losses.discriminator_loss(fwd_predictions, present) - dis_loss_inv_pred += losses.discriminator_loss(inv_predictions, inv_present) - - dis_loss_pred /= FLAGS.num_rollouts - dis_loss_inv_pred /= FLAGS.num_rollouts - - dis_loss = (dis_loss_pred + dis_loss_inv_pred) / 2. - return [dis_loss, dis_loss_pred, dis_loss_inv_pred] diff --git a/research/maskgan/models/seq2seq.py b/research/maskgan/models/seq2seq.py deleted file mode 100644 index fac397c98..000000000 --- a/research/maskgan/models/seq2seq.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple seq2seq model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf -from six.moves import xrange -from models import attention_utils - -# ZoneoutWrapper. -from regularization import zoneout - -FLAGS = tf.app.flags.FLAGS - - -def transform_input_with_is_missing_token(inputs, targets_present): - """Transforms the inputs to have missing tokens when it's masked out. The - mask is for the targets, so therefore, to determine if an input at time t is - masked, we have to check if the target at time t - 1 is masked out. - - e.g. - inputs = [a, b, c, d] - targets = [b, c, d, e] - targets_present = [1, 0, 1, 0] - - then, - transformed_input = [a, b, , d] - - Args: - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. 
- targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the word. - - Returns: - transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] - which takes on value of inputs when the input is present and takes on - value=vocab_size to indicate a missing token. - """ - # To fill in if the input is missing. - input_missing = tf.constant( - FLAGS.vocab_size, - dtype=tf.int32, - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - # The 0th input will always be present to MaskGAN. - zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) - - # Input present mask. - inputs_present = tf.concat( - [zeroth_input_present, targets_present[:, :-1]], axis=1) - - transformed_input = tf.where(inputs_present, inputs, input_missing) - return transformed_input - - -def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): - """Define the Encoder graph.""" - # We will use the same variable from the decoder. - if FLAGS.seq2seq_share_embedding: - with tf.variable_scope('decoder/rnn'): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('encoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.LayerNormBasicLSTMCell( - hparams.gen_rnn_size, reuse=reuse) - - attn_cell = lstm_cell - if FLAGS.zoneout_drop_prob > 0.0: - - def attn_cell(): - return zoneout.ZoneoutWrapper( - lstm_cell(), - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - - cell = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) - - # Add a missing token for inputs not present. - real_inputs = inputs - masked_inputs = transform_input_with_is_missing_token( - inputs, targets_present) - - with tf.variable_scope('rnn'): - hidden_states = [] - - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size + 1, hparams.gen_rnn_size]) - - real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) - masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) - - state = initial_state - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_inp = masked_rnn_inputs[:, t] - rnn_out, state = cell(rnn_inp, state) - hidden_states.append(rnn_out) - final_masked_state = state - hidden_states = tf.stack(hidden_states, axis=1) - - # Produce the RNN state had the model operated only - # over real data. - real_state = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - # RNN. - rnn_inp = real_rnn_inputs[:, t] - rnn_out, real_state = cell(rnn_inp, real_state) - final_state = real_state - - return (hidden_states, final_masked_state), initial_state, final_state - - -def gen_decoder(hparams, - inputs, - targets, - targets_present, - encoding_state, - is_training, - is_validating, - reuse=None): - """Define the Decoder graph. The Decoder will now impute tokens that - have been masked from the input seqeunce. 
- """ - gen_decoder_rnn_size = hparams.gen_rnn_size - - with tf.variable_scope('decoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.LayerNormBasicLSTMCell( - gen_decoder_rnn_size, reuse=reuse) - - attn_cell = lstm_cell - if FLAGS.zoneout_drop_prob > 0.0: - - def attn_cell(): - return zoneout.ZoneoutWrapper( - lstm_cell(), - zoneout_drop_prob=FLAGS.zoneout_drop_prob, - is_training=is_training) - - cell_gen = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - # Hidden encoder states. - hidden_vector_encodings = encoding_state[0] - - # Carry forward the final state tuple from the encoder. - # State tuples. - state_gen = encoding_state[1] - - if FLAGS.attention_option is not None: - (attention_keys, attention_values, _, - attention_construct_fn) = attention_utils.prepare_attention( - hidden_vector_encodings, - FLAGS.attention_option, - num_units=gen_decoder_rnn_size, - reuse=reuse) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, gen_decoder_rnn_size]) - softmax_w = tf.get_variable('softmax_w', - [gen_decoder_rnn_size, FLAGS.vocab_size]) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the Decoder. - if t == 0: - # Always provide the real input at t = 0. - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or (is_training and - FLAGS.gen_training_strategy == 'cross_entropy'): - rnn_inp = real_rnn_inp - else: - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - - if FLAGS.attention_option is not None: - rnn_out = attention_construct_fn(rnn_out, attention_keys, - attention_values) - # # TODO(liamfedus): Assert not "monotonic" attention_type. - # # TODO(liamfedus): FLAGS.attention_type. - # context_state = revised_attention_utils._empty_state() - # rnn_out, context_state = attention_construct_fn( - # rnn_out, attention_keys, attention_values, context_state, t) - logit = tf.matmul(rnn_out, softmax_w) + softmax_b - - # Output for Decoder. - # If input is present: Return real at t+1. - # If input is not present: Return fake for t+1. - real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - output = tf.where(targets_present[:, t], real, fake) - - # Add to lists. 
- sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - - return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( - log_probs, axis=1)) - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph.""" - with tf.variable_scope('gen', reuse=reuse): - encoder_states, initial_state, final_state = gen_encoder( - hparams, inputs, targets_present, is_training=is_training, reuse=reuse) - stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( - hparams, - inputs, - targets, - targets_present, - encoder_states, - is_training=is_training, - is_validating=is_validating, - reuse=reuse) - return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, - final_state) diff --git a/research/maskgan/models/seq2seq_nas.py b/research/maskgan/models/seq2seq_nas.py deleted file mode 100644 index cede90f56..000000000 --- a/research/maskgan/models/seq2seq_nas.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple seq2seq model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -from six.moves import xrange -import tensorflow as tf - -from models import attention_utils - -# NAS Code.. -from nas_utils import configs -from nas_utils import custom_cell -from nas_utils import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def get_config(): - return configs.AlienConfig2() - - -LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) - - -def transform_input_with_is_missing_token(inputs, targets_present): - """Transforms the inputs to have missing tokens when it's masked out. The - mask is for the targets, so therefore, to determine if an input at time t is - masked, we have to check if the target at time t - 1 is masked out. - - e.g. - inputs = [a, b, c, d] - targets = [b, c, d, e] - targets_present = [1, 0, 1, 0] - - then, - transformed_input = [a, b, , d] - - Args: - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. - targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the word. - - Returns: - transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] - which takes on value of inputs when the input is present and takes on - value=vocab_size to indicate a missing token. - """ - # To fill in if the input is missing. - input_missing = tf.constant( - FLAGS.vocab_size, - dtype=tf.int32, - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - # The 0th input will always be present to MaskGAN. - zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) - - # Input present mask. 
- inputs_present = tf.concat( - [zeroth_input_present, targets_present[:, :-1]], axis=1) - - transformed_input = tf.where(inputs_present, inputs, input_missing) - return transformed_input - - -def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): - """Define the Encoder graph. - - - Args: - hparams: Hyperparameters for the MaskGAN. - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. - targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the target. - is_training: Boolean indicating operational mode (train/inference). - reuse (Optional): Whether to reuse the variables. - - Returns: - Tuple of (hidden_states, final_state). - """ - config = get_config() - configs.print_config(config) - # We will use the same variable from the decoder. - if FLAGS.seq2seq_share_embedding: - with tf.variable_scope('decoder/rnn'): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('encoder', reuse=reuse): - # Neural architecture search cell. - cell = custom_cell.Alien(config.hidden_size) - - if is_training: - [h2h_masks, h2i_masks, _, - output_mask] = variational_dropout.generate_variational_dropout_masks( - hparams, config.keep_prob) - else: - h2i_masks, output_mask = None, None - - cell = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) - - initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) - - # Add a missing token for inputs not present. - real_inputs = inputs - masked_inputs = transform_input_with_is_missing_token( - inputs, targets_present) - - with tf.variable_scope('rnn'): - hidden_states = [] - - # Split the embedding into two parts so that we can load the PTB - # weights into one part of the Variable. - if not FLAGS.seq2seq_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - missing_embedding = tf.get_variable('missing_embedding', - [1, hparams.gen_rnn_size]) - embedding = tf.concat([embedding, missing_embedding], axis=0) - - real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) - masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) - - if is_training and FLAGS.keep_prob < 1: - masked_rnn_inputs = tf.nn.dropout(masked_rnn_inputs, FLAGS.keep_prob) - - state = initial_state - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_inp = masked_rnn_inputs[:, t] - - if is_training: - state = list(state) - for layer_num, per_layer_state in enumerate(state): - per_layer_state = LSTMTuple( - per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) - state[layer_num] = per_layer_state - - rnn_out, state = cell(rnn_inp, state, h2i_masks) - - if is_training: - rnn_out = output_mask * rnn_out - - hidden_states.append(rnn_out) - final_masked_state = state - hidden_states = tf.stack(hidden_states, axis=1) - - # Produce the RNN state had the model operated only - # over real data. - real_state = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - # RNN. - rnn_inp = real_rnn_inputs[:, t] - rnn_out, real_state = cell(rnn_inp, real_state) - final_state = real_state - - return (hidden_states, final_masked_state), initial_state, final_state - - -def gen_decoder(hparams, - inputs, - targets, - targets_present, - encoding_state, - is_training, - is_validating, - reuse=None): - """Define the Decoder graph. 
The Decoder will now impute tokens that - have been masked from the input seqeunce. - """ - config = get_config() - gen_decoder_rnn_size = hparams.gen_rnn_size - - if FLAGS.seq2seq_share_embedding: - with tf.variable_scope('decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, gen_decoder_rnn_size]) - - with tf.variable_scope('decoder', reuse=reuse): - # Neural architecture search cell. - cell = custom_cell.Alien(config.hidden_size) - - if is_training: - [h2h_masks, _, _, - output_mask] = variational_dropout.generate_variational_dropout_masks( - hparams, config.keep_prob) - else: - output_mask = None - - cell_gen = custom_cell.GenericMultiRNNCell([cell] * config.num_layers) - - # Hidden encoder states. - hidden_vector_encodings = encoding_state[0] - - # Carry forward the final state tuple from the encoder. - # State tuples. - state_gen = encoding_state[1] - - if FLAGS.attention_option is not None: - (attention_keys, attention_values, _, - attention_construct_fn) = attention_utils.prepare_attention( - hidden_vector_encodings, - FLAGS.attention_option, - num_units=gen_decoder_rnn_size, - reuse=reuse) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - - if not FLAGS.seq2seq_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, gen_decoder_rnn_size]) - softmax_w = tf.matrix_transpose(embedding) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the Decoder. - if t == 0: - # Always provide the real input at t = 0. - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or (is_training and - FLAGS.gen_training_strategy == 'cross_entropy'): - rnn_inp = real_rnn_inp - else: - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - if is_training: - state_gen = list(state_gen) - for layer_num, per_layer_state in enumerate(state_gen): - per_layer_state = LSTMTuple( - per_layer_state[0], per_layer_state[1] * h2h_masks[layer_num]) - state_gen[layer_num] = per_layer_state - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - - if is_training: - rnn_out = output_mask * rnn_out - - if FLAGS.attention_option is not None: - rnn_out = attention_construct_fn(rnn_out, attention_keys, - attention_values) - # # TODO(liamfedus): Assert not "monotonic" attention_type. - # # TODO(liamfedus): FLAGS.attention_type. - # context_state = revised_attention_utils._empty_state() - # rnn_out, context_state = attention_construct_fn( - # rnn_out, attention_keys, attention_values, context_state, t) - logit = tf.matmul(rnn_out, softmax_w) + softmax_b - - # Output for Decoder. - # If input is present: Return real at t+1. - # If input is not present: Return fake for t+1. 
- real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - output = tf.where(targets_present[:, t], real, fake) - - # Add to lists. - sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - - return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack( - log_probs, axis=1)) - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph.""" - with tf.variable_scope('gen', reuse=reuse): - encoder_states, initial_state, final_state = gen_encoder( - hparams, inputs, targets_present, is_training=is_training, reuse=reuse) - stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( - hparams, - inputs, - targets, - targets_present, - encoder_states, - is_training=is_training, - is_validating=is_validating, - reuse=reuse) - return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, - final_state) diff --git a/research/maskgan/models/seq2seq_vd.py b/research/maskgan/models/seq2seq_vd.py deleted file mode 100644 index 850eda435..000000000 --- a/research/maskgan/models/seq2seq_vd.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple seq2seq model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf - -from models import attention_utils -from regularization import variational_dropout - -FLAGS = tf.app.flags.FLAGS - - -def transform_input_with_is_missing_token(inputs, targets_present): - """Transforms the inputs to have missing tokens when it's masked out. The - mask is for the targets, so therefore, to determine if an input at time t is - masked, we have to check if the target at time t - 1 is masked out. - - e.g. - inputs = [a, b, c, d] - targets = [b, c, d, e] - targets_present = [1, 0, 1, 0] - - which computes, - inputs_present = [1, 1, 0, 1] - - and outputs, - transformed_input = [a, b, , d] - - Args: - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. - targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the word. - - Returns: - transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] - which takes on value of inputs when the input is present and takes on - value=vocab_size to indicate a missing token. - """ - # To fill in if the input is missing. - input_missing = tf.constant( - FLAGS.vocab_size, - dtype=tf.int32, - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - # The 0th input will always be present to MaskGAN. 
- zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) - - # Input present mask. - inputs_present = tf.concat( - [zeroth_input_present, targets_present[:, :-1]], axis=1) - - transformed_input = tf.where(inputs_present, inputs, input_missing) - return transformed_input - - -# TODO(adai): IMDB labels placeholder to encoder. -def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): - """Define the Encoder graph. - - Args: - hparams: Hyperparameters for the MaskGAN. - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. - targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the target. - is_training: Boolean indicating operational mode (train/inference). - reuse (Optional): Whether to reuse the variables. - - Returns: - Tuple of (hidden_states, final_state). - """ - # We will use the same variable from the decoder. - if FLAGS.seq2seq_share_embedding: - with tf.variable_scope('decoder/rnn'): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('encoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.gen_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.gen_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size, - hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob) - - cell = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) - - # Add a missing token for inputs not present. - real_inputs = inputs - masked_inputs = transform_input_with_is_missing_token( - inputs, targets_present) - - with tf.variable_scope('rnn') as scope: - hidden_states = [] - - # Split the embedding into two parts so that we can load the PTB - # weights into one part of the Variable. - if not FLAGS.seq2seq_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - missing_embedding = tf.get_variable('missing_embedding', - [1, hparams.gen_rnn_size]) - embedding = tf.concat([embedding, missing_embedding], axis=0) - - # TODO(adai): Perhaps append IMDB labels placeholder to input at - # each time point. - real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) - masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) - - state = initial_state - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform( - tf.stack([FLAGS.batch_size, 1, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size) - - hidden_states, state = tf.nn.dynamic_rnn( - cell, masked_rnn_inputs, initial_state=state, scope=scope) - if is_training: - hidden_states *= output_mask - - final_masked_state = state - - # Produce the RNN state had the model operated only - # over real data. - real_state = initial_state - _, real_state = tf.nn.dynamic_rnn( - cell, real_rnn_inputs, initial_state=real_state, scope=scope) - final_state = real_state - - return (hidden_states, final_masked_state), initial_state, final_state - - -# TODO(adai): IMDB labels placeholder to encoder. 
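The masking convention implemented by transform_input_with_is_missing_token above, and repeated in each of these deleted seq2seq variants, is: input t is dropped exactly when target t - 1 was masked, and position 0 is always kept. The following is a minimal NumPy sketch of that contract, not the deleted TF code itself; the token ids and the stand-in vocab_size below are illustrative assumptions rather than values read from FLAGS.

import numpy as np

vocab_size = 5  # stand-in; the deleted code uses FLAGS.vocab_size as the missing-token id
inputs = np.array([[0, 1, 2, 3]])                      # [a, b, c, d]
targets_present = np.array([[True, False, True, False]])

# The 0th input is always present; input t is present iff target t - 1 was.
zeroth_input_present = np.ones((1, 1), dtype=bool)
inputs_present = np.concatenate(
    [zeroth_input_present, targets_present[:, :-1]], axis=1)

# Absent positions take the reserved id vocab_size.
transformed_input = np.where(inputs_present, inputs, vocab_size)
print(transformed_input)  # [[0 1 5 3]], i.e. [a, b, <missing>, d]

This reproduces the worked example in the docstring: targets_present = [1, 0, 1, 0] yields inputs_present = [1, 1, 0, 1], so only the third input is replaced by the missing token.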
-def gen_encoder_cnn(hparams, inputs, targets_present, is_training, reuse=None): - """Define the CNN Encoder graph.""" - del reuse - sequence = transform_input_with_is_missing_token(inputs, targets_present) - - # TODO(liamfedus): Make this a hyperparameter. - dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20] - - # Keeping track of l2 regularization loss (optional) - # l2_loss = tf.constant(0.0) - - with tf.variable_scope('encoder', reuse=True): - with tf.variable_scope('rnn'): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - cnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - # Create a convolution layer for each filter size - conv_outputs = [] - for filter_size in dis_filter_sizes: - with tf.variable_scope('conv-%s' % filter_size): - # Convolution Layer - filter_shape = [ - filter_size, hparams.gen_rnn_size, hparams.dis_num_filters - ] - W = tf.get_variable( - name='W', initializer=tf.truncated_normal(filter_shape, stddev=0.1)) - b = tf.get_variable( - name='b', - initializer=tf.constant(0.1, shape=[hparams.dis_num_filters])) - conv = tf.nn.conv1d(cnn_inputs, W, stride=1, padding='SAME', name='conv') - - # Apply nonlinearity - h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu') - - conv_outputs.append(h) - - # Combine all the pooled features - dis_num_filters_total = hparams.dis_num_filters * len(dis_filter_sizes) - - h_conv = tf.concat(conv_outputs, axis=2) - h_conv_flat = tf.reshape(h_conv, [-1, dis_num_filters_total]) - - # Add dropout - if is_training: - with tf.variable_scope('dropout'): - h_conv_flat = tf.nn.dropout(h_conv_flat, hparams.gen_vd_keep_prob) - - # Final (unnormalized) scores and predictions - with tf.variable_scope('output'): - W = tf.get_variable( - 'W', - shape=[dis_num_filters_total, hparams.gen_rnn_size], - initializer=tf.contrib.layers.xavier_initializer()) - b = tf.get_variable( - name='b', initializer=tf.constant(0.1, shape=[hparams.gen_rnn_size])) - # l2_loss += tf.nn.l2_loss(W) - # l2_loss += tf.nn.l2_loss(b) - predictions = tf.nn.xw_plus_b(h_conv_flat, W, b, name='predictions') - predictions = tf.reshape( - predictions, - shape=[FLAGS.batch_size, FLAGS.sequence_length, hparams.gen_rnn_size]) - final_state = tf.reduce_mean(predictions, 1) - return predictions, (final_state, final_state) - - -# TODO(adai): IMDB labels placeholder to decoder. -def gen_decoder(hparams, - inputs, - targets, - targets_present, - encoding_state, - is_training, - is_validating, - reuse=None): - """Define the Decoder graph. The Decoder will now impute tokens that - have been masked from the input seqeunce. - """ - gen_decoder_rnn_size = hparams.gen_rnn_size - - targets = tf.Print(targets, [targets], message='targets', summarize=50) - if FLAGS.seq2seq_share_embedding: - with tf.variable_scope('decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - - with tf.variable_scope('decoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - gen_decoder_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.gen_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size, - hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob) - - cell_gen = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - # Hidden encoder states. 
- hidden_vector_encodings = encoding_state[0] - - # Carry forward the final state tuple from the encoder. - # State tuples. - state_gen = encoding_state[1] - - if FLAGS.attention_option is not None: - (attention_keys, attention_values, _, - attention_construct_fn) = attention_utils.prepare_attention( - hidden_vector_encodings, - FLAGS.attention_option, - num_units=gen_decoder_rnn_size, - reuse=reuse) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - - if not FLAGS.seq2seq_share_embedding: - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - softmax_w = tf.matrix_transpose(embedding) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - # TODO(adai): Perhaps append IMDB labels placeholder to input at - # each time point. - - rnn_outs = [] - - fake = None - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the Decoder. - if t == 0: - # Always provide the real input at t = 0. - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or FLAGS.gen_training_strategy == 'cross_entropy': - rnn_inp = real_rnn_inp - else: - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - - if FLAGS.attention_option is not None: - rnn_out = attention_construct_fn(rnn_out, attention_keys, - attention_values) - if is_training: - rnn_out *= output_mask - - rnn_outs.append(rnn_out) - if FLAGS.gen_training_strategy != 'cross_entropy': - logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b) - - # Output for Decoder. - # If input is present: Return real at t+1. - # If input is not present: Return fake for t+1. - real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - if FLAGS.use_gen_mode: - fake = categorical.mode() - else: - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - output = tf.where(targets_present[:, t], real, fake) - - else: - real = targets[:, t] - logit = tf.zeros(tf.stack([FLAGS.batch_size, FLAGS.vocab_size])) - log_prob = tf.zeros(tf.stack([FLAGS.batch_size])) - output = real - - # Add to lists. 
- sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - - if FLAGS.gen_training_strategy == 'cross_entropy': - logits = tf.nn.bias_add( - tf.matmul( - tf.reshape(tf.stack(rnn_outs, 1), [-1, gen_decoder_rnn_size]), - softmax_w), softmax_b) - logits = tf.reshape(logits, - [-1, FLAGS.sequence_length, FLAGS.vocab_size]) - else: - logits = tf.stack(logits, axis=1) - - return (tf.stack(sequence, axis=1), logits, tf.stack(log_probs, axis=1)) - - -def dis_encoder(hparams, masked_inputs, is_training, reuse=None, - embedding=None): - """Define the Discriminator encoder. Reads in the masked inputs for context - and produces the hidden states of the encoder.""" - with tf.variable_scope('encoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.dis_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, - hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) - - cell_dis = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32) - - with tf.variable_scope('rnn'): - hidden_states = [] - - missing_embedding = tf.get_variable('missing_embedding', - [1, hparams.dis_rnn_size]) - embedding = tf.concat([embedding, missing_embedding], axis=0) - masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = masked_rnn_inputs[:, t] - rnn_out, state_dis = cell_dis(rnn_in, state_dis) - if is_training: - rnn_out *= output_mask - hidden_states.append(rnn_out) - final_state = state_dis - - return (tf.stack(hidden_states, axis=1), final_state) - - -def dis_decoder(hparams, - sequence, - encoding_state, - is_training, - reuse=None, - embedding=None): - """Define the Discriminator decoder. Read in the sequence and predict - at each time point.""" - sequence = tf.cast(sequence, tf.int32) - - with tf.variable_scope('decoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell( - hparams.dis_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and hparams.dis_vd_keep_prob < 1: - - def attn_cell(): - return variational_dropout.VariationalDropoutWrapper( - lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size, - hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob) - - cell_dis = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.dis_num_layers)], - state_is_tuple=True) - - # Hidden encoder states. - hidden_vector_encodings = encoding_state[0] - - # Carry forward the final state tuple from the encoder. - # State tuples. 
- state = encoding_state[1] - - if FLAGS.attention_option is not None: - (attention_keys, attention_values, _, - attention_construct_fn) = attention_utils.prepare_attention( - hidden_vector_encodings, - FLAGS.attention_option, - num_units=hparams.dis_rnn_size, - reuse=reuse) - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - if is_training: - output_mask = make_mask(hparams.dis_vd_keep_prob, hparams.dis_rnn_size) - - with tf.variable_scope('rnn') as vs: - predictions = [] - - rnn_inputs = tf.nn.embedding_lookup(embedding, sequence) - - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_in = rnn_inputs[:, t] - rnn_out, state = cell_dis(rnn_in, state) - - if FLAGS.attention_option is not None: - rnn_out = attention_construct_fn(rnn_out, attention_keys, - attention_values) - if is_training: - rnn_out *= output_mask - - # Prediction is linear output for Discriminator. - pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs) - predictions.append(pred) - - predictions = tf.stack(predictions, axis=1) - return tf.squeeze(predictions, axis=2) - - -def discriminator(hparams, - inputs, - targets_present, - sequence, - is_training, - reuse=None): - """Define the Discriminator graph.""" - if FLAGS.dis_share_embedding: - assert hparams.dis_rnn_size == hparams.gen_rnn_size, ( - 'If you wish to share Discriminator/Generator embeddings, they must be' - ' same dimension.') - with tf.variable_scope('gen/decoder/rnn', reuse=True): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - else: - # Explicitly share the embedding. - with tf.variable_scope('dis/decoder/rnn', reuse=reuse): - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.dis_rnn_size]) - - # Mask the input sequence. - masked_inputs = transform_input_with_is_missing_token(inputs, targets_present) - - # Confirm masking. - masked_inputs = tf.Print( - masked_inputs, [inputs, targets_present, masked_inputs, sequence], - message='inputs, targets_present, masked_inputs, sequence', - summarize=10) - - with tf.variable_scope('dis', reuse=reuse): - encoder_states = dis_encoder( - hparams, - masked_inputs, - is_training=is_training, - reuse=reuse, - embedding=embedding) - predictions = dis_decoder( - hparams, - sequence, - encoder_states, - is_training=is_training, - reuse=reuse, - embedding=embedding) - - # if FLAGS.baseline_method == 'critic': - # with tf.variable_scope('critic', reuse=reuse) as critic_scope: - # values = tf.contrib.layers.linear(rnn_outs, 1, scope=critic_scope) - # values = tf.squeeze(values, axis=2) - # else: - # values = None - - return predictions - - -# TODO(adai): IMDB labels placeholder to encoder/decoder. 
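The make_mask helper defined inside gen_encoder, gen_decoder, dis_encoder and dis_decoder above is the core of this file's variational dropout: one Bernoulli mask is sampled per sequence and that same mask rescales rnn_out at every timestep, rather than being resampled per step. Below is a NumPy sketch of the floor(keep_prob + uniform) trick with illustrative shapes and an explicit rng argument; the real code builds the mask from FLAGS.batch_size and the hparams sizes inside the TF graph.

import numpy as np

def make_mask(keep_prob, batch_size, units, rng):
    # keep_prob + U[0, 1) lies in [keep_prob, 1 + keep_prob), so flooring
    # yields 1 with probability keep_prob and 0 otherwise; dividing by
    # keep_prob rescales the kept units (inverted dropout).
    random_tensor = keep_prob + rng.random((batch_size, units))
    return np.floor(random_tensor) / keep_prob

rng = np.random.default_rng(0)
output_mask = make_mask(keep_prob=0.75, batch_size=2, units=4, rng=rng)

# The same mask multiplies the RNN output at every t, so a dropped unit
# stays dropped for the whole sequence:
for t in range(3):
    rnn_out = rng.standard_normal((2, 4))
    rnn_out = rnn_out * output_mask

Sampling the mask once per sequence is what distinguishes this from the per-step DropoutWrapper used in the zaremba variants of these files.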
-def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph.""" - with tf.variable_scope('gen', reuse=reuse): - encoder_states, initial_state, final_state = gen_encoder( - hparams, inputs, targets_present, is_training=is_training, reuse=reuse) - stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( - hparams, - inputs, - targets, - targets_present, - encoder_states, - is_training=is_training, - is_validating=is_validating, - reuse=reuse) - return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, - final_state, encoder_states) diff --git a/research/maskgan/models/seq2seq_zaremba.py b/research/maskgan/models/seq2seq_zaremba.py deleted file mode 100644 index 25f6ce44f..000000000 --- a/research/maskgan/models/seq2seq_zaremba.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Simple seq2seq model definitions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf -from six.moves import xrange -from models import attention_utils - -FLAGS = tf.app.flags.FLAGS - - -def transform_input_with_is_missing_token(inputs, targets_present): - """Transforms the inputs to have missing tokens when it's masked out. The - mask is for the targets, so therefore, to determine if an input at time t is - masked, we have to check if the target at time t - 1 is masked out. - - e.g. - inputs = [a, b, c, d] - targets = [b, c, d, e] - targets_present = [1, 0, 1, 0] - - then, - transformed_input = [a, b, , d] - - Args: - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. - targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the word. - - Returns: - transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length] - which takes on value of inputs when the input is present and takes on - value=vocab_size to indicate a missing token. - """ - # To fill in if the input is missing. - input_missing = tf.constant(FLAGS.vocab_size, - dtype=tf.int32, - shape=[FLAGS.batch_size, FLAGS.sequence_length]) - - # The 0th input will always be present to MaskGAN. - zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1]) - - # Input present mask. - inputs_present = tf.concat( - [zeroth_input_present, targets_present[:, :-1]], axis=1) - - transformed_input = tf.where(inputs_present, inputs, input_missing) - return transformed_input - - -def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None): - """Define the Encoder graph. - - - Args: - hparams: Hyperparameters for the MaskGAN. - inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens - up to, but not including, vocab_size. 
- targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with - True representing the presence of the target. - is_training: Boolean indicating operational mode (train/inference). - reuse (Optional): Whether to reuse the variables. - - Returns: - Tuple of (hidden_states, final_state). - """ - with tf.variable_scope('encoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell(hparams.gen_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and FLAGS.keep_prob < 1: - - def attn_cell(): - return tf.contrib.rnn.DropoutWrapper( - lstm_cell(), output_keep_prob=FLAGS.keep_prob) - - cell = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - initial_state = cell.zero_state(FLAGS.batch_size, tf.float32) - - # Add a missing token for inputs not present. - real_inputs = inputs - masked_inputs = transform_input_with_is_missing_token(inputs, - targets_present) - - with tf.variable_scope('rnn'): - hidden_states = [] - - # Split the embedding into two parts so that we can load the PTB - # weights into one part of the Variable. - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - missing_embedding = tf.get_variable('missing_embedding', - [1, hparams.gen_rnn_size]) - embedding = tf.concat([embedding, missing_embedding], axis=0) - - real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs) - masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs) - - if is_training and FLAGS.keep_prob < 1: - masked_rnn_inputs = tf.nn.dropout(masked_rnn_inputs, FLAGS.keep_prob) - - state = initial_state - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - rnn_inp = masked_rnn_inputs[:, t] - rnn_out, state = cell(rnn_inp, state) - hidden_states.append(rnn_out) - final_masked_state = state - hidden_states = tf.stack(hidden_states, axis=1) - - # Produce the RNN state had the model operated only - # over real data. - real_state = initial_state - for t in xrange(FLAGS.sequence_length): - tf.get_variable_scope().reuse_variables() - - # RNN. - rnn_inp = real_rnn_inputs[:, t] - rnn_out, real_state = cell(rnn_inp, real_state) - final_state = real_state - - return (hidden_states, final_masked_state), initial_state, final_state - - -def gen_decoder(hparams, - inputs, - targets, - targets_present, - encoding_state, - is_training, - is_validating, - reuse=None): - """Define the Decoder graph. The Decoder will now impute tokens that - have been masked from the input seqeunce. - """ - gen_decoder_rnn_size = hparams.gen_rnn_size - - with tf.variable_scope('decoder', reuse=reuse): - - def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell(gen_decoder_rnn_size, - forget_bias=0.0, - state_is_tuple=True, - reuse=reuse) - - attn_cell = lstm_cell - if is_training and FLAGS.keep_prob < 1: - - def attn_cell(): - return tf.contrib.rnn.DropoutWrapper( - lstm_cell(), output_keep_prob=FLAGS.keep_prob) - - cell_gen = tf.contrib.rnn.MultiRNNCell( - [attn_cell() for _ in range(hparams.gen_num_layers)], - state_is_tuple=True) - - # Hidden encoder states. - hidden_vector_encodings = encoding_state[0] - - # Carry forward the final state tuple from the encoder. - # State tuples. 
- state_gen = encoding_state[1] - - if FLAGS.attention_option is not None: - (attention_keys, attention_values, _, - attention_construct_fn) = attention_utils.prepare_attention( - hidden_vector_encodings, - FLAGS.attention_option, - num_units=gen_decoder_rnn_size, - reuse=reuse) - - with tf.variable_scope('rnn'): - sequence, logits, log_probs = [], [], [] - - embedding = tf.get_variable('embedding', - [FLAGS.vocab_size, hparams.gen_rnn_size]) - softmax_w = tf.matrix_transpose(embedding) - softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size]) - - rnn_inputs = tf.nn.embedding_lookup(embedding, inputs) - - if is_training and FLAGS.keep_prob < 1: - rnn_inputs = tf.nn.dropout(rnn_inputs, FLAGS.keep_prob) - - rnn_outs = [] - - fake = None - for t in xrange(FLAGS.sequence_length): - if t > 0: - tf.get_variable_scope().reuse_variables() - - # Input to the Decoder. - if t == 0: - # Always provide the real input at t = 0. - rnn_inp = rnn_inputs[:, t] - - # If the input is present, read in the input at t. - # If the input is not present, read in the previously generated. - else: - real_rnn_inp = rnn_inputs[:, t] - - # While validating, the decoder should be operating in teacher - # forcing regime. Also, if we're just training with cross_entropy - # use teacher forcing. - if is_validating or FLAGS.gen_training_strategy == 'cross_entropy': - rnn_inp = real_rnn_inp - else: - fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake) - rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp, - fake_rnn_inp) - - # RNN. - rnn_out, state_gen = cell_gen(rnn_inp, state_gen) - - if FLAGS.attention_option is not None: - rnn_out = attention_construct_fn(rnn_out, attention_keys, - attention_values) - rnn_outs.append(rnn_out) - if FLAGS.gen_training_strategy != 'cross_entropy': - logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b) - - # Output for Decoder. - # If input is present: Return real at t+1. - # If input is not present: Return fake for t+1. - real = targets[:, t] - - categorical = tf.contrib.distributions.Categorical(logits=logit) - fake = categorical.sample() - log_prob = categorical.log_prob(fake) - - output = tf.where(targets_present[:, t], real, fake) - - else: - batch_size = tf.shape(rnn_out)[0] - logit = tf.zeros(tf.stack([batch_size, FLAGS.vocab_size])) - log_prob = tf.zeros(tf.stack([batch_size])) - output = targets[:, t] - - # Add to lists. 
- sequence.append(output) - log_probs.append(log_prob) - logits.append(logit) - if FLAGS.gen_training_strategy == 'cross_entropy': - logits = tf.nn.bias_add( - tf.matmul( - tf.reshape(tf.stack(rnn_outs, 1), [-1, gen_decoder_rnn_size]), - softmax_w), softmax_b) - logits = tf.reshape(logits, - [-1, FLAGS.sequence_length, FLAGS.vocab_size]) - else: - logits = tf.stack(logits, axis=1) - - return (tf.stack(sequence, axis=1), logits, tf.stack(log_probs, axis=1)) - - -def generator(hparams, - inputs, - targets, - targets_present, - is_training, - is_validating, - reuse=None): - """Define the Generator graph.""" - with tf.variable_scope('gen', reuse=reuse): - encoder_states, initial_state, final_state = gen_encoder( - hparams, inputs, targets_present, is_training=is_training, reuse=reuse) - stacked_sequence, stacked_logits, stacked_log_probs = gen_decoder( - hparams, - inputs, - targets, - targets_present, - encoder_states, - is_training=is_training, - is_validating=is_validating, - reuse=reuse) - return (stacked_sequence, stacked_logits, stacked_log_probs, initial_state, - final_state) diff --git a/research/maskgan/nas_utils/__init__.py b/research/maskgan/nas_utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/maskgan/nas_utils/configs.py b/research/maskgan/nas_utils/configs.py deleted file mode 100644 index 80d867c36..000000000 --- a/research/maskgan/nas_utils/configs.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -def print_config(config): - print("-" * 10, "Configuration Specs", "-" * 10) - for item in dir(config): - if list(item)[0] != "_": - print(item, getattr(config, item)) - print("-" * 29) - - -class AlienConfig2(object): - """Base 8 740 shared embeddings, gets 64.0 (mean: std: min: max: ).""" - init_scale = 0.05 - learning_rate = 1.0 - max_grad_norm = 10 - num_layers = 2 - num_steps = 25 - hidden_size = 740 - max_epoch = 70 - max_max_epoch = 250 - keep_prob = [1 - 0.15, 1 - 0.45] - lr_decay = 0.95 - batch_size = 20 - vocab_size = 10000 - weight_decay = 1e-4 - share_embeddings = True - cell = "alien" - dropout_type = "variational" diff --git a/research/maskgan/nas_utils/custom_cell.py b/research/maskgan/nas_utils/custom_cell.py deleted file mode 100644 index 6add7ffa4..000000000 --- a/research/maskgan/nas_utils/custom_cell.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import numpy as np -import tensorflow as tf - -flags = tf.flags -FLAGS = tf.app.flags.FLAGS -LSTMTuple = collections.namedtuple('LSTMTuple', ['c', 'h']) - - -def cell_depth(num): - num /= 2 - val = np.log2(1 + num) - assert abs(val - int(val)) == 0 - return int(val) - - -class GenericMultiRNNCell(tf.contrib.rnn.RNNCell): - """More generic version of MultiRNNCell that allows you to pass in a dropout mask""" - - def __init__(self, cells): - """Create a RNN cell composed sequentially of a number of RNNCells. - - Args: - cells: list of RNNCells that will be composed in this order. - state_is_tuple: If True, accepted and returned states are n-tuples, where - `n = len(cells)`. If False, the states are all - concatenated along the column axis. This latter behavior will soon be - deprecated. - - Raises: - ValueError: if cells is empty (not allowed), or at least one of the cells - returns a state tuple but the flag `state_is_tuple` is `False`. - """ - self._cells = cells - - @property - def state_size(self): - return tuple(cell.state_size for cell in self._cells) - - @property - def output_size(self): - return self._cells[-1].output_size - - def __call__(self, inputs, state, input_masks=None, scope=None): - """Run this multi-layer cell on inputs, starting from state.""" - with tf.variable_scope(scope or type(self).__name__): - cur_inp = inputs - new_states = [] - for i, cell in enumerate(self._cells): - with tf.variable_scope('Cell%d' % i): - cur_state = state[i] - if input_masks is not None: - cur_inp *= input_masks[i] - cur_inp, new_state = cell(cur_inp, cur_state) - new_states.append(new_state) - new_states = tuple(new_states) - return cur_inp, new_states - - -class AlienRNNBuilder(tf.contrib.rnn.RNNCell): - - def __init__(self, num_units, params, additional_params, base_size): - self.num_units = num_units - self.cell_create_index = additional_params[0] - self.cell_inject_index = additional_params[1] - self.base_size = base_size - self.cell_params = params[ - -2:] # Cell injection parameters are always the last two - params = params[:-2] - self.depth = cell_depth(len(params)) - self.params = params - self.units_per_layer = [2**i for i in range(self.depth) - ][::-1] # start with the biggest layer - - def __call__(self, inputs, state, scope=None): - with tf.variable_scope(scope or type(self).__name__): - definition1 = ['add', 'elem_mult', 'max'] - definition2 = [tf.identity, tf.tanh, tf.sigmoid, tf.nn.relu, tf.sin] - layer_outputs = [[] for _ in range(self.depth)] - with tf.variable_scope('rnn_builder'): - curr_index = 0 - c, h = state - - # Run all dense matrix multiplications at once - big_h_mat = tf.get_variable( - 'big_h_mat', [self.num_units, - self.base_size * self.num_units], tf.float32) - big_inputs_mat = tf.get_variable( - 'big_inputs_mat', [self.num_units, - self.base_size * self.num_units], tf.float32) - big_h_output = tf.matmul(h, big_h_mat) - big_inputs_output = 
tf.matmul(inputs, big_inputs_mat) - h_splits = tf.split(big_h_output, self.base_size, axis=1) - inputs_splits = tf.split(big_inputs_output, self.base_size, axis=1) - - for layer_num, units in enumerate(self.units_per_layer): - for unit_num in range(units): - with tf.variable_scope( - 'layer_{}_unit_{}'.format(layer_num, unit_num)): - if layer_num == 0: - prev1_mat = h_splits[unit_num] - prev2_mat = inputs_splits[unit_num] - else: - prev1_mat = layer_outputs[layer_num - 1][2 * unit_num] - prev2_mat = layer_outputs[layer_num - 1][2 * unit_num + 1] - if definition1[self.params[curr_index]] == 'add': - output = prev1_mat + prev2_mat - elif definition1[self.params[curr_index]] == 'elem_mult': - output = prev1_mat * prev2_mat - elif definition1[self.params[curr_index]] == 'max': - output = tf.maximum(prev1_mat, prev2_mat) - if curr_index / 2 == self.cell_create_index: # Take the new cell before the activation - new_c = tf.identity(output) - output = definition2[self.params[curr_index + 1]](output) - if curr_index / 2 == self.cell_inject_index: - if definition1[self.cell_params[0]] == 'add': - output += c - elif definition1[self.cell_params[0]] == 'elem_mult': - output *= c - elif definition1[self.cell_params[0]] == 'max': - output = tf.maximum(output, c) - output = definition2[self.cell_params[1]](output) - layer_outputs[layer_num].append(output) - curr_index += 2 - new_h = layer_outputs[-1][-1] - return new_h, LSTMTuple(new_c, new_h) - - @property - def state_size(self): - return LSTMTuple(self.num_units, self.num_units) - - @property - def output_size(self): - return self.num_units - - -class Alien(AlienRNNBuilder): - """Base 8 Cell.""" - - def __init__(self, num_units): - params = [ - 0, 2, 0, 3, 0, 2, 1, 3, 0, 1, 0, 2, 0, 1, 0, 2, 1, 1, 0, 1, 1, 1, 0, 2, - 1, 0, 0, 1, 1, 1, 0, 1 - ] - additional_params = [12, 8] - base_size = 8 - super(Alien, self).__init__(num_units, params, additional_params, base_size) diff --git a/research/maskgan/nas_utils/variational_dropout.py b/research/maskgan/nas_utils/variational_dropout.py deleted file mode 100644 index 49cc29f0c..000000000 --- a/research/maskgan/nas_utils/variational_dropout.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Variational Dropout.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - - -def generate_dropout_masks(keep_prob, shape, amount): - masks = [] - for _ in range(amount): - dropout_mask = tf.random_uniform(shape) + (keep_prob) - dropout_mask = tf.floor(dropout_mask) / (keep_prob) - masks.append(dropout_mask) - return masks - - -def generate_variational_dropout_masks(hparams, keep_prob): - [batch_size, num_steps, size, num_layers] = [ - FLAGS.batch_size, FLAGS.sequence_length, hparams.gen_rnn_size, - hparams.gen_num_layers - ] - if len(keep_prob) == 2: - emb_keep_prob = keep_prob[0] # keep prob for embedding matrix - h2h_keep_prob = emb_keep_prob # keep prob for hidden to hidden connections - h2i_keep_prob = keep_prob[1] # keep prob for hidden to input connections - out_keep_prob = h2i_keep_prob # keep probability for output state - else: - emb_keep_prob = keep_prob[0] # keep prob for embedding matrix - h2h_keep_prob = keep_prob[1] # keep prob for hidden to hidden connections - h2i_keep_prob = keep_prob[2] # keep prob for hidden to input connections - out_keep_prob = keep_prob[3] # keep probability for output state - h2i_masks = [] # Masks for input to recurrent connections - h2h_masks = [] # Masks for recurrent to recurrent connections - - # Input word dropout mask - emb_masks = generate_dropout_masks(emb_keep_prob, [num_steps, 1], batch_size) - output_mask = generate_dropout_masks(out_keep_prob, [batch_size, size], 1)[0] - h2i_masks = generate_dropout_masks(h2i_keep_prob, [batch_size, size], - num_layers) - h2h_masks = generate_dropout_masks(h2h_keep_prob, [batch_size, size], - num_layers) - return h2h_masks, h2i_masks, emb_masks, output_mask diff --git a/research/maskgan/pretrain_mask_gan.py b/research/maskgan/pretrain_mask_gan.py deleted file mode 100644 index 1a9d8ee94..000000000 --- a/research/maskgan/pretrain_mask_gan.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Pretraining functions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np - -import tensorflow as tf - -from data import imdb_loader -from data import ptb_loader - -# Data. 
-from model_utils import model_utils
-from models import evaluation_utils
-
-tf.app.flags.DEFINE_integer(
-    'gen_pretrain_steps', None,
-    'The number of steps to pretrain the generator with cross entropy loss.')
-tf.app.flags.DEFINE_integer(
-    'dis_pretrain_steps', None,
-    'The number of steps to pretrain the discriminator.')
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def pretrain_generator(sv, sess, model, data, log, id_to_word,
-                       data_ngram_counts, is_chief):
-  """Pretrain the generator with classic language modeling training."""
-  print('\nPretraining generator for %d steps.' % FLAGS.gen_pretrain_steps)
-  log.write(
-      '\nPretraining generator for %d steps.\n' % FLAGS.gen_pretrain_steps)
-
-  is_pretraining = True
-
-  while is_pretraining:
-
-    costs = 0.
-    iters = 0
-    if FLAGS.data_set == 'ptb':
-      iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size,
-                                         FLAGS.sequence_length,
-                                         FLAGS.epoch_size_override)
-    elif FLAGS.data_set == 'imdb':
-      iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size,
-                                           FLAGS.sequence_length)
-
-    for x, y, _ in iterator:
-
-      # For pretraining with cross entropy loss, we have all tokens in the
-      # forward sequence present (all True).
-      model_utils.assign_percent_real(sess, model.percent_real_update,
-                                      model.new_rate, 1.0)
-      p = np.ones(shape=[FLAGS.batch_size, FLAGS.sequence_length], dtype=bool)
-
-      pretrain_feed = {model.inputs: x, model.targets: y, model.present: p}
-
-      [losses, cost_eval, _, step] = sess.run(
-          [
-              model.fake_cross_entropy_losses, model.avg_log_perplexity,
-              model.gen_pretrain_op, model.global_step
-          ],
-          feed_dict=pretrain_feed)
-
-      costs += cost_eval
-      iters += FLAGS.sequence_length
-
-      # Calculate rolling perplexity (see the sketch below).
-      perplexity = np.exp(costs / iters)
-
-      # Summaries.
-      if is_chief and step % FLAGS.summaries_every == 0:
-        # Graph summaries.
-        summary_str = sess.run(
-            model.merge_summaries_op, feed_dict=pretrain_feed)
-        sv.SummaryComputed(sess, summary_str)
-
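
# Sketch of the rolling-perplexity bookkeeping used above: `costs`
# accumulates the per-batch cost_eval (model.avg_log_perplexity), `iters`
# counts tokens (sequence_length per batch), and the running perplexity is
# exp(costs / iters). Made-up numbers:
import numpy as np

sequence_length = 20
batch_costs = [92.0, 88.5, 86.1]    # hypothetical per-batch cost_eval values
costs = sum(batch_costs)
iters = sequence_length * len(batch_costs)
perplexity = np.exp(costs / iters)  # exp(266.6 / 60) ~= 85.1
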
-        # Additional summary.
-        for n, data_ngram_count in data_ngram_counts.iteritems():
-          avg_percent_captured = evaluation_utils.sequence_ngram_evaluation(
-              sess, model.fake_sequence, log, pretrain_feed, data_ngram_count,
-              int(n))
-          summary_percent_str = tf.Summary(value=[
-              tf.Summary.Value(
-                  tag='general/%s-grams_percent_correct' % n,
-                  simple_value=avg_percent_captured)
-          ])
-          sv.SummaryComputed(sess, summary_percent_str, global_step=step)
-
-        summary_perplexity_str = tf.Summary(value=[
-            tf.Summary.Value(tag='general/perplexity', simple_value=perplexity)
-        ])
-        sv.SummaryComputed(sess, summary_perplexity_str, global_step=step)
-
-      # Printing and logging.
-      if is_chief and step % FLAGS.print_every == 0:
-        print('global_step: %d' % step)
-        print('  generator loss: %.3f' % np.mean(losses))
-        print('  perplexity: %.3f' % perplexity)
-        log.write('global_step: %d\n' % step)
-        log.write('  generator loss: %.3f\n' % np.mean(losses))
-        log.write('  perplexity: %.3f\n' % perplexity)
-
-        for n, data_ngram_count in data_ngram_counts.iteritems():
-          avg_percent_captured = evaluation_utils.sequence_ngram_evaluation(
-              sess, model.fake_sequence, log, pretrain_feed, data_ngram_count,
-              int(n))
-          print('  percent of %s-grams captured: %.3f.\n' %
-                (n, avg_percent_captured))
-          log.write('  percent of %s-grams captured: %.3f.\n\n' %
-                    (n, avg_percent_captured))
-
-        evaluation_utils.generate_logs(sess, model, log, id_to_word,
-                                       pretrain_feed)
-
-      if step >= FLAGS.gen_pretrain_steps:
-        is_pretraining = False
-        break
-  return
-
-
-def pretrain_discriminator(sv, sess, model, data, log, id_to_word,
-                           data_ngram_counts, is_chief):
-  """Pretrain the discriminator on randomly masked sequences."""
-  print('\nPretraining discriminator for %d steps.' % FLAGS.dis_pretrain_steps)
-  log.write(
-      '\nPretraining discriminator for %d steps.\n' % FLAGS.dis_pretrain_steps)
-
-  is_pretraining = True
-
-  while is_pretraining:
-
-    cumulative_costs = 0.
-    iters = 0
-    if FLAGS.data_set == 'ptb':
-      iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size,
-                                         FLAGS.sequence_length,
-                                         FLAGS.epoch_size_override)
-    elif FLAGS.data_set == 'imdb':
-      iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size,
-                                           FLAGS.sequence_length)
-
-    for x, y, _ in iterator:
-      is_present_rate = FLAGS.is_present_rate
-      # is_present_rate = np.random.uniform(low=0.0, high=1.0)
-      model_utils.assign_percent_real(sess, model.percent_real_update,
-                                      model.new_rate, is_present_rate)
-      # Randomly mask out tokens.
-      p = model_utils.generate_mask()
-
-      pretrain_feed = {model.inputs: x, model.targets: y, model.present: p}
-
-      [_, dis_loss_eval, gen_log_perplexity_eval, step] = sess.run(
-          [
-              model.dis_pretrain_op, model.dis_loss, model.avg_log_perplexity,
-              model.global_step
-          ],
-          feed_dict=pretrain_feed)
-
-      cumulative_costs += gen_log_perplexity_eval
-      iters += 1
-
-      # Calculate rolling perplexity.
-      perplexity = np.exp(cumulative_costs / iters)
-
-      # Summaries.
-      if is_chief and step % FLAGS.summaries_every == 0:
-        # Graph summaries.
-        summary_str = sess.run(
-            model.merge_summaries_op, feed_dict=pretrain_feed)
-        sv.SummaryComputed(sess, summary_str)
-
-        # Additional summary.
- for n, data_ngram_count in data_ngram_counts.iteritems(): - avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( - sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, - int(n)) - summary_percent_str = tf.Summary(value=[ - tf.Summary.Value( - tag='general/%s-grams_percent_correct' % n, - simple_value=avg_percent_captured) - ]) - sv.SummaryComputed(sess, summary_percent_str, global_step=step) - - summary_perplexity_str = tf.Summary(value=[ - tf.Summary.Value(tag='general/perplexity', simple_value=perplexity) - ]) - sv.SummaryComputed(sess, summary_perplexity_str, global_step=step) - - # Printing and logging - if is_chief and step % FLAGS.print_every == 0: - print('global_step: %d' % step) - print(' discriminator loss: %.3f' % dis_loss_eval) - print(' perplexity: %.3f' % perplexity) - log.write('global_step: %d\n' % step) - log.write(' discriminator loss: %.3f\n' % dis_loss_eval) - log.write(' perplexity: %.3f\n' % perplexity) - - for n, data_ngram_count in data_ngram_counts.iteritems(): - avg_percent_captured = evaluation_utils.sequence_ngram_evaluation( - sess, model.fake_sequence, log, pretrain_feed, data_ngram_count, - int(n)) - print(' percent of %s-grams captured: %.3f.\n' % - (n, avg_percent_captured)) - log.write(' percent of %s-grams captured: %.3f.\n\n' % - (n, avg_percent_captured)) - - evaluation_utils.generate_logs(sess, model, log, id_to_word, - pretrain_feed) - - if step >= FLAGS.dis_pretrain_steps + int(FLAGS.gen_pretrain_steps or 0): - is_pretraining = False - break - return diff --git a/research/maskgan/regularization/__init__.py b/research/maskgan/regularization/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/maskgan/regularization/variational_dropout.py b/research/maskgan/regularization/variational_dropout.py deleted file mode 100644 index d67fe52ee..000000000 --- a/research/maskgan/regularization/variational_dropout.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Variational Dropout Wrapper.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -class VariationalDropoutWrapper(tf.contrib.rnn.RNNCell): - """Add variational dropout to a RNN cell.""" - - def __init__(self, cell, batch_size, input_size, recurrent_keep_prob, - input_keep_prob): - self._cell = cell - self._recurrent_keep_prob = recurrent_keep_prob - self._input_keep_prob = input_keep_prob - - def make_mask(keep_prob, units): - random_tensor = keep_prob - # 0. if [keep_prob, 1.0) and 1. 
if [1.0, 1.0 + keep_prob) - random_tensor += tf.random_uniform(tf.stack([batch_size, units])) - return tf.floor(random_tensor) / keep_prob - - self._recurrent_mask = make_mask(recurrent_keep_prob, - self._cell.state_size[0]) - self._input_mask = self._recurrent_mask - - @property - def state_size(self): - return self._cell.state_size - - @property - def output_size(self): - return self._cell.output_size - - def __call__(self, inputs, state, scope=None): - dropped_inputs = inputs * self._input_mask - dropped_state = (state[0], state[1] * self._recurrent_mask) - new_h, new_state = self._cell(dropped_inputs, dropped_state, scope) - return new_h, new_state diff --git a/research/maskgan/regularization/zoneout.py b/research/maskgan/regularization/zoneout.py deleted file mode 100644 index 5f9ef3e30..000000000 --- a/research/maskgan/regularization/zoneout.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Zoneout Wrapper""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -class ZoneoutWrapper(tf.contrib.rnn.RNNCell): - """Add Zoneout to a RNN cell.""" - - def __init__(self, cell, zoneout_drop_prob, is_training=True): - self._cell = cell - self._zoneout_prob = zoneout_drop_prob - self._is_training = is_training - - @property - def state_size(self): - return self._cell.state_size - - @property - def output_size(self): - return self._cell.output_size - - def __call__(self, inputs, state, scope=None): - output, new_state = self._cell(inputs, state, scope) - if not isinstance(self._cell.state_size, tuple): - new_state = tf.split(value=new_state, num_or_size_splits=2, axis=1) - state = tf.split(value=state, num_or_size_splits=2, axis=1) - final_new_state = [new_state[0], new_state[1]] - if self._is_training: - for i, state_element in enumerate(state): - random_tensor = 1 - self._zoneout_prob # keep probability - random_tensor += tf.random_uniform(tf.shape(state_element)) - # 0. if [zoneout_prob, 1.0) and 1. if [1.0, 1.0 + zoneout_prob) - binary_tensor = tf.floor(random_tensor) - final_new_state[ - i] = (new_state[i] - state_element) * binary_tensor + state_element - else: - for i, state_element in enumerate(state): - final_new_state[ - i] = state_element * self._zoneout_prob + new_state[i] * ( - 1 - self._zoneout_prob) - if isinstance(self._cell.state_size, tuple): - return output, tf.contrib.rnn.LSTMStateTuple( - final_new_state[0], final_new_state[1]) - - return output, tf.concat([final_new_state[0], final_new_state[1]], 1) diff --git a/research/maskgan/sample_shuffler.py b/research/maskgan/sample_shuffler.py deleted file mode 100644 index 58c31fb57..000000000 --- a/research/maskgan/sample_shuffler.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Shuffle samples for human evaluation. - -Local launch command: - python sample_shuffler.py - --input_ml_path=/tmp/ptb/seq2seq_vd_shareemb_forreal_55_3 - --input_gan_path=/tmp/ptb/MaskGAN_PTB_ari_avg_56.29_v2.0.0 - --output_file_name=/tmp/ptb/shuffled_output.txt - - python sample_shuffler.py - --input_ml_path=/tmp/generate_samples/MaskGAN_IMDB_Benchmark_87.1_v0.3.0 - --input_gan_path=/tmp/generate_samples/MaskGAN_IMDB_v1.0.1 - --output_file_name=/tmp/imdb/shuffled_output.txt -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -# Dependency imports -import numpy as np - -import tensorflow as tf - -tf.app.flags.DEFINE_string('input_ml_path', '/tmp', 'Model output directory.') -tf.app.flags.DEFINE_string('input_gan_path', '/tmp', 'Model output directory.') -tf.app.flags.DEFINE_string('output_file_name', '/tmp/ptb/shuffled_output.txt', - 'Model output file.') -tf.app.flags.DEFINE_boolean( - 'output_masked_logs', False, - 'Whether to display for human evaluation (show masking).') -tf.app.flags.DEFINE_integer('number_epochs', 1, - 'The number of epochs to produce.') - -FLAGS = tf.app.flags.FLAGS - - -def shuffle_samples(input_file_1, input_file_2): - """Shuffle the examples.""" - shuffled = [] - - # Set a random seed to keep fixed mask. - np.random.seed(0) - - for line_1, line_2 in zip(input_file_1, input_file_2): - rand = np.random.randint(1, 3) - if rand == 1: - shuffled.append((rand, line_1, line_2)) - else: - shuffled.append((rand, line_2, line_1)) - input_file_1.close() - input_file_2.close() - return shuffled - - -def generate_output(shuffled_tuples, output_file_name): - output_file = tf.gfile.GFile(output_file_name, mode='w') - - for tup in shuffled_tuples: - formatted_tuple = ('\n{:<1}, {:<1}, {:<1}').format(tup[0], tup[1].rstrip(), - tup[2].rstrip()) - output_file.write(formatted_tuple) - output_file.close() - - -def main(_): - ml_samples_file = tf.gfile.GFile( - os.path.join(FLAGS.input_ml_path, 'reviews.txt'), mode='r') - gan_samples_file = tf.gfile.GFile( - os.path.join(FLAGS.input_gan_path, 'reviews.txt'), mode='r') - - # Generate shuffled tuples. - shuffled_tuples = shuffle_samples(ml_samples_file, gan_samples_file) - - # Output to file. - generate_output(shuffled_tuples, FLAGS.output_file_name) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/maskgan/train_mask_gan.py b/research/maskgan/train_mask_gan.py deleted file mode 100644 index 1e70c2284..000000000 --- a/research/maskgan/train_mask_gan.py +++ /dev/null @@ -1,1167 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Launch example:
-
-[IMDB]
-python train_mask_gan.py --data_dir
-/tmp/imdb --data_set imdb --batch_size 128
---sequence_length 20 --base_directory /tmp/maskGAN_v0.01
---hparams="gen_rnn_size=650,gen_num_layers=2,dis_rnn_size=650,dis_num_layers=2
-,critic_learning_rate=0.0009756,dis_learning_rate=0.0000585,
-dis_train_iterations=8,gen_learning_rate=0.0016624,
-gen_full_learning_rate_steps=1e9,gen_learning_rate_decay=0.999999,
-rl_discount_rate=0.8835659" --mode TRAIN --max_steps 1000000
---generator_model seq2seq_vd --discriminator_model seq2seq_vd
---is_present_rate 0.5 --summaries_every 25 --print_every 25
- --max_num_to_print=3 --generator_optimizer=adam
- --seq2seq_share_embedding=True --baseline_method=critic
- --attention_option=luong --n_gram_eval=4 --mask_strategy=contiguous
- --gen_training_strategy=reinforce --dis_pretrain_steps=100
- --perplexity_threshold=1000000
- --dis_share_embedding=True --maskgan_ckpt
- /tmp/model.ckpt-171091
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-
-from functools import partial
-import os
-import time
-# Dependency imports
-
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-import pretrain_mask_gan
-from data import imdb_loader
-from data import ptb_loader
-from model_utils import helper
-from model_utils import model_construction
-from model_utils import model_losses
-from model_utils import model_optimization
-
-# Data.
-from model_utils import model_utils
-
-from model_utils import n_gram
-from models import evaluation_utils
-
-from models import rollout
-
-np.set_printoptions(precision=3)
-np.set_printoptions(suppress=True)
-
-MODE_TRAIN = 'TRAIN'
-MODE_TRAIN_EVAL = 'TRAIN_EVAL'
-MODE_VALIDATION = 'VALIDATION'
-MODE_TEST = 'TEST'
-
-## Binary and setup FLAGS.
-tf.app.flags.DEFINE_enum(
-    'mode', 'TRAIN', [MODE_TRAIN, MODE_VALIDATION, MODE_TEST, MODE_TRAIN_EVAL],
-    'What this binary will do.')
-tf.app.flags.DEFINE_string('master', '',
-                           """Name of the TensorFlow master to use.""")
-tf.app.flags.DEFINE_string('eval_master', '',
-                           """Name prefix of the TensorFlow eval master.""")
-tf.app.flags.DEFINE_integer('task', 0,
-                            """Task id of the replica running the training.""")
-tf.app.flags.DEFINE_integer('ps_tasks', 0, """Number of tasks in the ps job.
-                            If 0 no ps job is used.""")
-
-## General FLAGS.
-tf.app.flags.DEFINE_string(
-    'hparams', '', 'Comma separated list of name=value hyperparameter pairs.')
-tf.app.flags.DEFINE_integer('batch_size', 20, 'The batch size.')
-tf.app.flags.DEFINE_integer('vocab_size', 10000, 'The vocabulary size.')
-tf.app.flags.DEFINE_integer('sequence_length', 20, 'The sequence length.')
-tf.app.flags.DEFINE_integer('max_steps', 1000000,
-                            'Maximum number of steps to run.')
-tf.app.flags.DEFINE_string(
-    'mask_strategy', 'random', 'Strategy for masking the words. Determines the '
-    'characteristics of how the words are dropped out. 
One of ' - "['contiguous', 'random'].") -tf.app.flags.DEFINE_float('is_present_rate', 0.5, - 'Percent of tokens present in the forward sequence.') -tf.app.flags.DEFINE_float('is_present_rate_decay', None, 'Decay rate for the ' - 'percent of words that are real (are present).') -tf.app.flags.DEFINE_string( - 'generator_model', 'seq2seq', - "Type of Generator model. One of ['rnn', 'seq2seq', 'seq2seq_zaremba'," - "'rnn_zaremba', 'rnn_nas', 'seq2seq_nas']") -tf.app.flags.DEFINE_string( - 'attention_option', None, - "Attention mechanism. One of [None, 'luong', 'bahdanau']") -tf.app.flags.DEFINE_string( - 'discriminator_model', 'bidirectional', - "Type of Discriminator model. One of ['cnn', 'rnn', 'bidirectional', " - "'rnn_zaremba', 'bidirectional_zaremba', 'rnn_nas', 'rnn_vd', 'seq2seq_vd']" -) -tf.app.flags.DEFINE_boolean('seq2seq_share_embedding', False, - 'Whether to share the ' - 'embeddings between the encoder and decoder.') -tf.app.flags.DEFINE_boolean( - 'dis_share_embedding', False, 'Whether to share the ' - 'embeddings between the generator and discriminator.') -tf.app.flags.DEFINE_boolean('dis_update_share_embedding', False, 'Whether the ' - 'discriminator should update the shared embedding.') -tf.app.flags.DEFINE_boolean('use_gen_mode', False, - 'Use the mode of the generator ' - 'to produce samples.') -tf.app.flags.DEFINE_boolean('critic_update_dis_vars', False, - 'Whether the critic ' - 'updates the discriminator variables.') - -## Training FLAGS. -tf.app.flags.DEFINE_string( - 'gen_training_strategy', 'reinforce', - "Method for training the Generator. One of ['cross_entropy', 'reinforce']") -tf.app.flags.DEFINE_string( - 'generator_optimizer', 'adam', - "Type of Generator optimizer. One of ['sgd', 'adam']") -tf.app.flags.DEFINE_float('grad_clipping', 10., 'Norm for gradient clipping.') -tf.app.flags.DEFINE_float('advantage_clipping', 5., 'Clipping for advantages.') -tf.app.flags.DEFINE_string( - 'baseline_method', None, - "Approach for baseline. One of ['critic', 'dis_batch', 'ema', None]") -tf.app.flags.DEFINE_float('perplexity_threshold', 15000, - 'Limit for perplexity before terminating job.') -tf.app.flags.DEFINE_float('zoneout_drop_prob', 0.1, - 'Probability for dropping parameter for zoneout.') -tf.app.flags.DEFINE_float('keep_prob', 0.5, - 'Probability for keeping parameter for dropout.') - -## Logging and evaluation FLAGS. -tf.app.flags.DEFINE_integer('print_every', 250, - 'Frequency to print and log the ' - 'outputs of the model.') -tf.app.flags.DEFINE_integer('max_num_to_print', 5, - 'Number of samples to log/print.') -tf.app.flags.DEFINE_boolean('print_verbose', False, 'Whether to print in full.') -tf.app.flags.DEFINE_integer('summaries_every', 100, - 'Frequency to compute summaries.') -tf.app.flags.DEFINE_boolean('eval_language_model', False, - 'Whether to evaluate on ' - 'all words as in language modeling.') -tf.app.flags.DEFINE_float('eval_interval_secs', 60, - 'Delay for evaluating model.') -tf.app.flags.DEFINE_integer( - 'n_gram_eval', 4, """The degree of the n-grams to use for evaluation.""") -tf.app.flags.DEFINE_integer( - 'epoch_size_override', None, - 'If an integer, this dictates the size of the epochs and will potentially ' - 'not iterate over all the data.') -tf.app.flags.DEFINE_integer('eval_epoch_size_override', None, - 'Number of evaluation steps.') - -## Directories and checkpoints. 
-tf.app.flags.DEFINE_string('base_directory', '/tmp/maskGAN_v0.00',
-                           'Base directory for the logging, events and graph.')
-tf.app.flags.DEFINE_string('data_set', 'ptb', 'Data set to operate on. One of '
-                           "['ptb', 'imdb'].")
-tf.app.flags.DEFINE_string('data_dir', '/tmp/data/ptb',
-                           'Directory for the training data.')
-tf.app.flags.DEFINE_string(
-    'language_model_ckpt_dir', None,
-    'Directory storing checkpoints to initialize the model. Pretrained models '
-    'are stored at /tmp/maskGAN/pretrained/')
-tf.app.flags.DEFINE_string(
-    'language_model_ckpt_dir_reversed', None,
-    'Directory storing checkpoints of reversed models to initialize the '
-    'model. Pretrained models are stored at /tmp/PTB/pretrained_reversed')
-tf.app.flags.DEFINE_string(
-    'maskgan_ckpt', None,
-    'Override which checkpoint file to use to restore the '
-    'model. A pretrained seq2seq_zaremba model is stored at '
-    '/tmp/maskGAN/pretrain/seq2seq_zaremba/train/model.ckpt-64912')
-
-tf.app.flags.DEFINE_boolean('wasserstein_objective', False,
-                            '(DEPRECATED) Whether to use the WGAN training.')
-tf.app.flags.DEFINE_integer('num_rollouts', 1,
-                            'The number of rolled out predictions to make.')
-tf.app.flags.DEFINE_float('c_lower', -0.01, 'Lower bound for weights.')
-tf.app.flags.DEFINE_float('c_upper', 0.01, 'Upper bound for weights.')
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def create_hparams():
-  """Create the hparams object for generic training hyperparameters."""
-  hparams = tf.contrib.training.HParams(
-      gen_num_layers=2,
-      dis_num_layers=2,
-      gen_rnn_size=740,
-      dis_rnn_size=740,
-      gen_learning_rate=5e-4,
-      dis_learning_rate=5e-3,
-      critic_learning_rate=5e-3,
-      dis_train_iterations=1,
-      gen_learning_rate_decay=1.0,
-      gen_full_learning_rate_steps=1e7,
-      baseline_decay=0.999999,
-      rl_discount_rate=0.9,
-      gen_vd_keep_prob=0.5,
-      dis_vd_keep_prob=0.5,
-      dis_pretrain_learning_rate=5e-3,
-      dis_num_filters=128,
-      dis_hidden_dim=128,
-      gen_nas_keep_prob_0=0.85,
-      gen_nas_keep_prob_1=0.55,
-      dis_nas_keep_prob_0=0.85,
-      dis_nas_keep_prob_1=0.55)
-  # Command line flags override any of the preceding hyperparameter values
-  # (see the sketch further below).
-  if FLAGS.hparams:
-    hparams = hparams.parse(FLAGS.hparams)
-  return hparams
-
-
-def create_MaskGAN(hparams, is_training):
-  """Create the MaskGAN model.
-
-  Args:
-    hparams:  Hyperparameters for the MaskGAN.
-    is_training:  Boolean indicating operational mode (train/inference).
-
-  Returns:
-    model:  Namedtuple for specifying the MaskGAN.
-  """
-  global_step = tf.Variable(0, name='global_step', trainable=False)
-
-  new_learning_rate = tf.placeholder(tf.float32, [], name='new_learning_rate')
-  learning_rate = tf.Variable(0.0, name='learning_rate', trainable=False)
-  learning_rate_update = tf.assign(learning_rate, new_learning_rate)
-
-  new_rate = tf.placeholder(tf.float32, [], name='new_rate')
-  percent_real_var = tf.Variable(0.0, trainable=False)
-  percent_real_update = tf.assign(percent_real_var, new_rate)
-
-  ## Placeholders.
-  inputs = tf.placeholder(
-      tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length])
-  targets = tf.placeholder(
-      tf.int32, shape=[FLAGS.batch_size, FLAGS.sequence_length])
-  present = tf.placeholder(
-      tf.bool, shape=[FLAGS.batch_size, FLAGS.sequence_length])
-  # TODO(adai): Placeholder for IMDB label.
-
-  ## Real Sequence is the targets.
-  real_sequence = targets
-
-  ## Fake Sequence from the Generator.
-  # TODO(adai): Generator must have IMDB labels placeholder.
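
# A brief aside before the generator construction below: the --hparams flag
# parsed in create_hparams above overrides the defaults, following
# tf.contrib.training.HParams semantics. A small sketch with illustrative
# values:
hp = tf.contrib.training.HParams(gen_rnn_size=740, rl_discount_rate=0.9)
hp = hp.parse('gen_rnn_size=650,rl_discount_rate=0.8835659')
print(hp.gen_rnn_size)      # 650 -- overridden by the flag string
print(hp.rl_discount_rate)  # 0.8835659
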
- (fake_sequence, fake_logits, fake_log_probs, fake_gen_initial_state, - fake_gen_final_state, _) = model_construction.create_generator( - hparams, - inputs, - targets, - present, - is_training=is_training, - is_validating=False) - (_, eval_logits, _, eval_initial_state, eval_final_state, - _) = model_construction.create_generator( - hparams, - inputs, - targets, - present, - is_training=False, - is_validating=True, - reuse=True) - - ## Discriminator. - fake_predictions = model_construction.create_discriminator( - hparams, - fake_sequence, - is_training=is_training, - inputs=inputs, - present=present) - real_predictions = model_construction.create_discriminator( - hparams, - real_sequence, - is_training=is_training, - reuse=True, - inputs=inputs, - present=present) - - ## Critic. - # The critic will be used to estimate the forward rewards to the Generator. - if FLAGS.baseline_method == 'critic': - est_state_values = model_construction.create_critic( - hparams, fake_sequence, is_training=is_training) - else: - est_state_values = None - - ## Discriminator Loss. - [dis_loss, dis_loss_fake, dis_loss_real] = model_losses.create_dis_loss( - fake_predictions, real_predictions, present) - - ## Average log-perplexity for only missing words. However, to do this, - # the logits are still computed using teacher forcing, that is, the ground - # truth tokens are fed in at each time point to be valid. - avg_log_perplexity = model_losses.calculate_log_perplexity( - eval_logits, targets, present) - - ## Generator Objective. - # 1. Cross Entropy losses on missing tokens. - fake_cross_entropy_losses = model_losses.create_masked_cross_entropy_loss( - targets, present, fake_logits) - - # 2. GAN REINFORCE losses. - [ - fake_RL_loss, fake_log_probs, fake_rewards, fake_advantages, - fake_baselines, fake_averages_op, critic_loss, cumulative_rewards - ] = model_losses.calculate_reinforce_objective( - hparams, fake_log_probs, fake_predictions, present, est_state_values) - - ## Pre-training. - if FLAGS.gen_pretrain_steps: - raise NotImplementedError - # # TODO(liamfedus): Rewrite this. - # fwd_cross_entropy_loss = tf.reduce_mean(fwd_cross_entropy_losses) - # gen_pretrain_op = model_optimization.create_gen_pretrain_op( - # hparams, fwd_cross_entropy_loss, global_step) - else: - gen_pretrain_op = None - if FLAGS.dis_pretrain_steps: - dis_pretrain_op = model_optimization.create_dis_pretrain_op( - hparams, dis_loss, global_step) - else: - dis_pretrain_op = None - - ## Generator Train Op. - # 1. Cross-Entropy. - if FLAGS.gen_training_strategy == 'cross_entropy': - gen_loss = tf.reduce_mean(fake_cross_entropy_losses) - [gen_train_op, gen_grads, - gen_vars] = model_optimization.create_gen_train_op( - hparams, learning_rate, gen_loss, global_step, mode='MINIMIZE') - - # 2. GAN (REINFORCE) - elif FLAGS.gen_training_strategy == 'reinforce': - gen_loss = fake_RL_loss - [gen_train_op, gen_grads, - gen_vars] = model_optimization.create_reinforce_gen_train_op( - hparams, learning_rate, gen_loss, fake_averages_op, global_step) - - else: - raise NotImplementedError - - ## Discriminator Train Op. - dis_train_op, dis_grads, dis_vars = model_optimization.create_dis_train_op( - hparams, dis_loss, global_step) - - ## Critic Train Op. - if critic_loss is not None: - [critic_train_op, _, _] = model_optimization.create_critic_train_op( - hparams, critic_loss, global_step) - dis_train_op = tf.group(dis_train_op, critic_train_op) - - ## Summaries. 
- with tf.name_scope('general'): - tf.summary.scalar('percent_real', percent_real_var) - tf.summary.scalar('learning_rate', learning_rate) - - with tf.name_scope('generator_objectives'): - tf.summary.scalar('gen_objective', tf.reduce_mean(gen_loss)) - tf.summary.scalar('gen_loss_cross_entropy', - tf.reduce_mean(fake_cross_entropy_losses)) - - with tf.name_scope('REINFORCE'): - with tf.name_scope('objective'): - tf.summary.scalar('fake_RL_loss', tf.reduce_mean(fake_RL_loss)) - - with tf.name_scope('rewards'): - helper.variable_summaries(cumulative_rewards, 'rewards') - - with tf.name_scope('advantages'): - helper.variable_summaries(fake_advantages, 'advantages') - - with tf.name_scope('baselines'): - helper.variable_summaries(fake_baselines, 'baselines') - - with tf.name_scope('log_probs'): - helper.variable_summaries(fake_log_probs, 'log_probs') - - with tf.name_scope('discriminator_losses'): - tf.summary.scalar('dis_loss', dis_loss) - tf.summary.scalar('dis_loss_fake_sequence', dis_loss_fake) - tf.summary.scalar('dis_loss_prob_fake_sequence', tf.exp(-dis_loss_fake)) - tf.summary.scalar('dis_loss_real_sequence', dis_loss_real) - tf.summary.scalar('dis_loss_prob_real_sequence', tf.exp(-dis_loss_real)) - - if critic_loss is not None: - with tf.name_scope('critic_losses'): - tf.summary.scalar('critic_loss', critic_loss) - - with tf.name_scope('logits'): - helper.variable_summaries(fake_logits, 'fake_logits') - - for v, g in zip(gen_vars, gen_grads): - helper.variable_summaries(v, v.op.name) - helper.variable_summaries(g, 'grad/' + v.op.name) - - for v, g in zip(dis_vars, dis_grads): - helper.variable_summaries(v, v.op.name) - helper.variable_summaries(g, 'grad/' + v.op.name) - - merge_summaries_op = tf.summary.merge_all() - text_summary_placeholder = tf.placeholder(tf.string) - text_summary_op = tf.summary.text('Samples', text_summary_placeholder) - - # Model saver. - saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep=5) - - # Named tuple that captures elements of the MaskGAN model. - Model = collections.namedtuple('Model', [ - 'inputs', 'targets', 'present', 'percent_real_update', 'new_rate', - 'fake_sequence', 'fake_logits', 'fake_rewards', 'fake_baselines', - 'fake_advantages', 'fake_log_probs', 'fake_predictions', - 'real_predictions', 'fake_cross_entropy_losses', 'fake_gen_initial_state', - 'fake_gen_final_state', 'eval_initial_state', 'eval_final_state', - 'avg_log_perplexity', 'dis_loss', 'gen_loss', 'critic_loss', - 'cumulative_rewards', 'dis_train_op', 'gen_train_op', 'gen_pretrain_op', - 'dis_pretrain_op', 'merge_summaries_op', 'global_step', - 'new_learning_rate', 'learning_rate_update', 'saver', 'text_summary_op', - 'text_summary_placeholder' - ]) - - model = Model( - inputs, targets, present, percent_real_update, new_rate, fake_sequence, - fake_logits, fake_rewards, fake_baselines, fake_advantages, - fake_log_probs, fake_predictions, real_predictions, - fake_cross_entropy_losses, fake_gen_initial_state, fake_gen_final_state, - eval_initial_state, eval_final_state, avg_log_perplexity, dis_loss, - gen_loss, critic_loss, cumulative_rewards, dis_train_op, gen_train_op, - gen_pretrain_op, dis_pretrain_op, merge_summaries_op, global_step, - new_learning_rate, learning_rate_update, saver, text_summary_op, - text_summary_placeholder) - return model - - -def compute_geometric_average(percent_captured): - """Compute the geometric average of the n-gram metrics.""" - - res = 1. 
- for _, n_gram_percent in percent_captured.iteritems(): - res *= n_gram_percent - - return np.power(res, 1. / float(len(percent_captured))) - - -def compute_arithmetic_average(percent_captured): - """Compute the arithmetic average of the n-gram metrics.""" - N = len(percent_captured) - - res = 0. - for _, n_gram_percent in percent_captured.iteritems(): - res += n_gram_percent - - return res / float(N) - - -def get_iterator(data): - """Return the data iterator.""" - if FLAGS.data_set == 'ptb': - iterator = ptb_loader.ptb_iterator(data, FLAGS.batch_size, - FLAGS.sequence_length, - FLAGS.epoch_size_override) - elif FLAGS.data_set == 'imdb': - iterator = imdb_loader.imdb_iterator(data, FLAGS.batch_size, - FLAGS.sequence_length) - return iterator - - -def train_model(hparams, data, log_dir, log, id_to_word, data_ngram_counts): - """Train model. - - Args: - hparams: Hyperparameters for the MaskGAN. - data: Data to evaluate. - log_dir: Directory to save checkpoints. - log: Readable log for the experiment. - id_to_word: Dictionary of indices to words. - data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the - data_set. - """ - print('Training model.') - tf.logging.info('Training model.') - - # Boolean indicating operational mode. - is_training = True - - # Write all the information to the logs. - log.write('hparams\n') - log.write(str(hparams)) - log.flush() - - is_chief = FLAGS.task == 0 - - with tf.Graph().as_default(): - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): - container_name = '' - with tf.container(container_name): - # Construct the model. - if FLAGS.num_rollouts == 1: - model = create_MaskGAN(hparams, is_training) - elif FLAGS.num_rollouts > 1: - model = rollout.create_rollout_MaskGAN(hparams, is_training) - else: - raise ValueError - - print('\nTrainable Variables in Graph:') - for v in tf.trainable_variables(): - print(v) - - ## Retrieve the initial savers. - init_savers = model_utils.retrieve_init_savers(hparams) - - ## Initial saver function to supervisor. - init_fn = partial(model_utils.init_fn, init_savers) - - # Create the supervisor. It will take care of initialization, - # summaries, checkpoints, and recovery. - sv = tf.train.Supervisor( - logdir=log_dir, - is_chief=is_chief, - saver=model.saver, - global_step=model.global_step, - save_model_secs=60, - recovery_wait_secs=30, - summary_op=None, - init_fn=init_fn) - - # Get an initialized, and possibly recovered session. Launch the - # services: Checkpointing, Summaries, step counting. - # - # When multiple replicas of this program are running the services are - # only launched by the 'chief' replica. - with sv.managed_session(FLAGS.master) as sess: - - ## Pretrain the generator. - if FLAGS.gen_pretrain_steps: - pretrain_mask_gan.pretrain_generator(sv, sess, model, data, log, - id_to_word, data_ngram_counts, - is_chief) - - ## Pretrain the discriminator. - if FLAGS.dis_pretrain_steps: - pretrain_mask_gan.pretrain_discriminator( - sv, sess, model, data, log, id_to_word, data_ngram_counts, - is_chief) - - # Initial indicators for printing and summarizing. - print_step_division = -1 - summary_step_division = -1 - - # Run iterative computation in a loop. - while not sv.ShouldStop(): - is_present_rate = FLAGS.is_present_rate - - if FLAGS.is_present_rate_decay is not None: - is_present_rate *= (1. - FLAGS.is_present_rate_decay) - - model_utils.assign_percent_real(sess, model.percent_real_update, - model.new_rate, is_present_rate) - - # GAN training. 
- avg_epoch_gen_loss, avg_epoch_dis_loss = [], [] - cumulative_costs = 0. - gen_iters = 0 - - # Generator and Discriminator statefulness initial evaluation. - # TODO(liamfedus): Throughout the code I am implicitly assuming - # that the Generator and Discriminator are equal sized. - [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run( - [model.eval_initial_state, model.fake_gen_initial_state]) - dis_initial_state_eval = fake_gen_initial_state_eval - - # Save zeros state to reset later. - zeros_state = fake_gen_initial_state_eval - - ## Offset Discriminator. - if FLAGS.ps_tasks == 0: - dis_offset = 1 - else: - dis_offset = FLAGS.task * 1000 + 1 - dis_iterator = get_iterator(data) - - for i in range(dis_offset): - try: - dis_x, dis_y, _ = next(dis_iterator) - except StopIteration: - dis_iterator = get_iterator(data) - dis_initial_state_eval = zeros_state - dis_x, dis_y, _ = next(dis_iterator) - - p = model_utils.generate_mask() - - # Construct the train feed. - train_feed = { - model.inputs: dis_x, - model.targets: dis_y, - model.present: p - } - - if FLAGS.data_set == 'ptb': - # Statefulness of the Generator being used for Discriminator. - for i, (c, h) in enumerate(model.fake_gen_initial_state): - train_feed[c] = dis_initial_state_eval[i].c - train_feed[h] = dis_initial_state_eval[i].h - - # Determine the state had the Generator run over real data. We - # use this state for the Discriminator. - [dis_initial_state_eval] = sess.run( - [model.fake_gen_final_state], train_feed) - - ## Training loop. - iterator = get_iterator(data) - gen_initial_state_eval = zeros_state - - if FLAGS.ps_tasks > 0: - gen_offset = FLAGS.task * 1000 + 1 - for i in range(gen_offset): - try: - next(iterator) - except StopIteration: - dis_iterator = get_iterator(data) - dis_initial_state_eval = zeros_state - next(dis_iterator) - - for x, y, _ in iterator: - for _ in xrange(hparams.dis_train_iterations): - try: - dis_x, dis_y, _ = next(dis_iterator) - except StopIteration: - dis_iterator = get_iterator(data) - dis_initial_state_eval = zeros_state - dis_x, dis_y, _ = next(dis_iterator) - - if FLAGS.data_set == 'ptb': - [dis_initial_state_eval] = sess.run( - [model.fake_gen_initial_state]) - - p = model_utils.generate_mask() - - # Construct the train feed. - train_feed = { - model.inputs: dis_x, - model.targets: dis_y, - model.present: p - } - - # Statefulness for the Discriminator. - if FLAGS.data_set == 'ptb': - for i, (c, h) in enumerate(model.fake_gen_initial_state): - train_feed[c] = dis_initial_state_eval[i].c - train_feed[h] = dis_initial_state_eval[i].h - - _, dis_loss_eval, step = sess.run( - [model.dis_train_op, model.dis_loss, model.global_step], - feed_dict=train_feed) - - # Determine the state had the Generator run over real data. - # Use this state for the Discriminator. - [dis_initial_state_eval] = sess.run( - [model.fake_gen_final_state], train_feed) - - # Randomly mask out tokens. - p = model_utils.generate_mask() - - # Construct the train feed. - train_feed = {model.inputs: x, model.targets: y, model.present: p} - - # Statefulness for Generator. - if FLAGS.data_set == 'ptb': - tf.logging.info('Generator is stateful.') - print('Generator is stateful.') - # Statefulness for *evaluation* Generator. - for i, (c, h) in enumerate(model.eval_initial_state): - train_feed[c] = gen_initial_state_eval[i].c - train_feed[h] = gen_initial_state_eval[i].h - - # Statefulness for Generator. 
-                for i, (c, h) in enumerate(model.fake_gen_initial_state):
-                  train_feed[c] = fake_gen_initial_state_eval[i].c
-                  train_feed[h] = fake_gen_initial_state_eval[i].h
-
-              # Determine whether to decay learning rate.
-              lr_decay = hparams.gen_learning_rate_decay**max(
-                  step + 1 - hparams.gen_full_learning_rate_steps, 0.0)
-
-              # Assign learning rate.
-              gen_learning_rate = hparams.gen_learning_rate * lr_decay
-              model_utils.assign_learning_rate(sess, model.learning_rate_update,
-                                               model.new_learning_rate,
-                                               gen_learning_rate)
-
-              [_, gen_loss_eval, gen_log_perplexity_eval, step] = sess.run(
-                  [
-                      model.gen_train_op, model.gen_loss,
-                      model.avg_log_perplexity, model.global_step
-                  ],
-                  feed_dict=train_feed)
-
-              cumulative_costs += gen_log_perplexity_eval
-              gen_iters += 1
-
-              # Determine the state had the Generator run over real data.
-              [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run(
-                  [model.eval_final_state,
-                   model.fake_gen_final_state], train_feed)
-
-              avg_epoch_dis_loss.append(dis_loss_eval)
-              avg_epoch_gen_loss.append(gen_loss_eval)
-
-              ## Summaries.
-              # Calculate rolling perplexity.
-              perplexity = np.exp(cumulative_costs / gen_iters)
-
-              if is_chief and (step / FLAGS.summaries_every >
-                               summary_step_division):
-                summary_step_division = step / FLAGS.summaries_every
-
-                # Confirm perplexity is not infinite.
-                if (not np.isfinite(perplexity) or
-                    perplexity >= FLAGS.perplexity_threshold):
-                  print('Training raising FloatingPointError.')
-                  raise FloatingPointError(
-                      'Training infinite perplexity: %.3f' % perplexity)
-
-                # Graph summaries.
-                summary_str = sess.run(
-                    model.merge_summaries_op, feed_dict=train_feed)
-                sv.SummaryComputed(sess, summary_str)
-
-                # Summary: n-gram
-                avg_percent_captured = {'2': 0., '3': 0., '4': 0.}
-                for n, data_ngram_count in data_ngram_counts.iteritems():
-                  batch_percent_captured = evaluation_utils.sequence_ngram_evaluation(
-                      sess, model.fake_sequence, log, train_feed,
-                      data_ngram_count, int(n))
-                  avg_percent_captured[n] = batch_percent_captured
-                  summary_percent_str = tf.Summary(value=[
-                      tf.Summary.Value(
-                          tag='general/%s-grams_percent_correct' % n,
-                          simple_value=batch_percent_captured)
-                  ])
-                  sv.SummaryComputed(
-                      sess, summary_percent_str, global_step=step)
-
-                # Summary: geometric_avg
-                geometric_avg = compute_geometric_average(avg_percent_captured)
-                summary_geometric_avg_str = tf.Summary(value=[
-                    tf.Summary.Value(
-                        tag='general/geometric_avg', simple_value=geometric_avg)
-                ])
-                sv.SummaryComputed(
-                    sess, summary_geometric_avg_str, global_step=step)
-
-                # Summary: arithmetic_avg
-                arithmetic_avg = compute_arithmetic_average(
-                    avg_percent_captured)
-                summary_arithmetic_avg_str = tf.Summary(value=[
-                    tf.Summary.Value(
-                        tag='general/arithmetic_avg',
-                        simple_value=arithmetic_avg)
-                ])
-                sv.SummaryComputed(
-                    sess, summary_arithmetic_avg_str, global_step=step)
-
-                # Summary: perplexity
-                summary_perplexity_str = tf.Summary(value=[
-                    tf.Summary.Value(
-                        tag='general/perplexity', simple_value=perplexity)
-                ])
-                sv.SummaryComputed(
-                    sess, summary_perplexity_str, global_step=step)
-
-              ## Printing and logging
-              if is_chief and (step / FLAGS.print_every > print_step_division):
-                print_step_division = (step / FLAGS.print_every)
-                print('global_step: %d' % step)
-                print('  perplexity: %.3f' % perplexity)
-                print('  gen_learning_rate: %.6f' % gen_learning_rate)
-                log.write('global_step: %d\n' % step)
-                log.write('  perplexity: %.3f\n' % perplexity)
-                log.write('  gen_learning_rate: %.6f\n' % gen_learning_rate)
-
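
# Worked example of the two n-gram aggregates used above and below
# (compute_geometric_average / compute_arithmetic_average), with made-up
# capture rates:
captured = {'2': 0.8, '3': 0.4, '4': 0.2}
geometric = (0.8 * 0.4 * 0.2) ** (1. / 3)  # 0.064 ** (1/3) = 0.40
arithmetic = (0.8 + 0.4 + 0.2) / 3.        # ~= 0.467
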
-        # Average percent captured for each of the n-grams.
-        avg_percent_captured = {'2': 0., '3': 0., '4': 0.}
-        for n, data_ngram_count in data_ngram_counts.iteritems():
-          batch_percent_captured = evaluation_utils.sequence_ngram_evaluation(
-              sess, model.fake_sequence, log, train_feed,
-              data_ngram_count, int(n))
-          avg_percent_captured[n] = batch_percent_captured
-          print(' percent of %s-grams captured: %.3f.' %
-                (n, batch_percent_captured))
-          log.write(' percent of %s-grams captured: %.3f.\n' %
-                    (n, batch_percent_captured))
-        geometric_avg = compute_geometric_average(avg_percent_captured)
-        print(' geometric_avg: %.3f.' % geometric_avg)
-        log.write(' geometric_avg: %.3f.\n' % geometric_avg)
-        arithmetic_avg = compute_arithmetic_average(
-            avg_percent_captured)
-        print(' arithmetic_avg: %.3f.' % arithmetic_avg)
-        log.write(' arithmetic_avg: %.3f.\n' % arithmetic_avg)
-
-        evaluation_utils.print_and_log_losses(
-            log, step, is_present_rate, avg_epoch_dis_loss,
-            avg_epoch_gen_loss)
-
-        if FLAGS.gen_training_strategy == 'reinforce':
-          evaluation_utils.generate_RL_logs(sess, model, log,
-                                            id_to_word, train_feed)
-        else:
-          evaluation_utils.generate_logs(sess, model, log, id_to_word,
-                                         train_feed)
-        log.flush()
-
-  log.close()
-
-
-def evaluate_once(data, sv, model, sess, train_dir, log, id_to_word,
-                  data_ngram_counts, eval_saver):
-  """Evaluate model for a number of steps.
-
-  Args:
-    data: Dataset.
-    sv: Supervisor.
-    model: The GAN model we have just built.
-    sess: A session to use.
-    train_dir: Path to a directory containing checkpoints.
-    log: Evaluation log.
-    id_to_word: Dictionary of indices to words.
-    data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the
-      data_set.
-    eval_saver: Evaluation saver.
-  """
-  tf.logging.info('Evaluate Once.')
-  # Load the last model checkpoint, or initialize the graph.
-  model_save_path = tf.train.latest_checkpoint(train_dir)
-  if not model_save_path:
-    tf.logging.warning('No checkpoint yet in: %s', train_dir)
-    return
-
-  tf.logging.info('Starting eval of: %s' % model_save_path)
-  tf.logging.info('Only restoring trainable variables.')
-  eval_saver.restore(sess, model_save_path)
-
-  # Run the requested number of evaluation steps.
-  avg_epoch_gen_loss, avg_epoch_dis_loss = [], []
-  cumulative_costs = 0.
-
-  # Average percent captured for each of the n-grams.
-  avg_percent_captured = {'2': 0., '3': 0., '4': 0.}
-
-  # Set a random seed to keep fixed mask.
-  np.random.seed(0)
-  gen_iters = 0
-
-  # Generator statefulness over the epoch.
-  # TODO(liamfedus): Check this.
-  [gen_initial_state_eval, fake_gen_initial_state_eval] = sess.run(
-      [model.eval_initial_state, model.fake_gen_initial_state])
-
-  if FLAGS.eval_language_model:
-    is_present_rate = 0.
-    tf.logging.info('Overriding is_present_rate=0. for evaluation.')
-    print('Overriding is_present_rate=0. for evaluation.')
-
-  iterator = get_iterator(data)
-
-  for x, y, _ in iterator:
-    if FLAGS.eval_language_model:
-      is_present_rate = 0.
-    else:
-      is_present_rate = FLAGS.is_present_rate
-      tf.logging.info('Evaluating on is_present_rate=%.3f.' % is_present_rate)
-
-    model_utils.assign_percent_real(sess, model.percent_real_update,
-                                    model.new_rate, is_present_rate)
-
-    # Randomly mask out tokens.
-    p = model_utils.generate_mask()
-
-    eval_feed = {model.inputs: x, model.targets: y, model.present: p}
-
-    if FLAGS.data_set == 'ptb':
-      # Statefulness for *evaluation* Generator.
-      for i, (c, h) in enumerate(model.eval_initial_state):
-        eval_feed[c] = gen_initial_state_eval[i].c
-        eval_feed[h] = gen_initial_state_eval[i].h
-
-      # Statefulness for the Generator.
-      for i, (c, h) in enumerate(model.fake_gen_initial_state):
-        eval_feed[c] = fake_gen_initial_state_eval[i].c
-        eval_feed[h] = fake_gen_initial_state_eval[i].h
-
-    [
-        gen_log_perplexity_eval, dis_loss_eval, gen_loss_eval,
-        gen_initial_state_eval, fake_gen_initial_state_eval, step
-    ] = sess.run(
-        [
-            model.avg_log_perplexity, model.dis_loss, model.gen_loss,
-            model.eval_final_state, model.fake_gen_final_state,
-            model.global_step
-        ],
-        feed_dict=eval_feed)
-
-    for n, data_ngram_count in data_ngram_counts.iteritems():
-      batch_percent_captured = evaluation_utils.sequence_ngram_evaluation(
-          sess, model.fake_sequence, log, eval_feed, data_ngram_count, int(n))
-      avg_percent_captured[n] += batch_percent_captured
-
-    cumulative_costs += gen_log_perplexity_eval
-
-    avg_epoch_dis_loss.append(dis_loss_eval)
-    avg_epoch_gen_loss.append(gen_loss_eval)
-
-    gen_iters += 1
-
-  # Calculate rolling metrics.
-  perplexity = np.exp(cumulative_costs / gen_iters)
-  for n, _ in avg_percent_captured.iteritems():
-    avg_percent_captured[n] /= gen_iters
-
-  # Confirm perplexity is not infinite.
-  if not np.isfinite(perplexity) or perplexity >= FLAGS.perplexity_threshold:
-    print('Evaluation raising FloatingPointError.')
-    raise FloatingPointError(
-        'Evaluation infinite perplexity: %.3f' % perplexity)
-
-  ## Printing and logging.
-  evaluation_utils.print_and_log_losses(log, step, is_present_rate,
-                                        avg_epoch_dis_loss, avg_epoch_gen_loss)
-  print(' perplexity: %.3f' % perplexity)
-  log.write(' perplexity: %.3f\n' % perplexity)
-
-  for n, n_gram_percent in avg_percent_captured.iteritems():
-    n = int(n)
-    print(' percent of %d-grams captured: %.3f.' % (n, n_gram_percent))
-    log.write(' percent of %d-grams captured: %.3f.\n' % (n, n_gram_percent))
-
-  samples = evaluation_utils.generate_logs(sess, model, log, id_to_word,
-                                           eval_feed)
-
-  ## Summaries.
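Before the summary writing below: the rolling perplexity used in both loops is the exponent of the mean per-batch log-perplexity, so it can be checked by hand (the log-perplexity values here are made up):

```python
import numpy as np

log_perplexities = [4.61, 4.32, 4.15]  # e.g. successive model.avg_log_perplexity values
cumulative_costs = np.sum(log_perplexities)
perplexity = np.exp(cumulative_costs / len(log_perplexities))
print('perplexity: %.3f' % perplexity)  # exp(4.360) ~ 78.3
```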
- summary_str = sess.run(model.merge_summaries_op, feed_dict=eval_feed) - sv.SummaryComputed(sess, summary_str) - - # Summary: text - summary_str = sess.run(model.text_summary_op, - {model.text_summary_placeholder: '\n\n'.join(samples)}) - sv.SummaryComputed(sess, summary_str, global_step=step) - - # Summary: n-gram - for n, n_gram_percent in avg_percent_captured.iteritems(): - n = int(n) - summary_percent_str = tf.Summary(value=[ - tf.Summary.Value( - tag='general/%d-grams_percent_correct' % n, - simple_value=n_gram_percent) - ]) - sv.SummaryComputed(sess, summary_percent_str, global_step=step) - - # Summary: geometric_avg - geometric_avg = compute_geometric_average(avg_percent_captured) - summary_geometric_avg_str = tf.Summary(value=[ - tf.Summary.Value(tag='general/geometric_avg', simple_value=geometric_avg) - ]) - sv.SummaryComputed(sess, summary_geometric_avg_str, global_step=step) - - # Summary: arithmetic_avg - arithmetic_avg = compute_arithmetic_average(avg_percent_captured) - summary_arithmetic_avg_str = tf.Summary(value=[ - tf.Summary.Value( - tag='general/arithmetic_avg', simple_value=arithmetic_avg) - ]) - sv.SummaryComputed(sess, summary_arithmetic_avg_str, global_step=step) - - # Summary: perplexity - summary_perplexity_str = tf.Summary(value=[ - tf.Summary.Value(tag='general/perplexity', simple_value=perplexity) - ]) - sv.SummaryComputed(sess, summary_perplexity_str, global_step=step) - - -def evaluate_model(hparams, data, train_dir, log, id_to_word, - data_ngram_counts): - """Evaluate MaskGAN model. - - Args: - hparams: Hyperparameters for the MaskGAN. - data: Data to evaluate. - train_dir: Path to a directory containing checkpoints. - id_to_word: Dictionary of indices to words. - data_ngram_counts: Dictionary of hashed(n-gram tuples) to counts in the - data_set. - """ - tf.logging.error('Evaluate model.') - - # Boolean indicating operational mode. - is_training = False - - if FLAGS.mode == MODE_VALIDATION: - logdir = FLAGS.base_directory + '/validation' - elif FLAGS.mode == MODE_TRAIN_EVAL: - logdir = FLAGS.base_directory + '/train_eval' - elif FLAGS.mode == MODE_TEST: - logdir = FLAGS.base_directory + '/test' - else: - raise NotImplementedError - - # Wait for a checkpoint to exist. - print(train_dir) - print(tf.train.latest_checkpoint(train_dir)) - while not tf.train.latest_checkpoint(train_dir): - tf.logging.error('Waiting for checkpoint...') - print('Waiting for checkpoint...') - time.sleep(10) - - with tf.Graph().as_default(): - # Use a separate container for each trial - container_name = '' - with tf.container(container_name): - - # Construct the model. - if FLAGS.num_rollouts == 1: - model = create_MaskGAN(hparams, is_training) - elif FLAGS.num_rollouts > 1: - model = rollout.create_rollout_MaskGAN(hparams, is_training) - else: - raise ValueError - - # Create the supervisor. It will take care of initialization, summaries, - # checkpoints, and recovery. We only pass the trainable variables - # to load since things like baselines keep batch_size which may not - # match between training and evaluation. 
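`compute_geometric_average` and `compute_arithmetic_average` are defined earlier in this file and are not shown in this hunk; assuming they are the usual means over the per-order capture rates, the two statistics behave as below. The geometric mean collapses toward zero whenever any single n-gram order is rarely captured, which is why both are logged.

```python
import numpy as np

avg_percent_captured = {'2': 0.80, '3': 0.40, '4': 0.10}  # made-up rates
values = list(avg_percent_captured.values())

arithmetic_avg = float(np.mean(values))                        # ~0.433
geometric_avg = float(np.prod(values)) ** (1.0 / len(values))  # ~0.317
print(arithmetic_avg, geometric_avg)
```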
-      evaluation_variables = tf.trainable_variables()
-      evaluation_variables.append(model.global_step)
-      eval_saver = tf.train.Saver(var_list=evaluation_variables)
-      sv = tf.train.Supervisor(logdir=logdir)
-      sess = sv.PrepareSession(FLAGS.eval_master, start_standard_services=False)
-
-      tf.logging.info('Before sv.Loop.')
-      sv.Loop(FLAGS.eval_interval_secs, evaluate_once,
-              (data, sv, model, sess, train_dir, log, id_to_word,
-               data_ngram_counts, eval_saver))
-
-      sv.WaitForStop()
-      tf.logging.info('sv.Stop().')
-      sv.Stop()
-
-
-def main(_):
-  hparams = create_hparams()
-  train_dir = FLAGS.base_directory + '/train'
-
-  # Load data set.
-  if FLAGS.data_set == 'ptb':
-    raw_data = ptb_loader.ptb_raw_data(FLAGS.data_dir)
-    train_data, valid_data, test_data, _ = raw_data
-    valid_data_flat = valid_data
-  elif FLAGS.data_set == 'imdb':
-    raw_data = imdb_loader.imdb_raw_data(FLAGS.data_dir)
-    # TODO(liamfedus): Get an IMDB test partition.
-    train_data, valid_data = raw_data
-    valid_data_flat = [word for review in valid_data for word in review]
-  else:
-    raise NotImplementedError
-
-  if FLAGS.mode == MODE_TRAIN or FLAGS.mode == MODE_TRAIN_EVAL:
-    data_set = train_data
-  elif FLAGS.mode == MODE_VALIDATION:
-    data_set = valid_data
-  elif FLAGS.mode == MODE_TEST:
-    data_set = test_data
-  else:
-    raise NotImplementedError
-
-  # Dictionary and reverse dictionary.
-  if FLAGS.data_set == 'ptb':
-    word_to_id = ptb_loader.build_vocab(
-        os.path.join(FLAGS.data_dir, 'ptb.train.txt'))
-  elif FLAGS.data_set == 'imdb':
-    word_to_id = imdb_loader.build_vocab(
-        os.path.join(FLAGS.data_dir, 'vocab.txt'))
-  id_to_word = {v: k for k, v in word_to_id.iteritems()}
-
-  # Dictionary of Training Set n-gram counts.
-  bigram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=2)
-  trigram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=3)
-  fourgram_tuples = n_gram.find_all_ngrams(valid_data_flat, n=4)
-
-  bigram_counts = n_gram.construct_ngrams_dict(bigram_tuples)
-  trigram_counts = n_gram.construct_ngrams_dict(trigram_tuples)
-  fourgram_counts = n_gram.construct_ngrams_dict(fourgram_tuples)
-  print('Unique %d-grams: %d' % (2, len(bigram_counts)))
-  print('Unique %d-grams: %d' % (3, len(trigram_counts)))
-  print('Unique %d-grams: %d' % (4, len(fourgram_counts)))
-
-  data_ngram_counts = {
-      '2': bigram_counts,
-      '3': trigram_counts,
-      '4': fourgram_counts
-  }
-
-  # TODO(liamfedus): This was necessary because there was a problem with our
-  # originally trained IMDB models.  The EOS_INDEX was off by one, which means
-  # two words were mapping to index 86933.  The presence of '' is going
-  # to throw an out-of-vocabulary error.
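`n_gram.find_all_ngrams` and `n_gram.construct_ngrams_dict` above are repo helpers; their observable effect can be approximated with `collections.Counter` (a sketch, not the repo implementation):

```python
from collections import Counter

def find_all_ngrams(tokens, n):
  # Every overlapping n-gram tuple in a flat token list.
  return list(zip(*[tokens[i:] for i in range(n)]))

tokens = ['the', 'cat', 'sat', 'on', 'the', 'cat']
bigram_counts = Counter(find_all_ngrams(tokens, n=2))
print('Unique %d-grams: %d' % (2, len(bigram_counts)))  # 4
print(bigram_counts[('the', 'cat')])                    # 2
```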
- FLAGS.vocab_size = len(id_to_word) - print('Vocab size: %d' % FLAGS.vocab_size) - - tf.gfile.MakeDirs(FLAGS.base_directory) - - if FLAGS.mode == MODE_TRAIN: - log = tf.gfile.GFile( - os.path.join(FLAGS.base_directory, 'train-log.txt'), mode='w') - elif FLAGS.mode == MODE_VALIDATION: - log = tf.gfile.GFile( - os.path.join(FLAGS.base_directory, 'validation-log.txt'), mode='w') - elif FLAGS.mode == MODE_TRAIN_EVAL: - log = tf.gfile.GFile( - os.path.join(FLAGS.base_directory, 'train_eval-log.txt'), mode='w') - else: - log = tf.gfile.GFile( - os.path.join(FLAGS.base_directory, 'test-log.txt'), mode='w') - - if FLAGS.mode == MODE_TRAIN: - train_model(hparams, data_set, train_dir, log, id_to_word, - data_ngram_counts) - - elif FLAGS.mode == MODE_VALIDATION: - evaluate_model(hparams, data_set, train_dir, log, id_to_word, - data_ngram_counts) - elif FLAGS.mode == MODE_TRAIN_EVAL: - evaluate_model(hparams, data_set, train_dir, log, id_to_word, - data_ngram_counts) - - elif FLAGS.mode == MODE_TEST: - evaluate_model(hparams, data_set, train_dir, log, id_to_word, - data_ngram_counts) - - else: - raise NotImplementedError - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/namignizer/.gitignore b/research/namignizer/.gitignore deleted file mode 100644 index 2dae80435..000000000 --- a/research/namignizer/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Remove the pyc files -*.pyc - -# Ignore the model and the data -model/ -data/ diff --git a/research/namignizer/README.md b/research/namignizer/README.md deleted file mode 100644 index 475a08754..000000000 --- a/research/namignizer/README.md +++ /dev/null @@ -1,86 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Namignizer - -Use a variation of the [PTB](https://www.tensorflow.org/versions/r0.8/tutorials/recurrent/index.html#recurrent-neural-networks) model to recognize and generate names using the [Kaggle Baby Name Database](https://www.kaggle.com/kaggle/us-baby-names). - -### API -Namignizer is implemented in Tensorflow 0.8r and uses the python package `pandas` for some data processing. - -#### How to use -Download the data from Kaggle and place it in your data directory (or use the small training data provided). The example data looks like so: - -``` -Id,Name,Year,Gender,Count -1,Mary,1880,F,7065 -2,Anna,1880,F,2604 -3,Emma,1880,F,2003 -4,Elizabeth,1880,F,1939 -5,Minnie,1880,F,1746 -6,Margaret,1880,F,1578 -7,Ida,1880,F,1472 -8,Alice,1880,F,1414 -9,Bertha,1880,F,1320 -``` - -But any data with the two columns: `Name` and `Count` will work. - -With the data, we can then train the model: - -```python -train("data/SmallNames.txt", "model/namignizer", SmallConfig) -``` - -And you will get the output: - -``` -Reading Name data in data/SmallNames.txt -Epoch: 1 Learning rate: 1.000 -0.090 perplexity: 18.539 speed: 282 lps -... -0.890 perplexity: 1.478 speed: 285 lps -0.990 perplexity: 1.477 speed: 284 lps -Epoch: 13 Train Perplexity: 1.477 -``` - -This will as a side effect write model checkpoints to the `model` directory. 
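For reference, the loader behind `train` reduces the Kaggle CSV to per-name totals; `data_utils.read_names`, deleted further down in this patch, boils down to the following pandas aggregation:

```python
import pandas as pd

names_data = pd.read_csv('data/SmallNames.txt')
names_data.Name = names_data.Name.str.lower()

# Sum counts across years and genders so each spelling appears once.
name_data = names_data.groupby('Name')['Count'].sum()
print(name_data.head())
```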
With this you will be able to determine the perplexity your model will give you for any arbitrary set of names like so:
-
-```python
-namignize(["mary", "ida", "gazorpazorp", "houyhnhnms", "bob"],
-    tf.train.latest_checkpoint("model"), SmallConfig)
-```
-You will provide the same config and the same checkpoint directory. This will allow you to use the model you just trained. You will then get a perplexity output for each name like so:
-
-```
-Name mary gives us a perplexity of 1.03105580807
-Name ida gives us a perplexity of 1.07770049572
-Name gazorpazorp gives us a perplexity of 175.940353394
-Name houyhnhnms gives us a perplexity of 9.53870773315
-Name bob gives us a perplexity of 6.03938627243
-```
-
-Finally, you will also be able to generate names using the model like so:
-
-```python
-namignator(tf.train.latest_checkpoint("model"), SmallConfig)
-```
-
-Again, you will need to provide the same config and the same checkpoint directory. This will allow you to use the model you just trained. You will then get a single generated name. Examples of output that I got when using the provided data are:
-
-```
-['b', 'e', 'r', 't', 'h', 'a', '`']
-['m', 'a', 'r', 'y', '`']
-['a', 'n', 'n', 'a', '`']
-['m', 'a', 'r', 'y', '`']
-['b', 'e', 'r', 't', 'h', 'a', '`']
-['a', 'n', 'n', 'a', '`']
-['e', 'l', 'i', 'z', 'a', 'b', 'e', 't', 'h', '`']
-```
-
-Notice that each name ends with a backtick. This marks the end of the name.
-
-### Contact Info
-
-Feel free to reach out to me at knt(at google) or k.nathaniel.tucker(at gmail)
diff --git a/research/namignizer/data_utils.py b/research/namignizer/data_utils.py
deleted file mode 100644
index 432021502..000000000
--- a/research/namignizer/data_utils.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Utilities for parsing Kaggle baby names files."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import os
-
-import numpy as np
-import tensorflow as tf
-import pandas as pd
-
-# the default end of name rep will be zero
-_EON = 0
-
-
-def read_names(names_path):
-  """Read data from downloaded file. See SmallNames.txt for example format
-  or go to https://www.kaggle.com/kaggle/us-baby-names for full lists
-
-  Args:
-    names_path: path to the csv file similar to the example type
-  Returns:
-    Dataset: a namedtuple of two elements: deduped names and their associated
-      counts.
The names contain only 26 chars and are all lower case - """ - names_data = pd.read_csv(names_path) - names_data.Name = names_data.Name.str.lower() - - name_data = names_data.groupby(by=["Name"])["Count"].sum() - name_counts = np.array(name_data.tolist()) - names_deduped = np.array(name_data.index.tolist()) - - Dataset = collections.namedtuple('Dataset', ['Name', 'Count']) - return Dataset(names_deduped, name_counts) - - -def _letter_to_number(letter): - """converts letters to numbers between 1 and 27""" - # ord of lower case 'a' is 97 - return ord(letter) - 96 - - -def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size): - """Takes a list of names and counts like those output from read_names, and - makes an iterator yielding a batch_size by num_steps array of random names - separated by an end of name token. The names are chosen randomly according - to their counts. The batch may end mid-name - - Args: - names: a set of lowercase names composed of 26 characters - counts: a list of the frequency of those names - batch_size: int - num_steps: int - epoch_size: number of batches to yield - Yields: - (x, y): a batch_size by num_steps array of ints representing letters, where - x will be the input and y will be the target - """ - name_distribution = counts / counts.sum() - - for i in range(epoch_size): - data = np.zeros(batch_size * num_steps + 1) - samples = np.random.choice(names, size=batch_size * num_steps // 2, - replace=True, p=name_distribution) - - data_index = 0 - for sample in samples: - if data_index >= batch_size * num_steps: - break - for letter in map(_letter_to_number, sample) + [_EON]: - if data_index >= batch_size * num_steps: - break - data[data_index] = letter - data_index += 1 - - x = data[:batch_size * num_steps].reshape((batch_size, num_steps)) - y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps)) - - yield (x, y) - - -def name_to_batch(name, batch_size, num_steps): - """ Takes a single name and fills a batch with it - - Args: - name: lowercase composed of 26 characters - batch_size: int - num_steps: int - Returns: - x, y: a batch_size by num_steps array of ints representing letters, where - x will be the input and y will be the target. The array is filled up - to the length of the string, the rest is filled with zeros - """ - data = np.zeros(batch_size * num_steps + 1) - - data_index = 0 - for letter in map(_letter_to_number, name) + [_EON]: - data[data_index] = letter - data_index += 1 - - x = data[:batch_size * num_steps].reshape((batch_size, num_steps)) - y = data[1:batch_size * num_steps + 1].reshape((batch_size, num_steps)) - - return x, y diff --git a/research/namignizer/model.py b/research/namignizer/model.py deleted file mode 100644 index 72c5c5ecb..000000000 --- a/research/namignizer/model.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
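To make the encoding above concrete: letters map to 1..26 via `ord(letter) - 96`, 0 is the end-of-name token `_EON`, and the target sequence is the input shifted one step left:

```python
def letter_to_number(letter):
  # ord('a') == 97, so 'a' -> 1 ... 'z' -> 26; 0 is reserved for _EON/padding.
  return ord(letter) - 96

encoded = [letter_to_number(c) for c in 'mary'] + [0]
x, y = encoded[:-1], encoded[1:]
print(x)  # [13, 1, 18, 25]
print(y)  # [1, 18, 25, 0]
```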
-"""RNN model with embeddings""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -class NamignizerModel(object): - """The Namignizer model ~ strongly based on PTB""" - - def __init__(self, is_training, config): - self.batch_size = batch_size = config.batch_size - self.num_steps = num_steps = config.num_steps - size = config.hidden_size - # will always be 27 - vocab_size = config.vocab_size - - # placeholders for inputs - self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) - self._targets = tf.placeholder(tf.int32, [batch_size, num_steps]) - # weights for the loss function - self._weights = tf.placeholder(tf.float32, [batch_size * num_steps]) - - # lstm for our RNN cell (GRU supported too) - lstm_cells = [] - for layer in range(config.num_layers): - lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0) - if is_training and config.keep_prob < 1: - lstm_cell = tf.contrib.rnn.DropoutWrapper( - lstm_cell, output_keep_prob=config.keep_prob) - lstm_cells.append(lstm_cell) - cell = tf.contrib.rnn.MultiRNNCell(lstm_cells) - - self._initial_state = cell.zero_state(batch_size, tf.float32) - - with tf.device("/cpu:0"): - embedding = tf.get_variable("embedding", [vocab_size, size]) - inputs = tf.nn.embedding_lookup(embedding, self._input_data) - - if is_training and config.keep_prob < 1: - inputs = tf.nn.dropout(inputs, config.keep_prob) - - outputs = [] - state = self._initial_state - with tf.variable_scope("RNN"): - for time_step in range(num_steps): - if time_step > 0: - tf.get_variable_scope().reuse_variables() - (cell_output, state) = cell(inputs[:, time_step, :], state) - outputs.append(cell_output) - - output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size]) - softmax_w = tf.get_variable("softmax_w", [size, vocab_size]) - softmax_b = tf.get_variable("softmax_b", [vocab_size]) - logits = tf.matmul(output, softmax_w) + softmax_b - loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example( - [logits], - [tf.reshape(self._targets, [-1])], - [self._weights]) - self._loss = loss - self._cost = cost = tf.reduce_sum(loss) / batch_size - self._final_state = state - - # probabilities of each letter - self._activations = tf.nn.softmax(logits) - - # ability to save the model - self.saver = tf.train.Saver(tf.global_variables()) - - if not is_training: - return - - self._lr = tf.Variable(0.0, trainable=False) - tvars = tf.trainable_variables() - grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), - config.max_grad_norm) - optimizer = tf.train.GradientDescentOptimizer(self.lr) - self._train_op = optimizer.apply_gradients(zip(grads, tvars)) - - def assign_lr(self, session, lr_value): - session.run(tf.assign(self.lr, lr_value)) - - @property - def input_data(self): - return self._input_data - - @property - def targets(self): - return self._targets - - @property - def activations(self): - return self._activations - - @property - def weights(self): - return self._weights - - @property - def initial_state(self): - return self._initial_state - - @property - def cost(self): - return self._cost - - @property - def loss(self): - return self._loss - - @property - def final_state(self): - return self._final_state - - @property - def lr(self): - return self._lr - - @property - def train_op(self): - return self._train_op diff --git a/research/namignizer/names.py b/research/namignizer/names.py deleted file mode 100644 index 253742716..000000000 --- a/research/namignizer/names.py +++ 
/dev/null @@ -1,259 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""A library showing off sequence recognition and generation with the simple -example of names. - -We use recurrent neural nets to learn complex functions able to recognize and -generate sequences of a given form. This can be used for natural language -syntax recognition, dynamically generating maps or puzzles and of course -baby name generation. - -Before using this module, it is recommended to read the Tensorflow tutorial on -recurrent neural nets, as it explains the basic concepts of this model, and -will show off another module, the PTB module on which this model bases itself. - -Here is an overview of the functions available in this module: - -* RNN Module for sequence functions based on PTB - -* Name recognition specifically for recognizing names, but can be adapted to - recognizing sequence patterns - -* Name generations specifically for generating names, but can be adapted to - generating arbitrary sequence patterns -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import time - -import tensorflow as tf -import numpy as np - -from model import NamignizerModel -import data_utils - - -class SmallConfig(object): - """Small config.""" - init_scale = 0.1 - learning_rate = 1.0 - max_grad_norm = 5 - num_layers = 2 - num_steps = 20 - hidden_size = 200 - max_epoch = 4 - max_max_epoch = 13 - keep_prob = 1.0 - lr_decay = 0.5 - batch_size = 20 - vocab_size = 27 - epoch_size = 100 - - -class LargeConfig(object): - """Medium config.""" - init_scale = 0.05 - learning_rate = 1.0 - max_grad_norm = 5 - num_layers = 2 - num_steps = 35 - hidden_size = 650 - max_epoch = 6 - max_max_epoch = 39 - keep_prob = 0.5 - lr_decay = 0.8 - batch_size = 20 - vocab_size = 27 - epoch_size = 100 - - -class TestConfig(object): - """Tiny config, for testing.""" - init_scale = 0.1 - learning_rate = 1.0 - max_grad_norm = 1 - num_layers = 1 - num_steps = 2 - hidden_size = 2 - max_epoch = 1 - max_max_epoch = 1 - keep_prob = 1.0 - lr_decay = 0.5 - batch_size = 20 - vocab_size = 27 - epoch_size = 100 - - -def run_epoch(session, m, names, counts, epoch_size, eval_op, verbose=False): - """Runs the model on the given data for one epoch - - Args: - session: the tf session holding the model graph - m: an instance of the NamignizerModel - names: a set of lowercase names of 26 characters - counts: a list of the frequency of the above names - epoch_size: the number of batches to run - eval_op: whether to change the params or not, and how to do it - Kwargs: - verbose: whether to print out state of training during the epoch - Returns: - cost: the average cost during the last stage of the epoch - """ - start_time = time.time() - costs = 0.0 - iters = 0 - for step, (x, y) in enumerate(data_utils.namignizer_iterator(names, counts, - m.batch_size, m.num_steps, epoch_size)): - - cost, _ = session.run([m.cost, eval_op], - {m.input_data: x, - 
m.targets: y, - m.weights: np.ones(m.batch_size * m.num_steps)}) - costs += cost - iters += m.num_steps - - if verbose and step % (epoch_size // 10) == 9: - print("%.3f perplexity: %.3f speed: %.0f lps" % - (step * 1.0 / epoch_size, np.exp(costs / iters), - iters * m.batch_size / (time.time() - start_time))) - - if step >= epoch_size: - break - - return np.exp(costs / iters) - - -def train(data_dir, checkpoint_path, config): - """Trains the model with the given data - - Args: - data_dir: path to the data for the model (see data_utils for data - format) - checkpoint_path: the path to save the trained model checkpoints - config: one of the above configs that specify the model and how it - should be run and trained - Returns: - None - """ - # Prepare Name data. - print("Reading Name data in %s" % data_dir) - names, counts = data_utils.read_names(data_dir) - - with tf.Graph().as_default(), tf.Session() as session: - initializer = tf.random_uniform_initializer(-config.init_scale, - config.init_scale) - with tf.variable_scope("model", reuse=None, initializer=initializer): - m = NamignizerModel(is_training=True, config=config) - - tf.global_variables_initializer().run() - - for i in range(config.max_max_epoch): - lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0) - m.assign_lr(session, config.learning_rate * lr_decay) - - print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) - train_perplexity = run_epoch(session, m, names, counts, config.epoch_size, m.train_op, - verbose=True) - print("Epoch: %d Train Perplexity: %.3f" % - (i + 1, train_perplexity)) - - m.saver.save(session, checkpoint_path, global_step=i) - - -def namignize(names, checkpoint_path, config): - """Recognizes names and prints the Perplexity of the model for each names - in the list - - Args: - names: a list of names in the model format - checkpoint_path: the path to restore the trained model from, should not - include the model name, just the path to - config: one of the above configs that specify the model and how it - should be run and trained - Returns: - None - """ - with tf.Graph().as_default(), tf.Session() as session: - - with tf.variable_scope("model"): - m = NamignizerModel(is_training=False, config=config) - - m.saver.restore(session, checkpoint_path) - - for name in names: - x, y = data_utils.name_to_batch(name, m.batch_size, m.num_steps) - - cost, loss, _ = session.run([m.cost, m.loss, tf.no_op()], - {m.input_data: x, - m.targets: y, - m.weights: np.concatenate(( - np.ones(len(name)), np.zeros(m.batch_size * m.num_steps - len(name))))}) - - print("Name {} gives us a perplexity of {}".format( - name, np.exp(cost))) - - -def namignator(checkpoint_path, config): - """Generates names randomly according to a given model - - Args: - checkpoint_path: the path to restore the trained model from, should not - include the model name, just the path to - config: one of the above configs that specify the model and how it - should be run and trained - Returns: - None - """ - # mutate the config to become a name generator config - config.num_steps = 1 - config.batch_size = 1 - - with tf.Graph().as_default(), tf.Session() as session: - - with tf.variable_scope("model"): - m = NamignizerModel(is_training=False, config=config) - - m.saver.restore(session, checkpoint_path) - - activations, final_state, _ = session.run([m.activations, m.final_state, tf.no_op()], - {m.input_data: np.zeros((1, 1)), - m.targets: np.zeros((1, 1)), - m.weights: np.ones(1)}) - - # sample from our softmax activations - next_letter = 
np.random.choice(27, p=activations[0])
-    name = [next_letter]
-    while next_letter != 0:
-      activations, final_state, _ = session.run(
-          [m.activations, m.final_state, tf.no_op()],
-          {m.input_data: [[next_letter]],
-           m.targets: np.zeros((1, 1)),
-           m.initial_state: final_state,
-           m.weights: np.ones(1)})
-
-      next_letter = np.random.choice(27, p=activations[0])
-      name += [next_letter]
-
-    print(map(lambda x: chr(x + 96), name))
-
-
-if __name__ == "__main__":
-  train("data/SmallNames.txt", "model/namignizer", SmallConfig)
-
-  namignize(["mary", "ida", "gazorbazorb", "mmmhmm", "bob"],
-            tf.train.latest_checkpoint("model"), SmallConfig)
-
-  namignator(tf.train.latest_checkpoint("model"), SmallConfig)
diff --git a/research/neural_gpu/README.md b/research/neural_gpu/README.md
deleted file mode 100644
index 097ef318c..000000000
--- a/research/neural_gpu/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# NeuralGPU
-Code for the Neural GPU model described in http://arxiv.org/abs/1511.08228.
-The extended version was described in https://arxiv.org/abs/1610.08613.
-
-Requirements:
-* TensorFlow (see tensorflow.org for how to install)
-
-The model can be trained on the following algorithmic tasks:
-
-* `sort` - Sort a symbol list
-* `kvsort` - Sort symbol keys in dictionary
-* `id` - Return the same symbol list
-* `rev` - Reverse a symbol list
-* `rev2` - Reverse a symbol dictionary by key
-* `incr` - Add one to a symbol value
-* `add` - Long decimal addition
-* `left` - First symbol in list
-* `right` - Last symbol in list
-* `left-shift` - Left shift a symbol list
-* `right-shift` - Right shift a symbol list
-* `bmul` - Long binary multiplication
-* `mul` - Long decimal multiplication
-* `dup` - Duplicate a symbol list with padding
-* `badd` - Long binary addition
-* `qadd` - Long quaternary addition
-* `search` - Search for symbol key in dictionary
-
-It can also be trained on the WMT English-French translation task:
-
-* `wmt` - WMT English-French translation (data will be downloaded)
-
-The value range for symbols is defined by the `vocab_size` flag.
-In particular, the values are in the range `1` to `vocab_size - 1`.
-So if you set `--vocab_size=16` (the default) then `--problem=rev`
-will be reversing lists of 15 symbols, and `--problem=id` will be identity
-on a list of up to 15 symbols.
-
-
-To train the model on the binary multiplication task run:
-
-```
-python neural_gpu_trainer.py --problem=bmul
-```
-
-This trains the Extended Neural GPU; to train the original model run:
-
-```
-python neural_gpu_trainer.py --problem=bmul --beam_size=0
-```
-
-While training, interim / checkpoint model parameters will be
-written to `/tmp/neural_gpu/`.
-
-Once the amount of error gets down to what you're comfortable
-with, hit `Ctrl-C` to stop the training process. The latest
-model parameters will be in `/tmp/neural_gpu/neural_gpu.ckpt-`
-and used on any subsequent run.
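For a feel of the task encoding, the data generator in `data_utils.py` (deleted below) represents `bmul` pairs as lower-endian digit lists, shifts every digit by +1 so that 0 stays a padding symbol, and joins the operands with the separator token 12. A hand-worked pair for 3 * 2, following that scheme (not a verbatim excerpt):

```python
d1, d2 = [1, 1], [0, 1]        # lower-endian binary: 3 and 2
res = [int(b) for b in reversed(bin(3 * 2)[2:])]  # 6 -> [0, 1, 1]

inp = [d + 1 for d in d1] + [12] + [d + 1 for d in d2]
target = [r + 1 for r in res]
print(inp)     # [2, 2, 12, 1, 2]
print(target)  # [1, 2, 2]
```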
- -To evaluate a trained model on how well it decodes run: - -``` -python neural_gpu_trainer.py --problem=bmul --mode=1 -``` - -To interact with a model (experimental, see code) run: - -``` -python neural_gpu_trainer.py --problem=bmul --mode=2 -``` - -To train on WMT data, set a larger --nmaps and --vocab_size and avoid curriculum: - -``` -python neural_gpu_trainer.py --problem=wmt --vocab_size=32768 --nmaps=256 - --vec_size=256 --curriculum_seq=1.0 --max_length=60 --data_dir ~/wmt -``` - -With less memory, try lower batch size, e.g. `--batch_size=4`. With more GPUs -in your system, there will be a batch on every GPU so you can run larger models. -For example, `--batch_size=4 --num_gpus=4 --nmaps=512 --vec_size=512` will -run a large model (512-size) on 4 GPUs, with effective batches of 4*4=16. - -Maintained by Lukasz Kaiser (lukaszkaiser) diff --git a/research/neural_gpu/data_utils.py b/research/neural_gpu/data_utils.py deleted file mode 100644 index 3c14ff701..000000000 --- a/research/neural_gpu/data_utils.py +++ /dev/null @@ -1,458 +0,0 @@ -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Neural GPU -- data generation and batching utilities.""" - -import math -import os -import random -import sys -import time - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import program_utils - -FLAGS = tf.app.flags.FLAGS - -bins = [2 + bin_idx_i for bin_idx_i in xrange(256)] -all_tasks = ["sort", "kvsort", "id", "rev", "rev2", "incr", "add", "left", - "right", "left-shift", "right-shift", "bmul", "mul", "dup", - "badd", "qadd", "search", "progeval", "progsynth"] -log_filename = "" -vocab, rev_vocab = None, None - - -def pad(l): - for b in bins: - if b >= l: return b - return bins[-1] - - -def bin_for(l): - for i, b in enumerate(bins): - if b >= l: return i - return len(bins) - 1 - - -train_set = {} -test_set = {} -for some_task in all_tasks: - train_set[some_task] = [] - test_set[some_task] = [] - for all_max_len in xrange(10000): - train_set[some_task].append([]) - test_set[some_task].append([]) - - -def read_tmp_file(name): - """Read from a file with the given name in our log directory or above.""" - dirname = os.path.dirname(log_filename) - fname = os.path.join(dirname, name + ".txt") - if not tf.gfile.Exists(fname): - print_out("== not found file: " + fname) - fname = os.path.join(dirname, "../" + name + ".txt") - if not tf.gfile.Exists(fname): - print_out("== not found file: " + fname) - fname = os.path.join(dirname, "../../" + name + ".txt") - if not tf.gfile.Exists(fname): - print_out("== not found file: " + fname) - return None - print_out("== found file: " + fname) - res = [] - with tf.gfile.GFile(fname, mode="r") as f: - for line in f: - res.append(line.strip()) - return res - - -def write_tmp_file(name, lines): - dirname = os.path.dirname(log_filename) - fname = os.path.join(dirname, name + ".txt") - with tf.gfile.GFile(fname, 
mode="w") as f: - for line in lines: - f.write(line + "\n") - - -def add(n1, n2, base=10): - """Add two numbers represented as lower-endian digit lists.""" - k = max(len(n1), len(n2)) + 1 - d1 = n1 + [0 for _ in xrange(k - len(n1))] - d2 = n2 + [0 for _ in xrange(k - len(n2))] - res = [] - carry = 0 - for i in xrange(k): - if d1[i] + d2[i] + carry < base: - res.append(d1[i] + d2[i] + carry) - carry = 0 - else: - res.append(d1[i] + d2[i] + carry - base) - carry = 1 - while res and res[-1] == 0: - res = res[:-1] - if res: return res - return [0] - - -def init_data(task, length, nbr_cases, nclass): - """Data initialization.""" - def rand_pair(l, task): - """Random data pair for a task. Total length should be <= l.""" - k = int((l-1)/2) - base = 10 - if task[0] == "b": base = 2 - if task[0] == "q": base = 4 - d1 = [np.random.randint(base) for _ in xrange(k)] - d2 = [np.random.randint(base) for _ in xrange(k)] - if task in ["add", "badd", "qadd"]: - res = add(d1, d2, base) - elif task in ["mul", "bmul"]: - d1n = sum([d * (base ** i) for i, d in enumerate(d1)]) - d2n = sum([d * (base ** i) for i, d in enumerate(d2)]) - if task == "bmul": - res = [int(x) for x in list(reversed(str(bin(d1n * d2n))))[:-2]] - else: - res = [int(x) for x in list(reversed(str(d1n * d2n)))] - else: - sys.exit() - sep = [12] - if task in ["add", "badd", "qadd"]: sep = [11] - inp = [d + 1 for d in d1] + sep + [d + 1 for d in d2] - return inp, [r + 1 for r in res] - - def rand_dup_pair(l): - """Random data pair for duplication task. Total length should be <= l.""" - k = int(l/2) - x = [np.random.randint(nclass - 1) + 1 for _ in xrange(k)] - inp = x + [0 for _ in xrange(l - k)] - res = x + x + [0 for _ in xrange(l - 2*k)] - return inp, res - - def rand_rev2_pair(l): - """Random data pair for reverse2 task. Total length should be <= l.""" - inp = [(np.random.randint(nclass - 1) + 1, - np.random.randint(nclass - 1) + 1) for _ in xrange(l/2)] - res = [i for i in reversed(inp)] - return [x for p in inp for x in p], [x for p in res for x in p] - - def rand_search_pair(l): - """Random data pair for search task. Total length should be <= l.""" - inp = [(np.random.randint(nclass - 1) + 1, - np.random.randint(nclass - 1) + 1) for _ in xrange(l-1/2)] - q = np.random.randint(nclass - 1) + 1 - res = 0 - for (k, v) in reversed(inp): - if k == q: - res = v - return [x for p in inp for x in p] + [q], [res] - - def rand_kvsort_pair(l): - """Random data pair for key-value sort. 
Total length should be <= l.""" - keys = [(np.random.randint(nclass - 1) + 1, i) for i in xrange(l/2)] - vals = [np.random.randint(nclass - 1) + 1 for _ in xrange(l/2)] - kv = [(k, vals[i]) for (k, i) in keys] - sorted_kv = [(k, vals[i]) for (k, i) in sorted(keys)] - return [x for p in kv for x in p], [x for p in sorted_kv for x in p] - - def prog_io_pair(prog, max_len, counter=0): - try: - ilen = np.random.randint(max_len - 3) + 1 - bound = max(15 - (counter / 20), 1) - inp = [random.choice(range(-bound, bound)) for _ in range(ilen)] - inp_toks = [program_utils.prog_rev_vocab[t] - for t in program_utils.tokenize(str(inp)) if t != ","] - out = program_utils.evaluate(prog, {"a": inp}) - out_toks = [program_utils.prog_rev_vocab[t] - for t in program_utils.tokenize(str(out)) if t != ","] - if counter > 400: - out_toks = [] - if (out_toks and out_toks[0] == program_utils.prog_rev_vocab["["] and - len(out_toks) != len([o for o in out if o == ","]) + 3): - raise ValueError("generated list with too long ints") - if (out_toks and out_toks[0] != program_utils.prog_rev_vocab["["] and - len(out_toks) > 1): - raise ValueError("generated one int but tokenized it to many") - if len(out_toks) > max_len: - raise ValueError("output too long") - return (inp_toks, out_toks) - except ValueError: - return prog_io_pair(prog, max_len, counter+1) - - def spec(inp): - """Return the target given the input for some tasks.""" - if task == "sort": - return sorted(inp) - elif task == "id": - return inp - elif task == "rev": - return [i for i in reversed(inp)] - elif task == "incr": - carry = 1 - res = [] - for i in xrange(len(inp)): - if inp[i] + carry < nclass: - res.append(inp[i] + carry) - carry = 0 - else: - res.append(1) - carry = 1 - return res - elif task == "left": - return [inp[0]] - elif task == "right": - return [inp[-1]] - elif task == "left-shift": - return [inp[l-1] for l in xrange(len(inp))] - elif task == "right-shift": - return [inp[l+1] for l in xrange(len(inp))] - else: - print_out("Unknown spec for task " + str(task)) - sys.exit() - - l = length - cur_time = time.time() - total_time = 0.0 - - is_prog = task in ["progeval", "progsynth"] - if is_prog: - inputs_per_prog = 5 - program_utils.make_vocab() - progs = read_tmp_file("programs_len%d" % (l / 10)) - if not progs: - progs = program_utils.gen(l / 10, 1.2 * nbr_cases / inputs_per_prog) - write_tmp_file("programs_len%d" % (l / 10), progs) - prog_ios = read_tmp_file("programs_len%d_io" % (l / 10)) - nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2 - if not prog_ios: - # Generate program io data. 
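Before the program-I/O generation below, a few hand-checked cases of what `spec()` above returns for the simple list tasks. The `incr` branch works in little-endian over symbols `1..nclass-1`, wrapping an overflowing digit back to 1 since 0 is reserved for padding:

```python
def incr(inp, nclass):
  # Restatement of the 'incr' branch of spec() above.
  res, carry = [], 1
  for d in inp:
    if d + carry < nclass:
      res.append(d + carry)
      carry = 0
    else:
      res.append(1)
      carry = 1
  return res

print(sorted([3, 1, 2]))          # task 'sort' -> [1, 2, 3]
print(list(reversed([3, 1, 2])))  # task 'rev'  -> [2, 1, 3]
print(incr([3, 1], nclass=4))     # task 'incr' -> [1, 2]
```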
- prog_ios = [] - for pidx, prog in enumerate(progs): - if pidx % 500 == 0: - print_out("== generating io pairs for program %d" % pidx) - if pidx * inputs_per_prog > nbr_cases * 1.2: - break - ptoks = [program_utils.prog_rev_vocab[t] - for t in program_utils.tokenize(prog)] - ptoks.append(program_utils.prog_rev_vocab["_EOS"]) - plen = len(ptoks) - for _ in xrange(inputs_per_prog): - if task == "progeval": - inp, out = prog_io_pair(prog, plen) - prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog) - elif task == "progsynth": - plen = max(len(ptoks), 8) - for _ in xrange(3): - inp, out = prog_io_pair(prog, plen / 2) - prog_ios.append(str(inp) + "\t" + str(out) + "\t" + prog) - write_tmp_file("programs_len%d_io" % (l / 10), prog_ios) - prog_ios_dict = {} - for s in prog_ios: - i, o, p = s.split("\t") - i_clean = "".join([c for c in i if c.isdigit() or c == " "]) - o_clean = "".join([c for c in o if c.isdigit() or c == " "]) - inp = [int(x) for x in i_clean.split()] - out = [int(x) for x in o_clean.split()] - if inp and out: - if p in prog_ios_dict: - prog_ios_dict[p].append([inp, out]) - else: - prog_ios_dict[p] = [[inp, out]] - # Use prog_ios_dict to create data. - progs = [] - for prog in prog_ios_dict: - if len([c for c in prog if c == ";"]) <= (l / 10): - progs.append(prog) - nbr_cases = min(nbr_cases, len(progs) * inputs_per_prog) / 1.2 - print_out("== %d training cases on %d progs" % (nbr_cases, len(progs))) - for pidx, prog in enumerate(progs): - if pidx * inputs_per_prog > nbr_cases * 1.2: - break - ptoks = [program_utils.prog_rev_vocab[t] - for t in program_utils.tokenize(prog)] - ptoks.append(program_utils.prog_rev_vocab["_EOS"]) - plen = len(ptoks) - dset = train_set if pidx < nbr_cases / inputs_per_prog else test_set - for _ in xrange(inputs_per_prog): - if task == "progeval": - inp, out = prog_ios_dict[prog].pop() - dset[task][bin_for(plen)].append([[ptoks, inp, [], []], [out]]) - elif task == "progsynth": - plen, ilist = max(len(ptoks), 8), [[]] - for _ in xrange(3): - inp, out = prog_ios_dict[prog].pop() - ilist.append(inp + out) - dset[task][bin_for(plen)].append([ilist, [ptoks]]) - - for case in xrange(0 if is_prog else nbr_cases): - total_time += time.time() - cur_time - cur_time = time.time() - if l > 10000 and case % 100 == 1: - print_out(" avg gen time %.4f s" % (total_time / float(case))) - if task in ["add", "badd", "qadd", "bmul", "mul"]: - i, t = rand_pair(l, task) - train_set[task][bin_for(len(i))].append([[[], i, [], []], [t]]) - i, t = rand_pair(l, task) - test_set[task][bin_for(len(i))].append([[[], i, [], []], [t]]) - elif task == "dup": - i, t = rand_dup_pair(l) - train_set[task][bin_for(len(i))].append([[i], [t]]) - i, t = rand_dup_pair(l) - test_set[task][bin_for(len(i))].append([[i], [t]]) - elif task == "rev2": - i, t = rand_rev2_pair(l) - train_set[task][bin_for(len(i))].append([[i], [t]]) - i, t = rand_rev2_pair(l) - test_set[task][bin_for(len(i))].append([[i], [t]]) - elif task == "search": - i, t = rand_search_pair(l) - train_set[task][bin_for(len(i))].append([[i], [t]]) - i, t = rand_search_pair(l) - test_set[task][bin_for(len(i))].append([[i], [t]]) - elif task == "kvsort": - i, t = rand_kvsort_pair(l) - train_set[task][bin_for(len(i))].append([[i], [t]]) - i, t = rand_kvsort_pair(l) - test_set[task][bin_for(len(i))].append([[i], [t]]) - elif task not in ["progeval", "progsynth"]: - inp = [np.random.randint(nclass - 1) + 1 for i in xrange(l)] - target = spec(inp) - train_set[task][bin_for(l)].append([[inp], [target]]) - inp = 
[np.random.randint(nclass - 1) + 1 for i in xrange(l)] - target = spec(inp) - test_set[task][bin_for(l)].append([[inp], [target]]) - - -def to_symbol(i): - """Covert ids to text.""" - if i == 0: return "" - if i == 11: return "+" - if i == 12: return "*" - return str(i-1) - - -def to_id(s): - """Covert text to ids.""" - if s == "+": return 11 - if s == "*": return 12 - return int(s) + 1 - - -def get_batch(bin_id, batch_size, data_set, height, offset=None, preset=None): - """Get a batch of data, training or testing.""" - inputs, targets = [], [] - pad_length = bins[bin_id] - for b in xrange(batch_size): - if preset is None: - elem = random.choice(data_set[bin_id]) - if offset is not None and offset + b < len(data_set[bin_id]): - elem = data_set[bin_id][offset + b] - else: - elem = preset - inpt, targett, inpl, targetl = elem[0], elem[1], [], [] - for inp in inpt: - inpl.append(inp + [0 for _ in xrange(pad_length - len(inp))]) - if len(inpl) == 1: - for _ in xrange(height - 1): - inpl.append([0 for _ in xrange(pad_length)]) - for target in targett: - targetl.append(target + [0 for _ in xrange(pad_length - len(target))]) - inputs.append(inpl) - targets.append(targetl) - res_input = np.array(inputs, dtype=np.int32) - res_target = np.array(targets, dtype=np.int32) - assert list(res_input.shape) == [batch_size, height, pad_length] - assert list(res_target.shape) == [batch_size, 1, pad_length] - return res_input, res_target - - -def print_out(s, newline=True): - """Print a message out and log it to file.""" - if log_filename: - try: - with tf.gfile.GFile(log_filename, mode="a") as f: - f.write(s + ("\n" if newline else "")) - # pylint: disable=bare-except - except: - sys.stderr.write("Error appending to %s\n" % log_filename) - sys.stdout.write(s + ("\n" if newline else "")) - sys.stdout.flush() - - -def decode(output): - return [np.argmax(o, axis=1) for o in output] - - -def accuracy(inpt_t, output, target_t, batch_size, nprint, - beam_out=None, beam_scores=None): - """Calculate output accuracy given target.""" - assert nprint < batch_size + 1 - inpt = [] - for h in xrange(inpt_t.shape[1]): - inpt.extend([inpt_t[:, h, l] for l in xrange(inpt_t.shape[2])]) - target = [target_t[:, 0, l] for l in xrange(target_t.shape[2])] - def tok(i): - if rev_vocab and i < len(rev_vocab): - return rev_vocab[i] - return str(i - 1) - def task_print(inp, output, target): - stop_bound = 0 - print_len = 0 - while print_len < len(target) and target[print_len] > stop_bound: - print_len += 1 - print_out(" i: " + " ".join([tok(i) for i in inp if i > 0])) - print_out(" o: " + - " ".join([tok(output[l]) for l in xrange(print_len)])) - print_out(" t: " + - " ".join([tok(target[l]) for l in xrange(print_len)])) - decoded_target = target - decoded_output = decode(output) - # Use beam output if given and score is high enough. 
- if beam_out is not None: - for b in xrange(batch_size): - if beam_scores[b] >= 10.0: - for l in xrange(min(len(decoded_output), beam_out.shape[2])): - decoded_output[l][b] = int(beam_out[b, 0, l]) - total = 0 - errors = 0 - seq = [0 for b in xrange(batch_size)] - for l in xrange(len(decoded_output)): - for b in xrange(batch_size): - if decoded_target[l][b] > 0: - total += 1 - if decoded_output[l][b] != decoded_target[l][b]: - seq[b] = 1 - errors += 1 - e = 0 # Previous error index - for _ in xrange(min(nprint, sum(seq))): - while seq[e] == 0: - e += 1 - task_print([inpt[l][e] for l in xrange(len(inpt))], - [decoded_output[l][e] for l in xrange(len(decoded_target))], - [decoded_target[l][e] for l in xrange(len(decoded_target))]) - e += 1 - for b in xrange(nprint - errors): - task_print([inpt[l][b] for l in xrange(len(inpt))], - [decoded_output[l][b] for l in xrange(len(decoded_target))], - [decoded_target[l][b] for l in xrange(len(decoded_target))]) - return errors, total, sum(seq) - - -def safe_exp(x): - perp = 10000 - x = float(x) - if x < 100: perp = math.exp(x) - if perp > 10000: return 10000 - return perp diff --git a/research/neural_gpu/neural_gpu.py b/research/neural_gpu/neural_gpu.py deleted file mode 100644 index 55b2b3e99..000000000 --- a/research/neural_gpu/neural_gpu.py +++ /dev/null @@ -1,747 +0,0 @@ -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""The Neural GPU Model.""" - -import time - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from tensorflow.python.framework import function -import data_utils as data - -do_jit = False # Gives more speed but experimental for now. 
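The bookkeeping in `accuracy()` above counts a position only while its target is non-zero (padding ends the sequence), tallies wrong tokens, and flags a sequence if any scored token is wrong. A NumPy restatement on a toy batch:

```python
import numpy as np

target = np.array([[1, 2, 2, 0],
                   [3, 3, 0, 0]])   # batch of 2, zero-padded
output = np.array([[1, 2, 5, 0],
                   [3, 3, 0, 0]])

mask = target > 0                   # positions that count
errors = int(((output != target) & mask).sum())                  # 1 wrong token
seq_errors = int(((output != target) & mask).any(axis=1).sum())  # 1 bad sequence
total = int(mask.sum())             # 5 scored tokens
print(errors, total, seq_errors)    # 1 5 1
```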
-jit_scope = tf.contrib.compiler.jit.experimental_jit_scope - - -def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix): - """Convolutional linear map.""" - if not isinstance(args, (list, tuple)): - args = [args] - with tf.variable_scope(prefix): - with tf.device("/cpu:0"): - k = tf.get_variable("CvK", [kw, kh, nin, nout]) - if len(args) == 1: - arg = args[0] - else: - arg = tf.concat(axis=3, values=args) - res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME") - if not do_bias: return res - with tf.device("/cpu:0"): - bias_term = tf.get_variable( - "CvB", [nout], initializer=tf.constant_initializer(bias_start)) - bias_term = tf.reshape(bias_term, [1, 1, 1, nout]) - return res + bias_term - - -def sigmoid_cutoff(x, cutoff): - """Sigmoid with cutoff, e.g., 1.2sigmoid(x) - 0.1.""" - y = tf.sigmoid(x) - if cutoff < 1.01: return y - d = (cutoff - 1.0) / 2.0 - return tf.minimum(1.0, tf.maximum(0.0, cutoff * y - d), name="cutoff_min") - - -@function.Defun(tf.float32, noinline=True) -def sigmoid_cutoff_12(x): - """Sigmoid with cutoff 1.2, specialized for speed and memory use.""" - y = tf.sigmoid(x) - return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1), name="cutoff_min_12") - - -@function.Defun(tf.float32, noinline=True) -def sigmoid_hard(x): - """Hard sigmoid.""" - return tf.minimum(1.0, tf.maximum(0.0, 0.25 * x + 0.5)) - - -def place_at14(decided, selected, it): - """Place selected at it-th coordinate of decided, dim=1 of 4.""" - slice1 = decided[:, :it, :, :] - slice2 = decided[:, it + 1:, :, :] - return tf.concat(axis=1, values=[slice1, selected, slice2]) - - -def place_at13(decided, selected, it): - """Place selected at it-th coordinate of decided, dim=1 of 3.""" - slice1 = decided[:, :it, :] - slice2 = decided[:, it + 1:, :] - return tf.concat(axis=1, values=[slice1, selected, slice2]) - - -def tanh_cutoff(x, cutoff): - """Tanh with cutoff, e.g., 1.1tanh(x) cut to [-1. 
1].""" - y = tf.tanh(x) - if cutoff < 1.01: return y - d = (cutoff - 1.0) / 2.0 - return tf.minimum(1.0, tf.maximum(-1.0, (1.0 + d) * y)) - - -@function.Defun(tf.float32, noinline=True) -def tanh_hard(x): - """Hard tanh.""" - return tf.minimum(1.0, tf.maximum(0.0, x)) - - -def layer_norm(x, nmaps, prefix, epsilon=1e-5): - """Layer normalize the 4D tensor x, averaging over the last dimension.""" - with tf.variable_scope(prefix): - scale = tf.get_variable("layer_norm_scale", [nmaps], - initializer=tf.ones_initializer()) - bias = tf.get_variable("layer_norm_bias", [nmaps], - initializer=tf.zeros_initializer()) - mean, variance = tf.nn.moments(x, [3], keep_dims=True) - norm_x = (x - mean) / tf.sqrt(variance + epsilon) - return norm_x * scale + bias - - -def conv_gru(inpts, mem, kw, kh, nmaps, rate, cutoff, prefix, do_layer_norm, - args_len=None): - """Convolutional GRU.""" - def conv_lin(args, suffix, bias_start): - total_args_len = args_len or len(args) * nmaps - res = conv_linear(args, kw, kh, total_args_len, nmaps, rate, True, - bias_start, prefix + "/" + suffix) - if do_layer_norm: - return layer_norm(res, nmaps, prefix + "/" + suffix) - else: - return res - if cutoff == 1.2: - reset = sigmoid_cutoff_12(conv_lin(inpts + [mem], "r", 1.0)) - gate = sigmoid_cutoff_12(conv_lin(inpts + [mem], "g", 1.0)) - elif cutoff > 10: - reset = sigmoid_hard(conv_lin(inpts + [mem], "r", 1.0)) - gate = sigmoid_hard(conv_lin(inpts + [mem], "g", 1.0)) - else: - reset = sigmoid_cutoff(conv_lin(inpts + [mem], "r", 1.0), cutoff) - gate = sigmoid_cutoff(conv_lin(inpts + [mem], "g", 1.0), cutoff) - if cutoff > 10: - candidate = tanh_hard(conv_lin(inpts + [reset * mem], "c", 0.0)) - else: - # candidate = tanh_cutoff(conv_lin(inpts + [reset * mem], "c", 0.0), cutoff) - candidate = tf.tanh(conv_lin(inpts + [reset * mem], "c", 0.0)) - return gate * mem + (1 - gate) * candidate - - -CHOOSE_K = 256 - - -def memory_call(q, l, nmaps, mem_size, vocab_size, num_gpus, update_mem): - raise ValueError("Fill for experiments with additional memory structures.") - - -def memory_run(step, nmaps, mem_size, batch_size, vocab_size, - global_step, do_training, update_mem, decay_factor, num_gpus, - target_emb_weights, output_w, gpu_targets_tn, it): - """Run memory.""" - q = step[:, 0, it, :] - mlabels = gpu_targets_tn[:, it, 0] - res, mask, mem_loss = memory_call( - q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem) - res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1) - - # Mix gold and original in the first steps, 20% later. - gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7) - use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. 
* decay_factor) - use_gold = tf.maximum(use_gold, 0.2) * do_training - mem = tf.cond(tf.less(tf.random_uniform([]), use_gold), - lambda: use_gold * gold + (1.0 - use_gold) * res, - lambda: res) - mem = tf.reshape(mem, [-1, 1, 1, nmaps]) - return mem, mem_loss, update_mem - - -@tf.RegisterGradient("CustomIdG") -def _custom_id_grad(_, grads): - return grads - - -def quantize(t, quant_scale, max_value=1.0): - """Quantize a tensor t with each element in [-max_value, max_value].""" - t = tf.minimum(max_value, tf.maximum(t, -max_value)) - big = quant_scale * (t + max_value) + 0.5 - with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}): - res = (tf.floor(big) / quant_scale) - max_value - return res - - -def quantize_weights_op(quant_scale, max_value): - ops = [v.assign(quantize(v, quant_scale, float(max_value))) - for v in tf.trainable_variables()] - return tf.group(*ops) - - -def autoenc_quantize(x, nbits, nmaps, do_training, layers=1): - """Autoencoder into nbits vectors of bits, using noise and sigmoids.""" - enc_x = tf.reshape(x, [-1, nmaps]) - for i in xrange(layers - 1): - enc_x = tf.layers.dense(enc_x, nmaps, name="autoenc_%d" % i) - enc_x = tf.layers.dense(enc_x, nbits, name="autoenc_%d" % (layers - 1)) - noise = tf.truncated_normal(tf.shape(enc_x), stddev=2.0) - dec_x = sigmoid_cutoff_12(enc_x + noise * do_training) - dec_x = tf.reshape(dec_x, [-1, nbits]) - for i in xrange(layers): - dec_x = tf.layers.dense(dec_x, nmaps, name="autodec_%d" % i) - return tf.reshape(dec_x, tf.shape(x)) - - -def make_dense(targets, noclass, low_param): - """Move a batch of targets to a dense 1-hot representation.""" - low = low_param / float(noclass - 1) - high = 1.0 - low * (noclass - 1) - targets = tf.cast(targets, tf.int64) - return tf.one_hot(targets, depth=noclass, on_value=high, off_value=low) - - -def reorder_beam(beam_size, batch_size, beam_val, output, is_first, - tensors_to_reorder): - """Reorder to minimize beam costs.""" - # beam_val is [batch_size x beam_size]; let b = batch_size * beam_size - # decided is len x b x a x b - # output is b x out_size; step is b x len x a x b; - outputs = tf.split(axis=0, num_or_size_splits=beam_size, value=tf.nn.log_softmax(output)) - all_beam_vals, all_beam_idx = [], [] - beam_range = 1 if is_first else beam_size - for i in xrange(beam_range): - top_out, top_out_idx = tf.nn.top_k(outputs[i], k=beam_size) - cur_beam_val = beam_val[:, i] - top_out = tf.Print(top_out, [top_out, top_out_idx, beam_val, i, - cur_beam_val], "GREPO", summarize=8) - all_beam_vals.append(top_out + tf.expand_dims(cur_beam_val, 1)) - all_beam_idx.append(top_out_idx) - all_beam_idx = tf.reshape(tf.transpose(tf.concat(axis=1, values=all_beam_idx), [1, 0]), - [-1]) - top_beam, top_beam_idx = tf.nn.top_k(tf.concat(axis=1, values=all_beam_vals), k=beam_size) - top_beam_idx = tf.Print(top_beam_idx, [top_beam, top_beam_idx], - "GREP", summarize=8) - reordered = [[] for _ in xrange(len(tensors_to_reorder) + 1)] - top_out_idx = [] - for i in xrange(beam_size): - which_idx = top_beam_idx[:, i] * batch_size + tf.range(batch_size) - top_out_idx.append(tf.gather(all_beam_idx, which_idx)) - which_beam = top_beam_idx[:, i] / beam_size # [batch] - which_beam = which_beam * batch_size + tf.range(batch_size) - reordered[0].append(tf.gather(output, which_beam)) - for i, t in enumerate(tensors_to_reorder): - reordered[i + 1].append(tf.gather(t, which_beam)) - new_tensors = [tf.concat(axis=0, values=t) for t in reordered] - top_out_idx = tf.concat(axis=0, values=top_out_idx) - return 
(top_beam, new_tensors[0], top_out_idx, new_tensors[1:]) - - -class NeuralGPU(object): - """Neural GPU Model.""" - - def __init__(self, nmaps, vec_size, niclass, noclass, dropout, - max_grad_norm, cutoff, nconvs, kw, kh, height, mem_size, - learning_rate, min_length, num_gpus, num_replicas, - grad_noise_scale, sampling_rate, act_noise=0.0, do_rnn=False, - atrous=False, beam_size=1, backward=True, do_layer_norm=False, - autoenc_decay=1.0): - # Feeds for parameters and ops to update them. - self.nmaps = nmaps - if backward: - self.global_step = tf.Variable(0, trainable=False, name="global_step") - self.cur_length = tf.Variable(min_length, trainable=False) - self.cur_length_incr_op = self.cur_length.assign_add(1) - self.lr = tf.Variable(learning_rate, trainable=False) - self.lr_decay_op = self.lr.assign(self.lr * 0.995) - self.do_training = tf.placeholder(tf.float32, name="do_training") - self.update_mem = tf.placeholder(tf.int32, name="update_mem") - self.noise_param = tf.placeholder(tf.float32, name="noise_param") - - # Feeds for inputs, targets, outputs, losses, etc. - self.input = tf.placeholder(tf.int32, name="inp") - self.target = tf.placeholder(tf.int32, name="tgt") - self.prev_step = tf.placeholder(tf.float32, name="prev_step") - gpu_input = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.input) - gpu_target = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.target) - gpu_prev_step = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.prev_step) - batch_size = tf.shape(gpu_input[0])[0] - - if backward: - adam_lr = 0.005 * self.lr - adam = tf.train.AdamOptimizer(adam_lr, epsilon=1e-3) - - def adam_update(grads): - return adam.apply_gradients(zip(grads, tf.trainable_variables()), - global_step=self.global_step, - name="adam_update") - - # When switching from Adam to SGD we perform reverse-decay. - if backward: - global_step_float = tf.cast(self.global_step, tf.float32) - sampling_decay_exponent = global_step_float / 100000.0 - sampling_decay = tf.maximum(0.05, tf.pow(0.5, sampling_decay_exponent)) - self.sampling = sampling_rate * 0.05 / sampling_decay - else: - self.sampling = tf.constant(0.0) - - # Cache variables on cpu if needed. 
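# A pure-Python sketch of the reverse-decay sampling schedule built above
# (rate value hypothetical): the scheduled-sampling probability starts at
# 5% of sampling_rate and doubles every 100k steps until it saturates at
# the full rate once the 0.05 floor is reached.
def sampling_probability(global_step, sampling_rate=0.1):
    sampling_decay = max(0.05, 0.5 ** (global_step / 100000.0))
    return sampling_rate * 0.05 / sampling_decay

# sampling_probability(0) == 0.005; sampling_probability(500000) == 0.1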
- if num_replicas > 1 or num_gpus > 1: - with tf.device("/cpu:0"): - caching_const = tf.constant(0) - tf.get_variable_scope().set_caching_device(caching_const.op.device) - # partitioner = tf.variable_axis_size_partitioner(1024*256*4) - # tf.get_variable_scope().set_partitioner(partitioner) - - def gpu_avg(l): - if l[0] is None: - for elem in l: - assert elem is None - return 0.0 - if len(l) < 2: - return l[0] - return sum(l) / float(num_gpus) - - self.length_tensor = tf.placeholder(tf.int32, name="length") - - with tf.device("/cpu:0"): - emb_weights = tf.get_variable( - "embedding", [niclass, vec_size], - initializer=tf.random_uniform_initializer(-1.7, 1.7)) - if beam_size > 0: - target_emb_weights = tf.get_variable( - "target_embedding", [noclass, nmaps], - initializer=tf.random_uniform_initializer(-1.7, 1.7)) - e0 = tf.scatter_update(emb_weights, - tf.constant(0, dtype=tf.int32, shape=[1]), - tf.zeros([1, vec_size])) - output_w = tf.get_variable("output_w", [nmaps, noclass], tf.float32) - - def conv_rate(layer): - if atrous: - return 2**layer - return 1 - - # pylint: disable=cell-var-from-loop - def enc_step(step): - """Encoder step.""" - if autoenc_decay < 1.0: - quant_step = autoenc_quantize(step, 16, nmaps, self.do_training) - if backward: - exp_glob = tf.train.exponential_decay(1.0, self.global_step - 10000, - 1000, autoenc_decay) - dec_factor = 1.0 - exp_glob # * self.do_training - dec_factor = tf.cond(tf.less(self.global_step, 10500), - lambda: tf.constant(0.05), lambda: dec_factor) - else: - dec_factor = 1.0 - cur = tf.cond(tf.less(tf.random_uniform([]), dec_factor), - lambda: quant_step, lambda: step) - else: - cur = step - if dropout > 0.0001: - cur = tf.nn.dropout(cur, keep_prob) - if act_noise > 0.00001: - cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale - # Do nconvs-many CGRU steps. - if do_jit and tf.get_variable_scope().reuse: - with jit_scope(): - for layer in xrange(nconvs): - cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), - cutoff, "ecgru_%d" % layer, do_layer_norm) - else: - for layer in xrange(nconvs): - cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), - cutoff, "ecgru_%d" % layer, do_layer_norm) - return cur - - zero_tgt = tf.zeros([batch_size, nmaps, 1]) - zero_tgt.set_shape([None, nmaps, 1]) - - def dec_substep(step, decided): - """Decoder sub-step.""" - cur = step - if dropout > 0.0001: - cur = tf.nn.dropout(cur, keep_prob) - if act_noise > 0.00001: - cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale - # Do nconvs-many CGRU steps. - if do_jit and tf.get_variable_scope().reuse: - with jit_scope(): - for layer in xrange(nconvs): - cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), - cutoff, "dcgru_%d" % layer, do_layer_norm) - else: - for layer in xrange(nconvs): - cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), - cutoff, "dcgru_%d" % layer, do_layer_norm) - return cur - # pylint: enable=cell-var-from-loop - - def dec_step(step, it, it_int, decided, output_ta, tgts, - mloss, nupd_in, out_idx, beam_cost): - """Decoder step.""" - nupd, mem_loss = 0, 0.0 - if mem_size > 0: - it_incr = tf.minimum(it+1, length - 1) - mem, mem_loss, nupd = memory_run( - step, nmaps, mem_size, batch_size, noclass, self.global_step, - self.do_training, self.update_mem, 10, num_gpus, - target_emb_weights, output_w, gpu_targets_tn, it_incr) - step = dec_substep(step, decided) - output_l = tf.expand_dims(tf.expand_dims(step[:, it, 0, :], 1), 1) - # Calculate argmax output. 
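# A small NumPy sketch (hypothetical sizes) of the readout performed below:
# decoder states of shape (batch, nmaps) are projected by output_w of shape
# (nmaps, noclass) into logits, and an argmax over classes gives token ids.
import numpy as np

batch, nmaps, noclass = 4, 8, 10
states = np.random.randn(batch, nmaps)
output_w_demo = np.random.randn(nmaps, noclass)
logits = states @ output_w_demo        # (batch, noclass)
token_ids = logits.argmax(axis=1)      # greedy readout, one id per example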
- output = tf.reshape(output_l, [-1, nmaps]) - # pylint: disable=cell-var-from-loop - output = tf.matmul(output, output_w) - if beam_size > 1: - beam_cost, output, out, reordered = reorder_beam( - beam_size, batch_size, beam_cost, output, it_int == 0, - [output_l, out_idx, step, decided]) - [output_l, out_idx, step, decided] = reordered - else: - # Scheduled sampling. - out = tf.multinomial(tf.stop_gradient(output), 1) - out = tf.to_int32(tf.squeeze(out, [1])) - out_write = output_ta.write(it, output_l[:batch_size, :, :, :]) - output = tf.gather(target_emb_weights, out) - output = tf.reshape(output, [-1, 1, nmaps]) - output = tf.concat(axis=1, values=[output] * height) - tgt = tgts[it, :, :, :] - selected = tf.cond(tf.less(tf.random_uniform([]), self.sampling), - lambda: output, lambda: tgt) - # pylint: enable=cell-var-from-loop - dec_write = place_at14(decided, tf.expand_dims(selected, 1), it) - out_idx = place_at13( - out_idx, tf.reshape(out, [beam_size * batch_size, 1, 1]), it) - if mem_size > 0: - mem = tf.concat(axis=2, values=[mem] * height) - dec_write = place_at14(dec_write, mem, it_incr) - return (step, dec_write, out_write, mloss + mem_loss, nupd_in + nupd, - out_idx, beam_cost) - - # Main model construction. - gpu_outputs = [] - gpu_losses = [] - gpu_grad_norms = [] - grads_list = [] - gpu_out_idx = [] - self.after_enc_step = [] - for gpu in xrange(num_gpus): # Multi-GPU towers, average gradients later. - length = self.length_tensor - length_float = tf.cast(length, tf.float32) - if gpu > 0: - tf.get_variable_scope().reuse_variables() - gpu_outputs.append([]) - gpu_losses.append([]) - gpu_grad_norms.append([]) - with tf.name_scope("gpu%d" % gpu), tf.device("/gpu:%d" % gpu): - # Main graph creation loop. - data.print_out("Creating model.") - start_time = time.time() - - # Embed inputs and calculate mask. - with tf.device("/cpu:0"): - tgt_shape = tf.shape(tf.squeeze(gpu_target[gpu], [1])) - weights = tf.where(tf.squeeze(gpu_target[gpu], [1]) > 0, - tf.ones(tgt_shape), tf.zeros(tgt_shape)) - - # Embed inputs and targets. - with tf.control_dependencies([e0]): - start = tf.gather(emb_weights, gpu_input[gpu]) # b x h x l x nmaps - gpu_targets_tn = gpu_target[gpu] # b x 1 x len - if beam_size > 0: - embedded_targets_tn = tf.gather(target_emb_weights, - gpu_targets_tn) - embedded_targets_tn = tf.transpose( - embedded_targets_tn, [2, 0, 1, 3]) # len x b x 1 x nmaps - embedded_targets_tn = tf.concat(axis=2, values=[embedded_targets_tn] * height) - - # First image comes from start by applying convolution and adding 0s. - start = tf.transpose(start, [0, 2, 1, 3]) # Now b x len x h x vec_s - first = conv_linear(start, 1, 1, vec_size, nmaps, 1, True, 0.0, "input") - first = layer_norm(first, nmaps, "input") - - # Computation steps. - keep_prob = dropout * 3.0 / tf.sqrt(length_float) - keep_prob = 1.0 - self.do_training * keep_prob - act_noise_scale = act_noise * self.do_training - - # Start with a convolutional gate merging previous step. - step = conv_gru([gpu_prev_step[gpu]], first, - kw, kh, nmaps, 1, cutoff, "first", do_layer_norm) - - # This is just for running a baseline RNN seq2seq model. - if do_rnn: - self.after_enc_step.append(step) # Not meaningful here, but needed. 
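# The attention used by the RNN baseline below is additive (Bahdanau-style):
# each encoder step is scored with v . tanh(proj(enc) + query), the scores
# are softmaxed over time, and the weights average the encoder states.
# A NumPy sketch with hypothetical shapes:
import numpy as np

def additive_attention(enc_outputs, enc_proj, query, v):
    # enc_outputs: (time, units) raw encoder states
    # enc_proj:    (time, units) their learned projection; query, v: (units,)
    scores = np.tanh(enc_proj + query) @ v     # (time,)
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                   # softmax over time
    return weights @ enc_outputs               # context vector, (units,)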
- def lstm_cell(): - return tf.contrib.rnn.BasicLSTMCell(height * nmaps) - cell = tf.contrib.rnn.MultiRNNCell( - [lstm_cell() for _ in range(nconvs)]) - with tf.variable_scope("encoder"): - encoder_outputs, encoder_state = tf.nn.dynamic_rnn( - cell, tf.reshape(step, [batch_size, length, height * nmaps]), - dtype=tf.float32, time_major=False) - - # Attention. - attn = tf.layers.dense( - encoder_outputs, height * nmaps, name="attn1") - - # pylint: disable=cell-var-from-loop - @function.Defun(noinline=True) - def attention_query(query, attn_v): - vecs = tf.tanh(attn + tf.expand_dims(query, 1)) - mask = tf.reduce_sum(vecs * tf.reshape(attn_v, [1, 1, -1]), 2) - mask = tf.nn.softmax(mask) - return tf.reduce_sum(encoder_outputs * tf.expand_dims(mask, 2), 1) - - with tf.variable_scope("decoder"): - def decoder_loop_fn(state__prev_cell_out__unused, cell_inp__cur_tgt): - """Decoder loop function.""" - state, prev_cell_out, _ = state__prev_cell_out__unused - cell_inp, cur_tgt = cell_inp__cur_tgt - attn_q = tf.layers.dense(prev_cell_out, height * nmaps, - name="attn_query") - attn_res = attention_query(attn_q, tf.get_variable( - "attn_v", [height * nmaps], - initializer=tf.random_uniform_initializer(-0.1, 0.1))) - concatenated = tf.reshape(tf.concat(axis=1, values=[cell_inp, attn_res]), - [batch_size, 2 * height * nmaps]) - cell_inp = tf.layers.dense( - concatenated, height * nmaps, name="attn_merge") - output, new_state = cell(cell_inp, state) - - mem_loss = 0.0 - if mem_size > 0: - res, mask, mem_loss = memory_call( - output, cur_tgt, height * nmaps, mem_size, noclass, - num_gpus, self.update_mem) - res = tf.gather(target_emb_weights, res) - res *= tf.expand_dims(mask[:, 0], 1) - output = tf.layers.dense( - tf.concat(axis=1, values=[output, res]), height * nmaps, name="rnnmem") - - return new_state, output, mem_loss - # pylint: enable=cell-var-from-loop - gpu_targets = tf.squeeze(gpu_target[gpu], [1]) # b x len - gpu_tgt_trans = tf.transpose(gpu_targets, [1, 0]) - dec_zero = tf.zeros([batch_size, 1], dtype=tf.int32) - dec_inp = tf.concat(axis=1, values=[dec_zero, gpu_targets]) - dec_inp = dec_inp[:, :length] - embedded_dec_inp = tf.gather(target_emb_weights, dec_inp) - embedded_dec_inp_proj = tf.layers.dense( - embedded_dec_inp, height * nmaps, name="dec_proj") - embedded_dec_inp_proj = tf.transpose(embedded_dec_inp_proj, - [1, 0, 2]) - init_vals = (encoder_state, - tf.zeros([batch_size, height * nmaps]), 0.0) - _, dec_outputs, mem_losses = tf.scan( - decoder_loop_fn, (embedded_dec_inp_proj, gpu_tgt_trans), - initializer=init_vals) - mem_loss = tf.reduce_mean(mem_losses) - outputs = tf.layers.dense(dec_outputs, nmaps, name="out_proj") - # Final convolution to get logits, list outputs. - outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) - outputs = tf.reshape(outputs, [length, batch_size, noclass]) - gpu_out_idx.append(tf.argmax(outputs, 2)) - else: # Here we go with the Neural GPU. - # Encoder. - enc_length = length - step = enc_step(step) # First step hard-coded. - # pylint: disable=cell-var-from-loop - i = tf.constant(1) - c = lambda i, _s: tf.less(i, enc_length) - def enc_step_lambda(i, step): - with tf.variable_scope(tf.get_variable_scope(), reuse=True): - new_step = enc_step(step) - return (i + 1, new_step) - _, step = tf.while_loop( - c, enc_step_lambda, [i, step], - parallel_iterations=1, swap_memory=True) - # pylint: enable=cell-var-from-loop - - self.after_enc_step.append(step) - - # Decoder. 
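# Both the encoder loop above and the decoder below follow the same TF1
# pattern: run the first step outside the while-loop so the variables are
# created once, then loop over the remaining steps with variable reuse.
# A pure-Python miniature of that control flow (step_fn is hypothetical):
def run_steps(step_fn, state, length):
    outputs = []
    state, out = step_fn(state, 0)    # first step creates the variables
    outputs.append(out)
    for i in range(1, length):        # later steps reuse them
        state, out = step_fn(state, i)
        outputs.append(out)
    return state, outputs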
- if beam_size > 0: - output_ta = tf.TensorArray( - dtype=tf.float32, size=length, dynamic_size=False, - infer_shape=False, name="outputs") - out_idx = tf.zeros([beam_size * batch_size, length, 1], - dtype=tf.int32) - decided_t = tf.zeros([beam_size * batch_size, length, - height, vec_size]) - - # Prepare for beam search. - tgts = tf.concat(axis=1, values=[embedded_targets_tn] * beam_size) - beam_cost = tf.zeros([batch_size, beam_size]) - step = tf.concat(axis=0, values=[step] * beam_size) - # First step hard-coded. - step, decided_t, output_ta, mem_loss, nupd, oi, bc = dec_step( - step, 0, 0, decided_t, output_ta, tgts, 0.0, 0, out_idx, - beam_cost) - tf.get_variable_scope().reuse_variables() - # pylint: disable=cell-var-from-loop - def step_lambda(i, step, dec_t, out_ta, ml, nu, oi, bc): - with tf.variable_scope(tf.get_variable_scope(), reuse=True): - s, d, t, nml, nu, oi, bc = dec_step( - step, i, 1, dec_t, out_ta, tgts, ml, nu, oi, bc) - return (i + 1, s, d, t, nml, nu, oi, bc) - i = tf.constant(1) - c = lambda i, _s, _d, _o, _ml, _nu, _oi, _bc: tf.less(i, length) - _, step, _, output_ta, mem_loss, nupd, out_idx, _ = tf.while_loop( - c, step_lambda, - [i, step, decided_t, output_ta, mem_loss, nupd, oi, bc], - parallel_iterations=1, swap_memory=True) - # pylint: enable=cell-var-from-loop - gpu_out_idx.append(tf.squeeze(out_idx, [2])) - outputs = output_ta.stack() - outputs = tf.squeeze(outputs, [2, 3]) # Now l x b x nmaps - else: - # If beam_size is 0 or less, we don't have a decoder. - mem_loss = 0.0 - outputs = tf.transpose(step[:, :, 1, :], [1, 0, 2]) - gpu_out_idx.append(tf.argmax(outputs, 2)) - - # Final convolution to get logits, list outputs. - outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) - outputs = tf.reshape(outputs, [length, batch_size, noclass]) - gpu_outputs[gpu] = tf.nn.softmax(outputs) - - # Calculate cross-entropy loss and normalize it. - targets_soft = make_dense(tf.squeeze(gpu_target[gpu], [1]), - noclass, 0.1) - targets_soft = tf.reshape(targets_soft, [-1, noclass]) - targets_hard = make_dense(tf.squeeze(gpu_target[gpu], [1]), - noclass, 0.0) - targets_hard = tf.reshape(targets_hard, [-1, noclass]) - output = tf.transpose(outputs, [1, 0, 2]) - xent_soft = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( - logits=tf.reshape(output, [-1, noclass]), labels=targets_soft), - [batch_size, length]) - xent_hard = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( - logits=tf.reshape(output, [-1, noclass]), labels=targets_hard), - [batch_size, length]) - low, high = 0.1 / float(noclass - 1), 0.9 - const = high * tf.log(high) + float(noclass - 1) * low * tf.log(low) - weight_sum = tf.reduce_sum(weights) + 1e-20 - true_perp = tf.reduce_sum(xent_hard * weights) / weight_sum - soft_loss = tf.reduce_sum(xent_soft * weights) / weight_sum - perp_loss = soft_loss + const - # Final loss: cross-entropy + shared parameter relaxation part + extra. - mem_loss = 0.5 * tf.reduce_mean(mem_loss) / length_float - total_loss = perp_loss + mem_loss - gpu_losses[gpu].append(true_perp) - - # Gradients. 
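# The backward pass below clips gradients by *global* norm and then adds
# annealed Gaussian noise. A NumPy sketch of the same recipe (hypothetical
# shapes; tf.clip_by_global_norm rescales every gradient by one shared
# factor rather than clipping each tensor independently):
import numpy as np

def clip_and_noise(grads, max_norm, noise_scale):
    global_norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    scale = min(1.0, max_norm / (global_norm + 1e-12))
    return [g * scale + np.random.standard_normal(g.shape) * noise_scale
            for g in grads]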
- if backward: - data.print_out("Creating backward pass for the model.") - grads = tf.gradients( - total_loss, tf.trainable_variables(), - colocate_gradients_with_ops=True) - for g_i, g in enumerate(grads): - if isinstance(g, tf.IndexedSlices): - grads[g_i] = tf.convert_to_tensor(g) - grads, norm = tf.clip_by_global_norm(grads, max_grad_norm) - gpu_grad_norms[gpu].append(norm) - for g in grads: - if grad_noise_scale > 0.001: - g += tf.truncated_normal(tf.shape(g)) * self.noise_param - grads_list.append(grads) - else: - gpu_grad_norms[gpu].append(0.0) - data.print_out("Created model for gpu %d in %.2f s." - % (gpu, time.time() - start_time)) - - self.updates = [] - self.after_enc_step = tf.concat(axis=0, values=self.after_enc_step) # Concat GPUs. - if backward: - tf.get_variable_scope()._reuse = False - tf.get_variable_scope().set_caching_device(None) - grads = [gpu_avg([grads_list[g][i] for g in xrange(num_gpus)]) - for i in xrange(len(grads_list[0]))] - update = adam_update(grads) - self.updates.append(update) - else: - self.updates.append(tf.no_op()) - - self.losses = [gpu_avg([gpu_losses[g][i] for g in xrange(num_gpus)]) - for i in xrange(len(gpu_losses[0]))] - self.out_idx = tf.concat(axis=0, values=gpu_out_idx) - self.grad_norms = [gpu_avg([gpu_grad_norms[g][i] for g in xrange(num_gpus)]) - for i in xrange(len(gpu_grad_norms[0]))] - self.outputs = [tf.concat(axis=1, values=[gpu_outputs[g] for g in xrange(num_gpus)])] - self.quantize_op = quantize_weights_op(512, 8) - if backward: - self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10) - - def step(self, sess, inp, target, do_backward_in, noise_param=None, - beam_size=2, eos_id=2, eos_cost=0.0, update_mem=None, state=None): - """Run a step of the network.""" - batch_size, height, length = inp.shape[0], inp.shape[1], inp.shape[2] - do_backward = do_backward_in - train_mode = True - if do_backward_in is None: - do_backward = False - train_mode = False - if update_mem is None: - update_mem = do_backward - feed_in = {} - # print " feeding sequences of length %d" % length - if state is None: - state = np.zeros([batch_size, length, height, self.nmaps]) - feed_in[self.prev_step.name] = state - feed_in[self.length_tensor.name] = length - feed_in[self.noise_param.name] = noise_param if noise_param else 0.0 - feed_in[self.do_training.name] = 1.0 if do_backward else 0.0 - feed_in[self.update_mem.name] = 1 if update_mem else 0 - if do_backward_in is False: - feed_in[self.sampling.name] = 0.0 - index = 0 # We're dynamic now. - feed_out = [] - if do_backward: - feed_out.append(self.updates[index]) - feed_out.append(self.grad_norms[index]) - if train_mode: - feed_out.append(self.losses[index]) - feed_in[self.input.name] = inp - feed_in[self.target.name] = target - feed_out.append(self.outputs[index]) - if train_mode: - # Make a full-sequence training step with one call to session.run. - res = sess.run([self.after_enc_step] + feed_out, feed_in) - after_enc_state, res = res[0], res[1:] - else: - # Make a full-sequence decoding step with one call to session.run. - feed_in[self.sampling.name] = 1.1 # Sample every time. 
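# The decode branch below scores sampled outputs by accumulating token
# log-probabilities, and once EOS has been produced twice it charges
# eos_cost instead. A pure-Python sketch of that bookkeeping (inputs are
# hypothetical):
import math

def score_sequence(step_choices, eos_id=2, eos_cost=0.0):
    cost, seen_eos = 0.0, 0
    for token_id, prob in step_choices:   # [(chosen id, its probability)]
        if seen_eos > 1:
            cost -= eos_cost              # penalize output after EOS
        else:
            cost += math.log(prob)
        if token_id == eos_id:
            seen_eos += 1
    return -cost                          # negated, as in the code below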
- res = sess.run([self.after_enc_step, self.out_idx] + feed_out, feed_in) - after_enc_state, out_idx = res[0], res[1] - res = [res[2][l] for l in xrange(length)] - outputs = [out_idx[:, i] for i in xrange(length)] - cost = [0.0 for _ in xrange(beam_size * batch_size)] - seen_eos = [0 for _ in xrange(beam_size * batch_size)] - for idx, logit in enumerate(res): - best = outputs[idx] - for b in xrange(batch_size): - if seen_eos[b] > 1: - cost[b] -= eos_cost - else: - cost[b] += np.log(logit[b][best[b]]) - if best[b] in [eos_id]: - seen_eos[b] += 1 - res = [[-c for c in cost]] + outputs - # Collect and output results. - offset = 0 - norm = None - if do_backward: - offset = 2 - norm = res[1] - if train_mode: - outputs = res[offset + 1] - outputs = [outputs[l] for l in xrange(length)] - return res[offset], outputs, norm, after_enc_state diff --git a/research/neural_gpu/neural_gpu_trainer.py b/research/neural_gpu/neural_gpu_trainer.py deleted file mode 100644 index 1f704b0da..000000000 --- a/research/neural_gpu/neural_gpu_trainer.py +++ /dev/null @@ -1,1027 +0,0 @@ -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Neural GPU.""" - -from __future__ import print_function - -import math -import os -import random -import sys -import threading -import time - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import program_utils -import data_utils as data -import neural_gpu as ngpu -import wmt_utils as wmt - -tf.app.flags.DEFINE_float("lr", 0.1, "Learning rate.") -tf.app.flags.DEFINE_float("init_weight", 0.8, "Initial weights deviation.") -tf.app.flags.DEFINE_float("max_grad_norm", 4.0, "Clip gradients to this norm.") -tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.") -tf.app.flags.DEFINE_float("curriculum_ppx", 9.9, "Move curriculum if ppl < X.") -tf.app.flags.DEFINE_float("curriculum_seq", 0.3, "Move curriculum if seq < X.") -tf.app.flags.DEFINE_float("dropout", 0.1, "Dropout that much.") -tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.") -tf.app.flags.DEFINE_float("max_sampling_rate", 0.1, "Maximal sampling rate.") -tf.app.flags.DEFINE_float("length_norm", 0.0, "Length normalization.") -tf.app.flags.DEFINE_float("train_beam_freq", 0.0, "Beam-based training.") -tf.app.flags.DEFINE_float("train_beam_anneal", 20000, "How many steps anneal.") -tf.app.flags.DEFINE_integer("eval_beam_steps", 4, "How many beam steps eval.") -tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.") -tf.app.flags.DEFINE_integer("steps_per_checkpoint", 100, "Steps per epoch.") -tf.app.flags.DEFINE_integer("nmaps", 64, "Number of floats in each cell.") -tf.app.flags.DEFINE_integer("vec_size", 64, "Size of word vectors.") -tf.app.flags.DEFINE_integer("train_data_size", 1000, "Training examples/len.") -tf.app.flags.DEFINE_integer("max_length", 40, "Maximum length.") -tf.app.flags.DEFINE_integer("random_seed", 125459, "Random 
seed.") -tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.") -tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.") -tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.") -tf.app.flags.DEFINE_integer("height", 4, "Height.") -tf.app.flags.DEFINE_integer("mem_size", -1, "Memory size (sqrt)") -tf.app.flags.DEFINE_integer("soft_mem_size", 1024, "Softmax memory this size.") -tf.app.flags.DEFINE_integer("num_gpus", 1, "Number of GPUs to use.") -tf.app.flags.DEFINE_integer("num_replicas", 1, "Number of replicas in use.") -tf.app.flags.DEFINE_integer("beam_size", 1, "Beam size during decoding. " - "If 0, no decoder, the non-extended Neural GPU.") -tf.app.flags.DEFINE_integer("max_target_vocab", 0, - "Maximal size of target vocabulary.") -tf.app.flags.DEFINE_integer("decode_offset", 0, "Offset for decoding.") -tf.app.flags.DEFINE_integer("task", -1, "Task id when running on borg.") -tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.") -tf.app.flags.DEFINE_integer("eval_bin_print", 3, "How many bins step in eval.") -tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.") -tf.app.flags.DEFINE_bool("atrous", False, "Whether to use atrous convs.") -tf.app.flags.DEFINE_bool("layer_norm", False, "Do layer normalization.") -tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.") -tf.app.flags.DEFINE_bool("do_train", True, "If false, only update memory.") -tf.app.flags.DEFINE_bool("rnn_baseline", False, "If true build an RNN instead.") -tf.app.flags.DEFINE_bool("simple_tokenizer", False, - "If true, tokenize on spaces only, digits are 0.") -tf.app.flags.DEFINE_bool("normalize_digits", True, - "Whether to normalize digits with simple tokenizer.") -tf.app.flags.DEFINE_integer("vocab_size", 16, "Joint vocabulary size.") -tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory") -tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.") -tf.app.flags.DEFINE_string("test_file_prefix", "", "Files to test (.en,.fr).") -tf.app.flags.DEFINE_integer("max_train_data_size", 0, - "Limit on the size of training data (0: no limit).") -tf.app.flags.DEFINE_string("word_vector_file_en", "", - "Optional file with word vectors to start training.") -tf.app.flags.DEFINE_string("word_vector_file_fr", "", - "Optional file with word vectors to start training.") -tf.app.flags.DEFINE_string("problem", "wmt", "What problem are we solving?.") - -tf.app.flags.DEFINE_integer("ps_tasks", 0, "Number of ps tasks used.") -tf.app.flags.DEFINE_string("master", "", "Name of the TensorFlow master.") - -FLAGS = tf.app.flags.FLAGS -EXTRA_EVAL = 10 -EVAL_LEN_INCR = 8 -MAXLEN_F = 2.0 - - -def zero_split(tok_list, append=None): - """Split tok_list (list of ints) on 0s, append int to all parts if given.""" - res, cur, l = [], [], 0 - for tok in tok_list: - if tok == 0: - if append is not None: - cur.append(append) - res.append(cur) - l = max(l, len(cur)) - cur = [] - else: - cur.append(tok) - if append is not None: - cur.append(append) - res.append(cur) - l = max(l, len(cur)) - return res, l - - -def read_data(source_path, target_path, buckets, max_size=None, print_out=True): - """Read data from source and target files and put into buckets. - - Args: - source_path: path to the files with token-ids for the source language. - target_path: path to the file with token-ids for the target language; - it must be aligned with the source file: n-th line contains the desired - output for n-th line from the source_path. 
-    buckets: the buckets to use.
-    max_size: maximum number of lines to read, all others will be ignored;
-      if 0 or None, data files will be read completely (no limit).
-      If set to 1, no data will be returned (empty lists of the right form).
-    print_out: whether to print out status or not.
-
-  Returns:
-    data_set: a list of length len(_buckets); data_set[n] contains a list of
-      (source, target) pairs read from the provided data files that fit
-      into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
-      len(target) < _buckets[n][1]; source and target are lists of token-ids.
-  """
-  data_set = [[] for _ in buckets]
-  counter = 0
-  if max_size != 1:
-    with tf.gfile.GFile(source_path, mode="r") as source_file:
-      with tf.gfile.GFile(target_path, mode="r") as target_file:
-        source, target = source_file.readline(), target_file.readline()
-        while source and target and (not max_size or counter < max_size):
-          counter += 1
-          if counter % 100000 == 0 and print_out:
-            print("  reading data line %d" % counter)
-            sys.stdout.flush()
-          source_ids = [int(x) for x in source.split()]
-          target_ids = [int(x) for x in target.split()]
-          source_ids, source_len = zero_split(source_ids)
-          target_ids, target_len = zero_split(target_ids, append=wmt.EOS_ID)
-          for bucket_id, size in enumerate(buckets):
-            if source_len <= size and target_len <= size:
-              data_set[bucket_id].append([source_ids, target_ids])
-              break
-          source, target = source_file.readline(), target_file.readline()
-  return data_set
-
-
-global_train_set = {"wmt": []}
-train_buckets_scale = {"wmt": []}
-
-
-def calculate_buckets_scale(data_set, buckets, problem):
-  """Calculate buckets scales for the given data set."""
-  train_bucket_sizes = [len(data_set[b]) for b in xrange(len(buckets))]
-  train_total_size = max(1, float(sum(train_bucket_sizes)))
-
-  # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
-  # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
-  # the size of the i-th training bucket, as used later.
-  if problem not in train_buckets_scale:
-    train_buckets_scale[problem] = []
-  train_buckets_scale[problem].append(
-      [sum(train_bucket_sizes[:i + 1]) / train_total_size
-       for i in xrange(len(train_bucket_sizes))])
-  return train_total_size
-
-
-def read_data_into_global(source_path, target_path, buckets,
-                          max_size=None, print_out=True):
-  """Read data into the global variables (can be in a separate thread)."""
-  # pylint: disable=global-variable-not-assigned
-  global global_train_set, train_buckets_scale
-  # pylint: enable=global-variable-not-assigned
-  data_set = read_data(source_path, target_path, buckets, max_size, print_out)
-  global_train_set["wmt"].append(data_set)
-  train_total_size = calculate_buckets_scale(data_set, buckets, "wmt")
-  if print_out:
-    print("  Finished global data reading (%d)." % train_total_size)
-
-
-def initialize(sess=None):
-  """Initialize data and model."""
-  global MAXLEN_F
-  # Create training directory if it does not exist.
-  if not tf.gfile.IsDirectory(FLAGS.train_dir):
-    data.print_out("Creating training directory %s." % FLAGS.train_dir)
-    tf.gfile.MkDir(FLAGS.train_dir)
-  decode_suffix = "beam%dln%d" % (FLAGS.beam_size,
-                                  int(100 * FLAGS.length_norm))
-  if FLAGS.mode == 0:
-    decode_suffix = ""
-  if FLAGS.task >= 0:
-    data.log_filename = os.path.join(FLAGS.train_dir,
-                                     "log%d%s" % (FLAGS.task, decode_suffix))
-  else:
-    data.log_filename = os.path.join(FLAGS.train_dir, "neural_gpu/log")
-
-  # Set random seed.
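# train_buckets_scale above holds a cumulative distribution over buckets.
# A NumPy sketch (hypothetical counts) of how a bucket is later drawn from
# it in get_bucket_id: pick a uniform r and take the first bucket whose
# cumulative fraction exceeds it.
import numpy as np

bucket_sizes = [120, 300, 80]
total = float(sum(bucket_sizes))
scale = [sum(bucket_sizes[:i + 1]) / total for i in range(len(bucket_sizes))]
r = np.random.random_sample()
bucket_id = min(i for i in range(len(scale)) if scale[i] > r)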
- if FLAGS.random_seed > 0: - seed = FLAGS.random_seed + max(0, FLAGS.task) - tf.set_random_seed(seed) - random.seed(seed) - np.random.seed(seed) - - # Check data sizes. - assert data.bins - max_length = min(FLAGS.max_length, data.bins[-1]) - while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL: - data.bins = data.bins[:-1] - if sess is None and FLAGS.task == 0 and FLAGS.num_replicas > 1: - if max_length > 60: - max_length = max_length * 1 / 2 # Save memory on chief. - min_length = min(14, max_length - 3) if FLAGS.problem == "wmt" else 3 - for p in FLAGS.problem.split("-"): - if p in ["progeval", "progsynth"]: - min_length = max(26, min_length) - assert max_length + 1 > min_length - while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL: - data.bins = data.bins[:-1] - - # Create checkpoint directory if it does not exist. - if FLAGS.mode == 0 or FLAGS.task < 0: - checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s" - % ("" if FLAGS.task < 0 else str(FLAGS.task))) - else: - checkpoint_dir = FLAGS.train_dir - if not tf.gfile.IsDirectory(checkpoint_dir): - data.print_out("Creating checkpoint directory %s." % checkpoint_dir) - tf.gfile.MkDir(checkpoint_dir) - - # Prepare data. - if FLAGS.problem == "wmt": - # Prepare WMT data. - data.print_out("Preparing WMT data in %s" % FLAGS.data_dir) - if FLAGS.simple_tokenizer: - MAXLEN_F = 3.5 - (en_train, fr_train, en_dev, fr_dev, - en_path, fr_path) = wmt.prepare_wmt_data( - FLAGS.data_dir, FLAGS.vocab_size, - tokenizer=wmt.space_tokenizer, - normalize_digits=FLAGS.normalize_digits) - else: - (en_train, fr_train, en_dev, fr_dev, - en_path, fr_path) = wmt.prepare_wmt_data( - FLAGS.data_dir, FLAGS.vocab_size) - - # Read data into buckets and compute their sizes. - fr_vocab, rev_fr_vocab = wmt.initialize_vocabulary(fr_path) - data.vocab = fr_vocab - data.rev_vocab = rev_fr_vocab - data.print_out("Reading development and training data (limit: %d)." - % FLAGS.max_train_data_size) - dev_set = {} - dev_set["wmt"] = read_data(en_dev, fr_dev, data.bins) - def data_read(size, print_out): - read_data_into_global(en_train, fr_train, data.bins, size, print_out) - data_read(50000, False) - read_thread_small = threading.Thread( - name="reading-data-small", target=lambda: data_read(900000, False)) - read_thread_small.start() - read_thread_full = threading.Thread( - name="reading-data-full", - target=lambda: data_read(FLAGS.max_train_data_size, True)) - read_thread_full.start() - data.print_out("Data reading set up.") - else: - # Prepare algorithmic data. - en_path, fr_path = None, None - tasks = FLAGS.problem.split("-") - data_size = FLAGS.train_data_size - for t in tasks: - data.print_out("Generating data for %s." % t) - if t in ["progeval", "progsynth"]: - data.init_data(t, data.bins[-1], 20 * data_size, FLAGS.vocab_size) - if len(program_utils.prog_vocab) > FLAGS.vocab_size - 2: - raise ValueError("Increase vocab_size to %d for prog-tasks." - % (len(program_utils.prog_vocab) + 2)) - data.rev_vocab = program_utils.prog_vocab - data.vocab = program_utils.prog_rev_vocab - else: - for l in xrange(max_length + EXTRA_EVAL - 1): - data.init_data(t, l, data_size, FLAGS.vocab_size) - data.init_data(t, data.bins[-2], data_size, FLAGS.vocab_size) - data.init_data(t, data.bins[-1], data_size, FLAGS.vocab_size) - if t not in global_train_set: - global_train_set[t] = [] - global_train_set[t].append(data.train_set[t]) - calculate_buckets_scale(data.train_set[t], data.bins, t) - dev_set = data.test_set - - # Grid-search parameters. 
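# The grid search below derives three factors in {0, 1, -1} from the task
# id, cycling with periods 1, 3 and 9. A sketch using integer division to
# match the Python 2 `/` of the original:
def job_id_factor(task, step):
    return ((((task // step) % 3) + 1) % 3) - 1

# For step=1, tasks 0, 1, 2 map to factors 0, 1, -1; step=3 changes the
# factor once per three consecutive task ids, and so on.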
-  lr = FLAGS.lr
-  init_weight = FLAGS.init_weight
-  max_grad_norm = FLAGS.max_grad_norm
-  if sess is not None and FLAGS.task > -1:
-    def job_id_factor(step):
-      """If jobid / step mod 3 is 0, 1, 2: say 0, 1, -1."""
-      return ((((FLAGS.task / step) % 3) + 1) % 3) - 1
-    lr *= math.pow(2, job_id_factor(1))
-    init_weight *= math.pow(1.5, job_id_factor(3))
-    max_grad_norm *= math.pow(2, job_id_factor(9))
-
-  # Print out parameters.
-  curriculum = FLAGS.curriculum_seq
-  msg1 = ("layers %d kw %d h %d kh %d batch %d noise %.2f"
-          % (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh,
-             FLAGS.batch_size, FLAGS.grad_noise_scale))
-  msg2 = ("cut %.2f lr %.3f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s"
-          % (FLAGS.cutoff, lr, init_weight, curriculum, FLAGS.nmaps,
-             FLAGS.dropout, max_grad_norm, msg1))
-  data.print_out(msg2)
-
-  # Create model and initialize it.
-  tf.get_variable_scope().set_initializer(
-      tf.orthogonal_initializer(gain=1.8 * init_weight))
-  max_sampling_rate = FLAGS.max_sampling_rate if FLAGS.mode == 0 else 0.0
-  o = FLAGS.vocab_size if FLAGS.max_target_vocab < 1 else FLAGS.max_target_vocab
-  ngpu.CHOOSE_K = FLAGS.soft_mem_size
-  do_beam_model = FLAGS.train_beam_freq > 0.0001 and FLAGS.beam_size > 1
-  beam_size = FLAGS.beam_size if FLAGS.mode > 0 and not do_beam_model else 1
-  beam_size = min(beam_size, FLAGS.beam_size)
-  beam_model = None
-  def make_ngpu(cur_beam_size, back):
-    return ngpu.NeuralGPU(
-        FLAGS.nmaps, FLAGS.vec_size, FLAGS.vocab_size, o,
-        FLAGS.dropout, max_grad_norm, FLAGS.cutoff, FLAGS.nconvs,
-        FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mem_size,
-        lr / math.sqrt(FLAGS.num_replicas), min_length + 3, FLAGS.num_gpus,
-        FLAGS.num_replicas, FLAGS.grad_noise_scale, max_sampling_rate,
-        atrous=FLAGS.atrous, do_rnn=FLAGS.rnn_baseline,
-        do_layer_norm=FLAGS.layer_norm, beam_size=cur_beam_size, backward=back)
-  if sess is None:
-    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
-      model = make_ngpu(beam_size, True)
-      if do_beam_model:
-        tf.get_variable_scope().reuse_variables()
-        beam_model = make_ngpu(FLAGS.beam_size, False)
-  else:
-    model = make_ngpu(beam_size, True)
-    if do_beam_model:
-      tf.get_variable_scope().reuse_variables()
-      beam_model = make_ngpu(FLAGS.beam_size, False)
-
-  sv = None
-  if sess is None:
-    # The supervisor configuration has a few overridden options.
-    sv = tf.train.Supervisor(logdir=checkpoint_dir,
-                             is_chief=(FLAGS.task < 1),
-                             saver=model.saver,
-                             summary_op=None,
-                             save_summaries_secs=60,
-                             save_model_secs=15 * 60,
-                             global_step=model.global_step)
-
-    config = tf.ConfigProto(allow_soft_placement=True)
-    sess = sv.PrepareSession(FLAGS.master, config=config)
-
-  data.print_out("Created model. Checkpoint dir %s" % checkpoint_dir)
-
-  # Load model from parameters if a checkpoint exists.
-  ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
-  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + ".index"):
-    data.print_out("Reading model parameters from %s"
-                   % ckpt.model_checkpoint_path)
-    model.saver.restore(sess, ckpt.model_checkpoint_path)
-  elif sv is None:
-    sess.run(tf.global_variables_initializer())
-    data.print_out("Initialized variables (no supervisor mode).")
-  elif FLAGS.task < 1 and FLAGS.mem_size > 0:
-    # sess.run(model.mem_norm_op)
-    data.print_out("Created new model and normalized mem (on chief).")
-
-  # Return the model and needed variables.
- return (model, beam_model, min_length, max_length, checkpoint_dir, - (global_train_set, dev_set, en_path, fr_path), sv, sess) - - -def m_step(model, beam_model, sess, batch_size, inp, target, bucket, nsteps, p): - """Evaluation multi-step for program synthesis.""" - state, scores, hist = None, [[-11.0 for _ in xrange(batch_size)]], [] - for _ in xrange(nsteps): - # Get the best beam (no training, just forward model). - new_target, new_first, new_inp, new_scores = get_best_beam( - beam_model, sess, inp, target, - batch_size, FLAGS.beam_size, bucket, hist, p, test_mode=True) - hist.append(new_first) - _, _, _, state = model.step(sess, inp, new_target, False, state=state) - inp = new_inp - scores.append([max(scores[-1][i], new_scores[i]) - for i in xrange(batch_size)]) - # The final step with the true target. - loss, res, _, _ = model.step(sess, inp, target, False, state=state) - return loss, res, new_target, scores[1:] - - -def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True, - offset=None, beam_model=None): - """Test model on test data of length l using the given session.""" - if not dev[p][bin_id]: - data.print_out(" bin %d (%d)\t%s\tppl NA errors NA seq-errors NA" - % (bin_id, data.bins[bin_id], p)) - return 1.0, 1.0, 0.0 - inpt, target = data.get_batch( - bin_id, batch_size, dev[p], FLAGS.height, offset) - if FLAGS.beam_size > 1 and beam_model: - loss, res, new_tgt, scores = m_step( - model, beam_model, sess, batch_size, inpt, target, bin_id, - FLAGS.eval_beam_steps, p) - score_avgs = [sum(s) / float(len(s)) for s in scores] - score_maxs = [max(s) for s in scores] - score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i]) - for i in xrange(FLAGS.eval_beam_steps)] - data.print_out(" == scores (avg, max): %s" % "; ".join(score_str)) - errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, - nprint, new_tgt, scores[-1]) - else: - loss, res, _, _ = model.step(sess, inpt, target, False) - errors, total, seq_err = data.accuracy(inpt, res, target, batch_size, - nprint) - seq_err = float(seq_err) / batch_size - if total > 0: - errors = float(errors) / total - if print_out: - data.print_out(" bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f" - % (bin_id, data.bins[bin_id], p, data.safe_exp(loss), - 100 * errors, 100 * seq_err)) - return (errors, seq_err, loss) - - -def assign_vectors(word_vector_file, embedding_key, vocab_path, sess): - """Assign the embedding_key variable from the given word vectors file.""" - # For words in the word vector file, set their embedding at start. - if not tf.gfile.Exists(word_vector_file): - data.print_out("Word vector file does not exist: %s" % word_vector_file) - sys.exit(1) - vocab, _ = wmt.initialize_vocabulary(vocab_path) - vectors_variable = [v for v in tf.trainable_variables() - if embedding_key == v.name] - if len(vectors_variable) != 1: - data.print_out("Word vector variable not found or too many.") - sys.exit(1) - vectors_variable = vectors_variable[0] - vectors = vectors_variable.eval() - data.print_out("Pre-setting word vectors from %s" % word_vector_file) - with tf.gfile.GFile(word_vector_file, mode="r") as f: - # Lines have format: dog 0.045123 -0.61323 0.413667 ... - for line in f: - line_parts = line.split() - # The first part is the word. - word = line_parts[0] - if word in vocab: - # Remaining parts are components of the vector. 
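# A parsing sketch for the word-vector lines handled below (the line shown
# is hypothetical). Note that the original's np.array(map(float, ...))
# relies on Python 2, where map returns a list; under Python 3 an explicit
# list is needed:
line = "dog 0.045123 -0.61323 0.413667"
parts = line.split()
word, word_vector = parts[0], [float(x) for x in parts[1:]]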
- word_vector = np.array(map(float, line_parts[1:])) - if len(word_vector) != FLAGS.vec_size: - data.print_out("Warn: Word '%s', Expecting vector size %d, " - "found %d" % (word, FLAGS.vec_size, - len(word_vector))) - else: - vectors[vocab[word]] = word_vector - # Assign the modified vectors to the vectors_variable in the graph. - sess.run([vectors_variable.initializer], - {vectors_variable.initializer.inputs[1]: vectors}) - - -def print_vectors(embedding_key, vocab_path, word_vector_file): - """Print vectors from the given variable.""" - _, rev_vocab = wmt.initialize_vocabulary(vocab_path) - vectors_variable = [v for v in tf.trainable_variables() - if embedding_key == v.name] - if len(vectors_variable) != 1: - data.print_out("Word vector variable not found or too many.") - sys.exit(1) - vectors_variable = vectors_variable[0] - vectors = vectors_variable.eval() - l, s = vectors.shape[0], vectors.shape[1] - data.print_out("Printing %d word vectors from %s to %s." - % (l, embedding_key, word_vector_file)) - with tf.gfile.GFile(word_vector_file, mode="w") as f: - # Lines have format: dog 0.045123 -0.61323 0.413667 ... - for i in xrange(l): - f.write(rev_vocab[i]) - for j in xrange(s): - f.write(" %.8f" % vectors[i][j]) - f.write("\n") - - -def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set): - """Get a random bucket id.""" - # Choose a bucket according to data distribution. Pick a random number - # in [0, 1] and use the corresponding interval in train_buckets_scale. - random_number_01 = np.random.random_sample() - bucket_id = min([i for i in xrange(len(train_buckets_scale_c)) - if train_buckets_scale_c[i] > random_number_01]) - while bucket_id > 0 and not data_set[bucket_id]: - bucket_id -= 1 - for _ in xrange(10 if np.random.random_sample() < 0.9 else 1): - if data.bins[bucket_id] > max_cur_length: - random_number_01 = min(random_number_01, np.random.random_sample()) - bucket_id = min([i for i in xrange(len(train_buckets_scale_c)) - if train_buckets_scale_c[i] > random_number_01]) - while bucket_id > 0 and not data_set[bucket_id]: - bucket_id -= 1 - return bucket_id - - -def score_beams(beams, target, inp, history, p, - print_out=False, test_mode=False): - """Score beams.""" - if p == "progsynth": - return score_beams_prog(beams, target, inp, history, print_out, test_mode) - elif test_mode: - return beams[0], 10.0 if str(beams[0][:len(target)]) == str(target) else 0.0 - else: - history_s = [str(h) for h in history] - best, best_score, tgt, eos_id = None, -1000.0, target, None - if p == "wmt": - eos_id = wmt.EOS_ID - if eos_id and eos_id in target: - tgt = target[:target.index(eos_id)] - for beam in beams: - if eos_id and eos_id in beam: - beam = beam[:beam.index(eos_id)] - l = min(len(tgt), len(beam)) - score = len([i for i in xrange(l) if tgt[i] == beam[i]]) / float(len(tgt)) - hist_score = 20.0 if str([b for b in beam if b > 0]) in history_s else 0.0 - if score < 1.0: - score -= hist_score - if score > best_score: - best = beam - best_score = score - return best, best_score - - -def score_beams_prog(beams, target, inp, history, print_out=False, - test_mode=False): - """Score beams for program synthesis.""" - tgt_prog = linearize(target, program_utils.prog_vocab, True, 1) - hist_progs = [linearize(h, program_utils.prog_vocab, True, 1) - for h in history] - tgt_set = set(target) - if print_out: - print("target: ", tgt_prog) - inps, tgt_outs = [], [] - for i in xrange(3): - ilist = [inp[i + 1, l] for l in xrange(inp.shape[1])] - clist = [program_utils.prog_vocab[x] for x in 
ilist if x > 0] - olist = clist[clist.index("]") + 1:] # outputs - clist = clist[1:clist.index("]")] # inputs - inps.append([int(x) for x in clist]) - if olist[0] == "[": # olist may be [int] or just int - tgt_outs.append(str([int(x) for x in olist[1:-1]])) - else: - if len(olist) == 1: - tgt_outs.append(olist[0]) - else: - print([program_utils.prog_vocab[x] for x in ilist if x > 0]) - print(olist) - print(tgt_prog) - print(program_utils.evaluate(tgt_prog, {"a": inps[-1]})) - print("AAAAA") - tgt_outs.append(olist[0]) - if not test_mode: - for _ in xrange(7): - ilen = np.random.randint(len(target) - 3) + 1 - inps.append([random.choice(range(-15, 15)) for _ in range(ilen)]) - tgt_outs.extend([program_utils.evaluate(tgt_prog, {"a": inp}) - for inp in inps[3:]]) - best, best_prog, best_score = None, "", -1000.0 - for beam in beams: - b_prog = linearize(beam, program_utils.prog_vocab, True, 1) - b_set = set(beam) - jsim = len(tgt_set & b_set) / float(len(tgt_set | b_set)) - b_outs = [program_utils.evaluate(b_prog, {"a": inp}) for inp in inps] - errs = len([x for x in b_outs if x == "ERROR"]) - imatches = len([i for i in xrange(3) if b_outs[i] == tgt_outs[i]]) - perfect = 10.0 if imatches == 3 else 0.0 - hist_score = 20.0 if b_prog in hist_progs else 0.0 - if test_mode: - score = perfect - errs - else: - matches = len([i for i in xrange(10) if b_outs[i] == tgt_outs[i]]) - score = perfect + matches + jsim - errs - if score < 10.0: - score -= hist_score - # print b_prog - # print "jsim: ", jsim, " errs: ", errs, " mtchs: ", matches, " s: ", score - if score > best_score: - best = beam - best_prog = b_prog - best_score = score - if print_out: - print("best score: ", best_score, " best prog: ", best_prog) - return best, best_score - - -def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size, - bucket, history, p, test_mode=False): - """Run beam_model, score beams, and return the best as target and in input.""" - _, output_logits, _, _ = beam_model.step( - sess, inp, target, None, beam_size=FLAGS.beam_size) - new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp) - for b in xrange(batch_size): - outputs = [] - history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])] - for h in history] - for beam_idx in xrange(beam_size): - outputs.append([int(o[beam_idx * batch_size + b]) - for o in output_logits]) - target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])] - best, best_score = score_beams( - outputs, [t for t in target_t if t > 0], inp[b, :, :], - [[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode) - scores.append(best_score) - if 1 in best: # Only until _EOS. - best = best[:best.index(1) + 1] - best += [0 for _ in xrange(len(target_t) - len(best))] - new_targets.append([best]) - first, _ = score_beams( - outputs, [t for t in target_t if t > 0], inp[b, :, :], - [[t for t in h if t > 0] for h in history_b], p, test_mode=True) - if 1 in first: # Only until _EOS. - first = first[:first.index(1) + 1] - first += [0 for _ in xrange(len(target_t) - len(first))] - new_inp[b, 0, :] = np.array(first, dtype=np.int32) - new_firsts.append([first]) - # Change target if we found a great answer. 
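# A NumPy sketch (hypothetical shapes) of the swap below: batch rows whose
# best beam scored as "perfect" (>= 10.0) overwrite the gold target, so
# training can continue from the model's own good answer.
import numpy as np

target = np.zeros((4, 1, 6), dtype=np.int32)    # (batch, 1, length)
new_target = np.ones_like(target)               # stand-in for beam output
scores = [3.0, 12.0, 9.9, 10.0]
for b in range(len(scores)):
    if scores[b] >= 10.0:
        target[b, 0, :] = new_target[b, 0, :]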
-  new_target = np.array(new_targets, dtype=np.int32)
-  for b in xrange(batch_size):
-    if scores[b] >= 10.0:
-      target[b, 0, :] = new_target[b, 0, :]
-  new_first = np.array(new_firsts, dtype=np.int32)
-  return new_target, new_first, new_inp, scores
-
-
-def train():
-  """Train the model."""
-  batch_size = FLAGS.batch_size * FLAGS.num_gpus
-  (model, beam_model, min_length, max_length, checkpoint_dir,
-   (train_set, dev_set, en_vocab_path, fr_vocab_path), sv, sess) = initialize()
-  with sess.as_default():
-    quant_op = model.quantize_op
-    max_cur_length = min(min_length + 3, max_length)
-    prev_acc_perp = [1000000 for _ in xrange(5)]
-    prev_seq_err = 1.0
-    is_chief = FLAGS.task < 1
-    do_report = False
-
-    # Main training loop.
-    while not sv.ShouldStop():
-      global_step, max_cur_length, learning_rate = sess.run(
-          [model.global_step, model.cur_length, model.lr])
-      acc_loss, acc_l1, acc_total, acc_errors, acc_seq_err = 0.0, 0.0, 0, 0, 0
-      acc_grad_norm, step_count, step_c1, step_time = 0.0, 0, 0, 0.0
-
-      # For words in the word vector file, set their embedding at start.
-      bound1 = FLAGS.steps_per_checkpoint - 1
-      if FLAGS.word_vector_file_en and global_step < bound1 and is_chief:
-        assign_vectors(FLAGS.word_vector_file_en, "embedding:0",
-                       en_vocab_path, sess)
-        if FLAGS.max_target_vocab < 1:
-          assign_vectors(FLAGS.word_vector_file_en, "target_embedding:0",
-                         en_vocab_path, sess)
-
-      if FLAGS.word_vector_file_fr and global_step < bound1 and is_chief:
-        assign_vectors(FLAGS.word_vector_file_fr, "embedding:0",
-                       fr_vocab_path, sess)
-        if FLAGS.max_target_vocab < 1:
-          assign_vectors(FLAGS.word_vector_file_fr, "target_embedding:0",
-                         fr_vocab_path, sess)
-
-      for _ in xrange(FLAGS.steps_per_checkpoint):
-        step_count += 1
-        step_c1 += 1
-        global_step = int(model.global_step.eval())
-        train_beam_anneal = global_step / float(FLAGS.train_beam_anneal)
-        train_beam_freq = FLAGS.train_beam_freq * min(1.0, train_beam_anneal)
-        p = random.choice(FLAGS.problem.split("-"))
-        train_set = global_train_set[p][-1]
-        bucket_id = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
-                                  train_set)
-        # Prefer longer stuff 60% of time if not wmt.
-        if np.random.randint(100) < 60 and FLAGS.problem != "wmt":
-          bucket1 = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
-                                  train_set)
-          bucket_id = max(bucket1, bucket_id)
-
-        # Run a step and time it.
-        start_time = time.time()
-        inp, target = data.get_batch(bucket_id, batch_size, train_set,
-                                     FLAGS.height)
-        noise_param = math.sqrt(math.pow(global_step + 1, -0.55) *
-                                prev_seq_err) * FLAGS.grad_noise_scale
-        # In multi-step mode, we use best from beam for middle steps.
-        state, new_target, scores, history = None, None, None, []
-        while (FLAGS.beam_size > 1 and
-               train_beam_freq > np.random.random_sample()):
-          # Get the best beam (no training, just forward model).
-          new_target, new_first, new_inp, scores = get_best_beam(
-              beam_model, sess, inp, target,
-              batch_size, FLAGS.beam_size, bucket_id, history, p)
-          history.append(new_first)
-          # Training step with the previous input and the best beam as target.
-          _, _, _, state = model.step(sess, inp, new_target, FLAGS.do_train,
-                                      noise_param, update_mem=True, state=state)
-          # Change input to the new one for the next step.
-          inp = new_inp
-          # If all results are great, stop (TODO: avoid waiting for all?).
-          if FLAGS.nprint > 1:
-            print(scores)
-          if sum(scores) / float(len(scores)) >= 10.0:
-            break
-        # The final step with the true target.
-        loss, res, gnorm, _ = model.step(
-            sess, inp, target, FLAGS.do_train, noise_param,
-            update_mem=True, state=state)
-        step_time += time.time() - start_time
-        acc_grad_norm += 0.0 if gnorm is None else float(gnorm)
-
-        # Accumulate statistics.
-        acc_loss += loss
-        acc_l1 += loss
-        errors, total, seq_err = data.accuracy(
-            inp, res, target, batch_size, 0, new_target, scores)
-        if FLAGS.nprint > 1:
-          print("seq_err: ", seq_err)
-        acc_total += total
-        acc_errors += errors
-        acc_seq_err += seq_err
-
-        # Report summary every 10 steps.
-        if step_count + 3 > FLAGS.steps_per_checkpoint:
-          do_report = True  # Don't pollute plot too early.
-        if is_chief and step_count % 10 == 1 and do_report:
-          cur_loss = acc_l1 / float(step_c1)
-          acc_l1, step_c1 = 0.0, 0
-          cur_perp = data.safe_exp(cur_loss)
-          summary = tf.Summary()
-          summary.value.extend(
-              [tf.Summary.Value(tag="log_perplexity", simple_value=cur_loss),
-               tf.Summary.Value(tag="perplexity", simple_value=cur_perp)])
-          sv.SummaryComputed(sess, summary, global_step)
-
-      # Normalize and print out accumulated statistics.
-      acc_loss /= step_count
-      step_time /= FLAGS.steps_per_checkpoint
-      acc_seq_err = float(acc_seq_err) / (step_count * batch_size)
-      prev_seq_err = max(0.0, acc_seq_err - 0.02)  # No noise at error < 2%.
-      acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0
-      t_size = float(sum([len(x) for x in train_set])) / float(1000000)
-      msg = ("step %d step-time %.2f train-size %.3f lr %.6f grad-norm %.4f"
-             % (global_step + 1, step_time, t_size, learning_rate,
-                acc_grad_norm / FLAGS.steps_per_checkpoint))
-      data.print_out("%s len %d ppl %.6f errors %.2f sequence-errors %.2f" %
-                     (msg, max_cur_length, data.safe_exp(acc_loss),
-                      100*acc_errors, 100*acc_seq_err))
-
-      # If errors are below the curriculum threshold, move curriculum forward.
-      is_good = FLAGS.curriculum_ppx > data.safe_exp(acc_loss)
-      is_good = is_good and FLAGS.curriculum_seq > acc_seq_err
-      if is_good and is_chief:
-        if FLAGS.quantize:
-          # Quantize weights.
-          data.print_out("  Quantizing parameters.")
-          sess.run([quant_op])
-        # Increase current length (until the next length with training data).
-        sess.run(model.cur_length_incr_op)
-        # Forget last perplexities if we're not yet at the end.
-        if max_cur_length < max_length:
-          prev_acc_perp.append(1000000)
-
-      # Lower learning rate if we're worse than the last 5 checkpoints.
-      acc_perp = data.safe_exp(acc_loss)
-      if acc_perp > max(prev_acc_perp[-5:]) and is_chief:
-        sess.run(model.lr_decay_op)
-      prev_acc_perp.append(acc_perp)
-
-      # Save checkpoint.
-      if is_chief:
-        checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt")
-        model.saver.save(sess, checkpoint_path,
-                         global_step=model.global_step)
-
-      # Run evaluation.
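# The curriculum policy above, in miniature: advance the maximum length
# when both perplexity and sequence error beat their flag thresholds, and
# decay the learning rate (by the 0.995 of lr_decay_op) whenever perplexity
# is worse than the last five checkpoints. A pure-Python sketch with the
# default flag values:
def curriculum_update(ppl, seq_err, prev_ppls, lr,
                      ppl_limit=9.9, seq_limit=0.3, decay=0.995):
    advance = ppl < ppl_limit and seq_err < seq_limit
    if ppl > max(prev_ppls[-5:]):
        lr *= decay
    prev_ppls.append(ppl)
    return advance, lr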
- bin_bound = 4 - for p in FLAGS.problem.split("-"): - total_loss, total_err, tl_counter = 0.0, 0.0, 0 - for bin_id in xrange(len(data.bins)): - if bin_id < bin_bound or bin_id % FLAGS.eval_bin_print == 1: - err, _, loss = single_test(bin_id, model, sess, FLAGS.nprint, - batch_size * 4, dev_set, p, - beam_model=beam_model) - if loss > 0.0: - total_loss += loss - total_err += err - tl_counter += 1 - test_loss = total_loss / max(1, tl_counter) - test_err = total_err / max(1, tl_counter) - test_perp = data.safe_exp(test_loss) - summary = tf.Summary() - summary.value.extend( - [tf.Summary.Value(tag="test/%s/loss" % p, simple_value=test_loss), - tf.Summary.Value(tag="test/%s/error" % p, simple_value=test_err), - tf.Summary.Value(tag="test/%s/perplexity" % p, - simple_value=test_perp)]) - sv.SummaryComputed(sess, summary, global_step) - - -def linearize(output, rev_fr_vocab, simple_tokenizer=None, eos_id=wmt.EOS_ID): - # If there is an EOS symbol in outputs, cut them at that point (WMT). - if eos_id in output: - output = output[:output.index(eos_id)] - # Print out French sentence corresponding to outputs. - if simple_tokenizer or FLAGS.simple_tokenizer: - vlen = len(rev_fr_vocab) - def vget(o): - if o < vlen: - return rev_fr_vocab[o] - return "UNK" - return " ".join([vget(o) for o in output]) - else: - return wmt.basic_detokenizer([rev_fr_vocab[o] for o in output]) - - -def evaluate(): - """Evaluate an existing model.""" - batch_size = FLAGS.batch_size * FLAGS.num_gpus - with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: - (model, beam_model, _, _, _, - (_, dev_set, en_vocab_path, fr_vocab_path), _, sess) = initialize(sess) - for p in FLAGS.problem.split("-"): - for bin_id in xrange(len(data.bins)): - if (FLAGS.task >= 0 and bin_id > 4) or (FLAGS.nprint == 0 and - bin_id > 8 and p == "wmt"): - break - single_test(bin_id, model, sess, FLAGS.nprint, batch_size, dev_set, p, - beam_model=beam_model) - path = FLAGS.test_file_prefix - xid = "" if FLAGS.task < 0 else ("%.4d" % (FLAGS.task+FLAGS.decode_offset)) - en_path, fr_path = path + ".en" + xid, path + ".fr" + xid - # Evaluate the test file if they exist. - if path and tf.gfile.Exists(en_path) and tf.gfile.Exists(fr_path): - data.print_out("Translating test set %s" % en_path) - # Read lines. - en_lines, fr_lines = [], [] - with tf.gfile.GFile(en_path, mode="r") as f: - for line in f: - en_lines.append(line.strip()) - with tf.gfile.GFile(fr_path, mode="r") as f: - for line in f: - fr_lines.append(line.strip()) - # Tokenize and convert to ids. - en_vocab, _ = wmt.initialize_vocabulary(en_vocab_path) - _, rev_fr_vocab = wmt.initialize_vocabulary(fr_vocab_path) - if FLAGS.simple_tokenizer: - en_ids = [wmt.sentence_to_token_ids( - l, en_vocab, tokenizer=wmt.space_tokenizer, - normalize_digits=FLAGS.normalize_digits) - for l in en_lines] - else: - en_ids = [wmt.sentence_to_token_ids(l, en_vocab) for l in en_lines] - # Translate. - results = [] - for idx, token_ids in enumerate(en_ids): - if idx % 5 == 0: - data.print_out("Translating example %d of %d." % (idx, len(en_ids))) - # Which bucket does it belong to? - buckets = [b for b in xrange(len(data.bins)) - if data.bins[b] >= len(token_ids)] - if buckets: - result, result_cost = [], 100000000.0 - for bucket_id in buckets: - if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR: - break - # Get a 1-element batch to feed the sentence to the model. 
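# Bucket selection below keeps the candidate with the lowest
# length-normalized cost: each bucket's raw loss gets a bonus proportional
# to its length, bins[bucket] * length_norm, and the lowest adjusted cost
# wins. A sketch with hypothetical (loss, bucket_length) pairs:
length_norm = 0.1
candidates = [(12.4, 20), (12.1, 28)]
best_cost = min(loss - length * length_norm for loss, length in candidates)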
- used_batch_size = 1 # batch_size - inp, target = data.get_batch( - bucket_id, used_batch_size, None, FLAGS.height, - preset=([token_ids], [[]])) - loss, output_logits, _, _ = model.step( - sess, inp, target, None, beam_size=FLAGS.beam_size) - outputs = [int(o[0]) for o in output_logits] - loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm) - if FLAGS.simple_tokenizer: - cur_out = outputs - if wmt.EOS_ID in cur_out: - cur_out = cur_out[:cur_out.index(wmt.EOS_ID)] - res_tags = [rev_fr_vocab[o] for o in cur_out] - bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags) - loss += 1000.0 * bad_words + 100.0 * bad_brack - # print (bucket_id, loss) - if loss < result_cost: - result = outputs - result_cost = loss - final = linearize(result, rev_fr_vocab) - results.append("%s\t%s\n" % (final, fr_lines[idx])) - # print result_cost - sys.stderr.write(results[-1]) - sys.stderr.flush() - else: - sys.stderr.write("TOOO_LONG\t%s\n" % fr_lines[idx]) - sys.stderr.flush() - if xid: - decode_suffix = "beam%dln%dn" % (FLAGS.beam_size, - int(100 * FLAGS.length_norm)) - with tf.gfile.GFile(path + ".res" + decode_suffix + xid, mode="w") as f: - for line in results: - f.write(line) - - -def mul(l): - res = 1.0 - for s in l: - res *= s - return res - - -def interactive(): - """Interactively probe an existing model.""" - with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: - # Initialize model. - (model, _, _, _, _, (_, _, en_path, fr_path), _, _) = initialize(sess) - # Load vocabularies. - en_vocab, rev_en_vocab = wmt.initialize_vocabulary(en_path) - _, rev_fr_vocab = wmt.initialize_vocabulary(fr_path) - # Print out vectors and variables. - if FLAGS.nprint > 0 and FLAGS.word_vector_file_en: - print_vectors("embedding:0", en_path, FLAGS.word_vector_file_en) - if FLAGS.nprint > 0 and FLAGS.word_vector_file_fr: - print_vectors("target_embedding:0", fr_path, FLAGS.word_vector_file_fr) - total = 0 - for v in tf.trainable_variables(): - shape = v.get_shape().as_list() - total += mul(shape) - print(v.name, shape, mul(shape)) - print(total) - # Start interactive loop. - sys.stdout.write("Input to Neural GPU Translation Model.\n") - sys.stdout.write("> ") - sys.stdout.flush() - inpt = sys.stdin.readline(), "" - while inpt: - cures = [] - # Get token-ids for the input sentence. - if FLAGS.simple_tokenizer: - token_ids = wmt.sentence_to_token_ids( - inpt, en_vocab, tokenizer=wmt.space_tokenizer, - normalize_digits=FLAGS.normalize_digits) - else: - token_ids = wmt.sentence_to_token_ids(inpt, en_vocab) - print([rev_en_vocab[t] for t in token_ids]) - # Which bucket does it belong to? - buckets = [b for b in xrange(len(data.bins)) - if data.bins[b] >= max(len(token_ids), len(cures))] - if cures: - buckets = [buckets[0]] - if buckets: - result, result_cost = [], 10000000.0 - for bucket_id in buckets: - if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR: - break - glen = 1 - for gen_idx in xrange(glen): - # Get a 1-element batch to feed the sentence to the model. - inp, target = data.get_batch( - bucket_id, 1, None, FLAGS.height, preset=([token_ids], [cures])) - loss, output_logits, _, _ = model.step( - sess, inp, target, None, beam_size=FLAGS.beam_size, - update_mem=False) - # If it is a greedy decoder, outputs are argmaxes of output_logits. 
- if FLAGS.beam_size > 1: - outputs = [int(o) for o in output_logits] - else: - loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm) - outputs = [int(np.argmax(logit, axis=1)) - for logit in output_logits] - print([rev_fr_vocab[t] for t in outputs]) - print(loss, data.bins[bucket_id]) - print(linearize(outputs, rev_fr_vocab)) - cures.append(outputs[gen_idx]) - print(cures) - print(linearize(cures, rev_fr_vocab)) - if FLAGS.simple_tokenizer: - cur_out = outputs - if wmt.EOS_ID in cur_out: - cur_out = cur_out[:cur_out.index(wmt.EOS_ID)] - res_tags = [rev_fr_vocab[o] for o in cur_out] - bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags) - loss += 1000.0 * bad_words + 100.0 * bad_brack - if loss < result_cost: - result = outputs - result_cost = loss - print("FINAL", result_cost) - print([rev_fr_vocab[t] for t in result]) - print(linearize(result, rev_fr_vocab)) - else: - print("TOOO_LONG") - sys.stdout.write("> ") - sys.stdout.flush() - inpt = sys.stdin.readline(), "" - - -def main(_): - if FLAGS.mode == 0: - train() - elif FLAGS.mode == 1: - evaluate() - else: - interactive() - -if __name__ == "__main__": - tf.app.run() diff --git a/research/neural_gpu/program_utils.py b/research/neural_gpu/program_utils.py deleted file mode 100644 index 1f49d0129..000000000 --- a/research/neural_gpu/program_utils.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Utilities for generating program synthesis and evaluation data.""" - -import contextlib -import sys -import random -import os - -try: - import StringIO -except ImportError: - from io import StringIO - -class ListType(object): - def __init__(self, arg): - self.arg = arg - - def __str__(self): - return "[" + str(self.arg) + "]" - - def __eq__(self, other): - if not isinstance(other, ListType): - return False - return self.arg == other.arg - - def __hash__(self): - return hash(self.arg) - -class VarType(object): - def __init__(self, arg): - self.arg = arg - - def __str__(self): - return str(self.arg) - - def __eq__(self, other): - if not isinstance(other, VarType): - return False - return self.arg == other.arg - - def __hash__(self): - return hash(self.arg) - -class FunctionType(object): - def __init__(self, args): - self.args = args - - def __str__(self): - return str(self.args[0]) + " -> " + str(self.args[1]) - - def __eq__(self, other): - if not isinstance(other, FunctionType): - return False - return self.args == other.args - - def __hash__(self): - return hash(tuple(self.args)) - - -class Function(object): - def __init__(self, name, arg_types, output_type, fn_arg_types = None): - self.name = name - self.arg_types = arg_types - self.fn_arg_types = fn_arg_types or [] - self.output_type = output_type - -Null = 100 -## Functions -f_head = Function("c_head", [ListType("Int")], "Int") -def c_head(xs): return xs[0] if len(xs) > 0 else Null - -f_last = Function("c_last", [ListType("Int")], "Int") -def c_last(xs): return xs[-1] if len(xs) > 0 else Null - -f_take = Function("c_take", ["Int", ListType("Int")], ListType("Int")) -def c_take(n, xs): return xs[:n] - -f_drop = Function("c_drop", ["Int", ListType("Int")], ListType("Int")) -def c_drop(n, xs): return xs[n:] - -f_access = Function("c_access", ["Int", ListType("Int")], "Int") -def c_access(n, xs): return xs[n] if n >= 0 and len(xs) > n else Null - -f_max = Function("c_max", [ListType("Int")], "Int") -def c_max(xs): return max(xs) if len(xs) > 0 else Null - -f_min = Function("c_min", [ListType("Int")], "Int") -def c_min(xs): return min(xs) if len(xs) > 0 else Null - -f_reverse = Function("c_reverse", [ListType("Int")], ListType("Int")) -def c_reverse(xs): return list(reversed(xs)) - -f_sort = Function("sorted", [ListType("Int")], ListType("Int")) -# def c_sort(xs): return sorted(xs) - -f_sum = Function("sum", [ListType("Int")], "Int") -# def c_sum(xs): return sum(xs) - - -## Lambdas -# Int -> Int -def plus_one(x): return x + 1 -def minus_one(x): return x - 1 -def times_two(x): return x * 2 -def neg(x): return x * (-1) -def div_two(x): return int(x/2) -def sq(x): return x**2 -def times_three(x): return x * 3 -def div_three(x): return int(x/3) -def times_four(x): return x * 4 -def div_four(x): return int(x/4) - -# Int -> Bool -def pos(x): return x > 0 -def neg(x): return x < 0 -def even(x): return x%2 == 0 -def odd(x): return x%2 == 1 - -# Int -> Int -> Int -def add(x, y): return x + y -def sub(x, y): return x - y -def mul(x, y): return x * y - -# HOFs -f_map = Function("map", [ListType("Int")], - ListType("Int"), - [FunctionType(["Int", "Int"])]) -f_filter = Function("filter", [ListType("Int")], - ListType("Int"), - [FunctionType(["Int", "Bool"])]) -f_count = Function("c_count", [ListType("Int")], - "Int", - [FunctionType(["Int", "Bool"])]) -def c_count(f, xs): return len([x for x in xs if f(x)]) - -f_zipwith = Function("c_zipwith", [ListType("Int"), 
ListType("Int")], - ListType("Int"), - [FunctionType(["Int", "Int", "Int"])]) #FIX -def c_zipwith(f, xs, ys): return [f(x, y) for (x, y) in zip(xs, ys)] - -f_scan = Function("c_scan", [ListType("Int")], - ListType("Int"), - [FunctionType(["Int", "Int", "Int"])]) -def c_scan(f, xs): - out = xs - for i in range(1, len(xs)): - out[i] = f(xs[i], xs[i -1]) - return out - -@contextlib.contextmanager -def stdoutIO(stdout=None): - old = sys.stdout - if stdout is None: - stdout = StringIO.StringIO() - sys.stdout = stdout - yield stdout - sys.stdout = old - - -def evaluate(program_str, input_names_to_vals, default="ERROR"): - exec_str = [] - for name, val in input_names_to_vals.iteritems(): - exec_str += name + " = " + str(val) + "; " - exec_str += program_str - if type(exec_str) is list: - exec_str = "".join(exec_str) - - with stdoutIO() as s: - # pylint: disable=bare-except - try: - exec(exec_str + " print(out)") - return s.getvalue()[:-1] - except: - return default - # pylint: enable=bare-except - - -class Statement(object): - """Statement class.""" - - def __init__(self, fn, output_var, arg_vars, fn_args=None): - self.fn = fn - self.output_var = output_var - self.arg_vars = arg_vars - self.fn_args = fn_args or [] - - def __str__(self): - return "%s = %s(%s%s%s)"%(self.output_var, - self.fn.name, - ", ".join(self.fn_args), - ", " if self.fn_args else "", - ", ".join(self.arg_vars)) - - def substitute(self, env): - self.output_var = env.get(self.output_var, self.output_var) - self.arg_vars = [env.get(v, v) for v in self.arg_vars] - - -class ProgramGrower(object): - """Grow programs.""" - - def __init__(self, functions, types_to_lambdas): - self.functions = functions - self.types_to_lambdas = types_to_lambdas - - def grow_body(self, new_var_name, dependencies, types_to_vars): - """Grow the program body.""" - choices = [] - for f in self.functions: - if all([a in types_to_vars.keys() for a in f.arg_types]): - choices.append(f) - - f = random.choice(choices) - args = [] - for t in f.arg_types: - possible_vars = random.choice(types_to_vars[t]) - var = random.choice(possible_vars) - args.append(var) - dependencies.setdefault(new_var_name, []).extend( - [var] + (dependencies[var])) - - fn_args = [random.choice(self.types_to_lambdas[t]) for t in f.fn_arg_types] - types_to_vars.setdefault(f.output_type, []).append(new_var_name) - - return Statement(f, new_var_name, args, fn_args) - - def grow(self, program_len, input_types): - """Grow the program.""" - var_names = list(reversed(map(chr, range(97, 123)))) - dependencies = dict() - types_to_vars = dict() - input_names = [] - for t in input_types: - var = var_names.pop() - dependencies[var] = [] - types_to_vars.setdefault(t, []).append(var) - input_names.append(var) - - statements = [] - for _ in range(program_len - 1): - var = var_names.pop() - statements.append(self.grow_body(var, dependencies, types_to_vars)) - statements.append(self.grow_body("out", dependencies, types_to_vars)) - - new_var_names = [c for c in map(chr, range(97, 123)) - if c not in input_names] - new_var_names.reverse() - keep_statements = [] - env = dict() - for s in statements: - if s.output_var in dependencies["out"]: - keep_statements.append(s) - env[s.output_var] = new_var_names.pop() - if s.output_var == "out": - keep_statements.append(s) - - for k in keep_statements: - k.substitute(env) - - return Program(input_names, input_types, ";".join( - [str(k) for k in keep_statements])) - - -class Program(object): - """The program class.""" - - def __init__(self, input_names, input_types, 
body): - self.input_names = input_names - self.input_types = input_types - self.body = body - - def evaluate(self, inputs): - """Evaluate this program.""" - if len(inputs) != len(self.input_names): - raise AssertionError("inputs and input_names have to" - "have the same len. inp: %s , names: %s" % - (str(inputs), str(self.input_names))) - inp_str = "" - for (name, inp) in zip(self.input_names, inputs): - inp_str += name + " = " + str(inp) + "; " - - with stdoutIO() as s: - # pylint: disable=exec-used - exec(inp_str + self.body + "; print(out)") - # pylint: enable=exec-used - return s.getvalue()[:-1] - - def flat_str(self): - out = "" - for s in self.body.split(";"): - out += s + ";" - return out - - def __str__(self): - out = "" - for (n, t) in zip(self.input_names, self.input_types): - out += n + " = " + str(t) + "\n" - for s in self.body.split(";"): - out += s + "\n" - return out - - -prog_vocab = [] -prog_rev_vocab = {} - - -def tokenize(string, tokens=None): - """Tokenize the program string.""" - if tokens is None: - tokens = prog_vocab - tokens = sorted(tokens, key=len, reverse=True) - out = [] - string = string.strip() - while string: - found = False - for t in tokens: - if string.startswith(t): - out.append(t) - string = string[len(t):] - found = True - break - if not found: - raise ValueError("Couldn't tokenize this: " + string) - string = string.strip() - return out - - -def clean_up(output, max_val=100): - o = eval(str(output)) - if isinstance(o, bool): - return o - if isinstance(o, int): - if o >= 0: - return min(o, max_val) - else: - return max(o, -1 * max_val) - if isinstance(o, list): - return [clean_up(l) for l in o] - - -def make_vocab(): - gen(2, 0) - - -def gen(max_len, how_many): - """Generate some programs.""" - functions = [f_head, f_last, f_take, f_drop, f_access, f_max, f_min, - f_reverse, f_sort, f_sum, f_map, f_filter, f_count, f_zipwith, - f_scan] - - types_to_lambdas = { - FunctionType(["Int", "Int"]): ["plus_one", "minus_one", "times_two", - "div_two", "sq", "times_three", - "div_three", "times_four", "div_four"], - FunctionType(["Int", "Bool"]): ["pos", "neg", "even", "odd"], - FunctionType(["Int", "Int", "Int"]): ["add", "sub", "mul"] - } - - tokens = [] - for f in functions: - tokens.append(f.name) - for v in types_to_lambdas.values(): - tokens.extend(v) - tokens.extend(["=", ";", ",", "(", ")", "[", "]", "Int", "out"]) - tokens.extend(map(chr, range(97, 123))) - - io_tokens = map(str, range(-220, 220)) - if not prog_vocab: - prog_vocab.extend(["_PAD", "_EOS"] + tokens + io_tokens) - for i, t in enumerate(prog_vocab): - prog_rev_vocab[t] = i - - io_tokens += [",", "[", "]", ")", "(", "None"] - grower = ProgramGrower(functions=functions, - types_to_lambdas=types_to_lambdas) - - def mk_inp(l): - return [random.choice(range(-5, 5)) for _ in range(l)] - - tar = [ListType("Int")] - inps = [[mk_inp(3)], [mk_inp(5)], [mk_inp(7)], [mk_inp(15)]] - - save_prefix = None - outcomes_to_programs = dict() - tried = set() - counter = 0 - choices = [0] if max_len == 0 else range(max_len) - while counter < 100 * how_many and len(outcomes_to_programs) < how_many: - counter += 1 - length = random.choice(choices) - t = grower.grow(length, tar) - while t in tried: - length = random.choice(choices) - t = grower.grow(length, tar) - # print(t.flat_str()) - tried.add(t) - outcomes = [clean_up(t.evaluate(i)) for i in inps] - outcome_str = str(zip(inps, outcomes)) - if outcome_str in outcomes_to_programs: - outcomes_to_programs[outcome_str] = min( - [t.flat_str(), 
outcomes_to_programs[outcome_str]], - key=lambda x: len(tokenize(x, tokens))) - else: - outcomes_to_programs[outcome_str] = t.flat_str() - if counter % 5000 == 0: - print("== proggen: tried: " + str(counter)) - print("== proggen: kept: " + str(len(outcomes_to_programs))) - - if counter % 250000 == 0 and save_prefix is not None: - print("saving...") - save_counter = 0 - progfilename = os.path.join(save_prefix, "prog_" + str(counter) + ".txt") - iofilename = os.path.join(save_prefix, "io_" + str(counter) + ".txt") - prog_token_filename = os.path.join(save_prefix, - "prog_tokens_" + str(counter) + ".txt") - io_token_filename = os.path.join(save_prefix, - "io_tokens_" + str(counter) + ".txt") - with open(progfilename, "a+") as fp, \ - open(iofilename, "a+") as fi, \ - open(prog_token_filename, "a+") as ftp, \ - open(io_token_filename, "a+") as fti: - for (o, p) in outcomes_to_programs.iteritems(): - save_counter += 1 - if save_counter % 500 == 0: - print("saving %d of %d" % (save_counter, len(outcomes_to_programs))) - fp.write(p+"\n") - fi.write(o+"\n") - ftp.write(str(tokenize(p, tokens))+"\n") - fti.write(str(tokenize(o, io_tokens))+"\n") - - return list(outcomes_to_programs.values()) diff --git a/research/neural_gpu/wmt_utils.py b/research/neural_gpu/wmt_utils.py deleted file mode 100644 index ef831918f..000000000 --- a/research/neural_gpu/wmt_utils.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright 2015 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utilities for downloading data from WMT, tokenizing, vocabularies.""" - -from __future__ import print_function - -import gzip -import os -import re -import tarfile - -from six.moves import urllib -import tensorflow as tf - -# Special vocabulary symbols - we always put them at the start. -_PAD = b"_PAD" -_GO = b"_GO" -_EOS = b"_EOS" -_UNK = b"_CHAR_UNK" -_SPACE = b"_SPACE" -_START_VOCAB = [_PAD, _GO, _EOS, _UNK, _SPACE] - -PAD_ID = 0 -GO_ID = 1 -EOS_ID = 2 -UNK_ID = 3 -SPACE_ID = 4 - -# Regular expressions used to tokenize. -_CHAR_MARKER = "_CHAR_" -_CHAR_MARKER_LEN = len(_CHAR_MARKER) -_SPEC_CHARS = "" + chr(226) + chr(153) + chr(128) -_PUNCTUATION = "][.,!?\"':;%$#@&*+}{|><=/^~)(_`,0123456789" + _SPEC_CHARS + "-" -_WORD_SPLIT = re.compile("([" + _PUNCTUATION + "])") -_OLD_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])") -_DIGIT_RE = re.compile(br"\d") - -# URLs for WMT data. 
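# A minimal end-to-end usage sketch for the download and preparation
# helpers defined below; the directory and vocabulary size are
# hypothetical placeholders:
#
#   en_train, fr_train, en_dev, fr_dev, vocab, _ = prepare_wmt_data(
#       "/tmp/wmt_data", 40000)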
-_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/training-giga-fren.tar" -_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/dev-v2.tgz" - - -def maybe_download(directory, filename, url): - """Download filename from url unless it's already in directory.""" - if not tf.gfile.Exists(directory): - print("Creating directory %s" % directory) - os.mkdir(directory) - filepath = os.path.join(directory, filename) - if not tf.gfile.Exists(filepath): - print("Downloading %s to %s" % (url, filepath)) - filepath, _ = urllib.request.urlretrieve(url, filepath) - statinfo = os.stat(filepath) - print("Successfully downloaded", filename, statinfo.st_size, "bytes") - return filepath - - -def gunzip_file(gz_path, new_path): - """Unzips from gz_path into new_path.""" - print("Unpacking %s to %s" % (gz_path, new_path)) - with gzip.open(gz_path, "rb") as gz_file: - with open(new_path, "wb") as new_file: - for line in gz_file: - new_file.write(line) - - -def get_wmt_enfr_train_set(directory): - """Download the WMT en-fr training corpus to directory unless it's there.""" - train_path = os.path.join(directory, "giga-fren.release2.fixed") - if not (tf.gfile.Exists(train_path +".fr") and - tf.gfile.Exists(train_path +".en")): - corpus_file = maybe_download(directory, "training-giga-fren.tar", - _WMT_ENFR_TRAIN_URL) - print("Extracting tar file %s" % corpus_file) - with tarfile.open(corpus_file, "r") as corpus_tar: - corpus_tar.extractall(directory) - gunzip_file(train_path + ".fr.gz", train_path + ".fr") - gunzip_file(train_path + ".en.gz", train_path + ".en") - return train_path - - -def get_wmt_enfr_dev_set(directory): - """Download the WMT en-fr training corpus to directory unless it's there.""" - dev_name = "newstest2013" - dev_path = os.path.join(directory, dev_name) - if not (tf.gfile.Exists(dev_path + ".fr") and - tf.gfile.Exists(dev_path + ".en")): - dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL) - print("Extracting tgz file %s" % dev_file) - with tarfile.open(dev_file, "r:gz") as dev_tar: - fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") - en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") - fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix. 
- en_dev_file.name = dev_name + ".en" - dev_tar.extract(fr_dev_file, directory) - dev_tar.extract(en_dev_file, directory) - return dev_path - - -def is_char(token): - if len(token) > _CHAR_MARKER_LEN: - if token[:_CHAR_MARKER_LEN] == _CHAR_MARKER: - return True - return False - - -def basic_detokenizer(tokens): - """Reverse the process of the basic tokenizer below.""" - result = [] - previous_nospace = True - for t in tokens: - if is_char(t): - result.append(t[_CHAR_MARKER_LEN:]) - previous_nospace = True - elif t == _SPACE: - result.append(" ") - previous_nospace = True - elif previous_nospace: - result.append(t) - previous_nospace = False - else: - result.extend([" ", t]) - previous_nospace = False - return "".join(result) - - -old_style = False - - -def basic_tokenizer(sentence): - """Very basic tokenizer: split the sentence into a list of tokens.""" - words = [] - if old_style: - for space_separated_fragment in sentence.strip().split(): - words.extend(re.split(_OLD_WORD_SPLIT, space_separated_fragment)) - return [w for w in words if w] - for space_separated_fragment in sentence.strip().split(): - tokens = [t for t in re.split(_WORD_SPLIT, space_separated_fragment) if t] - first_is_char = False - for i, t in enumerate(tokens): - if len(t) == 1 and t in _PUNCTUATION: - tokens[i] = _CHAR_MARKER + t - if i == 0: - first_is_char = True - if words and words[-1] != _SPACE and (first_is_char or is_char(words[-1])): - tokens = [_SPACE] + tokens - spaced_tokens = [] - for i, tok in enumerate(tokens): - spaced_tokens.append(tokens[i]) - if i < len(tokens) - 1: - if tok != _SPACE and not (is_char(tok) or is_char(tokens[i+1])): - spaced_tokens.append(_SPACE) - words.extend(spaced_tokens) - return words - - -def space_tokenizer(sentence): - return sentence.strip().split() - - -def is_pos_tag(token): - """Check if token is a part-of-speech tag.""" - return(token in ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", - "JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT", - "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", - "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", - "WP$", "WRB", ".", ",", ":", ")", "-LRB-", "(", "-RRB-", - "HYPH", "$", "``", "''", "ADD", "AFX", "QTR", "BES", "-DFL-", - "GW", "HVS", "NFP"]) - - -def parse_constraints(inpt, res): - ntags = len(res) - nwords = len(inpt) - npostags = len([x for x in res if is_pos_tag(x)]) - nclose = len([x for x in res if x[0] == "/"]) - nopen = ntags - nclose - npostags - return (abs(npostags - nwords), abs(nclose - nopen)) - - -def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, - tokenizer=None, normalize_digits=False): - """Create vocabulary file (if it does not exist yet) from data file. - - Data file is assumed to contain one sentence per line. Each sentence is - tokenized and digits are normalized (if normalize_digits is set). - Vocabulary contains the most-frequent tokens up to max_vocabulary_size. - We write it to vocabulary_path in a one-token-per-line format, so that later - token in the first line gets id=0, second line gets id=1, and so on. - - Args: - vocabulary_path: path where the vocabulary will be created. - data_path: data file that will be used to create vocabulary. - max_vocabulary_size: limit on the size of the created vocabulary. - tokenizer: a function to use to tokenize each data sentence; - if None, basic_tokenizer will be used. - normalize_digits: Boolean; if true, all digits are replaced by 0s. 
- """ - if not tf.gfile.Exists(vocabulary_path): - print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) - vocab, chars = {}, {} - for c in _PUNCTUATION: - chars[c] = 1 - - # Read French file. - with tf.gfile.GFile(data_path + ".fr", mode="rb") as f: - counter = 0 - for line_in in f: - line = " ".join(line_in.split()) - counter += 1 - if counter % 100000 == 0: - print(" processing fr line %d" % counter) - for c in line: - if c in chars: - chars[c] += 1 - else: - chars[c] = 1 - tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) - tokens = [t for t in tokens if not is_char(t) and t != _SPACE] - for w in tokens: - word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w - if word in vocab: - vocab[word] += 1000000000 # We want target words first. - else: - vocab[word] = 1000000000 - - # Read English file. - with tf.gfile.GFile(data_path + ".en", mode="rb") as f: - counter = 0 - for line_in in f: - line = " ".join(line_in.split()) - counter += 1 - if counter % 100000 == 0: - print(" processing en line %d" % counter) - for c in line: - if c in chars: - chars[c] += 1 - else: - chars[c] = 1 - tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) - tokens = [t for t in tokens if not is_char(t) and t != _SPACE] - for w in tokens: - word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w - if word in vocab: - vocab[word] += 1 - else: - vocab[word] = 1 - - sorted_vocab = sorted(vocab, key=vocab.get, reverse=True) - sorted_chars = sorted(chars, key=vocab.get, reverse=True) - sorted_chars = [_CHAR_MARKER + c for c in sorted_chars] - vocab_list = _START_VOCAB + sorted_chars + sorted_vocab - if tokenizer: - vocab_list = _START_VOCAB + sorted_vocab - if len(vocab_list) > max_vocabulary_size: - vocab_list = vocab_list[:max_vocabulary_size] - with tf.gfile.GFile(vocabulary_path, mode="wb") as vocab_file: - for w in vocab_list: - vocab_file.write(w + b"\n") - - -def initialize_vocabulary(vocabulary_path): - """Initialize vocabulary from file. - - We assume the vocabulary is stored one-item-per-line, so a file: - dog - cat - will result in a vocabulary {"dog": 0, "cat": 1}, and this function will - also return the reversed-vocabulary ["dog", "cat"]. - - Args: - vocabulary_path: path to the file containing the vocabulary. - - Returns: - a pair: the vocabulary (a dictionary mapping string to integers), and - the reversed vocabulary (a list, which reverses the vocabulary mapping). - - Raises: - ValueError: if the provided vocabulary_path does not exist. - """ - if tf.gfile.Exists(vocabulary_path): - rev_vocab = [] - with tf.gfile.GFile(vocabulary_path, mode="rb") as f: - rev_vocab.extend(f.readlines()) - rev_vocab = [line.strip() for line in rev_vocab] - vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) - return vocab, rev_vocab - else: - raise ValueError("Vocabulary file %s not found.", vocabulary_path) - - -def sentence_to_token_ids_raw(sentence, vocabulary, - tokenizer=None, normalize_digits=old_style): - """Convert a string to list of integers representing token-ids. - - For example, a sentence "I have a dog" may become tokenized into - ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, - "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. - - Args: - sentence: the sentence in bytes format to convert to token-ids. - vocabulary: a dictionary mapping tokens to integers. - tokenizer: a function to use to tokenize each sentence; - if None, basic_tokenizer will be used. 
- normalize_digits: Boolean; if true, all digits are replaced by 0s. - - Returns: - a list of integers, the token-ids for the sentence. - """ - if tokenizer: - words = tokenizer(sentence) - else: - words = basic_tokenizer(sentence) - result = [] - for w in words: - if normalize_digits: - w = re.sub(_DIGIT_RE, b"0", w) - if w in vocabulary: - result.append(vocabulary[w]) - else: - if tokenizer: - result.append(UNK_ID) - else: - result.append(SPACE_ID) - for c in w: - result.append(vocabulary.get(_CHAR_MARKER + c, UNK_ID)) - result.append(SPACE_ID) - while result and result[0] == SPACE_ID: - result = result[1:] - while result and result[-1] == SPACE_ID: - result = result[:-1] - return result - - -def sentence_to_token_ids(sentence, vocabulary, - tokenizer=None, normalize_digits=old_style): - """Convert a string to list of integers representing token-ids, tab=0.""" - tab_parts = sentence.strip().split("\t") - toks = [sentence_to_token_ids_raw(t, vocabulary, tokenizer, normalize_digits) - for t in tab_parts] - res = [] - for t in toks: - res.extend(t) - res.append(0) - return res[:-1] - - -def data_to_token_ids(data_path, target_path, vocabulary_path, - tokenizer=None, normalize_digits=False): - """Tokenize data file and turn into token-ids using given vocabulary file. - - This function loads data line-by-line from data_path, calls the above - sentence_to_token_ids, and saves the result to target_path. See comment - for sentence_to_token_ids on the details of token-ids format. - - Args: - data_path: path to the data file in one-sentence-per-line format. - target_path: path where the file with token-ids will be created. - vocabulary_path: path to the vocabulary file. - tokenizer: a function to use to tokenize each sentence; - if None, basic_tokenizer will be used. - normalize_digits: Boolean; if true, all digits are replaced by 0s. - """ - if not tf.gfile.Exists(target_path): - print("Tokenizing data in %s" % data_path) - vocab, _ = initialize_vocabulary(vocabulary_path) - with tf.gfile.GFile(data_path, mode="rb") as data_file: - with tf.gfile.GFile(target_path, mode="w") as tokens_file: - counter = 0 - for line in data_file: - counter += 1 - if counter % 100000 == 0: - print(" tokenizing line %d" % counter) - token_ids = sentence_to_token_ids(line, vocab, tokenizer, - normalize_digits) - tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") - - -def prepare_wmt_data(data_dir, vocabulary_size, - tokenizer=None, normalize_digits=False): - """Get WMT data into data_dir, create vocabularies and tokenize data. - - Args: - data_dir: directory in which the data sets will be stored. - vocabulary_size: size of the joint vocabulary to create and use. - tokenizer: a function to use to tokenize each data sentence; - if None, basic_tokenizer will be used. - normalize_digits: Boolean; if true, all digits are replaced by 0s. - - Returns: - A tuple of 6 elements: - (1) path to the token-ids for English training data-set, - (2) path to the token-ids for French training data-set, - (3) path to the token-ids for English development data-set, - (4) path to the token-ids for French development data-set, - (5) path to the vocabulary file, - (6) path to the vocabulary file (for compatibility with non-joint vocab). - """ - # Get wmt data to the specified directory. - train_path = get_wmt_enfr_train_set(data_dir) - dev_path = get_wmt_enfr_dev_set(data_dir) - - # Create vocabularies of the appropriate sizes. 
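# Note that a single joint vocabulary is built here: create_vocabulary
# reads both the .fr and the .en side of the training corpus into one
# vocab%d.txt file, which is why the same vocab_path is returned twice
# at the end of this function.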
- vocab_path = os.path.join(data_dir, "vocab%d.txt" % vocabulary_size) - create_vocabulary(vocab_path, train_path, vocabulary_size, - tokenizer=tokenizer, normalize_digits=normalize_digits) - - # Create token ids for the training data. - fr_train_ids_path = train_path + (".ids%d.fr" % vocabulary_size) - en_train_ids_path = train_path + (".ids%d.en" % vocabulary_size) - data_to_token_ids(train_path + ".fr", fr_train_ids_path, vocab_path, - tokenizer=tokenizer, normalize_digits=normalize_digits) - data_to_token_ids(train_path + ".en", en_train_ids_path, vocab_path, - tokenizer=tokenizer, normalize_digits=normalize_digits) - - # Create token ids for the development data. - fr_dev_ids_path = dev_path + (".ids%d.fr" % vocabulary_size) - en_dev_ids_path = dev_path + (".ids%d.en" % vocabulary_size) - data_to_token_ids(dev_path + ".fr", fr_dev_ids_path, vocab_path, - tokenizer=tokenizer, normalize_digits=normalize_digits) - data_to_token_ids(dev_path + ".en", en_dev_ids_path, vocab_path, - tokenizer=tokenizer, normalize_digits=normalize_digits) - - return (en_train_ids_path, fr_train_ids_path, - en_dev_ids_path, fr_dev_ids_path, - vocab_path, vocab_path) diff --git a/research/neural_programmer/README.md b/research/neural_programmer/README.md deleted file mode 100644 index dcc27f6fb..000000000 --- a/research/neural_programmer/README.md +++ /dev/null @@ -1,26 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Neural Programmer - -Implementation of the Neural Programmer model as described in this [paper](https://openreview.net/pdf?id=ry2YOrcge). - -Download and extract the data from the [WikiTableQuestions](https://ppasupat.github.io/WikiTableQuestions/) site. The dataset contains -11321, 2831, and 4344 examples for training, development, and testing respectively. We use their tokenization, number and date pre-processing. Please note that the above paper used the [initial release](https://github.com/ppasupat/WikiTableQuestions/releases/tag/v0.2) for training, development and testing. - -Change the `data_dir FLAG` to the location of the data. - -### Training -Run `python neural_programmer.py` - -The models are written to `FLAGS.output_dir`. - -### Testing -Run `python neural_programmer.py --evaluator_job=True` - -The models are loaded from `FLAGS.output_dir`. The evaluation is done on development data. - -In case of errors because of encoding, add `"# -*- coding: utf-8 -*-"` as the first line in `wiki_data.py` - -Maintained by Arvind Neelakantan (arvind2505) diff --git a/research/neural_programmer/data_utils.py b/research/neural_programmer/data_utils.py deleted file mode 100644 index 4df80c66a..000000000 --- a/research/neural_programmer/data_utils.py +++ /dev/null @@ -1,666 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Functions for constructing vocabulary, converting the examples to integer format and building the required masks for batch computation Author: aneelakantan (Arvind Neelakantan) -""" - -from __future__ import print_function - -import copy -import numbers -import numpy as np -import wiki_data - - -def return_index(a): - for i in range(len(a)): - if (a[i] == 1.0): - return i - - -def construct_vocab(data, utility, add_word=False): - ans = [] - for example in data: - sent = "" - for word in example.question: - if (not (isinstance(word, numbers.Number))): - sent += word + " " - example.original_nc = copy.deepcopy(example.number_columns) - example.original_wc = copy.deepcopy(example.word_columns) - example.original_nc_names = copy.deepcopy(example.number_column_names) - example.original_wc_names = copy.deepcopy(example.word_column_names) - if (add_word): - continue - number_found = 0 - if (not (example.is_bad_example)): - for word in example.question: - if (isinstance(word, numbers.Number)): - number_found += 1 - else: - if (not (utility.word_ids.has_key(word))): - utility.words.append(word) - utility.word_count[word] = 1 - utility.word_ids[word] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[word]] = word - else: - utility.word_count[word] += 1 - for col_name in example.word_column_names: - for word in col_name: - if (isinstance(word, numbers.Number)): - number_found += 1 - else: - if (not (utility.word_ids.has_key(word))): - utility.words.append(word) - utility.word_count[word] = 1 - utility.word_ids[word] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[word]] = word - else: - utility.word_count[word] += 1 - for col_name in example.number_column_names: - for word in col_name: - if (isinstance(word, numbers.Number)): - number_found += 1 - else: - if (not (utility.word_ids.has_key(word))): - utility.words.append(word) - utility.word_count[word] = 1 - utility.word_ids[word] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[word]] = word - else: - utility.word_count[word] += 1 - - -def word_lookup(word, utility): - if (utility.word_ids.has_key(word)): - return word - else: - return utility.unk_token - - -def convert_to_int_2d_and_pad(a, utility): - ans = [] - #print a - for b in a: - temp = [] - if (len(b) > utility.FLAGS.max_entry_length): - b = b[0:utility.FLAGS.max_entry_length] - for remaining in range(len(b), utility.FLAGS.max_entry_length): - b.append(utility.dummy_token) - assert len(b) == utility.FLAGS.max_entry_length - for word in b: - temp.append(utility.word_ids[word_lookup(word, utility)]) - ans.append(temp) - #print ans - return ans - - -def convert_to_bool_and_pad(a, utility): - a = a.tolist() - for i in range(len(a)): - for j in range(len(a[i])): - if (a[i][j] < 1): - a[i][j] = False - else: - a[i][j] = True - a[i] = a[i] + [False] * (utility.FLAGS.max_elements - len(a[i])) - return a - - -seen_tables = {} - - -def partial_match(question, table, number): - answer = [] - match = {} - for i in range(len(table)): - temp = [] - for j in range(len(table[i])): - temp.append(0) - answer.append(temp) - for i in range(len(table)): - for j in range(len(table[i])): - for word in question: - if (number): - if (word == table[i][j]): - answer[i][j] = 1.0 - match[i] = 1.0 - else: - if (word in table[i][j]): - answer[i][j] = 1.0 - match[i] = 1.0 - 
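# (answer mirrors the table shape with a 0/1 indicator per cell; match is
# keyed by column index and flags columns with at least one hit. A
# "partial" hit only needs one question word to occur inside a cell,
# whereas exact_match below requires the whole cell to match.)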
return answer, match - - -def exact_match(question, table, number): - #performs exact match operation - answer = [] - match = {} - matched_indices = [] - for i in range(len(table)): - temp = [] - for j in range(len(table[i])): - temp.append(0) - answer.append(temp) - for i in range(len(table)): - for j in range(len(table[i])): - if (number): - for word in question: - if (word == table[i][j]): - match[i] = 1.0 - answer[i][j] = 1.0 - else: - table_entry = table[i][j] - for k in range(len(question)): - if (k + len(table_entry) <= len(question)): - if (table_entry == question[k:(k + len(table_entry))]): - #if(len(table_entry) == 1): - #print "match: ", table_entry, question - match[i] = 1.0 - answer[i][j] = 1.0 - matched_indices.append((k, len(table_entry))) - return answer, match, matched_indices - - -def partial_column_match(question, table, number): - answer = [] - for i in range(len(table)): - answer.append(0) - for i in range(len(table)): - for word in question: - if (word in table[i]): - answer[i] = 1.0 - return answer - - -def exact_column_match(question, table, number): - #performs exact match on column names - answer = [] - matched_indices = [] - for i in range(len(table)): - answer.append(0) - for i in range(len(table)): - table_entry = table[i] - for k in range(len(question)): - if (k + len(table_entry) <= len(question)): - if (table_entry == question[k:(k + len(table_entry))]): - answer[i] = 1.0 - matched_indices.append((k, len(table_entry))) - return answer, matched_indices - - -def get_max_entry(a): - e = {} - for w in a: - if (w != "UNK, "): - if (e.has_key(w)): - e[w] += 1 - else: - e[w] = 1 - if (len(e) > 0): - (key, val) = sorted(e.items(), key=lambda x: -1 * x[1])[0] - if (val > 1): - return key - else: - return -1.0 - else: - return -1.0 - - -def list_join(a): - ans = "" - for w in a: - ans += str(w) + ", " - return ans - - -def group_by_max(table, number): - #computes the most frequently occurring entry in a column - answer = [] - for i in range(len(table)): - temp = [] - for j in range(len(table[i])): - temp.append(0) - answer.append(temp) - for i in range(len(table)): - if (number): - curr = table[i] - else: - curr = [list_join(w) for w in table[i]] - max_entry = get_max_entry(curr) - #print i, max_entry - for j in range(len(curr)): - if (max_entry == curr[j]): - answer[i][j] = 1.0 - else: - answer[i][j] = 0.0 - return answer - - -def pick_one(a): - for i in range(len(a)): - if (1.0 in a[i]): - return True - return False - - -def check_processed_cols(col, utility): - return True in [ - True for y in col - if (y != utility.FLAGS.pad_int and y != - utility.FLAGS.bad_number_pre_process) - ] - - -def complete_wiki_processing(data, utility, train=True): - #convert to integers and padding - processed_data = [] - num_bad_examples = 0 - for example in data: - number_found = 0 - if (example.is_bad_example): - num_bad_examples += 1 - if (not (example.is_bad_example)): - example.string_question = example.question[:] - #entry match - example.processed_number_columns = example.processed_number_columns[:] - example.processed_word_columns = example.processed_word_columns[:] - example.word_exact_match, word_match, matched_indices = exact_match( - example.string_question, example.original_wc, number=False) - example.number_exact_match, number_match, _ = exact_match( - example.string_question, example.original_nc, number=True) - if (not (pick_one(example.word_exact_match)) and not ( - pick_one(example.number_exact_match))): - assert len(word_match) == 0 - assert len(number_match) == 0 - 
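# Toy illustration of this fallback: for question ["john", "smith"] and a
# word cell ["john", "doe"], exact_match finds no contiguous full-cell
# match in the question, so the asserts above hold and the word-level
# partial_match below fires on "john".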
example.word_exact_match, word_match = partial_match( - example.string_question, example.original_wc, number=False) - #group by max - example.word_group_by_max = group_by_max(example.original_wc, False) - example.number_group_by_max = group_by_max(example.original_nc, True) - #column name match - example.word_column_exact_match, wcol_matched_indices = exact_column_match( - example.string_question, example.original_wc_names, number=False) - example.number_column_exact_match, ncol_matched_indices = exact_column_match( - example.string_question, example.original_nc_names, number=False) - if (not (1.0 in example.word_column_exact_match) and not ( - 1.0 in example.number_column_exact_match)): - example.word_column_exact_match = partial_column_match( - example.string_question, example.original_wc_names, number=False) - example.number_column_exact_match = partial_column_match( - example.string_question, example.original_nc_names, number=False) - if (len(word_match) > 0 or len(number_match) > 0): - example.question.append(utility.entry_match_token) - if (1.0 in example.word_column_exact_match or - 1.0 in example.number_column_exact_match): - example.question.append(utility.column_match_token) - example.string_question = example.question[:] - example.number_lookup_matrix = np.transpose( - example.number_lookup_matrix)[:] - example.word_lookup_matrix = np.transpose(example.word_lookup_matrix)[:] - example.columns = example.number_columns[:] - example.word_columns = example.word_columns[:] - example.len_total_cols = len(example.word_column_names) + len( - example.number_column_names) - example.column_names = example.number_column_names[:] - example.word_column_names = example.word_column_names[:] - example.string_column_names = example.number_column_names[:] - example.string_word_column_names = example.word_column_names[:] - example.sorted_number_index = [] - example.sorted_word_index = [] - example.column_mask = [] - example.word_column_mask = [] - example.processed_column_mask = [] - example.processed_word_column_mask = [] - example.word_column_entry_mask = [] - example.question_attention_mask = [] - example.question_number = example.question_number_1 = -1 - example.question_attention_mask = [] - example.ordinal_question = [] - example.ordinal_question_one = [] - new_question = [] - if (len(example.number_columns) > 0): - example.len_col = len(example.number_columns[0]) - else: - example.len_col = len(example.word_columns[0]) - for (start, length) in matched_indices: - for j in range(length): - example.question[start + j] = utility.unk_token - #print example.question - for word in example.question: - if (isinstance(word, numbers.Number) or wiki_data.is_date(word)): - if (not (isinstance(word, numbers.Number)) and - wiki_data.is_date(word)): - word = word.replace("X", "").replace("-", "") - number_found += 1 - if (number_found == 1): - example.question_number = word - if (len(example.ordinal_question) > 0): - example.ordinal_question[len(example.ordinal_question) - 1] = 1.0 - else: - example.ordinal_question.append(1.0) - elif (number_found == 2): - example.question_number_1 = word - if (len(example.ordinal_question_one) > 0): - example.ordinal_question_one[len(example.ordinal_question_one) - - 1] = 1.0 - else: - example.ordinal_question_one.append(1.0) - else: - new_question.append(word) - example.ordinal_question.append(0.0) - example.ordinal_question_one.append(0.0) - example.question = [ - utility.word_ids[word_lookup(w, utility)] for w in new_question - ] - example.question_attention_mask = 
[0.0] * len(example.question) - #when the first question number occurs before a word - example.ordinal_question = example.ordinal_question[0:len( - example.question)] - example.ordinal_question_one = example.ordinal_question_one[0:len( - example.question)] - #question-padding - example.question = [utility.word_ids[utility.dummy_token]] * ( - utility.FLAGS.question_length - len(example.question) - ) + example.question - example.question_attention_mask = [-10000.0] * ( - utility.FLAGS.question_length - len(example.question_attention_mask) - ) + example.question_attention_mask - example.ordinal_question = [0.0] * (utility.FLAGS.question_length - - len(example.ordinal_question) - ) + example.ordinal_question - example.ordinal_question_one = [0.0] * (utility.FLAGS.question_length - - len(example.ordinal_question_one) - ) + example.ordinal_question_one - if (True): - #number columns and related-padding - num_cols = len(example.columns) - start = 0 - for column in example.number_columns: - if (check_processed_cols(example.processed_number_columns[start], - utility)): - example.processed_column_mask.append(0.0) - sorted_index = sorted( - range(len(example.processed_number_columns[start])), - key=lambda k: example.processed_number_columns[start][k], - reverse=True) - sorted_index = sorted_index + [utility.FLAGS.pad_int] * ( - utility.FLAGS.max_elements - len(sorted_index)) - example.sorted_number_index.append(sorted_index) - example.columns[start] = column + [utility.FLAGS.pad_int] * ( - utility.FLAGS.max_elements - len(column)) - example.processed_number_columns[start] += [utility.FLAGS.pad_int] * ( - utility.FLAGS.max_elements - - len(example.processed_number_columns[start])) - start += 1 - example.column_mask.append(0.0) - for remaining in range(num_cols, utility.FLAGS.max_number_cols): - example.sorted_number_index.append([utility.FLAGS.pad_int] * - (utility.FLAGS.max_elements)) - example.columns.append([utility.FLAGS.pad_int] * - (utility.FLAGS.max_elements)) - example.processed_number_columns.append([utility.FLAGS.pad_int] * - (utility.FLAGS.max_elements)) - example.number_exact_match.append([0.0] * - (utility.FLAGS.max_elements)) - example.number_group_by_max.append([0.0] * - (utility.FLAGS.max_elements)) - example.column_mask.append(-100000000.0) - example.processed_column_mask.append(-100000000.0) - example.number_column_exact_match.append(0.0) - example.column_names.append([utility.dummy_token]) - #word column and related-padding - start = 0 - word_num_cols = len(example.word_columns) - for column in example.word_columns: - if (check_processed_cols(example.processed_word_columns[start], - utility)): - example.processed_word_column_mask.append(0.0) - sorted_index = sorted( - range(len(example.processed_word_columns[start])), - key=lambda k: example.processed_word_columns[start][k], - reverse=True) - sorted_index = sorted_index + [utility.FLAGS.pad_int] * ( - utility.FLAGS.max_elements - len(sorted_index)) - example.sorted_word_index.append(sorted_index) - column = convert_to_int_2d_and_pad(column, utility) - example.word_columns[start] = column + [[ - utility.word_ids[utility.dummy_token] - ] * utility.FLAGS.max_entry_length] * (utility.FLAGS.max_elements - - len(column)) - example.processed_word_columns[start] += [utility.FLAGS.pad_int] * ( - utility.FLAGS.max_elements - - len(example.processed_word_columns[start])) - example.word_column_entry_mask.append([0] * len(column) + [ - utility.word_ids[utility.dummy_token] - ] * (utility.FLAGS.max_elements - len(column))) - start += 1 - 
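# Mask convention used below (and for the number columns above): a real
# column contributes an additive mask of 0.0 while a padded column gets
# -100000000.0, so a downstream softmax(logits + mask) assigns padded
# columns essentially zero weight.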
example.word_column_mask.append(0.0) - for remaining in range(word_num_cols, utility.FLAGS.max_word_cols): - example.sorted_word_index.append([utility.FLAGS.pad_int] * - (utility.FLAGS.max_elements)) - example.word_columns.append([[utility.word_ids[utility.dummy_token]] * - utility.FLAGS.max_entry_length] * - (utility.FLAGS.max_elements)) - example.word_column_entry_mask.append( - [utility.word_ids[utility.dummy_token]] * - (utility.FLAGS.max_elements)) - example.word_exact_match.append([0.0] * (utility.FLAGS.max_elements)) - example.word_group_by_max.append([0.0] * (utility.FLAGS.max_elements)) - example.processed_word_columns.append([utility.FLAGS.pad_int] * - (utility.FLAGS.max_elements)) - example.word_column_mask.append(-100000000.0) - example.processed_word_column_mask.append(-100000000.0) - example.word_column_exact_match.append(0.0) - example.word_column_names.append([utility.dummy_token] * - utility.FLAGS.max_entry_length) - seen_tables[example.table_key] = 1 - #convert column and word column names to integers - example.column_ids = convert_to_int_2d_and_pad(example.column_names, - utility) - example.word_column_ids = convert_to_int_2d_and_pad( - example.word_column_names, utility) - for i_em in range(len(example.number_exact_match)): - example.number_exact_match[i_em] = example.number_exact_match[ - i_em] + [0.0] * (utility.FLAGS.max_elements - - len(example.number_exact_match[i_em])) - example.number_group_by_max[i_em] = example.number_group_by_max[ - i_em] + [0.0] * (utility.FLAGS.max_elements - - len(example.number_group_by_max[i_em])) - for i_em in range(len(example.word_exact_match)): - example.word_exact_match[i_em] = example.word_exact_match[ - i_em] + [0.0] * (utility.FLAGS.max_elements - - len(example.word_exact_match[i_em])) - example.word_group_by_max[i_em] = example.word_group_by_max[ - i_em] + [0.0] * (utility.FLAGS.max_elements - - len(example.word_group_by_max[i_em])) - example.exact_match = example.number_exact_match + example.word_exact_match - example.group_by_max = example.number_group_by_max + example.word_group_by_max - example.exact_column_match = example.number_column_exact_match + example.word_column_exact_match - #answer and related mask, padding - if (example.is_lookup): - example.answer = example.calc_answer - example.number_print_answer = example.number_lookup_matrix.tolist() - example.word_print_answer = example.word_lookup_matrix.tolist() - for i_answer in range(len(example.number_print_answer)): - example.number_print_answer[i_answer] = example.number_print_answer[ - i_answer] + [0.0] * (utility.FLAGS.max_elements - - len(example.number_print_answer[i_answer])) - for i_answer in range(len(example.word_print_answer)): - example.word_print_answer[i_answer] = example.word_print_answer[ - i_answer] + [0.0] * (utility.FLAGS.max_elements - - len(example.word_print_answer[i_answer])) - example.number_lookup_matrix = convert_to_bool_and_pad( - example.number_lookup_matrix, utility) - example.word_lookup_matrix = convert_to_bool_and_pad( - example.word_lookup_matrix, utility) - for remaining in range(num_cols, utility.FLAGS.max_number_cols): - example.number_lookup_matrix.append([False] * - utility.FLAGS.max_elements) - example.number_print_answer.append([0.0] * utility.FLAGS.max_elements) - for remaining in range(word_num_cols, utility.FLAGS.max_word_cols): - example.word_lookup_matrix.append([False] * - utility.FLAGS.max_elements) - example.word_print_answer.append([0.0] * utility.FLAGS.max_elements) - example.print_answer = example.number_print_answer + 
example.word_print_answer - else: - example.answer = example.calc_answer - example.print_answer = [[0.0] * (utility.FLAGS.max_elements)] * ( - utility.FLAGS.max_number_cols + utility.FLAGS.max_word_cols) - #question_number masks - if (example.question_number == -1): - example.question_number_mask = np.zeros([utility.FLAGS.max_elements]) - else: - example.question_number_mask = np.ones([utility.FLAGS.max_elements]) - if (example.question_number_1 == -1): - example.question_number_one_mask = -10000.0 - else: - example.question_number_one_mask = np.float64(0.0) - if (example.len_col > utility.FLAGS.max_elements): - continue - processed_data.append(example) - return processed_data - - -def add_special_words(utility): - utility.words.append(utility.entry_match_token) - utility.word_ids[utility.entry_match_token] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[ - utility.entry_match_token]] = utility.entry_match_token - utility.entry_match_token_id = utility.word_ids[utility.entry_match_token] - print("entry match token: ", utility.word_ids[ - utility.entry_match_token], utility.entry_match_token_id) - utility.words.append(utility.column_match_token) - utility.word_ids[utility.column_match_token] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[ - utility.column_match_token]] = utility.column_match_token - utility.column_match_token_id = utility.word_ids[utility.column_match_token] - print("entry match token: ", utility.word_ids[ - utility.column_match_token], utility.column_match_token_id) - utility.words.append(utility.dummy_token) - utility.word_ids[utility.dummy_token] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[ - utility.dummy_token]] = utility.dummy_token - utility.dummy_token_id = utility.word_ids[utility.dummy_token] - utility.words.append(utility.unk_token) - utility.word_ids[utility.unk_token] = len(utility.word_ids) - utility.reverse_word_ids[utility.word_ids[ - utility.unk_token]] = utility.unk_token - - -def perform_word_cutoff(utility): - if (utility.FLAGS.word_cutoff > 0): - for word in utility.word_ids.keys(): - if (utility.word_count.has_key(word) and utility.word_count[word] < - utility.FLAGS.word_cutoff and word != utility.unk_token and - word != utility.dummy_token and word != utility.entry_match_token and - word != utility.column_match_token): - utility.word_ids.pop(word) - utility.words.remove(word) - - -def word_dropout(question, utility): - if (utility.FLAGS.word_dropout_prob > 0.0): - new_question = [] - for i in range(len(question)): - if (question[i] != utility.dummy_token_id and - utility.random.random() > utility.FLAGS.word_dropout_prob): - new_question.append(utility.word_ids[utility.unk_token]) - else: - new_question.append(question[i]) - return new_question - else: - return question - - -def generate_feed_dict(data, curr, batch_size, gr, train=False, utility=None): - #prepare feed dict dictionary - feed_dict = {} - feed_examples = [] - for j in range(batch_size): - feed_examples.append(data[curr + j]) - if (train): - feed_dict[gr.batch_question] = [ - word_dropout(feed_examples[j].question, utility) - for j in range(batch_size) - ] - else: - feed_dict[gr.batch_question] = [ - feed_examples[j].question for j in range(batch_size) - ] - feed_dict[gr.batch_question_attention_mask] = [ - feed_examples[j].question_attention_mask for j in range(batch_size) - ] - feed_dict[ - gr.batch_answer] = [feed_examples[j].answer for j in range(batch_size)] - feed_dict[gr.batch_number_column] = [ - 
feed_examples[j].columns for j in range(batch_size) - ] - feed_dict[gr.batch_processed_number_column] = [ - feed_examples[j].processed_number_columns for j in range(batch_size) - ] - feed_dict[gr.batch_processed_sorted_index_number_column] = [ - feed_examples[j].sorted_number_index for j in range(batch_size) - ] - feed_dict[gr.batch_processed_sorted_index_word_column] = [ - feed_examples[j].sorted_word_index for j in range(batch_size) - ] - feed_dict[gr.batch_question_number] = np.array( - [feed_examples[j].question_number for j in range(batch_size)]).reshape( - (batch_size, 1)) - feed_dict[gr.batch_question_number_one] = np.array( - [feed_examples[j].question_number_1 for j in range(batch_size)]).reshape( - (batch_size, 1)) - feed_dict[gr.batch_question_number_mask] = [ - feed_examples[j].question_number_mask for j in range(batch_size) - ] - feed_dict[gr.batch_question_number_one_mask] = np.array( - [feed_examples[j].question_number_one_mask for j in range(batch_size) - ]).reshape((batch_size, 1)) - feed_dict[gr.batch_print_answer] = [ - feed_examples[j].print_answer for j in range(batch_size) - ] - feed_dict[gr.batch_exact_match] = [ - feed_examples[j].exact_match for j in range(batch_size) - ] - feed_dict[gr.batch_group_by_max] = [ - feed_examples[j].group_by_max for j in range(batch_size) - ] - feed_dict[gr.batch_column_exact_match] = [ - feed_examples[j].exact_column_match for j in range(batch_size) - ] - feed_dict[gr.batch_ordinal_question] = [ - feed_examples[j].ordinal_question for j in range(batch_size) - ] - feed_dict[gr.batch_ordinal_question_one] = [ - feed_examples[j].ordinal_question_one for j in range(batch_size) - ] - feed_dict[gr.batch_number_column_mask] = [ - feed_examples[j].column_mask for j in range(batch_size) - ] - feed_dict[gr.batch_number_column_names] = [ - feed_examples[j].column_ids for j in range(batch_size) - ] - feed_dict[gr.batch_processed_word_column] = [ - feed_examples[j].processed_word_columns for j in range(batch_size) - ] - feed_dict[gr.batch_word_column_mask] = [ - feed_examples[j].word_column_mask for j in range(batch_size) - ] - feed_dict[gr.batch_word_column_names] = [ - feed_examples[j].word_column_ids for j in range(batch_size) - ] - feed_dict[gr.batch_word_column_entry_mask] = [ - feed_examples[j].word_column_entry_mask for j in range(batch_size) - ] - return feed_dict diff --git a/research/neural_programmer/model.py b/research/neural_programmer/model.py deleted file mode 100644 index 610d66699..000000000 --- a/research/neural_programmer/model.py +++ /dev/null @@ -1,679 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Author: aneelakantan (Arvind Neelakantan) -""" - -from __future__ import print_function - -import numpy as np -import tensorflow as tf -import nn_utils - - -class Graph(): - - def __init__(self, utility, batch_size, max_passes, mode="train"): - self.utility = utility - self.data_type = self.utility.tf_data_type[self.utility.FLAGS.data_type] - self.max_elements = self.utility.FLAGS.max_elements - max_elements = self.utility.FLAGS.max_elements - self.num_cols = self.utility.FLAGS.max_number_cols - self.num_word_cols = self.utility.FLAGS.max_word_cols - self.question_length = self.utility.FLAGS.question_length - self.batch_size = batch_size - self.max_passes = max_passes - self.mode = mode - self.embedding_dims = self.utility.FLAGS.embedding_dims - #input question and a mask - self.batch_question = tf.placeholder(tf.int32, - [batch_size, self.question_length]) - self.batch_question_attention_mask = tf.placeholder( - self.data_type, [batch_size, self.question_length]) - #ground truth scalar answer and lookup answer - self.batch_answer = tf.placeholder(self.data_type, [batch_size]) - self.batch_print_answer = tf.placeholder( - self.data_type, - [batch_size, self.num_cols + self.num_word_cols, max_elements]) - #number columns and its processed version - self.batch_number_column = tf.placeholder( - self.data_type, [batch_size, self.num_cols, max_elements - ]) #columns with numeric entries - self.batch_processed_number_column = tf.placeholder( - self.data_type, [batch_size, self.num_cols, max_elements]) - self.batch_processed_sorted_index_number_column = tf.placeholder( - tf.int32, [batch_size, self.num_cols, max_elements]) - #word columns and its processed version - self.batch_processed_word_column = tf.placeholder( - self.data_type, [batch_size, self.num_word_cols, max_elements]) - self.batch_processed_sorted_index_word_column = tf.placeholder( - tf.int32, [batch_size, self.num_word_cols, max_elements]) - self.batch_word_column_entry_mask = tf.placeholder( - tf.int32, [batch_size, self.num_word_cols, max_elements]) - #names of word and number columns along with their mask - self.batch_word_column_names = tf.placeholder( - tf.int32, - [batch_size, self.num_word_cols, self.utility.FLAGS.max_entry_length]) - self.batch_word_column_mask = tf.placeholder( - self.data_type, [batch_size, self.num_word_cols]) - self.batch_number_column_names = tf.placeholder( - tf.int32, - [batch_size, self.num_cols, self.utility.FLAGS.max_entry_length]) - self.batch_number_column_mask = tf.placeholder(self.data_type, - [batch_size, self.num_cols]) - #exact match and group by max operation - self.batch_exact_match = tf.placeholder( - self.data_type, - [batch_size, self.num_cols + self.num_word_cols, max_elements]) - self.batch_column_exact_match = tf.placeholder( - self.data_type, [batch_size, self.num_cols + self.num_word_cols]) - self.batch_group_by_max = tf.placeholder( - self.data_type, - [batch_size, self.num_cols + self.num_word_cols, max_elements]) - #numbers in the question along with their position. 
This is used to compute arguments to the comparison operations
- self.batch_question_number = tf.placeholder(self.data_type, [batch_size, 1])
- self.batch_question_number_one = tf.placeholder(self.data_type,
- [batch_size, 1])
- self.batch_question_number_mask = tf.placeholder(
- self.data_type, [batch_size, max_elements])
- self.batch_question_number_one_mask = tf.placeholder(self.data_type,
- [batch_size, 1])
- self.batch_ordinal_question = tf.placeholder(
- self.data_type, [batch_size, self.question_length])
- self.batch_ordinal_question_one = tf.placeholder(
- self.data_type, [batch_size, self.question_length])
-
- def LSTM_question_embedding(self, sentence, sentence_length):
- #LSTM processes the input question
- lstm_params = "question_lstm"
- hidden_vectors = []
- sentence = self.batch_question
- question_hidden = tf.zeros(
- [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
- question_c_hidden = tf.zeros(
- [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
- if (self.utility.FLAGS.rnn_dropout > 0.0):
- if (self.mode == "train"):
- rnn_dropout_mask = tf.cast(
- tf.random_uniform(
- tf.shape(question_hidden), minval=0.0, maxval=1.0) <
- self.utility.FLAGS.rnn_dropout,
- self.data_type) / self.utility.FLAGS.rnn_dropout
- else:
- rnn_dropout_mask = tf.ones_like(question_hidden)
- for question_iterator in range(self.question_length):
- curr_word = sentence[:, question_iterator]
- question_vector = nn_utils.apply_dropout(
- nn_utils.get_embedding(curr_word, self.utility, self.params),
- self.utility.FLAGS.dropout, self.mode)
- question_hidden, question_c_hidden = nn_utils.LSTMCell(
- question_vector, question_hidden, question_c_hidden, lstm_params,
- self.params)
- if (self.utility.FLAGS.rnn_dropout > 0.0):
- question_hidden = question_hidden * rnn_dropout_mask
- hidden_vectors.append(tf.expand_dims(question_hidden, 0))
- hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
- return question_hidden, hidden_vectors
-
- def history_recurrent_step(self, curr_hprev, hprev):
- #A single RNN step for controller or history RNN
- return tf.tanh(
- tf.matmul(
- tf.concat(axis=1, values=[hprev, curr_hprev]), self.params[
- "history_recurrent"])) + self.params["history_recurrent_bias"]
-
- def question_number_softmax(self, hidden_vectors):
- #Attention on question to decide the question number to be passed to comparison ops
- def compute_ans(op_embedding, comparison):
- op_embedding = tf.expand_dims(op_embedding, 0)
- #dot product of operation embedding with hidden state to the left of the number occurrence
- first = tf.transpose(
- tf.matmul(op_embedding,
- tf.transpose(
- tf.reduce_sum(hidden_vectors * tf.tile(
- tf.expand_dims(
- tf.transpose(self.batch_ordinal_question), 2),
- [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
- second = self.batch_question_number_one_mask + tf.transpose(
- tf.matmul(op_embedding,
- tf.transpose(
- tf.reduce_sum(hidden_vectors * tf.tile(
- tf.expand_dims(
- tf.transpose(self.batch_ordinal_question_one), 2
- ), [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
- question_number_softmax = tf.nn.softmax(tf.concat(axis=1, values=[first, second]))
- if (self.mode == "test"):
- cond = tf.equal(question_number_softmax,
- tf.reshape(
- tf.reduce_max(question_number_softmax, 1),
- [self.batch_size, 1]))
- question_number_softmax = tf.where(
- cond,
- tf.fill(tf.shape(question_number_softmax), 1.0),
- tf.fill(tf.shape(question_number_softmax), 0.0))
- question_number_softmax = tf.cast(question_number_softmax,
- self.data_type)
- ans = tf.reshape(
- tf.reduce_sum(question_number_softmax * tf.concat(
- axis=1, values=[self.batch_question_number, self.batch_question_number_one]),
- 1), [self.batch_size, 1])
- return ans
-
- def compute_op_position(op_name):
- for i in range(len(self.utility.operations_set)):
- if (op_name == self.utility.operations_set[i]):
- return i
-
- def compute_question_number(op_name):
- op_embedding = tf.nn.embedding_lookup(self.params_unit,
- compute_op_position(op_name))
- return compute_ans(op_embedding, op_name)
-
- curr_greater_question_number = compute_question_number("greater")
- curr_lesser_question_number = compute_question_number("lesser")
- curr_geq_question_number = compute_question_number("geq")
- curr_leq_question_number = compute_question_number("leq")
- return curr_greater_question_number, curr_lesser_question_number, curr_geq_question_number, curr_leq_question_number
-
- def perform_attention(self, context_vector, hidden_vectors, length, mask):
- #Performs attention on hidden_vectors using context vector
- context_vector = tf.tile(
- tf.expand_dims(context_vector, 0), [length, 1, 1]) #time * bs * d
- attention_softmax = tf.nn.softmax(
- tf.transpose(tf.reduce_sum(context_vector * hidden_vectors, 2)) +
- mask) #batch_size * time
- attention_softmax = tf.tile(
- tf.expand_dims(tf.transpose(attention_softmax), 2),
- [1, 1, self.embedding_dims])
- ans_vector = tf.reduce_sum(attention_softmax * hidden_vectors, 0)
- return ans_vector
-
- #computes embeddings for column names using parameters of question module
- def get_column_hidden_vectors(self):
- #vector representations for the column names
- self.column_hidden_vectors = tf.reduce_sum(
- nn_utils.get_embedding(self.batch_number_column_names, self.utility,
- self.params), 2)
- self.word_column_hidden_vectors = tf.reduce_sum(
- nn_utils.get_embedding(self.batch_word_column_names, self.utility,
- self.params), 2)
-
- def create_summary_embeddings(self):
- #embeddings for each text entry in the table using parameters of the question module
- self.summary_text_entry_embeddings = tf.reduce_sum(
- tf.expand_dims(self.batch_exact_match, 3) * tf.expand_dims(
- tf.expand_dims(
- tf.expand_dims(
- nn_utils.get_embedding(self.utility.entry_match_token_id,
- self.utility, self.params), 0), 1),
- 2), 2)
-
- def compute_column_softmax(self, column_controller_vector, time_step):
- #compute softmax over all the columns using column controller vector
- column_controller_vector = tf.tile(
- tf.expand_dims(column_controller_vector, 1),
- [1, self.num_cols + self.num_word_cols, 1]) #max_cols * bs * d
- column_controller_vector = nn_utils.apply_dropout(
- column_controller_vector, self.utility.FLAGS.dropout, self.mode)
- self.full_column_hidden_vectors = tf.concat(
- axis=1, values=[self.column_hidden_vectors, self.word_column_hidden_vectors])
- self.full_column_hidden_vectors += self.summary_text_entry_embeddings
- self.full_column_hidden_vectors = nn_utils.apply_dropout(
- self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
- column_logits = tf.reduce_sum(
- column_controller_vector * self.full_column_hidden_vectors, 2) + (
- self.params["word_match_feature_column_name"] *
- self.batch_column_exact_match) + self.full_column_mask
- column_softmax = tf.nn.softmax(column_logits) #batch_size * max_cols
- return column_softmax
-
- def compute_first_or_last(self, select, first=True):
- #perform first or last operation on row select with probabilistic row selection
- answer = tf.zeros_like(select)
- running_sum =
tf.zeros([self.batch_size, 1], self.data_type) - for i in range(self.max_elements): - if (first): - current = tf.slice(select, [0, i], [self.batch_size, 1]) - else: - current = tf.slice(select, [0, self.max_elements - 1 - i], - [self.batch_size, 1]) - curr_prob = current * (1 - running_sum) - curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type) - running_sum += curr_prob - temp_ans = [] - curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0) - for i_ans in range(self.max_elements): - if (not (first) and i_ans == self.max_elements - 1 - i): - temp_ans.append(curr_prob) - elif (first and i_ans == i): - temp_ans.append(curr_prob) - else: - temp_ans.append(tf.zeros_like(curr_prob)) - temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans)) - answer += temp_ans - return answer - - def make_hard_softmax(self, softmax): - #converts soft selection to hard selection. used at test time - cond = tf.equal( - softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1])) - softmax = tf.where( - cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0)) - softmax = tf.cast(softmax, self.data_type) - return softmax - - def compute_max_or_min(self, select, maxi=True): - #computes the argmax and argmin of a column with probabilistic row selection - answer = tf.zeros([ - self.batch_size, self.num_cols + self.num_word_cols, self.max_elements - ], self.data_type) - sum_prob = tf.zeros([self.batch_size, self.num_cols + self.num_word_cols], - self.data_type) - for j in range(self.max_elements): - if (maxi): - curr_pos = j - else: - curr_pos = self.max_elements - 1 - j - select_index = tf.slice(self.full_processed_sorted_index_column, - [0, 0, curr_pos], [self.batch_size, -1, 1]) - select_mask = tf.equal( - tf.tile( - tf.expand_dims( - tf.tile( - tf.expand_dims(tf.range(self.max_elements), 0), - [self.batch_size, 1]), 1), - [1, self.num_cols + self.num_word_cols, 1]), select_index) - curr_prob = tf.expand_dims(select, 1) * tf.cast( - select_mask, self.data_type) * self.select_bad_number_mask - curr_prob = curr_prob * tf.expand_dims((1 - sum_prob), 2) - curr_prob = curr_prob * tf.expand_dims( - tf.cast((1 - sum_prob) > 0.0, self.data_type), 2) - answer = tf.where(select_mask, curr_prob, answer) - sum_prob += tf.reduce_sum(curr_prob, 2) - return answer - - def perform_operations(self, softmax, full_column_softmax, select, - prev_select_1, curr_pass): - #performs all the 15 operations. 
computes scalar output, lookup answer and row selector
- column_softmax = tf.slice(full_column_softmax, [0, 0],
- [self.batch_size, self.num_cols])
- word_column_softmax = tf.slice(full_column_softmax, [0, self.num_cols],
- [self.batch_size, self.num_word_cols])
- init_max = self.compute_max_or_min(select, maxi=True)
- init_min = self.compute_max_or_min(select, maxi=False)
- #operations that are column independent
- count = tf.reshape(tf.reduce_sum(select, 1), [self.batch_size, 1])
- select_full_column_softmax = tf.tile(
- tf.expand_dims(full_column_softmax, 2),
- [1, 1, self.max_elements
- ]) #BS * (max_cols + max_word_cols) * max_elements
- select_word_column_softmax = tf.tile(
- tf.expand_dims(word_column_softmax, 2),
- [1, 1, self.max_elements]) #BS * max_word_cols * max_elements
- select_greater = tf.reduce_sum(
- self.init_select_greater * select_full_column_softmax,
- 1) * self.batch_question_number_mask #BS * max_elements
- select_lesser = tf.reduce_sum(
- self.init_select_lesser * select_full_column_softmax,
- 1) * self.batch_question_number_mask #BS * max_elements
- select_geq = tf.reduce_sum(
- self.init_select_geq * select_full_column_softmax,
- 1) * self.batch_question_number_mask #BS * max_elements
- select_leq = tf.reduce_sum(
- self.init_select_leq * select_full_column_softmax,
- 1) * self.batch_question_number_mask #BS * max_elements
- select_max = tf.reduce_sum(init_max * select_full_column_softmax,
- 1) #BS * max_elements
- select_min = tf.reduce_sum(init_min * select_full_column_softmax,
- 1) #BS * max_elements
- select_prev = tf.concat(axis=1, values=[
- tf.slice(select, [0, 1], [self.batch_size, self.max_elements - 1]),
- tf.cast(tf.zeros([self.batch_size, 1]), self.data_type)
- ])
- select_next = tf.concat(axis=1, values=[
- tf.cast(tf.zeros([self.batch_size, 1]), self.data_type), tf.slice(
- select, [0, 0], [self.batch_size, self.max_elements - 1])
- ])
- select_last_rs = self.compute_first_or_last(select, False)
- select_first_rs = self.compute_first_or_last(select, True)
- select_word_match = tf.reduce_sum(self.batch_exact_match *
- select_full_column_softmax, 1)
- select_group_by_max = tf.reduce_sum(self.batch_group_by_max *
- select_full_column_softmax, 1)
- length_content = 1
- length_select = 13
- length_print = 1
- values = tf.concat(axis=1, values=[count])
- softmax_content = tf.slice(softmax, [0, 0],
- [self.batch_size, length_content])
- #compute scalar output
- output = tf.reduce_sum(tf.multiply(softmax_content, values), 1)
- #compute lookup answer
- softmax_print = tf.slice(softmax, [0, length_content + length_select],
- [self.batch_size, length_print])
- curr_print = select_full_column_softmax * tf.tile(
- tf.expand_dims(select, 1),
- [1, self.num_cols + self.num_word_cols, 1
- ]) #BS * max_cols * max_elements (considers only column)
- self.batch_lookup_answer = curr_print * tf.tile(
- tf.expand_dims(softmax_print, 2),
- [1, self.num_cols + self.num_word_cols, self.max_elements
- ]) #BS * max_cols * max_elements
- self.batch_lookup_answer = self.batch_lookup_answer * self.select_full_mask
- #compute row select
- softmax_select = tf.slice(softmax, [0, length_content],
- [self.batch_size, length_select])
- select_lists = [
- tf.expand_dims(select_prev, 1), tf.expand_dims(select_next, 1),
- tf.expand_dims(select_first_rs, 1), tf.expand_dims(select_last_rs, 1),
- tf.expand_dims(select_group_by_max, 1),
- tf.expand_dims(select_greater, 1), tf.expand_dims(select_lesser, 1),
- tf.expand_dims(select_geq, 1), tf.expand_dims(select_leq, 1),
-
tf.expand_dims(select_max, 1), tf.expand_dims(select_min, 1), - tf.expand_dims(select_word_match, 1), - tf.expand_dims(self.reset_select, 1) - ] - select = tf.reduce_sum( - tf.tile(tf.expand_dims(softmax_select, 2), [1, 1, self.max_elements]) * - tf.concat(axis=1, values=select_lists), 1) - select = select * self.select_whole_mask - return output, select - - def one_pass(self, select, question_embedding, hidden_vectors, hprev, - prev_select_1, curr_pass): - #Performs one timestep which involves selecting an operation and a column - attention_vector = self.perform_attention( - hprev, hidden_vectors, self.question_length, - self.batch_question_attention_mask) #batch_size * embedding_dims - controller_vector = tf.nn.relu( - tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul( - tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[ - "controller"])) - column_controller_vector = tf.nn.relu( - tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul( - tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[ - "column_controller"])) - controller_vector = nn_utils.apply_dropout( - controller_vector, self.utility.FLAGS.dropout, self.mode) - self.operation_logits = tf.matmul(controller_vector, - tf.transpose(self.params_unit)) - softmax = tf.nn.softmax(self.operation_logits) - soft_softmax = softmax - #compute column softmax: bs * max_columns - weighted_op_representation = tf.transpose( - tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax))) - column_controller_vector = tf.nn.relu( - tf.matmul( - tf.concat(axis=1, values=[ - column_controller_vector, weighted_op_representation - ]), self.params["break_conditional"])) - full_column_softmax = self.compute_column_softmax(column_controller_vector, - curr_pass) - soft_column_softmax = full_column_softmax - if (self.mode == "test"): - full_column_softmax = self.make_hard_softmax(full_column_softmax) - softmax = self.make_hard_softmax(softmax) - output, select = self.perform_operations(softmax, full_column_softmax, - select, prev_select_1, curr_pass) - return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax - - def compute_lookup_error(self, val): - #computes lookup error. 
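- #For a given value val: the print error is kept only at cells whose gold
- #print answer equals val (all other cells are masked to 1e10), the min is
- #taken over all cells, and the result is zeroed when no cell matches val.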
- cond = tf.equal(self.batch_print_answer, val) - inter = tf.where( - cond, self.init_print_error, - tf.tile( - tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [ - self.batch_size, self.utility.FLAGS.max_word_cols + - self.utility.FLAGS.max_number_cols, - self.utility.FLAGS.max_elements - ])) - return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast( - tf.greater( - tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1), - 0.0), self.data_type) - - def soft_min(self, x, y): - return tf.maximum(-1.0 * (1 / ( - self.utility.FLAGS.soft_min_value + 0.0)) * tf.log( - tf.exp(-self.utility.FLAGS.soft_min_value * x) + tf.exp( - -self.utility.FLAGS.soft_min_value * y)), tf.zeros_like(x)) - - def error_computation(self): - #computes the error of each example in a batch - math_error = 0.5 * tf.square(tf.subtract(self.scalar_output, self.batch_answer)) - #scale math error - math_error = math_error / self.rows - math_error = tf.minimum(math_error, self.utility.FLAGS.max_math_error * - tf.ones(tf.shape(math_error), self.data_type)) - self.init_print_error = tf.where( - self.batch_gold_select, -1 * tf.log(self.batch_lookup_answer + 1e-300 + - self.invert_select_full_mask), -1 * - tf.log(1 - self.batch_lookup_answer)) * self.select_full_mask - print_error_1 = self.init_print_error * tf.cast( - tf.equal(self.batch_print_answer, 0.0), self.data_type) - print_error = tf.reduce_sum(tf.reduce_sum((print_error_1), 1), 1) - for val in range(1, 58): - print_error += self.compute_lookup_error(val + 0.0) - print_error = print_error * self.utility.FLAGS.print_cost / self.num_entries - if (self.mode == "train"): - error = tf.where( - tf.logical_and( - tf.not_equal(self.batch_answer, 0.0), - tf.not_equal( - tf.reduce_sum(tf.reduce_sum(self.batch_print_answer, 1), 1), - 0.0)), - self.soft_min(math_error, print_error), - tf.where( - tf.not_equal(self.batch_answer, 0.0), math_error, print_error)) - else: - error = tf.where( - tf.logical_and( - tf.equal(self.scalar_output, 0.0), - tf.equal( - tf.reduce_sum(tf.reduce_sum(self.batch_lookup_answer, 1), 1), - 0.0)), - tf.ones_like(math_error), - tf.where( - tf.equal(self.scalar_output, 0.0), print_error, math_error)) - return error - - def batch_process(self): - #Computes loss and fraction of correct examples in a batch. 
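- #Embeds the question with the LSTM, precomputes the comparison-op row
- #selections, then runs max_passes soft operation/column choices, feeding
- #each pass's soft selections back through the history RNN before scoring.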
- self.params_unit = nn_utils.apply_dropout( - self.params["unit"], self.utility.FLAGS.dropout, self.mode) - batch_size = self.batch_size - max_passes = self.max_passes - num_timesteps = 1 - max_elements = self.max_elements - select = tf.cast( - tf.fill([self.batch_size, max_elements], 1.0), self.data_type) - hprev = tf.cast( - tf.fill([self.batch_size, self.embedding_dims], 0.0), - self.data_type) #running sum of the hidden states of the model - output = tf.cast(tf.fill([self.batch_size, 1], 0.0), - self.data_type) #output of the model - correct = tf.cast( - tf.fill([1], 0.0), self.data_type - ) #to compute accuracy, returns number of correct examples for this batch - total_error = 0.0 - prev_select_1 = tf.zeros_like(select) - self.create_summary_embeddings() - self.get_column_hidden_vectors() - #get question embedding - question_embedding, hidden_vectors = self.LSTM_question_embedding( - self.batch_question, self.question_length) - #compute arguments for comparison operation - greater_question_number, lesser_question_number, geq_question_number, leq_question_number = self.question_number_softmax( - hidden_vectors) - self.init_select_greater = tf.cast( - tf.greater(self.full_processed_column, - tf.expand_dims(greater_question_number, 2)), self. - data_type) * self.select_bad_number_mask #bs * max_cols * max_elements - self.init_select_lesser = tf.cast( - tf.less(self.full_processed_column, - tf.expand_dims(lesser_question_number, 2)), self. - data_type) * self.select_bad_number_mask #bs * max_cols * max_elements - self.init_select_geq = tf.cast( - tf.greater_equal(self.full_processed_column, - tf.expand_dims(geq_question_number, 2)), self. - data_type) * self.select_bad_number_mask #bs * max_cols * max_elements - self.init_select_leq = tf.cast( - tf.less_equal(self.full_processed_column, - tf.expand_dims(leq_question_number, 2)), self. 
- data_type) * self.select_bad_number_mask #bs * max_cols * max_elements
- self.init_select_word_match = 0
- if (self.utility.FLAGS.rnn_dropout > 0.0):
- if (self.mode == "train"):
- history_rnn_dropout_mask = tf.cast(
- tf.random_uniform(
- tf.shape(hprev), minval=0.0, maxval=1.0) <
- self.utility.FLAGS.rnn_dropout,
- self.data_type) / self.utility.FLAGS.rnn_dropout
- else:
- history_rnn_dropout_mask = tf.ones_like(hprev)
- select = select * self.select_whole_mask
- self.batch_log_prob = tf.zeros([self.batch_size], dtype=self.data_type)
- #Perform max_passes and at each pass select operation and column
- for curr_pass in range(max_passes):
- print("step: ", curr_pass)
- output, select, softmax, soft_softmax, column_softmax, soft_column_softmax = self.one_pass(
- select, question_embedding, hidden_vectors, hprev, prev_select_1,
- curr_pass)
- prev_select_1 = select
- #compute input to history RNN
- input_op = tf.transpose(
- tf.matmul(
- tf.transpose(self.params_unit), tf.transpose(
- soft_softmax))) #weighted average of embedding of operations
- input_col = tf.reduce_sum(
- tf.expand_dims(soft_column_softmax, 2) *
- self.full_column_hidden_vectors, 1)
- history_input = tf.concat(axis=1, values=[input_op, input_col])
- history_input = nn_utils.apply_dropout(
- history_input, self.utility.FLAGS.dropout, self.mode)
- hprev = self.history_recurrent_step(history_input, hprev)
- if (self.utility.FLAGS.rnn_dropout > 0.0):
- hprev = hprev * history_rnn_dropout_mask
- self.scalar_output = output
- error = self.error_computation()
- cond = tf.less(error, 0.0001, name="cond")
- correct_add = tf.where(
- cond, tf.fill(tf.shape(cond), 1.0), tf.fill(tf.shape(cond), 0.0))
- correct = tf.reduce_sum(correct_add)
- error = error / batch_size
- total_error = tf.reduce_sum(error)
- total_correct = correct / batch_size
- return total_error, total_correct
-
- def compute_error(self):
- #Sets mask variables and performs batch processing
- self.batch_gold_select = self.batch_print_answer > 0.0
- self.full_column_mask = tf.concat(
- axis=1, values=[self.batch_number_column_mask, self.batch_word_column_mask])
- self.full_processed_column = tf.concat(
- axis=1,
- values=[self.batch_processed_number_column, self.batch_processed_word_column])
- self.full_processed_sorted_index_column = tf.concat(axis=1, values=[
- self.batch_processed_sorted_index_number_column,
- self.batch_processed_sorted_index_word_column
- ])
- self.select_bad_number_mask = tf.cast(
- tf.logical_and(
- tf.not_equal(self.full_processed_column,
- self.utility.FLAGS.pad_int),
- tf.not_equal(self.full_processed_column,
- self.utility.FLAGS.bad_number_pre_process)),
- self.data_type)
- self.select_mask = tf.cast(
- tf.logical_not(
- tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int)),
- self.data_type)
- self.select_word_mask = tf.cast(
- tf.logical_not(
- tf.equal(self.batch_word_column_entry_mask,
- self.utility.dummy_token_id)), self.data_type)
- self.select_full_mask = tf.concat(
- axis=1, values=[self.select_mask, self.select_word_mask])
- self.select_whole_mask = tf.maximum(
- tf.reshape(
- tf.slice(self.select_mask, [0, 0, 0],
- [self.batch_size, 1, self.max_elements]),
- [self.batch_size, self.max_elements]),
- tf.reshape(
- tf.slice(self.select_word_mask, [0, 0, 0],
- [self.batch_size, 1, self.max_elements]),
- [self.batch_size, self.max_elements]))
- self.invert_select_full_mask = tf.cast(
- tf.concat(axis=1, values=[
- tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int),
- tf.equal(self.batch_word_column_entry_mask,
- self.utility.dummy_token_id) - ]), self.data_type) - self.batch_lookup_answer = tf.zeros(tf.shape(self.batch_gold_select)) - self.reset_select = self.select_whole_mask - self.rows = tf.reduce_sum(self.select_whole_mask, 1) - self.num_entries = tf.reshape( - tf.reduce_sum(tf.reduce_sum(self.select_full_mask, 1), 1), - [self.batch_size]) - self.final_error, self.final_correct = self.batch_process() - return self.final_error - - def create_graph(self, params, global_step): - #Creates the graph to compute error, gradient computation and updates parameters - self.params = params - batch_size = self.batch_size - learning_rate = tf.cast(self.utility.FLAGS.learning_rate, self.data_type) - self.total_cost = self.compute_error() - optimize_params = self.params.values() - optimize_names = self.params.keys() - print("optimize params ", optimize_names) - if (self.utility.FLAGS.l2_regularizer > 0.0): - reg_cost = 0.0 - for ind_param in self.params.keys(): - reg_cost += tf.nn.l2_loss(self.params[ind_param]) - self.total_cost += self.utility.FLAGS.l2_regularizer * reg_cost - grads = tf.gradients(self.total_cost, optimize_params, name="gradients") - grad_norm = 0.0 - for p, name in zip(grads, optimize_names): - print("grads: ", p, name) - if isinstance(p, tf.IndexedSlices): - grad_norm += tf.reduce_sum(p.values * p.values) - elif not (p == None): - grad_norm += tf.reduce_sum(p * p) - grad_norm = tf.sqrt(grad_norm) - max_grad_norm = np.float32(self.utility.FLAGS.clip_gradients).astype( - self.utility.np_data_type[self.utility.FLAGS.data_type]) - grad_scale = tf.minimum( - tf.cast(1.0, self.data_type), max_grad_norm / grad_norm) - clipped_grads = list() - for p in grads: - if isinstance(p, tf.IndexedSlices): - tmp = p.values * grad_scale - clipped_grads.append(tf.IndexedSlices(tmp, p.indices)) - elif not (p == None): - clipped_grads.append(p * grad_scale) - else: - clipped_grads.append(p) - grads = clipped_grads - self.global_step = global_step - params_list = self.params.values() - params_list.append(self.global_step) - adam = tf.train.AdamOptimizer( - learning_rate, - epsilon=tf.cast(self.utility.FLAGS.eps, self.data_type), - use_locking=True) - self.step = adam.apply_gradients(zip(grads, optimize_params), - global_step=self.global_step) - self.init_op = tf.global_variables_initializer() diff --git a/research/neural_programmer/neural_programmer.py b/research/neural_programmer/neural_programmer.py deleted file mode 100644 index 145ca13d6..000000000 --- a/research/neural_programmer/neural_programmer.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
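`create_graph` above clips gradients by global norm manually: it accumulates the squared norm of every gradient (unpacking `IndexedSlices` values), then rescales all gradients by `min(1, clip_gradients / global_norm)`. A minimal NumPy sketch of that scheme (dense gradients only, not the repository's code):

```python
import numpy as np

def clip_by_global_norm(grads, max_norm):
    # One norm over all gradients, then one shared rescaling factor.
    global_norm = np.sqrt(sum(np.sum(g * g) for g in grads))
    scale = min(1.0, max_norm / global_norm)
    return [g * scale for g in grads]

grads = [np.array([3.0, 4.0]), np.array([12.0])]  # global norm = 13.0
clipped = clip_by_global_norm(grads, 1.0)         # rescaled to norm 1.0
print(clipped)  # [array([0.23076923, 0.30769231]), array([0.92307692])]
```

The behavior is equivalent to TensorFlow's `tf.clip_by_global_norm`; the hand-rolled version additionally carries `None` gradients through unchanged.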
-# ==============================================================================
-"""Implementation of the Neural Programmer model described in https://openreview.net/pdf?id=ry2YOrcge
-
-This file calls functions to load & pre-process data, construct the TF graph
-and performs training or evaluation as specified by the flag evaluator_job
-Author: aneelakantan (Arvind Neelakantan)
-"""
-from __future__ import print_function
-
-import time
-from random import Random
-import numpy as np
-import tensorflow as tf
-import model
-import wiki_data
-import parameters
-import data_utils
-
-tf.flags.DEFINE_integer("train_steps", 100001, "Number of steps to train")
-tf.flags.DEFINE_integer("eval_cycle", 500,
- "Evaluate model at every eval_cycle steps")
-tf.flags.DEFINE_integer("max_elements", 100,
- "maximum rows that are considered for processing")
-tf.flags.DEFINE_integer(
- "max_number_cols", 15,
- "maximum number columns that are considered for processing")
-tf.flags.DEFINE_integer(
- "max_word_cols", 25,
- "maximum word columns that are considered for processing")
-tf.flags.DEFINE_integer("question_length", 62, "maximum question length")
-tf.flags.DEFINE_integer("max_entry_length", 1, "")
-tf.flags.DEFINE_integer("max_passes", 4, "number of operation passes")
-tf.flags.DEFINE_integer("embedding_dims", 256, "")
-tf.flags.DEFINE_integer("batch_size", 20, "")
-tf.flags.DEFINE_float("clip_gradients", 1.0, "")
-tf.flags.DEFINE_float("eps", 1e-6, "")
-tf.flags.DEFINE_float("param_init", 0.1, "")
-tf.flags.DEFINE_float("learning_rate", 0.001, "")
-tf.flags.DEFINE_float("l2_regularizer", 0.0001, "")
-tf.flags.DEFINE_float("print_cost", 50.0,
- "weighting factor in the objective function")
-tf.flags.DEFINE_string("job_id", "temp", """job id""")
-tf.flags.DEFINE_string("output_dir", "../model/",
- """output_dir""")
-tf.flags.DEFINE_string("data_dir", "../data/",
- """data_dir""")
-tf.flags.DEFINE_integer("write_every", 500, "write every N")
-tf.flags.DEFINE_integer("param_seed", 150, "")
-tf.flags.DEFINE_integer("python_seed", 200, "")
-tf.flags.DEFINE_float("dropout", 0.8, "dropout keep probability")
-tf.flags.DEFINE_float("rnn_dropout", 0.9,
- "dropout keep probability for rnn connections")
-tf.flags.DEFINE_float("pad_int", -20000.0,
- "number columns are padded with pad_int")
-tf.flags.DEFINE_string("data_type", "double", "float or double")
-tf.flags.DEFINE_float("word_dropout_prob", 0.9, "word dropout keep prob")
-tf.flags.DEFINE_integer("word_cutoff", 10, "")
-tf.flags.DEFINE_integer("vocab_size", 10800, "")
-tf.flags.DEFINE_boolean("evaluator_job", False,
- "whether to run as trainer/evaluator")
-tf.flags.DEFINE_float(
- "bad_number_pre_process", -200000.0,
- "number that is added to a corrupted table entry in a number column")
-tf.flags.DEFINE_float("max_math_error", 3.0,
- "max square loss error that is considered")
-tf.flags.DEFINE_float("soft_min_value", 5.0, "")
-FLAGS = tf.flags.FLAGS
-
-
-class Utility:
- #holds FLAGS and other variables that are used in different files
- def __init__(self):
- global FLAGS
- self.FLAGS = FLAGS
- self.unk_token = "UNK"
- self.entry_match_token = "entry_match"
- self.column_match_token = "column_match"
- self.dummy_token = "dummy_token"
- self.tf_data_type = {}
- self.tf_data_type["double"] = tf.float64
- self.tf_data_type["float"] = tf.float32
- self.np_data_type = {}
- self.np_data_type["double"] = np.float64
- self.np_data_type["float"] = np.float32
- self.operations_set = ["count"] + [
- "prev", "next", "first_rs", "last_rs", "group_by_max", "greater",
- "lesser", "geq", "leq", "max", "min", "word-match"
- ] + ["reset_select"] + ["print"]
- self.word_ids = {}
- self.reverse_word_ids = {}
- self.word_count = {}
- self.random = Random(FLAGS.python_seed)
-
-
-def evaluate(sess, data, batch_size, graph, i):
- #computes accuracy
- num_examples = 0.0
- gc = 0.0
- for j in range(0, len(data) - batch_size + 1, batch_size):
- [ct] = sess.run([graph.final_correct],
- feed_dict=data_utils.generate_feed_dict(data, j, batch_size,
- graph))
- gc += ct * batch_size
- num_examples += batch_size
- print("dev set accuracy after ", i, " : ", gc / num_examples)
- print(num_examples, len(data))
- print("--------")
-
-
-def Train(graph, utility, batch_size, train_data, sess, model_dir,
- saver):
- #performs training
- curr = 0
- train_set_loss = 0.0
- utility.random.shuffle(train_data)
- start = time.time()
- for i in range(utility.FLAGS.train_steps):
- curr_step = i
- if (i > 0 and i % FLAGS.write_every == 0):
- model_file = model_dir + "/model_" + str(i)
- saver.save(sess, model_file)
- if curr + batch_size >= len(train_data):
- curr = 0
- utility.random.shuffle(train_data)
- step, cost_value = sess.run(
- [graph.step, graph.total_cost],
- feed_dict=data_utils.generate_feed_dict(
- train_data, curr, batch_size, graph, train=True, utility=utility))
- curr = curr + batch_size
- train_set_loss += cost_value
- if (i > 0 and i % FLAGS.eval_cycle == 0):
- end = time.time()
- time_taken = end - start
- print("step ", i, " ", time_taken, " seconds ")
- start = end
- print(" printing train set loss: ", train_set_loss / utility.FLAGS.eval_cycle)
- train_set_loss = 0.0
-
-
-def master(train_data, dev_data, utility):
- #creates TF graph and calls trainer or evaluator
- batch_size = utility.FLAGS.batch_size
- model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
- #create all parameters of the model
- param_class = parameters.Parameters(utility)
- params, global_step, init = param_class.parameters(utility)
- key = "test" if (FLAGS.evaluator_job) else "train"
- graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
- graph.create_graph(params, global_step)
- prev_dev_error = 0.0
- final_loss = 0.0
- final_accuracy = 0.0
- #start session
- with tf.Session() as sess:
- sess.run(init.name)
- sess.run(graph.init_op.name)
- to_save = params.copy()
- saver = tf.train.Saver(to_save, max_to_keep=500)
- if (FLAGS.evaluator_job):
- while True:
- selected_models = {}
- file_list = tf.gfile.ListDirectory(model_dir)
- for model_file in file_list:
- if ("checkpoint" in model_file or "index" in model_file or
- "meta" in model_file):
- continue
- if ("data" in model_file):
- model_file = model_file.split(".")[0]
- model_step = int(
- model_file.split("_")[len(model_file.split("_")) - 1])
- selected_models[model_step] = model_file
- file_list = sorted(selected_models.items(), key=lambda x: x[0])
- if (len(file_list) > 0):
- file_list = file_list[0:len(file_list) - 1]
- print("list of models: ", file_list)
- for model_file in file_list:
- model_file = model_file[1]
- print("restoring: ", model_file)
- saver.restore(sess, model_dir + "/" + model_file)
- model_step = int(
- model_file.split("_")[len(model_file.split("_")) - 1])
- print("evaluating on dev ", model_file, model_step)
- evaluate(sess, dev_data, batch_size, graph, model_step)
- else:
- ckpt = tf.train.get_checkpoint_state(model_dir)
- print("model dir: ", model_dir)
- if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
- print("create dir: ", utility.FLAGS.output_dir)
-
tf.gfile.MkDir(utility.FLAGS.output_dir) - if (not (tf.gfile.IsDirectory(model_dir))): - print("create dir: ", model_dir) - tf.gfile.MkDir(model_dir) - Train(graph, utility, batch_size, train_data, sess, model_dir, - saver) - -def main(args): - utility = Utility() - train_name = "random-split-1-train.examples" - dev_name = "random-split-1-dev.examples" - test_name = "pristine-unseen-tables.examples" - #load data - dat = wiki_data.WikiQuestionGenerator(train_name, dev_name, test_name, FLAGS.data_dir) - train_data, dev_data, test_data = dat.load() - utility.words = [] - utility.word_ids = {} - utility.reverse_word_ids = {} - #construct vocabulary - data_utils.construct_vocab(train_data, utility) - data_utils.construct_vocab(dev_data, utility, True) - data_utils.construct_vocab(test_data, utility, True) - data_utils.add_special_words(utility) - data_utils.perform_word_cutoff(utility) - #convert data to int format and pad the inputs - train_data = data_utils.complete_wiki_processing(train_data, utility, True) - dev_data = data_utils.complete_wiki_processing(dev_data, utility, False) - test_data = data_utils.complete_wiki_processing(test_data, utility, False) - print("# train examples ", len(train_data)) - print("# dev examples ", len(dev_data)) - print("# test examples ", len(test_data)) - print("running open source") - #construct TF graph and train or evaluate - master(train_data, dev_data, utility) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/neural_programmer/nn_utils.py b/research/neural_programmer/nn_utils.py deleted file mode 100644 index 2f3a1a98b..000000000 --- a/research/neural_programmer/nn_utils.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Author: aneelakantan (Arvind Neelakantan) -""" - -import tensorflow as tf - -def get_embedding(word, utility, params): - return tf.nn.embedding_lookup(params["word"], word) - - -def apply_dropout(x, dropout_rate, mode): - if (dropout_rate > 0.0): - if (mode == "train"): - x = tf.nn.dropout(x, dropout_rate) - else: - x = x - return x - - -def LSTMCell(x, mprev, cprev, key, params): - """Create an LSTM cell. - - Implements the equations in pg.2 from - "Long Short-Term Memory Based Recurrent Neural Network Architectures - For Large Vocabulary Speech Recognition", - Hasim Sak, Andrew Senior, Francoise Beaufays. - - Args: - w: A dictionary of the weights and optional biases as returned - by LSTMParametersSplit(). - x: Inputs to this cell. - mprev: m_{t-1}, the recurrent activations (same as the output) - from the previous cell. - cprev: c_{t-1}, the cell activations from the previous cell. - keep_prob: Keep probability on the input and the outputs of a cell. - - Returns: - m: Outputs of this cell. - c: Cell Activations. 
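-
- Note: the returned output is m = o * c; the cell state is not passed
- through tanh before the output gate (the standard LSTM formulation uses
- m = o * tanh(c)).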
- """ - - i = tf.matmul(x, params[key + "_ix"]) + tf.matmul(mprev, params[key + "_im"]) - i = tf.nn.bias_add(i, params[key + "_i"]) - f = tf.matmul(x, params[key + "_fx"]) + tf.matmul(mprev, params[key + "_fm"]) - f = tf.nn.bias_add(f, params[key + "_f"]) - c = tf.matmul(x, params[key + "_cx"]) + tf.matmul(mprev, params[key + "_cm"]) - c = tf.nn.bias_add(c, params[key + "_c"]) - o = tf.matmul(x, params[key + "_ox"]) + tf.matmul(mprev, params[key + "_om"]) - o = tf.nn.bias_add(o, params[key + "_o"]) - i = tf.sigmoid(i, name="i_gate") - f = tf.sigmoid(f, name="f_gate") - o = tf.sigmoid(o, name="o_gate") - c = f * cprev + i * tf.tanh(c) - m = o * c - return m, c diff --git a/research/neural_programmer/parameters.py b/research/neural_programmer/parameters.py deleted file mode 100644 index c576ae822..000000000 --- a/research/neural_programmer/parameters.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Author: aneelakantan (Arvind Neelakantan) -""" - -import numpy as np -import tensorflow as tf - - -class Parameters: - - def __init__(self, u): - self.utility = u - self.init_seed_counter = 0 - self.word_init = {} - - def parameters(self, utility): - params = {} - inits = [] - embedding_dims = self.utility.FLAGS.embedding_dims - params["unit"] = tf.Variable( - self.RandomUniformInit([len(utility.operations_set), embedding_dims])) - params["word"] = tf.Variable( - self.RandomUniformInit([utility.FLAGS.vocab_size, embedding_dims])) - params["word_match_feature_column_name"] = tf.Variable( - self.RandomUniformInit([1])) - params["controller"] = tf.Variable( - self.RandomUniformInit([2 * embedding_dims, embedding_dims])) - params["column_controller"] = tf.Variable( - self.RandomUniformInit([2 * embedding_dims, embedding_dims])) - params["column_controller_prev"] = tf.Variable( - self.RandomUniformInit([embedding_dims, embedding_dims])) - params["controller_prev"] = tf.Variable( - self.RandomUniformInit([embedding_dims, embedding_dims])) - global_step = tf.Variable(1, name="global_step") - #weigths of question and history RNN (or LSTM) - key_list = ["question_lstm"] - for key in key_list: - # Weights going from inputs to nodes. - for wgts in ["ix", "fx", "cx", "ox"]: - params[key + "_" + wgts] = tf.Variable( - self.RandomUniformInit([embedding_dims, embedding_dims])) - # Weights going from nodes to nodes. - for wgts in ["im", "fm", "cm", "om"]: - params[key + "_" + wgts] = tf.Variable( - self.RandomUniformInit([embedding_dims, embedding_dims])) - #Biases for the gates and cell - for bias in ["i", "f", "c", "o"]: - if (bias == "f"): - print("forget gate bias") - params[key + "_" + bias] = tf.Variable( - tf.random_uniform([embedding_dims], 1.0, 1.1, self.utility. 
- tf_data_type[self.utility.FLAGS.data_type])) - else: - params[key + "_" + bias] = tf.Variable( - self.RandomUniformInit([embedding_dims])) - params["history_recurrent"] = tf.Variable( - self.RandomUniformInit([3 * embedding_dims, embedding_dims])) - params["history_recurrent_bias"] = tf.Variable( - self.RandomUniformInit([1, embedding_dims])) - params["break_conditional"] = tf.Variable( - self.RandomUniformInit([2 * embedding_dims, embedding_dims])) - init = tf.global_variables_initializer() - return params, global_step, init - - def RandomUniformInit(self, shape): - """Returns a RandomUniform Tensor between -param_init and param_init.""" - param_seed = self.utility.FLAGS.param_seed - self.init_seed_counter += 1 - return tf.random_uniform( - shape, -1.0 * - (np.float32(self.utility.FLAGS.param_init) - ).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]), - (np.float32(self.utility.FLAGS.param_init) - ).astype(self.utility.np_data_type[self.utility.FLAGS.data_type]), - self.utility.tf_data_type[self.utility.FLAGS.data_type], - param_seed + self.init_seed_counter) diff --git a/research/neural_programmer/wiki_data.py b/research/neural_programmer/wiki_data.py deleted file mode 100644 index c91637ca1..000000000 --- a/research/neural_programmer/wiki_data.py +++ /dev/null @@ -1,532 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Loads the WikiQuestions dataset. - -An example consists of question, table. Additionally, we store the processed -columns which store the entries after performing number, date and other -preprocessing as done in the baseline. -columns, column names and processed columns are split into word and number -columns. 
-lookup answer (or matrix) is also split into number and word lookup matrix -Author: aneelakantan (Arvind Neelakantan) -""" -from __future__ import print_function - -import math -import os -import re -import numpy as np -import unicodedata as ud -import tensorflow as tf - -bad_number = -200000.0 #number that is added to a corrupted table entry in a number column - -def is_nan_or_inf(number): - return math.isnan(number) or math.isinf(number) - -def strip_accents(s): - u = unicode(s, "utf-8") - u_new = ''.join(c for c in ud.normalize('NFKD', u) if ud.category(c) != 'Mn') - return u_new.encode("utf-8") - - -def correct_unicode(string): - string = strip_accents(string) - string = re.sub("\xc2\xa0", " ", string).strip() - string = re.sub("\xe2\x80\x93", "-", string).strip() - #string = re.sub(ur'[\u0300-\u036F]', "", string) - string = re.sub("‚", ",", string) - string = re.sub("…", "...", string) - #string = re.sub("[·・]", ".", string) - string = re.sub("ˆ", "^", string) - string = re.sub("Ëœ", "~", string) - string = re.sub("‹", "<", string) - string = re.sub("›", ">", string) - #string = re.sub("[‘’´`]", "'", string) - #string = re.sub("[“”«»]", "\"", string) - #string = re.sub("[•†‡]", "", string) - #string = re.sub("[‐‑–—]", "-", string) - string = re.sub(r'[\u2E00-\uFFFF]', "", string) - string = re.sub("\\s+", " ", string).strip() - return string - - -def simple_normalize(string): - string = correct_unicode(string) - # Citations - string = re.sub("\[(nb ?)?\d+\]", "", string) - string = re.sub("\*+$", "", string) - # Year in parenthesis - string = re.sub("\(\d* ?-? ?\d*\)", "", string) - string = re.sub("^\"(.*)\"$", "", string) - return string - - -def full_normalize(string): - #print "an: ", string - string = simple_normalize(string) - # Remove trailing info in brackets - string = re.sub("\[[^\]]*\]", "", string) - # Remove most unicode characters in other languages - string = re.sub(r'[\u007F-\uFFFF]', "", string.strip()) - # Remove trailing info in parenthesis - string = re.sub("\([^)]*\)$", "", string.strip()) - string = final_normalize(string) - # Get rid of question marks - string = re.sub("\?", "", string).strip() - # Get rid of trailing colons (usually occur in column titles) - string = re.sub("\:$", " ", string).strip() - # Get rid of slashes - string = re.sub(r"/", " ", string).strip() - string = re.sub(r"\\", " ", string).strip() - # Replace colon, slash, and dash with space - # Note: need better replacement for this when parsing time - string = re.sub(r"\:", " ", string).strip() - string = re.sub("/", " ", string).strip() - string = re.sub("-", " ", string).strip() - # Convert empty strings to UNK - # Important to do this last or near last - if not string: - string = "UNK" - return string - -def final_normalize(string): - # Remove leading and trailing whitespace - string = re.sub("\\s+", " ", string).strip() - # Convert entirely to lowercase - string = string.lower() - # Get rid of strangely escaped newline characters - string = re.sub("\\\\n", " ", string).strip() - # Get rid of quotation marks - string = re.sub(r"\"", "", string).strip() - string = re.sub(r"\'", "", string).strip() - string = re.sub(r"`", "", string).strip() - # Get rid of * - string = re.sub("\*", "", string).strip() - return string - -def is_number(x): - try: - f = float(x) - return not is_nan_or_inf(f) - except ValueError: - return False - except TypeError: - return False - - -class WikiExample(object): - - def __init__(self, id, question, answer, table_key): - self.question_id = id - self.question = 
question - self.answer = answer - self.table_key = table_key - self.lookup_matrix = [] - self.is_bad_example = False - self.is_word_lookup = False - self.is_ambiguous_word_lookup = False - self.is_number_lookup = False - self.is_number_calc = False - self.is_unknown_answer = False - - -class TableInfo(object): - - def __init__(self, word_columns, word_column_names, word_column_indices, - number_columns, number_column_names, number_column_indices, - processed_word_columns, processed_number_columns, orig_columns): - self.word_columns = word_columns - self.word_column_names = word_column_names - self.word_column_indices = word_column_indices - self.number_columns = number_columns - self.number_column_names = number_column_names - self.number_column_indices = number_column_indices - self.processed_word_columns = processed_word_columns - self.processed_number_columns = processed_number_columns - self.orig_columns = orig_columns - - -class WikiQuestionLoader(object): - - def __init__(self, data_name, root_folder): - self.root_folder = root_folder - self.data_folder = os.path.join(self.root_folder, "data") - self.examples = [] - self.data_name = data_name - - def num_questions(self): - return len(self.examples) - - def load_qa(self): - data_source = os.path.join(self.data_folder, self.data_name) - f = tf.gfile.GFile(data_source, "r") - id_regex = re.compile("\(id ([^\)]*)\)") - for line in f: - id_match = id_regex.search(line) - id = id_match.group(1) - self.examples.append(id) - - def load(self): - self.load_qa() - - -def is_date(word): - if (not (bool(re.search("[a-z0-9]", word, re.IGNORECASE)))): - return False - if (len(word) != 10): - return False - if (word[4] != "-"): - return False - if (word[7] != "-"): - return False - for i in range(len(word)): - if (not (word[i] == "X" or word[i] == "x" or word[i] == "-" or re.search( - "[0-9]", word[i]))): - return False - return True - - -class WikiQuestionGenerator(object): - - def __init__(self, train_name, dev_name, test_name, root_folder): - self.train_name = train_name - self.dev_name = dev_name - self.test_name = test_name - self.train_loader = WikiQuestionLoader(train_name, root_folder) - self.dev_loader = WikiQuestionLoader(dev_name, root_folder) - self.test_loader = WikiQuestionLoader(test_name, root_folder) - self.bad_examples = 0 - self.root_folder = root_folder - self.data_folder = os.path.join(self.root_folder, "annotated/data") - self.annotated_examples = {} - self.annotated_tables = {} - self.annotated_word_reject = {} - self.annotated_word_reject["-lrb-"] = 1 - self.annotated_word_reject["-rrb-"] = 1 - self.annotated_word_reject["UNK"] = 1 - - def is_money(self, word): - if (not (bool(re.search("[a-z0-9]", word, re.IGNORECASE)))): - return False - for i in range(len(word)): - if (not (word[i] == "E" or word[i] == "." 
or re.search("[0-9]", - word[i]))): - return False - return True - - def remove_consecutive(self, ner_tags, ner_values): - for i in range(len(ner_tags)): - if ((ner_tags[i] == "NUMBER" or ner_tags[i] == "MONEY" or - ner_tags[i] == "PERCENT" or ner_tags[i] == "DATE") and - i + 1 < len(ner_tags) and ner_tags[i] == ner_tags[i + 1] and - ner_values[i] == ner_values[i + 1] and ner_values[i] != ""): - word = ner_values[i] - word = word.replace(">", "").replace("<", "").replace("=", "").replace( - "%", "").replace("~", "").replace("$", "").replace("£", "").replace( - "€", "") - if (re.search("[A-Z]", word) and not (is_date(word)) and not ( - self.is_money(word))): - ner_values[i] = "A" - else: - ner_values[i] = "," - return ner_tags, ner_values - - def pre_process_sentence(self, tokens, ner_tags, ner_values): - sentence = [] - tokens = tokens.split("|") - ner_tags = ner_tags.split("|") - ner_values = ner_values.split("|") - ner_tags, ner_values = self.remove_consecutive(ner_tags, ner_values) - #print "old: ", tokens - for i in range(len(tokens)): - word = tokens[i] - if (ner_values[i] != "" and - (ner_tags[i] == "NUMBER" or ner_tags[i] == "MONEY" or - ner_tags[i] == "PERCENT" or ner_tags[i] == "DATE")): - word = ner_values[i] - word = word.replace(">", "").replace("<", "").replace("=", "").replace( - "%", "").replace("~", "").replace("$", "").replace("£", "").replace( - "€", "") - if (re.search("[A-Z]", word) and not (is_date(word)) and not ( - self.is_money(word))): - word = tokens[i] - if (is_number(ner_values[i])): - word = float(ner_values[i]) - elif (is_number(word)): - word = float(word) - if (tokens[i] == "score"): - word = "score" - if (is_number(word)): - word = float(word) - if (not (self.annotated_word_reject.has_key(word))): - if (is_number(word) or is_date(word) or self.is_money(word)): - sentence.append(word) - else: - word = full_normalize(word) - if (not (self.annotated_word_reject.has_key(word)) and - bool(re.search("[a-z0-9]", word, re.IGNORECASE))): - m = re.search(",", word) - sentence.append(word.replace(",", "")) - if (len(sentence) == 0): - sentence.append("UNK") - return sentence - - def load_annotated_data(self, in_file): - self.annotated_examples = {} - self.annotated_tables = {} - f = tf.gfile.GFile(in_file, "r") - counter = 0 - for line in f: - if (counter > 0): - line = line.strip() - (question_id, utterance, context, target_value, tokens, lemma_tokens, - pos_tags, ner_tags, ner_values, target_canon) = line.split("\t") - question = self.pre_process_sentence(tokens, ner_tags, ner_values) - target_canon = target_canon.split("|") - self.annotated_examples[question_id] = WikiExample( - question_id, question, target_canon, context) - self.annotated_tables[context] = [] - counter += 1 - print("Annotated examples loaded ", len(self.annotated_examples)) - f.close() - - def is_number_column(self, a): - for w in a: - if (len(w) != 1): - return False - if (not (is_number(w[0]))): - return False - return True - - def convert_table(self, table): - answer = [] - for i in range(len(table)): - temp = [] - for j in range(len(table[i])): - temp.append(" ".join([str(w) for w in table[i][j]])) - answer.append(temp) - return answer - - def load_annotated_tables(self): - for table in self.annotated_tables.keys(): - annotated_table = table.replace("csv", "annotated") - orig_columns = [] - processed_columns = [] - f = tf.gfile.GFile(os.path.join(self.root_folder, annotated_table), "r") - counter = 0 - for line in f: - if (counter > 0): - line = line.strip() - line = line + "\t" * (13 - 
len(line.split("\t"))) - (row, col, read_id, content, tokens, lemma_tokens, pos_tags, ner_tags, - ner_values, number, date, num2, read_list) = line.split("\t") - counter += 1 - f.close() - max_row = int(row) - max_col = int(col) - for i in range(max_col + 1): - orig_columns.append([]) - processed_columns.append([]) - for j in range(max_row + 1): - orig_columns[i].append(bad_number) - processed_columns[i].append(bad_number) - #print orig_columns - f = tf.gfile.GFile(os.path.join(self.root_folder, annotated_table), "r") - counter = 0 - column_names = [] - for line in f: - if (counter > 0): - line = line.strip() - line = line + "\t" * (13 - len(line.split("\t"))) - (row, col, read_id, content, tokens, lemma_tokens, pos_tags, ner_tags, - ner_values, number, date, num2, read_list) = line.split("\t") - entry = self.pre_process_sentence(tokens, ner_tags, ner_values) - if (row == "-1"): - column_names.append(entry) - else: - orig_columns[int(col)][int(row)] = entry - if (len(entry) == 1 and is_number(entry[0])): - processed_columns[int(col)][int(row)] = float(entry[0]) - else: - for single_entry in entry: - if (is_number(single_entry)): - processed_columns[int(col)][int(row)] = float(single_entry) - break - nt = ner_tags.split("|") - nv = ner_values.split("|") - for i_entry in range(len(tokens.split("|"))): - if (nt[i_entry] == "DATE" and - is_number(nv[i_entry].replace("-", "").replace("X", ""))): - processed_columns[int(col)][int(row)] = float(nv[ - i_entry].replace("-", "").replace("X", "")) - #processed_columns[int(col)][int(row)] = float(nv[i_entry]) - if (len(entry) == 1 and (is_number(entry[0]) or is_date(entry[0]) or - self.is_money(entry[0]))): - if (len(entry) == 1 and not (is_number(entry[0])) and - is_date(entry[0])): - entry[0] = entry[0].replace("X", "x") - counter += 1 - word_columns = [] - processed_word_columns = [] - word_column_names = [] - word_column_indices = [] - number_columns = [] - processed_number_columns = [] - number_column_names = [] - number_column_indices = [] - for i in range(max_col + 1): - if (self.is_number_column(orig_columns[i])): - number_column_indices.append(i) - number_column_names.append(column_names[i]) - temp = [] - for w in orig_columns[i]: - if (is_number(w[0])): - temp.append(w[0]) - number_columns.append(temp) - processed_number_columns.append(processed_columns[i]) - else: - word_column_indices.append(i) - word_column_names.append(column_names[i]) - word_columns.append(orig_columns[i]) - processed_word_columns.append(processed_columns[i]) - table_info = TableInfo( - word_columns, word_column_names, word_column_indices, number_columns, - number_column_names, number_column_indices, processed_word_columns, - processed_number_columns, orig_columns) - self.annotated_tables[table] = table_info - f.close() - - def answer_classification(self): - lookup_questions = 0 - number_lookup_questions = 0 - word_lookup_questions = 0 - ambiguous_lookup_questions = 0 - number_questions = 0 - bad_questions = 0 - ice_bad_questions = 0 - tot = 0 - got = 0 - ice = {} - with tf.gfile.GFile( - self.root_folder + "/arvind-with-norms-2.tsv", mode="r") as f: - lines = f.readlines() - for line in lines: - line = line.strip() - if (not (self.annotated_examples.has_key(line.split("\t")[0]))): - continue - if (len(line.split("\t")) == 4): - line = line + "\t" * (5 - len(line.split("\t"))) - if (not (is_number(line.split("\t")[2]))): - ice_bad_questions += 1 - (example_id, ans_index, ans_raw, process_answer, - matched_cells) = line.split("\t") - if (ice.has_key(example_id)): - 
ice[example_id].append(line.split("\t")) - else: - ice[example_id] = [line.split("\t")] - for q_id in self.annotated_examples.keys(): - tot += 1 - example = self.annotated_examples[q_id] - table_info = self.annotated_tables[example.table_key] - # Figure out if the answer is numerical or lookup - n_cols = len(table_info.orig_columns) - n_rows = len(table_info.orig_columns[0]) - example.lookup_matrix = np.zeros((n_rows, n_cols)) - exact_matches = {} - for (example_id, ans_index, ans_raw, process_answer, - matched_cells) in ice[q_id]: - for match_cell in matched_cells.split("|"): - if (len(match_cell.split(",")) == 2): - (row, col) = match_cell.split(",") - row = int(row) - col = int(col) - if (row >= 0): - exact_matches[ans_index] = 1 - answer_is_in_table = len(exact_matches) == len(example.answer) - if (answer_is_in_table): - for (example_id, ans_index, ans_raw, process_answer, - matched_cells) in ice[q_id]: - for match_cell in matched_cells.split("|"): - if (len(match_cell.split(",")) == 2): - (row, col) = match_cell.split(",") - row = int(row) - col = int(col) - example.lookup_matrix[row, col] = float(ans_index) + 1.0 - example.lookup_number_answer = 0.0 - if (answer_is_in_table): - lookup_questions += 1 - if len(example.answer) == 1 and is_number(example.answer[0]): - example.number_answer = float(example.answer[0]) - number_lookup_questions += 1 - example.is_number_lookup = True - else: - #print "word lookup" - example.calc_answer = example.number_answer = 0.0 - word_lookup_questions += 1 - example.is_word_lookup = True - else: - if (len(example.answer) == 1 and is_number(example.answer[0])): - example.number_answer = example.answer[0] - example.is_number_calc = True - else: - bad_questions += 1 - example.is_bad_example = True - example.is_unknown_answer = True - example.is_lookup = example.is_word_lookup or example.is_number_lookup - if not example.is_word_lookup and not example.is_bad_example: - number_questions += 1 - example.calc_answer = example.answer[0] - example.lookup_number_answer = example.calc_answer - # Split up the lookup matrix into word part and number part - number_column_indices = table_info.number_column_indices - word_column_indices = table_info.word_column_indices - example.word_columns = table_info.word_columns - example.number_columns = table_info.number_columns - example.word_column_names = table_info.word_column_names - example.processed_number_columns = table_info.processed_number_columns - example.processed_word_columns = table_info.processed_word_columns - example.number_column_names = table_info.number_column_names - example.number_lookup_matrix = example.lookup_matrix[:, - number_column_indices] - example.word_lookup_matrix = example.lookup_matrix[:, word_column_indices] - - def load(self): - train_data = [] - dev_data = [] - test_data = [] - self.load_annotated_data( - os.path.join(self.data_folder, "training.annotated")) - self.load_annotated_tables() - self.answer_classification() - self.train_loader.load() - self.dev_loader.load() - for i in range(self.train_loader.num_questions()): - example = self.train_loader.examples[i] - example = self.annotated_examples[example] - train_data.append(example) - for i in range(self.dev_loader.num_questions()): - example = self.dev_loader.examples[i] - dev_data.append(self.annotated_examples[example]) - - self.load_annotated_data( - os.path.join(self.data_folder, "pristine-unseen-tables.annotated")) - self.load_annotated_tables() - self.answer_classification() - self.test_loader.load() - for i in 
range(self.test_loader.num_questions()):
-      example = self.test_loader.examples[i]
-      test_data.append(self.annotated_examples[example])
-    return train_data, dev_data, test_data
diff --git a/research/next_frame_prediction/README.md b/research/next_frame_prediction/README.md
deleted file mode 100644
index 9aa9b6fc5..000000000
--- a/research/next_frame_prediction/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-Visual Dynamics: Probabilistic Future Frame Synthesis via Cross Convolutional Networks.
-
-Introduction
-
-https://arxiv.org/pdf/1607.02586v1.pdf
-
-This is an implementation based on my understanding, with small
-variations. It doesn't necessarily represent the paper published
-by the original authors.
-
-Authors: Xin Pan, Anelia Angelova
-
-Results:
-
-![Sample1](g3doc/cross_conv.png)
-
-![Sample2](g3doc/cross_conv2.png)
-
-![Loss](g3doc/cross_conv3.png)
-
-Prerequisites:
-
-1. Install TensorFlow (r0.12) and Bazel.
-
-2. Download the Sprites dataset or generate the moving-objects dataset.
-
-The Sprites data is located here:
-
-http://www.scottreed.info/files/nips2015-analogy-data.tar.gz
-
-Convert the .mat files into images, then use sprites_gen.py to convert them
-to tf.SequenceExample.
-
-How to run:
-
-```shell
-$ ls -R
-.:
-data next_frame_prediction WORKSPACE
-
-./data:
-tfrecords tfrecords_test
-
-./next_frame_prediction:
-cross_conv g3doc README.md
-
-./next_frame_prediction/cross_conv:
-BUILD eval.py objects_gen.py model.py reader.py sprites_gen.py train.py
-
-./next_frame_prediction/g3doc:
-cross_conv2.png cross_conv3.png cross_conv.png
-
-
-# Build everything.
-$ bazel build -c opt next_frame_prediction/...
-
-# The following example runs the generated 2d objects.
-# For the Sprites dataset, image_size should be 60 and norm_scale should be 255.0.
-# Batch size is normally 16~64, depending on your memory size.
-
-# Run training.
-$ bazel-bin/next_frame_prediction/cross_conv/train \
-    --batch_size=1 \
-    --data_filepattern=data/tfrecords \
-    --image_size=64 \
-    --log_root=/tmp/predict
-
-step: 1, loss: 24.428671
-step: 2, loss: 19.211605
-step: 3, loss: 5.543143
-step: 4, loss: 3.035339
-step: 5, loss: 1.771392
-step: 6, loss: 2.099824
-step: 7, loss: 1.747665
-step: 8, loss: 1.572436
-step: 9, loss: 1.586816
-step: 10, loss: 1.434191
-
-# Run eval.
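-# (Eval polls --log_root for new checkpoints about once a minute and reports
-# the loss averaged over --eval_batch_count batches; see eval.py below.)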
-$ bazel-bin/next_frame_prediction/cross_conv/eval \ - --batch_size=1 \ - --data_filepattern=data/tfrecords_test \ - --image_size=64 \ - --log_root=/tmp/predict -``` diff --git a/research/next_frame_prediction/cross_conv/BUILD b/research/next_frame_prediction/cross_conv/BUILD deleted file mode 100644 index b435087f3..000000000 --- a/research/next_frame_prediction/cross_conv/BUILD +++ /dev/null @@ -1,48 +0,0 @@ -licenses(["notice"]) # Apache 2.0 - -package_group( - name = "internal", - packages = [ - "//next_frame_prediction/...", - ], -) - -package(default_visibility = [":internal"]) - -py_library( - name = "model", - srcs = ["model.py"], -) - -py_library( - name = "reader", - srcs = ["reader.py"], -) - -py_binary( - name = "train", - srcs = ["train.py"], - deps = [ - ":model", - ":reader", - ], -) - -py_binary( - name = "eval", - srcs = ["eval.py"], - deps = [ - ":model", - ":reader", - ], -) - -py_binary( - name = "example_gen", - srcs = ["example_gen.py"], -) - -py_binary( - name = "sprites_gen", - srcs = ["sprites_gen.py"], -) diff --git a/research/next_frame_prediction/cross_conv/eval.py b/research/next_frame_prediction/cross_conv/eval.py deleted file mode 100644 index 17ebc0e0e..000000000 --- a/research/next_frame_prediction/cross_conv/eval.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Eval Cross Convolutional Model.""" -import io -import os -import sys -import time - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import model as cross_conv_model -import reader - -FLAGS = tf.flags.FLAGS -tf.flags.DEFINE_string('log_root', '/tmp/moving_obj', 'The root dir of output.') -tf.flags.DEFINE_string('data_filepattern', - 'est', - 'training data file pattern.') -tf.flags.DEFINE_integer('batch_size', 1, 'Batch size.') -tf.flags.DEFINE_integer('image_size', 64, 'Image height and width.') -tf.flags.DEFINE_float('norm_scale', 1.0, 'Normalize the original image') -tf.flags.DEFINE_float('scale', 10.0, - 'Scale the image after norm_scale and move the diff ' - 'to the positive realm.') -tf.flags.DEFINE_integer('sequence_length', 2, 'tf.SequenceExample length.') -tf.flags.DEFINE_integer('eval_batch_count', 100, - 'Average the result this number of examples.') -tf.flags.DEFINE_bool('l2_loss', True, 'If true, include l2_loss.') -tf.flags.DEFINE_bool('reconstr_loss', False, 'If true, include reconstr_loss.') -tf.flags.DEFINE_bool('kl_loss', True, 'If true, include KL loss.') - -slim = tf.contrib.slim - - -def _Eval(): - params = dict() - params['batch_size'] = FLAGS.batch_size - params['seq_len'] = FLAGS.sequence_length - params['image_size'] = FLAGS.image_size - params['is_training'] = False - params['norm_scale'] = FLAGS.norm_scale - params['scale'] = FLAGS.scale - params['l2_loss'] = FLAGS.l2_loss - params['reconstr_loss'] = FLAGS.reconstr_loss - params['kl_loss'] = FLAGS.kl_loss - - eval_dir = os.path.join(FLAGS.log_root, 'eval') - - images = reader.ReadInput( - FLAGS.data_filepattern, shuffle=False, params=params) - images *= params['scale'] - # Increase the value makes training much faster. - image_diff_list = reader.SequenceToImageAndDiff(images) - model = cross_conv_model.CrossConvModel(image_diff_list, params) - model.Build() - - summary_writer = tf.summary.FileWriter(eval_dir) - saver = tf.train.Saver() - sess = tf.Session('', config=tf.ConfigProto(allow_soft_placement=True)) - tf.train.start_queue_runners(sess) - - while True: - time.sleep(60) - try: - ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) - except tf.errors.OutOfRangeError as e: - sys.stderr.write('Cannot restore checkpoint: %s\n' % e) - continue - if not (ckpt_state and ckpt_state.model_checkpoint_path): - sys.stderr.write('No model to eval yet at %s\n' % FLAGS.log_root) - continue - sys.stderr.write('Loading checkpoint %s\n' % - ckpt_state.model_checkpoint_path) - saver.restore(sess, ckpt_state.model_checkpoint_path) - # Use the empirical distribution of z from training set. 
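-    # (z_mean.npy and z_stddev_log.npy are running averages of the motion
-    # encoder's posterior parameters, written periodically by train.py; eval
-    # feeds them in via feed_dict in place of the per-batch encoder outputs.)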
- if not tf.gfile.Exists(os.path.join(FLAGS.log_root, 'z_mean.npy')): - sys.stderr.write('No z at %s\n' % FLAGS.log_root) - continue - - with tf.gfile.Open(os.path.join(FLAGS.log_root, 'z_mean.npy')) as f: - sample_z_mean = np.load(io.BytesIO(f.read())) - with tf.gfile.Open( - os.path.join(FLAGS.log_root, 'z_stddev_log.npy')) as f: - sample_z_stddev_log = np.load(io.BytesIO(f.read())) - - total_loss = 0.0 - for _ in xrange(FLAGS.eval_batch_count): - loss_val, total_steps, summaries = sess.run( - [model.loss, model.global_step, model.summary_op], - feed_dict={model.z_mean: sample_z_mean, - model.z_stddev_log: sample_z_stddev_log}) - total_loss += loss_val - - summary_writer.add_summary(summaries, total_steps) - sys.stderr.write('steps: %d, loss: %f\n' % - (total_steps, total_loss / FLAGS.eval_batch_count)) - - -def main(_): - _Eval() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/next_frame_prediction/cross_conv/example_gen.py b/research/next_frame_prediction/cross_conv/example_gen.py deleted file mode 100644 index bcda0bc40..000000000 --- a/research/next_frame_prediction/cross_conv/example_gen.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Generate examples of two objects moving in different directions.""" -import random -import sys - -import numpy as np -from six.moves import xrange -import tensorflow as tf - - -tf.flags.DEFINE_string('out_file', '', - 'Output file for the tfrecords.') - - -def _add_object(obj_type, image, image2, xpos, ypos): - """Add a moving obj to two consecutive images.""" - obj_size = random.randint(8, 10) - channel = random.randint(0, 2) - move = random.randint(6, 10) - - obj = np.zeros([obj_size, obj_size, 3]) - if obj_type == 'rectangle': - xpos2 = xpos + move - ypos2 = ypos - for i in xrange(obj_size): - obj[i, 0:i+1, channel] = [1.0 for _ in xrange(i+1)] - elif obj_type == 'square': - xpos2 = xpos - ypos2 = ypos + move - obj[:, :, channel] = 1.0 - - for x in xrange(obj_size): - for y in xrange(obj_size): - if obj[x, y, channel] == 1.0: - image[xpos+x, ypos+y, channel] = 1.0 - image2[xpos2+x, ypos2+y, channel] = 1.0 - - -def _images_to_example(image, image2): - """Convert two consecutive images to SequenceExample.""" - example = tf.SequenceExample() - feature_list = example.feature_lists.feature_list['moving_objs'] - feature = feature_list.feature.add() - feature.float_list.value.extend(np.reshape(image, [-1]).tolist()) - feature = feature_list.feature.add() - feature.float_list.value.extend(np.reshape(image2, [-1]).tolist()) - return example - - -def generate_input(): - """Generate tfrecords.""" - writer = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file) - writer2 = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file + '_test') - - examples = [] - for xpos in xrange(0, 40, 3): - for ypos in xrange(0, 40, 3): - for xpos2 in xrange(0, 40, 3): - for ypos2 in xrange(0, 40, 3): - image = np.zeros([64, 64, 3]) - image2 = np.zeros([64, 64, 3]) - _add_object('rectangle', image, image2, xpos, ypos) - _add_object('square', image, image2, xpos2, ypos2) - examples.append(_images_to_example(image, image2)) - - sys.stderr.write('Finish generating examples.\n') - random.shuffle(examples) - for count, ex in enumerate(examples): - if count % 10 == 0: - writer2.write(ex.SerializeToString()) - else: - writer.write(ex.SerializeToString()) - -def main(_): - generate_input() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/next_frame_prediction/cross_conv/model.py b/research/next_frame_prediction/cross_conv/model.py deleted file mode 100644 index 7b48e446e..000000000 --- a/research/next_frame_prediction/cross_conv/model.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Cross Convolutional Model. 
- -https://arxiv.org/pdf/1607.02586v1.pdf -""" -import math -import sys - -from six.moves import xrange -import tensorflow as tf - -slim = tf.contrib.slim - - -class CrossConvModel(object): - - def __init__(self, image_diff_list, params): - """Constructor. - - Args: - image_diff_list: A list of (image, diff) tuples, with shape - [batch_size, image_size, image_size, 3] and image_sizes as - [32, 64, 128, 256]. - params: Dict of parameters. - """ - self.images = [i for (i, _) in image_diff_list] - # Move the diff to the positive realm. - self.diffs = [(d + params['scale']) / 2 for (i, d) in image_diff_list] - self.params = params - - def Build(self): - with tf.device('/gpu:0'): - with slim.arg_scope([slim.conv2d], - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, - normalizer_params={'is_training': - self.params['is_training']}): - self._BuildMotionKernel() - encoded_images = self._BuildImageEncoder() - cross_conved_images = self._CrossConv(encoded_images) - self._BuildImageDecoder(cross_conved_images) - self._BuildLoss() - - image = self.images[1] - diff = self.diffs[1] - - self.global_step = tf.Variable(0, name='global_step', trainable=False) - - if self.params['is_training']: - self._BuildTrainOp() - - diff = diff * 2.0 - self.params['scale'] - diff_output = self.diff_output * 2.0 - self.params['scale'] - concat_image = tf.concat( - axis=1, values=[image, image + diff_output, image + diff, diff_output]) - tf.summary.image('origin_predict_expect_predictdiff', concat_image) - self.summary_op = tf.summary.merge_all() - return self.loss - - def _BuildTrainOp(self): - lrn_rate = tf.maximum( - 0.01, # min_lr_rate. - tf.train.exponential_decay( - self.params['learning_rate'], self.global_step, 10000, 0.5)) - tf.summary.scalar('learning rate', lrn_rate) - optimizer = tf.train.GradientDescentOptimizer(lrn_rate) - self.train_op = slim.learning.create_train_op( - self.loss, optimizer, global_step=self.global_step) - - def _BuildLoss(self): - # 1. reconstr_loss seems doesn't do better than l2 loss. - # 2. Only works when using reduce_mean. reduce_sum doesn't work. - # 3. It seems kl loss doesn't play an important role. 
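-    # (The total loss is the sum of whichever terms are enabled via params.
-    # The kl term below is the closed form KL(N(mu, sigma) || N(0, 1)) =
-    # 0.5 * (mu^2 + sigma^2 - 2*log(sigma) - 1), averaged over dimensions.)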
- self.loss = 0 - with tf.variable_scope('loss'): - if self.params['l2_loss']: - l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1])) - tf.summary.scalar('l2_loss', l2_loss) - self.loss += l2_loss - if self.params['reconstr_loss']: - reconstr_loss = (-tf.reduce_mean( - self.diffs[1] * (1e-10 + self.diff_output) + - (1-self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output))) - reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss') - tf.summary.scalar('reconstr_loss', reconstr_loss) - self.loss += reconstr_loss - if self.params['kl_loss']: - kl_loss = (0.5 * tf.reduce_mean( - tf.square(self.z_mean) + tf.square(self.z_stddev) - - 2 * self.z_stddev_log - 1)) - tf.summary.scalar('kl_loss', kl_loss) - self.loss += kl_loss - - tf.summary.scalar('loss', self.loss) - - def _BuildMotionKernel(self): - image = self.images[-2] - diff = self.diffs[-2] - shape = image.get_shape().as_list() - assert shape[1] == shape[2] and shape[1] == 128 - batch_size = shape[0] - - net = tf.concat(axis=3, values=[image, diff]) - with tf.variable_scope('motion_encoder'): - with slim.arg_scope([slim.conv2d], padding='VALID'): - net = slim.conv2d(net, 96, [5, 5], stride=1) - net = slim.max_pool2d(net, [2, 2]) - net = slim.conv2d(net, 96, [5, 5], stride=1) - net = slim.max_pool2d(net, [2, 2]) - net = slim.conv2d(net, 128, [5, 5], stride=1) - net = slim.conv2d(net, 128, [5, 5], stride=1) - net = slim.max_pool2d(net, [2, 2]) - net = slim.conv2d(net, 256, [4, 4], stride=1) - net = slim.conv2d(net, 256, [3, 3], stride=1) - - z = tf.reshape(net, shape=[batch_size, -1]) - self.z_mean, self.z_stddev_log = tf.split( - axis=1, num_or_size_splits=2, value=z) - self.z_stddev = tf.exp(self.z_stddev_log) - - epsilon = tf.random_normal( - self.z_mean.get_shape().as_list(), 0, 1, dtype=tf.float32) - kernel = self.z_mean + tf.multiply(self.z_stddev, epsilon) - - width = int(math.sqrt(kernel.get_shape().as_list()[1] // 128)) - kernel = tf.reshape(kernel, [batch_size, width, width, 128]) - with tf.variable_scope('kernel_decoder'): - with slim.arg_scope([slim.conv2d], padding='SAME'): - kernel = slim.conv2d(kernel, 128, [5, 5], stride=1) - self.kernel = slim.conv2d(kernel, 128, [5, 5], stride=1) - - sys.stderr.write('kernel shape: %s\n' % kernel.get_shape()) - - def _BuildImageEncoder(self): - feature_maps = [] - for (i, image) in enumerate(self.images): - with tf.variable_scope('image_encoder_%d' % i): - with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'): - net = slim.conv2d(image, 64, [5, 5], stride=1) - net = slim.conv2d(net, 64, [5, 5], stride=1) - net = slim.max_pool2d(net, [5, 5]) - net = slim.conv2d(net, 64, [5, 5], stride=1) - net = slim.conv2d(net, 32, [5, 5], stride=1) - net = slim.max_pool2d(net, [2, 2]) - sys.stderr.write('image_conv shape: %s\n' % net.get_shape()) - feature_maps.append(net) - return feature_maps - - def _CrossConvHelper(self, encoded_image, kernel): - """Cross Convolution. - - The encoded image and kernel are of the same shape. Namely - [batch_size, image_size, image_size, channels]. They are split - into [image_size, image_size] image squares [kernel_size, kernel_size] - kernel squares. kernel squares are used to convolute image squares. 
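-
-    (Implementation note: this helper handles a single example; the batch is
-    unstacked in _CrossConv. tf.nn.depthwise_conv2d convolves each channel of
-    the encoded image with its own slice of the predicted kernel.)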
- """ - images = tf.expand_dims(encoded_image, 0) - kernels = tf.expand_dims(kernel, 3) - return tf.nn.depthwise_conv2d(images, kernels, [1, 1, 1, 1], 'SAME') - - def _CrossConv(self, encoded_images): - """Apply the motion kernel on the encoded_images.""" - cross_conved_images = [] - kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel) - for (i, encoded_image) in enumerate(encoded_images): - with tf.variable_scope('cross_conv_%d' % i): - kernel = kernels[i] - - encoded_image = tf.unstack(encoded_image, axis=0) - kernel = tf.unstack(kernel, axis=0) - assert len(encoded_image) == len(kernel) - assert len(encoded_image) == self.params['batch_size'] - conved_image = [] - for j in xrange(len(encoded_image)): - conved_image.append(self._CrossConvHelper( - encoded_image[j], kernel[j])) - cross_conved_images.append(tf.concat(axis=0, values=conved_image)) - sys.stderr.write('cross_conved shape: %s\n' % - cross_conved_images[-1].get_shape()) - return cross_conved_images - - def _Deconv(self, net, out_filters, kernel_size, stride): - shape = net.get_shape().as_list() - in_filters = shape[3] - kernel_shape = [kernel_size, kernel_size, out_filters, in_filters] - - weights = tf.get_variable( - name='weights', - shape=kernel_shape, - dtype=tf.float32, - initializer=tf.truncated_normal_initializer(stddev=0.01)) - - - out_height = shape[1] * stride - out_width = shape[2] * stride - batch_size = shape[0] - - output_shape = [batch_size, out_height, out_width, out_filters] - net = tf.nn.conv2d_transpose(net, weights, output_shape, - [1, stride, stride, 1], padding='SAME') - slim.batch_norm(net) - return net - - def _BuildImageDecoder(self, cross_conved_images): - """Decode the cross_conved feature maps into the predicted images.""" - nets = [] - for i, cross_conved_image in enumerate(cross_conved_images): - with tf.variable_scope('image_decoder_%d' % i): - stride = 64 / cross_conved_image.get_shape().as_list()[1] - # TODO(xpan): Alternative solution for upsampling? - nets.append(self._Deconv( - cross_conved_image, 64, kernel_size=3, stride=stride)) - - net = tf.concat(axis=3, values=nets) - net = slim.conv2d(net, 128, [9, 9], padding='SAME', stride=1) - net = slim.conv2d(net, 128, [1, 1], padding='SAME', stride=1) - net = slim.conv2d(net, 3, [1, 1], padding='SAME', stride=1) - self.diff_output = net - sys.stderr.write('diff_output shape: %s\n' % self.diff_output.get_shape()) diff --git a/research/next_frame_prediction/cross_conv/reader.py b/research/next_frame_prediction/cross_conv/reader.py deleted file mode 100644 index ab4ab698d..000000000 --- a/research/next_frame_prediction/cross_conv/reader.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Read image sequence.""" - -from six.moves import xrange -import tensorflow as tf - - -def SequenceToImageAndDiff(images): - """Convert image sequence batch into image and diff batch. - - Each image pair is converted to the first image and their diff. - Batch size will increase if sequence length is larger than 2. - - Args: - images: Image sequence with shape - [batch_size, seq_len, image_size, image_size, channel] - - Returns: - the list of (image, diff) tuples with shape - [batch_size2, image_size, image_size, channel]. image_sizes are - [32, 64, 128, 256]. - """ - image_diff_list = [] - image_seq = tf.unstack(images, axis=1) - for size in [32, 64, 128, 256]: - resized_images = [ - tf.image.resize_images(i, [size, size]) for i in image_seq] - diffs = [] - for i in xrange(0, len(resized_images)-1): - diffs.append(resized_images[i+1] - resized_images[i]) - image_diff_list.append( - (tf.concat(axis=0, values=resized_images[:-1]), tf.concat(axis=0, values=diffs))) - return image_diff_list - - -def ReadInput(data_filepattern, shuffle, params): - """Read the tf.SequenceExample tfrecord files. - - Args: - data_filepattern: tf.SequenceExample tfrecord filepattern. - shuffle: Whether to shuffle the examples. - params: parameter dict. - - Returns: - image sequence batch [batch_size, seq_len, image_size, image_size, channel]. - """ - image_size = params['image_size'] - filenames = tf.gfile.Glob(data_filepattern) - filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle) - reader = tf.TFRecordReader() - _, example = reader.read(filename_queue) - feature_sepc = { - 'moving_objs': tf.FixedLenSequenceFeature( - shape=[image_size * image_size * 3], dtype=tf.float32)} - _, features = tf.parse_single_sequence_example( - example, sequence_features=feature_sepc) - moving_objs = tf.reshape( - features['moving_objs'], [params['seq_len'], image_size, image_size, 3]) - if shuffle: - examples = tf.train.shuffle_batch( - [moving_objs], - batch_size=params['batch_size'], - num_threads=64, - capacity=params['batch_size'] * 100, - min_after_dequeue=params['batch_size'] * 4) - else: - examples = tf.train.batch([moving_objs], - batch_size=params['batch_size'], - num_threads=16, - capacity=params['batch_size']) - examples /= params['norm_scale'] - return examples diff --git a/research/next_frame_prediction/cross_conv/sprites_gen.py b/research/next_frame_prediction/cross_conv/sprites_gen.py deleted file mode 100644 index 0d36c255c..000000000 --- a/research/next_frame_prediction/cross_conv/sprites_gen.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Generate the sprites tfrecords from raw_images.""" -import os -import random -import re -import sys - -import numpy as np -import scipy.misc -from six.moves import xrange -import tensorflow as tf - - -tf.flags.DEFINE_string('data_filepattern', '', 'The raw images.') -tf.flags.DEFINE_string('out_file', '', - 'File name for the tfrecord output.') - - -def _read_images(): - """Read images from image files into data structure.""" - sprites = dict() - files = tf.gfile.Glob(tf.flags.FLAGS.data_filepattern) - for f in files: - image = scipy.misc.imread(f) - m = re.search('image_([0-9]+)_([0-9]+)_([0-9]+).jpg', os.path.basename(f)) - if m.group(1) not in sprites: - sprites[m.group(1)] = dict() - character = sprites[m.group(1)] - if m.group(2) not in character: - character[m.group(2)] = dict() - pose = character[m.group(2)] - pose[int(m.group(3))] = image - return sprites - - -def _images_to_example(image, image2): - """Convert 2 consecutive image to a SequenceExample.""" - example = tf.SequenceExample() - feature_list = example.feature_lists.feature_list['moving_objs'] - feature = feature_list.feature.add() - feature.float_list.value.extend(np.reshape(image, [-1]).tolist()) - feature = feature_list.feature.add() - feature.float_list.value.extend(np.reshape(image2, [-1]).tolist()) - return example - - -def generate_input(): - """Generate tfrecords.""" - sprites = _read_images() - sys.stderr.write('Finish reading images.\n') - train_writer = tf.python_io.TFRecordWriter( - tf.flags.FLAGS.out_file.replace('sprites', 'sprites_train')) - test_writer = tf.python_io.TFRecordWriter( - tf.flags.FLAGS.out_file.replace('sprites', 'sprites_test')) - - train_examples = [] - test_examples = [] - for i in sprites: - if int(i) < 24: - examples = test_examples - else: - examples = train_examples - - character = sprites[i] - for j in character.keys(): - pose = character[j] - for k in xrange(1, len(pose), 1): - image = pose[k] - image2 = pose[k+1] - examples.append(_images_to_example(image, image2)) - - sys.stderr.write('Finish generating examples: %d, %d.\n' % - (len(train_examples), len(test_examples))) - random.shuffle(train_examples) - _ = [train_writer.write(ex.SerializeToString()) for ex in train_examples] - _ = [test_writer.write(ex.SerializeToString()) for ex in test_examples] - - -def main(_): - generate_input() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/next_frame_prediction/cross_conv/train.py b/research/next_frame_prediction/cross_conv/train.py deleted file mode 100644 index 5b9973f52..000000000 --- a/research/next_frame_prediction/cross_conv/train.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2016 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Train the cross convolutional model.""" -import os -import sys - -import numpy as np -import tensorflow as tf - -import model as cross_conv_model -import reader - -FLAGS = tf.flags.FLAGS -tf.flags.DEFINE_string('master', '', 'Session address.') -tf.flags.DEFINE_string('log_root', '/tmp/moving_obj', 'The root dir of output.') -tf.flags.DEFINE_string('data_filepattern', '', - 'training data file pattern.') -tf.flags.DEFINE_integer('image_size', 64, 'Image height and width.') -tf.flags.DEFINE_integer('batch_size', 1, 'Batch size.') -tf.flags.DEFINE_float('norm_scale', 1.0, 'Normalize the original image') -tf.flags.DEFINE_float('scale', 10.0, - 'Scale the image after norm_scale and move the diff ' - 'to the positive realm.') -tf.flags.DEFINE_integer('sequence_length', 2, 'tf.SequenceExample length.') -tf.flags.DEFINE_float('learning_rate', 0.8, 'Learning rate.') -tf.flags.DEFINE_bool('l2_loss', True, 'If true, include l2_loss.') -tf.flags.DEFINE_bool('reconstr_loss', False, 'If true, include reconstr_loss.') -tf.flags.DEFINE_bool('kl_loss', True, 'If true, include KL loss.') - -slim = tf.contrib.slim - - -def _Train(): - params = dict() - params['batch_size'] = FLAGS.batch_size - params['seq_len'] = FLAGS.sequence_length - params['image_size'] = FLAGS.image_size - params['is_training'] = True - params['norm_scale'] = FLAGS.norm_scale - params['scale'] = FLAGS.scale - params['learning_rate'] = FLAGS.learning_rate - params['l2_loss'] = FLAGS.l2_loss - params['reconstr_loss'] = FLAGS.reconstr_loss - params['kl_loss'] = FLAGS.kl_loss - - train_dir = os.path.join(FLAGS.log_root, 'train') - - images = reader.ReadInput(FLAGS.data_filepattern, shuffle=True, params=params) - images *= params['scale'] - # Increase the value makes training much faster. - image_diff_list = reader.SequenceToImageAndDiff(images) - model = cross_conv_model.CrossConvModel(image_diff_list, params) - model.Build() - tf.contrib.tfprof.model_analyzer.print_model_analysis(tf.get_default_graph()) - - summary_writer = tf.summary.FileWriter(train_dir) - sv = tf.train.Supervisor(logdir=FLAGS.log_root, - summary_op=None, - is_chief=True, - save_model_secs=60, - global_step=model.global_step) - sess = sv.prepare_or_wait_for_session( - FLAGS.master, config=tf.ConfigProto(allow_soft_placement=True)) - - total_loss = 0.0 - step = 0 - sample_z_mean = np.zeros(model.z_mean.get_shape().as_list()) - sample_z_stddev_log = np.zeros(model.z_stddev_log.get_shape().as_list()) - sample_step = 0 - - while True: - _, loss_val, total_steps, summaries, z_mean, z_stddev_log = sess.run( - [model.train_op, model.loss, model.global_step, - model.summary_op, - model.z_mean, model.z_stddev_log]) - - sample_z_mean += z_mean - sample_z_stddev_log += z_stddev_log - total_loss += loss_val - step += 1 - sample_step += 1 - - if step % 100 == 0: - summary_writer.add_summary(summaries, total_steps) - sys.stderr.write('step: %d, loss: %f\n' % - (total_steps, total_loss / step)) - total_loss = 0.0 - step = 0 - - # Sampled z is used for eval. - # It seems 10k is better than 1k. Maybe try 100k next? 
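-    # (Every 10k steps the accumulated sums are averaged and dumped to
-    # z_mean.npy / z_stddev_log.npy under log_root, then reset; eval.py
-    # feeds these averages in place of the encoder's per-batch output.)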
-    if sample_step % 10000 == 0:
-      with tf.gfile.Open(os.path.join(FLAGS.log_root, 'z_mean.npy'), 'w') as f:
-        np.save(f, sample_z_mean / sample_step)
-      with tf.gfile.Open(
-          os.path.join(FLAGS.log_root, 'z_stddev_log.npy'), 'w') as f:
-        np.save(f, sample_z_stddev_log / sample_step)
-      sample_z_mean = np.zeros(model.z_mean.get_shape().as_list())
-      sample_z_stddev_log = np.zeros(
-          model.z_stddev_log.get_shape().as_list())
-      sample_step = 0
-
-
-def main(_):
-  _Train()
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/next_frame_prediction/g3doc/cross_conv.png b/research/next_frame_prediction/g3doc/cross_conv.png
deleted file mode 100644
index 13915f944188adf0b0a3dc85219fce7bcb5e7de9..0000000000000000000000000000000000000000
[GIT binary patch literals for the deleted g3doc PNG images omitted]
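To make the deleted model easier to follow, here is a minimal, self-contained sketch (not part of the patch) of the cross-convolution trick that `_CrossConv` and `_CrossConvHelper` in model.py implement: the batch is unstacked so that every example is convolved with its own predicted kernel, with `tf.nn.depthwise_conv2d` giving each channel its own filter slice. The `cross_conv` name and the toy shapes are illustrative assumptions; the TF 1.x graph/session style matches the surrounding code via `tensorflow.compat.v1`.

```python
# Illustrative sketch only -- not part of the patch above.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()


def cross_conv(encoded_images, kernels):
  """encoded_images: [batch, h, w, c]; kernels: [batch, kh, kw, c]."""
  outputs = []
  for image, kernel in zip(tf.unstack(encoded_images), tf.unstack(kernels)):
    image = tf.expand_dims(image, 0)    # [1, h, w, c]
    kernel = tf.expand_dims(kernel, 3)  # [kh, kw, c, 1]: one filter per channel
    outputs.append(tf.nn.depthwise_conv2d(image, kernel, [1, 1, 1, 1], 'SAME'))
  return tf.concat(outputs, axis=0)     # [batch, h, w, c]


with tf.Session() as sess:
  out = cross_conv(tf.ones([2, 16, 16, 4]), tf.ones([2, 5, 5, 4]))
  print(sess.run(out).shape)  # (2, 16, 16, 4)
```

The per-example Python loop mirrors what the original `_CrossConv` does with `tf.unstack`; it favors simplicity over speed, which is acceptable at the small batch sizes the README suggests.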
z6>e^}HikP$QBneVBN3&m@~Zl;nV6j#>Ub#hx5B4yuPI zCiVNK8iEA0`rG~bqIBk_)80qQ&O%4WFkj`(+2!f@`1q1_NSz6(@|%nD_Dr*@2|?#H z>e@P@3BCdx@L)K`B)bCullz@5aVG)*qxcW~eg4}I*t`B}zfKNS)!&|V7&ZU>k)?LI zSvdn9YMU%4?Of8<*NT4>d1{=A`yXe8G{Wx3oK~E~<_*p<6miJK=$Ch6J_!3GV3hQ3u< zs|>k$)#i7xi(S4u8_rn1ovRGKxL!BQ2`S>tu8p?S5ZrtfnEg14||91Y)R$PUQwC~33h~#2^dvDh^WQ@gnaDIK) z2-E{njf?O*{JZBUstd}@Vr=#E=~xOr-J7@3qbCb~xZyck%JL3m8?A7;8oPWJNRIM- zp%?q@$;Oe8hLwxDLk(1BR$)CYZ9B~QdX`{_WqVM|o@3xovRbh@(()2}lqapv=NBJw= zXs~mhtWao(HS@f#6v`}kyid8Epy62gAy3sy|do=z3rVptox95nMASS#om0a z!a4+G@~mW!=Puu==5PD$eQS8;Q}fA~XwS6x3MVn(?)0lL{2V>2d1v)X!xAYAfARPF z;;#K!!}`o@IjL*-f2dzmB2=dcB$Q1BK$x`iB7r1i^p+A=M{@)LE-rbmG8Af$azraz ze8UD(aX>5&5X^~9c)nl`BM7-+1UUnEzrqBgwAox39Cfm%fy@|o5vhHDv2*Qu-EdB6+T1R!}@n7q!ziY=spCM=iPk9;ofWF$c< z^zh7$cQk-IE0OobFW|rLS2kZe>Vq9+1|b}?>AU_tkJnN0oR-!Kl;B6S0`O-Dk$(L_WVJ@|9*hfJscE<`K zB9YBC?v3Qk3X0#(Q(#0;KXQy(AzJ)Jn22tQ)~|%FM1jXNVM-rpe?;uyU=GL92T=ipU6H0YAQx+Dndh%1|I?3RE*WN#*&3w z+W@T#y}c#~dYW9k%x+a!?Htzt&XTEr+Qh^c>Uw0HKmM=hJ=aBIsAM^MJanYoWm{GO z9rH|6zuCVNi9-(2>M+OEjLZeK7jLqjB52uFS?>ixy>Xf0{vs4V+eD~tp71sV?bNe<<0%ADU9n5|dwji@u? zuikTEU}_}#yK|};wMCB0{gBcER6pUU^m}e#QSZ{SbB0LF^z_4^yM*wj8wM@VEIs<@ zVM^Kv(`e$xXCOG|Z@lWX|yA3nBsu7gm=;{|}$}vz3sMi0fJM3mr z`H_i))O)Qk_smc%0qoQ{eOsO~YYGbTlq95i1eV#)=klv2biF>!hlh8EYt?=neI-ZK zetXlpOvKNBle5{%A6Et1{)lqvf3BG!p3@91QwrlX_@KACzq-zgCt12I7?7~@horie*PP#}5N+>a5fW+uZEbInAB92#n;V>x{G?UL-9GyOlL-3UBI zjp409;?H4t{*Q(@a=uej{PDR1T!BI_;qTr5WvC}&R7LP;J|oM}qIk5RXpnrX9T`^1 zu&mV4w0vVuh(Gf!ys*S1x@H7_pT9JZgTIpI(9f^wag=I$>BW?8lQu05jOe>Scr8&B z^!}ngeA(##h75<~0dKx9B2KUQUU_6nRJMk&@@@%RVQ|Htu8kRx`JATiGz9Hdn!>=*xv*5^uRBe_haQw}%NJ#zVkzZL&spoYdP zc+dr-@Q^oa`T0-0>maz2uh+`Rqdk42|L~@&pN6T8=@(T7qHFc@V(SRsc6&$*V6FuG zL*59Q&$F=T*_*;D-V|Aiq^H_#c0vV1m*G_-3$-sw@n(is_QFO?@#vXmM>L&@Mk*eB z6B`nS5d!28i$<}mp6;FxL{Y`7NRDW$fWS8-ed|?g&^aN2ym_vcA))!~OYQ~?@}qO9-YaPKnIDk}|J)#Ox? 
zhmiyrP#(gKvODMJ(hO<(oN728ppXv&Ii5Mu?|^{fH++A)Ms3HML*y)<9xkD`v6p8gr2wJ22bqBA1FIQ7;9P1CK$J#L)zS;h}n3lm*dhxt?j~ zkIIgP{*qzR)>W2=gkHU~-Foy+`xot*o=7qU92tpnsU+SGD+Umva4SDLj?r^6rqYEjh3do&KnA1zgYAoXfPgOq=y@h%zk#5ISyu}IFE^TQ@xdM zq!)CNGVrAPR+b2aqoCR;by2QTiWx;sR!a@uIfwPeyKb%kL+v9dqEDMP^fkSJaRpJv ze=%ITb~VIGR3k;*<9Qt2@ShtOx8@=-$(zdT-NABlX6T!Zmgzkc0})t`0l9+LlP2{F zC7%@!BJiyMH5jxlJ0@Yei(s_};Q>P2i1Ji{d@H4nN8E*=LLRkEMuGJ-xaI6jk0bK1 z{onDcH)lJO>xh;fC)wZUg@e?ki6;W_UrW8w*8B_yFT?Zrigt)EFj!MOYvrN(jRL(| zjED$D5jpvoe&Iy!<1sa|r#IiTEfvH49O6j4VMSH?1CsB$IuDm_;fx>al%SJdvshiF zPQ@O**=F&vk*2;uA~DZNm2p19bGkgei25mXjZCJen+5|o4~6*yfr{%mCZ1(y%1$~s z&b|-$Can88V*7ixRD%(vbumj1d9S~1=FijVVaFy$^~Y_|IVH&)5kma47N#}x5n2%S z=M&R;F35HKC3Eh_>3o;pwu{q1nTF=7;Zm%b2!VlX(KVueX3m>U?k4>VH zIH@t3>&Mq(Ay)|={No;t1DLdO#+GUxoP82xPHM@2fzCWOF2)TeoUZZPQ8Vc*9X{n% zzh5?2TICaZ+b85(G`IW09+TJXk~|HJx=K%bsy^^GHwP2?ct42p$B6jIMMh=&qMUT< z9M5hqR@#`Krl%}I)#8hylJ1mw?eI~3k?lj-au!q|K(_Z>OIc=DQ$sPjIGR!`xkoFN znMs|L>^z@CB|y-eR>?ue*$5ZRh75fH+xoL24xQSRyVdS8HMVEzfc-@t)Aq zb5{N@k71jsdh$r!M`AM#y92G}0h_N+U$zJRum0})aYSON;GE8(q z_)0(k6?^BBeO~H>Y5^jRs_?kYxVL2HZv)Trdym$KtSmxf^yAb<58=Z2@~}uy%^!bN zjwR#GvQxf^eoILy!i1u^FB~t{vIHQK?tOYg)SZS_j5u5{?c(@yE{_^#Qv)g56N6`o zc-m>G=bCm=jX3{(KejXumL+_Yb85&jQ7lMAX+u?ZyrF+)#L5bhhey1@|wfI-+Dq{n1fly>F)Xl+~ z=E2efss*ZC-Y3e6FuP1a-CU0M3by;Wv;LcNLQ)FjTOzu}Wn!{}Xe$zsAP3vpbDM!K zN?k(-?Oin7GDiYsSNS5@ER4s_ZB)x@aRdc4fm)%+mO!{H-oJgJF~{IoIS{D4F513a z9`Z7k{%62G72TQ_BVAQ#3GFFC827P&zL5md?)yeG-(4 zz&x-_FcwM@eCDfvfJk0LGSAI{lPvi} z7+nrC{|T|K_vzo(O7J>syCHAUGpmxkGD73Cr#M8vk~rJ4vOkZBQnlk& z2r}xO6_F+htb=d=xM;Yu3%Ra^W{!#qSIOW-ds{Y;n#%y8K$6<0oTItv>x{bXNBsiy za4;+nb$)i!a^9b#KZ@W&M5$RcRVYO2K{|}Pr*c)uk$7;}w?NVbE7ARqTy@8)?$Ker z3zC~*hG_3HMbu|60I)X%acOmgA%0xpXSW4kY}-{Pi{M#1#cNaGLEt>MCx85e7~=wQ z;v<-GG(isPikZ;}5FDB>0QG(QJ)WlFV2+9W*TI-XLb)%)h?L;*MB#hGT=C5&FtnAQ zSSW1e_jsFNpQNw1e+8H}{;e{<#uMB%rTwFgowGAhs0swV?$biXnghi6Ei_i_{X4uh z2V@Oc5+9!|R-s*|MT{p1_VE%$So`5m!wjD-^`_1x#)x>6`uDNEUc2VTU5?@%cMEO8a`UX?tB z%@Vq&(9W@iH+Qr#PgDPKf#m6~ii&Y99`$WF{vAsPNBa?V#6X|)a%rnXjsCBHAo1ox zI!1?fZ29KKt?km&fs4kGk!)>LE=1;2oW_gy<#l%ZOKG8kHTp09&o^cfh^Y_HMM z6rII=Ua?FE$Boqr_P??%H$?yf&N^lOfdH*7JGEmc=Nb6=-VOhM7T{`5BjhaQ)KhKu zO6JAY_IH}Q$yY&=?acvqZH!<0;qN*rOC|VcViSMFoV|Dd(v^Bn#*R&Yye+exr zP^v$IslRZVDXahDj4#4~``7bzhD~H&5-TBu{^R;7cGOH-&fh0MnwhMp($&p&jth7d z`mTb1^kF%B_@96Fex9Bp%<4yF1~#SBLAhx!l>7gff3NUlw+|u-VklfBUiB%dDY90M z7t674`BQe$?V2fno+i5Xs4L6}+u$~euBq8BKX3fFoUX*Yb<8&L`6H0Ij=n)>U!P2N zhp%=1mQO0TquEctIq+%zJgRSKMt~58g}y#T`(`)oefkBuMV(ygqi=8*AYhgSZP6OS zRWwIlwTRo23Fa+CzLn<8KS%QcLNiBIt6ofEY+K$LjbpGe-BOd$>zVmMRII2X@t{?n zV9|366CI3#`xIBE>Fr-v|6dfZA7=@$l6{sP|LL|V56aD=ka_z%% z>o>*Wjo9_ov!m73vyHcrXn=^2BFEW#=JLBcvGMRyo}v3o&N*WHAAl=PI|k+q`C5@% zrX&olJ@)Q4#6|QGJ!x2J{#@)&-R)JC)8JO=R(X87&@v#-jzSH@euu)z-`TnTaLSJ| zZOD4zXT&>495S4}l-aoGB$Y5F{_b(Fdcj9Jkm7fY0wmP-iU4M@ZmLoy!dvaFxn4Rg z8B?3%$!wsgnHb%qyTbIYJiGe0gK(L#{<;25QtCsmvW5!u;V7_z{*MiI*cCyj6&ekY zPov6~D9YlF)i?#-D&S2Syldu$;=*RL{*Ix>YE=k_mFy>u1R=_CL<5 z9+VShFdh;CFI==fmWUnxgA_wg92&nO&RqTZ*wqo|^mCi(&Aj#slbV-$XH;D!SzA3* zuvEdsu86~uBZ&xj21E`ZrcbRh&+1+EeOz1pJE$dVEuTC((fHnwoBw0i)jq1 z_R%{>kuItx$Bont4GUK6<18&ZHfb&*Q_1Bm;4u7GYVtY_)pp<2KM4?E(v<*U2k)FX znoT|bpHW9KuO=m+g(3%7{eajNNx2qWC)pLtV_W63mp(O^3uDl$6vFtw3N+8_lDKAM zUaCCH{5MyX=sgBTbg-yPNUsDaOqx>{I}-jPWsLs-XhxUtHB{;+jiCXH<=)2j>H^s1{+#8wmMpt(6jOT9=DLMcXwH7=x4Y=24VH7auL zF&F&HcC7qLZE~Lmgrr4b2ORBpHt4V`ED=b8DI(1mIZ7Ulf>wY*ew|#$7+H6R?XSX$ zsZw6p18Ck<2Dl4QAnCf7Hgg0pVz^J>Nbl23XX!il3`=q{QEIkZgd5jDC?y#W2FnX3R 
zFcw1sayt2zERq-;6Q#T}ZFhrC39d5Z5tbBZ<|Z#x7^ToA5QgoeWE6?hUi5;K38XqY24S#$7<^*@@Ab$8>u36K;IXp4e~Z3TUw74dwpiHT(0+nQuyuO=kNEs!1uYzf7MMydZJVxdAKV4j(?d`Syn;t*9wshI;rDJ6F zpj=Y+6acF1@#Z=g%2CI%C&vCQ}S> z^~Dro!)UmC=jEBvu4ECH z)(cK#v6x~wd2RGUpJal)!BK=Cl9q`)NxLs_D)j@^h;7t64D0&b0Hw{u*I>$I%&T^p zz`#$hp;7$j{OR0W3xnw3q?VE)^+zKYUBb?K^}jT$JkH3}Dy1JW!;!Ysp-VY#9e?*( zh{=@ujO`Yd#@qA;DobD8obm@Ed%gJ3->12ZTrRsFBw{mfsy zOLDp`pRb*J!(NgZ+iYX1IhK2A=(grvlB?Zo$x%SJji*GZqD(LDGo@gRT_Gha{pJ@a z$oU1WGonVh;9b^ieN#E{)5ET;I%=kuDi#F@8^IPsn>k5tLWD>kS0$c&G9(l%;BOHt ztw|a2nag~wkifcm_vG*A^@MI9pR~vSj2QY^rx@(6&3DAbk1gSVFtW*Pelf`2*-Gi& zWKVhM^Y`*-b!T-bU8&$34enB)PeQhm>(~&7JGT^C8hOS|_0_<7Y?m_IL27%s-GB z!MOW804zz5vUtUO9}W#a9*k|}krt-lea*mev$NIfi*=Kec;nyb3oody9vIj;>3z08 ze6k}=JQk>xma9%Vwh_tvS=ZkYi32v$qI^rgJ-5Am*qM7O>qp+FYMCjFp`aji)c$a3 z8Svy0nGt@?`hbCxy1&dTq1S34%SFDgZ^=sHyZ`cixyd0$xl=yXqK&pgOsR$DAgCg% ze4V-Gv8SbZaory?^^3{3Hhidi1XHS2VtE>dR2d(Jo@a^9d4XABNO$jwEmmpiLK_zs zuG=4Xw;}%$?taUW;)hR8POdvzl)sdR4~2I^fdI5TMQ`cF&cA~{`?t45Ys2@HOl+BF z`pxxOpRTTLiG1;;!{;>a;^R4m(|1WaA53NAvhW2^E&PzD;fL+l*!JMNzc4X#!VZzEp%P>EWoTYC*F?>=W!yUm zcP%=ROtZU3^Z`Bqfq}vC=TCQSsto=ZRvDDN%@<^qz`Cp_imN7zzsD(*Hj+1^DTPnj z?Ju*$ofi%E_HQ4WkKr7${yw?99By!PtSkmVv*DHcGR#@}M6ZV_8% z%4WV-1_nP@}UV*W7uKh1{W9kGfj*VPEEB06M(S9VyvjqPG8-=TQv9>FZvf zZJgQG8C89_uyie>LXF!)fRT%7Pn%UJ`Cv5!1t(@5<$h^s7t4i@+c)2pu4ghvq#Tv9 zJv^08T|W@VQn%!#k55l9WyBhG`uIh1e*VKL@r+@}qfd?sqih1 z+7T2uN*4O_dw4PODibyO#{>4hM7DI1!)LPuKDmK_n*y`8)Sbf5*vcagVNdM(v#oP;iQ_VB> zA*2`qi4YBpT~tg|6eW-`oC9Y2Q{;p|uu>%Fol2GtC?&HEDX4{ZEG-2mr)DiWqV?1m z^a<4rb^FCh)>M>3NyA%t8ac~=1F1TK1lyb^9lS7$-#)@&rPsr~>Y2Nm#XHUFAUK4Y zjuv)gozX^A{<*69MF+c=H=VzKYfZ#*TUGO%r_Ga? zC!+pz4m-cPL2S1i zam#B$FMm^{#k&ejjhkZU)irkca&iw>Q^BrWr~Y#CNKhW+yai6%C^g!`Th3N!AJNvp%}-^ zb&lOHGID`4&+_wrR=QTk2AOj65ZNn53`n?&O+IsxITQ@Pq{Iu;lvo2L8b&-CQ{z+s z_A}4p;P)k>&mIDk4ZIr>d}W#)rL_{l?erN;4OE;{K4Ik&Aso*=kIL4#<~X@$Qv6C} z=t08K=iu89?bp3>cctxW0o$IUSF8KCXDoNWeMj!YI&~m+cT^dCNz)D`!2RQP_gC!h zV1BtA(^O6-zLZMowdbD`c+Tpha&tLzcUHv%A&d-#O{m}uh8()xZYtci?hD&VE$&mL47fr$-%?h+bn*)!3%ZdrGC5L>FT=hZbIn6tFcfjbk*z%hY4{y*efwiD9rN3KEeU;ZtwI|Mhr%?zFX-{wDHDxG4dO%{DlQh4&0Mr%P^^Wbn(Es?}6B<_AZ89G*_+SUUs%=1X81CEBge zl6N*ToN^PI*|c1;?7rZnriH zr{iBVW!&v&Clxg^F(f7Jjf(2XUS5a#n6Kb;KJDU&sYNKTb#Y7>IkE9Pd#T$O=<^5?eo);y0I-^a^FDZ^rhjaWcIK|9upBhx|58Dn_^Gu4&$;}?~@Fbs66zZaRR3rL+N%NNu==Di$etEs*zxihJ zLFF`>$sm57tis{0BU!Xv`tlmIB`JZaK)Gj9;p3;|=f8o>E@wwrn5#=Lo&}<#(dx+@ z#+x)WA0)@LXuqEi?V`_@KqvuU0p}kct_PYgA1{z@;otg|R`!Tmokx@~3_mUKk%U2i zuMeh-+iU9e`JZ3b+%L`?yVjX7vaql@l%?J)!N+s0%Nl)aT88O;2DX?ONQ4q)QY~A3 z#hd@~yo75V>W)}*{7x=#cn93Z6e*Q#9Ev7=Z{*#C=-&PGY^uiaP{-<5X2}Hl1o?Z+ z?Vl%Tyhk6aVUdcQNJ?A&p<)N8$hL0Va#8s8!6upW24O^W*{PCNs%~LjbG@tJD=Pm_ z1yyxtXFf~V)3&?Yz471UvHGJ&tp`gF$df1zhZgFn9rB%8h5Kf&$tAJJtYp3cl?~!e zN<6$()ebp-uyNtgznplwtIDmx@M`#RknDrpdP94$|F);72av z6E9YZre1x1SQzoC+QBxQVx7hq> z&)(ZYcRDiX)_Tgs8E{|kjL96p%hkmOiri|+KPA6HYrJoJY1*?#X#%eT+1c?oH}z}g z5Tk?ivJ(5fvD)e{KdJjpX8M)rDLdo4(RkI*WD1L~{Ku66a&vH1RF1GuS*#r~bV^d; zM4PQIzd4sxwYoRgu5Om*=B5Ko{}?*NS&?fis_TCrnfIz%#y>=EO3O93Z{Jr6uWf4k z!(2pR8euo;Q@195DBrW1207Ja`#bafdAB9;O>7q{P=#?U_q{SATH?nsb`SVL67sVn zihgp@HzWVPreVr)5UP^`Ps0R?b0aaA2U{qmq8b}rT}6?tucKK^{M6jGWN;fTLTJ*& zXBdjX*te3jBfaO&Lk`s<+9v-XdVVQKqW3!i$II3A%UuY0dDVhq4;-w*!qDP_SN~?NK4IR*Ja%Bu+Nk>=YLiSPnKLXJ zYCyT}SclgU9*(a4FtS;?Zb_${>-*P7Qbw+2>pU=AUJmL-5!LYWB?GnX+35>tiBx%m zw2MsRsbx8D&x_pq>D+a!0d!;dViNDA9!!XKHif6w(-rd$Kjzr83>OWZ zhL_aG$VW#fCMGJSD%j#}49sG&^K;IwT%}ZddkObO7ho>1b#tUIH=`DEyqA$NFOjDg zQ_GQ{5D^(?5xqfaG{aKgfVV{85D^2FJ9pIvKBgz`&2?* zxcmFQE`;3PzjT#`@}N{nq$jtxiq`dJcHzN0FW=?4i|23cHXHJyeLE_ 
zD~1P$gb0!5yQh__jxXPBoF89kf6P3O;dqxXnZ3@atTlx_M|YIYR1Q>|41INQvy2;; zPc7oi&CwekaJKtxJX4?M;4FB?MDmw*);mz=C7ET_D&~6;*#%QGMU*U)Nm8J!PDRG;Zt9P=AZv_wdOu2 z9DgvvB2YTr{PVR{rnhUG$>#wBY`o!P)jXU%w1A%zNe!)4zBpb-_0J>}T}_$wQ!5@o zN4IxKaVm!0htDe`>;3H>%elZo=X8PKUx|DC;jeyxTG9#nOaDq~vnQ&x~Qv2|&~$UC*Z zzD0SD7_7CP5A|K#on+ts-Q6YB3PZ+111pC0VfN*zq+sAX#(xwOu-O~rrmucaG#%#FY~n| z)MyJ#VlqE_>rx`SgI~XCPNdhs4a{$nr797CF~UUt{8v}_y1H#Qw~MhKM#JC~p-Ap> z9bH}d{)UE zysK-d;34p{dSPy7ZoB_?CtFPZHl+(CBKdW&2=m|lcy6mQZ(UtFt2Awd`Ikv~#vlE? z>+8<-v>JWk!Oclmcw%njtySCA`BW1l9n@`G2Ynmdje)m-h)oNJwp(>5JkIRRk*E01 z509z+uQM|>_w|NnF!1K&=txbmOF&yqLLbpA5H}%05{=0f;k=Q# ze;&A01LaTx11g5;cejiCB?GBxi*n*B$pyW~-$N5JJG@qo9?H+>kFwbRC|Y9v5iZ}( zbCgkj~6+#v&|xBpG}&5hL88Is~i5&)x`_7-RLr$ ziw6Km%K!csfa5*PLiQEy%Xj|-h`R+W&msiqNtL^k>PTB~2OfV(C2`*cCy)&l1#vu^ zfA&1%dH^beFqftN4bFGr)YVEyfO>I;p0zXGczzQ zSK0LTHVA__iPI#u)_L!hqM!Wab($vqe*f~~dT%mf=H+T7z}U19t#5-LKC~lsC&0Sr zdcyewjcU}676GjHh_qa+{^2+O5Xa%`*Dr^oVG_pxvRo|x*Z=u{&(>=on(U1Ot-~mu zOh$DmDFOn#h2uXAXt$FK(AcIfTleJi2b!c{y|gUeyC4Xi^J%a5>wouedTA2H!Sr@s zmPN0Z7DZ*PjpFF~`gS}SJI|veqWZ|6x|?{nh}YWujZ5Ah+ICbsd)a;S@#hb;d3qw$ znn;C2>hkjT`s(^%e|&O!^jAOoi~araS6_a|-bYdRi(mbd*Pp)Z_j@O&C(Qo(>iXNS zznjh1-upJ^fn8L*^H8ZRcNdp!F$f#y^@Q^WP=Ha`F@xayv#T^oKl$Xv>FMFk)y;aH zy?b|YdU80PjHb8KUN7wp2LJg#fARhIZ>zG3qUf{FUj6*%KP?KAyngkQ zpZ#P$Uw-+!ufG5OeSa{FqDY|FDwe`FZfqadc9HA`^JC`^v~Aip_`eI`KbV^~d(b94 zocF`wU^1EXdcCr&s;Wdn9R$ugME1`2`YDiFEVDc>vUSn#^KTvz)lapfvU@zXgdi<0kLiFBWU(XcLYMsCN{%w*b`}^av zFza=8a(WCvX_{)SYsu*<7-b{X-55(p*xiAL)^R=M`~eVX+S&mKh(Z)Wq&B+%Ai~y* z(pm-KYL#7I-KPB>GhN>P)@`+G^6YDHnF2)Et2w3Keoz-}D8qb1wUROE^056DJ1 zMYw}s&<-dfGBY5Dp*lL;ACHCrpcD}`V5g3hWM^4&m-_B8J3xPXZkoS7{``UFi`@Rb z!FLU1E!ZQXW>^ryAlOXs0D$HkTG%d-z2Bm>f2`vhY3e3v7w?LIG?UYIUPkEF zsoIwKO&%Gyj8Oa34~^y@ps0ECPd3)O>%~u8I2^atr@Aq_H*frX>FGxm*k2PMGfa52fzSm z^i}6O^NI*}{Sle(y{fK8SN8~OGFzkp)m87v3=a>#FZvJv;XmN*E(SS}00FA3U;rS1 z1uPIygaKKo2M7oNjA4%AyAP)S00^Mn-vk0wp@4Mn>aA8|-*WZo#*!Wmf-0nYog^)g z7%Ce2C4vDG5r9B?)5_A{dgFUv2LOP^%|0nZH9eOA?lzA!fd^&45dZ`RRH<8_rgyKc z9vyZxy>}~M-UR~$h4#1a&I17iG97r*?Ix)Z#F7;i>L4Wnb-1JcF0@&#CXDqj$*Yh5 zeE}fA0tAS5C#icb02LxIh!oCH3~Cus0%>F@btg{=B!Ciw`U6Ot%}M_-03hCGJ?{bl zgVOQY9svLlf*4f`0brsUps)~CQDp)FbwE%kL_tKPi6*GOFW!L!-sK39!U7~64$@9S4JIWp$Pw(Ct3F;Tm&xTXGcW1Dgug7Dq=53bgpulKAp;ZyY zRs|dYBp7zqOuL-ve=-1+IH5q5R4u@YoUuRTuZkBiLZF3&69p1myZ zJD!NF*@EKg=jWTNU`UV^fC=U!M|!k3&`w|8%1xgg^GeQL|~u!z$UwMz{_B@wdn z?m_zcXaFo=MFwJkpqVo^b0@R%bg{HU#>)%l@rAc?9`WlJ(%|Ij`sl$V=rAnW<>`#6 zIKI0_$jWLlUj_?ANK67)X-BR2+5jLTRTUTuBM6HoW@Qr`WN(-q@UKpm4E^v2_rCbe z%hONWqXJ?*@>qWPv|Y!-;QpfnR%^(Q3K@@3FA)V1f)1m%)q>s`03tUCZLE}AFaZLc zjz@YKOThmoB#au$3Oqz%V$sEV>!dXA?M4c7FD~yH$0uK zoPkGD#L)@*+bI$63;;soVKRn`F%nxlusj^atSC`pJUl$uW3@3-WHuRo`tyhP9=vDC z{rVT54u@@7;_1tJU}0DUW|$jZMW)32J2g&k%R&LbZbkUjpPTEIw-oS*QQ!$iMcP)r z{NgEI==A>d@Pm8OqFK~m{OWV^7#a*kfAs!$ou!(&Gwt`;(MT;zp8Ftj$5 zHc|)>kaVgRZUYA6sy4OxxobJS`j%lZR1p->TH~y<&Kfq{!g|VJriT-21VN@_IypHz zy<9H6>>ZCTN<29cxG79AU$%|cs2$ldBv}Jjp8ZA)yo>MvRAmE{F)Sg7i6MCJTVw5F zu~@EF#`4|cX{;9FY^i+o_;4^Tmd%A)Kd;vfj0CJ(@q*BcF}lKnzBXNmy5|VN5P=|~ zG8vB#4-bM!RaMi&{laCl=VyQPvp?as{Nl?mUoPAIJEI~Wx^d0cwq@HkfrYCOy&}rS za=EIa-s0kS0HEX7>3J#=qqBT`eD~hnyM~#FhNID=58t=r?3bVX_H?~CE7!w22gAd= zJeah^D<76#TC$!^71ax(b{Kev|kR-c1)u7)b6AmMHMCT&+#Pxp6}5TVI)mvwSqn z+fbG^JN(u-;`xt)+%C6$8Y$WWq<3-b`}Ydsxq^vUf~1;B2v-K z$AHc!(Q`Y6f|NoNR+r1=a#n^AVXDK*k_{rp=99fid9ezm=ejkmaOm)EnT_MJie7Z< zY3g9OwID-U6%ka~FrvuTX#)Z!d#SUUwx)NtaRdQqcD`6Fe@Qvqz}i8f#y$Pw^S#6U z(6*dmIh)J6tj_EGL2>xtAS1K5oG)jkA!LRr!@3TB^T6$OJ2q;VRaHblP`R@AP^hY^ z5>vWc`ap6hP$vWlAQjaJD>ShAWIR>2c|MS;UCl1pLF$U?_GnoA!FS&O_6P50pwB-3 
z;-CKMi}_`#hy!PXk1?ujSN}H3T;^Pe(XdIUg^5)qgeWT1EgOamNJETFEIpL8b!b6> z!qRk{7r6`TYF+!;=>=y7M$6b7Oa_Z}{fB@2_z!;Y-Qi#q*X4IUd_<9d^{dZ)+Z4`t zYnmAR&bhh0GGyzzJ~})ajYh3+^DGZB09dcq%hfW*nD#ry&~Hr=0Lku1rnedb0fWrl z*`H3QljW*fUo4r4A_s}YQu^Rm^7z5y++`|itW|5i_r3Q-_!s~18_VRZVILq#kt4`k zNHWWyrfL54PyaLoA3_Kr67cNVGw*%V)XZ$yR874hF4`_VAwd+HISg~o4HZn;c$hu7 zr^G>HL=bAS%s#7Te!Xfmk+p^mtH_Q+=+;EcpsFnY_OJi?&;IPs znAtklHtl+~TCdhsRjF!-p>12JO79-jj@&vk))-7D`TpK$;H>(_fDeX)~H}V2u)qnVA*R*)#MOR+;%f{@uTi6053u`SRtrKl*l- zWoKunhD}ivhMAdS2&3UhqQn>sSg{oW#xOy%jPD;G+`qe@TVfSQj1Y%;HXh`erTcde zs%m|7Z~wpkmrwuk??1V7w6Bo!+3b_gpMCb^EclLrR3X$Gi$FTx<*hK_qql6M#LL;` zfBZlHqsuZ?jWHe`9^QX&e?Ff<_0G}Z`PtcWwJ>a&wpmvVGekfVpz$*OfIe&~JbA{^8zuIv!dQ-&6DuUPUaxAs*Uz`r0(hwZ?+8V zF3G4WG^Q`CR;#jH8z#pz8aR!O1V0=Wj~*UBynlbaE?3Ltd_E7sTc+h={aN$nbUMs4 zr%Do{RMlv2+BCIwMcd4+H5aGz#7yjQFr!_m{MAS3 zb#I8OipHoB2q9Eu3uxi&qlW@;jPmgDln%U)LSymzV{r%~kJ4Yg6U9O%y zdFpZ(V>HGsW-A{gcooH?_a6<5qN$p@$A|Ado_+rG9I`Ql2>|zG_7<+i8$V4%L?j|H zY8``N_%?q2W!g3ZTPA1>d(*w?-d^x+Rjn6`W%=Z}bx_seXb`G)HlG#Qs13^T&M$w!%tM#1|m zA3lEXuq@YYTP+szHmnV!ZR29OQZ-~fmf^;1zMJX(ey>Z5iq7 z03e``KoY4c0inNT1O!-BjVc)M@oX|&GjuR04w#kLWrO$LKU^*sZQGK`saVlUrf2LhOZa@AgP={5}A5q_3*fX+4mgTPh$5!=$R~x>eQ88k=RJ z93=)H^8wqeP_p}ngJM)S!MMqIb@n7$Q-d7wbZ;0V&lhFvy%7oJ&^HJ6cCW86Xf_B1 zvC$p=rnLDlg&YwTK8nkVqdRwx?%d0Y;j`x_XXkTb>mwC|(Qr6CJUp~6tLj#WVVw|@ zGbU%Etv8EyR!_HKq8kj{fG$-~hg4PFQ4}{ahGG-|oLo6<$T;hYVrUe+T$jt`ayXiX z7$L~nOtPY2Yx41AvR7o8wxL?A{HiL*z_HgDL=s}D6UWf25^X0awEgRKN9c-1vg-xf z32AL%iXujh%k%Mc&siIS42R>{<=hAP@sIy_aXAlV6GIr1^#olvbe%F%KevUZvaYGvZf_N2L77sgITqvMAUdhF@l)ZF<2q>;k)1c_Cxz< zRhBPKPEStH)@2hFsuJVXBlO0l-sp4hVA%N#02LV&gGY}Z9UtF6J3F6$`DInr!@=;; zdynqjyF1?Bi=`WTgfAJT8k!2YGVq>kDujch9&1$*)_IJNKU#+AL zxjLs-A*PorL7@RKWSx~Jqzs4~p^oC^>9b#zv$}0#MDFrw?rIGodIgBcGPe?;8e)K? ztRs!8QQ=#a-Vt@9=l$j*^c89d0ftDl%M~*OYw@$6|H=1$@a>mRpFI2gSAX!MKRP}h zXMghD4CN=keWJime)z%T$M*(@`)X{xT+h!Jzxk)nzFaTU%7UtHh*A|m?2^B(EM`X( z?C$WJxzG);iE5rX??ayPzxtOyefaR+!NFua9-KYL+3Cy6lY@hULk?B7nunmrhxx_n zv%3f8_*?fJSSCllDUI=2u1$cCI}!RWQ46b6;xzFWO+ zB0X31v#%Y1jk!a_$>D3*0NN=oqRa%+R>D90TpE492$+Ku{1^WJh`3xxqWFyu032 z8{KL|$+@;`XxvDg)MbfZ_nu$-^R2IUiqv2Ig}MVmJKCP=^)+2n2+{xY`|_c83bWpD z=?+Q0jw?&znL0Dx>~Ji(uEep?$FcscMP^Dj-T=RS>C)?^QBo zC5kb2+)ET7i77d*7l@rRWn;4I&Zg-No48|>YykL5g!Yl+bYnv3j9aKOVkge1rverY z!~_DVA+`XZA$>o6jXDvM9(pafOMtmaAKe8beUtRo>U3xSrZp3k6aoSfZHftKQ$OGd zL@DmRTR%Wu{XmyolYEK0;iPWwsjo}9nVaqGsuI143thF&2;FiCt2J z0Z72ANwrZX7{Ed-WH>PQkBh;;P7kuWndd_#h9%cjdXm@kg|1f6mQoTx6cBG<=)mYh z^{+K7cMtYW0Q5YwV{Fvnp*tvP*V6w?Y4ZrMq=SO?hW6gk;K6Y|nOe@{`Hb#9D44)% z+6tM|%r%gp!gYWu8Kw}0O|pVE_L=Zk8f&{}VjCmdg50MM0bB3(<}4AZ8UdQpx?T*B{7v#N}BsWGUmV-yoXhKvZnx`P4T z+2pS#2-=Ac^a!J?Vr%CW6B7Voc0EEI33=-cgR*=K0OOw9z8y)*HvtN zW3XD6s%|vkYRC|%h+m14=|p*#ZKFE~r(2ML?qFc68ERi~*OS-wPW?F&Wq|=QP$slD zaz|r#Fv$mbLB>>GFE5+JJFGf*`RVg-egEL(kS2PV|zGH<3MmJ%VTKty?)Io7KyUkyNd+rIMR4S3THlZHY!^#jZR1DPY6 zyYpprSypaj!fa)DbXt*Dn2*n;rL*VN!M%x(1FG89`6Wqq=Wrxq%6hq2M}w4mPe~eX zJQZ)nMz1JLeR|w>1G-edq^PJ8lPaPb%SI5Fjhd>Bk$5`5IDWE>q|L+c96tN_{Ot31 zcWC)Sc!1UO7on2e;O_lH4V;crQ!H1Lc*drl@|FNxlT|$~dL5@{K^fU`b2DVt({M7zplXcqO^r!R6ffw5}+ zcke!a@$BMqDNj%7;jvR6mkSwXY;6W<4MoFbBKmAF79tSevKo%5EAG6Gu(aqd3f;Z3 z^@Lc4Aq9enx7Ju=tuxHVMs{K{cH`+#wWTO~6PhoV=Zp2CiPL+7^HQIlsoJq}zFfr+ zxvL*gfkppoEf!K)mjc;2vcC}nA zS1X2&4~x25L~Bc>!$$|jD4$p7pg3=;w&mFB+KX2TopsThOgm3|GXTBi&?V;fP7+O( zUD%07DJ6emLa!=dl9mh=)zN5pu)h~2Hr`M6_i~plUY>vS!|zdBesOwwUdm|S6vfbv zN@H!?MBjLdsJwVVkWGrg+tpwJVBInFfDIwlIjP_<#M%H#;87q2pk;W4VD+dpPx5e6lIVmYETM*9p9SGc!FS=8o^17*CKW%Ql#W1DMs?sJ? 
zZ*M$)eEjhK(fz&ABr}-?y|aH0WTH|anjG%|kt8&$MW`xigS4TkS`jszYFUXBmN!ek z(V%|~23R3bB?c#rkVrIkb~+KUNgkYya;hmx0alQT5R`<(%X+?At(L87P}3aL&bSa; zYxBw8q?wno45(Yv3cN586WB6atW~^pWY#yU*!V1N0YI06Ku{HC78QxHCt(6@Qt#1A zGfIjVkX%0m)%k4o%U}G)qYQwLKmSLL!c zWUMi2QF-wSfJ6yfPtENBU?K%1MnEM-kYQFLRuxfAAtW{?9U4`r3Q;0`#wPL~6_F_- zCp#IAh26m9v22#Jna$y=($;Y=a}N*4clIX*=O-^No`v)EssU)2nHV+3#7Vbj=z5|0 z>j21{tC~guV~i*PYK)Pn46=rq5kh2Ui6W}0ejssKQU)aDf2)XNnhrDPOjwm=6Xs_Z zCU;~}uByo(TVB@hy*K^#M<3k1b5G0qXfzqQ;Xi-;Orp=MZH+WBDpXR%%iB0sY+04V z(PXt;DR^%>S(hao^vDO{ozof`Vfhnmi6j# z7K1O>tDvn=2r(poqoU&rJv+J;AF|e1W7<5=W|uGT-@C6$bzL6p?^R{BS}g~|VH7o- ziwHCtc2QG`i7JE*sgSii8V&Bm|Ln=LVhB>c zdpH>nd9{*IwN>lOD9O_Zk(9iy@oF_5&^}MIvUkgX{~_-tB77#%j(IK=Lh?f%o!0dtF&Rfzu)@CxV)|w&Y8tx1*SUi zorA16%BSPW{=Tg%ZD*IE?L#QSnvPO$-S3#_eeDj>E^XMpkpOyRf!XwSGNK3whB*jq zHbRVsv0g8YG1geJE`~^?Y}~r6C5FAd$>HIFsFv0G%csw>+@%t(*~KD?MvuWb8&4;r zqDVwK7}#u`6#+iPAR1Mm%%sF5oyxW2FzIcn&dI3?kcCEoH|oDN97UK&2}~eo(Vb2h z)*vQ;ZIMY7HXLKf3pdE~;c!GyXR)rD`uSOwa|nJgD5|z0bF)8C69dCqYvGn>3mRl5NKVn^&gI2wwKCQo z9`0v0^S))a&KhG)+tkK7K)nxD-INuGS)$lpA)^?hL{U-0?3^8s2i969<)rukbyXQQ zL_`u1MS#W!CioB+vw8G>G#n@yCT`otMzF~&%y=+RUe%t1kc!6EN2|oDUaF>Ew~Zt9 zAu6;O6vNTLIWpD^2U!ei4TsRe<|-6oBsR{PK;)e3n*+i2R-!=BG|lE+><5 zQ52c8^F_H>F3R;P#%PUU!ypzQiZKe(eJi^byXVy3G5`}=I%c(O-j zP?OE)OG_CF;|ixfoX;1{dab25%HqRvxi+(T@X;7PIy?v=#2{5&8DoiAq8h`+U_eCP z3$alZm1uk0ffNGewhia!Ga|aUoM%~96#3=FrKm~_00Kv1({U+aHlI7|g7-0~Gsb55 za5Of?lCkTuS}qq4Mw24XM&N<-$lUp}YKEgD001BWNklOY*gb-uXwws!aWYqV*A?g&(^e&~;pcHZH0_p;R z_n}LeQ&CkW5{a#E+csF24TpoX^9xb+A-+64ErvsDZKKL6xiRZ<)%r?QjH(Unz-++6 zQ4=pG0MQ3wf?)+o)SmZ~swBso^lg-g*ly)QdX1qFRZ78Y6-_>h?Ue#JJN`OVh$mC#7U5E=j_mC>}q0{L!~Rdj9;`e7<=3 z^5pOS?!OR|vuyM1=)rMZxy5RsgjjnOHI{8|q>YQVjRFy!j36RaFg! z!$*%F9UtGD9P9<)>GS8GefHUWJ);OFXoyWzGL&adgd|GY1Qu~wUX?4=kQdH}C~CKE zFKi;?3L?8*`HmNwu7S{@OVhbxf+%s1heUkLu*|E~@-P0=U*uU9MM2EQ%$JMyQ@>iR zzw^ECE!XQ1iAU{Tuj%c}HF+pa)T?T#+pId!+01>1;$_D(GV zV36tu6cSbAa8&&GCx7(qZ-49L`O_Cqe*MEg`lIpvNlxE$T7CNQ6GQZqKls+8M|V>F zK(j38m&;%M>XR>@tw9i|S7?mbc<+_GN=(^P${oK?j<;>7E;Soby-EWe1KQ*nphuPV z)SJ$psN`7|g3ojNumAPWA3ZwW-yau+yEvt%FPCMi7oiD@3 z076vy^t0bxT%KxF6{+jha*s&c65Ix2)sy)6>PW zj7ljm2~8GNSJAj}i*FFV4Ml8{+7ccV*eG+m%=8Kdc8FmEopE^xEdUXD^7QoC^HTyt zx*RN(nIVwUaFG4vXMgzcJKr4?`J!BW_URKJd9|#5_t}eO*?6x3sS(4LvaU20?xkfH z_1al`dNsXpT@KuUfs}W#Jyax#l!yR|n$#*`^mddY0HkWf+H z-6~yjuyz40^i-OVX16NnE_Bx}1$q{ysuH(aMi=(z4b$}CwhgHvCJ~ytC4xo*C`NS5 z<~p0$<>FWJWYF`Uot14e1Uqe1uO713Z2K#}cW1v(BTnpudQ!p0S) z2UNkDicbl2ef|J$JAWVr3lnEhbjaMS>t^(9=;E9C1K-Ra=x(yn;J%qZ@MZwMnLqH& z{DI#G0DLol;G6jazb62|H}eO+nLqG*0`R}w`~lsOgS#C0HLw3KHh(~Ogy1!Kvg-%f z{aBhmVAMrP^9S5b^9PXT5B%i+gZTq^W$D=2_+8j~`!yog5rJVuq;292^9O$YIkgxV z3{`*p{?q9y7>d{%KQNn$PrHuCf0WI2VS0>oqT=s2X6JLS1!>A zK$Ssiug)J}YZvpya*Il6zZCCuCU z?6kJSuW$aqSA6NZE9eHf(9a)`e*QovVBdz)G$zaQVb+GKjA6|G>&+kNs!#zarsK)` z4<0|d_i%r*m*x-L@8%CsBtCq6_%+WT*lcO{?~c>$Ix!`8L-H?j=b==vQfT7pa=BQ{ z>KN1f0cWxh8#38wI<79)p$uQs{DCV6BK`A9((LAio_>{R)217^yjaYazvLX*(78bo z$$t68=LdHVL(`Ip>)Bk^RdwF%eI4@$Zswv_&U#;gb%okfqknW&O;N$HN+X+%MiXJ1 zyFoKw&ll&}fYG#6`@>=Wwag#rwtQRWlRO>0v6|LLc!heAiX_k{f8>^?!#uafFIG*^ z6pzdZfX&*k_Py%UkB$BGNm`;lQ>9EL{f_D-JqhVFm5<`|b@9W{X zF!;LW57@6nRj-fs(L!9Qh>Oe?3g_(M;ohC2vDH8!IBOvB&U7po`p}H`_Rbfx?|=XP zfBMT`=-U7LKl|xmz>D?ji)Uw_Jvl>!#sDDkL8t9_@8zy{ZotxWk3du-` zSw%%cI!$?&j|L;w5Szv~jdO0@RGD+5;edh+Y*7c}3=QvOfARnRqHTrK{DA-ns;I^W zAFp^?nfj>Zn=bS3{_Ve2fQgxR84MATVK!`3#c+!LZlYSWIb5mPXl_iA<(6I5`bAY& zF;M1))3Jz>0MR%e7x~~~wbEQFs2KUfAAb11|NJMTeDM6)se(M2w%>CjFq=MrH_@|p zNFp#>5dn}gcNJiwZuU%SPHxmW7eY{#W5NSOpYB&o1WC`@_A7&A5%C3XQ?5 zS~g7+mX|9goiCS7jH@-r-UH{6YW~4Vz$RFAn(1a<{(_kzpe#n^m+c 
zh&INgR>I6iRkbf#N-r8?Lk!N^*8Aiu7gZQ!qaykcHSQlDk=OIf<^ALRz426`>uP;= za^{FK7$0S|o|Pd4NF5aoCWm*%<6-a(85SU_M#NMvPCE+xCIKhKpQ&emDscl>-x#Au zUn&NHqA2oWP!Q0zZCO^T8byjcubT!6lMxkWdUwxdPJ=INFI7Ecv~^>N1mf3q+qMLC z#BD2)vdg;W{p{e51M7>6i~Zr~JI4=0dK~vIY625d&k)7dhpb!b>|oe9Pi@y+Q#v4v zqHxx#q9}^dXy|F-TAa zse0m=S$B8O9p>4@u@S$jK70B6?D=_hQRW3jGEoqqY}f*pBB#bsgg%F;JNF6@8#Y|m zjSrGApwzRi@p$~+dyl8ny_EQBt*z^(EK6&R_ko$8J%9G_;eAH<{KXf>8RKjnymMx< zKe2<+Afiy^s6;T<1{G12sFJ&n&rT2T?PvSB9XgeUEXRx0qAbUw{Jqg*g>3|)u*Moy z>pcO{P~@#$ps5HIP^G$U&dx68%M$1dAkwLm{)#b7Cex#%BW7ByR-b?VxvF~a^DJZL z#H2)|t6?*Sh>a!7WQ3Vjp=2G2IAem3!rVk%dvDFyD^w_RMKFy}wEwf@@{nq!(=(V8fL7C(5cqRT0s%Xa3~$ zR8?aLb!8bi@mL8z*$Rz)>NZDR(uHXly***PmPqotoMTA^Y%U0g1_Z@+u@ z{kW{oo}SM_qY|}^XgiLnUEsB2H6C5~tyjAG);j&tw_X&zZWH$^2yHrg^bKNqzKrCG zQH0vJi`p-nPzLo%%BpN!UW_M=ss}YupT9gEjV6|DJzwj(9a5f0tLqq6L0auszZ0D4 zeR9g(s1gAQbph%0r|z!yhVJ**b?cpmRn(P2bh)v83ViA^q#~>3`sK^hyeuy+XS2mZ z6pPjB;&S%l#mkJ$dc8h-c@kr=uw9DZ^&#ylLt@FW}|lPe0M7T?MJCaxfT4U8-;Uq=|k}Nt@i9_pLIq zN!{&@kyrGhuK?gRiZ;Qdf*q(zOf5|eU;wBNoO|%zdmnt~qbJXvEf>r4m#2UK_kYKr zd{Bt$gGUdm%Mhw{geue;YZ#&R)OYCJ;-!BWZ44{wiTSE}_b#ahfKVa?>MY_u*-1uu z{{DjpkMG{SJUw|ayJ)KQXpld8{CMx^aBqLVYTC)czI~RRo}4sI6Jv<6>r2&Jew{`B zsv@1zr?-lKuYBK-JAIpr(^o8kp`xsSz!+057k~YK|6l+4ubmLVa%O0;S~Q>j_Fnbi z;9zexpRJaw^>W#^t#y_N!MCbN$pssNzOtCD!=%rl-|d=mwd3pCUW%C7fq)G%gOA>S z^s}G+bTHj3m&;%O!+-ndk3TWY3|NlmFQ5LWvr}uR4I#^m=_p%OwE)D>xLJ4SHrqA4 z`_*kxLfA+L!aHC<0T4xvh9C+CjKR-;{wF{D{Rew#|MK-kX+IXWd;ew(d<8TLM3ppu zV3_g0{Fgs{_~_oj{$w&9oIS(o+3E4g(}Tl9igmeK#DIH;`NiprI|uIgTgMJ8<2$~o z9EV|!CZY|y_vVel|Ee3RU*mO4_husgYFy~f{DIFt`~3Xc{DHsuznxl-^?HA@A{4~d^3LleUean|BL-;azg;k{Ex-Ff zDskL|BvJ@mwKLY!i_)8mNr)#l&?O)1tM}er@y({>=0v{g#J=g;EN!<`>XmO1A?Mtf-_dlN5_w%9zeZ|&(g?y)tLN{)|xEKVw6->gQ#p60CQiFyRFR8&JlEdwKu@>Zu1AOi0ZBtd(_sq z5HZF8WUNuuZg-iC!oZ-5PpF7SC@~r4>z|prKq>#S)_VWj>+@RMG05)U{1DaQ&*j!@iR$5Mz)i zM8wSAr;gI0ZNWr&Q3UUqnYbHtLPW+G?FVD)l^bru0Jjun*D$aHf^M`o;2M6lFXr57 zbHXM~7DL;#5+evr(~5|(w)H+so#A>-DWQcEk4og$7pQlwgKmA6Uat!gutUbZv9iOk zp#U+FiaO_*n2CJwP_VUmk?-&CsUn0>S2bnSwNCD7hM1TUV`O8m$RN<|w;wx4=mzrL ztn<3owtYa?IH3!lbe%HOp^8MX)&g1#ayFb2WRh5rD9lNOZEl%VL<0IQShoYCm&DY! 
zZP?1Ko=si@ZRe&$v~jVBq<+*21v3E@Li9d#K&JK$sv^o;1r%(!kKd(HUOTJ49TJ!} zod~dthV;~3OQpZ%RUS?KflbRxUlHm2q~*FS%L=O28csCQdr-Uk>xRUxNXJ{P9Y9LE zM3KS6Lg#$zr8#F*ypk*azhL}G}M4HJQgGM8=e zwh7RyULm^MV}KHO;1n1F7$ho*WqRZ&t6ZQl|xV5-WKn zVk$sL)>(gkZ`Q=#0DwquV|Bg%NF?^{rn*zRY3<&(9oy9a00pN`u+;G%A|zQ0RVE@s zOiaungsQFqvep>WE5a&7rhC%c>vLASzETj8?m29d)uAeESXBw!H-XvF@BnOihk{sD zqof>`)U&K3O%Wl(F~rM@i}`%U#*Ief@nr0r>F=Z0&6?W-0HQX@f5Ylk5v^e+V;F3h znL)Sm^(F9S8#Tr1TZP)Eh7tyYw%0*z6LOs6Ag z|KW6CXgZw^2Lm=-l{FE0AFRz&-b*izUkk+R*pnq<5qiK^ZQRZ5~au`%Z|mlf8!gw2FohG8P_13|50^nS@3 z0eBr`mI+yweenLn-~8s|=g*%r(P%U@hH~eI<1s;vwW?q?rkk9zW0_Hx77#*&5*s37 zh%!S#hABodOd=X$NJdW@_K|0f2-anFT`N+T4BayCH*WjRW&Zr%{5gn5!|~q!a50~& z>i+(8JRX*1wOTHXwQOuz)vdTa10q_?X7j~jy<8e& z&(1EU)BWLa0HQ3*GH0P`3{sPHNwYry03vD(N9jT9wQeY43@S7n4xO{cm>7dL6k^Dn z3lw)$(A_DaZ^ng98<3LLCzFH8WZy6W7>`E7(MVO9h)JJ4d&pM+0=_fbG0z2y1 zM1wZ)Sl*dGu+dPMDa+iQJ9jjVNU&Kkm^F=Wynp`u#l^*?VcI{~n@-2ZI8_-Aa%-*O z&T3(10*iDeZxWU111@t_Rb8B467%u#o#|wBetzk-J1+{-t_S``FWSoHUU$6G<;^O1 z(^4t@;6#HUV+aak;HKl@s1QM2H>am(P19t~p1gQ@adwdxMV9A}AKka63mnBLN#SAQ zZBzewdUfi&TZzvjA&2 zM&V7BE^~5B8Iy^KH73s<5>t$EP-MY}yeQhXon6kH%O;ayo@dTEW(pxB7Xuj*Atr#x zS#x-}Z=G%G_VRMRSS;gYlx0QJ)Jzb>R9v@d6iC3%`~kf-e}Fdg2hs^rz-87NlVuDQ z>uix{%(PmUs_H{fVP*^mS=#Bn$>`yOdr@R|IbSSSs!GH$L?HoSOqb!#;4E{hIwG)5K`W7s(xd}2`c&GQF9U^dpd{k^^EWH=lQ z*-++OS+1vbQ#bW`y>44i#98K=#>W_~bw!Z_7#2lQ|N@L)g7GVlGZ<_`cMkfiwo+Rq;t z_45a~i(MpDnTTSDD#C=OZkfq2H+56jjjALMsfa|4Bn09ReO1<~DiTMdAshBSw5^}b zmMS_J4hFfE7@;VNd^j99XNWr=TCCQsBh&0?`SKfBo5pH@{} zRgG`k`Fu5O+SdCbGb#bgwKX<|Xbc}79fTM{koxNUfk8KaAQH2xcJl{1uf}Hnz{Q+^ zi_2N&Tu~I4mzPmgML-m#B_cKqg{oODm(IDS^-UX8)flGSi9mra)I>ZO6vH9aRn|1^ zYF$-TGn>uox(Pw{4i2(>;Dgppm1ov6MF{|hAkUwlf}omu%C<>kj3SFgb=`y*MUl{Q#jYi&#N}eywq8W4s&dYWYF)Qc z6r|^8CsEnug<%?v_wvjb22pUz;bd-_GQn#6N37j z03zbiOH^5wl^vyOxBpM?MC#=u z&`MPrSDh3dfMscJPDLuNF%e|$U~jzWxOS)HAph{~WSia{}+OfBnhG%Pa9 zspyEixu2;T4Ap>KT7TZdc^K`-8ujofRGz561DP7J4GmgfBJ`0`XlnN%by zFc{=n=3>-p)J!H5iLv$V+1c6U#ksR?I2ukTYXhQa9cXrCd&GQE~=3TGZ z?qtMlEcxr%@YK}=U%3_a)J&bO!wCiw5mhjOow0T}C~^=&h?%o(>%DJh^TlF5o9;~x z_9yr5-m_aLz|H~CHzj-Y-n3nfs;g;Dei7>D4%L9FSCJFix6y4pF znQZO=5+}~yRfve}?=7loofmE}7^JX5e>Bn!yPezt;JR0)H@dcO?fM&&>ZCoo$Z#62 zLm+a_Nt75w(a+)OKFFfFi%;f_o*dI|WzHyA74GmrWDihd*br}Aw*9g05O1$9xIaHv zoMed_Ua^l9u!NX=aedB-h$Q~dTV|{Z=H%E=CKBxo$o_D47|~7RaOxs&)@&20v};2o z9jokezOJMibjN=aqu)TK{n@~d_ah0y)_ZF$sN+1zt=pY=>LW^hjk1b}2=`0hwD;>g zCD`0U~Coszw5$ zgw52IZHUa+E()Vt`J-KATDCH*msb79w(ITyA-W;u?6odd-F-J<$#hCT$Xb)0o61Q+ zFvbFiAts?H{g#Mq8889f0005nNklU0qoj|-5Qq>%M1P3TyTnwr&rl|GeQXIJRq96X_UqQ|s%Z0S zdVzPBelrHHcocVTL`Vo^>aqcd0eAlVxSv+80u}XbxV&71C?Uo!KSGt00@4SgdLGw5 zY3GpkNVt)GZ2x|z`2*b+l5AebMpf!vItn6;C!;KLsYD#mO=e9ZX=3i>Ll9wkdGX>5|e<{OniS$K^Nk2YL-laRVf2LBId} zfqwK~ukBv_Xy^9@p=%MZyJ;^y+vs>*N6OdqG3>zb%I<7FoFv)X&mY(-OVrEb&21IQ zl-VdwfWC51?dJ(}EI~I{dbj&YeMD#T`7P_%p`Uk|KhUFfXUo%rdxb$nH#^%uiaH_H zr%Ydw{CXuS3S8Ni4cTp0({Ep=jb5)0z2UmXS34!)+Sj^jV{Eas`_&C6+=>zH^{XpK zz0Y{t{C*Xt9@FnWe_*R)TWuy8v^h|JvGj9>TQA&2d`s&Y{7P0;HUFHvLj`cQS z9UW`z^*(I3YQx&~`hBjhWd}33CM$P;_K)Z@A^ry&xPR8&-~0*y00008>r^-Q9f$|0lk?&bRJ8 zuElcCcDC$!-)ElZR}=bCPT~a$0SW*BFQg@bOCuJTHb~ZGQ@*8cIxcoH72$RpM#8CgjC^+fa8s_W>i93u-G#;1PTENDA{g)@ zQS|gv@W}pT$o?=SPsbR~kNzI%`AIw-!Nc)C{TmtH(#wB2l5q38^k}kexxX0Jw15Bp zy}Ei*CQZ$v&#k&?+W5wn?!!rkw|w~l6XtF5M4x``Zz1ulVB@WtuH ztZj)}*~(tUaM#AWuK_awzI$ceHy8W6cHBdPw|hZBh=!)@1WkM8{f#=dVq%?hb&l9# zEX8XLhs_P;1{r$dAs7Fffl^Ow0lek`Xt z4%ydfDJv^S3*MNSJPJJ=N7vW$CiZh!{K>1boOCI}6#2T)bTQ1cnTmjlWAxz6o2F7w z#m88rRd;f=8l7u?Q@a|aS?6H9fuHHUuU-2ETyYp6-Rq=Rg2dhEw?D$7_ocSI{jvLI z8nx6b;O^nF1sxuC(d%?@EL(cfZ8b7C-2(;C@z~Dp495lcWmebc+L+>1-NlS0+j`^8 
zP60Gm-d5=scQ=tq_rQ+3ep{Y4p7(Tst-rtj=@Tsv=OySucSkFo;V|Eun;*jH zim#v<4`-{_fB3#?tY&oCMOwLhOQF&Nr@?~*3 z;n!7EJn5q=*{Tn&m;-OQpwgIcM&>k>sFd6fHg{n z-1X2+>g%=C`nkRstco_13d&c{v?tXQv`h6w=y7)Ka-+y3nY3^BnH*)ILjsp`|6JB~ zy`c3>Wp6CC>4mP_av+<@z*gbK(9`vNBl0+kk?hz!H72PvPbT4USnG-;gjtH>J8;`b zG6i2>P8mjz|M`>Zc~A$~%{xzCdo>$I3tWIF=;4`kMc@Z5hhft_ecKY<7MWf`E*r3} z_(~d2`d+^eT03(p2UjtMTw-S5WpxoM$7kIgMNHH!`;A%uY-^N*gQLP^P@~D?9NbkD z6cpaMPh%LB{ex{ZR8=KP`}@69w<;Cpqo%kD(cQ!T%V!4vYi+pk%)QELK=Nma+) z*roT!$E}@$l84#Fhl_S}p|!Q9c^li+=;B=n#VGszc5ViEw>D5xQaWx7;B(oe()SY| zbg%&;9>+7oCfj4#!{B{)vfhu1iaMzM5F|}Jv5+KJ+s22!|~*!Eo7UvSBMV-}e_&&}|;iIo>*ad}uJeJs@|O3vvtC~ItNbmLEd>$TA6 zzS*z=Xj;l`{pc$qIc>M+zMq?Rl@gY;kZnkE{FjlfLeu#Y%rhw zb0iUu-KXXR-Ugb9{Z$5?VFwND;K!tyn%FBI!OE`%%agC5sXguL_6@gaOCngK^I(rT zY`R=XV9+dXy3k0@VQM;4Y?yC$SnpfB+u#C={(3o>bfQ>WQ&klS(5$gZaDWWdXeRwHsWTt~XLt3Y@``1{awc?6aMgi)0wcGU@!+U4*to zEC!-_=*cb0>$skGCbh_m=P9w?9?ks08W#foscd;RTxO%5az-y3ZkbMUMI9 zqOY%*SYZ1y_hzbNwavVRm)`BgK3Es&&~18p`o^970!VLG@$R;h$Hm^Xn3x!2;_d+3 z@LZ*NZ1JvPSH$&G7tpR{=^dH^J43V-@HfjN&R<8rs?LsmM6FO+6`H|H{kWN8TDYK47%JMcl#dw zM(P?uvplq;_9vRgs4ph141T7bKxs)P= z0y8M*u%$<&xi~UBE@yjX#Q?K9Ml!3han7P@!{VDv1>b^-<_ zrb8dt);;Q1>Is89h2{ie3fbA&ImFeo`&2}nuEzvuu@vemDkjCMBR_KmF8*SeHRS)m zH2;2gegn}@cn2BL?FsMv-$x>7lplN1M81+|5c9b(KR2{;B*I08=d_+N%1r%8V^lFc zVE*cGs_eb;xKZlrJioxMG+0&Izs|~_OxJ(% zT_j*YY8;#;P_&eU{K3@Nzo2elVY~chP^BQALJHF!6|nMT*0-$Eu;xq&1#F#udiLvM zR)=L0g|uUd5$m9(2&PFw_C}#3i@%@BzvtzDtP>CH@7FigS6+fqs}cL3Xt5*3Yit+O zEnhP*FocS}nf&AzmRVU@$xRfcRlv64`c1NGVQkFcdC0m^C-d`m=KTB(4Q)rq3NmE5 zTsCSt5sW*Zme5~PJ}ryePE@a+VGcqpOr$$4 znce4P5ei(by}R=&nM|?tlc$QM%7;1=12TBxZ(<-2h?ZgQv5a0OZ;*aicz8uh4Ike1 zlI$nyuuSOofE@gSEVw11JIMZDji$qCEj&0XdgP$fkS4e0pMHqUCFwn%ULk zHH$o&Fz7bBLK@4OUfW0><0MdGo}8agmG4-mMG{RtIaD@oAxrUU!v`U;Kfd5c@hikoVS%8Bg%BQQ9n-^TB%~Ny68D z<>>an(`o&-AOgRqh1>p$ZI~YgU~0OWY3LXFi0wSKc3RSKKx?H2*6HIZIpzAAcsfl5 zPO6q)(PuL4iY;4dEyixX!{&v#y%e6+oa}xbdipufWvi)D^SN3UdBJnN>fWI96dj-S zy88O2-8OLIrYIMvFlknQBMX_Xuq`ekX1AI9mVTNP9rjKp6V*EE4UxA9nzkLz{jhxH z>$-D%-*yO<6$<<`Q-)XlMxqiUtY3b9U#uMO@v3scW#~E~XJC7bbk4)gN_ff5WQz&_ z^q0%ZG7Jlzwguv9ab9jTy#K)rc-{0SpAHH2fBccezI}J8>hDb zE@H!!?*DN2+53~8RQP%Xn_R=qB-G{5Cf)g-l(tX0zfW2sf_r&l)<;u8p?PUrtY+S0 z7?OyK81N%jo@bQ#Egi!%_?ZvA4n&DB-^k`d{mG2tEFIsZ$01uzLnvhFsrtCB-a#(`;%YVeIz}{BW@@1b zkTZR4htpn^D2gQu+4J>%3?__-gn_%!K6|7p*12CNc74!ac8n(Ev;D^Axc4ozOic@V z#)tyr2f~%6yLe-DM~@Q_Kbln7fBDYJ`xcXvCs@;r9tSn>9Tkq(2|sCgrwsi(J5dfz zUq_}&yME2AwOqI$L1ePaHwr07g5puLKkE)j(9CG@pCr-UqS?!K;QNMem5NL0!=Jty zdBVv_xgEEnmKHMbL(lE%=*P6gqT78DxY`ZhTTy}JoN22G22BDl!N)JpRy9b`oxtIl zpc)s{K05LG^T*o4!Xk)!hM+g=>}O%J7|Cz3F1hB(Ip(;_EXQa%7+*vJ5R-O)L)oo| z^n=^=HNthUI{$fV)`}Cla-CDP^&)uCd{nbY1?)HznTOjX{LO#26`#vzpyi>C2;?!X zGgZ4?=`OSYJnU-`445SlR^E2)+QJCaU_I!qcWT+;;s>ks-*#DZ=bNc!mfLgDa zDs+Ck$t{-LyJPtpfuq*dM49`l{Cp6k@*mV1!Yl>xu$c@ut1E%bB_9NKE&#Xp+?Mq$ zK}n-SLS~+6y8BE=C}yet%fI8JDHOV0`}nzGX76qdy51$OuHNg9;A`XPwDU(wDOU~z z4qXG=Y$kHu7ouOEbBq3+DR@hs*diDQjjIp6vBlrmx5GFxe8-nft8J|LZC5Chd>1`- z=Rv|@vD2xl>$z`BNa#wyr|WTCB^6s-d*t7*; zIXNk@*4XiWQP*StoJ)s>=IG&ek?z6mGN7d8xJ^W*@!b=b-}`+ST#Q{(ZM#_Jekva= zWbbPdO+C!^xG)=uz}X%|O!z!=1^cfO@G$bzva?`j=J(~itEv0-LT8Away)egKP-SH zrhRmldCJ612>Ad0sIG4LnwVCHn0x3Ifjm9sERB4X%frua~jHI{`8*ly!@#sVT6@ z#>5xgg43bAFqD2{aB#3pjb8QT2L-Hv-`{D%c!XXe2C&O+f`2S(K|pPju02ih>Y12A z=BQb5(5RUc2;1D)-eIh5y*SdQ({YwmQtBu5@!bS>uy{UJ(4i$v_5Sv9I0Ow5_OlbO zHz)~gtmU>9%r2oK?e|Q7R6zFc8Sgv71p4~>9XUQJJNJBjU1woR^$HwjqbpMr6VOr7*ia50`e<8KH1lY2Useu^Xob8C$Ojn2ID zB}354cq(BDf}b$i+W7|52}wwX;#G?C{h@8$I+v*&w?Ks>cn^i++t~1-MgyCyRmHB;CC^g zrt$;)d2KkHsO#;9fmK3Ju~71IZR8DKbO=qDTrPBuILUZgg~3BC^VABhM1Y!f0Ku(b 
zJXtPxkSA3x7oDQjvq=XNX2GO_<+0SQ{c-)_QRg%Pql&{7a#9+ zaedo$Ve8{~5B_-HI(TRthgBN>WdI)d&&B>fz&}`MUg`~H*w{L>*!y2t;M7wK^Dd4St8j5&CD6ZC9wt zK;XgRFE0CfKjQrA*UgPJNP7+%XMHoKF;T?C|CyDyLk%UW%E}8ay8R;u8c5=C8R$@>Ur3`+ z>@^yj+HhYuyxdGirA--3;VOu*`SHwB0nIzBt4mx8A`^m->W>jBI`At^;?2vjOc32Y zn*~uXh~+@ki`Ds&h633?^uy25yiNm_6U()}p)|f1<~o=RL@0qWMwP4@ARdMfFnJZ* zWl;7z0O@HT)_StSDHJUyi%+lbCbe(*0KMrmP)x)Qr&IQ4$J!ZQ$xM2G{i_-J1x{LNiNr0P*+wq@>#H^ zhK3cVrj%4@(vh$NsIyS5PSz1`I#*FrX z)3>+0oYhf2Ou)l_8AHIItSK}`6a|Q&P&Ab1!v8xRJH1r(zB^PVee`Vm z0`UeAldsiTS^5cJx5;b=0D z?p={%aet${=y|vg*kM4x>O;J0Cuvz6E-x%glgnK_c6*|i4&R8qii7g=t1Bx<*wZYv z#fL;N-)2O54O2*MT#Vxd8Bc4Gq5|LR>eR71wJ>QyGx5Oo)mX=_=xVyqxbcS`MC_^s zZv4eGVWF2%0%@2x7`c2>S-{d~q{AUA`7~a0aE4k7jzFcdK>7JjU%tiXzMLGso%wyx z^WLhjkxOuXS}!8gQ$u$mp1oaPU#Gf>0tH{0;xe`vNs3pY5BIm5UeyYpKm}uVf2cQX z(;dVnE9c+(*B$D#K(T^HeY+l9uu$FCL}=2GKe_T5S}#03s2KdoB&e^@{%YX=Pi~)u zX@>*utb|~%kz#_eAz%ClP&1IEFsNrYG$uEv7DdX4Ri=2w^24axUJE4Qn|AypH5x_x zRh|uprOzn&(l^mNz;?}m6K)XlWH*i z00)Gl!DqC;lK82JSuYM^*V~MOB9JgyfsA56wnG}qFmT#9Zoy#+qFFLz{Ot084Q4Tg z4J$VEw7>LD22x~Af(+lbZ0GD;73$FZ84+Oq((O2&2~s3bgxRRvdgjl(Sqk2HMPeBr zvh(xv2h2y|{CJ+aaFLiTNMOTtCY}G{H&eVE33*R1BoNGuY0ur81A56kRw!#DNS;Rm zNS|f>$RsFG7LS?v$mw1bF6GcWlTcT`8r#)F|GTE8NWK{E+ntjO+7F-e^J1woeCM52 z_b)+xF;(bsxEZG<$2?KjzBleIE^KQuhpy25q?kY1b7GGHfzwTVbGdp(d=Oo*N%Fw} zD3iyHY|o%5AVpYY()BtX%vUZ*WYE035K8pAUgyg2zF7B(X7pK(@9R6^TD-YM4S&2D z@ZU^H(O5a|c*v-wZZSV`iu^L-!;BKeG8NyJ=LW#xchQMRy9Il)D#=hlpD=cq`!dNN;YcnN?zZ z0zB;Rdy*!Dn`}1IyNx+VMiV@fjVjf6)i?y9=kW zg`z`S{OSAaOd7bFmqxwNHO^`%85392Ty<@2E(TNWp<~8OI5;>)QG$0PO1T+=Hz)nM zhylxQTT*>?wqj9H8VM(CJ+9+7cXkj>1rzX-$dL^$_EIb@ks=AXa;O`ta5Z3rH8)d2RVU%Ioc|3=SNhumU&TF~_r^=9uI7x*(0$g5&SDC+behi(0L9;!U%>aTLLj;R-L_rAi=9 zDTrF-2ImqULH=6_h9~N}9Q+6mJYn24rp$F6GeR`GM5q6p%^FrgHyf(6|4^puJki9c5)@laSuN4`~KP z(JwU&3?iD8Z?FGsAPVh9&DME$1-~mNfNQpDECM|pUlqBDIDfRF z06#wVSX3o@4%b>vGCCUS`IEs1h|K><_6J2Zh|yEmF3mBg(osS2$3TW3NNZhwlH~9X zr5q_=g+3=>-^xQh%8e#$vesOUR~=HsIv-$@PvN$Q1q$5`+d*x=>8{U_n560PfsfQ@ z>(}MN?TA5{Oe**CoFg^R+H^V3a#-z2A$2fYa`SL|xL0TdFX9pYKZ&}~O^ZCpjPs$} zAfJmO@jj8)B)B;*xVXLpA;c`(Qs%7H#C_C2d|VtB4vvX|4mfmjrD?14CI|$Vq}A~9 zLETilD*F2iN=Y0A`xwzG&x~JqF%(FEi*jCmJ}b4dG$hOXwQ|a!ik6mDTY|AVRIw_j z%K2?rCQV;zb@B3(e8^l-B&~oY1=*-{_Vjod{Lz5s^AD{ZUN|l2&v|NjX~ac&G`=3c zGiI?^UH0ibr-I9%{PKOxj(Y}cg-kN}D{)-*ppV8Mpj8eEYKWLi>S720SJflEQGM}$>Jy+m zJ53OujC_Qz=?E&a&SYpQ$VW(9%;4o&4OZ>PU_1eBf^PzzUG3-sJm)I~9I`2%{j%}# z>ww*Lr6sA`Qt_yvT$0e^1rn*(N&vcwd9u&Aa$+RDBMLCbY}&Oq&1IQCQKEYennGwz z?5w$O>*FBS?kbKO=g-jraumB9k|&S-EBVV@sQn|HYt{61Z*G5N)Vv6m2vUlx`q+zMF6>NxPfWrj7eoI^*jUIFZ~m|~-}U)3cj72=BW#Y#11)L?+u zQec+A2D#i|59>=4*K!lz+tPe2po1q}nVs)PSf<&6zBZRBofL!&nTo;zL4+G4SXo&B zBUn~)jpTbfg;a8da$NeLncv@aByCx=rO<{45{xlJJ3!7L8;6M!7(yW>ZbqJ3Q&~9` z-?Y2*zMMeCDHp-&)?)P59RVt2K4vl1Tb=Wv|rmhJ- znFU{)&UPo6Du|Kc@q&0Zz8QiNy-UMu>(MzQREk19Vs5+dxij%R$=%_MA}&e^yu%CX zZu!NznC3vK=8xjyA!N^pLVym;EU15QNlL4-nZe7X#*0#}6#UY+=KiGkbMGCyfrE(0 z+*Mf_u)_09eX5=Fa>72D^@8!jid3yKiKkj8}onV zuok}oIZ6~LCMtxPr=_K3RsB>P6qfm=T_3~(-maKI*Voq|@UKI3gHV8AS3#Q| zhmM}!sllqkDli1=Ts682J(pv@U8sL@d<2^?_uID4VJ@%4W`^h6y^s165 zlOs3pMWNq*1QBxWzY+!DfR-BQ@cE#x^iNDCil{IOO~>k&Vzf+{iNXaM3&KCDCx{7~ zJxOO9uc?Wb66(0pU2eAae*V_G`iw+!c?+ALpYHpcXHwf08<=xl{7JW{Yh1W`)fw8J z;l7~&xcw`1w)NU08xdC$Q##8ifOc4NH=#Y z)+yy^!2>ommaTq1zdwF6p3cchFwJmZZbM~E@jRXX@evOYyf+$j-^^II<2Fl9|HlOQ z1GRyQaRPdp7mR_bqRO$%wB%2o06Yxfr$_clVb#(fHI90NV}rj{8CKj84ktyw9vLrcE{ek9@7UV`s0 z{FFZ#=0-2_$udpL?H%cMy$kZqO{``vFt3Mr976hbtBlM<4-o@y8)G-;J?{|%JX0A zsjEK|?rqy7;ksC4Hr<$F!89zZtv)2&y$JkDk@OM^9N8eS2i19+Fm{?ZQhdL!JGCE? z(d#o|0lznrisZc$`BKD^iquk&OnQo6aDFCbAF+)8NWI^5LIcF(23cU{u0548Y8e? 
z9Cs#;pLGZY_+A=1Hm11B)Bc*SYiJDivYs0ioUn6AX`PkWfYFbt*|d^gUb79N1mo#S z9B*zSMh1rc!uNAnrY|06Nmsb#u*4kE=)w#%!WTe`2d9Y)Ifs}^YAn^7;??$OIO>zR zIhGPZD72ybsD%dj`0DM?IRCeT103C*GL>lG&Vwcs!RUO5+W*F$;6qOL%umJ6O#p=5 zye;KQFTF;^A|EdAUf!w4y#M^Z25Ip;GjS*?EYoSQWK*W@D)W~THO&8D!aPK1+%)~> zk~5ho-1CSc7sGqgY$QHgy}xjEi_B-{G;Q(=>5=^UbG2_<;arJ*+w7SbeL4+Z5A%42 z9WN)b%?$NOo5^E)Wh>+HE>Vx!%Bp)c^!k$jDm|w^nPRVHd5VTR(rmgIXpZ$yX!jor8 z@&dhAm%Ak`#GF(t}GiYx8f#x04)QD^TltZm0&b0k0BTdSxmuYf5zCLPK=!Z z!B6x}Ou=%ob^~ZqREZ}T;|KK?(jEwc9*PMb)nvqWCJiDijKF2b)MsIkxI9fkpX6^L zgwfr`>fi~g?UEF%W+P;c_{2Hf{bt0%8{@G4jrL~ei*U;xLAto65?Hk`dIi}Hs3Yz~ z1rkvZ;HfY#BqocsH&3ri8ZKOc*7EIPp+{OA=}+gsrjAxkRt1Zb8 zqUgg1c42wTv>o5WgtUGMbU43*-Tv>K3=Wm>SI4Vej2dT`ZSA&4J6#lxZSd8yl-*Bg^oKa8eBNecUA8vG!IklyUf$#=LFRV zM%b;ZG#vNs-xYX$20@HtxiVH@&Xdqk00(La-eR_3A`u62(FF z#VN&6V5TAn9|mX5PE?K#Awb~Ax&7meWM^FwF(N6`e1p4ccDu{GU)TYzv(fVnw(|1V zy=9q-;nYYVSlhN6?GL*Y)T(nB2kn5#K}wduK!nQ$*Y3vIgBXfB+m@Z%!xl^rTb>u7 z7D-FljeFA^`sKY=yO8T|;cqJ1^_6eW(uGg}XukCPrviD~6pj_b4_>*RZph zrAUXoN1fmU-m8mn9UtfEI9+W{@V>z(aXUH~xR@&Y*%d`xwFnk9Pr133)xeg|KT#K} zDZR{8Fx66}0h&5kitOfx5B}kM%eZwTH&>)`XQ?trq?!!+>TrJYJ~Nyh&8Cc}LpUkL zBZ1XvkiLEV=)}SfMs+UEv1Aej7+{405H^y(>QSN$k%)9vt^>_AnlLc$MP=o>H}x9% zohiADCc?Pgtk?mYI$x2+Ec6X&w9Sa@PH!v)KFG>>K;82hgU3^v=Hnz&zmayLbLlI+ zc{WGOitn((ZwYJE=zo7tx}E{+P-1iO`==*?SMbU78-PBChuwC%0poxi3UZE)Y_ibK z&Q2?m+&wxIY(lle@Eq!Tx<)vGnhnZD^bVnLip9i`u&{E>CPrXs#RBvPatA0W#_$U? zxMa)y%YM^Xq?5r3BUmH&H3}$Jx#wqfpZtaEb6~4 z#FIbkT4$bY6U-Q{{8UuD8-Rq)Nv*u=P`9(QQ-2kq`}NLW>w_kpnWhVBoFiCQvNE4x z0Zg`9mexx_9KO$yeq_NTl;+d3{S;Cs!jM6{uN1#xZ?!g5BAIEyN*^e1dwqT2Y;U5S zTI8H7N81-r7(q4tRy^VpWf39o*DOR&hp%|Efbh#W8_vhR&ucb6Xe{99G>T?IqTn~# zY---7{<)0J-Y|7i8>`IAnxYqt8T262t>z1&_yx&IxNdID$7ir33$-Yh?}JRhCYlt2BLjX%kssBD>tyYkYZgX0Xw`%POuQGr&M)gs zV5z?han9u>4?EoW!dbHBDY=@U^d}86(Mi(=lQ7Jly{d^XJ5g>~XnK`k)ACW3eb5Z0 z%8_`N6DB=o94%Pp6{K8A@qL=A_OPAL@{uQNE65%sZQmfbrMb=txsPGTQfZfMoR;w3 z4NMe8Zpt=NoE~~#3h?g4Rdh$BbAv7>nCTy!x*iKJkxh2$1d}`$ZKx$@6R!4`_OQZG zQ;|SbnAS`8hKXmD zZ_w7tp#Pty%9r_BFn+X_;#XR@ruY@M_-J0Q-jtXZW_Cu`4hhJWzXvlx>i=GT-g^O; ze*?Dd=GrS|sTscVf8jPJ!??O$6&B+=U>*_^p2hP5H1Yz#aJJ)7i}ZRPq${4GEa=aS z1ZPx=w@gew&Hy>E)E3sZixlVEQn6i?g@v*15#*`_ZZclqYc-t-R#o8{y1;owbX!D z!3!CPi`~(IILj&HtFQC!T%AM^$%mT%()3;#C=}3vE;IL9cGK((lPo#Q4*M97j2{IQ zpU&u)R>FAx^{#&l=T%lJn-*0Tc92d8rV2a?AnDwGPthkDr4x-;hu%drQxq13#gN=T|IzaXngPtCo0~+H$-}Lrb z@TQqa4%*b%u8IP9dwXgLjFggA6RY9J^U!T4@bcpJE
    0c3{gxYlf~TVtI)q9TJ?Aa8GnEeh0%cALBuFCdXVy~rjGsI4!#CPF z7hZ`nPJG@&#;~lu(2)0S#j`JpwVgvHuqpwn1m1MA@LYDzk*=g<;Tbdkw98)ep2-9M zGX*~X@vCk8s4{!kJLXzPe-<7`tmaT(hMxrhC*p+9*paX~i_ux|AH%8tE=FV0Az5kbDo1Y1t9SJfD;AGs5hw}X)F*^Y0RYmh*!DChhlCJ z|GUSm>K=4Bzux(p@l{^%beaIzhS#-e-17TW?!cz_vq4!wA5kCJxpl40JnDqOOJ3T) zu=qP9@+65a20B;B7<(17Q=Upxjrs_DLcqF{Qn9mriIiiL<2Gv#DNPyL8#~pTx z^<*WiXyUQ2D*efOZl$uKZ!qTwFzq9pg=lPy(ZfnuX@k*BUdl(E`QBIE`_KG%^4|Z% zW3-TG37F9MEATv6T7KK_@I>|52%ex($Brnn1xHS{CpK1CaZbo)m6ch48|}mx6k@#0 zA=%=sD>qqJcc-P2AhieMf{5M^D00bJC)k+MQjDh&D?0Ruf2NABxpu=HU$98o9LpU= z!boupV4}t{dF*Kuugk^&1{!?u~hUH6PJq zXj9Q9=)GD>?)`##mL2ihRY*fw^2;XY002M$Nkl9ur4OM!OO>#?5wnP~3`X2F*oCuIm)TE-D0A!56N+_z_AEBO@t-tLztfo?V zVSw9u5_NYlhPS(nBja^GIaajXKQEt9Hu)uhZvfOV-Kk&ymCHCXrsye(i!CWHNT!N& zh1iv6hA@P()Q{&Qb5L*KK}g0ojgj6jsT}gE?n7d8-c#J|9UE5+32C>l8w9p_F7;5q z2sZuZ@%~d|RB%+QY*HB4RlD|%20TPl2}OnsrSTR6K_+2WB;zQzN{r+2ZXFkRO|oB~ zJA(03ps0KNG9ams2EewSA$|_}ug^i6hiD%e3s-EdLSSQcUAT+*&60mduxfHgW4CqF zpSoG$(?Xk%%Z}Sg({h;yN86|w6ETKlJ70M$dCJmiw|+trHgaD>Lnk3o;AkLEq~B?qg0 zuk!(&H2($8U%EUvs87v->TkhD3v=De|7c#W)o*{88sE$12z}DO>nzU|zo@(gAG|pz zD*2hDK#~GON`WM>J*4DK7M`TQTTg+fex{hB2)InN0*Jzj8^#3c-h={)St2J;T0%A# zxzHReD;DT2PXa$22-$dBYwne&fqNj!3eAKq5Llp;=JnK!LQA9ypjRwAmEoS|0O5Lv6a>Q%$-fFw@QbIOXgT>KU3utkuCboV4R}DL zBS-?&67D1`f4Jq37-RW(ZFgyDJekHj+Uji8ZP$5yReBmpw9@eQj^Mdldi?1&0?&%N zeI$0Nt+MUQ?zhQjUc>w2R>AWfymIA3Dx%sTVqJ~3*1GC`>%kMeAg|Dh06iq!NU6>t ztV2KVlR)^46Q8+**w39TOHU!RKsV3q31^{(Kr8-vm=fq??mcC8-W6AI=FM$wl`F&s z&^CC6gttJbqkb&4zyIk9n|su8wz7JKWwb}FeECw_@a*&U^KX6IvWrWtAD|{Ob?R#> z@#J6sK#d9S0WEl0|C5-fZ(6b`{#0B##iC1N38jF%M~DyK*4$?2o_mUY@!Id(rfu7} zpSFgdq!bk7<=QWQemx1;P`=V^ONFfydTgI!jbBhPA-ZF-X}!foGR*UTd?DIe|Ogcx$cQ=VP+w2alr+g5=-}{ZMhL z>-SdBE@9if@kL8iSI|EJp+5QjH!kpX&h+ki!s|xoq>6pk-qvAvJW_5`#!az%9#~=|NaZt;$(Z`(hs?wiWd^7JFD55>`E~X7M zeiRid{FO!umg{p|e@C79zE|D*&-{4y(R(RRt3T96d_ZRb|EYk*zq)0y6&L1OOG~#6 zFDtT%<44$2&#baI^G-=&p|#bw+pxk6n>eP_CXXw#(!zYZ;qEo`MR-N@pd5K(3jX-m z`_Hm2Ru1*FC2ZCyXZ$l&e9g5R9=^#6Cd^|h3INFv?c0kX!;L6nBIhK{Ca>P~dl1wP zuJMcEVe$6BwSI_3V()+f9PEIAI>Nx_ePNI{{V9X@Q-})Uzg9abIjoG=YNOV=N;`7l z>4QZK*}if2HV-c*{^$qjmr*t$0FZVN459v~)lmI}LdzR-=#bT8e-=Fb^1#6H$BTc? z+XgaLZ>g#OW zO!_NE>M({Ig`DebX*Fe7NhQYo){PsH;TkNjbeNTm8et{mF3HO-wDZ4v3v9s{igCdW zD?Ta)V3JlA2x@1NhtCw+AW^(}R`YCi3_UOCD;k^Cv*_JE_DimYVvOa0oydV&9U$Xq2us&pkNqd%ywYsa$f+38 zNoY<{x<+{%qO5`pk>@4WMQAB`{SL~t&{VRwuWsYJ5(fK|HOHtbsttWCNII;32Td3o zm+fKYBjZ6kvG*mTYNd|Hf^@07mcv=YyfuZ5rUqz6}zlx$|U3? zaxQe3J+O8s&K-5@&h@tGGan|WR|?n4HvjHlVw8sx9CV%yl2VIMealD+HL5ws=!6F!nx zzWRxHEJX%6@d13M64g(2of31m#(J%ys+HUmwbs?tLcWQ9`@-GpxbEQDp1tWRr*$Ab zNe34BwJO;MHV6@n7jz_boL3M=bX33QkpBFXW(aM3^52_V1?oD=X+pMnaYMb;w)a_m zYn(Bv%iewZbj!)qE!4mpU!#LsK6tL{iUVWt2_L#ug(tTp1(FmPVhSXI?I9*`viKwg z4ipM>ZMh4@MczA1yHdKD>;(}1^~p+Gzq#DrbLL#j!n63B-}_J7vrh_3+*sv?*K@=^ zcJXY>&dflN6F?|%r_Z+StUw8p1R%ja2wft9+ktD23E*)RK*|!=OK6btqL3#c0LK?r zY{Wx1#|rbaZ0h7u2>GqZAG%J;fr>>y;ER+Af#ed{QGK+ttdglVe8zbWde+@{o0XLe zvv5|_I$D~D$Jz~u2Kb4P7{!|*gm4%pNr-zwQ(sg&Jg776p+Db@=MgckIhL1~;qjJJ zP}&YV|1yjMcwJLZTmr2@-i2igc&^@4A{5JDrDaez7i@XY=ZIrH8t|^Ltkmio8!VBP zN!;3m&0Mh1Ha+qefcFd=H~tU+ca%zi)=m-x31lE0jJ2~Qzl&qw{+{7nAY+8*2<$!m z|NY=ivrkXWN^f(u2Bf6gYO8+x^0#gw1VM+@mhZOz_k$nUBjrz8erLq?bvE0kom=g? 
z`Dc(=b39%=cw8U)G6-l+ zF9UBAmQ_|(+3)`FR~s|B%*uCESv9npFm{}^wn779adzM-|H+Siz!uCuni438dIkwy z!+Rm$(!t~voFQF9{Nex1h|v(DFs1^r&72`G~hJ6 zCXf-O5+0v4w!}_9afY3E<`t^^+iLsxr#}vFYMlZL&+)8Na!bV{?uJHMT|oD>FrM<_ z=QyhbWIPdIEg-xDgHkFAu0RJFj@&brk_0$pLKrFC>FHT^%5jS0rOv2?ehL{h z@z5DgtD0Tq*3sGsz14T6m@`g5WCVe+gW{HspJaBf3JMk$8-ogX05t zNZHcL2z5pob2JwE(q&!hT5&L@=w>~4pgVbA)o*S3)Dq|&8`s#3V;1_cH~MI)sb(xx zI3)q|)Dc6E5+Dv5om5|K2>|Amciv!8_~rD^eD{rtd)?#xdmd+;<$VY%pk#~!o;KhA zYsPg9CB+!D`cv%}zp1iM{m18M`>6f;vGu$Fh7stf{@R0As6tGgJDadUnamT6i75cb znm1%Lqh#8I@KVDT`1nGL_?)BT8fLsn{55ok+Ae4VAA&fq-vWv>7I|0}2}n$PNz9D8BycIpKm(@%2p)DNQ8)I8WM-5q-OccYhFt* zcliA3#L!_Xz&i&sZtz>iM+d|>r_&B@>O1{hh9`DD&+7dLdCgZQ@fP~$p7Uy(0Y0Pz zs!sz#Gqr_3PX+VmMjT`WC6CT2D4`9I3GgtSGV8U*(f=(C-srx@eN`AynYXEPP}KnV zlbj=fU7PnU_PHe^{rCUXkM;Ne%xO=9@Xn%#W^E=r&4cuZ^x+HP5q<+CgC;XSiWdZg zs~@UI;1^9ujfUr+wVfN5W6Z=zFHjcxMu{CC#{eJiVE$lANYBb57H}@%-k2A7UNbQm zGLW&-SOsN76r zQ)4I|0|47k>F&3D+rAE98*1n;_~fg`c=Zu|!1g}2s=m-xju=tKSn-#_OLer@vFfkh zWo*$TqW44|QGZkTAj#1lvVu8Q#(NJxqPO6F70(ReHMQxjKedw%3<=tAO$T9m831NywrUiBlwHvi!xgws=FYc2}fU5&5tUGk9+sA5do%^feeG!!Yc znw}!dOjOy1C4a;y)MmRIvTz2-v00M}EJj?zqEVymkclK#4>R9UtU`l%hz!aJWTpVz zR=F~Cc&I|xJ7`@saKIJlI_P5C_mM=k(LR0a&=`3R)HZzjH}CVRy*w|e8N9d9FBI!P zSp6YV1>V@fyr8K{^wXtGYXRL~^^%eYxQKDEhw$osP3SP`$!m(q47X`p)yn-jUXp}__kL)A^9Uofg}Znh5|`oduZsIEHz1i z1C9b++wVbuLa;`|Kz#LycggcrHepPW)zvrJ;zzgG(<@i|L@!iD%8fw3FyDXm@5dq3 z$@5R~Qkm%aw|>JW1UNhlh}jxPVF_tT9GRxOL1=X^kec-XQeUmqSIpZVfwkkxMh?ijCSO4do~w zMKQM^+dh8N{coC&dTLkEW&#?c_Q;YK0Pu6|*S9?e$Q81Q={2^lp@^r(ZTysB2*?}k zmQQ~b4`@OR0C=_$pE{Mgj6eJQm+EoXgG->J*}A)LvQx&7wKU0WdVlw^E-P~sqNlgVH0Lg^77m8)KlUs2lObfy}j9K9D&XZ1(Y3tj!L|j zMj46|-QL<{S$O^xk6?Af0kF01tMKvBW4%i$VVrXi0jg@ulb+)s5Mu-CieNqercp`X zG(!2Nv5P+kKnA`KDq9+pea}`K&{umdp4UfIiR!9&{xiBKAh|NR)Uu$Dpa&0XhWU+ySLTXKxy{hQ9b7Z&SW& zyjL5_NT}<)T*eCD)K4g{de0_02yLu#VOG&ef8jn1tz1)4RcFgztgtN=4PIwGTi)pl z=S{Lncxub=!E4yak`m%)AL4P^rBu5jO&yeox1SJLD*JCsR@nuYT^>CDfB9LneIxI} zyrjvQeyeaYqJW1d5Et>jYTrTrar&sO1L8m(>N9YiXf^o0d=Z$T4ux#Y#Bn+~q+8>@T|NhSU3pSOiJu#5EjuU2 z=Z7Gajn1Z6(eN=tT9^G@_`3UluGgduC9O%0lMzln+-&eo#_$uJHv?Ia3uWkHjtKgV z_=S)fm76fk|H%^VG1!7W#3eG6%8(Kx3`rN>i*1ZEnHa7!2(^)0SOkDejuXNnWM`IO z$cftG`g#oLE%xI2&5RQmG%>by;e3#yl^4in;xN@A2x-T-0GJ)3uk^DIQwQfso-H;~ zhmGP&c_9cSDiEv^Is#i4EY*6eO%$UR(}b&4ntcv zSW-D9gw*>9*yeS76MgxQT}Zu92Ssbn`!CJQ81wxCm9JTbE1d9rk^JJ<`vv}d%}p&8 zOSnf_XY{mS>{0UGa4Rcv_SU<+Tf*31Tv%oWrNb?oFor3e%v{X#p$zx}c_G3vh5zd# zyi15{yIFCOGvmz};i$~peyjZ3=RK^H+E?wMHglu5>jN6;*udarGH=0=-6Z5*pYIgjH?yf4ktQ&@$_sycb{i;tYo*~z$|whN5lxcQ`N^_vos zqEQX-ft(ZM<*%@gJd!5I*@G`LF8C!s4xa7F8{f;zss(~MFCd0~YTOtgtad(lh%UNT zea;6H9q=va!+aHexh4feU+s=Ya(Lu0uI1DIT{tBvyxyQpEgd?=${S*~x2D-{f9yG% zH96bfd-iO`Ny?|dpd@{&R{gf<>f8N$@*_!sBn5_+0!d(dXz80QJxPHBh61sj4>Q*z z{s1~EZecWcymd5G12qHqHKCt z5rGceqve*G`b+V8lef0VHAOIx5*l~`2SP|7`uAJG#8a%Rq0!ddcaw#?`Yofd1n@G7 zS4A#BU6^d)X%4D&5(Ba;)`CX_U^h#(3ARE#@gfAMjo@ZfUogg>@%)B5`xA)+kDf8X zMvcm_bKh|^f+WvIIFj1c)|G1GG8zE3QIsWiN}*A!LPIT&m*~Y9%a5>>+Fe0F=PiD9gB4 z{_PJ|L#UTOuG*m|y{Roxw;B(sZWPT3#*6i~ano)BR^X}AywzG#i2Y4$#YOWEv%4Q& zW-S2E@*+QE@&wzrZ;usZh3yMhzsp9>Ir*hJEPmuEVw{tb>ft|Ih<>cl80Y<&$r8$PkiRz=_ha|$_=O` z9(3@nVl!ea3hY6=KcKV6euT(zK<#vtB?;j2-j%UH>X{5Gqsm6xsVAOfdErdkzGn|f z{YJaORG6P@O^waINE|%i71b~OPu~SFaE|wO<&s}+9)yM=#)$w( zbPo1c2>@&#C4`6g*y-0Wegr@w$`iELpSD+iSNXbM``|pIBFkQTdKiP~6EaM0-@M)C zoN}4p`$iw@o>@Y_iD5L8aj%Q^rO)v=hC|w%k9dv3RM;ig{^*U0d)?#J z>+V1?@3U1eZo@cKj3K1Z%C`~1Y0EOp2zA?|<%M?jN6v$CF>Ayi^89wKb^(Y5l=AQ* z4E_C#|7TARVdzv%pfNNRRiKX3APldjmK*~gVl0LJ8j}OhIDWw6{PgS8Hf{*@YiK)Y zt8hc2zuHf<6mS!G$paoRe((g#9`XA8w{yjvob7jDNHK)K8^|lSrjBpSbje6CuSOfdZZu 
zZJkykv_LoUJ>PvUhWB)6KuX8c#f_MJjdZN`cMqIs32b@z_Uk= z9~;~>gr5z2cUc5)Vt;SdPZiV$e^pzb^Ra34q&f6L@;QteH-t6W|3$xe|4(>NBssui z2Tdi<(N}%$<9o*N;~|r{T4&Xl0y+&Ii?`KSeZ>omA%w9~Le#80V*>K3zD&3egR{bJhlN)0biBBq-sxtRqSabfwsaT zb=l_aJFRwKGvVWq3At80sWw#2-N+>7q^=AbXCuZr$Lp+UKhkgFW}uFk2i1sC|< z+ec5~m?#lEP*Y; zbr?yRPY7QaCEj)@3OLGYst{h?DF{{}WUAE3Q}nchdfRc|=V)WvOj8Xz&6V0Tj|`57 z>O*}Y9BdMVb>ku#$9u^I(VH=nF^8NBeaJujcSK(=qyoX%{1bs@homB&P#`+I~Otw`3^so5C(B`RbIDKEkG0ZV03b-OGI5LSN}gx9*lscgo7n&bEx~bZcM* zTrWCKUVgrZ{Ry%9Hi0~$RelP&x4E(n=c;WsVd6-a-~3_zEDaPXw&rcP32YB3d6R`FDRAIWpli=kOUa!~e8dbU=3HcSgjfWrS8hN0(T}Zx@DK_KqNQ&V z#)p*_+5FO47t-5$QY}ru1EEAfDQ(v2AGGz4-G=hhWL=4<3vn$?4FI3SvQB9EexKzQ z4MP|hMJD(bi?=sfb29=0Af=YC%i|yl02#&8cjn|nT(^>tft*J106GW_5(5Q-NO;k) z(X%hK`W>5X>Ec_hsXc7Xl-pKUXS1e^u#tdd`2XT*Ono|A1tTGd_7duXEcH2r8_CPD zEIfF#0mGv+=KDLw0X##!?N*ptj{+C6k|}dIuB4Sc_UMw0_N&`puqS?dA%Y_{K}Jci zWU!$Xkd?Y^+Pc$X-5nl^BLg9H-SU+-cm90axvS2`PM%Iey;R$=s~X@v9i=bJiU>(k zl$XcyeJuafZfj-Ss(Xg()rdz;I1+^p;2~P}yJy!%`_gq^e`WL7y5C>HSwvw8vJ!T= zQHkKBE7jBxUz~Tz$>BM~fDOPgsX@BVbs+yPkNRHP2BIRAMZLWRnzjW&DM5q8~mpL!L&-wr?i{nMXvI>}Q^VMWBG0`mG;B|u6jc*TfSkCzb3 zCOPB0DBkG@80%*la$Z56Ejs2{_>y?)w2g#)DeK~`5vjS>%vc~ENB~x(;h9%N3{}Ow zZ*Od~l5xdOqPE(4?gAvoC=;i>6i;8qq%MpUO1vZmFaxjsu#9S)7aXAF+Ua=p7LFX@ zW$f>dj@rH6j|RaxF}wraA_|*81OGqp+Z9}$axg=5AKZ950W)l(p<}P?HI~AN{0aCF zg@aAk=+FBzl+k|&8U1>xyo{rrwTzQ^cyWus%&i;o*!~}Y?H!N*jCKH(qA*IJn%=7J z#VYVb9`-UW`72Pl`yX7-%9Y;^R_h%U}QxaLcY=V?~Le1WaRLLK$w zHz>3{#QM%v0!hY-nTH*Y(M$X#P85Ch8M3oyN3958Tcj1Q0MEQcqyBJ?rI*eN+Cto^ zYrJ*=js3gd$NO=z&kYSKTR&exTa-*sX6kg{TIwTEAd4mWuea@f?rtlu+ygL+ArymY zR&JpkGVfHzw^j@{ZC1JdY1PKxMJpf5igIlDVQ0QxQE%xn0o#V*Np+V;sQC3|U^{px zJy1L}xQWgJyu^Ehn|MxJz>5J-`sx4~n&DrC*>axbI!{c=~AMHg>c>ZzwW2l@L`gvv-Nu|4xOWN9+t-NA4&jU~=iFP}=FE(%7 zX$?)Co($aioqLpzMT6^(?JI|b*4x*vfv3ggd}uI%r+o)iG}m#^pQ~fO>5z8@&2GV* z2f_U^O$URbG&?e`g&5j~wLN*1vw-onsK3oEMbiK|C%XJDQPAco$Y9a!g}mS`PA17$X57v*!9Z za}nJH*^>Z<$U6Z)U_ijZsI6wV6%Cst5!>5Ut*W_CAxT;q_F8s+F@_=JC5`X2wvT7< zyXJqjmt_4Oj7<8j6`A6nS@RbjRIT;U&o1^}^cBBlV^eek-c64W)%mZFBO!-R|F$A4v)%DKNAYNCMkKOW$PaNeUb=6zJUf zko6TD3cw^_kC@YV&*FDUGQWC42Q=AQyo3MkGylOc1StST#U%BmkVpWxfBk#}G$w53 z={Sk%q>Rj-f3|IT@hRqZ#nBC+@XM34*J??)*o`vM+R=e>3&2fTJxF)*mR9WOKD>T( zy`**bAe83=&pM1{kAgIMT^T^~K>FxMXhEhIddvt2FdjznYLz2gPk~;@G5{m)p)tIc1VHfK_SS++XWz+`byQ);{?QS z*}BDAYO1Vk+%y|KawK)od4L+}EdV4EbGo`k+qg{`J=&IUZm{mOu>lCoJ8&^z?>?*9 zyVJh=!=G{e%iEgU-{*9bP!<3jKv#$~hg`=}HN765iPPc^y+c z@@_zNZJroPOWgfFe-tXHm*PL9SP3kW(c{sZSJ`sB)Q>r0x{ZK#zkKwGHi*(PbTgw39Emk|mx1{}@ahxE9^{*Gpa0MvEqP+r(k4 z48U_ZExnW_hb)ucx6bBW_G5qlx8uiWKL082XQ>F+aeywx`RsAeRG! z+U!7|6s=KH;cvFKnp)?QUbPbqCZ$W8FQ=tF1j6wE2SdVidKtB(fQZ#{%ZB;2`}?Dv zJ}a+ws_E>nn7mRn)r$mQ%AynSzb<%r^IzV%IHmzj*ZRhH?$>apGPQa@Td)d+Qt3q`a16_#W7-sPb{$=tDm#dF{ACocb<>;AfX^2#PT&2uoHa}9bc67)hh78 zSu-P6oWlqKot);-#D8eeWDHI(A?7~EG{!M)=prBW)H2%o_#Mz)isaz9r|%-ZezOIa z24@G#((`gLxV_%CZRs6;OM7Pwu&v9QV<~plC0D@)7;Z=a{QN_=P4^OOI7{b>{321qwZ8-EGSQGSi+q|2bb7}I}>N7ZiX zr@?WZ6G-OealOhKxO1SK;E}q-H?KlK`yj?PeT6wc0YDZd9Fleq&@BL76mT@Bm3i2^D!-wljG z0&o?gO?=F_Eb-R`;^{t#j{2!j$cyTd_K?aI|HXksF1)983&=d^uJ$ zc@DgpilMo|?)&05Blth(cy*ia&h}2*hmrRulmzFc)fK; z05+=63~qWy-bgR!ha`Py)3L$pv>!BVs$T@hbv_ucd|sem`*YRvsDp~rUeEOM2RKH3 zU;S;uSf1%)pq{3C2v5>X{9p2F$mrJ9)do!UcVM49j9aG?AESw|hK0uSn~VfSVa2NI040V#r+8-|3|Zp2`$i*+_3*U&nQr5S+T z{m3}MM!V7=N@5vGi6>ab2$t@USi^W2D?^0D6yRy4U!QXolzA*4VO*d9XS zCW}r|;6S3lx+TALPp=7c&PF&_Ts^#6i8tMb$7+3bwe8;5VAp>AdOzo$9})x*m?8+h zKls=Jge`yYptq==)2G5koxI&{SL<-`f{rI;_tuS0=slyMmap6@6 zr6?XKY4WB}=#|urESrAp1rE$B!JYtAc{P`$0M=u{-%wuZAV&c}MihNFEuHExUIZ93! 
z_3!pD6+T_z={}o2a}HG#gspl6!um^IeqsgwiKj9U2=BlBryM{* zrhL73DhgOS%lib1%41&q)(8J631T}+S!Y5*DT+B@SAz2m0NG)p0=*-AU+sH1E;yrCG=O~f}$Ll~{8c+!qKN~6Jd-Lz>&0R|y}wRXJgjyU(iw;Q?Mu)ghtBEga+mh?>u zX3&XXQ}0Y177xSkgPSW#`tALXorZw{gOV#$T`dWeUdF8-+l0@k1be-5!}t^@3nq1l<0Gl93a05;|( z07d;1h$Fs?v9hMHkOXIVdumfT0nh4o=h$FRalWd?uL|lAT&MARs>t*Jq+01tw7-N2S-9`t*#P9OW6zy;N-NVQicwur( z)4h9LhZFXRPkeTO!vFK`g?oNTY043zFg)6;O*N(g;-nW_w3N8@Owr;I(W$eo)RNeWf&4F$I%?}Q}bcVK~=g>ot zt)a2{N6=R|qFXhXaZJ5t5ZLDHt2TvD@G@y8-OIyYabAV!_`o5bsDkoFF@)*qelrXc z!=a=P04^ho6<4%NYYT=6#^f+ou!DD3w__cB5I~cEfl)f3b6~6w=rY*Oy#UeTXE_t7ot-!6KYc<^ z>3bSqrHi-$o8w`OK(GwQ)$`#V#;OGQDiR7Yf>BFGIfWAwugf!A{i#pjHuA1QP-%P+ z&xyCxuNIv1{)4Ka?)vVz{=OP7qyx#QAl=pD-D{6rrOxNGAM{bnsb8uOdkb+uG_TuM z$12c#U!@y~kocN0NckDGYAMA%v{T3nRm4Nc(S$=yoHkI~^xMDbO@mm0A^Y~#N4ZCGjOE4AtU@auH{Es< zDr4k?$%LH1a*fRX+*9|ngl?a(2^Y2mOiDNq(4i0o0u!kKfTbi1#4!W`9^e9oK+Thn zp63FI5>YDI7ngDkdw^vLD`O8m%{mg<-%$J{sjpP(*UODopdSQt-3$(V#&a3 zGNWw+OX~4#@4`Bdpd!!C4ivBUHic9%6s%D;di+d(pA2Gnr}N(O8jb__#_)b^Y1(T& z)FtqQld^*H(V_MF{VWj`aF}8{T8PmXDM4xLM(NmLySA;jKiv6hZ=~mL9`MM}%ke&t z#TA{GKn7|Z2)cn_ExO%t*E(x%LomkEkgD?NsAUm$B96B@ErgPaS36Z|B`&f&eCqLP ztJ(u-qk7@tD=kxDL4u`}fwCrgOIej57yR|&@t$tieD1S^nrVgy0yRrKAc50C71fyg zv}Hr*>%Q}C8#``{bpf6z(P2(@t}D}R&@?9t#R@Oa4h%192N@EYTNRHG?_s=&!qmAD zuU5rtSHdR92tW7OXn3eU9e0n5_?7ZQzo zExut%;4!D`Xe${mWIT~hAZ6ISgM>hEzJnxqSPB3OLjWPow(u)A=t2J{wl+X7EsxhG zG6ujxK+i$&A^0Y&g#!=!hLQnUa=eGlfe!cpU1~~O08E^=> z1CSl#uM2~rmcjPmso0I75ceR!aLOXwQ=DY83Gxa=3dY*}ckTapnwwWWYM=W2k1%>M zj?kB6ymR9m@07)Oe8R;S+qBY%&6^-&mwK}3B=Cp513ipDIivWS0&PTBDD7j1K!ia+ zAQ;QMO+-7fv`FW46s=Vzd+!7KjzLD(uvc077L1Sbf(+T}ifTJ-!YCiVbUe50^+Sh#>_w9_Z=7bEmaMs}W;Eou&KYD0~u z>L-Kiq#~VqisNt0rVoR2;W-g4_)@|D@4pRqWfD?J1Kx%QA46Tf0E9Wz-`I9z+qP}nwr$%@(%80bqp@w<&i0(W z_S?>%n3?+#yNzH;%-U;Yp#yNlhHNXX*zra46l|9ir(T32ckmuTbb@qfNFB9FitewR z4MU!f__tY%uXKn9@xLt?j6?go`_8E&YO&TNiN&g}sCCst0RSJ!<{h#DYe zQvALjI}c&<`FT>$5$gKxJ_Y@)sC+e_?UM$K(K%6{;@}eKb=uWOks?nwuMvVB8jlk+ zkQ0Qst*e!sk?0E9ruTUMGvG0ujFF&_EK5>isl-}BHKKt`Bi4{syk$6OQ@B$3&G#D)0oSria66yauwx=OHw zyLxVaF-)$|q%kLXP^eqH48T#D{$0z_Kf4-Tf;Jv3FJKmV*CP++V-%lp5GcAtB;zOq zDi%qxii>tndbJCt*T5R@t#CNQEiVG-m$I1NFFZI3kxmJnv|N3&19Lh8I^A^E<(s|Q zsNzQ5faj;>Ih{ddH38Wq5-J^#cckSPBkk^YPlU$38!mze5h6{uO-+2VsfeTtMfz=a zGzH?#t|X62R}i715K^FpE_d%tm>iUnsTQo!oX`e=RL^eqn~WfVUtm?L9I37|+YkZT z|2&EF<;)=hX}YT3iIeY_e+El#^(ddGM(8Rn&+FAv$#3ZBTlu#iZY%$sq+`?XaRFJ} z&n6&C46C~DaVE@nBYq|1-o01CP%MY=39=yS_}8!z$lZ`3Cf&;!e5=Ga@imY4Sl^&!{WN zHQ&X3moYa}JVvBzQi!mcuCFkbiC))*MMT8oNJt&Wax%@tfDrI`9!SRdZ-)sIa=#L- zX%1%_eZ8EsYrt=kdVRCm*gqsVOUcQ^1*(e=>HruHwLr|Y<;DxWP`vbS*WrrZI<_y* ziOc+tOxDQr=$%6ISovn6K^6Zwx#~I$i2$!CBnj*+i%zNx5i-e>+qSM_i=TbyMv3bYEkOy zmRt7&U$Ww1n`h6PW=Ajp$pLGwg$#0ENg!334YcDfar@v~=H-@XURna%jg<}wGM7W; z#T%o9Bkl@Wp;j-z=@q%=1!mD*$8kw?C|eeYV)0lEVnV;!w^sJ1IRxWWxhvC}_2M_dWY6J(xBcZ#YN_Xa)+=f8D zI}eX5&EWF4qTCIM@#yY9QOAE2|6@;m`#a;t|7(uCI=}}h_@E}-Q1PdP<|s5FJpHta zZ<1ix)d%B9#2*WYDZ1KZk)>DfpwgRdA(t@!p&T1?v9~uU&f$%wVts& zZ+8-B^OhXnkIY8X3D{vaPv>ojzmI4iv-?jBSst#S-C=dLnxcLn9AqHFfD*J>g{6hF zy_X3Up!wPBz7#Pjc2*n;SJUy-&`EV9nTin?8^Ih*353DE*g^WH~q=Q_a^em9iv_!R>17 z56TGqOHmjsg`JPN)&bErQ2Q_(d&WuI_iba?NMGyI@KOvh1GhH7g8Ahw5APHH{coSA zD|9czDwwa*LG}pqboqA25ClVl3yzuxqV0K(6^bi+%FW6t$JOZy{G#}Z)dGsk z$)V$~-~{zEbN`+f$4qydxU02IbM6WEjUcem^*Te(=UDtro0;m`yF2he$o2s4NH{pb zJ>E@?+sj;4^kyB;Dx%SNqoeJkm9K$ZX9j;2d+mcyyhmh3q0iD5kPO3-;6;mmkn;mcj!WoDr)x4K{1MKO!lyFW#4_gharkY64hdmYMBw57Rvtc zOt_yYm?A;X==P_4e{qv4n~B7DoF30G@Kt187yPp>za~v#xZ<3`B)-wQeQp{YX7%Lw zZ)ndMXVY}e#KJ{8E2=H+!(}r<LE3`F+S%D7UhOk|~azfO4Y$vK<^!Uk!^RLL+W_DFVDA z7&k)5jmhgXRMp50<&AW4J-D8={OoF)_at#48IhlhHl!`$A@$r1Riq7~qok^RKG@Ri 
zLG_3sIByXNQ*Y-=^+S_b0TwW;WXmS%N6KnDQLPm)A|b~Hk#_6>uH2Fc+!`6F;0hWV zDh7iiGA*C`j5K>G>N>f@>lsxb4&BX`2#>HKln_<`4IC2T_T+?&kq4>hhBD(Zx{mt| zx9863){zXUkEd$?!*qIk`JZ=QD@+V4RSclbem+2;OL2%bRQI~edti7=9!!*pxGYsR199%>BDq8t=XNM>b0?3u54OVLq8)MJRGsB4Ek~X0d zDzTO{REm!bV@ryMIDy+QKr&CQsvf%D-J1u-(eBa~)YnS2_RW>!BG!$Iy;10lVwNkZ z)cHj|-Y&IDfB!Fzh-b0~9)K|_gJ2{_{VqXP*kuw*{Nsw7yhx9v06Y(osSgyO5l`rn z0yO~CTwTh5iC){~BnK|DbzR<%+zqth|GEG^sgr&KLFQ)_q4a)*^MAIaq>N7ndz~8f z3h#gntLG$M@8yToY!>#S18tP+vRx!$yAER8RsKAN1~RGgmk<9wI)d71R;f#}F2-u8J^XnY9JxL!}~vV`xS}Fd|Kwr;4Bxf<5zf*67az zE&fwy)35IL7WTw7D|e~)jLE?gzs?B(X$WEneNbsE@o?nGXKhoyYS5L1_!dwji+!vDIu>aifsqc zns;D(=l${UfI$10u|t!#FQQ_AcE!d~2OmAz-QpPKz=mxGY>a;S8hq5}>P3#a#r0N7 zP-R#HKfL|YL$)=MNfhkJZI^#YIOZAd1aA&es3MpR8P%AJ7nEjEEkoKunC#cT&59>3 zJBuowD)yb4+xGE7V1@B7Xz|dLH28=-s{KTrn&sNNSqo>(BsT^Op8`DG4!wJSW|FS+ z+P<-Oz7uho-3w&1Sw{A4ZL2~m>yFpmxbI$3E=S#YEW&a4QaG7}Ae*#0AEze24KsK; zUm<4o1O6P6g(0p>UeVRDX7J7ryj3v-pMig9(fiiMbE<>CB7P>#Z(C5t7JzTtGb+k-T{~mL-v$B;h<^dspX<%+CCm9C^Ry*Es<> z8>p@XG0;X$nmTZ1-2r*Cy~PYMeejzK|2a^Yt#uW95j1qgWP9?z;g69@a{T&}yWsh> z(QoV*g%E2Pi1Rg%^z{sn$)jz2NO<6y`_5b~o!QdjvU*(2ZL{7KX(#gWJ#$mvoY~6w z(~rOr`vEa9py5xel|15kGsCM`y zS_<@DDeD^2qRcH#c5{7J)a#g4axP|f&%iJi*daenD22Kc=Pb&_m|0Bk8G&SCI*!yo z(seWqHKjKO-7~Lj+|o+V1I8~UhPi1yoOT$$1~ZM>_OrYCb^Jrwlpb%;RcGQ7>ztGh z`9gMM3;&wi|z0nB9VX+aTvrR&eGnFg7Ng>>qn&- z9gM~~%iDNfiE5y)&G;tRE=>(`r z(v`tMu70}p1$DE{N!@%eHb7fj+O)2Z$Z6Mu2#aw+g-Y~6+L*;w|GkFuy@*}W#tZ0T zgIc+^jBgDJF-PDO(tdLfvDY7=>riwr7b+;_kM7`Ucgf!R*)0SV4yA%@DowcnjqHP9 zDb6Dv7H{Huz9v2o*XkN)=5~+zwZ3|8sJ5>NvyCxp<6Q$+fpL@^P}YD(K&9fBD&DvO z$~9=NbN07xWrLiNX~Y{?9K7G)xu0=Qgw;F)pj?1h4m!Iqm4WP#L8nw@gjJ-XBQf&Z znX+C4{0$sRw?bh6E#;NTszbLoR@DB=aitsqInqYG40>|!w2bW-gHr5<<}Td;`{r`c zvov8s1yCQ_nf580KABfpfbguJmqMMZ^5}W@M;Uj@i5JxQ!D#%06D7eeT26U~bRNbP zZo)opic4K?&kBlli^kTkSJg1&b>*2Szca+t*i84(D^0KXYCpSnRwQ`w?F(ACrf zND8gb6lCZw#v?GS%5}pO_no-8O0XUo(o zmo+J^q1R+VMz1b_G}e+ASddnmI$(TZ7 zRAE{EmE=kFF9ER9@MzRsjU!XCV<*}+lrV@Vrz^8O1*c|~NTY4l{V9Q;hsyTCEE_Gg zjV=$h5OO=>L=k{F5C9yrXx2Rx@U=-=ssMqjm|GO@S$L=G5_Zi6{shufn#HZ zmhsK@7E<|R=bO6o$QUC{F>NIGbu!}_ee9}3BW)DV1Fk?3k`b`pBz3;!SIil@gYhSC0 z@snUq+WI3GXx;k^!fpEOlF4h>q?({&cl0kvaP2%37DiIJt!~=ubxw(`$1=}Vd2zW; zum7-P??1RB|MTQEZ8>eoSAb6;*f_-_gExeUZ~WB`{WHiz0>1wTS)xLovQ-E_r}(Sd zn@7%%Tryu;I$ZleFF?`mW@bb{nKF@s4v@J^VD{g%c?Hw)efS&%V0DDC23T-y3YjDJ1vN!(zaH2N+SM zp8Z%n42h`r_bo4Ni?@3_ z+R^W~)e3Y4Cp)gf))s8`1l%LADmL_Zi|d!ySDh+z2txowpY5^s*R`M>-(dt8gsSk@ zc}8egZ3NRS$EK+__$8xcp`e zhy+>k-&zRIq;YdiR$vpf(n`)t^)Q6$28lkKWZ;+RzXqf3UW!~FyCmQrBj_sM&sGvc znR*2OJ+}}I0m%^cK!0e?IXD4Ku_QUjDgg;06(@RaN~kBESH;5gD8$~z+x*31VX!-P*nmd#24kjP~9$2$;hnY5Tz53T1T66z@`_fr+K zLL<<4J6Xue%otQ7kimk@$%uJ*tpFVDoiP-DPJDeFy0tl$KRJQ9J$;S{oeh!R`@w4- zVqmb+Qot`xdQzM~i2U#sfxanmM8S0Ni`29ZZ>6LiF6Ip5(d-XJe>vR?K5R9f2`A&R!$e*nsHTRlKjWH#&`IJ>Raz zngwbuj~R80y0=CDIo!9^WxJ}@ifq>Bu?*i9TYK*DpxYL(E*X%Bq|%CjxlH@lj3d-I zC%|AR<0(97a1*nWuMePS_C;qz$0FM-oe@%-?AZT)X9y6L3nG+B@N$t1;E2 z%#|-{bg4=o3Y*StUZeY6qgmG7M46)tSH0C@=3KnhX~MnzIJ^vLE7){Pgm28sI`nbK z+w{9Pyo1W`L`~pwZ!mnVMmL2GSpF0?XoI!cWgY4yJJ_7jHNV@G!_iPVh}4$$SYcCm zPS5paHPm4Y(&!S8@qhuF*C=M`MysFm^t`FKIv5YG%%NMMTerMdY= zl&T4v!W&^FHG@^Q;}7dY?D##fQ-pSe1j3ZeMnnTO@>_Jp-+j!&|6(it0k0~@a5H9n z5DTm`lTjT!1yU-H?TAf5NJjr`(6yUl^x+HO3opP`n&R!Gk0* zW5v|wi26YckxjywUP~33@%r|5xdf+B@y-xNxX| zKdusI{ju%y$&K&es9#5{@B2%<^VO_vCH^O)F(d-M0n6P;gyg95Ox+QBs#eSbU5#)GfNf09Si9H%;Y=PFY1}T>cg^iH#rDH*pzj+&oekA>ArZF{*VS;S?kh*|oDeUah5{!p1KP(uIAF^Pp4C^Qx$g_kb=6K&k5X5>;m zrj+}LhBg)bi=Xx7EWYH3(kp31zqy^NFQ>P!j0j7fiJ*5){tsuG^}l#5My~#tcQoC5 zFxQyYx$!r|*QT}h$gg$bN)FS*1(nZ%39kEp>@JG;Z;X5T@@85bA%)@EvKe!D_*D^Q 
z)qHHRhM86rS}eAqaj9%0>cB%;w&`Xp%7CM{XY$f=i&mQ2y%cz9uZYr8eEN5pIK1B0 z8<=KlxLkOL^@-mJUaj%}ADpGMukt5 znm)3208#|rpbyT%j*A0wWbPqdAp4W<(Y!f^fkaY5703+bsgk;CDfI*DjK--Z*)tHw z=OYYTj2r)Ea}Or3H6sy;0yhV!(q`wdoUn9$)MwfrPaW7yXC>QC{kbDi-L&=Z*qij& z89`K=Z79b-2bzIFrS$ZN!Awk}ucL8Ig&iB*$`n&`1r-k&?dsy8CMXJOh#$S9xfbuc z1m-!viC>RA@t+?wc)X3#OSPije(0%|S76^>3r#Kaj-syd*!nTQ;RPI0#eM!{Y^DF@ zv_ey>q#;xgfQ*a*GHzhz$O6GEWp0@{i<#kb;Bel}VqjzI=SZ~eV}c|u2tkyhsBK`g zcAjQ#{F!Opu9^OAhgb5;bykqzF5)ycb=-x&r{t3uI$#4MPk{n?8)@86L*zr z@`xqcyC0%jS3&OusPAPbC`4cLL#>HLL=k{tMOUlHVP6%pvSC+q&mS=&SZ+4uWAt#l zct5+@Id}8Eq5F`t#qWxDKJjn90hF6f+9wqea<6aQ7q;HExu*FVFgDBc5~AWT8h-(Y zJS*^hE-~^6xrP`P}@VR?dM+lzA6#^`_#5)%v~Tg6O*Z z5|6<#wKPO^*w|VTL1QlKG#U&$+jrB_5qi_$sCJEUUa8y%RfrDVtuF|t`eU{IFQ6`sZ#zW#eNp{)lJ8$J%k-d__o}pc80o9}mfd&7h)F8l?Nlz(1GiJUt zSJSdI+v8u6OaWZi*pVL!d&Sjvx;L63t%J6u%I^cN*T1=k(;3lY;;{w_cn5}`8 zrfCjt$D)&xpC9Z0%#-E3aZP5ZTqB7h0m`3)V?~_cH2fUPA5sL;2t+{-F!~upGM9RCF zYamU5*gQ$1ooJAL>KyHhPH*1KabgLN_)3b3WohEqcx8SYQ7BB{@<;RxK7@xihL6Fl)b4warzr$dF^hVe#A)ju*+o9+9I ztiVH{t*W4xmC(4O{hb^Q9131{%i zrSP47pzEd~vL1&hi9KCark{@rUfh}R9kTZgjzge`n-WfU=D0c4rEWUuINLOMK~6BM zADfZrO;sZafcuE%*b#5|mo>&#gK6~<(cr`NLC-B)Um8QLMtk&B=-9$%j-cDR%5$AE zmOI}ykfAO1geG~fnO9a*$eIB zb>=52bvDvfd{@F3Ee5qtP@MO?tUxY}AfZUrx@naAkVWsKz=aVBb*kJ11*a8PmL;OY z0&eti4h=^K|MannGzDaa<{URw&$b+={@!yIH#wI=vH_gYi3@sWRD(irQ!-4jZ`p@I1xBgeN(I%WLO8K3B;HJ2zUf^fI?Rgc2F&#UMW=~RQQ9) zjoc`6nP*6ZPC=X?Hs(O_NLvbbrE_2OjOkB`yRs=f$`f^f948xbz=j>BLJe%iNCsOb z58nc`W&>$4A6Q>=KK3qTsKD_Dpl@vP&AgG)B=QKgUYeD_h@V^e$8lJ9xz(FmaoT46 zUlWAjwgjc8-~<%b0kCa2<9CXYs6j-40xjo@3y|Frmf|XBs^n0EU$j#1dWDs=stR`0 zp*Einh!k%W^1pV2@&%iZb8azZXbVql0!kMYiP|PiVVm#eodrn~wkRdsOjWmnRSK;{ zS@C;~ftKB(;T$pH$5f^&)6JR)qZA2NRgzAC8PKVcPHD{GAVM1{qT@C;-yqtLGtI|x z<|}PPne&$HJN*&ZNa6(CyZjeTsD{^zx_<}!U|a07S(_0000ZywAFSEzQ~7sA+Z=xP zAY5jDO54J}i&CE2lyEh90A67M_LJY|LSEVH$RHs~z0^k$H|y|<6_wsLOO1zGGc&Er zX2lu=u3%BR_U7yaZe&qh6I}4IZ;g|e$t%7qA5vcw3kG$STOAi`vyIrq8UDeuM=hP| z+ScZqUJOlMK>lF>yQt`>eDhz47Q%R}`2C862IhMMq0g!8`g>`pU{| zMk+pS1?+%s%Sl9@gQ-&Ycj+rj+f{)__9!Q4*Pw6&+;jW$(>yNQWlY5mv|{zOM(gMuYMz?93f<8H-K=!C&M1P-SFIj^csC6x=cIHSsrWOHl%(f830xa>MC#nsWMW4s% zzvp@$LP?9}Eaz{@d`;YDbFDB;qr|h-vBa;Zc6F!WSw#ij9sFC-O7;~n&zcW*9A67U zEpIEms(OB`AF*8)`vT>d_`sA*>JzObhn1{1^({j&8UE_h=7{THm$HGt|5NRPV-hw| zY|D}a@L^R5blyWxslGiuyTntZAoYYYI&f54NudDY9=}e!-ELZRn|WJGSV!r!m_~6i zLPN3C6n>j^;(Zr3^LT~hKIBYRS$w)go1=S4#Z7;4U#4gFnuHb~(BKkJI%lD5>1rrg za282&onY(xtDZfcg!0}WR3;!`x)uQxxj^j0J>U{jUE6df%sqsioI>1?bK!NzTVb+a zhfOQkXwb)S!vcN@SO`9o8)i%^PCI8LhiACe>2JX7^s zHl?o+-s<31L41+yd8l#Y01KJ~b%}-mdE+rkmhae~{aW$cTj)UHBBaj?f47I+m6GGA z&MGMQteHORu z)%mZgf;<)np;^hJXnDUi1bxqy=IMj+fD|plB2}mLJheVKiE=cBF0sR^d5&LVJBF?b zyHyLHC7RK`K#=V>>mAcd3RCm(yvrxj^|3B2v83h76Gw7YLMxwa^;; zU{F{s{=Fxdwa!N~Eb{oAqjAj_d2oYaZdTjQUnzG7UX4AS@I{^)#!y%hv&jczX9I0> zn->DCLyOSRKQe(*$tCUNBB0%;RS~+JsEguB*ys3Tll{Di8b4xy^esX$-S5wYn7KB3 z%F||5YPS8~*95+*fvlI?|CFRLXZSI;scubXGfn?-leDGaqS%T}JhF#+lnS=sSN^Mm%(-NpHD~eqm%dS%)JK#*Cb4#$7P&CPPs0mfYqe2srOmA z1FEWT(X?m3Vt?PK=yx>9%6h&5f+RmdLp{AdZ6f;6^V*zz;o#VfHmk=h{zZRe`8ZvE zuS``OXBVK2&TB$jjC*kw4B953u6I!oX{&&3U%imH4zY5W%=L5IL1*vkS~+YNaVP~w zwcgNbcMcQoObu3U+m_f$iU#I5$<}+vw}0jDF&_O%={z&uA$&>y{6D!%;8G^&=+W5H)pEE5=EGSC6}fJ=gIhzTsRn7+`b%4=cG!BwY9aA7fn&{>V^C6byk zd{NJ0fPwXR2kHR__jWvhC4^WrupkERw)ALbeSVmE?aogrp)&1bfC6ThOidi#f-=A> z2X*K&DKa&MJx+F^VQtE|6}QT=>Cg_y+SaCIh)F~J)ss&Eri1$_VJ^R87J2=Qk`*#g zb=qFkl-i1D;-3E~QdV?sjKoOksr#cQe2v6%Oiqg_b~72wt^Y&uQPGS6qB|=mUvH!R zj!!)a)B@UJ(Mi4^X09I(+sN};Kp)#P(tDgY)98$II5kySY_XvaCx+941>8ky+(}po zno(YHF}deONemjS6od)=sGiY?3>s_XaPxuhiRk)mi=D1$sh{@`z8)O|=*?rwBdwou 
zdlwkQW7lT}`M+Pz8v8OysqHUpZ8xAHY9USlM?nA$=X1_W_ezCz#?@YUBn9aWQ?{9| zZjoeO)nkm!q*VNx>CGPtZfb9vm1(3&!$LXQ?CCL?MoXHN%0FC*N1nJXmJen`=mXp0 zTZcobKxJ8j#7*+;Q{EA;JjUZTA7jls_zR$}%EZhf%r8JqdsIA&^54x%U}!t+yMWhX+9q~QVn2nbKJ z%^I}#$;Pz1cpU$Xq_Yg+ zA+@t6O*Nh;l8whP+Czw{!Wv*)OAce=Y1XYnsp%;!anK7?qA2LSNNK&q8J-CbMLDAo3?49X z%dg%Cf*~MX+p56pdb^8rVGcrjP&e45hEl5ot!P?JEBg%psH+|Sz5x(cH8yuX12M9v zXw8jn+~+ZNKEuIVS2@R3suFx-&ovJBvfu(Dey0`Q@Ywf{+;_@p!f{Uz%(e6IbrF)^ zr8u)tBPml}>?DSi~KF4!W zscOI5_?Y~X4yu>YNVlTT$9>ogCs_PEUE{!UiZc8kY2pozcwsz=~ojlGUzvJ|X0tb1y$OpA-Gu$R1 z#QZ-m>>*yRT;6KdghVaip<&&Q)lH%S0Q&g$r@kCpx9QC7rxh6K&wB*LJii(Akw0p} zls}Ad8dB5-rYO1~V-k8Mk5%is9Nvjnqd8AuXU?)u>rLCm|O1p2iB3}Qrpl6lZj z)Oud78u0{D;+TBHLv1badw9KHLx%X+mn|jX*G)of#nFf|eMU?t(Iz9w0ovmNKqpR! zQM|0rxJB405cAz~J-5N%n``)>pG$%u)b?gwJtOheGu1`a^C_%skJGRFTiIg@3AM8V zT%at0@b9BwU61-rYVzR5lRHn@KN&k9Td{Az@6uUPEnx5vU@8(}<*+r%o<7TC&yOa0 z1$cv=@ZHvzCB?35!8`9ctXH-MpK;4+67zoZpucXo8>MNdDyrh*3rij<7Xzs zZ}2fQ-)nkiC%F-E1u6|)USoDkgE>dlfx%<;Up)QqJ7J8s_hYbe3IBHE{L=qrAlQ4M zYkD5|&Uk8X*Frfu{c;hX%FliCtzOz{Z4^UT9K2vY&V#$oEc8#|0X&L3ty!kB`o+<9 z-YK(D)-JSMS=70H3|3f#b+xFRL~Znzs6W?N#fQ4&{P^A)|71p-KQY{w-lf&ZD4u{8ccx z3T6kAK+b2qOHj2~_3q+Q@iO^1XG&zrh2TO3JR21`BUFHniLC?; z7}K=Tl6wWNP-8en_loN|b5l3MH7DqH?Ls7&16M%+3sJ}_T_zHo0_F05K73H5tKcY! z?u!tBRG>cRN>Yv6(;+nN$QYDn#>rOhGD_9flTgeW*2IBkB%AGs>g3yYndi}X*X6G) zh#Opgp850oEXJ{2lN$yRBmw_YN(C8S+LEMmw!(l7LpF{nBVqPuCj99?@sKo2l7HxWAspYa4r593M&qw%-`4@6#Pc^ z{-ec*H8l-e+|N$Erz|E16J+a$<;m3q@sKYuCYfxNGt0a?R@$xXjX=`fM~U{znF#+n z-d7K!%H0h7GpmiE?EgUeG?RjOxd5%ZyFb_A(2pupJ|^cJhP0zx?bQ5GxMre))6V zMCNDFKLrke6sn9sc0tXA?g9mdZLwBb(A|W%|9>X-2&GMOw=j2cw)qwnf#`2N5LA zE3u9w@J#DRQ)RHogB%EN8V07a%m*FLL-UQ!Yk;;fBCTd(2)On;;aPg9(n7UF$e7fu zYkV?RSolDVC|I>)0Gqv(q z$@2RisfNi@^o*OctS~@7BdrOczlS&Ta$lPNHVsB9>nog+VY4NZk+^+GhVV!Rq6NY_xn{!n)kys(68TftMt_93Jh`ZYzCRegB;-VdxUP1U0luhQ07-X3cUv< z|Md4P#h9#eJZQqk$06&j`ALVVf(qwcu`1|v2hhBr?)O>lnq_fBOEq_Ta6*5T7Z%&Q z8Ri$pTU%I4yp|+Oh)%3UTtSH8V;JgV3CUpxuxgR9^$%!qqLd`e+F$;z?QM^T(=U|nQRKZ!%B-a{B;8J85BXT{4w*%Oyjo4mA#{@SBVxlZ^K zf!2u!{HHj*Z~$b!^eg)Wf=N$EkX_m_o0o)xgGCjtZQpZ?cMbHT;T#+v!hDjXJW^~P zR)d^Z6r0SF)uF_-3XJJr!Zw513is;fc?s6{wny)4@e;T^>*5eiMr`vNLN#7+IkLl| znUz2k`399Z3Mh*$^hX$c4>LfJw7f}7+7b!(TUf*8PKUT+= zwJ7^LJFm|=0(gf1k@#4T%o|lx6dl460)V3MJ4K!z>p+`8Tg@!$$ovTTkHoY}!KurP zZ*TW-KeOz96wacU-=4!Sa0aCKsk8SiP=m^d(@}iiWjc>RA^};quxZJcf8d%DQ#8r| za-IzGQE`z&0DHs*G|z^A+g7xla)w^C;5PuAqCzDDCu0JT2}@N$PnHJ(kc!g(vuV+@ z_Dd?%iK5bn62pSb4)eXS=r%7jEO|k)ykY$zK`u*SqWur9`y~zg{3EJu0-M0~*guMG zf*Jv(GFlE3*QoD%A&gGQo)a9Uq)`?>uX7vN0Ui)skYkd-^*a0j(fLa;HAD|eK{eRI z+$-=z5Vq@^D(u4*fgd5ZL=FnXi*mqoxya~aEt&Q^Qy@aHz>2@ID3T4juO$2TSx52< zqUkttN%i|*WxBkW^2?J9bi}|dQ(Ifx=hp-3h~;ZAM00j~fjpi0;HD;5L_vNaVajp7 zu15_g>Jx#OGLXx`Llc!E4NPL!Kc`C?iebF^-5X&lVDDl`q*BWHkiCynOdAd5Yi`%8 z#Z(%Ol0dr-aaUk8xe7|qw;O`LJ;tE>F6j6^K)J6btPz^|Pnoe`lum)v8J0n7U=3}c z1Y%zBnmqKAhq+!jcPM^d?@RUTx>oAmdi4~#_Dx8wo%LMg;pSzhgK;)567{>Y>!ITF$9<2NE?j9M4UP7&v! 
z+YLmefK`z`;*qc4o2GhnAR3&MI8bKGJY9xbZn7?hOCA9WiI3&wmJkja1PBmCA#Ie( zPOc8gb9#XoM-33_$>dw`DC}kiNQq{=8HCFb0&VhK?DVzNo>XSyPmO01uSxZPTvH$&igYExZW+0Z0iOT~}W9Df~$HB@U6;O6uJMU3hDFv>JJ zEjs#ODtpvrnR`{yf4Ga#(LJ#EipYw)57?|HzE}qjwxp|*K6nfoc(oZGS<-v zAhbeQUHuK)zu5;}( z067TmmvObAwne|WYa%hp5U58ICGTHB=Kf_~5D-n63T`=7zfl%XJq}e2byjoMwCE8g zu5Dc`h@N>F79odLEuiKB|M>gWj4#5i#Tfx!Hf4OAf!Zdl(c5;*{>IEyFg+!96zX?+ zKUrv);5r;+bfMT2>K}34@WVc-nS70`cn+4q0QdYw92i+l`>(U)as~|Mc-eHu1J%ME zJ|%#}H@6uDAtV%ZVj!1~+5vl->^JrS)Bx0~+IhSnj@8SSU$X8~ihWlZ-wf}ot^6?B z@^vOvCX;X6UPXp)^`RMkK)8foXqK8L8u^bcqSs5yU%qfFzuWKP*^yw|RDG2&8K6Uv zZ!5jzn2Pm}pzbSP_3kpcNcvh|Xdd4a^=yRZ0r5CAeby2yY4MU^KN5AMqC2C_u6stjyQ>CeNim>Po8nyk)w zn{A&r|I>2+r$1kN{2=ZAQAUXz-&A03y>&QPlnL-!h3vIkD#j~cr*dh{SVfSTF>#Jt8G&LEtBo2eF1rv%!En&-dzjwznK-FaTrFPxe_ zXY*7@B(=t=G^^j_QGWltI2*e-ICww8@OM_QJZB<^^t5fN@e_>dKA&?zJ=X7hCQqNC zJ;LE0exVr2EqTK6;&#LviPgdhQIt$`cM!F>e_baBNy9)U3zmI&c=beM%q(5c=~ zysR@?THmlR$P*w)P zDkw8Fqq0zw011mF;ZeVEz)g@bkLLE6%ETsRrN9n%y$tE1%#au`4*3VWfa|s!++t}- zxVY1rewAAWP-B9_;{nYZUN}PJ;UiHu0=3K;d9<7(o1;v6y1gLq3;Hr(0jN_N2pEt7 z_ll`a=_N!p;2&I(Z6xy<-VpCvlJ@f#Q7Hxc$>WygkZX&usdFGIP#%h?Wf_$p$&}hn zSHm2NzsuMnsY%3Nf;T{;CeH{6_^t5u7Y81I#@^_T{$5CI`L@%XP{w@EN{f{ggos}* zkFH^&D0E>5w6wR+Sy^AuV)v;+-vbQHp{0`{))CglXSn91qQ-P{^ewU>umNsB&CX1= z6GPAiYRn8i<3P|6xXN!L{s?t4oFu~uJ&R^hgP+SQI|vc*22^!nH-;V(yDlNnmg-KT zDV5)rBKEl+M^Cey3IJYq(p)f}NvyrO+;jfZ^%cnwGR{!6yRsJ76&`u381(Z6sJ}tQ z>-w)F_Eq)1W7ls-Gs88HKoh4F^pW8E#L)Ohq*(JJ)cw-7tn%JpFyQF*j~%0KZuOe@ zNX^2Mnre-{jZ4UHES^T?f4OrO5hw6zI*dV|kzC_S17p99QgwgZ6Av#`8-BoVh}T>W zGf&V$DKKTSQ0~#(>wv(AmT~LYBv$cU;F0)q%gD0_nfi)hBySx%~@g1%_`0r`Rm zmk2gqR=H#{^!$-<$dV867xwj{lMeG7j--IP6~HRGg_NP}uq+n-Q`|V(`vViOcKAB@ zwy?P>AeH0;yQI3jFm#-7Yer6u1TJwgU$}W>L&gd!aGe;;2kI!Q2=P=cmRuURTgY}c zHv!h*R98V{rvJI8O!R`}=)$ttj}apmFhkh)Ir8kAn#-9}A4-#tZ?gUf^o{@sNZQxD zG}^%Poh6f_39kXn_^gz)Afp3Uu}%+*OO7FoA?TXi&dYxib@XTWz-xa9WL#ZQh2f_h z1A+YRp}}I$!Fl}4=4X_+gu`JWS~Q1sZdp$WCSuZPS^PSHYmU0MHD}1penXIi1DbY| zvyj7B?3X}%+vv{kwtC91(Mx3kvw742o=$Tre6_5kjVsus322+(Bq8RL$jskylxTU` zP%y3~b%jef4vSU(9{`&`WWS+_oM>U9u9J`viqclpHr@qG%GL&xoQwA3$fCVmYhn1i zmyLYsA2|$~DxdR}4@0by=<5fBW$ArkaS{mC$4yBJs+>VYg!647?7eVDl@$bEjpkM6h4+Yg{Spo5`ZXcgMro_g;iIjOsN zh0vi6qI>q|7NI|}FM?(*beMoA0!NsFLEQ20D~_1oags~Qhgg-ir*h?As>V@uuC|iE z2?w@eXZTzIsg6Legz3jV(+mEMYxJ%;+*b`kUr*O4uvG_RgQ*+ot$CEgbq&o$fj&Rx zw3HsKLyjp2##OH7SLO*l8x2FZa=-LG`jz)-M;WlachxV^vt08PA^aaK5B-DBBT8L} zO_iC$J_W2$Mj3|o=;W^3BG2UHQSgy;DefCazwT(Owvq|cDFb(@+kv-#m1ural{TtO=i6cRQ1O*N=1rosaVJ2{* z_yh%x1PVmEi24)on3j*C+(jfbxr+tb06Nsy9kg$J?`D)IKuv)TD82%yl^!BOu3@PV ziW(gREQpiZHvP_7iaexl4$Qndt~sB-G5$0^d{0LK5!id-ur z&4WNF0Z(C~%LORYVjc;IBqfC6dPrw6Y4Q6wr*;MkX%cPSvGINk13)VPTp0mMLn1|n zT2LDQPcO`PE?+@h?Ffr z(~fSyNXjoLD6z+PNjU=0#7IetWZK70XrSq|0bMM@eNn(!DuLb$)j>H`A9n_%mO&bT zcZ{B!@7Z9xcJBl5OmSeP4}c5`&}Pzp0C-O?FS5l;C)m_}HmhprvYQ{cI+21zddOnOm%KSEn`h z4BN#^kFjzh&39DQ*d8KylQ8zv$5mLQsoo|`n_xvVDy+4=ofIu;wzjI<&YVBaHve(8 zEjo5K=Q6Avpr9USP#5Ko0BET$$DXms=FXnY65uTR>^FX7AO66*?b_>a@iZ~HEDJvV z_~~})f;psF5P(U#AFWOhuemV`T-QsEOxmfoY8)vVRYNhAsBnf5p!WdhP_*HTH~z4M;~ z01U(=6B-6+@t-$6&O?ukqqVLk=zprKbR=4d;6}b=5p9(*q^KVWf;eez2K)PL`>GXI z6lSjgXrnYGN@1dVQ$bp$Qcv73k4tPb8)XJOfMm{wh1G#mH?m^<&pMG(lRRL=EAlsC( zr@gh_iVMcuL^i`*2&k;6>)gy-oPzmmcuFJ{x&Mk zuyhGBG-NC8`~^-LRx6~XkOCo>l}2pL%AAye^FkdU2dTj1RAfjd&dd=6XE`fy;?vKd zuT;Acn>F{iu@?Qq@5}DIk$I+w@d!W^CK4SvnLk+P{3B0?(*20R983~j9c+hJAgh)v zYwXTB@6#`;N3^di0LC%UeRC|)nZ5`!m;L;jYwX0ubFFw>fz?*y>|-r0bCby}q`AR9TbI4bU+dN8Acu08&u415XOzNN&<(V;;A6_PuGS{YX50({ViVts{NrZk8R;&Ac?2Y+r=Uk6LpAX* z*0e7~4B#yS&E+hVd=hZ!(v(O66d)(1PFkJkQw0oACPMdGz$Fo@(%DogCbb6Nck63;N 
zkKnt&2R%ndaQ7gaiJkCHo`*WAcF|6Lvi+OxvN#4ps=zSS3_5s31s7>|djVg=MDg|$ zbs5%;04O1p;j~;fV#2^sIt>}eB7_uJ86;|-ThmS*)ma8%*qU2gInCyE$r-k(HQOpC zO|kOvlWfYQ$u^^+(3K_aVlTr~fSU51OF8r9?0qc&Z217=C|AQo2lA9y%`-fkPdwA3 zJb50?!cbN8v?6#o&#Sqv%9^+ABz4RJTZaL4_L+q?>3DMf)+F2Q-`-&}kDG0&`9%K^ z@ulVdc@rksNf%!T$Q`#lKsh~Z00a5?)myE;y21MTw9HR0et@>L8kz#iqWDdiT4v{d z=0>NXEBET>LjW`vas(cwDqT3*;K-j#oWv|7%>)}b#MeNM6Tol{upIL52IChe{iB=^)TP}&a^UJA@F$R($r`kq|Eecjd$P~O(w>5c+z%Bhq)>40{rI8=tX zXAodaIW>m|q&(w9pmLcxqz!_;iX>?ur+r&%FZ?}h-+$-<`j|~FGZd-C2Ag%eZQsrp zY{!Pj0nMeXO-VSim`oy57}iqN$HVjV#&`W-O&KMn3(0<=#Is zjy8-SIeHXnqbMEml~$KHvp@sUli}&S$`>E|gN*4qt3x`iR19o64+rtqa;>h4D-FYA7@O?R4XVf!y}c0lV+Ix2dd0^yUCy1Yjev7 zfDv#g;%rML!af9S#p*L=t|~2nG`I^>^bn}lg4xZsKZPx z^l?QUXrF4LaqU#|0VfBD#vFA>pRYBQ*F4k=0sECk;>P!9bxuY|~fK0^QfZ1q}u=iA@m?YiM# zFSlrS8!}JwR^S@WL>y{qj87jEjFku}Ok`Ykk&damv)j(OT|8FW7wLTx_mW>SXV2UGUZH#Pt$YrS=|Ni1Hx;0__m$98IR0Gq-&}u zFSGSqcH8~~O?J_FXILp4f~nuDt7~n}F>?Vcq*KW0q`3fxiGXc*o$BCJP{e%Pni}@o z!o{x(WS;8EKk>L9yo<-2!6`@WP`fU?{BK9d2L=4!{(bPauOZ`tbRbMGKIdz*I{_~P zka^Az^e6Wvy@dy7T{73q9F+i{QVTz z`qZuHVd*w?(Zvpy2%zjG`m3q=pfxo$*}r}EODGL8PI(6ULf92$J_z6)4!|yX&6N|D z|E@%&0obLchwS>#zL)a=4SbIQgzu{DvxN(eCoRHSn>P%r|+h_E1{ zjHFm7I$R(!ii0ci7+Nx(r0k)v$nf-NXGLHWd9r(VwS&C*YzQ*%xEZ7lNTaU6hP=@D zxh2?`EJOrT>7sW5z+ePWwiFyi$m&@#S5+sKKP?huhN0!TG2T@6NVv%Ed?uR+@DZyh8L+;ZwRY?M>q)PXY45xG z+qy-)$+fCIwV+wvHtOh+4x9)SL%j#gr|;7DqkwWU#{?RuDLNbvOXZd|&KputUBMKs zR3EO9K_5i5YxBeqDLF6@`$z@A@53)-SXy?9-LzvPdG|&vqrctydWhP^!83xvICK6y z>x4;4Qu?iY&RlD2ZnHjCGcJ_ur?5%Wu(gopBTS@V zM@O%v!_&%NUpjpfX@7EX%yhUR1IkJD$nV`lx&;EKBA}T92*n|CJ}7KZ;LVUng#bd} zSWs%2LE011V56VAxo-$Z3aOWQ!Hw|M_inlYy3%fVU>I6;lV+p7YKQIIy4Er?Gs*Rx z?Epj);I1Oz<)ET7{5d01-Ua$_ubvYn4UKYc%9+JqH+q!{=riAWSRHpn9IbUtv;}7x zpfW^ZyuqWiM_eVtXP9VW<#rdyg;3+HzD|G`z$YCPBcgc z9b$w<=8`MAo9NoY!XnBTVyS+=J9^@zc_}EKWrY)tv3$l(KcG=(YpvB0LH@#;$9Zvr#L#?v*!tMg?O`5p`PafOA7&wq#c_aw{HUlS`tu)j!EZbkDo=lP{tFcYbn* z<(HPTYJwC(0Cxie!c^3_(k}_9yY;ar z?70X3U>TtS%PGnv(wn^=`ruPmNDRWKl8GggXV?d?h5scNc?oQbYfRDUnMqx?XUk&} z3Er+n=fBi35VYo%|6RA@*EmqqpcE3`iNf5*=VT%!HOUOadjgn>^K)(Dyz^hy1A5w; zya#-RfEZ3jG<+$}jk$9}k^M9idAsq^-`nfo|5ASV|39djh;41Qyk}oE&XxjJp0J^2 zN4FK=0G>MiST-u$=Jl1ll-xY+O@AMae)rIFGFXUTS@D4Uh>%t$AGwpAM|8d%!n8uY zF#ed6yoxt`U;k|DE=e`IS8%b=Y#Yz$pm=D>Du zFDrRES^f0*W=moGWI#89vC&DKFGrcPznYsx=0`d#uzU7~lFMUv;p@iARj-z4YtuKW0_ zKZg!Kr$@@crh_u2E9zo7u#Yxi`@pu1PDj|;k1@LXK16;d=pTL5X6zpH1if^fe(^q` zM-KH9^c*0)S%g(8vInEgB}&VrU=QC^nK?naVH`Osy;@UUH&NKha_fx}Dz^bifl#F3ytf&xb~1ykGBSoycGTYo9+Y*%;$+Htvp$}i$EaJhg)e2Mz9o2{pRfoz*oWK5!Hk175%0< zOVjQ6>jtR^JJj zTRH(CFl?Q5TkVryxz|ccvh3>XZ}MjOb)np3wsi9Yh<` z-sXc|u+OH?h}hOT#z)0uYbV|EyhTfG+qSJlWs>K&kMaQzm2-4L zyvayTPU;qb=K-a!0L;mq01#edr!JaJls2Fp(QhFlq`Lu2`l3lzON4b){7D(T)Q>7}pvXiUw01 zqJYCAfcgUDKlj5GhkD|*{$b}MH!x&ax=owLct06IAwyrQ2y_VebiX)AG0nwJYZLR7p&wKU>9t}H3IMSBQu7GPS$u;gt&N*uwh$r(gj2# z=aW*Q$R4=oMxrMBsZW?Z|Kxn6AiXPnPX;M7rn0%MfN~81PwfLg5~fSv(4+KmglJ~M zR!P^9Y>VId()`Ci{Qh?=Yy3=ioJl@Q4h~W$cT6Bx`-l|xXmdq6GoWNA57QuzWCa)r zGw!vD;3KauV2p}d!8i=e!9Z3>77AbyXpBUKuEDa~Wy_zl4KM7m^WSl?MH%;vE$sl@ z%{IGqmQ_~mvfcGNY%BTX$C1l-{nqv5_Kn&6q7u7k;SyF00j?smN=dDeOU`T%IC!1@ za!`gSb5dp8bpM04bm>V}Javjkv%`qUw^W;0F_l`-7Ec!y$YRw^M1*A2Q>$>uiJv@# z6P46beCKjI>5ZRP*S{*q&eiu?cQ29pqjQ~}?-5``z4AbRhUy}EFFNPlFYB2-q=b;; z#iPf#*=4*6K{n`LaHx#{=kU~o>9hZ4xd)!R4|?DvBWkxFIVF%xtCJ`a8i`-1H&ON) z4@~E_yNC3`5)J;*S zEuH8B+BYDh$a;uW?PrAyRpnd+P&>y8i5!kGHl(BMIZ%fTMMpt)$@wCZ58#l}xYDX5 z8ckHc(s1D%pl|wdRQWZ?1f?S@%+IuUzwNE|pVxiIWvj-9n@E&HwQ;TlY0K2VaK43! 
zB5!DJ`Abdze}3-HAAFWdYhHj@=o&rDBXSUhS#70gM738P9%W7$CN0<2dza&MA{B+^ zV`!tzw6!^IlF~3}-b8;0As157u`1qhs>?qC+Hz)z9VyTq21x*{nm=5YAXYzj-A@k- zEd+Awu{)2VDDIoSeLd;o8ttjoThJf0GDLidLzWd~Nv!tq4Rq;S$6FcSG5S&~2&K<( z0n{R3ggzC(r_bVVoe#;ODftMXt^QXS3BOw9_Em?a?~bHGYqkMwYYa&CNqw79h z?035kOQ-3KpVu||HvK3|MY4bUMyJ8pAJM_-s(0}?balD~q(auA)ACO#lm?LfiokAc z#~F$Y>jltGPtLZ2%u;Bt6<=~3pz1sJKES3YSi9k1zoWyExk-4c6eV+ zn^o_vw2Lk{iReC-?X&DPMaCKmgp9UBiU*&D1yZ84M~7LG&b3k&V<E>LEHl%Ix@(xo+Gm23#A@Wi39HTSiL~ zE~ESX#h1||$}&^|_X6?tTkjS{Z0jCc1dt~>r2xdyvzAeDF8x}9;wBJP5RuA^+NLK~ z*fT%9(W=|pt-6op{aG3I?H_;1`g^;GmSgimHf0%RgT=huLZVTN;0GdFL`Mf>=>tA1 z(sHP$&4xM~a25m#rHrRL?s?K$h;;taH-1iiXgx2b6cJsNYD5GWi(naTl7Aob!E(hY zH3d+Q(UNm0pKI^>mPM$8M>o^Y0wI+WLK}&C4m|M|fM61B6e{T{IFiyWZRsbR@9zKQ zSK(V(izDQp?j&79Hj&_FlM0fpp=yH-hjZ3hFef;X{AF=jMNkmihtX4yap5br~@+3+WY_s*@)YFl{ed(!e&X=8I2k8TC!m7Oa1Ej|A z`xQM$IZ8dFeb*Cc*}S+5kzWBeKs&X$DU-<(lF!vl&NR1s$>u_aG$-f zajlgMq}YYWo=ntmnoXNfZVmOd*4xnxIGxPu1~!9a&I#jOl9NFxmkRR$y)U>FQ6s(= z=tvjn50w%L$5LGhmLKD7AB$Y#gNZt1NBZr#C+>2dmlO8f_k39`dR30<4G&mr2OC-v zRT&0g*Fy){j5LkDA7mp}fqf|e`f}PXJnJ1V>zVD1b+lh#6)ypjlp(p|a)yRWu4opK z?9|*cw?5n26;oRXle-r&Rd=U3nV6k}Z=HZmx9xc8}?Z_0%&8P!j^FzGmhI8|#NTW7=U zlOad3bP@$GM7U0Rgmga*un;sao!}^1*{3KJ@wT z9mZCg%~BM%3nf7AAEQV76CWF%xfik`i^WyUKSMb0l?ts7N9%!xCVSuytE{<;3|Dft z!>`Fi(GMe!VQz6DK^Btj=x6j91CsBIGmUpH(VP~EqsQ@CD=3wcWark^|HRaMc}=S& zDmVsjYrIQNXl1IWoe)L!-DM8vkjXItni}PPt$#$XlqAq+$vu5QK0srr?oMmXPnt{R zER)_F{Gw}ROL#ZA+>kctb zCHhJ?-G079%L|TtiUU#g6FKNu$N>`vw6q&;vHMN=tK5o?pYix`30 z@Joj9=%$R)xTJi=Fh-?72!LKRA!^5zPq0*?eU*b+K~${`2c+WJl<9evnzz6~73pdM zv87OqaFOFjO36U^$_ZF~kmw^27)(p!~nPj6rQTY9AW*WojqZE%I6KI%E zK598j`USWFTx)}}-fp5er=IIT)a2hIRmK?&N?^s@X(x{n`ygOs89R z{y3X+{JEn!kVSv$%RfK7KZ1|qBW>1|o5M;1p6@(Jot?+%iX`~sp=HbMz6bv3{V4ES zZB)Pg=sRC$`MbcefTzSfGLoDc@RkF2Fj5lyBK}W?U)!plwHudJlj32G{pi|%p)8)s z;h@zhp9{qBv<7sf8(@?uhmVXV*#z>SraUW2I3ecFm~JI`smMK|@Q|g+ z$e#!X_oZKaDR8tAM`f;Zufr4CBrI4Ng7FMrQQ9a+3H&9ki29zh&I=Oo^tWV{qRD5> zIhOw6SMiLDZu-Xal2gLw9aZ+s%4h6~_rA@(_mgF|a?K9wAe~MGK!l7tws?A`wbhc> zy_L-&7gtzKZM7w_L8~@K?W(3;!=yUkS;b%}3&!EZM4sm}MuvD^3I=65ZR{foSI*BE zzlm@2a86$OrT-R_Qe*M8h7{vlf|@!h0+EvSX_S|_2H7ew$Pa*?+*BN3%oTAW!FB&7 zmwzqz?N#_&y8)RCnQ8!wRgW|xdVOUMG#~0|qKa&~n^6dTS@m(|WiRWA9gX$y8uJrQ zPmOb`>xCGGM_OZ+Kbd{$E^FwdGmbid-jk&LH(;`4;`i{1Z#D zjt!eF#N0N0VVRAedy-e@&aYfV?)gq@C&#wN(8|h2(wPZ#MZQJkvCPM(*f zNLa>{%J}%l|8}Yv3j2qTJHGQt>H*(tETcP#cLX>q+Env@KY6zMNH^C*x~zWmvz`qH zY=rTv8AJ0ySTEhH+Zm7gsDVA_P3Ks89{Wq+XxBKCJQ7oi<^pFwcv+ z&es}CP))L!@1B+dyFjTabWXhQgNDB@TO?O}q%c|-D*l;ED23|-2eAXD8fOeL&Fva{ z9&s$)ornp|Y98>TT5zwPtFfn2B)9ZL{h7J&=*qFZcdJs7sAmFdI34&b5hOzCH`k|; zO9G>1V+^q$Q%(II^c3b|f)%uC4wb=6P6(}{ z8|_Sga_~6%h-d2Myx_VbeT?p``DekYuU9W6jsyh~6gbQjNC4Z1nZSwS6BIZSD6o0O z?Y6P9-hOoZqq1t4oRjSA^G?GU>^WrMc+@N4YKX}{i4+S;fuIRLFn$yr4Zw&(oy8_i zu_#kL(bLH&dTD^-r<`({O`AB2e8i15_n2d?qq)%v)6%UE1$y6lqEOJ!W=>8gZ{vKH zyDExHii(!Gqq74JN|}&CsQ=P5D)+T=4h7tSnE`97Z?Ij<@3f-Q3ZjU~xykhhTk37W zTdt!1fbbZWG|WMqKGe3vq5tF_7Y*8-MRvUnrKW{uyITXEXt{{-09~NiBDREj&^b|lN(ymoNnxE zGeM#N0j{O+4FQyQVEm3NEVXPlb(2w*mchnz(7=IbYJn~X?Nl#;FZ#|#bfh~-VN*I( zJ2p03Z9`pk)E3|!MXz#yjh%Vv)v7-AlCl`|aR^6=y4W{)8Lcm09i)oyt|(94%{4qCM$=u@TZew0HE8x}?Jnc61V%LDVe9 z{+ZKf&>b=Rz}NmokAq$q6avfWH*F5uP5Xl-i2~8IOq(|KPjvq9h;wg*2J*p=`PZ?@*`UL%{=E=j;K^~T^8#h>fb_TqbX4l=nLF9VH$L+V@inBrV_5X|t zY9M2d=lS@6x5Pt=$UlFMz+aQa^ROk`h7Vt;F(*VtsuotIWEm4AX?NM8^ zaSyq!Q!N*{uy@n*HZHlw+J};@J*5Kbgh|74qCNcRA8q>FIriMMTWLT2PFe}^aYSjY z$h{l~>{Kcpf!!Wt2w;;2edQn%IDFSnKF@}OskV(&75Rmes1#$75}Y|Sp7fcv`o}}x zU>uDPP6kG9*m6?)?ap7_fngi9lEOTD+lRi!)rsTpp}?vYk60R;3RV=tB`{LG@9Z$g&Hrun64VYVcEf=wn%W8uRrKv*js+}5-nv-*;jJJ8` 
zoymCAWFfi250oeQts|hdW_ivr#-lvvz5n)2|Ly!4c!LjL?-!X48 z=OaKBb*(*U0`{VBndb$(qUG>T&v1?B$k!CbcbBQ?4syt9jEFh|!y#L@ZacvcX1n+A zw%u&JIxxieKz~nSbL7fxtNsb7_K!8;z{Y2hF$zFYIhtR1K|k@hR>MhW_IEzJ&LrBS zx9E4xy&7-9A>B~pL}xX}@)A(WWUC+$9$LS;ak)b@X6R4=+uB zbY8&rJiZ(~$DS=4oJMM~=t}Bju2l6IB6B^bzf->c(J4GfeKD#c`7>3AK;IBwxJ|@)eV%i|$$o9( zPl5so3LIVvB!KP1OW#E42?`t$6xj6GZEh?!5-BCaQTaF5AJB$?GTx*z2+Y%RbQy&x zbV8b#+!Z0FC68!!7(hf#0k(_~m9>~O4U;EKvI2lmR5UgijQFN@-Q1SW&P1tLJ&F>_ zBKKv@rr#5(Ibt)%1Ed0MTO=E09o2znqv}x?p00YJqzF*f(Xj0S6cX~5D$0?1$k5ue zb+^@2H(6@wI7`kTsx~Xr#?L%YY6kbJ9lDP40G^_~?Evr@DE57n4Jc1;(jgScesX5Z zK&6GQC@RWHie+Qt0jsOpLzJPSU9~hh-_AJu?XFOIohqJ@zzK@EG8_`UpkG!NhGhO!E1Y(k^>nmZ7ohyk#~-(rs-4!|(`(@Wq_a%2 z5B=aaFJn}NL(zgBv`y_&u3#x)+jg%6NNBbAU`aW*Sf>k66rOPd4r_Q$OB_zwVR zlBR-4(Ejc=mf!Vwx{BdJ(st0t0%4VIL_l_SUY@s8&kFJgs{hF!ouY_Pua59qr!P-% zLrWkuS5t1cWu|3Yck3=f^EX3l0o%dy_o*jep&HRQfkN*&1$72;d3q1}Ys_HQW7K9n zM{49S4wsB%Hhv`PT|h}x1|EI$gHL|}02-rB(TrGYJwzyJlgA_?JymxqY2AZyE1Otu z9o?PqEQ%}O@qc{i&9sSp;ia@m&!PjHNr9XSz0{Hj8<&OB#z9j>?e(zApjGP)z5Y^m z_0oM|$`|kIIiocu>a>sk&4H#se6&^kZ4_@^Ja+cRYYe0DNb?d>Y061rzzQ{iJ z=}&kOfkC5>7);PFP}+HiH@MTubPXzsAu{d;Zc;<>CK2m`vO z*~Op#70-C($E~;hl)mLzv_a!reW13-eT9Q4uCY#9g|wKRI3?Z3O8Kc*yz&RV3U}Oe z?Uk0t26d3goqWDYnz|Rh8o^;CN6SbklT`z4zDZ{vJSS(yF!D{jsfD31J$D(^ad z_nvERen~4E*574C6UrT!iqrRyQcvfP96sVt#+`$F;#=_s+(kd=`MwIoOH^cdkWNE) z1E<*IPi?k&b0-jeebDZEc(YBLKE-+H*^OHmZ#ZO_HwybVv%Ect6h=j)?#Tw6PPQ{n zT8NQIw12}vq6pLNkrk`GPI8cX#I@?hGnHN<5{|M;q{#NvwECQqM?UeHv&!s-tKVly z{N7Yw?SQt@vEZ?y9BNPJXbw<271^e9S}ilkrmG_elw?*1X-q63Nz_MkJz&T>4kS)~83f{xnGZ@EmNK1CmHz04dHQSgkxR5@P9udwc zvq=>*Z2P(=tbk3cXP$MsKk&Z)ehYKgfYt1+x0=ql-Mex(LBc7Fzbw*cA(JS$g_T69 zI5?Cd>DRaZn04hiL;$jN2(Z&jdWwVHs)!DK_YaaW{4R8l=zV^0zH`Rmo8IKU$9Fyx%*C3wo!1q0N}s52pqHY3eUn=p z-K|ZHR=2*=qTPemgtNDs_BdsbUCfX^f6OMG*0JX-wUQargXxVcBr8M{cT7WImm$*0 zpt0ngM)mtX`Q5|a)W2E2gWL$1oj*FV0q!j;0k)Z+wL$d=Dd*yGSH%2H}OC-hOLl<&mZ2*gN0AUnWz3Ff*c(gos&T-Bo$2BKQj`5pxUOB}Vo_>nobF};suswE&_M(dp>5uyE z2ZW4h@{x{9P=M>xjF~R2tRqFAj4|msbSJVt+f!hAALxD+A$3XRdD)ce{jT{$9i+KN zeICps;*TW%E*=RUuXpwNtObjM6N$eG3M43SSSgSIwht?L6NM)zaAZ(m^JBL%b)yu^ zDDg=;WKaG6W{;Q@7@EQcX-PzAMo<8>yxOBrxn3Y7>H;%2(>#Fm%rid8Bo2Uwf)?%T zv|&KrG@=$eQ7+oaPds_X0-FdJG61;RO@7%p;NZc%%UtQ0J{|=me-7G35#=dvQ;LI@ zR|XoM05B;S0+O35mr+*~JCuBxuDT|SV%QBpyXN_AKzJRTjN8HszKku6kwtw#JZz+m zD8uKMTM9~!0KX|yrV~}n27v&&IV>%fVb()C`vGYByZeZi#9*b(9c|r2`Zn@xfNp@O zqM|~U`j?>A00N>^#(FwYGKcK+%RgcpSNs~~Ms<=A?dq003(~CDTO#@lb3O^jPa*i&g3OrPo6o9^(e-S0PTL>rRYU5;*~?4 zow10J%(5)Chi3?n6XvM=~-2Mlo{f=Op+` zIZEqUs$ah3ibL(t5?ELA)gJgS0PNL;7;ON4G={Vx9ad6QMti9n--mlxrk;rcDZs%2 z+qhkMP2kMhC!lKvdv`I+Qa7O<^nJ@Mp9 z`~3rdu<>Q%Z0#1(H!u#Iuc4eGwkM1$u^)cxzerO7jm7ioYIvDH{v0XKYQLg`s381E zItkK0^c>v6O8*x7%=e$M*PVKb&7Y|BG6L@y+3G8v;weU?e2dPitV(4p=%XAiT5=vI zGFTf#x?xNo_OqV3m*vaZ86!4j>LhLuu#?&aBIgJ#umA)$>A?&QpM?4zS771uh)P7<~uZ!;7J@M&{Duu7IN;g;{nPz zufC8Y;hy^*r~lw#cuG-lNdT{M!oWGIv4aXq?~w$!4XnnBiAI)zGnQBZ5|V?6Mi%83 z`*o>uCP2F|j$~<+Q*^M2&UqZ?>YD zWuxacCW-2+9keGm?6j;f;C)xrsv4W@xLM_FWIE24EqmOeq$;xBDp(}lst2=-%WUC- zavYc$R)`#FAic^TS3D=S_4FcPcqxj5AcY4CoDppUrfg71?UE8bfEhzh@aV1qsK#5!nB z-hUjMPd@)Y#_=dI{?Fd-f9Mg$FT4i$e|)+63TGN~fgFyGS#Uzey6Rm%L;d;MI#w^g zn|U+I(s3lG&=(GDXU%X~vYr0Z94E;RzAjWvNEH(^x|;Vpa8h_SmB<01w%KlJOdBo}9_5tqvSi1;mCj;764#HyjI-ukaz-^2aq z_HTWPW(AIBwNK;|O42XR;5oA2!c#<#>U#5}g z{Fn<)MJ<5d4yxizNA&~JgASC-by}h3&!T_AqdLcBpZI?hvHh3mJL@WVVDvZ=LGG$8 zPy1%+{=>ucrO1vj1@S3B*-YuJuP5K9G!{?Ip!Qp3ukMO>P zp6nO5F(e3)PYO5~b28{cu44RPk4g7ZSsFLY7dm_egntU2qo1VfNsW^IHg=86KL4tD zCb(C>>EoQ^PxR{&e-ac(P~h-VAOUP2Uiv0VPf*~9punc(KV!xQTwzj{p&|pZdFRuB zTrwgB@&FJ397r*cneFPcW_9if=agFM%!|n#T44L0e89#>hAove9K(Qz!+?4+I8{az 
zB|)22rI3O|sU6UviDRbR@=`0Bb&=)wF1OVD*_M=1-Gwj%77Xuivp%PKu-2hanL@u|rw>ZEFg2<+3xgJTFhT^Y? zOVSl$N+{Yk+0OgojeoVjpTF~q4%8}w(iI{_mdWs^eFfuZSnIYLXx{p`^i0>njro<iyrAe#(srI{F^E)M4Ndu#`rG%6yP zNYW7^6pI(WhozNa(urhyPvRL6l~BfAm=pu>hSwV!yYCT@f2a6h#FERH~tcmPRP)y-lWNdT;m6 zy)*ysZ=D&QJfA4074fHKLl04)<(wyW4Sm0g2auNS^auKlbANJHo43{qszfvOs~O}lNu z{Mq)|FMS&zrICnkKv9(uuKGd1^!H!!PCNBY%RN1{$krB?@>7Qbf)ouWc{`^p>4!kZ z1Z?-$v-ua3(wiTyCj&$F=3{ssdXl8*YXBvUx3&60U`vQ{h~~yY38Jw@>mwjr^^^oC zNh}{%Cs|`Xn}v3_+uWIzmJg_-awSn|@)LC17bd7e{up2ss2cNXd zKl?$~XDw;J=~qwK_U$#+)jMnpkD6fH_8hY9TlUz}#WQT#sb>N-W&v>ZS?!L^R!}h( zii%t|F$I7gChwpsoovXrYR!wbzZRz>jttovISWKvCFiL?Y3z?0jhw5r& zn%oY2=(1C6%$O3|ZPaqpx^M<{k#8I#*oO8FoF5q(mWo|%>+B=CH{D7~@-%m)tt8lv zo%^hfNYqGw)JjY9ZPV^{tIQv=2@@(TKR3%xy7)7HD!MoH^V9eLj3|6X1rsSo|3!rn z@jQuo#c_c>c1~o~6O}V%#)LOCZ-2|fsy9F86MR<^ZcvnNVjA_NaRkxIqI43FnmW=) zD@}JMV&~PDj{*ky;{#y(H`kwM2U}uxzv?E~+5 zv!xe~rR_+EcqX<{4h>gjawd8D7y?-afHu~K&jE!%;KY}W6hKYmqphac`semqZC#y5 zmg~A?RtY$0g6&SCJRR-rz8S2VR5~{Ox2vADPhRs0%fW7b=zW)Zy&b69W%oY2gY>il zhJH0IVwuHIq}6oWSHJXWiQ$MJzxet!NPspbS@B5}T5`uJI#=yY_DX%PRzHb1U--Yu z#9+f357b!0!CH4n6jhX1VPz#d;HCmrBR@Iy++4^4;;c`AmWum_WD~s)5jQuwjKg^t zApf0L|KkALe=79pSN{pf{e7Xn|Ml%2hndQ(|S-A34eIkPxu`!ae(o{d2eTf&T=L;I3D0irvQGpKmkqSwFCG7z_xD)NgWSR zuD#W3Y)t8FADn4oUyw5v=R~$Y&j)tM#or|((vZwtHj)wf^QdAR2^Q(Fw6+&XCt_8# zO*V1qm7Xs$;En>-tw$0|<{B^prVaG>5Y^faO$uD&0K`$El41kR=$`7RM>WR$85$X= zA99K(+6_Nt!!7`DP6H$-IdEM}bbS|`tbI_r6qlAF!wColnupqYTfKT6Ab7w1^cz-?xKYsCY>t{mWSle%BUHEoO1JvW8 z)+zo{0Fo#|Wnlu7MA{FCK^kpDb#VWKEA8F~9;J-b8vjM%lLff0NI*rOrm=$H#&3Mt zDk_Ry-v#UhQ6$tgGSMb+TX}9a?_aENl^?&X$Y=%SBVFDnw>Q!Uz zB-)^FIORZwqWGl(nhXaK`H4j7@7rBN`h%7Q|6pZBA^gzAI12=JM;+|(_JsV%mYs0b z4^(}x`PlO4&0Z!!EdpIqk`-YavXn6=xL#=@FbMG8QNPFX$h*O{JmuW?yyoY;+6_0{ zd>is*@`4NnkirIJ1@J_#m;vXw8iWc+sqcH36%W`JkF4f;w~5%B0Y$_id;Zgwde9Zx zH4>#}Sk=%$8rd+u;6#9eBP%N(9RN53kbv3Lql>i?L5~saL?-y1#>zFKXHgyZ!AYN# znuar`!U3`XNC!Aa9{FkLOa`(;cTRfm7r6hGN7dTfEw5xEx~j4T4j1W~NAT%cG8lTv zmLI8tQOC@Wc|3=x|Pyam zb>eDroSd3((ZmAUYv5>>hRDj#m^JzDlgU?k>h^W_x${L17p=09V^R)5l_wRj-lNTB z|L7|nIHg7)i9vs;9I2jajO&Xg1(W{&y!u@G#!sKHJXTMMZb$m5G-RU%T1Z{Bs!D*r zxSy~8)VnRKV7&Ff6Wv1;|F&&A?fDlsxU<9UD>hrUGXr|3N!YTo@?6F^A!}`JV}syA z%gN8S^0HFPVMRt>o;GOCuzP>^I8G>y6{2k33$HiM#xR`P`Krk5)s7<@UsU*&DZSIH{!fQl=RUJ`lTq|c6f4zU=Ix?=!Yq^nCUeq7u_iCeD?m*|f zR=glFc`eviMIfs_;|! 
zn&725k?dH|XZ)#DxWo^Q-F;ZcH;oP4km(mr`rLzi6}%<;CUfTZaZYoqQSjVXJ@SL* zcX}|^6q^`atmo@4rR+cJ-9N;EZ5KuSPh0|TfCS>e_8TDN_#@&H_(zw(hWl>>3?MQ| z;Ipt|<}pKEHT2tj0LajkN6}T9)2O3X_pQmDCNPx?-(!z_>uT$3hqIXI-eD%B+T<%0 zUgLBC7;VlW;4RE1f8y_s0yw7wjApU{U3PM=9bk#N(m!iSg@rQ5T5@hMf%nM~0}6qh zihlIxjtoHV|JB#nND82nNu-u!I&Y?rEi`fFnMC6*TzZy;8@Ky_L_n1ybLFuYxVH}u z&xI#m%6An=!w?^e@J=}ys`#@6uKJ7kRROjF6Sudm}iL9b?xX5dXd8Q5R_?hQFnp|jO z7QWMEpr~EnbTOzE_Adn;D#(Q^iKGgx_ zR3hyJPG~84FA>rLwtPb+@*5`NI}06AG+uLai+$w1XIV*Jfz3PRd~`|;06hVW*YC+h zQbx5Y8ucs~R^XqW@#v4gXNmQP07L=70krFznr+I=scazFYw3Ax+{bmvMAeGNI+mJg z9XJ#etuDQ}^LKx=2OfHuIu8cg+5qyc=bUX9UwocN$%%?%n26TFw!@Yj?tzkn2z9`S z0USgz@dDyp5p{?{=M<4Go^}I3O-0T>e6|W_K##rcggn<|DuTRdz)qS(<_>YMbVO}J zKuZGjAL~|cwnkQt~Y^ZyJiE{3RmiblTNcUI*>W_k8rVKIT=gzw?KmAp#KqpW@_X zr(hp)Ej<&;i~>MtDXYH{?4J7{1MUH~K_FV*Z3$2i&`z}vfhIO#J`cwF0QCS~(aV@q zs|;B?cBU5@XA`}c1yGqrRDW_BpdZe_EYch3ZF61Fgh_fSy$}UXBJE1)YAJ@C5?T$g zZ+{ih#l^InI#!Hi+2nC0R;CR+amJkZp3nVRIrltso8=cz;5@b(`O4<1y~;t8nijIm z%xsspw^7O==ShUsHER#n*+WhBHod&kPFeUSd*>;ay3Ll}YOergOB~4|HXIe7d>C7y zw!Uo9IP#flB`Jfxw0*(i6IddjY)?JC#;U3h6Xo4yFK*gt4Ylo*k?3ijr4>C9ba@Ed z5(6YVcENl*{nX{w+}7ky9kt)RdntEX)Mif`4^U2fq@8Wvyp1}_WkXi-gASoKlncd` zRiw_GGD&($C#L+W@6m5gtAO?XZlr!pSe18V?Mfo2)_f3DFOuouf4{dyqJd<}K%;mUN8$MXu49J@e<;+%UA?T}A zN$N|b=Wkj%+on_&s7REM->I|gaY4lP?D4tKM5S6`ex9Y#-snZB_Y52pIAOV7k*H1q zLf<0lnM=-JW*Nof?dDs4O?_semuWV(kh~0b+003m8l(`xpNo!#t$0j}O`4N!cl`2z z9XzDyRn}R-C!9%}Ov2uta>g=n-fa8b-7DRJ(uIR3k9;(@w%NFf3ahR<1Pv8VFVv}+ z6+r3>WAq~rzPR0XY+3CqgR-C_%7K<8KQEvCB|7c7t-Ea9jvARW%F3E5%KydBU-K#w zef@sA_4=#GEA~c#3hK1TxJCsob=s`PLLPlcy(A{$Ace14!y%3NG{(^QD>l^Udg$XG z`k5q+<+KquZAxOegQhoKh=4#=`l8vjR@cMiT~ZIp4U$juPvqNA5wdZ1Kh7o zYVoV%5_sb!uz#!e?ZM(Oj@CFzpMA`OVUleMW~#Dj`o_VS#Xrl=xwYbhAEWQsGk4Bn z8#TW6RD%1}w*}8o;=yw`;r33pT`7$I?w|RE9??_R`Ps)T_RsM@aS6mFaAYMA2eyx_ zeB-IdCGbxxfoFd4DSB!^N&wX%k2GYWBeafAUErT4OHokp1*d(&>i2H3LkH^MX?)Te zx4g(klmj+nW+A{-8ekA44*~~0YOc@v04y`%T}~lNRD8~rr=QJ$BWh=#Fv%=)jHTty zU{a;1aSaa`OfVSHHrL#!o-mDyJ;6 z|Gf5tN!lbHM|jME)WFpM&K0#8byDL=adjwCU*jL zA~wKSCQ$<<2P3uPJhA~869CrUd>Gn>{X_*f0q!c2J;Ac_%53q{<&>XLP}11_!tKyf z0N{~NLVIoOw7E>;c}~}1r>jsjq5z*=+qXK+fR^U>_xHm&3JAnYnCO)sWz#*<_Iawy z$Ft))=7TEDOoRVEBvU>Hw%+7`X2JYR;JYTOOmZLLP}W3Oa^^B0^a;EaxGfNMkh~5K zc02V(5|PW@JzZ8uRQj=t&Y>;c0Ya*S3K4fMW674l;4e*ubrilus!&>SOu-uzmB4YvI0y{N?)W1Y= zRwrj5TJoS5W%|)WRZtG}TP`4VCv*h@wo9R$X=lSR@rWNdbcpXI*3`fK+MC3 zo9y(HW_Xm||GVz%{v7GIs#@T0SELo11w|jig-)cqxFwvVeB0sv zZa{k|M@Gt{)dgB&oJia22lSV7L{W&^_$dMy2>Cm}W*Yj;z#b=vN6Ql-_qCNf`8{Tz zdCy!Vu8pFgy&w&NN=X7F0VeBe+w2fP&HP!@pms?DxGD8^B=DOS;=qJ|CNFA(D!aZb zT3FGRApi~y{7-uS{|?yR_tY)4CqQ!|lLg@=0*ZFDx1wXMR#HCC##BzW@BXgAE;(zu z{q(_`Exo13zWSXXx}LvYe>VR9dIx6`0c8@9iJUp&LC?r0(%SWmNeKYY!}mRf142*L zWQK+U1}z6&5;9eLjj;M67IS!q`h*&WO*nTCGy(R$*^*;Kc6aq~8x9d7`c>xx*pjlc zB5OK?6NdDJVzfM@b7;3V!7k8~%LNcF7TXsGYBPW0sD;}+NiBAK&N zL-6{4UYUEvQUAiPtf&mInL3kQ3%O%3xFq1$R(`2jiW3Fc67u%R&;1C_`b@j%@=I(8 zeLRSMeBr7uJLqRbYYSwQJca@8lXN41yY>o5LWj>dw#-U!WF9`a&Ca~&ZB{yAI_2+r$41T9(|VWo09=I5r`#b++ClENIo z_)OYSvfc6f=hzTd6jwM6Qj_iYV<%b;5yPqQF{k30QB=HAR$a(e57KUQE{bzVv@omI zAF{Wtm_k3|g0~fyUN2GnU5#;ot>g{u%{WwWd zNRETsS3|SZZHrDi)4xA?*R6KlS8kyGWAy3lb%D*yBx-t)x=KPga`wtmDLS7#RtI!+ z_gi{?xm8w9vST&SVBDRY7>gW6CKJRdNW9WDEcZ<4rzmIDh$1i;f(FK^RK_h_HSR$ii3X6 z$@wjZ4;xJ5+|GrLMp=gR%9d8X5iL_>z^JRdq#f~fpZzVq`f6VZ1Ac6Th3llQH7GV|AT@AR9c6!r5tl$*0{>7F*tcb~=f_tx^2z5w zZnx>%_1*ty%o6?1#mIlLpDs10sInqH8s^*?V;TO^?)b3 zj`F|h)OYbrMQCc6G-NxT`?>A{5R4JgJJu>^F0cdJ*8(CdUBFWE*bT7R(RzT0?SAX( z>m~2v_4HUTCaC>(`PDbSJO~qLtwMRey;S>5eFBv*k^o zmH_zY&Is-ipRPc*Fz32A|C|U;XaP!Q0(OKQ(98sc$WDRR8=xvmbe)P3CE9C%NWFfd 
zU_Kjm5S|2CeL`vHkJ?e9Y`}tbLYnOb!3On~5|NvLV|yc*C*=_^{$jv?jG(%r!?*Bh z3!US_m@OYnarO`P^mKF(LHes~slSU5^7jsW&kSi>4;){4Kzhzj160PLfT8Kd^^ce} zV38cwYRZy?WMAX1jV&0`|9R*)3hUMN@!eHPZlkKCB)9#x1W39oScnT^GgskplzQGx zPP46rnME)G)!4_!Y^vlGQh$=wT|U8n`7j8q7!SggQE=mahqXh;N@ix_hH}bd?$AH( z!UV2xe8=Tln{F6c&hQ{N6$KE6C1GEN5_e%tz2d~h=a`!!11x@Mqg^zQ63#(E;Tqr; z_~$=k09Yrb&=8VY0a8Oq&Hl>OYjxgA+y z%UXIHx3@{8SD=Kln#ARUh8YY6a3mHo~rQ zRa22E|3*KO`4%b7_1VU0kmdRM53x2w`zwmhFqb-o|w=2CEvAr zApoFcwY-#z+@U_%)b@%qZBjSM9|I_{Mn(W3#!>p{4@O$PwQ2c|8f780f7stx*HOZt zYC<~lB@gLDv(18|y0GKX4&n`b@EcgR$o*mx=?L2|R;{b_@q2(2XfMVPs!F}q{(fa9 z^DJ6zMsj_6MSfN^w=7S_&f!9>izhJ>gxt|XC%7B1LEubL_RQxqAm=)vp z`bNoQFboSw9#tmQl_>8qv|-EhZDm}j9pYdcoUF)HUzORoI1E1IG1V{?3beJ(QKTnw z^s%X6K<`r_Jd{Tom%2{cE{@Pujo6^YpJNUPr%$=;Ho%c)K`WrowO#wx>O zg**OM3*I;pyJu_i-)Zkh6Wn|ay*vGWQp}=g4XrY4r6i{`W~3z=D)jJ`CL+IC%BWyz zX{1=2l-z^^t*kXi+ZLaN(Oq>5$_*?vIpUB#sh`K7${6aVwpo1Zw9Ky6QBXNAcx8)q zD^-~c)64fulG|eS%ZW|@7D5$bUlW(;9^SrLeycE%&qMl!c8YLmRb?UfCtJ`+^a&HZ z4g1u$L5k%$&dvO^M5juW`uPU%y{@b*3&?ixa|YJp5iXU!5 zQ)y*HQ0?JoIO~k8K0zc=P5?Nm0O{{g8JC7qsYF(^{P=T#BctszV}P5h37MOr{iNH& zKel`JiGy*L+Vyo(y~Zc^)`!2G@xECQtDcnXN_SpyotcnF|3u5(i(B$#-YbYB>vXv&|aU??8GRV6|Zi{lCivYfi&L?4HD}%~r+;NXgV$S7KR3 zXtVPfV%c}{tka5je!ge4f%e-tjn&jm5>r%~gJ>{3M@`5yXh$S2GZ4HS#!r8Yu`Eic zRfW46Vn}z_*SP@h*cu9g(&JJ2wV^d9%&8Z2uk+J2k!y}?hhG=vIFd)Y$|%fh>)1QD zNu69&ytemfyHAO@@Y+{x4>#^&d0sOt?A!pUgBbS#;;blYhigLeHMv zf&09?fEmy#S10YJJ_+O`ko}ODA2R-ZJp+VOrDUZ*!%6hk<$E`Yd&G;Xi#VDI+HxKv zTs$f^ccra%-WnpF{q|#U zCL?AIrnd~j_b5Xc@S+mjxKDS%SS+@+}$M1SV?6bRCP zHeaMtznYRWY(jV}m4b3%-+x?cp~?|-Dcxs4N+BIGDslLXeL18IV{hQs`g*x9U1c-# zsmo(`gsZoe(P#}wHElXK0gq6Lhx{p<+pi%x83$_iR`)|+}v0WC$s53{&?09YxYDOdOt1tt3?$Y~wAvhsWDSVW%i(cw{D ze~Kp7pCX_GzIbecY&NB)TH1(ElKj>P>oZ5yixG)o`!%Hpebjyc!=@i3(^ldaIjFfP z^Kk07h8tAX(Gdu`bFDm_Uy(9CCkwp;B^pz-MM;#Amwc8if`tT-7Kzo2>fAzvBqzrJ zD?9d=p`-%_Ar|OA61Yui>E&)yZs(qHiP3c8`XrB559}&s6!AcO{pCNMI?fJs8Y(z) z>@;}1Xk0oSl7x86*V`|mJc#?jo|mrl8C*d7F6>@~%g5FdUHC5_m1EsJ0Wy3iufr1Y ziUU3>yrY-}ygLkj^ljwATm~1RUO3u5>7Hb-f{6~IQ_o#pI_@U^P~H5XcE%~`p&<*| zht6)=o3K5toKZHZ=9WKN!ppT(L1qZqXCZq{EB)DXI8{6>bZ9<_mZ>{>p#PJU-tlkF zcqy|;L)hDbez7$=>Q3}q0-8Wc-rK$_pL0Vn_Sr1iW5IK6Rt4#L4Uo<{iwea|jZeA3k9v8# zmohCywCo{P!MTsZ%*<174})^gtKT+U+#jT0%75$u8;J%3-#~L?D&A%X7J2}uOjMrv zSqTje*S9R4nRwe?k2%q^ug|7(Dkw4KR}7yMmIkukH19rZy^>z0c?J1bI@)+%kB*IO zw|3Q-??#FHv;RBagjsl+cxHc(MA`WK@Mqo5pb-sr(+9WxIM!`(-vB%rfvVS+5 zyUV)m+RXdb>`_t`SA^^BMJX}8r!NI#PRnZGLB>MHNm{0NdDe$x2QV>HUsVj5o=Pi% zw%GZ8Ab=8su4OX12UE7%gssa1D|V~r;0!U}-vfk%+8y^Ij&G%E@)AXqKoOB!TNM5Y zth;*+&BE;wE}d#%jNaXv48OZYUEEG&DYpB+mq!2oe;_$?a?ktWm8yvlGQ&;2BOq!Y zCy>Q3hwt~z&sVVi-9_I1nL`mTC318x>LUQb3Gq;!@G25Vtk;V$f#7*l*<+S(l7rZ2 zV0a?{{c{A`F;d6ehTPw#?@@PRBZG`3%yKSsa4PXK)Ze)u=vP42xjkgH_*6ax9FcSe zU!P%bLx9+Di^a_T217p)*cA3)@~848S{Qi<9~%2Y;tef-nt=xC2@!ag(Vfm4&z;J_ zsOFcCaYbm{I_Tjr1uE_INCF(IJ)}m5pPvL|_!p5LklH14P_(<$Q1-}^Xs8Gvf)Tr^ zmoPIFzaIOzofT4&6+%Xmb20fS0KfMx z=FL#4BDF(ZMwkjB@W%JeTk)a2&_{Rx2QSY~NE+&euTV?a{k|z1#nnfW7Yv&GrtI1$ z&cPAMst29mldz*t@+m!2^cd{%_J&z`2Li=;EhJ#`ORXhuD6B-&LD`<&N8CRDh@WqA z$so%aLX%Sf@6j|n=(T8qHQ)&JCIXFR7Z1z0YZgj_ETyeOsBM-^bJi_JTTQ{e6d%Wz zI7gc+%KTaEvp)QDfMebatjwf{F~mA@VD3;4$>kcuw}`+4m}cCsB3MpL*bG-5%O!Cn zlGEz&ikClOiIpt`l}6!zjU2(M;bnLp3rNQYnG^S zzh*=(d*s`+7(zyee6SA(9OW9}vV?07R;1-LA zR4U^P@QaVLjW__UUmWVDn$9h*yr^#nZ|pr3*IO zQdA8Q6{Zk!+@hwp%NclK@oA2A5x?W<47ww@QTPG7MXo*;4)AW>NY7I^>rf@y;qnQ! 
zm%|=r6Zaw678o4Mcbq#9$*};gd@@CZxZ_TBjp9SS!mtf=Tk@kAgmLuy-)s`O8IHz$ zhxA^D*i>c~7FjP9tiJTm?qo$GMwwQH8dDZyJGWUo%nI4KqVVUhkQ&-=dQaZ;>Zar>odVf>)5d z?%nQrK>^3T&M9gwmjQleT(9eje|MDV`=>V>V+W^Dl0&S@L-HaBlLf}FrIYoZ&zV7O ziXTF~3wy26_Sp7_tl2-KBB!@bFT1zx`KdfY&jQ5Bm8|(9roKsLnR~FUOc|;cH#i1l-YWMeQTpDS{~Gva6lz)csY-VIw&^O01Z@WeDeN4B_FHC9g!F?9~guC>+=nGmJFTz!UIhTlj-Zg|Bl(N;fzk7R=_c2kY;ipK8 zLv7VyzH>Lh>mRWZy8+*{FQER@xc`5;&r6p77V(~#G57KRNj}LG5Wlo}NE27~ou{bM z@W-PHlmlSl*%DxT>u3bnVTpc)X#r$_A{zm*Zlanyg9U>j#g zF4ffNw&Nd}j3UP$8ljcKSXbR~+xiLoprF~-B`L_)WeD@2+sF9NP-FrV5$(WtQ>58A zX`gtG;q3Z%Y4EOlW=4dT`%ie)3<@=7JsheVnI1Hy71N-?kS|el&&k_Lh9nqh$a?^) zih&qnd({&V=1g@g2AOlZMas z)WF%gh_LJi1`R5L=V{xOW@Bso*%`2+(!MqvyM%(f)4uGG97!8Ts3Ej!rrd^q^_V7GC#bAq+Rk?#2&BpCLnH< zQDTbCI$bdFc0b8%1o@Y-q80ycajFNWD9*Nt0^om=GZ^$Jwnd!xIu3lVWkv1EbP>@u z#3zAZWUEm(+>(?{HHxh(=Wr9EJ;EIt*V)f5vU~JysYs!?eI-n^I6}-GH56W{^J-13 z-z`LNplLZdISYm4^;|uq?8l?vbtz#^b^p{b`*XD$5&@lxYyL+Gm#odBUZ($PbqwD- zH0M|!F!>rWGSMKqto^qm5*RVPs?YElODfx8^Bd1oRYm;*ssAnz%Rdl0^`%SZecS$45K-=z5KUG3|hNWv^32YdeTHx(?*YcXP|SLj>BR)*Wb zKw*&DQ@yWUxOo!m>&eRd{`2s{xx4+$i@~=Ag_U^xGppeBqKb-{d{Bognt>d(D|kiB zeX*A8F>|kNX6X5?Z5pipB%+WBb*QcIJ>b^gS=HWGnt&%V*6+1E8Sl(&to=b=DKume z5$mYBVbQ2{?P}7Z#84|s7+Y+MyQESZsjL~<`l{Yf zX2H8rSI>i{D}ccm^wGvBelbL#K%4w^zWKW?QDkhVr0K>vV}Lha$J8dSb?G=n?f4&mp6ZRW5W zAQM_s65R6zf#R}V*OOU}SrIQSsR2z0cM>(N%f*Te5H^64!|#0a8N0ifF!vbxX4+SD z4)RSo4Gy*#=#b(?{ieKOh)U^$8 zGScNYi!3cvc38)jKd&i}z*hzDp;_}43-P`?H3wS6@e>D>QGx2QtFxTEWiT~?222M4 zS!8u)gw;oYt_pio?9Fz+QgRac=N3nZKxUXOaQ+4@*MhaPWyysT%HwXK8gBHs@ua$Q zX?dkJ9+v&_@H@xr+YvI_jw29sNU)=C>HxAj15#cR9UoWOz3`ebw~+O7DKP_A@{>0E zjw+reuF47yuAAx095fc7X(J-nCFfm~rZ%+vZv{((mYL%?`>@on^$pDr_O?DQ61r;z zmijoUNQU{J)w25E2_$tl3OBC8+oOu7s-xvckM*QJ_lF~^Hphp_jfiflv!Uq zi%=8?S-^X72{HLmmeb)3*pOGYKBXv1_-EaUY~6x!XX}Bm-gp-2K{&pp|LDA3wX<^b zRf^s%^@*dc6E>YBXFwiK(jUt~#6w~OL6PD5%;WO#%JT-QSfQD<`|ti|OPBQF#LsC9 zSe1qviYm-g>bskVqNJyvjjN9KeAUH+xg0i3wcZA}M(bVtp{pYia`+4cNLGEv{18W< zmuaTfmaT+6B+O?n!eyb}t)Z1#I;cyP5}crpq{qp=Zh%ut?Y$GFr{_C`A(aoo1yxQ; zEV8PbEcZ4Gk}$`~FqH3v+%b4P=wtm`1KIBjjqhXkt(t4WU*b#RJm6N&L%6@+qDuQA z-08~kzfAFFJStoZZ;Ssgh>pO$ZzVxa$c3xTU-k1%oqUPc|M-s_W!T=ftkKG`mFt@W zm_dHRD69rf=HYPG$T5|ss90Bd4q21eIDJOhW~60VyhgA=?$AP6xpU-?^IU)*w&f^> z(t_{vaG&9(?o+VGti9eDv4VPROz?p6%lW~my3Va@=0_z>68-HKX5<012{yMCuz2t411_LBi#%KLgB5GHpMSA~e6&hX&Vm zd$m`@U0u1FT=h)Mg>=tWJDn=5=M;or8~vs2|4{#*Ho^!R`E*JitR4Pm7w!HWxSSer zlCJ?))KoR(IB)f7$)>mfi2<7l^tT-~ssBU`Ig!202r;L8=S5Tx8B_(dVgkAWnA$8Y z8i6tZ38?E^YaBT^vb_8JdNjd^pIou;^EN!f#@4^~W^r{f^5aAEfU03SCN95Ma^!om zwjPTEk3?m49ElUObdH$h5iz!6w(#dYOe)8s|C~}DBk=*%aPfEbRKLfg#wFR9`a~z! z`1v@M5}C$RhpaG;!iQR5j33XUJjek9DS?}TLG1I1!gDl<#6W_uc#zXb`9WowP2{G! 
z{0NH9(y8j=7D2F`eN2}sVKFgTh&9|$2_7jQS%>PJ0G#>Y7p;l3(yxGVm?-~kMjla} z=@FsDk~{!ZA^ya84R)$RLt#G6zrBgK)-fhPB{1n8-t}znO(Bz$-KMHt0%azJEZ`N( zw%oqn!2O%VR^OdC@rPg`L}P%kEMhTIT-Yih%IHlEClBQZ5TguGx4rNM<osdHEDoMYwkZ&i6I=VjywUQ2J0@i$U(A1AB9E27S)!HLr$OP+@3`Jm9nyu>1vJAJ zhbK1Lr{d5S73mUnSFB1_&N6})QUu`$uNWBEO0#~4BFOzZ0_0x86XE#F0!1tj1=NtX zPsT6B+|+^EmblL|a?E}vm$EZ_ZvV5AeJ|KQ|Kg%o4_>xV_zc#WDOs$g%e9!XB-sYZ z0(q!T%?OB(p(T#$T*u$(5gl94B2$@M(4dmYNU#E!Tf)St|Nb~SSVm>7gbbd0P=cc(;gJwZJp9e!8@L)M2MyEY|@KKeE-J8VyKI^vsI2e~Xv zEwYB2dcwsz(YIti!E~M3RB(UH)Ym^8lG3Dp5zXbta#`ef4^y%8LE5>$Z>*UkDo zh)Jye@6=iJ+wu6QBmK|whD`TM5My_YZ!(#CI)#fXK*6D4Ac!#IH0KELXuS;>?A{Opx4t&;y?d)h(-l3L}D zeFok-Z-h3*+K98v)8*W?px{XQCFfrf*NILkg`;BA z`&fKT$+^Y|uMsy7(suXNRil{30#7~oP0#BieA2v>D@knFsSn*a7yM9cW_>G&a_1{1u5^~u5 zIO3X8!lA;RHO$Nr5WDn}IiwG#Wk`%{DlIcMHp7euHTxHS|x4_2b4u$dVDOiCIz zUQldjS}NXqM42(NU&Am5Uz^mVE@<gFD`8BP)- z?#XrQaB*SLXvz~32fEME5)pyMrAmLfi$R6I{6IEq*?6FYyu2D5(G|xP4~P1os{*_2 zDCa@`DUaYkj<%?LrdqRzWA3=~YKA@aq2aws4f$RrDyRT9Oi%T(%eZ{cvTDGmCvw_? zY;GE8TGVMYRXX|%-Kj_5F*fw-%(EHjr+<|BlzpjRxc&ttAU;Sm%E#ZG^HpZ{lQ}$a zqzF4TtA18V;0?hCi60g?9}Z&v3kTD}5TAZ43)j$v1|(PlTy1~H)lL$1bA!nf?s5e) z`$+&WM2vs%!z$1}8NTd6_0TbO{G1+}7YQI*6YXWjtmZhS8*fjuA8iN7$O1w@A=KBS zhh9Hzlk8M&2uxeObF2Idrb+!Q>dzK+EU^cQmQsWyw6RG_z4=r!-_)6lX3z+VP75Ms zDRt6}R)|6uIkL@GKD|P$$psY!Ef=K&T;apOHk@xgejwP^B}e}4t!Dl2bS6*Ftk`m5t-=6=7n>AiLfL~^jgM^wrp|_>f!OO6U;#U?E5nH zyab$zRxd~We#`9*ODGQOy3ByjACb$d&&#vEFo?B*b`idu0*$muJ1!)zJS_rEl0Cn& z6wyJeYvX|D4W)?DaPKY z5(?npXEhQq36_iLM~i~|(`-G*?`*iUT{!y{znf-+5}6_9-Is;|-?%@A>Hi_vLZKyb z1~Ke0B>3p{X=;@~KQ749W%aIhrKz+MM{s?uoBJ{{ItE^kA$>S;J)F6UdJNkuc}_EJ zO8GY-vUBs^U~%_NBsei|T22a=!s-bsaO391mis=g$L@2hc1T|6ahKTiv^d9Boi(@h zvYYLXS7Erq6v?xWu;qJ4uaL(`nBd=e=3jO8_BrkZ^k<==q5n;B_TDh5aM(1bi`kx& zIsr~fuODO13(AU?{9W(K47^N7q<#k@YBm?lkL^I!2DqLuE0K=1RuKOv<)dIx=hK#Q z>OvJzGTZB(R0mFJh`yBeEM`i|z=R;^kE3-B~QmSq~qB6c$+oWL=b=?r*M= z&^Ft`YcNESNx5k90tIhkxYs6s_+ zQ80qv;?X1#Dq&(9J>|uBycUDMzsV*LoHa_%`r8uv+G4%fUfAkauMg|eJQr7!2Tfg5 z{;+o;Gv7@+aOIn=R-BbC#|%Nr4-KuiT?DgOSNG`$r&+EBim1e|=pQ)wNQ-7O@Mj^v zT9A}K4YH_OxaOjILVGVopPVgnSTwH_P1Qzq{v1qZFzbh_r+L*lftUTGqxC%tI-K?o3TUCavBseiQ_J67u z1QAbj|KrE+yZdYZg_3uPF$K1yI`$!SA5vhcUVdX*r^2W2*|yKD7br5_m@Z8+W+ zURh4FBdHpjS7}X_nbm~PI@mYU-pVQ(7sV5T{Q>ob^bE8}Py(2YxONYxWFHsrPWwd( z%+wWLc#*yQVu}!H3kHa{LYqR0YL`4tbt3H%!B8Q4A2kYI|J5j*yl1CppS3&I9c6#P zQbUwQ;QFlvgcXyBKxHijkt1;Tx=3ERSOdB1mFf)twwJ4yBzURkd`YP5!f&u#(DG5RlGv5ko_jf zV1aJ!l)E-aoaU@=7Jw%l-#7?=0Id}r8FwTw%Eek3p4UllJ29%$h^;3ZA7(ZM5En!m zn)6z9zv$=~urrE{M#G!vz__46fecub!H|Y%d?eegX;l@vm9EHQP>EM~#8!IQ78v;L z-IIv=PFa??(tmn5Hic2hzg4|+r7=hxf?WkU1G^%M7z=Ds5D<1;Rp{MMG(ZH_cJPPr z$0h!?YSwu$n;BmgR&+m|wpIAsvI;ph&-7?6eTCtMQx+iE7kL9zostHzp0N>=iN0Aa z*%HY*G$(g--w}|RB)~VvKb71FpuuAOkR*fotg!zX)H+6>h6a#5wp(sGkdG&Je}GCW zEUV!=8d#KVrK61{4N_{_xRz~dF+EXQ>2NJM<8*1N4{gX+)?=Gj61?zfts436{)jvN zj=x{L5=4bwkB{GO##q6XT%L0n4wwikI=i1S^dKJ`I}f5&=6xuwqUK*RI#dCN>opj*hS>7Q%}y zSImqTA_zL+J#W?mECK(N;N^yUeu|&+A5+jtIQ(K%kARv>3Jt=vqXNn1)#ry1R%w`` zko8{1@3>s^;}}|5khO;MpTxN!#tT5LUEFGn)vJ~?wt3A+ogAIluxPJIO7`7Gl6RMV zPe8=pHGMnkyCCr&YVG7<3fNZi^cyhg*JR=QLm?Ag%;6Ke3EFp-^(wlr>`qOT7Z=l6%8jSP(bJt2cx2iAsaa}jO1y@3bU#VDw~HKXLmqD())3?2etr^wuEQiB(VJT=H;iN6%M=UwOa! zOOc^AZ}g3n&9P;a`1G6UGuw9jk;gsm-`-!n&wmT7HmfcReCrk%X3E8P@=Fz@c0W9sS7 zS#qt+L)kUz(4C$QVbiXxIuGRq7P2;g`7z*`EL6)m;2*^VNf3m_kksUjAODiFo}sVc zdf-Q{Ik*YBPHni=;epl}KnQpCwpFaZ3xDe#L$yqW=^mcsljY|BHAh4u z)gtfZTQ`6%&_@(eeheX4ty(j-h?Z$U+~H-GKTBnHF~XZ6|CDsq^4?|d@-t9YkO<6! 
zdiI-@U0H;cA@({iL0|Mg9Pf6g=%MMc&f*JeC;-bS(V&>@#hJ zs6*S{yXj6lMAoGZWoqB~{Y0MfUOss!0g$k>FnJ5Rel5>*BA?1(EQbG0iURf)v3XDz zk(0AZUQ@$FSpne(jZEG~{v8|K<~n@2eNF#Ybg^nRlGBW7A9HWWt}{>8h|IvGEiR5ttYRCzB5?a5+dON zu#!W1+T1}uhuG+QUo{ds0tI$lrI#Yvzzdj8hvdABHJphrfaYJj`gzf48GMVyQB)L$ zO!8`W{waI*p{JN@jy?mxkI^~!~p-Z`y<=IPgMrJ z?5DJdcT4u@ZT;1#{ePZlKVpEPa3)lCrrX;7go^h6T=PFTJO6WDg`6$?vK%eI)L8={ zO33}~7Qy`er7fB$VQ)85v+|;*xP=*Var?+^Giv3zE^=dgh26*ou5eT%K>FPkn>T7@ zSn>QlF2Y@rk|@|mg5LBU z8s&pu*pD7Iv`Q-LDZrDI#s|0K_jI}xAauX+YI0NF2O1kabF|Atd2sSr+*SBw;_PB? zMyZ_mO{Q*3VXUC8LDzsA2cx?4OZfF`;2V7z^_g~p;Y_eYL2y~MBZW3i$%^qgYI`f&=h0| zoPhcdZGCZ&KN4l6Yco7!{yXvk!`ggr33vZe3Kba%!mH4A0vuWiQO|#q_+kc}XDjzM znrHGo)?s8IGxK69rLk3Q^&=uL0LX-(yK-#6Py%&Z%(vZtl9E7!d+OjmL9Kp>#I5Puxq@IaA|!XS}+E=LxoG)|2$~ zhZ0!>KviFvL|L`QOqXH7I2k%drfCBn9El=niM=D@n}@0yFFN}aTAKop#yF`$_h^K| zn_>c3+DjILH_29#cB=Q2yYSf!z6~_ZOq`yd)dwF$f&XMX{}fhSj_~hA%5kX$z_Vdw z@*x|geXD}zgBiwe?torZ{4he4GoR?_G-qM&DQWHf?<|1EZq8Vr4dFVIt*W*^PLace z^~lu^Ld$tNn!0LGi8*iZDsBh3^^5Chk#UlpU4GrmXhs7pL^@yO)xYU=y#mHfVi6JM z#alLw3#pzEzDpEPBS32?4bc~MO55#~gCX`l0;ek|w+?z$1*{HKHG9o;6M1r9P|(>g z>eO8HbFYf5x>+nToMeu|;Hr={bzQY`xhRy?zw4re(cx92E8ZauryIMwH<+5i+dXUa zE8=l{Nw4l~uhnen04UfX#NgAj%v1FDYk1>D5^?{a(`}rV-8-yjd5%VVn-#)acV%{m zC8ltpe4WP#7ix6aFpf-htf8qOI?cq-wMz~j&>=(A&0PNKv0)H9?V!gNKEc9o3T4Wj zXaIj`#W+Pj3fT-a-22%&`Jt6(B)ZKvz1sg8|Z^|%YJzygUhww8jJQj(kJx*9^ zrA4Y(ae0LwVYP5&2?~?rUeF|gcXbtznvEag$dsGD$ z+M1hZpUWY^h%a|btFWrFA1_i%a{l!vA(9prMuYQ>&HZ`K%gG|_js5(tD#X_S4EFrD z9DKiSZoN2MA{+cz5c~57^Wufe;$4+Y;-FutN38JGq(j9#^!>W^juc8=X25;vO>Rp7 znJdW;Fj468FY3&ItwLF}tdn|^6Ps000FT+L^w|Dl6SziipQp1ukrSHRYeN+;|5a}h zaYlkt9k#^ZeeU289$2xiX7Bf~NScuPbfhfje?pZ;aZxrr!YYB#RK$??_v%<-`YqS( z0Y6))`t97)z)(_n2`xh?V`%u;c!bk|rkPa33CLfOEY&T&KL?}_8|~~i#Qu8(`s+aye}YzkkA|aJX!6QX zp4iCmR=|(It^^z`BEb;U`h2~tvAG}1TbPAWNG$StBKSg%W7(OyllO3DDM=%3!TYag zwD0>`75J+vk_rP8kVE<~m{2%s^q6_e1nd;o8ZVkB9$H5EbB#zDnvBvmyd zvPe1_F`AFAc;q^}Y3Y_K(wG>aFlSn%ltGU=H>{lgCyrRV9;!zHV?AzK_s9F(+$2w( z+nOh)HKu9nU#|$0mV3>?Yg`ZHOD`-7>zK1mlug1SR7w0+2Pm#~*6qUV>>4lcRUJ@4wB)y;{pOTkp9`axQgz zPR`uN{rCF#Jl?!4S3qJtryb8^4WVQK)65#^+4Z?&tDc23UE^Of^5FJ8{9IS@gzhW3 z5)YM5yqG2KCR=i(WU8fVx47`hO!poE6>LN&E5>1t)pa{c!Xva=vw$e5|3TGTI7HcZ z+uy^`Al(fD(%lUzEubJ>Lw86FA~m#1r!*)?cMLJa(A_n(G)Q;DkNe!ud7tzC5!d(H zYp=CG%fAw1=2knNQ zd%MOdqmcHoSjZ;D;&3hFxoNa&ybFxB0klq^(5hsk-$JO<7W!<4Qbgy=?=CLN7wNVu zM97xQUk9S+%LD++6Tkck(yyWNk(g-;VDB?pD5=yCL%FB*_7q-QKkbSf%8rgTY}J8Shy z>-F2&=HO9&#Kp)uB(xK!4;*0qnZab#mG}a1D~e++lm@}cYIWVkS$vqh`h1v`4YZ%+ zG1GGJQm!pP#!Lf~xh6Pix}ne4@RXx$I9KT)pw;0(UaYSBwGN8%0F1G}`wA;R9@8-^ zlT>0=%6_1@OpTmVSYqbIa0amG_>mv8eoa(BE42_Au{7{xzh^SU03al2U?xZjxo!Rl zWkwz)X8{a)>)XC}&X(k&QDL8uSbE$(f`|_a{^*Ca{;uvE^g5tj_b;dM^oGcz4pSF$ zlR8%C!m5g7zw5!Zsa8JPUGDU1dEMi`=W`T!v`a1N^pv@2B|(XE2a4i?0{R8cyK461 zk9>br#lF~Q1D_!hVh*5Rgev?J!Rm-J-hKze&Y2m?YG)>1rfhnJ7R#?wHY3tjO~G1w z;-E$FEdT7PVaM;}A3xN92&cmp^-+SqeoS>LU_8}4YBCaiPJ3&a;Lak10ewYrUob{4H ztOWN4)>wfqz+tS0f|@}UPom(K-;M34LHX2f(hmw}>08JLCgWEEHb}SF#_B7*mAMe7 z`VvJtx2OYcj#PAI;+8+ex`W$vjYQeRbsd6)ymt|0g?aZC+YPwiG6h|%t!#EZ{wWTz z;5_!76EWg7KMvWY3$ANc{qi5d{XehR(|1%!(4A>jmRI?|BjV#5F4twUk-Ayk(#rkF z=L!lEvX^gyxG%%Fzza@o?th}_Ffo}ICX#o8w?--hgTr7OF^s*@bxDIi5YWG*J~zLA zRGGMaDnDpb8S9HD8RMMx2hUXrSLR{lTTl+IRJIr=IJ<8TR|g%lqdQ^|+wJY3U?`#I ztg>^aX_?`&5ZW&MikV*u5*CWw`dq()<&HIkr)p@}uQf-Q5k(yqBk%Xjy*Mmff0a2_RZAKSYo&bMW!@H_R^mkCYhkX`0pTP?M6E8?Y{2sZ<3*@~A!vifE zszd0xqy2)Q0-I9iWCt;#H`jOlNCb zZ0(M4N5_aMc^1d@uL+4tET4=Lu$2-?FOX{?+PH|+9Ilrc2?;pv8Botig+QsqX(XB2 zXVr|-Y)+sH?k~xkcq}mT;0g%1rNw@76{3$h??Nh%3@PFUAf1j zoOW@kNxKy$1d`6T=7x*As`^9yVvv#xT>e~@wTILn+ 
z)|m4Cv-D_Wd3=tXvSz^)dwUAnKUB#)Ha6+3$N8~i8J%SL2Co^r@u?t62->1)s8Nfs zxaAL^;ku%2@rgnB7Q#1Uu2z4V)~Dfj(KP0r;pcDMkoAcI=)~|qjuB|l6(_lO%Wc0f zsF*s*(l}}*Ixn!DTiyIxhQ-cVx3lM!CFf_1T)d>Osr_gmnZdVTvsjPbh&K}P_Z@S% z-n%PCSsY@{r|1_n0jJ~MX`Y49o*K>z{aWZnnx1~HM-p@{8y13wGy}5R{=D|u55Ez5 zs(izR(?+hkp7!BJ-uIOl_Dc*Ihh81p3GEfwj;P_;tv!`E*tVLSw{Q}Zl#N$4<$G28 zH9g?vtIx=qM#1PW%-)8~EhRV{d?Ofn5*Bh~X}1ixZ}%i;ohVMu^yW$zh)j^> zInyh3z&S{+M6%c>^kqSM6{r;0u&`pVv+Uw>1g%~Jg164+F4~N3MOIGR=<=KBc2hP- zpHgo~xjvDPP$6CVLFoz1TBV(46KbXpY=8X z)5Dpmk1b#L!V2h9$+b{>h)m$UBp zD?W64K?V2YW6JE5vKTYjDitK$Wk))6YP&;rnjBRZeZ<(?-LF-4mu>~Y75Qtu;{R88 z{xJIP`-@VBm>l0+hh>P-!ijV+S z1KdRuop`AZvqoOiGG=8l;GF>*XE0`Uh7q=)n;m1XvgRv%{mX=&>zvmhYZL4Z{B#|N znuS)$v~ZPq0S+AV+|pe6ul~hu@J)%OQ#{?b4uhDGCSzU(RyfkJ91PDK!azOp=1=Z> z#VWf5NN{Xo2n0#*8i*&})Ko?)rH`Z&zkY%V+PDXkZGW9=uY0JN!8mhxN{T_E8S? zVM@xR`RNCO!j8%j|F;ZB17tG+ECdcg;kRN#qtZdziKi`v9!1Qu9ej}jN`|lZ9RS!1 zCMq6wcJPi&;VD0#kToYbp`$;yA621-w;L`-mCU%j8$nvQ0?FZb)Q5y~E&O_|KpxvdF^Lu%~d7kV3bKsuo#>BuP}e zO8xJ?0Oh6nO7_b9s?q_A(1_1JCnv}ljR!qw% z!L}PSXAV}5zw1)vBld@CnH8%j&|Z9;yE|*<@FVBeZnv6yCHD=)7t%nP zyY8n3t}dVoFy0NxL+UWZ;s9OOZ&GujrmV({ix*JH3o;-FI|oTEGRO|eZoQW{cZFa; zMB3SaYStQ*R@>R$fQX9_D+b1R^y^ZGFeHgQ3U&-YkTCYzM>yuA68vvSR9Q&Kllv?- zcuY=!RX5dX1-cYe`sGXu2><{t8{Ed@9GdoDRF~27temQT=ZfIE_!P*`qkFAf_dUS)?js08$-Fl>0FXJ%7Uj32TK6H7M*_YW*tMd44;g9hy`;Rwx9yV!d@xve))nSp{zgw zMnY}ofxy=Q7`(rU5Q_j7W~-MV7_BUfM4uv3{ynnsr)cdCUcPcG)s_mlEbw3}`Y@KH z-SQn1Kp&+G`0;N8xh(0|1S|`jX>D9f75cx2Ur_!e@s4-KNZ}cq9IE*M)J{@m(X*EP z;!)AJwxr~ofJ&mk%J<+n91pI?KjFm~_xL;NHz;8ZRIbfH)aqbt>-{guZ+owI1(v*F zce7e;&xdP+3Jv1&07b^i-AT7tx1WywGCJ>*K4Y+g;{^~>3a)REQ2E5Cp&s^bNPt%n zP8t&v^p2ye$b;AKa&fF*hQ~;4WNd^=uN`e_ywcD9;J7PT-3YE0BY}K4nwEY6)Q{AV zk3ezm?=io{MtrBIOB}c+LT(7B*8)=#0_+3kHE*xXAXWI;s1)aSba&G+dIL*d^IB~W zNKSdn`AfKxlZwnOzu?15F=LZm+;h)vcM@@ zs@x33nY;b9(O56l&=+mwnixeTJ(jjtze;7cp0E#fR^(bo{)TpM!vik@C{Js)>W(Ou z9%YSysrY3laFI%4EEnkLzJ!fxg8nKXUHgcxY!mVou z+|=rNR{GI2!hS&qMr}QsY^n~5U1(~V&h;{<8OY|7eQh~LMB&3Uz2#)tQPe8SNaXz= zUI|Pq1okVtt6NCEEwe}dhR3$I5hw4Ao6sEN`vaIVGKz`f%)(|NuK8wHZo$ORK|8%3 zKPSrY;g*unGauo9)?+B|?Okm^u1f!z-J&9;8PDpD^O|@1VXSxYu8Z*RPe2Lz>CnlP zDH8jzTnkEtV14llwLshrx@jH+GJkzgtd^Xvy9&f30A{MG2_>t6nrp(l21hGun`%Rn zXaxg}S@zjMf}8CMD+D9!(${2;Kf4l7PvcE6yI@>{=zXIesq{OsKXz*-eo1mriyawX zeBLkLb37Rkzb^oDpeTQq8X~QjSMmOJc*Jd4n-~Ck{pDK=g+0g%u)@c>=M`{2>1Et| z_kV_;&!pEIT+c1b*}hbtng8Q~n%D&P9yU%EkLht$?AWZ_kKpOEcE`DS_6+6Ln0eh>Xgn*CV&a_Pgb2eNYqc}`6jN)Ouc3_U8xjBc{h#Fc4P}#8t9c|4e=$=SLE?FNI7nIl(<_56 zR1_;Ee`Wf2WrmWw8~>^?7U#zA;BjYEgKwYMCDLVHtkb5_06o;0jD2sXoxl@6*H6-Y zi>3P!q8n!`b2QyPvr?^BnS(}0WDAc|L@|{s%b#Rm0&BufpUZKm#1W)i74iy&cp%=T z9z1|jp2;c69K_d3UjG{FUOJPzKXFOhrbms;@pwi08p|&($Jrq1vjdtYzjX1-0EFLHEvX=XJITi6P^Q^cCf@skf!TbOv9Y5ZUi%W*G%2%?rgo-9}q=2-a+sBs#zoykAj*nnhVt#!rg8GsCSD^jCOez#ThHfcU? zDpUt5xJz-HtZVeGtqB{iG#5Fw(VR^jn%#9GE%!s_Ln)T%$OEkyyXX( zM;IL7~rSNOu>GjzHJd!7x5RQ!YKpTjQtEx%`@V=q|Bj4laPN4xLAPnf`cuC z^!^hfRSysJcHt2<)?6`g;ZTM*W*$XCHFr8NHiL0YT>hi?EWh-^IQ_BUsz;m^r!s~T zw>*6voRXyAnT!5CTrJdTar=7Sqp^@vSD+O#{PXt7?5r{%s6-PtNq1V)PgD-xz>)f8w%nbq0=W_3gX&x&(_|SA z+%1}Ht&yex-EI0*6hzk&y>k0fGeM#FW;sv{F?@jx@K-6p{ri`mSNY&~>uR_k)!*G! 
zA^#9yT}@y<*)cA6)cO7|8=0X#1U_82&MyfW5^Dl@;LQeNNx*HiSC659M!l&pq$9V5@d3H2gl8HI z{;l`s*{`cVtQ8}=gGs}3Tf3t^D2ElgKoh3G7ftrJ=EXF^K6KIv0etYk{ZP+I_B|O} z{Ob39GmAg)WEUd9zI{+<|3NIXkE7x{N?t28R3c)AK}y8{Wyb{X&S7%ni{#KnMGLQ= z={6mU>YUMxJ;WZKCBjIQsyAi@rPW0Un+pooQN7V7P z_onV9H3gM)5QemG^KH=95AsL%k@NP;s5AB2S&?&n&uK~yI{xuj+G}@SbK+eYv)?x& z@IWxv zYP$M>DMVcb>l;DzQX3C|U;%(m5}@DvR#<6mYnNM#?l zsQXPm0Os}E(X}%;`j5`E1mDfCT93-y`s9~amc5a7Jiq>+)QsN;?wv45b0>y_4};Dc z)ww0gFDZU!vcTq#GWVL+H{5IAylw|7)2%+RIVfzx5~P=J63%w6urnNWG;9`yn@=Ng z(%g%+aj6vkFcbCD2ho?g=)cH3UZB;Bav6A)Y6J5->ZJ?t$FU(k`tg5PPRBq08L{Z* zKQ0IUZ`kZO8kJl0!P4iv@izd)j?)~K9fDi=c~*w&eci~Ek2L9xOzU4xz^+_!rm0z~ zerO+vUJ>bRzxj+uf!mS;o(abx&E`B8fZGIo+GK;w`KMSh(Oase2MN218(w0&O*-}+ z%Q?4mBo`xGRDh`v)JDwYQ&inWvuue?-V$4jpdLMO%fN{~yKHP1pzi0`Jz5)OC=6eoyu6lPaA|uu zC|POzOQYrMI9efnsw(>q;*~=+rzBf5xnRvlhkpDb|JG@sJv-OriO!2F)rPO87@Q6@kJ~vZ;&4nXXTH0H`0W+oNcuz!jg`j==kj_$ce|ufZUsE6F&zn43x$YSE;Aw3ObG+HYeu-0-s{$iT-H{C| zQI(X6nz+5(M9uBOilT5g0uu+AWRSp4U4$R|lyYPz`Ot^J^8~zW%RD8MrmO8mneR?0 z0maSiTvq7KpZYIR&gq5Xy8)sGTROo5e(FGms?0?a1|E$1CAv>STd6cJCR4pgwT4N$p8dC%ex-c`qbNcsOU^twzS9n1^d9EOnj(_qaz0Ht)@r;4KUA~a8@B-_GSkr6ky<(p- zTJK-|lXz8TxeBl|!&&LWo-eXzxE9)&{-{kVr%EsYijjjz`0Zrc!C#RUVe>9t@;_!l zezrnHpV|6)U4C0u8?p@A+dGKlAg`j!3T!Y3PG{5(>W(8 z$%i$Q8X0y8z2^nKMbTxYnT#=#b&EX*ZE*HriNfgf0^_eEl!o(54~Ao>CNPhd-`jae z_oKDs{=zE8UnK13R^xZU$~P$v%mk$G7*p03=|r}~v0jZx-rl!WBs=NDmW_3tvOUBS zwU|Fnp1i&!jacUUn`(HKdEyL$*>$=d9yxblDyO@toKhlWq;MdHj}Y&d>OM)4>S#Eo zpU<6V(MCFVO`4>v-s8i=D>U!c^rosu&=yM4_wy|B$Lk3%C#mRuzPolf3S)6@SvE^^lF3|yx=~Z++<1hM*#WUt1b#9da*gekA!nl{RiuVZoFcZ zZG|UPXq*Li(aGxE`Ts9WDR+pdnPbTo39;t1k@G08C9~xrmL5q0ZZ z8r3&70Nz-mv0J?f24KA=N@SqhQ5y$RB1D-EQX`W~huxggQtcaDwy+3#MngSpX#;`) z3%FP7Y2wdttG#Z=sY?IEI&9erqMYtB<2l?4)Y=p?JJRn=`(J}E>v(8o!e8z(0dxaB zIA3XvKOQhsWc=Qk`$`cWu+LS$*0`c2!YoCE&sM1*tQNHR?JXT;(T$Fn8TV#RzWU1$ z7E2ZsVhg|qJwRSr^txxgty)QgN4ZOBtuNWL>R_#sff4Y2;suE)ea`FNUeJU>gAQ;G ziv{nO&W4-HJ5oT#cfjQ<{c~w5-V=K1q;Gk_!{$_mpGdJ?WVL9c)o`2{lkEYZiR}21 z8x&$Ga`ZzjpcYy z=q?j4T#ZKoFHP;zt5FQ=hGR>%ErK%(x}pvwslL_Qq)rY0O$w!igwZj?970)|VkJTo8irB|eJ;O+pplB1Mt;lT0bTg<2~Z4vnTAYVa#VBtkMF!+O0OpAsR#`uVSN zR#5vlFdRy;5#{udy=23eHsmmn+p#he_dAcZ@ME9M1#*!l`^93z;2Da+N>+f^F!Tca zD8e&|e)Of*<;zfDcsf)_lR2-~MXQ_1TET(2h!ZpkJaFPK#ww{-Fe3 zUxaVC+$s@fa6M_5PH~qE?VD4*SB}@;%BNdCZcs}Xj~S+@E2OIJ zn<}oP9yqR#`qUHm)44QHMc)sFPW|phEoH2t-Fo_}jZ3cR-N^ftYE~$oK<@M_lWa-C zq-KJT?rAZ*_HxDl2!ZNY@&d`RBrnzZv7?nz_h>&hz9mm>MX_3E*c8|-wA}OV9a`uk zSpF#KzthyUhyL@etBmo%?sU(;k`UGn4Lf3*V=y0NpS>59U=xF8VtB#~Cs71$;OL-(amUsqNC- z+$Ma2ST-n_l=`dA)?3nkdFP)`{z)VS2Y0hZ4ly#9#JV_u1}z1ImjN+dz?9kLc}M8X z{CxLA*Jn-5rjMkpj`|ALZ{eMBXLz;l(V6W)0&7Z60jUZf_b9atNz8l^+0`7sGRymn zE7*l|01vej&ABgkv3GzJ*ZY`B$j`dh|K1kG5G{GGDt1H*RP)ya%$7Rxwm4ql{_E`NC@ zJ^3opb7zXrvL}7yT`;Whp7WFln7L!%A*mfkcMFV^ld(#A5ivqWf0W{aP?C*{S5k>F z_{R8qQo{Yv(HK-OL)*G*0G9_;U+U~BXy#?+x%qu84PbK&ys$~Ce7uA^@pUb)ae%EuhKJmvNjMEp3XYhpcVQ|WD|D&!!^vVnd zDt^oP{TpTThR)lltcVJ;p-Gp72@dBT0I{_RyG_ z>y^|Q6o+-N9yvb>M5YGa#Fcu-T&>F8oZ<(-gq>5F=05KLWlUmxSggF6Ci5(V7%boA z#ZWUSF&{S<{S3u4agVdYZu8m3J%-gMb%^0ny?>``QZJYRZ()xgz0~rB!z8dWPKHUa*9U0Ll z0u&TcTG3fp?vjm$uZoR15Nfj@-j8UyS4#O~uoVtyZoLZv%zt}1rYlTZnHdNMYj_Ei z-TaINV?;ZBq~*n zT92mRl{<)NbJe+0&+#pmf$`aT=F48xF<2&o9Ngn?5{D?Vslv=8-Rw{ApQAwO3vAW! 
z7FUEyewy6cjt7%mMl+$m>K~^R(EhJkop{w>~7%=_IrGNQhx^0aaG;&`kt^a9{lNXg1dn&l!4P| z*l}FFsg3f^Rn)WJHLTP_V(oK$IeQG#!5xG{)$XkiCcNt|SM%;B7vQ5VX+SfOp;F^T zIvf1?=g2CDW^bkD@c8zj`Z^`V9`Ut~rx#-?Y!&;M;zb^IHE5Mn30)R=%z)-IEGN&4 zR>x)f4TH#N7Cm-D&k?{{QNFfJeWV4Hv#E4bum4Eh8l zdW3moU78^48AE$(+JhM4{@IGv??-;6{;S@qvCj?FyJ5KQ1jqEeZy!2&W+*`+q>tiV zd>J%5IEcJS*0g9cP@8Ic$Za1!42}(8I-rhCgw$sJ#^$0(;uV%W9(&_aeRa%vOy?9R zvv^7sPp}1hn}=#C)K00=4qDux_F`J zc*(_&2!i(~iLdm|{*Q#$NF}M>GCD>HJWsFW^a*F$kc$Ojc@d?iBkdZpNp^uA(ybbr z4tndwr|Z%9NyhwrKo-&d$T!5GgZ+MNaE#Y1L8pPIvs00P=@I2U>Zx>Pe!|7t$f3I= z+vtZp`n;z_s=rm~M|!p{x^;XS`SZ;b!TTwl)}nS?sB?snle8;wn7A=7d+6;si&yC8 zaBbwr16l@*&`4$3YQvA3IW>f(mxRR8@RS*p5=o4Cr)tmjbcC3-gm7uibL_pbW?JUX z;VfRJ@fM?U_F+Tu4$+-UqqUT#*qNBJ5j{wSU_-4;Q-Et$tDUCr_Ib-==&@IlCNDL5 zlM?L(Va^c?m{pW5fEtHbQkjN7=4gSbgsh@D)|zx_Q6ZjYI!4v4j7)|CVK}Bn)jcMk zokAtlSExOfcNmzVpmtx_-YT2tP3Xj?AJJY4xtk)~6=NrKeS`OuMhA`>hqlF35F;IS8A%p%{yMTaiOQ%g`e^F;= zryh;X^lp6XRn+9R`ujDD%E*xsdh$P(N?`2Nl}n207amZ=Vldt!*XFtN+JC=mX}13n zDpuY{t^W}!5nRXTWk29)Z#Lud!dMfk5uu+-Ld7xD6L*veF{R1h|HS8%EwC~`!C+X@ zLCWE5y^hOrPV6qt?ruS5^k%O7kRgo}9bMJqV5e+9J~O80kWdis$gbz8ziClg*)WvJ z5zB-Ed0+P_%JBV_Lhxmhi&5BOm`4KAyl0H@?Af;@>^lzlodHlKxt&KJTY5G~AK;53 z7GFHHHCnqk9eg?0t>g}Q0VW}THrXe)XY{Fg z9^C3exFg^z#j;R|axVIKM~UKTMfebc1K)}MrShRrEA#o?1PXk)mO0Tuuubh=N4FSey5 zvF{ZP|5U*Oz_>PIm%mbt!#!k1OAT?P%52ec>V!b+$> zqycDyVt`=E0)=)!yfQm=xV5BxA1{k);Py*l(KvnrrY@1b6(M2AI>!YpY>A~Ch8z50 zul@l(A7yjQr@TzQ#XvXaKwssMhCk`|g<#Q>(mm|AKWY_|2R%W;d8R)-^WQSp;2nq(fX|bUW?Q!X7^Oz1ce?QkY$w{Zt?`s0}zKEArsaAzF(6v!9Ri>^3`>oSg z7C{u`v{d>8i`(N=4cPtYBjV zSrJ~7Y)9X@IdN$szsC-Su+6W*?5C?+@|XcHbhy#LV=|wHYY{q0R%QUIzprfdd5G+> z{jZgJez#bRtd|78shWzCTpNY=El7O<~N&vD0kkWG+GNmkGjhOr??E0}I90 zzZ+^dTuCNNUF6+>R*I769Ok%`41s!nR;A;O2Yg>}&10cNVcL#Nf%T%lYC zji`+fb_!DN(po*zit#zmha{NTOl}bUlvqBKk)s${B^P*ORgvZ->-4kBv;D78DQkFv zOBP8eTXNFOf!3X3Rrs9Zf{evWs-T4)k3%4Q@E%#q1e{d2W>0OY^`5)xBo{+(`9>=w z2o|q#bJb*ibf4EZ$~(roD%TS<@=2zYvSmB9VE~jm*H||E9i2U6ShAQFUiJya)&1|9 zQMn1mZvS(fEx`Q?vW+(1pe@|{^k%{{1wdNaR=m(OMf9hZ!vibfN?tRuXYg3zj7?Lx ztV-a#U(1GZ>_?1s6A87jOFG$>Th3&OB)DCL(W`4OVt{up4*R4tdEn~lPhefb*$*&B z|H|&FWx6GcZ{;C_3@1-vP6pf+=}uiYO-yj8Jm1k*6RRJDu1#v_z5?=qV&7z>W1;+L z-(3DY;Z5MCb&llrJ+=H4Tq}eaX>#IC5ZI3;k~unj`rz+md@IYZ2X|t2`dD8>l75ZB zE~4Lt#0YANVdk@{+3fN;E~*?hb7M(eLZS4;^_*?abvw10Fdd|wYHKaZ9IX<-vF~c{i^O_2`lP{z@Jbj0tq5@_f%sj?4P>mtD-^apvBkg}+kWUchsn zCnc|@YCOr*iQK9n8i+FCqbre*U+Utr=xitb%xV!T%lr!txS-?K#~XG^b4ZZ;{@rh@ zWqbZmUGwAJ)89l|pHCor>iEUbsYbD2ST@g(Rn?+|v@Wn1lG^58BcSYa6 z+us{tcqoIF^=yUj$*3{ z?92xCJsZM68SHLR*&v7xUlVFMx*k)FLA|HLO3quKUW?8>9*N_ABtdq!iF4=nLz5Ec zTJ6{E=zfO~v$}}V*vjpIFHuP(TTFFt#y8&KxDj(oeI81F4XlaxCyBnG?vdsrbX1WL ziZ`(y{2^)o^HLm|&A32t7|Mo@O;nV+xTvB!m|Uxr*6j=6;k^T9?8$=TzwTz?hHXE} zIpVd&S+EKE?Vcaq8!-Eht|#@T>O<* zyHGXCH05#(S>r9HVMk!G@k^5tJXVaot+qTyQd@TXk5}=-f=7HpyCSEAMLU)W$7J+% zwO`>cm5Ed%{shmS8;nCzN5XsKLRA!@%8$eSa`FaC8Hh2%$n4kV*zcQ*{9$#qhqdDt z1@m2YN9sF;5`Ku@8LKM`7dP(r{%bIumYQ#0Zk(1D9NJIJN?exh67bUOwJsO3Re_c3 z!fd3y$3ZGqe{`R2TV$d1?_=L#?bfj8$-VY7o{8kiQlUVuyo*qHcT*|v-WzH-E5O6X zOD|zhiZs=oQ~fC@q%rNydFntu3yphs%wd>kmbuK$+}#n{Z7;BEUWB>g=i^U@<2;ua zhpWpz+q6@ecqGYcY~+_QKW^$DAAY;HuH`97Kh52MujsNTx@-grqU$zYnw?DV*LE*= z=nm^tfT#z4nCafvOdM<~^<@iGj%}Eo2h9-yR&8y%XFs2%)IVs8|GBiuHu+~BE~s~E zN&ENP{$o)CXMA$Ulm%e>Ky3N4w1Ri6J7Ob{Kie|w)n@5KdgkU2Jk*h-8CTU+N6Tde zKE@Mrk|3d?RAcGm>vizVYQ{+1KU^@qr$@`QDAtnUde8!ih%4ke4x1ecT_#BawyZxzzq&aJu={ym zq-59Y^hHd0t;ctX^kG$}#ISxqA&Sa^B-Bw3UGS^I7cu^{UkS6L8&arMFdm2gk)DNEX=DUH2jY$9SJ$p&U`;OVABh@C$Qe@e; z0@=dQ!0<#{nxk(J`B6zJeye>O6#3+x%45{=JJLC%{KO^rT(&8qS_D>9$dLl1h zkm=+|oFu$5xve540X;VB8GAWgOw}<{5!)3XuiN1wQJLbv*2u;6 
zCkQ?MMz2jELacJMSh&=(_g#gW%pX=NF@HL*-mAMh*M4ze7YY^z7Ctt8m?|%!hXJ!T z_TD@alfg@-(dor!;)2>=Q|rS+C^U@b8N)PGca!+7M)XaMD!*;9TYs`2AE6~yb1$v` zh|RA`T0*=6LSCJbi2HkgCIDMys)_Dbz*xOsJvvYmK?)ta<9f<2-9y5B70sS)Oq*anbf980_oI z^Ocr3m|}G740D1u+QaS+`FUYuGczAK2ESP>XnDhqhs1t%>ISx{mKa z>7YOy_s`Rp9#oB^t`7Elptw|p(_&7;)-}(&Nxy(m?hD(J3?E5v$0i8i{M?!%Vqp-E8fAvV7aGee?4VRs%-AdOI#rPL48#|xT?PD>1=mpQX2wFPF)-=Y zA$kR-pXZ!w#uL1j%;$cSB&KeCQ2ca|w=6FOsyN|x8ILJU!E>6DcPX#>$;7r7T$ZiB z017;Pw^uj^a$1-*Wbq`7B?VFWwP9+N)%q5CUe(TzI2o3KC(#5geK?77*l?m2IJ4SL z7HpRMnoYA%A1660Es)pSZ;0XAQ5DAi%*%Fm~ zJ@T9Ge?7eUtxTyb;?C82K99D4BP!T7Ov^arlw`W(~#xQ{O=g6=iL zA8w!=r9Yw3;$FWx5-pGwqR|>q$-7tb6YxcZq(XCC*jrH!>=1(!Nu;)v)6FQzw75{G zm8R*Yrq`7S4yXOE?UjSUP|NA(rBG#dsBAXf=N)B<-3C;h1&XTFGxfKvzh}YDMsIoR zdk0jo%6kT9}jnpO=|x=BRH+7@0hk#HEm_G>lZ4uz^L`{TY#w%T&|Fq4FmBfU3RJ!m6+c5t?W11C8ify20 zNr3J6FDfFTY~4=y<@aQoAw-sK-X8CT{7_et4c^|72rbVodRsw~Gqyq;uXySc97A8+YH0KXljo*WqQ*DG zP9HE!z8GPtLY{USu;TY3>BmG?uP|V%CK#1gz;m zTpRzy+n_Iqrf;Xh{T3qs0!l(HihEg8cv!U-NV5O+a^csH+2>7psRut%QDb8U`dQiU z;V3syvL-O+qqvuI zmumqQQ(k(0#jro7S#lAzwqE>9&Q;t|PWzmY?A&uh1E>LKz}@2B{#Tw%v)@*44tQdg z98B+SKSi%{<9GEBeC3P5={_y_%N~5OT@#F(IqC-^cwu(SROA`fpjmT0C8Ea0*Ea1 z6q$XFiUoUCbw3%-uH<(%huZ``iDK85qe&)bpghV`NnZzNTjHycmofP;wa3Mw8H()J zTzD4EOc=Q+W@cELxOiWv!?vq_&rEdoc@(nk+X05j2`N<+(y=PjIxe==DV#0B$l~Zv zpU|Lty3&WTt%LrL5FEKy%b6jzA#@JMy=d|B1sj`;W*LLNrOO=w?Q85MUD?99fK1OE zt1~a27gm3iA;vNw!TF)+MjFr~!fui@BU~W18U2)gs?2GULx(JTYk+EXUP?-;IePw{ z@s(x_Z{S#tr>>Ms+LGwEE5N}U#ugtcCOV<|}a=ik?oOJ6Kk>o-j| zLG{J8pHAw(G+%R4Db?1O8lk;n?&QpU(JOLU_H$Zd91Y$4Q2$OWQNTJbY0h|Eu}?d> zCyQLidv+eNx#@3Xqf}$olRQtWU4efqQ((dGv0v-6qhY|LJ_WQ5;(xYbCboTJ6BXmHoTXCzvj_@r7T=zQ}O->YII9+*ZqjzF1(e;vb{ad z+GAVgMFZ{Km6m3+V-r%N9beiHp3>IZQ_+mEYNZtC-k=EI)D9|ZD|x(UVhSN42#S5|J$b}aya;Z>Ah7QWBjV8 zWbz>}W|gGt-?!K&6HN1XE(`6Ekv1i5{S3gc+M@f=#lVa8oqclf0&Xm%SEdA@()naN zBvz^vm#Dk_W}A}WoIv*<^>EhwD~mGNiT`(MHNghI7WeYhJ*rzX-;QCsGNSR1 z`&ZLSuf?l2e&lLX(zr0m8y_-WdO5yi7f>B+?}@TwDrADB&I@y?{v${WQQGD#_|@E?_amhCk}iIfCdn%Sbtk}|t#F!8=+^>IpS*8KKH=mwd{N{ggi%LA zDJnbhVe=X8C6B+Q6vx0IlQb3%?sNUwfbf}a7+xV?!&{Um%46oXW2&~HfBMFH3P_+R zv%`^9+8u2!DfhjxS{bARH{F$OuBR`QVbCeP(uf3{GDio2@m1_}zeCE)PdehG`&NF?m^5Bf#T7&B%5g*r0`n{Nl)p4$a`3sea_D4wCbMdeOv1DnuHxlL#VZ9O_8IRVpjvDF+2;Sqn9_ZWtSXSF7U_F; z3QV16-grat_GSfhx6r(G&64^eaO0+mw#@5X$LDJPYt;fjq~EySsO<*>byCd+1c|Ik z25ykPpbkpL!tqL^SEEvCM>onXjZ?}}{;ke`vUrImvoNT1yojxY>=e}~rrM!TP@M22 z0Y5A7p^1NnnQil1?-S_|jU_gw8|qz~kdR$Cq_U)%UsY_Jq|JQ1k3)7+?&vKhFY|T& z;R^~5z0y5v5NfSD=JcL8{{4q0xf0kkLedPa<@y70e(%C|ai5Y{|hHA_fu@$|vW%HcL649d~%%LkIB#rGadk_-xG+s7yW?WKU!$Ee8mO}kcu{UexJML+%{w`V8PhY)v}-2faI>Ynt+cP%M=ya;+KjY{i#BHIy&CJl5`fwI(g9HP_SI5GhQ~s@%ze3$ z4q-{94)dQ1lr}SM$3P3Iem!c4vEU?)?8DzN)PBod=Uq ziwsnK04?)mdAHsl$6d}xu;v;k6%AKg<$5oYa{JBztwn4zB~7aQF7BeFE^MdEeLG(k zH_Qq!NF)r3^rm)OBJEMk54Wq{RzY8ub^d^!)9H5qNQqik-~m+o{s3G6J`cX==RZGl z)5FMiQc-^Am6ok#>HmBov}ZQoq7*XNe!GedPBud=49~}OUCf%&x}=x`p>Ak{mAD89 zC3nx#i7{T@nheQRN1O;L^yyTvh5!suAD^iecY3~D>RcxcUudl*rGBzxhPhdd+AV)mET|8(e?R@9><$345@Y#h)k4kNV=pb6@X72Xfsw`6 z*!%JL3rG8 z^j|p#6}V)rV{C2v^bR(_;EZZqXiTB@Y8_2~|MHk&0KD%t_)CMRiam&(s4N_ubr(^E znTCjVk<~~&&G~5^`J*NeAw|O&M+p<8-23;*upb6`6uV_jCZ!`1n`HrIqhSv%<-4-z z)*NY_bnEDtnY(B?_X^i^_;OP3Pwf2qB3#V^SFl)`#E$0s?_a%0#D~}){&y9AVp?(h z6QB37sPzuH_?OZekOJ1cyGA3m)ouI%YK*KeU@k%xOk8asO@5<*NoQa4>MRt{ZVgk1kI91$ZK}SKw zjqyRGb(3h|L+2WL*=-BqhA^NfzfOD@r0Wm*w>O4XYrQcI?i{Jw;I2?;CGgYfYb7-> zbd`O z1mRAwpomriUhI%>%ucJTRRa27YqALQP$CgDbq=)&Og4F{P~C5m9K$2EG8@0GSAJC# z@T3{(9^8Zp0Nt18vrVbnWuPRc{=LX7*U<&Y`9Bd|W3`%2@K#pT*zX|_0XCaBd7itT zN_pGK?mJ5U68JlgLBJ&penA1sx_S?eWRI%dAEbReGb@(s-W++YXrROPyj`Ckx=xB( 
z_AAk2VJQ)n{+ori3nP=Jen(rJfwYcq>Fuc{O+=kJdmtej{FOI;C8=Hqo2ucW*T9W5RCh z+Bv$28o(A+(^ts_@4KO6<&wFhy}hUSSAlv(O@?=E`Z3jk0^eqY-o3uAA7lIg3NL$` zWp*KTjWR?CL6{pX5w;*7Qp@mO5Ho{VEN!@|j?h2i)q9_A!X-SHOtkd962ZscAP`(l zKKogpiiW=-ecQxLA8;7>UoZLXuP1O0QsR;ilcVYYVQZ+Ef=O(ZUqEB4kEnPgPJ-fY z<~=n}pa57gLJldsB{BDyCp`K%xB*>Y%C)S(1~v~#$sgY*9MH_k^T&H*GDM^V8F1h4 z9&cw9<^8^AbIll3ILb~toEsDU{h1h?@-=lx|LKmh7Ua*oBY!AK^Fsl&q1~7bhZ|N~ zV4GYf7L6K7q}{h|g_T#wKeu1@q-@|x4pJ8xn7*T;ki&Z$=ruz7ab?%zqmGsQcWU2X z^Ryq!J=-3CF*Zz8pe|ombmJ(moFKL*81>)Jt5v;S(EwF7G}y0~U>%IOX4hO1b{sUo}n$0)#`&V*0dQsO|Q{)&(*vEhi zWyX6>KbvTV_|<5+vR>0t8mNf=y zqEGof?j+eWM8I$~5gVxnYed;daT}`PO9zgAC2b|j%qo)u#fu*lJ5WrsLCA_$FJ(;Z66T zy;*(}^_8v#;vrL*>Eh90X(obqdwv6|6I^8fBt%Q;Kf)o;7=?^_^M==fpDx?#G7 z8$v1WmIslsMZs!PdPR=;?T|VpSP?$yqELXn9XVlxOulm54z&-H#>6?GA1g4|<_M_G^_GJ+PU60T_Dgi$xwAY&}{?l9%Q;%1LxiFiOM3Rj@af`Z6C!I*|I(eid40_R zw`h{|`@ZC+Ly8+fU$JMLOd;=uW?SdZn4w2SORw<5mKr=6imGlyPh@Zi9IvF*b8wXO zW8O4tW9?!t6b_gWXc(Q45g~LmHf-44@kEtwPP+slzXCoIX@c5(<_V`|)c^jF&sOv? zPrGt1XKqFb#im(l-yjzjQk6Vp$UY)*oEUXKcuuChIfpqrjEe3iG$=T|6zr#ia$O|t zM`o7+I*EL4T_$w6?C_&3VPk_tQ&-p0JLFoE?><L%8xD?blz{(P_*CR-?P(o-L`f=N+Y{R1Z`8OMbcve8Zc9ba}PTSs*|&`u=CVD*$Cl4 z;D%7a+CAIyx|+pNv0FDpZBchc$HUz0l*InJ61?9YyHCFS83BWebY5928mUo&#Fc%N z`$ZV_o-(Y*G@WY{Dzni0I9f}#AWQq$)uDIR_V7^Ier}i;VcDI90~lV{Flt5tV$%lV z7U^MYIAAO-1WmpqCA1?prl%x$19$@_Vs|6ly6c;lq#+AI<+|9Jg5x(>o>Z*wO<`>s#%`$`$1D z!Lr05S&EjjE;_(8xTHr4Mo&sb&CrWL+#V6K&XT;!UkwX0A0@BZQsjWymxMvk? zzF?z1K0Ge>ruK(@Aj*Genx&p){w-`&mp`_*uS#6?Ly^7t&s^7yn!TSZIbII@pQpMHE&~UX6dAH=)QtRd1O@i1%T<F$pEL$8X=?$DZ$kDHLyd8*Epace*hwk4u0zf_$& zOLDJtV#cg0H4{e~nmA$zgPShtl{YnvGi$0$prxrl-H<*ob&p-kq@uFqA{N&N`VCmB za3gE;2ZlHaLj)xkEp*<*kV|KYt*c0Wu7vDUZ~vdAnghm@{l`@O2Ddd#m-x>mfg=iS z<5Q!G2J>aFEM+Q@C1K3x6W{pz8Xrn^7{DwJ+ zB!49Bhx9gahZIw;w>V44^H3a)iNY4t%gcK~)`O&3_ZeUP0&=Yx4yM%Jf&YeK7%a zKs=r4xj@P_-t>Ykim9+smkQirl6s<7cjQpv*{ZmvZ~wy0e(tv`4D(QF!?wH;flF z`^C=yR#d#)JDUQ3;n?0&akbu;E+6F4r8inbo8aUkAZFXgLt{4cN${eGH3Laq#0h$S zJ|DZpRmP)h2f$4?yHju|07ljVTBfRgy#Xxgx)ZHCq=|i?Gmo9&7PHIUT|C;xRj@Me zTz~V3Es%c!#B&>)S3EP`+s2d^H%u>rYo`vT6uAyXoQBPnKAj;a<{SF8$`Ei4FHY-NV zXk3Dtqqv(A{~&f~CoG*MJy^}!HZiZ=m17>XUT}vOY1d{leCAQFBd_;eB@L#Y`lofb zGCAas<7jtu01sri@QX23uf#J7c_7E~n_79oc!v!9>*FURh92$2iGWmNMwp_@uqT7* ziAFACoNlw0&>7zUK$640|FhXT*?+23{ci{u8@sGyt7yG9<1dw{F>tG=nr-#FWttHP zNbIG`TxA@F7FLD@<&Cr~&VpH+!viTyll~aE%n4~z8A+SlC~=GJKJL04$R<4O(8n#^ zv_&g%8WUNm+E)og>x=p@e8(KyG@#*cUT~>N`V&5*LSZEd1SKme??0m`_NO`*8GS*+ zsA}4oIRCDl>4Kg$@%yON#_8C7k%cH+@@htS*=z~L+0IPqWNJ)!L4k9KRAofo#XRVf zvd+`7-E+gIGBL9ZDePCV$8!Bb)ZBt$+tX3LcUUH3azH+XqfaZ8RZEI#cA%&%GyEQ8 zCo(zP@6K1Rr5R6N%))N;_72GZo2SDM6Cm(PwlftlP&1uV;goEQ+U5p)@#PIuUvRZf zgLR0kqW_M3q&eM!l^L)GmG9^8ug?W=L0jmHXqrx|?_}vS$^g7CcGu5>m!9)dK97V` zE?-j9pyO#vv|)RbillLl*Z#Q}cB6lvGro`>3mzIRwA1F$5c(FB4Fx06P?GR_e4K3d>L_<4XMSSeZ zK`TgOK}2lnTdy?kPI>gE7tKH3od^v^O^4Q{r=u{dsbiF025Eq6T>Li&4j$;we#&D~ zV4Z;#VPJzz~5?mu>pjrrB8=^y>9qg=Dhz;2)&$wauc=3gn2 zwA&YhEs$3wm#)Epe`K)H0$+GHinC0~OuscyL^F2Yn)&~Gd|rOKPdz@PT;##b!oLOz z?BU?C+URO7lJ8Z5VQkAli{5ZsyAlkk-|J7z5=b04Ot^(iGkJxl%?SmLZX z1^7zW6zi9YhCvXY4nVy`nT`Uc##q{HaNX{o%8%^Q7L1b&xw-|k4ui*mnX&rzGG1BBAhlc!Iufk*xITB9j6wG7D&U#lrdlRIRvX zYiJI%+-E2(tWya%L!cwKIy&^UjvR8phMuI`pC4Fy$$-pAOOp%u47?W}h>w$&{bsfc z=WWC~ctGckY<>FNaM7u1qf-aG#Bq;$c6MHkykK6rrq3PMW4b0}c3RZheTr1u*DLNi z@lCYByWT}jT;AIm+9ea#9Je59_sMyWQ=W=?*14I}Vkd7Vp`IS_9f^-n9CTiMO!vw|7w zaErWFiSd{wtSuubF5*IX=<3Ku%*0fBz?{!F^@Gci3y=3GLhlfDih@EuyXK`oxxb`y z&T}C1Ub8S!)WJKUvOaPcM721nn_|~I$Z+)&rI-6jl3*3ZBfXEtpWLG!t0QmkZt|m~ z-bsy^w4?W1X=!2cilHKw$7xb?JOPw``wTlKr==kmb8B88Vl}BbhA3}yM-M~YPp%zB?=F<8zKHB9_h;5@0O7SX 
zI935~#!EuXAOeLK-X9EuU(tR^sB@PeGVoDoUWX^C)k!^x=`a_MYP~6jght%c_I0^B zO2{epxsXrSN8tz4>SU62H2YJUMBj^Zxc63vb1rhzZ{pus`{x~`+WjYj|E3MxcH;A& zoUbu0*U|oxRJ|KU5ml!eUXGjiMx#X{^p8OGOj8~~5w?}L`A@PwtXYzMc+e!#I_=-t zH>Sk|(1tX5uR0N{lkeA((p`jhu?v0b>3)AWjJjm3C>x*GLz!$~CRXIS^P)}ANz-U6 zJ$L_Kv!ky)lS{*8*wFd3Ti8*IPgG`l74BG}>J2giqNy~V`9A)Q_cou;ZtDp1B<#fl z5tvjS{rfO)dF(NuwYLH_lb2_>EDbhv7Vxzx#taSdqs#eCV6)2zsv z_6V74+=^7^h0n@4cSYdzQw^{r2;}qV!(BBNuw&bDv5+ek?_ur*uW0;>oy^e-pNTfU z*uc*n5c>+T;8(2=ypiIs8o6*g6VbdpQh>u89P4qa@hrYJhzjJ5#d!kAXWhm#>GCb{ z?6)v`zWtuFx*&j1>^cEDYV7LXwr+p@@nfQKB>m8C$yKYuH>k zlCGS|@&>Ia5>PPn#;XLWZNlx{Mp57zz|CT8!)?8l%sga?C9s%tlSN24}l2eq1j_t7i3PFJT8%_;Bio^ajCi zHsy7Z-?!BADr>)&w)Q{V8KXo{wv^@DMrZrp5pH684yw;i8VxTbtTD;eeSsi)x3XzG z{zMC&QPz`bk)dSOhp`@9ruugfkUd>rsB3Y&N8xZxc8?tEao9TM+RC-x%kl2Y&lEn< zNoo7TxYbf$mg)b>?B?z5sG37Pk1w(loPn<(f64tnr8L8kfA-N&&xgdP|DDFLq^IaW zw;*pi%UX}!ZABJm>n9tWHMfmtr0}ciDd!YMp`g>1mG+_CM~|=(3aPHwA(H{Y&c9a_ znD(i5v$A}Z`kT<+D|a19ULbGVxo%yx3?r+NjnDBmziy!@Z&p_2@ZYF@I`@!F3=#ty))@xCBAp>&KlMWyvT0wCxDKatsPFCkpraz(XR~u0QX#$?LK=R&XvMLgzMR^bK_Tn9J zf9-k}2mi|P!#{zlBij1TfVTUNv~Ga043m_{wauY@^YRrcm7>#?3K;0r!ae zR)J4>*xn|zAxw!Y+86FDuk7lYqqn7_At?3ERPN^)(o@Pj;2N>EZkd;p$o9vl6%in1=%Z3!xUlpZAMsN!CZTEigCo@atftQp}w>CqIR_Xo@!m_O86baN@&YrtpbC+ zH@4-%aJnmJe+Cshw`ZX2%S@8!5qjyDe#h07=|~D;N!hPwKE3-*q^eQ-&sqAP{=pTw zT#xrUeeYBJ!k0MJgNR!Lc_w7@zm)ZmKp zJUxMz-ATAKcUmq(ET&VQuNkFMS!=l@Ebc2CSw$Gd7~EOrwCn@rK4 zw1zcqHDBf#nGA;;?h+}MPjwuhsCX+r#1EPaSl&{VKcA=YEM!j*CNfxPcTK`#zoAo@ zVLP$0_DSCC`CK;TY58AgigW;CtD-eTTy@&x%}_1BjlI~So?F_~a?TC*M^i2A)DrdW zddXx+L>#l#cskmojD<@0o$9^kY(iMkTEpHq$8OX&rl=io^hSar2<*T?p09B3<@8;X z!hncSL&n>_fnzdC9Xz4c=(`@02|I;Mm{*n1t!mA^hU?DzE8Rj;}( zwl!Anxw4O-iw+N68@yPIXJCErn^dfFQ*raYPf&lEwgRaPTm*M5N>|rB73;m*XG~5F zpu~ZlG?nQ)k5tm)gV-3 zUDt235fUxJdI_`=woH|IQ;D9?JT}+-PPrXgCAs|KA7QS26Yx>1Y#be zh?g-T7~&TXr?~0X#nOzzZhREZ;Hbv0W>tLqCDSA(F`pX10zTptbz|I!T;v{VB;v^PsLgkTRQmv!Q*C|tFp7%&nYR?Qv zz@}klt$n?qV1v$7j$shCO(~^@DVdR1o!@5Im@Z-Ro^bg{!{3tQs&--_XVV``k8exs zg^+a_FU>d&1rP?pvA&sc?XHWh5fuR^Eah*>MRT#`6!@a({Hdh%#8wx;->3*}r$fuf zPH2V?%;64qQ`pJkCaJCNUrAC^7KVQm9mYhon*C2!+{ET7D=x{IIa8Ya??;#6JV?#( zQgLW-$Wyfl`l>oy-Lxap@2j50d=vA*G7ff$x6hYz0f+8B{4O|mRrP6#-p*Aqsku^h z*4yf}zg2&mfz6b{QV`n5%{Zebz~g%OnD4PItJPmusCcI!UTz|q++V`g8T$s3n=5Z` z&97s#cFTl5Dv0PWBzDHe8(la<)edf7Pv1oXU&PPmIVeIS6kQANk~nA<9EBxLB%r9B z72zATKFpi=gE}ZB#47H$+_-(n5C$(@-$LM9 zz^ey3KUDGC@vITh*@R69ey&$nAd0W!!}5>1Cz7}{eI9BS>Z%T)RVR2|@B5eXGbuhO z@Z&Y8N%OsAg4n63aU^`njVSywes|9FOU3ZUaG*$=NH(3 z8cm7QQ^#e>iLU7P^Wize5V+M_76020kF%f0El=I5Ms`z014P*GCqMH5n4 zwBQ2ud5qw71mM*-I!`LQ=N+9PBIWc^AXKJACX48z{pi}uY>BfF9>0;mqVG&>XB$<_ zx&W})xDAo&pHlIlg?S|qY2q4AI3Yzq;Z(>0qU|@9xW}&^4O5w1S=80RZ!80vIJ{;f z7Dgfklzvm?E1jny2Kd@Yuf3DL58Pbk`LNdnP8srwv5jV)r6|I6U;{i?UQ~@yAlwOe zK9*}9cm8Fx+-Lg�dur(J&MoQ+xPUynvr;%V?T6Z58Ajf}v-$=G$weA^fLwGw#~2Y z$sxIt$96jiPA@BtZR#8G8a63D*c!JW6?y7*;P9t44HpH2qHnzl;yf|kH>(Ss0o}_Z z_*h34z`+z{vCvDXpgU_kbNQCnGfhs{pc93CcumeVkI9f`&iW|hxHg9>QK3h2Uixe= zs@QakC!}V3JDkhVy<~&;u-D848|aT^zrSN!opT`3%3ZTlONxNoXo$|)`6yjLTsxcA zU$tL2^^LvuQnnJ{Y|MlQ-513i9qn`A&yVPm z7QcW0)_kx2Zb>YGnK7Yob1}gr^8YI-gMm9+m)>he$G-NHy(|m zA9=Mai=SUrYV1-_T-n*0te6 zXnF9SmH$e(N=3V3e@`^(&%pggcbt$$Cb0{q^yE7&Ppc^RdZFVCgHO@mNz;YJbThtt zDr5iA6X8~hQ?I`NX*F~YK7}s*v;~W_$p71FXamzUUGQP2XaPPKLZHD=i7(x=Qm;@u z!%)z}uT${#ZG`Xd&{L8~=&v=$T_$YsfX_o+dTdWmF~aF|Fcx0|f4X>i;%I`{ZjpBl z?cRsjC;0OIo32{GsHc+Ca1wCH#cz)~9Yn8Ub&AM&TX~^>huU=9$)NCF2SNbRH zq~w*fHGl#d)VrxlNmcl_s`Q6@#^0AbS^x*oH9=iH(E4==NZHnxNP zSxwmy^2I~haruumdX@;6BOP!@x6%)h>b4GSS~@{S0boD|0PF{ZqY6~4KFERl3BU@o zv`WkYqK%Wu?%-Vt={;)+FHcuJ3hS{DmGPkO>`!)o+GFTAW#x~*y%Z1PE(5SLcT;M8 
z2rpz;7p@X4CHqFkgZsI_$#-Gm$kb>H^WrzRBAAjbuKy5EAW9qeeyqit6i$Y%e^Sju zs5WSE7Xxx4NkR|wSCdkWZ+wXs{({98hQC91lb!p(W2`k`L=4Cj-3BXVyrC<4zr=fL zA>>-jH)T^%F@$XR>i)Y*U64&-P{~M@oxTz(XdWhzvYG3>)BemSw|#19u^(gl?$_OS zQ#%jWwWxUoBAlu`FsBl}QOnz!FeAD^v%;(v3IwKUtS(!_Q~&W2pad?6{8QqpVB0S% z(-d@;>l($PYr;MGyft@2O~;jW)zJ_v^j8dSqjmooy9%+ECTk`d*J&cNYl-rNrQZN^ z(MV(K6iq)T+b$LdG=cm}0@vT6p~b_kS9TVR|KyuNhW(B0A1}0|YO__WrBF=6(y&pO z{Ii7GCm-&{T|Ex^#_9(bv08SDgyBKXpFA$eO9ijzd)?J^+(kX|4Fz2w)it0CdVwD>(MFsC5m1~Dy>*kmzTq`_sVr$sicKYUjiCDK-KPp zBj4X)Wzzxn7NQQ5XWBfXk*OZP-Udj_k=Iu~u8&>78gi@{6AJF{Zj4&!IxSND`+FG} zT{gZNd31PI+`u#Khg{>}TK>MN8?}ro>O#&78sgq^@(dmi_P;1T(yQ{G1&;FV7R*g& zcRuuSi%nKsj~+hdFyPEq2U}u+HPsbrlX_JH$7T5Q7k6mP^8$LU#-SwZe410;zRFVF z9=cX%`eYHhxU;dH_XleT$?Yk%z+}vesl-#t)?lvM6v9rX>+g^NK^#R3E_tT9 z*9nNz@efxEPHk!%|I(*a-9#|%J=iupo`h=YB!1aDB6Db>Fa}nEix%+34I6ReQ$Ee* z$>ME`2lskQ*hA3O*BG(4M^Lgzt~iSH6R8$dc{KAosoPQ&YhDm2AX(E=PeDA#cmW~C zPBaI;No(%v*US33LQu4n6}0}_8ui&1zXL0AZ`CTeU&UcF)=4^hIyg!^D7Il8Qg_~70h@mLnuFAE z9gmouJ~aCRrJtZCFq)+WGx2-(3Ae{zs`@F`9hJjPoz+iztBhLu5R)5*ehNNmXegi( z$kO(6-C3XNr8E!Y^KR`~2hyFbKBE1a_wr25Ke8}c+PpzpqNYSju4&gYeCS1^%DP+w z0bl5QBj9yZtnY`_&d~d_4u9A$llxS5-IZ`$;u~$-!!bJt?7v?}2Y_O(Pya6V;4qDg z3Prw9$V0!e#XpZDLpAum6pBoe2;vU3WbH*G5b0a4uv#I3#@E|4W(72J5dX8KPgdDv?N{b(w-ggVz9I;kStvH<8e5 zpj%IBt8+PW@gZx3E6bMSNdq)^!EtYV+sj&B1?>sn@D{S1puB9B&dg~Z6LPSxx{*Mc zok4sVDR@*~DR{uTu&~JYM%B_5O~bFe8gCNvxS4E|e6;g6uB%Ma)HG6;(i}QfL!u&d zaQq}}EeI6K{3l!2z$fo<=8^b<@BpJbxxIz^I=|aybp3X45#k`|#ThdQ<0Az?H*lS9 zE#|aE5_?}cw}q-W^U!z8vPfn$C{W0!{r4LtxTVB6#qmQ!1(S@$WJQ~w@C>sF%SzjN z5HT0)j2ZP3sB%ivP{+b2BNH+ltdpW4Fq_Y%wy^e;Q;jKN)3KO@?NijdS1re^7?_yL zr<6<1*R1*cEcf@RZs%{`qVXQJvr#?N|94*hVFRqj*F1HZXw^qHNM`>PBrCJQIc>Ln zLJM(1)d0fI;MfXoXwV1o^K#mrZ(|MlH7=CfwhiL`rzlyhaEK1*W5IQr^o_|gk4Y$( zQC0uTVrpzW%T?=FL(R2?@Q_3QElIMoKqG^|ZVXCJw&iE5%+k!^KTtURl4a=fsivKl z87P&X&DT+M;9hv>{QL<@VQfxjsw3+5vTmfk;Hxm2t@@hs3Gsn7r*+Ii?0Ngu@Q6{@ zK#0W)PfZ*intNV>B5obozZHa(`wmsv;vU*so|Urk&Nh3ywH8CEB3lSn=$lRf6=V>B z2om(_Ol^}yB$;{h8-V-f%k%hFH(?pkpwI&2(gvyyqR!Rrh5%95&Zpx>#GM{k(yyE6 zym^I)*`@q6jo-1Duw@h|5WWrlyj{e^cOC(e_cI=hK=KIbNfmAQ5KTuat(QvFiSB5bNiD;$06TL^p426&E(!(>Zr zr7Qx_Jmz*5(Ovx;7eH9}VvRDCqdR33a~r$RAU>`YPf-v2U^yWi;#Xws9u+~2u^vB8 z1~G)L+V|D2AQD5 zdxE9awB_8`*uTtZyJG)guw&NeU4kM3)PD)t5_nr0#-)`LDp!$LPbcKhIXs9Dqk z7pRiEoTi`n6`hu!)i63gM})LuZ;x@44$(u!-_dD_vZU>yp`$@w1(uOzywS`=bY=Xc zWA7s(I|b6H8Ti)J3&WhbRRF^rAZ1aNENa;degQ4^_;FZKS11MnkG{(@$g`>!wHGK= zemf+?I@49qZ2ArN3Qm<44@CcV)dp8(4T%{UqE|kr;nDw*{0tm;yT3kvfddSB4#~B0 zeL`;@?k_>`F&UC<(Nz+s)nvx>gT4%U=B={V%Z2GYKjBJFl_7UU@a>=m}qK=oe3Un3Q2lVonS{DvzSbpZ}@0s zMGY&4@CADVKND@_yy5AAbso9QJ;D9|V*w=8g$H+Y+ZZi1Vgj(N?boJv_?e$-sbb5^k6lsjSCUHy zw@9*U8tKvpAf07eh)9IXdv7%;XplkZF*wS;d!+)Ulcug~;sap#)$>?yXx+8YE4Ymr zD{TlPmRY*%#%fljkD`FfPrEEtO3;$4uciitkUtSyKo`ubz;w*UuT%%M z6`_5|O8i&$*?!%molPmqm^plTjxe*;t(qmVIIew3sJ&m>SH^g4&n%DUHlW{CB1VMK z6_ubw;Ir%CRgv)*kfOFjJ@jD#;eg9~z>H}&(XO;EGj>YGyFv)ECb+ZsPZv#82zqr_ z9t!np?*yxhNx7CbX=iFIndDlc;;wkBv1c1fgDB=bnCzDFygZUrCl1oqnF? z*cs8UBGPmYg{M-2L6 zQtn}AOGi$ZNaMO$&h%U{=;{2bA?%LF?^J>;;FrM)K9u$~huG_RaFtTZYoN08%F5-% zT6um}(>ox_;6md$`7C-qBiwr?oz`n-=V9Y?7Q!RaX&8tkBQ&kc#kM9hG(}JSsFCo& zA;YMC*g7KIl?!Z=UzdvCQYHabOuqM}+}*9>Qd?ZxbkbswzJKtFB{8o?L_-VTvBBDN z&T-J~>A_q!l?gbn)(udw7{_;RIKso&sN}c z+J7&4|FPj>*YmxjTIT)LR>u~VuF4(E_d#2)-YaNHd&RWWlB+hMqLC~6lZn*Tyh`*K zRm=L{hWVTG%>CFFnko$~O@xRMNAROQW%RQJiOfWgK}=WVY)h`ZB8Q35{dD^N$LDn? 
zk?Kn8JT-4GJ%S9sOtV>pXv`g0Vfz&QoA~}u8XUdyAA4~XLRo44-?&>PY2#sUDFhWD zf;FU|dc$UH=ot`qJwMyF9{yd?>!S^Ii?9wmQE%}OFJR+YuQM9B8N>W1XL(f#9}%Ny zDrq=j-dU5uOP|9qsg^N|fTPr0cPO*03m$XM_fvW>`_;)r1FA}v$fbzgayW5RH+Asu zOw`W(-GgAiG{+ymEkx!-jsfw~Oys?cta|R7x0g-I=-p(w&BsqTdQ0=S<_>+oQK@vi z2Cwl%0w;0IIKTg<1ZT}39dQYX31Faz;(Qn16{!C)!GW}Ibz1n~3#lH)c$ddo(pR9p zFWp%!-=bT#6)ep{gM#|Sv{1o#w{SPE0o~2Rwnk6gSnONLl-n z{TJxNLdzj|JlMX6Z~7+u_yb!xH|qhXdIVe+w61yt7ggqOYD~Un7`bLKa763$z9yB< zVCzkX;`0jzW>9%F2BXWB-Csi#%%inOk`;w2JQ6>3jgMlIcFIu)ACm_+QGv{d3Rk+} z?N@E+!+H0a2j=|*1(Ruo6eV9kvM)gL^X&u?&s)kGF}O^2TdPIUXYD6<8>AfhH@O(+ z*J_7#2=!`C+*t9yWwW`Gw!Y%Mo0gDruOR?#mJ@FNGTD?L&)lCO;i`8G~qQrrjT|kP0QRJ zSJbnU%QnJ`Ucy`EZJ1ZcpTGH^6!^)3Utg1-&DTqHM7X@<-gcwCsx;enw83_vPhf8?Tpi`v<(eEu!@|Cl)i|fD>{698&0%wr;g<-Y6QJHhvuNhj4tyOpHQiv6-B30jLYeWfzXJy- z{LbUrr=#(oXZWsTM*{E`sNU;BNB^VrW>T8e#@~ny?-pCrF=GZhWe7I1;x!7eL zy-zH>!>3Icn65pJ0wBo|^8*06zt_?U_c;4!?l7^vlN>;qo)xey?2EdnfNf4mcyOhe z=M0Ts&Yb{#;Su1-3-(U@_but*luSX~)ba~>De~`TTJyxn-YxWPt>>Q*<*JE_@F1UZ zz3CT-S4U4rfJ0cw%f{?x3F9o@EOu47fP|f>^>`ZANc!)DTnb7l_8SpO6UGRCmEsCs z8doml`GOo9bI@s*27B-O_;D4kzSh{)GG8)BHZ~X^g!?$EAm62Zr9~hlz73c}wI6-b zl&@m=)A-i}lW7D+he|OxSF4KgyzfS#B^OeEw4$(@Uw!%w2i|R*kdjE>=vAM3nRic_IIBdKWlalobw2>D_wxP zjICV`{DRVYLf~gHA6{Ml2sy=P+a#T5FSW>lErQ~EaOod@Whe{R(SB7Ef`EC3&=Fu- z3k&LqrxJ2b97cT`JMHUUH`jrqH!Fu@&pjUy}T`;&;6MN*#~rC&<7TC{8>*2e@cq*>d>q@Oq(NiZ*Mc z+^5a@s%FvT{V$=C0!m{F zdmkfe2#ANrOs35~NduF40FI^P(KF}~$rt$z&^xdmNXgmy2zt3Oi5M5fdbww@`?>#F zxP(auXRq&rj7Z@=@%|?=BQEPt2aR;nD8jumAy1H&>f|G+Mc-o7+cv_Zp_++i+1ce8 zi{kKyQ7!d8>nX@qyC|Dz@9YCgVkZ8;Z^@Q3?3(dqqWZh`lb*)VJ5*#Q>J{#K!#dkg ze{$vR2_L{|HKTTtd)G3#R=Q)y!`fAfTAD5Y%;m!)oD^xCrQrJ z;4)Fa=wxlD65Hu1XZz}Xey))H=hJG|KKc*8E3F%X6Y{?)v+QN!uhws3@?7w`@|~ci z!EOt8I>{782FbIL$UtmX0Us8o{L#2nl2TH1M>nknO70Su*qH-c0C(tbH|e$SS5iBE z@jcw^JuJK8l%iBoZdlA-GDEz#Ow33GpmoBv_OqFHpFj^0D-Sql&A*g%LjzF&i0v#3_XBzP6rUjS~eF1(y_jbSAL!K7V7oziTmOrZ4p%f?_2swkR84 zvQlDW31@U;>dH<^7c2X^b|nUd1ioeGu`>n>Xhdi9WEP3x;ivGArK%xk_>5b7?h;e>kL5p!32pT~e3Qz9OOKe6OXIPw<`0WFeYfgw=(s z65X!B-e)WbY7HGQM@U9Iww%>Riu=L`eVm@ULO}<0n8uCLVfEO$!|un~IiPt>)e%f! z4@1tSX-k@IweFxt0pj{^HNL}WG|AnM&BiL(Sq}vphNZ(VQx|m(xA0$i9|kYDyZ@4O z4Wb?wq0m~Dv64V|DqT$3iZHs@(v&GxCi};5d~Wyb!F`32(l2=`++PRMdhj-jBdZe? 
zuhdoZGJI(hZ4qNF=Vw#IN-?iNpA2Y7=l9ihAcgEUSs{UUAcLwF%Ec}(=$2d#DuOjt z|8HF5QoUE(JKs4AGQqOimF4qzv;<^@Ifp#DpmgD|{8_XNF~ur=_4ja&8#e@7?^I)Q zgI4-AQ=lZame9>Dz}bIqbUg{3UfW>%O4@$}&a3V=K}ZhIl-|EHoToRDhs#sYFdNGY zA8}d~Ch?gw#q7HX(AdIEQvxS`;7RQe%iF`j8ZL~hS*oc#Q4Ob@B}CU3Rhll2hI3#6 z=g#EP!XcOoYdDs;*hoSH_xC>b_m96d^@L}_Sz&#~2B24I%4o9~8 z{nu*$(4GLHHQ6HS}?hrv68JVU<{Js-!wyw5M!lTD8B7eSBgaUvj<^l6q z6f4okbK}_N?i7#`p?Uy@PBV|BDw-6bq@Gm@OL+inN2hM=iEAhZWx?GwB2jJ)y0C3| zw(4hLFwHb*FUk3J)Jkzdi?Rn$SNdT5BTvatSEq}=CbO*jd|1*KV3Xd|+p^+CWB!x0 zRcr@iGK{0a7P%mq@ZNr}W^Z;$+$uxBqXzQpaG$jvO@TEjP>vXONcSb3-krw(05Ym# z3SS#G95;8q1vsg;az2=S>~*xC%hoLT!0>*XUpZaz@b?9?RM8a!;j#OGW^VrCl4LYH zFAAJHM|Hwe?)u%`9i26PlqhCVI+jz0z1lW~DWdG}PnAvnOF2uvs+m+ZKRZ9>Dku{G zPLf!|-+x9QL`&9^t~wsG_@%jEe}8CBCs^UuVrem!voUm9`FttZF2#XDm!!^7o;YoA z#fz;n1Cu}G8D~DXlzeXMMlFcOaC;GiPuH@VY(mV z;DnaA0xTzIVKfasw5yql1zEtzbdP##{}vh+vvk4$Ba=vT4)vjz4Ip%VpacozZ!$;5EN|>Acsq>9~E80Uu7--m<^TO~>1wRDsN#)W~N;tp_Ty zEqDt*Q4>>CNiMp_iPt-jb?uHcx&ukUFUYImEU{yd;PXq-xy#u||bw(<2e$tMUM`)MByhl}NCO2=O9gWc%L(`r7Z zda@i3CC?6Cp~_!ophv?DWuSh8&|uHH9l_kSL#wQoMRl`Z`{T&0Q61+K;0AG*Imdm^ zYu=?``6`5MfNPkcmz)V+f^L4=j zQb5%8Tyl*yf9c#~<=!nd4F(4iA-?&ALmJj4ER+dpZ?9|H0tD+JP4<_jR=$aTL!RF* zEPQv#obcrh(A2jHa$OWAVEctqbEtZO)NNZd0>FsQP$)qpLpTCNNo4xi89QbGIMXW) zmetZv7`S+6M*Q$D&33vQbI4{a#=F;q35NM%&OHjGnZGRZmvNTtnnj?9Ya7`mZKU&Y;9lE_ClOO(LfRwQ(8^3(g4Wgr0U2{ z^IRgw@2Et@1Pk#}8wc44)ot1}wK2`P)vuOLq{eRVJTdc#kNH#DcSVNz+drIH>>0-P zXmse|N$SM3rv1ovN4pBCh~ra#*-2$4H(6{?_B;+f74g6RDM#d_I5BOfk3-|5iuT{sJYg#R`R|}nuSpJhUaui zC!BMF)A_GE28|~~i^Goo`}tJWeN!y#q4*z|8m9X{icYeW3~+mDvjQU$4}16atX|z| zBM9tst^Qn&3oMwcX%g4mxejOt)Fn{M{xp{-BgbwpEeu^D!f93{eYzw%j0^BuOrhc) zws?T(?xk@nXv9EOo)2qZvnm&2@0SFO?W)^uIzCm!QWBz9NTV;@i-_2rC!JLtB7UKr zg%brGToZM8*I!-6C7c9>=;3Mucas?VXBJPm%LNtCnF|p&Ku?h!2*1t`DJ;jTNo=n> zly4?4myQ2x`6KD@zoxQrf=*HAKD|j z5W%R89IX@5OE0gHfLISW>u#~#1HM>BQypa1tG_LA{PRRKVj|yuRpM7MtvpvnPki5C z8aLbp?>kOtEbL`saM#o>%~y0&Sm9}0EeU{+d) z^%~gucM&!pZ(uHZAlMJ~8fB0t4ADF1!dtZ#IA0~Wk)D{n<_TP^5gU&-|0&aaih=aW@kVOF^Df~x2aZ|N(W5m0 zvs&N+T2is;<{pUO@}uQ&gg34M(bZypOg{!}tP1rU6p1LbT)?P66*BrBq4IO{DPYFS9NpdI(+3>@Fm4D!pD z6*VG+JESVHkx<6!die3WI>wg!_2{Bi#qLj<+!h$!v#e_C>V8y=zWe$21IvImb<`DJ zpz98V!4^7YS{f^ws)7i9GrnMOTSLERzq}EsSzrH*cB_J0=JwI7g8q)==!=bYS;Nnt z4xp-2CNfdi+2QPi6~Ef&GuUG0-1NMBO%JvU_h&Mauhd7}X8aq4xCm@Alx109HLgsF zC>#bASyV(?Gw3xp5|1TaEf=Abqsl9W)Mb>KHnfN+uf5c*m2+6*^}u^QZF=L1gKL4S z9r<0m&ihcEH83bgxg&NGoDa|Lps$WC-N78mW8~36uw@C`Y_8T^>5hy7&;LS6*3@BO zkX$%A^54`np<~szjFUj?A*J)x)u<@ft3{>mn4WnMY>?Q!&t|#%?onBfg16mBT}P-s zh-_^dMpoF`eie&kHymprdJ^hv^I_H;44TMBIp?SQW1hE!j0)W$oHMZubb>3o7teQJ zmX|~`?~*9Iyij@C?SLgM-~1O*VQl)p+^*e^$FN9TLxQ zygvhB)-p|8qrr4W=v`)rT*-7{D%66}hs2YVUR4kG4=ws8{W^B`5j-Ol184*MXhTSh@38ZvsW*Jy~o2%;EtZd9Rf8nH0ZO{M~~e8y0~=X;jW%qFxfj5p)6%Qf~9 zPnU&r^C>Z86sLJhX#7^NPdU2gY6Du`BkvOc6dM&|lw=@Kn8N{ahWE7SjpfiU{KNzjSEz0a*&^U$nci{q8kb3Z3PK{28y{aPUoo&!w(?)6sWOm3);LFGO5=f`!s|=KB1&KPdYJkdjOgG-- z4<}ol7@XNp)ru|nk@g41j)^VIKjpShqxBY#|2WD z_o8RKK~3Q1sZmQz=ORaGI^wB9poWx~DP4v>xh|KCLaI;!$3bD>;+}lw{({=U9$MTY zN}_}rhB#dg8oEYyP}TzaGxZKsN*uh=hdTR<$3W<3#hEV2}jNk zIxjpcXeM6vb-oGlPunJ}Br(?IA|5)uF!Adhv!Mcq?)-&ivd$dZO`>7&A~}B;-A_G> z7G!{Owpjnr_mcZNN6IS3*&_x*a^3*X&KR18;UJyLT$^Z6+|0A1gcpRu^_mqabnfMj z6iIy;Q}_@r9kU`*hktC4JwGNGQfu8n0H;34Ocl2Gyd(Nz!<7yjvz(wy{zF#s~7Uq)4|Eh zoW)1ORb1T3n8wfC!LMPRxDUuyGoJ$Mu(CY1BZW2$jL_nvYVqE7kq2Qc=>?A>m|dtS zK4*?`>2`d!W6YqRh)>Rpb*hw=FQ4dZg+&p1-U}M`6$C<~?dd~jw3D}z6KCn7$D&5|6Bpg(q_GaI{CpmbomOBDQNKgBM4>f4{7g=J@WX?XJ5w(9KCnY=}idi@5mowY--dFUx?563$C*c3p zGp~{;_xvZ_<$*kcnEHZ+1HblL3`cVm@@u6~@tUf)O4KHHn_ zZ?B@G0YcJ=D66?=k|EAYysL<8E>Z*8&opFepUpS=)wN?Y{W;3*{|3`0-|~Q9o?g5$ 
z7wncVe;Qcspg(o7n|5RbR@e z?dOn6G2hUsADKYR8~E%)meL2VovTk2A5lPthmJsOuPTAkA*HgqU&O_j9a*=>tXQNW z_5pTT*fs;2Be+6q@p#6JVmR34B0>z=of>C%SID~R?)OP_y2pBl1 zeh}NbdQvE%p8dGhpEF;#cTx4-PVR}qlaV$`4aI@B7FpXg+S(%!H_amGzA$r%M=B+o zwd%Jf4mN8ZBylWoiN#5*ON@pZo7WYuDyvS`Hg*Q>SAz_JM3c>ZC^kbEhu_(`$#@;!UJ=}8*3iqddF*(xKOcH;iYVo2 zEiYo!W0&@rId z`Y@=whQ-BpC1kL6|M`rsa_}Gq>oIl(8O2tnQg30Vq!j$ddQ9D9Y}X1)U?TRd|CdwP zJHJK;6O0S@?;6{RHPHyIJxu}$@8>AKW-c#(IaZ?Fvv-=b2-JYVmXWa>98Npe7ZS^#MG)_<-x6uT{_roaavJR z6Qty~ew~p)Wwyu-7TQ%y&9>9>-yqy(>!*}fKFU0`jYZ!a$11i<0XL*)c^#sQ9vvZZ z01ry}5tRSP|m5QF5I9QVT&U)tB_pZkexk~APGQubp zLb9;C2s;3^hv5^vP!TZde%yvIH=fkRPUP`jtL&;JP-~Jky!O|?*2(QYE758W`PsB% z$Lb|l58Y=LlTueE!PVGlLL(%%4XFX3`>%!{y+82{)wwK4RptAf`PdAUJfbrWFnwhY1)jn;SvyfTV36rsq@4dqbXaE19`o}Ac$s^y*tDW(Y`b{_o! zbDTO}eU(p;Mn6{Rqx){El*i&-gg63p5VIKt>>jaCe3h=1%v)2$_Rf|;?3YMCn0k=E| z&MidiCjrg(@&T3CkMq3AxDuVr`d-?)qrou{jg89hDh*6jEzch zT1JosImXWqY&xzVw>?iPYdiBh+7=L%9yuZc4@r?u%~RoA4FD+7N!z+tlAXs)*TdHu z!9T&gs0w*XWKFB^e-qA^{sRK*sg@pO51^c zMuYJEIa&ywlq+wHYS;8<&NdyN9KBha=5{)c#y018`V9bhg**z%;G4ECImAKDws5e`gUBLtM=+xHV=isw@{eN}?D@J3QAlos z=A(}3`}lA4-0vQki!NhpQ*^med1ao&53LTWuRx%3k4UpTgk+?6Q$;SOCj?PVPEu3H zmd%Uf#bv7hjBfu06nFkFvw#(y)mHlN{Y&oa7yaT>oE)2F*@YrxMo+3ihP6XAy3rV zD98*r`v|aiv7je@-0aV~?(h(cW!Rnl=M8M>(7e!HRZXVb;#^11hzkI~jr9B6)MDuB zwouPn<>%il;`K3v35ZMsFC@^RpDibX#8XBI{UyOM?05tumi-{5dNSpH1f*Y!DV83*;C0KrOe* zVq}&Q%*tg*pPzql%ahtmN($ubu1Na=7;8Ojq!1K0G4Igi>Gw zJ2A&8YZM7r)H*qs$>IZ@_)x8eGRI+(R`lk-F~w#c*}YJ=$W=R?lU6swl|?5)b4I+> zs#<*P7H`iqrMP(iw3Rf*pMT(xOGRG5*=ZANyyuK@1xWbbSJ-B@^(g4n2^>R~vtk6y z_w;^g4Y4%3DMe*UZ{g}W3Oqf^!!{gMvLZ}7`O2eoEBd zJMWM&FPusBof5#4niZTm&V$2*;GHSK`Kf~p`|HbU!s>TqlIAHG^=+Nx%2)`*e`Q}G z`6y1m6ARIo;G3yz#SHO)m%#gs*OWT`dXKk^-4#stb!i7OOEH@2++VrJ&d0yU|coJ*Jo|hATAMPI>$}>91kMx5Z`NEUr1d1 z7cNorzgVKGgLviPKhtL+batoxAM~1G0+(AS;utx&?fjS}@C(JZgjOm%BtVxCnyCCA zfyqoyyG(YCxwr;-_Ndhpa=6m0LnJjf0e6H4`&3dn^&_*63vLRagX|nbYxX6I>7Y%s zKmlnpzmkcusLkV>L%l zjOWr-{{sd(+OPDRt+*X8*3@88RqK2Wp_2LlTy>ue)g9ihK?%%oRvPn|lT3PE^k3o^ z@VnN#$8VE<0d68_qtQ;1!8wGkn^8*Pcm?ZJDHo)8k9)dlU9Rlk&J?tB_i;zuAnq25_8!V3gE*JiTXK z%FaqxK)XltVMyDFnq8`C>bf8v?eFbP(2;G$bLbJdnFqp-+PP#*Mm&5fkcrs}bzGc1 z9l1XSPPJ)77Vi3wo0apWzu&a7K=K3~9&V3>^dWXon4Xpe#Ji?x)fnGM)lUzIIO7*7 zWso^H@&5H^gCUY)XM5lPuKie*2N%ryN1us{fOarbQoP%BKK*?Bj-N5|j_n$ZW@P5& zMPGwOk;(d|D$P$MMT9)9k>p}`jtUG{x;J6 z62y`zHP`efCC9le13q_Omr?kwP3Zt^J#poMs#N~fS+_pRhYs|v!=k+aWhoQKk}+vJ zneHA61UMiXyQH}iD^IV%bApx&DIXKx-))ny-NPSLqJ+G&EI;M0-Z>(G2i zG;kUYLei5^Gub#7Fq5#*)P4eX;(FdGe;rKOGPHDtxR|0}gS7|72JBq@GAQcZezc8m z%6$8^0DNQu!`2B{4PE-$0`5aN{eyE&X#^Op@Q-EUvhL(MNN{_SKG%}q@|XDlxd&0? zm~YD|=q6t~+%@)SJlvF;DCWS$A91Vcu+h$&I(5nxyF7{}H-m!M*1~IbcY=?KxoBgm z`Skp$clP)DGMrUSH=5V6|Ajpv<@^gTw#BP_|6g0A7wuI`!zb+6jw-0#4HK7ET0Z$9 z0=hDNG+9`XzPRh}sShfEiavmfZ(z{iWjqGP^#XLrIreG)uUwDj>Ngq8`E4qyPp zBpCzBX6-xa&K6hBB@e~)7qy^(i-S4&-b-(rK$UY=%zOFrMvG_+EH@C)$iUn_nE*dk7khl7xDRgnz0tbdFryA>ZpO&( zcN|$eWJh8cdkPR2LE!k^7I$$)r~>; z5-6n+^jGYQ@C_5#8P$!E3@~hMbMd*+I@SE<1|;p3{(H!V4e^3lXy!F7`503Huj)r# z&eAh+^KE`6C+uoQ4@}UjsLqP`3mJV8d+-wB9rJRYWHTRqZU8N9@1P4bohH1faUErE zr&M1^0*&EP*fnW0Ch&dcMnt^WF8xmfRD*+F?mGS6$<4QVPW`ek5b1!^E0u-qYdxg) zkoR8*fq(Fm&Oc)$SffJM6NaP#qT(0|4D2qF-V<=+2)h%?dTn`!!6=1+d4*mw{!k&Ox0N+n4NS-rBY~ zJAp7dIvQZ;cqxH2Hk*7b91F{J#m;VZZjpAJJboM{t2e%^Y&8ub_V3hjHANM0ZQJPa zW%@~gc*x4h!yOQ=CF<28o~}i7D{H4Md&e*JZe`AANf1^jdE`l)VsUFSulE*|uxevQ;rU+jdFk!B%{$lkGEG`oUjaiDBSXs>D0yKYjCK zRP?0vI>U;~s=jQl0x?22*wqG6_CPJB7maeV#wQxjvdx_n8bG zHy+Sv?t;M|I<-3J?kAgrAMxau((BH zkDq`@%~bQBfQru*td+?!%Bn(WJFzXt%hwtKz&S@;*J!7Pc6w8EYN^X;p8ltl5`xx^GDzGc$f=HLUG@cWL79x5Eu&~e~8Kt0fiEO_-@ zOYnzg78wrnE=hKk)6y{Kfp_n+! 
[GIT binary patch data omitted: base85-encoded binary payload, not reproducible as text]
zhK2z*{nGq1tvcm zE;e0)asT$-A6M)Lb`g;5mwXi*5T6BCzrd<^@vF=E$MQ*;3FU)O1VL65MkkWdFQFn0 zBI}A+TTX7;2`TAkITIlyDt7;G_?$4d2w z(qj3pGuwVhv613Pz?1;qohgZn&?1 z%TsT=yc#JtZ4}ej)b|-gQAT8)vDUnh1EVF*v9mI@qM4@d2CNdbx-M4iI$1o^cR#Si z1XeEET~|+gbyCy#pJ^OU-~l$$Sd)XucFcV4o5MaS63!D8W#nQ=ZyO;?RDyqXOv^&a zFO|lgZt7#LD5z@SqHep*bWEWBWbVs7S@_{Oim7rYfh97kcJl>nbP>ND)5e6H|9Rf5 zAawl|EuYO>FT1Ifk)9KeG^!3My?kzyLDdyo_!J~7`d*d1@HJiX1#+m(&xKngiZns7 zHWNP#laB6l-eh$-Lt;Pw9gE!39H$06p6`B9p#;>gy^z+yCC+M$&*EccX$7p~cduQc zp<5H=j0{9&0516*J${SAe7R(*P7&MXXKnrtWki0JQ`Ei^nyFc_De9@};&DpDx=`wz7pD zRz2TOAlV4+G>URA1PywaA9LIQ?+*j3Cb>yJ>f_pfrBX!K z%#%Y9L|_SM)_gc$_Sm;&K_+b^h2B%cg!pg`GV_*Ei^*G|sHC+@-Au{?eoUHp#dpyR z+cCW}LIY$$d#CO~wG<3UawtPvmO_e(V|D-N4Y5TeDJ zE2|66f2mDVm@C?T$_{EB4cU6gz>uk#7rFiD)xR5WJe|dhI=;5t{v+cP&o(|xMnnRD z$K;Jq(0YjJnJJ4K0dbv<=+4K<{=uaNcnzChyB&Ez^aFnf6RR@0pmVQIzC4%oSj#FC z>hI9Y2y=yJLs#e^gj_sH0BYL5Rg-J@BG;rPo%pviQYJc2j<7*R{rUG`Ll{T}*d7^)%1`iM}U(0rS|UCYQx(d=9-uGbCvstzODf zK8M%lsOAF4h+y>0zxfB-ApCN=qbq$??nIC^b5>VQN{jPj*L{emPWn#?jIDqs%kw)0 z4g+yrsg$yACa8tAP&p1bD?0R=5&NC0YZThS_~o=RN5R-rho3`2(=FF;4=Y2)duma~ zRlDVMyuM4OYEXEppcxV=Wc59JlkX+5SHrp=cnS49@XOo{L?#C>)iTj)_1)er`JL;J zq&^-{tQcj-x6nN*+l71c+0Dzxmz(LRemAFUwuTtA9+-8(_ zV3z8byc^cfCtCZ*se7(yp@A8*neo5zl${?ZnSk8s6aC1f>mwVeZaj0>yS4gb zSBL3*xOZ+%CL7>I6`gRKJeJ<0^62*eg+lC88E#5Pbnx0D6$tNcCb{}IU8tAZ+c^Q| zMHmum3~kkGf@8Gc-}D7O(%fsEa22LKiu@qV>8b*0n8#L8_$rVNlFxn&sk;supOSjtZ; z5!nELo%8i+gA-n7d2{;C~uL7_SyQ|CI@+{guH-jP(3nk0J$dnsksSH;H;Gtp94D$VqBA! zAC4zbH=)DG-pV6-#gVG}r}}2HcOsX^@th1y6k3Ps=;_PGgmWid_2C6j5ys(h1trTG z=KTwN=;O#v#bT;@e0LCem7_NMP_F%wRKZu&GO|=pAS0iagJ0~wsh+;yQdXa?KZlD+ z`5y->!JGM=H%0YQf%)EeZ^Y+t_2?SUbiCTD5|)^-5mtM7MJQ)r8X25;;*4w|`s0a( zsC_UU@yLpjSy?`I6TUi@s5%Gbj3TO|kto}5z-Q1*N`Ibq2$~vQY_@}x#Kk@SY+kV$ zU1BSU-DqRY^%VBl_}6ch&@x&Dl76TWy1LBJV)Zvo7d3m_NIqXGa2YGJ0W!rpvSo!Y z=I*h!1+N%x+zic1Si2yaWOZ$mNPUIDx^v#0*P7#x2=t7duU!*J#T@ZD#1Nl`jXAxP z5>$&XO__vT>09iF_|0h+BfqzVTyv2+O)7DTkFOTwm{o|JKwS$7(^RJ{3Gm7!?ycuo z*O*{;W3nC8AAq&<%Y~ax^f>+<0q~h9|B-0K06r5pSOq!0ie?*E_)I5lfh-BSV{i> z9XR7)pPh0_i}FSntclw1(161|J#-<^8!Xh?ngD3;Ln6Tgm~>c}X>Q z(vX-PqZ+3PFB|;9kEU}em5yp(4I(Bbb}aBSVbuio<2P19Hm9oXtfxA*-Wj(@oVf)% z0g_vl+_zfeWrBmuqYt7FTDy`{X8P4n_%oXBJp6hV{~21+G#h(V4QI)P?`!+BQN8jP zFG>H&wbDu+pse$m98L}QN2xArSfukx?_g=@>CImfKC?BilKK(X4$lFS!UAS`?_fL!_M>~1YMAS2-zzin>DlWtbr z>`?7*#GaMCnGp6l*?o)E@)^kLKNr{vKBP&V&`M3F2oa+p%w(-q7LZBZrSB>j6EQ+t zCew5LN!2ahO@6iE2H?fa{4w}@oS!@Fj~tg5z7C@YVAWw7VZT~VM@My9tb8E0-naE5N{nXnFrL{bv)g5jiCR6vM${(9=Scjp zX{RvRQx}NocFMmVu(wETIDJ_#=<&_Nn8K=+t4&d4U-KvifqB5FP@6y0rfx0Z7NTU( z7%(F}^83fz!yV}q?>U7@>grit>yWmm5{-YtD&K3fsu;@jD%QztGz(Z34s&6F*9G4F z!A}sk$;j(GaMIf5==<$lEy5pYuaaf3%WydjUV*d>L>o-UAM<^(Irg3GxYDKvOxZBe8$#6H_!1l79?m zsRRs|*3>vdb1-GMgP`Rh9ar{7dU_)6T@JN-AN>>lqlz^3(p4nwFw>P&&Eyf5+zt;) zg6qiMjAW%>Ft|GL%uA3OvQ(`{?7fH&!s`-*x|!~Pc~xXRNE^Avl1Mc-FHVX1!V(~W z1m0xw5bH4KC3zvA7gHR;MRNC&kt_zQekE7oU)PwdJ&q(f+~P1}El%?+Y0yU)T8sDk zfsYqmUPyjDqu$TA=K-&xzx|`@Nwso~oA8}KDMW1jXjcC=_DZ#14(;ZHsA2O3h04l1 zDrEN8edU}=73Ti4Ln@<}o854IPH2wMfut^qk)`mIwe%n-q{ebn2F-S=}+OdfQ+o6~q`Wm(pQ)qKK5}vZRpVxKRZU zXvtKO1N;!g2k8&pcW>RPaJ?j z@2sz^y^kH{*j_s#0elFRf%+1sKx{{-Y5@$F=d+dy=Qgd1UvEP)fW|Q%VDCKvS*HD% zY^bbn9Q>VR&}Y(X0!01f#xrC16dBe3NRapoy9Rd+DwnS;zCfq$Xy-wH68NKHurHPw zgQ_K9EJ~ z#2;n%viT_zA&ao`(%ey8vV$H}+0G;@rf*g&ju-T4u}B$gq8yQtpWsSo5{RJ_eSTvx zb(vVB_ZPdt5u`iK3^kbyyi)eDQ>$6Vd*lfHMQ|i%`ev)-We(L+czSi7*lR+Iz~IkI zOLMSTEyf2KTsITBgE;Ggd@qaxb0+QskWrAXz{cc{*O!6E zhqZgsn=c3W787k2W7csFq_rrPGJlkq+}m*<n2?7)PJy#6ZI>I{DO8g_k(;X-@z*1hbaN6hlf$JfK$TpR@<@&CIL9E2WU-~Wqt z9NcC|a^v>-H^6m;7lDL@OFI$29SI!?j(oPJ$JQMD3d*$8_LKU_w1f5@G>udt@#0?= 
zM_KEJzVi@}jY0PO7jJi#M9%S@5SDTm_6}(84y2c3*>@*?<(rjT?kGBG z0;~4sV89)pc|;%GaK@$ee-?jx6($EB3NLBBGf8NlIwnEHJ;Jh4dBXmeOb#Q~T^b)V+%#4h#658Rr1?Y6$SVHuz@w_T`V z`#J}plKDRXenEl0qH`LTXv#mx3j&REW!~H(8hw&ABg(o`qdwFH-jSr7DO4$*0)ufDjU2R~pk)CXPn`ZAJIKF1YK^Ke z=!B9WDFXUHqYmzv08;^u616WnF{V<8@2W^@I-F!7j9UAt6`<0JaB+_Oz&D@nxbV&N z)3RV|-z>Y04NiHLm7Z1sjhK9Li!5l30~WMiwZ8qbslujprR{~2VXarDQw`r3H|eR0 z8S_pGTZupWRwZ%r9+q{&I@aF=Iv}%P8KUxptI60>_oVfob!SIuSz+<;$hOzJZFe}! z&Dbh>cP^Xz3F7u{Dsf2K_hZB#VzJ4x~1fB5es zF!JUb-eaarx)s3(IaPGyz(yZv1iiR57yzwrQ8Os<9C&E?;ERnmVo zA8tj4AKNO%@cTFXMpWnJ=2eu1kSPi@-m>OnC2~b&P8?s9uC#Yphwne~*$8wyZ;*j?PlmFsQE;c8l^PYniE*^yv2w*~`4JAP!62JJx+(qt*0I zIlwuv4{l)W#Nw2{V5XVI0gY@w0F!w6&Ge~HjdnVvwaw$FzF~Il|A=iP1LF#JS+#2oI{wSVJ)PNoN0IDX3aGGA21ctw2% zps}bm*CuoiHJ~w0*-5KP6>gj)LP7cQrr%4928%`reN2LQmf5mJe_(d+|EP-t0f}=P z)p5{*P{hIE+(MFM(V<`$(ouy$CzJ{d_)0AYDa1$F7#2LD0>&~UjPin@1pGA}PO{)% zWT26^-(ix9Nsl_^eNll%txJ|It$k>HGd!Q19Qb_;?E+i8Mp=sqPKMde85d7&W9nba zJ8421%)6HK7&oqOQSoD#w5M8`;X;Q>&b>-YH_JEFxql9tPG(sTyyX;=Js!^hpmEjG z*EwrO2wgA{eO$;OiupY4eG6^p;ngtv7epv)!$le%sXk0owe2nx)K|FO}M) z{;PaJ+BpA>3lNo_)98bi0Ak@5S!;d(jd03u{KhsY$lA@@-{h2n4tp!>lb`T;YxVFK zk+p#;R)~|u(Hak7RcVD~55N-~NRKDWiuOFNbFl*@E?nFaUFZVKp=AqRoSLQb(67(Z z8#OCCuTgI+j_Y4g_Hia6&Oe6rr@FYq@%deML*P;XoNpE92mtNh<71b$TM*|D zT<}_f2jL)SPp1Z8JQNA`GK#PU(gDY{*x!Jw-%AVZTtO{ zK?@8B_hk+|QHMAK z5B#BXH(%Rv7>j6d$!c4hozT4;`x; z0W_kRoQvU|b*}(6=0v)+o#%r3^Gfiy(*A`!F(*29;%iYKi)+&er*7B&4@UqVfkv0V zl?7;*a9D`KoJm1O_^Yhcxq>bVK`udu#*t-uUxJP~*iFFpxp3}pnh3RhD7siMchkAotuUi`REx%JC#)lx4Mm^$e;bLRYy zqoBU2HU}+i7dUh7N0DGp0vc7ABT-IJ@U9a|f`{-|!HwT62j0OKDuXB|%2QnO&ra}gxbksk=))14Tpjvtu6mXpaV(1^=`=FIpx_lfO;;j%ni zWznd0NjkYDi@&vA4c|0=+`CR#;wIv8akBWaZ>hCQ zCG_irsgihEXiEzNWd%3+`BsHm$D~Jj$NmhUapjV`TozEgu@$%b%0UYd-R(e*I3@S| zW1H-DomaUym~g_HFN;%}5FYi%E!)T&Teso-_U1gjf2=P*O$O7pp{0ha<`qOD4S_7BKCzn@lecA}OiiB}p1bvOc+h?fSGflBg1 znKUi5r45h1!fkcJIcURdy}|ji9OYGO#!2IzZPu@Or9EO{yV+*9%+J9(e{kcfuvrTO)x+y>&gVF4$j_ zz{We)|352_W<GS=w#w0FoiZoB1T3;`p^ zyY)AIc+g^eT6%HcREFX^=OS@o^Ww*`8W!`rcATs0tQn^(Qdw$I539y!QD1sb#RBTArP zF9B#QScSD4xdCV#ymgv`qAEz*E(kR0ZEgA;?+P^P$~p#ESVMA^wi=-E;SJw4%U8_y zvy22cKd>D9%_k-gRPajqsn(<`*T2AJx18Jo| zBL?oC>)R#)HslFt^p#|-*El|qALNw}OaNrU8K3~DIC118ABVK#Y!*0vV|w*?>V(BF zS1svNS=02>c;n}#eW(%T_VdGUDvxn=Y8}&pEzW|13MtYUD} z$!}QT;oXR0`|t`kxy^E@yDIsJ3yV>LMrdPL5W%D~t~>~@j`gMGRIpO5QK`_W++H~M zx9n>83p7bL>E)u#j=k@VR?bzXYXqdbb?4jMxt8jW{=dRr6gI^!Lea4=;ddO+h%%2F zIn`M>p-ec#3M>0@PRq>n{BUd=@Mk}v6-A|%T$)zD1%n^n`U;m_(l$Sgd${Oe^A2mx zXv+d?#(C4;W0o#i?RX3j0SY;Jon<>E%cpIaTfp@)1cvjU6UVQ%v~)*jsp$fqAC|wB z&*UM$;feOF=QU<6hmJEz3Z9k|d7>LlwN%0ke?GuJ5BDNFUD z19^DZL>GMf0>tkxla0*#smWL@0$bPbdBU3DPDY1Z5*D}8a{I98+Y?AU&~ z;XosH#GSvm$+c4)Cjz%^&%4~t61=NLAAYOL(l#|b-ysjqU)U7sqgfV^bligVEG}a~ z{%~lIYgWE3v9GBWJ?1}X7kb1A=cuF)GGLdrlw^O#*h&lU;T@d@7W<&VpLaqQj-B*v zx5$aKRxi8TE>iq7lO1x;05lG{Djiw{3P58r(5MQj);cCh{v=4h<6S3GI+4HtTMlUS zx1=CFRfv*+Mqdfie31qn1U!{ZPC)TZC(P>a2C*~$?V|G{82m^A8t2*rg|WKoTUWUR zuK(j8j-Po08r7P#QlOFTg5_!z(5Q4=XL$rP@VB-Pt#5wEvvf<>z^&}?$7J{ZjlYRk zyL$OPdm#G)Yb`ad~BA>LMQ?a2!ia4-*g>@tjroUDMi@FRGyLM(sFsEG zuYD&j4%7Fan%1ea-?Myq{1(WyEAA;dLIH6>pIUmtnsM^DUoeXoFOJ4fy>ti`h>#`Z zm7l-RtKAzJ%?9=$}~3h&$i2Qd+fNl&hPxv{hF7?M$~&qkjNw^c`iP);~aL0BsnHuza>|b+pO8OyAlI&8Nm^Ng~>={2}=FR%$ zszEu(0R;6$! 
zRP+zbK_5Q@c&t4q`}2ta9_E4P&OaQ@)shyjl#iS{>U=1z4>PWgTlB*-4G3j8m6e!h zqCfM((m0uf@5k*}MGHpsW5{$k=%27~j*|oTJ@{60rT4qc);$48qR%3Yq2>b6IMix% zh?S?^fkvGeaPU*bf{M6Sy8?|g>(=jh*NK#-hdjyxjh=#r^pb$a!^gic00`J^eowsV zQza`qnlI9TBlv;A0yl2LQBdn3z;O7Eba;jfe$yoZjRfbIF2n1SgE;)s!fBj(zB7L< zC(~D;krQ=%WBXvZxTgYC01^%!7+8IVa@|a)6LRM3)35C#jt_{ZVO051r5=--S?=cH zsU_{C$Ur082YHvZjVj-hc*-;z67X%W!swNe^$0wN)rNuPXq?9UOi_g%Wp8Nm%q`1*YgLJ$4{KfwYF2W zkksvUtQ)mdkDFL4eQLRv1Rxth2x(Zipl(gMI_P;daf_=kK$9s@uxxPBU%N z9k$%v5va(rp`Q|XjhlQkoR%4Ww0z-rJhd#e+#=wL?|yrMUy{>!+pVSNrLNskP|8V_ z5#aZO_6Atci8`J$^*yFzSZ4{)hy-xEDHRDQwE!E-DFbYq+Q+bM8f_crSUqXo;~*$N zz_S;AY{%Un4JbBp-XRtLs30*f%4c7OYVhwo@C^4;x4!*<_Pzs5lA}DkZZGGYb3R?x z>68dI7C-P~Fv7+p*%$#P{6t}7jF7+riyRF|A`1Z)B4aF!0UH}EfrNCroP%^Z-|6z* z-tF#RZ_U1a@6=TH^vv|^&PrYP+;&feud2TaUG?@CG~K^XBO$1EMOofAfx&oQ(dPsj zX@e9X1#Cj%%7Ys7hpqs~yR}U;QzrZ$GkDNK2cCn!;6)lw-65Kt73Yr?J=_y~tfg&5 zbIeMC`O-CMX{*#%Xx^(`vd{+-u7xVm%Douo2_z)(>b2? zx>lf01#$5E8B2fWB-B-+5%E;nd4qtG7)cKxdlb6qo_HsX$<{d=o zTbv>HTRXrAp)pPRydJfH=BkZ1@~69|0U41|*Gknq(f7C##-8KksjU%!%Cv#~XPcQ* zZU}xT(z6Nk^O|;l!Teq2ngfkprKg^v?MwqSZrkz2=+LA9S|hOx3$_bZ3MNj~EYx0I z==t)~INweqoGri^3^zf3I_ppyG)nLE*`3Zd74wBur;%SJL6A2NXyl6pd9d(~8}(n&Ql3!)J+!w7%+2lm4HjUko%tofcu#i07CI zc4MDq;QRwkwkr$zIIJyLFe{S$Ak7en!!*5W|HtuE^H+Nh00KPV*I?4&T;^}>KR$!@ zJJ%16b5%#9)H4o2w9ST%8aAlxd)C9Nu6vC0uiW4=UdQOVEgx|730$$w%5m_XBwB13Ty&Rr$f!bV&Vi9fh>_Wme+1 z9MG7g-mL9Jn?mzI!9k7<9TTB4{`2uS=bTEpG7VQe-vBEuPlrSbLO?~m+l7Mal1en9 z9NMLJ=#uPVB0nWDaL+YOr9a{TVSZEuaBxp%+4YNF;mfTrX`ZFp`4pnfz79!q@3fA&wpleo1|MwXTBtxHJ^~IOatoVLv(`B#mJmlS8c3J3hvg3;x=LcMeT#B+WziH`abQWP`(|{nm>+ z=B5U$=ar~c;~BQ^`DR(?H+;{aE+1&*`N;7gFbN z9F_z$Vjf}t17VXVj;|$nT1m#|I8Z!_NVw6plG(5x_3Jw^_#S3{ zm|eKIr|WO)yUA%#_&~VYZzO&Hial(a_p@KFk$>kvVsI<_=Z69-Xfv{da{jx%R+7_jS0};F{ zbJI_WPR)-w;scG~-C6rJUY!CAx4b%R?ej7`ZOMs6QDYj*8C{g3v!##eK%`##67!Z1zXIA>mbPywJGCmh7JTWy8If!okQOB@M* z9H|s2>j+)r=58o&SBT4j#;(vyPvJHu0gaR#;Nk@~@kRM$VHpnHt=6FhLQ_*Ocev7n zg?^D#0F9d&Y+4}k=?C3(>EgP5_uWN@%@EM&Z)iX}4bX^udO*|XKpFxLDzvQ!iL;sZ zV93p}wo^B0>ZP+lrSMk)B;9Z-pESST^W}=TG>1Jco5elr*XJZ%Jpd}u7|P2sypk?{ zgRh+hG_oCNGl53G{W?DUG(HceQ3dwq6tAho#Hrf0FJj1r-I4)K^-Rb?y9 zK54y`(x9WIr2M)Lq>(T}d)2uEcrx1Ba>IBp0BsjPAjGL*qs~=p{?m*a(_*EvHaUA? zu-Qt`xSVP`dC~}1?qkswte2GRir1XO zKFcdqwBg+zCU6kAXH2s`rfmn`AIP7yj;5to?|f`8-p3sQG{SD~E-SUH0ej~-JA|4K z_h_$b9|CAB2QFf-G-l)r&B$TTEDJEQKFluyjX3LJkCtSQpXG4nIA>frAe--Ap0f+m zo$t5%je@3_?#n?N<~`Dcj+j$v+L2YB9B9mnlR2Hba3B$Av>R4@71x(v+{?8tjq4<$ zT-;DS4v1&Y>~<}I#8xjWt$~*>H#1PfTEFE(B`wuWmu{HUizy0a4K#u#5BmBXh{p?a zyHy*cbPnR6?lySP0=KzBJ1Lnne)NmY4C^r!Ar5GqHT6a_cI5M29^G&fXiR#E@FkAI zS#D;WM2?ks!zMHCD;er!{ULzu$pkb4Fo1eyYwI~DrPQy_Bs{(gIzB>tKqiiJN`|S^ z14zdZU;t<_Ysx$81~d|6gsQL)Bu2{kFweB>W`0A8{h2!i}BV`3xQ@x z-*xNO+j{r2HXC<;)0HbHu|jEEtqi~v4{(gfvtb?WVS!WthoQ|Z57)|j^p&b4Ln5&G z{4odU0o(yk5Oj@ZoxGCAoENme7gD={gMjs0Z*xU?h}QPd?j&jRI%WLzc9Z|7u3e17 z6_Qq%uM8p34{6#z_^o3EzY%yi=W}2)4loZ+QpjJbI_rCi^SwZ&@RvMUx#3-|Zi0F$ z;XGpKGc4%w_mOD@08@+Se%7fIt79yVOHkG^tF& zcnx?*pnjN?(7H8`-YcI&JZ4@6SChsT^DkTTCfg62-OnudRcJ5XVc4&XvG3>peW$QW zcV$18ljvK%;&*1o%n?yp?uFpJQ>9>|3=Mq4F{4e416egsXWX-YIA_@goiib@@yzk( zn#GG3TgM@HMu3G6G$xH-)>Fw<_kBrYUL8nqjVxD>u18qYDWO|VvqbrnG{3PH`fAll zZ9{u#ix+K=o3q+(EW=5atN>ohE1RU98iZ_zV>bItCGuT{{SGGHt|hP-Ay-*3V_A| zjgZK}moNk^OQ11nqr#Us$|h|FkzPq4-K4rfFDYMF2O7C)#dBgq*J^FHaPz3V*6|U? 
zFP~3pOAUwm73*%Wo9HtRw1E0_JtP|OJZI1DjU-WU=unBZG$0^&ktP7RnmuPk#4naf zQ-{uTmQ#K+F6~th$Eq^PKH+9v>qS0DW+UKW!%eK;zg)k#euSh>Roz&@E*&>$10)&& zJgQ?Xd`*v&dSd(Wl^W|-%Y`#eQUq-Vcnz=3hmZaqKb@K;r2cTo4D|tlM)2U+$(2qW z6!}`)nv1@AV`B;sD=ipG-ji=S?)_)eLo_8GeICJ}KRkzO0JuA4!u7UI-?q=U(|P?J zJMsmZ#ofoL;47GkjG+TKdClj50GLHB)0K;w?R-!YHv zEPw<_`LsQ{9^t)4aKV@K)H1kNJ}LkFeEf|$K{F3YQjKU2fyOwq8=$G{1?%9I?eT#| zq+_l2)n~D8KmD?QWAVCOxlhmvxWCZ2TJHHS0*$O`*V3*uWw916TC^xl){66Ry!t>R z9pdTUFNrwl?A-VLAke5%?V!QARLxpm*z4VDh7Y~k*?;BUBB^am%U%?%hpaPnP4huARIX9ua*4)9?U}q?jsrQ+SUM267?!3KVz61@%KG|rpx0W)IQGaPx{a1v-l0ykDJllGND#;WR0e}N@4RteMb5u{DaptVS5vZwkmXqS~53?v6XJ3zWqB240N79|n;#sR z64CV?e1zl+fkOR=go;9kX!O&m)7#v*#fBK*>4vQzDO%He_Znf1`@hvn2i)q~i}AH+ z&ZnFWF_^XJ-qg><<4*`6(0)Lk5P!&Pq#d&=(^N4=egxR$nl$#6?%t;t0+&_dMmOuq z14qpMfi&a%*|*vE&@`u>tw5u$Jt5FX?Hy7z6m+%R=`)WTs4F9IJ#s%%{=u4vLtMF_ zb#l&y%BZwA(y<4aJnq%8&Kn3cX6?mU+e;gkY_G0S3M4aKuRL6XPbkMw?V85&opJS_ zA&oRmXS5S_>O9o`(=~~0YqjskHJfgU4%e6~J$ej`BpR2_|7!FuOAA<#cABg1`}mP1 zR8IF))c}pE@?LKIDW7z|6KWUZR2n(%-GzR6c=h#e4N7vhq3>wY=auaV0gZS+@818T zBA`e=_>)FGuF5mguLgu-+0d0*6i6!v7Kh4rW*pbTdbEMO*YhFHd)WIRB)xOAJ)nbk z5Y+n-u-6-I|KrM!zPc|7X)P$dxO>b4)IsUoO4b#;o3IvFB1Nft80D z0x2{dn;lh;CD@{d=ciA8b7X{GAFx`WF$6M#ZoPrV=`$|^JbaEcn{Li?mSJ1V0hF4b z_bPdWI7+qoeL_j*=JN$cC{IRZFbo?1c*_Tpqt46lh$& z?#&KJ#71(#?2otuI|^a!s)Tr)S7KK|`yYKq@DV@@L3Up?e9QLF+XLcHTuD7dD`0>& z7z;kG+Hj)-O<5Pzryt+_4gPdef&kIs4RX4bG>54U1O*(JS?ME=V}$9rvR`0BZq>$r z&?I*cRp)s#Kj7|tdU_BcP_uUP`y7uUd3xoddm?(^&x-YLk9dv*z6aQ63N#{}V+;2_ zc!2S*&SChDWE(Ml)^2)#1h6RR33^$E;yE|Oew4dt$5iV z2JOKE=2`Xf&oO(=*W&gLcOMZ+l#;sIw@mY|dbn1AGRyb1UuV63EBK>pS-tA3g)Y}f z&`_{n=L_irq{n)zwoUPY0`mjmiiLN(blmH^AHTB*y6=kv=rA6)?)*{&pmASZu71vS z&6j4}DxcDhRm|OTQjKU2O$!0Mf#Zz0z4Vn2pG-KBHybo(>JrR5d)d-}{ybK>+S=QxPq zB4{x1J&xbig1wjru-nVMNm?KjbwW$V*blGWOcQ>l3Y<$gqre6(Yy~64)tlrH2K!0a zaQ5HDsT$J2nKA_$_wCzne*W|Sw1?MyX2OJV=E^HCGjrz5N__n7+jpAv>o*nUX=!OO zmtD5Vj2Sc9#kaM!ncw~H59T+&dB6^YHcNM@x#pT@T9v5ugF!3ERi~93jeVa3jivpg z8}-uE!U0}inh<&4zUS_uLl(;MK+C+oS_EnH5XIlvaID>YYb2eB_z=*@!#dJ=sN1pU z9_QIw<8ZHrhOr^U;fDujc<`D%%}Sl?U)pzOD{5>Sh9)6km;$z?b$zhz|;O0qA&W#b4P&%Sj6&UG&y_wt2tX zpo6DS1*LQ&elt`9G@@;1tmj@zGU9N^dpuJ)iAI!Xp0pb^w0Fgdht2evgI&|rR$i~3 zL+u7|Bp)ew4XGbhQnwP-%_rkAPqfdt(L#S8I`Y3R+W;#G$AG#i)d!n=un&uXO%^;% zH#PNf8(YN1^^pP$Ja};9r-weqspn{d(V)-3_N(+G)ARtU;2nU`!2=e!Z2mY^ppl0_ zWW*+W+Qc_lAatf3H|L%66V#p7nS~y5c|hj|7PQr)u1Eqifz_+RWLarekl3 zvDVt!?ADW_#0-8U0gZTuW5-vyJ;AY)YuvjFRr>qgxMqabqcnZTaSFP~4_n>c?1wh+ zEGg)E69P>uacHA`rFC-dgn&jouYM3ixjA-UK6=D+ltf+b{q~l#-&##0w2o@`Mb*@s zMHzoB%z`Fq=r{+^2)nAVp@{b*0^gs^A9vL~BgdJamo}Rb?|dMwU-DxJZ&ydbMtv~_0?CH zv13Qumr$E|$2;C*4jnpT-u>>kTg14R`SO>)VRr1;Wxn^lyQ~;~j}p4&B66Uy9ETbw zJqH>~`+^6w(v(6B7I9tt5DT$)*cwA1=*R;p^QyN_m$ChizwttT7~7>9G_ons_~`Oq zo0+pF*sn^PBVdr=9uC#3H{N6y+f#012~|9VwOjtt9W((FUAg!kcMxB0)m)ETkE#ez zdp&u|f)VXv_c-ugwfMU(4ffL3Y`(?a>mQY9#7psakN&w!W?S$h^uqJ`8_L6a^rcpA zbE0&8K!WseRoio+d{k*e-by7J;k}O_=Gaj$=oE=YpJ#Xu0#15J2EPs*`EMkfKE1}- z&CR9mo_~~%dL7n-J!oKaMEWeN*EkSM^XR<-oQQXa1$#)~1_R@OAR^NJaiQZNl#aN# zuzdMLX2z_cE*a7y=dH>u!rp^Fw;RBBxcj(2>~{cg^la+4Ev~E|uNy+9VV>x-af63A zc>W#Gf3|hbbkW8i>Sn*_hX&Gvw?NqRL5S-k^+XJMP1erq$x~Zg8C7}Z8V4`STr1Rk zgezeGu>G6uIzx47q>UZ@LNjafJDJTKJ+|C}gEzY0aAq-e!rw+;&~<|u2Q-q<_J5VJTRn6C5%XJPB9ME+;)BvnGT>Akw;?O%{$komc zs^(YPjMuf4yjD<`0O5vhA9dcEQI67pA$pXk!ra@v|HqL^CZu_&h7TGnOFNyA#DS+{ zl^*kfMwBB!jeGy#qX&QT8{h>@gz!BD$x28l;9XnKpyL7c=sFr#FM!aEkA2LpTl=Fd zCs;oTMtocd9j-ABy6QoVV>HdP$MK}2;#H!W z^MY$E+JHXb`;c>V+46rkXIsDM_I_-qzaB9@tE&Qyu=kCz3~6kD>};Rbr5`CZK6KCi z(zG;v7b-^@0B)PQ^$LQI*mopJHL5M%&_^`ZfoohFxITw~M$Fqi2Y==~2b0#4d3V@7 z^lbjD+f~OFD&d&QvO=w++I>-$?Wu%TXf3Ryd{SN=D-cG(9uDIajt})?ZW0ZoDIKmE 
zTBi6z(*V1MjML=Q_gW7M={qJhkqR>ET8ME;M~(al8X|>#;U#k?Mw*5rhyQWi_XweO zbW~g>K;!xI=gkXW_zH8!9iKAOr%#F6e(u~k7ysSwe%%ZkRyZ6*iT0+owbi`zrPrHp zed|l6U%$SZ6y5*(zyHd7_q#tbcinY+B*6&q@wVGOX-1D8Y2NUL>!ZABZ*rh9EvGs^ zUk)^u_JMBHOH)fF8vQ&fxrhx3ynG>Sw1+!@Y8EE{K1sW$80gdBEzsO1{ zKHrhvy8R9-eX`o!FR>0f^q{F1=6les`=m#VdA)#&kM z^7yg-_60Z8OrKUCL(*D5?~8cW*M4BK_6-R*>83!zVO60d9UK&p2VvXJJM95tmAj{O z5q{vARV~V8@cvXPv7_};fKU|F9C|zl&<&n#l;2YGfR)R9V-!q%(?G+1rknei1+#0vZ*-<6P&nsB;wnjRObFw&3&zXX}!-HVHtoOg*pKR@9SrU4ebi z?N+6lPCHN*>iv*1Mu?kxF0qO~>~|F9*u}jb^wUVm)bntS_H=XrUmzt92Sbe>bgXfl zY5!r*M5#yaVXzj?m^C1B(1vI9=V{i@9k)01>%lvF$lxUoL`lp4(0Y_++@hbcKfoGJ zEeO9o`dh)r-3R_Pav0{@oKz|Ma_bJ)L#+q*-0OybMtlI@ckma{9vtth#g}}pLx6o9 zTWBZ89PYCUT1UCPd{%X4MW>!?p|00{d-eV4r}@8izJ&Oz`|31+y-%ONwI0OjJBE3( zylQ>=5ubA@R2P9pniNXP8!F$`;xYjmH*VZw-ul+RH^2DB48)o$obY|N0kVB_3b?^4FM;fBb{(5|0|OcYnizT@SgU02*mm3lThM;vVWY%h$fusqx`(K6%`$qD>Cc*{-~Sc+{mzCCloyQlOFbf(H9W`wZhC z324+Y0~-3w_?;9KXvAh4^Bj^@0FNQ{JFtJ))}|gT@YVIek>5C*gO$|HcImS+4p%%k zf)DhqSbu|csIuAtI+skp+2z3>)Xj|+*B+>kj2`(sYfJO?BH$wq?9j0TAgG&_)+9}x zk9shMl=K#>PK-bh%l)u_&^NpGeJ_%XLpBKFYPMXDw7h4I)M;PoA*j_keSPgE6H>&{39ipYcjhH}n86A$hob z-CJGV{u+Qt9kT!%`OZ`DFa$KBETtL&n80Zc?Ki1&6&(7ZLWsjYrBhY18gx1$=r~7x zX%FOBAGX*zN4v<{R+t8uF#07LRnxup1=k;)$2y*qDyrww(W-tp9XQ$B&00 z8foIR9^|j2jcMrndHnj2UhW;11C2fxQ~W-5?6}#rYqwLG*6h1>?J=)>i*Q+CfPRKXPNU5L*05Nr4#xjT*;%yb{pk zZ&lG?Tsfc-DuJVhJl0V#85+-gP+ zD|iA#x`v`U?YndRwhtSCewEZsdP)oB3GCr3G(Z>y8qwEKn`Xd=1qPPYwTECE091Gk zKYezm%N_?bB9W3a7=HkRs7|iZCE7oz_mS1F2_Dw)Y;@WJ+Tij0Ss#j|-VldC;5KPo zA>W{0x0y>9UtQ$LifbjM-@q>%))vhAm<6&5l8`7{F3_f%jzicw^uZ#iIu9RuwSg+S z|4{{X=q8_as`qBb&6;{+WJi;6BZmEvwZD1M){uH&<~V`GEdq?QtE2+b)$XnWb5NsQ zxA}vSq$8wt=^)6_5)~x?=P(_>_lPg)gZv5#V4To?oR0rEpixzNHNOIl7`OfUOt$td z{|VJufNzuqQNCi=*7~YUyUM&j)q;&6gSiChVoI1IE@@psdZodiix~TyPoS072|zs$ z*OflgQ!zi^zm!&mMXvJiFS*Mjr0R;($h&DLQ=Y zK}QbG9igSr5kR8~PU!nC^jy`(>G<`5uxe>`RJnSth5GLFpGV*4ewzPV*La`5#q+4x zL6}dc+;8qXm`~r?jLY(>&qccB)&d`B)V(TcJejVUNHi)#4j(>ZRh@rqwr+jQ{LlZ~ zZvn)wnAx*uYL@8!2S4~%^O28y#@The=9)jUi++ni++BCwWA3}}UI$LzaKlaJi(mXq zk)$LD0BC&EoBqz)fxOA8P>+gern$+1#xz`Nj66BeSlSP|IVw#p4>Y0-C5Q+#;$Q(4 zHUw-Ouvjzro0e*TMjWn34ZFtKhTC`O=Vs6TpE~l`08*;Q{|bwj&(Y(LMAB%m%Se?p z6viQL(Iua9Ak!|Z4yOTthQ7?evot^>0WYP+&6mdUUSDcjAJg@!c^Myi!T9~_*B=Ko zf~MbBtOr-nRiF`ZqXt{Mw>_s+tU4Wf7QdaK9}3*i@qmInJhbSSKpqm19s(NKSCoj2 zv#*4Fl==fLKU4}dhT5uqhH>zib!I_NzbaXax^>fx-<52H9$o$mYnEWDlb$$xcE8!R z|K7;yL`d=u^bxeq{fB>Pf&L#`3CKHb-SJ*(3nfecvj32RXH{U)S6$w)&8io-JH~coQL2Zk}#O#YM)VoV$_3v!^edrpi$d{LkR3ohNMe4UQjRS5|E<}K(t8>>f`q% z)8A(gr%PO0@tcADt!5}7;nBbUbPK%wCR#uH^>~H3e97yJfW~PP-)N;BU*^CS06RPP ze&5`*`=4V3@p@2-0~!^G^`#nr^9S&^;F0{yx!uwGNuR}K=-@wka6{E; zB^%jH^a=U~eRS30??rva^jfZ*JD|bwpzBjwX{CA|u4{R|r%G9M?f|Lc=H^2YfY0?v z*Ql+Vk6EDc^5|Uz+q5w6P|pBc!dNHz_MT9*A7q`n?j@~{%pba9JVWKZueB)``j8VU zQ%RfxJ$PS3-I|UPRjTok)vtA?kI)yFFSsjJ6*?DDfc+K+G;$3kan4`s}R9jb65FrS;f)!Fi`v27Lef zKe04cSpf1~=Jl_?!F>DMUkU<^P^-T6)|<@SxnX0phYw%e5G(yvt5%t*Q>R*W>?s~rzkTqkTA&d|gmD1U zxON^6SQL(3Hs?9*$G88PS$RRE0_)t}?q$$0!F|A|RddyjBV{a*SyEwa?I}+je~=0@HnS}z*F%R636U|IHf=JePMs=A z+p}knYwMUX1@ZZk40%MuaNx+1BWCmF%~m~rn3*}VSTeS)&Du3-J77*<_`YdrD;R+v z(!0K@7>&RfK6hMOx;IU<>Tt)c&6i_wSh)-LExgT7GISv=)rN_wI4E zId|drQCXj-%`J~up5GItIotA!LS*~j?oA`D1kHTCRaBJi7dJdKh=8b|3^A0_ zCEY`Z(%mQx(%oIs2-4l%-5?<~ba!|65byo`*ZZD)2j3xU&En*`?rZP;tKF5&^}J{7 zrlo0Je;rW`;c{y+9pjnt_}y~4GUBy-a<1rH;reELLQ{U)A@tb06V^Dka1&s;i;?hJ z^J5hHa&wpanxr#KP*Q}K?e3Z)0yg5|^+c5{NF3q3vp#)F*SA^BH+FYPRl$DdrYdD> z$!ofw#y~qU#Wv%>%KvLaSa85#J7!|@lB1!tcJs(eB3ojTa}EN6Uv*@h^lAr42usQ2XPyd|Xp4$nDk ze4@S|+Xvr1%d&>seqBHf^|IA@vPQbkOqAKhensF#AYr0x5vuiK3{NfjmT>~BGia~y 
zjPxBR;&NPPbbk@W#haQhSyVaRhBvY}9CXkA1Xd*w_mmc&i=Cb~2Yos(tUHd&v(GI| z&$kk>bpJK2`o!YYde5PCio3YeZftXk({#~4+BRFBW@5RdPk0+8#`Xj`k|<4Ma0J%ZkRoxfgb~h|jZ<%K=a)`hmZh%~BMg?2_?y7JX!EO(QH6-HK45gYso4?mnpY3mc zEYNB{PAAyzf?YDCWZ#EzdAI7AE^iz_HDUSp`QEVEje7oC4b7HeDPE3+qd6$#`q;IM z+w)o1xDsaYDP;KLP{YcfhQM2u(-XC%c79#_?eUa9^=&s@jHYpiOyWn2Y9D9UJ;@df zcB6HqzBQ2KyEjyo&;7fc`C|%qe~0UG`}*ifmQFge9COe2k?`);Al05;_CO2sCZ77A^FNG-sfTmyTraQOY%-Z8ga}UUkWVn~ zuqB9gmm|}7N2dO%Enc6?Gea(niGQt-RYMHLN(dDCseOiG z-^MSeY$n`Shaxcg0sL`lPSsyuIOn3=DgqfSZwxfGs){I=I|{xBN3s)hikTGV+1?2Ag`a|{RGs+Akh4!Rc{7RF zX7rL;YaI~kszSZ^PS#>sR^n}CN9+y}y_4AXg(3&=LC%)ZK;*ZO`g+`Jio2(h>&>D2 zUyhvW0{C~D)VAjpt7;#VUOD_jia4FNPi}gcs4!KYclWe1#^%SIypk43t&a_g&h@*B z_X^DAX_6Mb*V{MajEMdAu|bWI7PUizYVlQzeF5yqVKepMVSu&p<La`R5yoVz$EDJ(=bvFSS^cQ9?}z=HSgndAbrL+#5Qfv@4_CLR`gULsRFf^c=ydU% z!y^bPZGl$=({YX`LT-49ESbKUv4MAGZ&@Euh|ItP?h%*~V)5!e$n32#rz^JYyt342c1rzDtQ5?=*YN4`v=Y|TThuJl{eB{}vSo4E?IGyaM#VQc z4lBGx>6*Y%c;n&=kRW$`M(o~UK(ctRGD{@AZMxV=Sck_w+@26gev zsVl`t-iXaSu9x$h609KJkE7B-8d35#J=&bu!eZF?|CzRXkPiQn3Vzcvy!cK6V163v0dyzfBx zY_-zbyW}t_mUBvf6UgMzORrH)jR;OjNhwn})7@EWvfpWfJz?6Pw&`&i^y7O=X*StK z)V1B&UOt_A_q4t|L0A9;25;vc=8Y}YOgB|7c-zFJ=^ojT0E!nQKoSJ_fzCM8h(X`J ziOBO0ih^hym=6zf;3-i(ma)Dmzdlb_vueN7s(3&Y_e2KANDS?NRUNV*BsjgTnCJ?!pANbeCB+{) z(*y+e)wWI}lCvEL_N)w7^xr1w^ahbXgV?KF{{PPiUza?#ikIwd9I_+|9^=>Sd) zY@@Q}VUK zxWcmAUVrJKc)ZwYZbkQW|O3d@Xw@Y7#vrKe?OC`J6OlWTM3B>_ModTx2=QDi)7v%Kpiy2?s4TX(Sp znIvWrj~6r((A5V+Q@w0joZhjqB9#1%|@2E<%>M2D@90NOI>?oD=WuV0f%NH0j-0Z8R9hZ|X# zBC&fxuWj~L?AKPwLy6)$h34Okq5m7`wHFLCyj-i+Dx+O|X?A}m8w(j3A2%3MM2eW3 zBWhT2_tby;7DycjXzb-eeDu!T~jYxqKIu_jXAiFf>EXT^TKwG#%*R`cvy3(UW| z?j)Es$x6t~P&-5$tC?{#7*Wi|h2@awUEgn<+~X-udhW~1Y_Z)>{r~HsXz6>tkEwxFw9#?1Z$0ams(M6FU99D2c4YW7ZN3g(BA8}7zzS*9c-z=$k z2{ z?D4tGqzUIv@60jxC3)NkOm{cF@luUDBWnx&R{T{`7C_b3-{br4fk3E>-f_^6ZCE0J zjnHdBPxQHwX^`ipu}G(W2dh4VQ93Z2Fa7&^s$2q!x8q3;5#kQ*EUsH_Uy#RHC(5W9 z-bFyC{$I6sbEtKe(mYwaMw%1xv9S-nXqyf*an72$mR0U8!W}-SvrHG>fT&brE$cJ} ze9t2LXBbXyMJyl5(Z|iSR#T?^;*1|oTZq-{cZ1`KE;arX{4xYI5Zw#EQuX920mDZJR{#7u>z?aAq&k^|?u& zlui#N-KwZ0&M-Qh)Wj0tT3>sR2uw~?07IlDZ~EYUqv{>s7(eC&v-M25``@+}wu%h7 z$?8TqaKD(@jVJilmpshLP{`k2Trj#&nc}bz-Lw|`1?Uv?$E%?fvxfME{R_~Adv<=b zgEssF?e8VU`|jSa4_%6jfk`nRY=69HC@4N~&8CbdFp=h^ zT26lXye9g!?jo+oyq0uoOJFqZ8+3t3>uxf7QVSXrJ#@V>HpD{p?8+2C0oQr?@tFfmF%u} zg&BY~coXeN%pLdVh+`*65zim}2TxXzP$+h4sQltfs zoh8{4^Ctn$o7(7lnN5*f+HuF7BP$E5_4@3e^0>j2ga~Ssc6eWY55?)UEx*}*7*2*OwpH^8`XT8BQ?KDYjK{_AB%nU_jAvT@0%iEnBow0B9!+IveV#H&S6aHhg82gxFno};aDEualJHRYIWpt}&)4VXxH z10LN=O_q;%Ig@G4s;+|~uw`C3Sd65+vssELCL6cqS`f?~g#y2xE<%bxWBF5!gQax3Wv0~cN3#nW(cMMX5 z)Y7*Y06!u?A`EMDzx@JiQk7HaPM)ooHHakUwG#UK*&@s~^LNM3QY#OJzIJI1kGZ+Y7c;5vf19N+7|P9H%JX!?Di`e03dYg zz#nFm9HG(s2N|0YVeqmTi}=v_Zo97C#%17BBR-X*RHovkkP)tLuy($$0OHV^05TgH z$vOtzwTQ~B@V9e4h&-b#!*kwd1PyTA7WC+&EKYSP==aWURR_(Aks>jiC}iN@;V0G_ z|2nNn7w%{q7!{x2Y#GJfFl?fSoI5YYKZQ8lFzpG}ZZxAy7JnEEsj9bndR5>R6an!C zO9m*=l1;sU{@##0x*C4Po+zo6L3f0S&7@R9hz9gOWw>2^)L-pK>BkM}33~PY5SuQ- zEt@oAuo}hZ#Y-h(F``pE5dE<=IhzLRW zX7);Euw=I!lz2=?Tz{ln3_QBheKkr4e>fqgC%tHXncr6%g(=3T?zaWG7t&uziiq6e zOX~jmO%k&I88ZuQwoB8*yDQlb|1Z zHkNPSr%M}EN$^_p%UYoOzKLRdiBICTs5UO)?M$dewR?TgtTWY!3dg;rr=3RscaVqK z{!=K`wn$4(AQJ>e^K)mR6+{gx%hjXMPJ*#c1hr$cAfL}_bI%GX!QqE1o|T>_e$IB1IawabZ@@|q zt+s3~a|NE_TS%Qvi_(7-0`VZPfdGX@r8HGS>jfNd^=orZS%sAbvR_Qzq@ z*e^3){-Z@q6aPmULwUxZdl(1%x&QT!lzVS;!2S5b!jIjo>y2gX$cJsm_Ue~4&`xyS zZLrU-uLzC~uLt;1#wMbp34fFGP^0|@mF~B;JeC;Wp03p=+3Ht3^`1T|{l!ej>gCU4 zkEk7SpG~9XiiHWPQ{c6q1Z&?mTH9;_>1U)PmD5E;{*b%IZ8Vb7=^KiRJ7|s`<4MCs 
zG1NqJoJ$Im?ZKh8U7Gm8bbhooxRz;$!)1BFslAZqXft~N?0vUclI(9GK~!$xFrMPu#NJW`m0s_h_t30PpFHe5%j4V7#vnU#tpm}a+jfflK=z8;*qEclttRbsNFA;$ zp2(q@b_&;rtWPYNtFIG)d%D>;op?@Mg@15YqKX&;Cl6;wHr*Uwu~ki-cJecBn@u!d z03O9@o3gNYfMzV8{Qe)wsMZ{PVKan6)btb>Pl%D1tB=_!9#^odJ3&sJ4`Sqqk2dAA zxton7;@|**-qvf1%|rI7{)sei&W3Jv;UU`VBVEa%0?JQJy{)QOQxV_-DoHLyY7T zu`I{rmnQPxyk zM09&jmEiF;bTJOHoO44PC7Tk#pmp36!c*7b-B{R)q9cCcTPxST#c09DpYng@i2rr7 zcU+bb$0z8Q8jL; zk$4lA{fwKx%ZY6pWs3#n=xm^~q!*NII5v&xEEdq1t)J8oGeg_yYw7-~as~})C8O~j zF7Of(=`xxd^>kuwdf)iA_SCJKW@gOk7#Fa$y5A;~Vn#T;L-nv0Q*G{iyD2Z0>UA3@ z9$Q6#s`iTA>3yReF+o(FO_!fW3`cUg4DozqWN}G~ij>u1;UG8!M^3t!^zNGt*o3Y@4NDBXuuVuCyPr@nQd}0_Y1by6tXCkHbRx-3A z4)`|bR75%htY}kDk<~|yAT^!Vt{Ryr@6U*$iBz)LG5RDIxO)Z!ZLw-0fV_42^@IlE z{#(i^nerRDOJBxAK%5aAFTkfoIgVcWg<%Yx89g=`3}U-EwU6g^t0rYnnxT2!|2}sy za{)b6T{2G=443|-X9tTARBk+PHP^$Xy(r;W*&76X^9dlbB>3~jJSvgp-qN@gWg*tj zYAinonC6&AeY;bwb#>X_2!KvR+15Eo=Z2PE#OSS(%&4&g3%*_aieZF zxnmLg#Su(f7r1|9yJH{vxVU*MBpS(sJ`M*GYgQjJ{>^ za*$xMb)83$<>a}}Ys7p}+`g1If?q3V0cytn@m)6-_Kc+_oT9X#}ecF5ytTu>L`(N^dF zD9q}*wA$FlGC8Qj{lW1`8k23NVu#o6;U;IJXaE0spBf#L^gBv28S>=2>zEdoA z%VkFv+6U-|1WFLC3j>b3+B*J;5&2os3sF%j{9gzdr8Cemt8}KokxWKJNal=_(flBw zqmJYFRnQe~S9=#fr+x0%Oofh}-f20wuPvK0md1U!8+*Xf7LaYpab`U{uH?*RLs_Qu zv0khnJynoC9Zt|m=cTolCG zWDzcaHTEdqEms`vbY7Q|b_`uBqhR!B7$ssIKbbI*#hw;XyQjzzbq+K~`Wrcx&qMME z=Q_tL5j2LX;*E?DuV5?mXarO3+B1z99U)L%H0sqNi@}6eDwXZr>!+uS1n$_iNtLiu z$J>HYJ}VN9Cmv@(yE}H{^4+tLM{;#+`Qz?DB~^xpCw%p#wL#lbgicq}Ub>|_Ta07x zd|8h+lytpyuX_kJ8{|_Fd%5l{t!GXoFDdTC56cBadWo~yvBsNdImy}k(WX}0Npt&y zM`8ur@Xb3TB2ajl8!!|0X)UrwB`Qr*XUvyJEyr zt%`$>XtG$KWahCO^o;f4CLP>P)OI1T0!~5cs&gR-ibh)>hib4WDUvrl7yQ#-R|IO1 zp<6Q&{mjI=?8X?Ci}u?zngr(;4}LW28m6KZh+YK20GA|Lgtq462f_wVzF51-3lOpd z5IAB2x0zizVF_qbs%Bl@7N4(kUw8x(SzZn_&pQhawMYIgWv3oaiNSC$a!xM%ej(q7Cx0 zru$IS@E_P9ZIlX!GGUEE89J zY~jKXCi0J-k4n&3`C(XR+eT;K-m&+w7BW&FyhDXO3uH?=tqdcab)8tx#8#JPsOkK6 zNLz32c8s_ud@A-(O=(q6z#)3Jo_+M=kSrQmY&5}?`r9wX_HY^~FW5i*yMHzcNyms` z>K@&yP*v*B()uInh&WLWpoliI=A=>!AU`YNX9&lK9~#T)0q~ME*N?CW5v~*4{qYi>eK3LnRAqegrLybZ;}_lS$w^KHYsKQu+6yd`GcaFvxw?705r zC7cvJNZ%(maE>$B(?`hpIoI>#{$9@++Hjcjk>n!Wl^GNK2&mpLx!$6qK zzdX42-4Xyr)tm1%G#UB~eckn*Ug{{-H+WCzDQ%gR@jdmaN=GjODwXls3%)rI6M?0J z%7dbKh+Ly*s(ioDE*G=c^2GjgiwyI| zYe;Ms$yC|VxZE{Mz_4XPdt6W5`r}=?!nHwemB5lk8W%DE#(bY_xs}AEzOuJZKwGF& z^#orvd41w8UiT1rLXQCS&-|dW2o#wIEvO*QK6mD2{z8wj8n<@AuJr84eg&q7+TlA~ z)2T+c)kB`XI_X&&?~!t6ck{do57kf)fp<5-p6+s`z<3Gyrl|$fqB%{Ep@{(`p!cCB z7}xe?u*8Il)c@V3b0s;mAIqF;M^mO7)a-5ELl~P z#p*3Z5wj1zfkZn<Zi6^d4mtJlREmk zb6Ig#>+yHNnD4tK8+oQ@bY#z*P{{9Jf4e;H9~yY&g^yXYUn9ID)(er4&x|pbn?@8y z`Ssz&ZnW@r{moq-ErFJ^M;8&VLbM{%9%`r(uzH0GovnKw1*=}K^nN1^XG!N9&5|v) z^@5>qd-@9)!Rup7Y6U&g8UeQT+=r0cJV9Ii`TEVTG4Hc^3>1nme)`-%9`lr3#rT9x z)5PF2mQ4&dn+OYt1uTm|+|2lxAjt9e)G@m?{;h|#&mTV`Eg;S8jxy zR?L(65e=6IUYO!CHP;bzhj&ymZOIa|4D~`8F=#&-lulM4N`vpLHiSgX;KBNmDzwa! 
zx+pu+pYi_f`8)ZF<)Mdf6$%{ zM$Y`{}XVrrSd5ukxgZ1^i!+0lS(Vn$8mKw-4CDLlul@aCd5 z;lT)A@59Y#Iq>dRfg(f5CEXZ0X>L@!EVuW%Vd(+eUJu>bD1QgzIgbo8jw=*WoF4Ze zoGLB2-hj%Na>^&ZPsWs?H?;rxEgz;78@ZqvjUIP?box~Nw#DkkPi%5MbW>46M)gJO zXzU9F(YSrfPu}I4SHw%=Z-u-BxTZg{KhCFb{Saieo)P3WBLk3=d41wGdB+*FL6Ac^ z;6%oK6JUn~*ELA%;#)?D=$9>zRQ7nfDD_w4no*EosC4A^I?@v@UXLyfFu;O9d2Igj z zEi|W?q{d1YFyOow`$mkXDlOQ; zLJu(~dhD%MFD<{mK$Wp4QVJ(e)y_UB=D(smXhj&|GE*7tD2(MD;-8pHI4aW{V_DZ=l$BRETiO9nNK*A9QJym-Cvr{H(!aWR&#^H-o^KC5 zHew&#dQ4W_Dx$G%Fn9bMA=I%$H#Oi6XP?{JoU?I29j;0Ly{ehpj0px2PV43@Tqx1- zjFIYRZ(^Q`<-G6>qxAp5Hz;$pavAo87|d)@7wmNB%ZNKf$!{oYLV zp&2yrvZ{xE8T`^&zjepLWD9xwu|VzYHr(;5#(0xKwi?fjr$D7U(vduh$DsQ(@)cRp zn~TO+rsL*%JBA%r=(6W=v&2ZLMoqTBcvYW=hBn5TV?&Db++%JrzbO@htF5DbDXY$+ z%o&cEz*wOfQ7~ULZH26siRe?Q<(Qq~KQgxk{CcJ^i#n(QzT++%?JiV}M_cXLL9YF6 z8nFAUCB{O<^!QYIX~vo67TTcvJYc@dEdpDTduNwu(AqZ1Q4-eiwPk>Wv(RMfQp`&mz!2(2|u zRVG1y!fzg8f1~B$^Om*@y>c9tG=fx3wD2iNAMCZ!iNP+-a}7=PdwW-gK-%X|r%NCv zB)ofp;w;u-n}eqKc`pc)kB(}*5bZmP2&h_{<7G&w2+u|8^8k;l-vzf0^@}<<(zP8c8@%3Pd+=yf87SSB`}f7+4l*ME~edKNYP<_RNf6>pWz@H&qUuIyZP;3MYBiPC&Uq zk0R09Iobb5BnYA`*Yq|(o+$DsO_+b~BGd0*asQ>!xbD7tk$+56$i|g@kWIq!j52H{aiSix5t=D?5|wB z8LP&{R14R!PwyoAJOn(AgTs()CM#eryq}reZbbbAa)78Seb)R;#(pxsBEkC0lHr}y zI;djOCXIA7;l&3Cu|Cqf>yg9@r(ILjd4~~Avo-_a5t1%FXKh2Y$MZblN&!VM*GTf< z=YuR56Iyzy2HYKu4e@WkM6$eKv?_y|&I_~LU5h5W!7)?q*F~9y|2~lETt;Z**va;$&jk7 zw-F&%avCqGWT74}+${}X$`bf+kyTNmNqjKpjBsL*U6; zks0vqbF=5-efUps@O4@FE_iWbY5gO_Fj3%s#JGDeK{TmKUS#?m?I?P*wy1i?agUfj zfv@JsNwH%M$QF3QoH#l5!+AG#$e6OgyOZF+~Y?}Suiqtdepih}j}_Q^i- zA=wlcrw#YlSLuYPW#V>-IN{({Kq-`cC2+j4vvWZ0sU0>`MEvbb^CMwzx5N)`KmCiA zNeg>(_1E~fqJ{9_e2Zqow!f;;4X|$Y7S#E9j^kwEHV2)EnE&cC9R_bFVnmO2y8Q@R z*{^H(a$Cj1H@~$y>nI$b4bzg92o5}DidE}VISqMzpC-HWjz+qNwOcyflRlhQGQD9)-lLr(lOLEJJJJ$GP8EXdV-2AhI8j4}gzzt#ipnBEGXL#2d2IfNq2cX1r`7O@j zZIA~APly~M5AO9NOR8iu-UtrH|E?7!L5^o*`1{^pzS(%)AGsTZ84>}$DV=kbGKF27UcAjGCBB= zb%Qk8AvnxqP)&efF*>fV^;sn&%q+KmRYM1;M0B!FuhSlFmRUSTi~op&z~}VZyVDl- zs<1f)2kLk!3@6W*2MarPOF*t=-Z7pky0^AU+*=F^m{{d2ffe(16f;->% z$ZKY^p+YI9Y2nvtrk^7{^N{Qk-;o9&MzEGC<3sTg!NUcBi=duF`$ot<>li0y>g~nb zZ$11tQRYY&1PM4&Z_px=ONZ~y4>y2^aXlRnY?Zq7e{+s|>y?m60w-#Kr@do&!p+#tW= z_a2AQI|*q4)Cnyf$0_7RW{ZS)mZ{9s&t3AmY;hXzuDI(c_?h<0m9r1cwk`KY5L_d& zE=JM_f*O5)$MT~wy7lKzq7%KAgjo8|_SOn@@pOdaXctc6#~=K?QwPF_oE}N4eH6Sg zGgmf2$`hDCgx*{VK)DNLDj*nMgw$RQ+G0ovNh?K?oNKe1EC`y`XS;{{DybIZDmtKN ziNp)`lhK56WXSt<25x1pt z-IW41to6rq1uwFx@*A8T@E!d@Qf;h>!gFToC~mG0;`^?Kn%s#xM*pXt>MR!da?b~C z0siOF1E3aZ*j5|tZ3Q1juG_2Du_2C0y3f1hm(JXm-YH=avqvC|MDFb5jAFZ}!s3x4 z;;+PacSmNsM^F8wHGS%A{==e1N$h*w98L+{pUVQQa&|`2y4cKNTgP@pz`HZyn~7Gh z=_8(ZWZmrXCfi*}kg6!?OLQUupG_8>Y(@V!iuI%26qSBdC}mWnvKg&EDp0 zl*elqUAHSSZG5P~ZGJoMn*-iyAJLV;>r=aFk=V$aJ7pf6P%h+(g|idUOxy#17u^Ya z(Kf*?8SYGl<9V7f^p&3Z*NFG%oqIno1z0_RJPHkj2@nUn;~}2sxWrP|6DXT8F($`- zz2G{XXY}X1mwT>&%(ZsH+%pvr3CeY|9Yd+okWXTq7r&xZ)CYh8<|K>P>=N6t`!n4C z&VKAtmKp%@QCjd!!FAXGFGKrEgF3|odVHY_MaL+?;|!EEz&08+PZ{E?lS^EGZn2QZ zF9HQRePqd3&=^v_bc@xl7aNrb@cEJCAp1XT>Q4h_-+kUTwX=O%ELTH}3)yTObBN4( zQ;01!9BQDOypUoQ(xGWWO>9}z=anfS!f+e+3PB#nOw=2DTA0uXVSvARLy!kJ`{y5g zmlOfzL-7JQ$3$7Ff)cT&X50)VmWKCs$C&X&xeTRLBkVw#u_>~>0M5K`Ka7pR3^}vv z{xQbX@@Uvu4(#=5TLDA-XkJru)ac##8Vq2VWUXV*vzFB@d5s<1gJi50M$`AW#PxD zk=}0Z3{e{KrjmLjCR<5%KIKuN?CUcC;e+t>GyF6G5CXGTZ+yLx5!55=UjdWJ*PWH`sMv|ezi=tL zan1yQMWG(iBuZKi)LI~L1PT~)Wfgk(w+IV&tZ!FL7j+Dn=rIOpT{;@^KO z?6PePp+NK}xYH>2RU@t1CREypVja0e$&@$qxXUB7F_TEZ7T-#)rJpA`$oJOY6ptp- z7WrSv=f^r_@EG)`*5wUk8pVH}7>cMGYgKV8r3kaYW;5>Ujm(LGl=DW;YD`TL1Y76` zo(cH%e386QuosT+y`1?S)GoK)I-Gb?<=$z&fQ!aPD>x*Tqo!o7o6m#2W%1q7cvryS($Lg)#QJj{zRdqJhOG+#F;1NtI9>WTIHo>8 
zl%v8l1Y`8R6>V(J@SfSjPzW#f9=$ejssKUJdg`xt*B$~()bFAb~IE+1n_#5nvKN;sD0u)X^HV+A~=N%tNNNTm6SIj?{itdDaR zVq@GRU!>F^q8Gch8KYHyL~GK`wy+N5FOkS}RThDl&f7|`S=-?a9@t`f;%3M;)wFdL zEm`$M7hV$UMCTL6Q(HPAswtTVIR`&}k(Rrhb^`**>g*~FhIWDHdE2?H&arP@^Okpx zQ{Ph9<%^Hud;CtfIYV{_Vcd^gFgGaSERng`O8mMYEu-1;8zijTkXW)jrN`pTz^joYT&Mz*%5k zr(6|YVz?q&U$G8FF zRJwXWrbbU`B<+?(NBIGMMWPl9RFHfGkc)7!-E%3U%h75_L{)_)i?#=HbK9{gme>pUR&}XMM z9_!kmr83iCIxU-x3#G_C1+gi{;*wPNhkLYNip#*mKuotJjj(tKk7;NU+j9j`<3Fys zY5!&?qOnHD{=Bj%YsZq$ zts3_2dBKZ_h+g2h=tGgTcVb1Eb{P?YqU+zG9?Coe2^v+F#{|e_b=pg9ky2ad|9!da z6p^|Tf8m`UrXumiD(bKR?N%KSXe4UrP%0Kfuv{q^pIY7m&e-|icMG=&|D%#@>MtlR ze0%|y+z-4^BLt}2X}fa!!R_F~(RKH_Iw#<>oh2{)I+-i-XNRSDTcT{+p{+kz@;6^y z>HSY7Y_`~UI-AKAfjT)EbAzMmm>SL|+5kPr+L%6@lng|ECMK)k8BHPS?RCN&BzX}2 zCUak>BTP0T(_MGZ-@A$~NkUW;@S71LTL zMIlMRMZHPcR!*fE32bBmRNP0rXQz!3eo7ivXkdiwZuhH1+?Uy_19mpVbM)q-58vPT zEDxz3iV^7kBD=o}&%4!-pzDp4jgIP^{=YfK0o1$HD5T4U>oBK|CnRn-TIgCE4qLHX zTQP=NMi#wqU)3h6ZtnNAC(uWC(``pt9U{=jBgrpuNMM!fS9-Gzr_ZV}%A1c#4N2=F z6`z}8OPJ=1pau-F=MM4Hiydx&W=MXjbf?>50l{~rgas4qLSM0O!K1Dc}P&=(1h<^IlUCft~Os`X$+(Ns0djxfW1 z^diVZ18W5SFPENJgi27``E^E3aULu7tcUjlEE?tG?&Q(l>a|BbUZn{D|NlbBXzIuY+VqiUvNMq;Ye(L_nAtaPT3uflf5s2E0wN zY#v9LGt#k|lNopibfKKiW|YV1Zp~Gbc9J6Kby?AhHd_i9V}*`1SUg_ei7IM63)1V7 z@}U%))J8$8(LUK*t3F<=6wGn1uEZ=?1jc~nE0KSuXc)*{Od3DoF;EoxWd~p^6qPtD zzrG*FbO}b+c>kBAFs+)+6Ce+IYlOiTnZZn7t@rowl}GV=oq_#O9x8=JXVCA?oM2V$ z-CQKH?FRiDs#1obu!CIF;2=r>T(2YDPMVQe6Nclhl@`mf3g+f#flt4JD2Pe3lfmc+ z5$zAmgGCAUXPB*qJ`eCB)+h+mF3`FZJ*Er=Y9LfiKO<(4- zh6WUSk=Q>{j)rOZ26(mH_|?TG_rF%PGI1>o$P8sN9Bi+5oyr{d!HP5t#Jid^5DK%t zL_zELbOGRxG<|{V80{ewy?wM>3tOtnMv6yss;fi^=|@7&lGzFo^v&?UfLvw`_lrO+tVZ&XKV~!1QI@6_JRo)O* zMc=NDq4p(wo|H-DX`ZWg&CqDnH$&FWB75GE!1*hAD}?cvrIKVW*20W;b1L04^B3vc z!8@}M3all#P~O`oHp$Aa^vnxM6@LWj$CGiv+pr|5&2sxrx+7M^+7^eCv=iy};&*Qt zUKb6J`QwYxPJX+*m*9~^%Y-_r6+Q~95c*h?l{qMtwF-JvCJb>J4aN=BXCt~PXNZ6H zi@>38nTa141SD9x({U_ToiAt!9o^q5W$nbZ9M9w-F(f`HK9!X$^A)gM$?D&V&ckZQ zDz0c?5F?b(`~}=;@AwNT`u%A1Mj!26vH2Jv=>9-2*gXgI1CkJK zTUaq>)FG;HDnDj&uCVvZe1GhwinK!wifZ1xQ!dvbPAlgC{R=K#sQl4u$LeYP5~4kG zMbUlC$#Xj4;&3x`XpM45YuWH;x++((tZ+zgw$^Kfj^Qs2Ki~lV4@9u3+BE$SMA&r1 zXaBEDUA6TjT-nh^8teT&a~QuWtRm6Q z>VADX^@>fA;Z0vJT)lu9r=opDD&3456p<68V-T64Z6cexVwf-MlJ@%#xgG`^VgM1K z>#E0MeU~PGmm)DCls3ne!QCQJ;(TigL?OQWI;FJfBaX-zw}XVscL;29ayI{ahgFGd z>viVa7u|2+Vics5g78#8CH;PHhX4kmk)C@qEIqC28ob--;@5)@G6I9*i0dN#_AH5H zf3Cf!!Gdh4y8S{$2~ez(os3mJqk(HS3}p)#i3*WT1JqVoDALsu zocN|g4t()oBs}kfIBdC)t<*bUzGTP@@++&4rr?R+*u7r-S@k~Rif3k~y0wY5im#mL|BBIs|Qwod}VJ z?LziRaZHtrQjErMY&%mgbU%UXU@C5SDH?Mm0VCawYe*3XZxElROJ>pOSem+u))SBz z-kX-@S8Of}xNH+|D1+7RFORVJdLu@k{y+mSf z&c;{8G8ywIYF08UiQ|5>-L=@^{YWEpL5*}>;@w?mm z_2zT9q*mhpq3*qdqWZdRQ9+c@N~V!Ul0=Cj4N8V4NfsqXX;L>)KqN>o&=Q-R5wQWu z!6q~r6v+rQIfG;nP_kt1hTnJ2sayAauikxi{&`iDRlC5w*IsMRF~=NpuD$kZ)sbye z#||p(aT=y$#grw-{J4=BzWWU^K>DuVRMgW8)Qr+*h~vvY0Sb%Sc%lc8hW|*(6tCA1 ze+&eET=#va=BH6fZ>7CXqRbcrXo7sB=8Ws@6&B^J-UgX8GD}xw^g!9l*{sAJf3OZI znuLY7(aEt(aeTiDeJk*`boFJrN-`uX2v+Vy=w-yUHcaO&~oSd3fz!td{$&NWQ_<UdrxWm6$K?lWzP=H7+y6ldui?e4OnhRKzIX*QuuKBG$;2fv&y4&!Yl(=$# zkJB}7`H~deC)4Vv|A3O&8thS~sY9v1XM6$0YR&uE2eP@dXC2gorpn`@6_aH^I1ncn z)%R%ti|Gf%IcNf@)Z(~q?=FBe+@9{^qV50}bFj74Bcfd=cvJPAi2~jJU6Y(%M|0;J zgQkL!ER*YvGu!CBd(s<2C9#W!e?vW79so6VoATQJV|NPB6~aKmq``hSF}KpUIAG7j zhf?U^9XixJsVM$wG4YHy><#FzptwB{S}=r6^Yr}au1w) zSi4!m3cHqF0BLkM8sDuS>XvXdRlHJ;J?yJpOBo;j5wb^Z4ZjRvfEC{2;w-PR$C?nD z`0bkMLyxJA^_Oc^`35Ec!I@^~H90z*l`!!iPBk9>Qw-f061{P_R`D_msb0Um(&qBp z(!L0&OF1k)YfI=IY5JE6VQlD->Tvf$op>%JWa3E>pqaDs7mQ(xYN;L#!`OpW&Zx5)l7zC6VZrFB~#Cm-B%>jHuN3SK}wK*8rtIGWZ 
zi+d7tu|IJDewfkYLPCkv)R|361!(mcsT9=#S+rsOs_k+i3K>;Z>v%?gC{oj^8l7F<6rLb$76}euwcmYIivSSDWw($8d_? zQiw{e%Zr}`Q0FyAELHa;W_jzW*ZxNb2~bYik}CPE z{2Sze(Lovo{5i1pc^S!Gd`0D*+13`e?~BjsB%@Vz~;x?@Dn<1RVh*?(qO@^+#L5Qprn^&(KE(YvcrI zkZr&ZwHS1`Vp4<8fR(kE#P^t{D)S=TPZR ziT8kKr?1M2q%xnEcSXtqTY&rHnElx~(A-&kF8Lb{_z;jlx4cKW zC-Thr%QO9M-c|HTI8&e4wJ)(tdICa^|DB|ysjJwGGED4?wy$bP*2 zpp=sn{W|fSQ?a+|aje=nU_kND3wCyv01p0Fo>O222tG0UHfaxl;Io9#5_)>U;j-5? zH2H~efn+rIbS55{Dki^olX-SsE~8?v4p}iBsIUs`PUGl{aSW+cQxGY20(wDS&;FDL zX}IrT9@R70R9m(vznmJTpi@0ytA~7wPF1|-Fe-)*2SrK!vQ3$bp_xDZUNMg3NXjSL ztP6^iMqGSJjZhmncR9*HaTK>cJ*d(Fo?07a*ABfuc%ZNv8}}AIZktc(zP|F%&d9uC zhBA>T_`n-<81&F(@|gUzX&u)SVyhhxDN0~`<*oSi!%9QDA_J$O{LLH&PfQJ+q5jAB z+cI=>c#AM_ccnv{FdyCMKyo69>U&u_FP$zCaOA)&rxqpu@lyV@qM>{&_jdgaM-*i#R!EhrgCp7fcmvZj2z-#UR?EZ5@7GLk?1zCVy*>T& z>3q-pvz!_e99R&Jah%e~dWT26e>i_@gz2o?5zOCUW_BJERqH(hRrwX=oc z{s%78V6pz5FuEQ-b&ZLYTEq8A?;CSJN8=yD)12lPIOO_vPR9{nB@mehw{BWn_|4D# z$~QJI4#l^l?z%onYeP*uJ=M@EZn0<^aBcw;|JH6im>+daz&|2A*ch#g@E+m7foNdt zMQ2$iFr-7|$2X5s4@%QGuKK+CTe|2z_hM3IOKVPc%SiPFFJIA$)mgT~)=Lk#GR<{= zb!ofSWBsP`?P5bacv|_0V{XQr_09nmWQn#V3bbF!j`YeFLsw(5 zkEMeCoKbyU7?(}A9%@(Or)}dy&L@*?J} zZ+y~aF=A;j_T?W+JdBNBb2N(&P4WiT!>)P?LdhBH7$*GJKvvAib54tfm|<|U@1Wq( z^y8`8PHicdc}aQ|)e}a1fQHAOZ9+TSK0`jcive(!n!Ep`_`80v&C0$(@Qu}!mX|3*#9^DNVJM|e1@khZd(~oH2A->ea5nUaPe$V3 zJ>8mlyY82Yve}kh+*dHd>CxJEDM5bQ%Km1i%}po9$?61{zS|~og>U3=_yh1Ozdq5S zA#3k~cBn3Q(w*iIv+1H@RUWC}&?fVbRdM|6)g_ty*c)47It?-Wnutk{HsTn)yoq$t zKOgKF7r1xEGrYp?_kL&Cvg5&3dbSoc9eC5EBV!$OZY>l@S37Ot=2~247+A7R3b~!X z2x&k0dv}Q{ebFd!bN*^Z5>ibUVV%lCkCLN)U7KNYid3&n`>;Ig_r7E)la3ZGhV`Z9 zI8--0aEIJwaVfzCDw`YhG0VQ!7A2Gd=PxP?YQ6p|F#dp!rXS8$8L53v^2}``UAxpi z&@f4kX~O95+gU9{3y!O_(+^=iXD^DHIIc`zYj349=l>5d7 z;d*+at9yUYqLit(4^vxJujH6Sthy5UwHb^VmKy>cuP|H}9&*RW>dZ z;>>xs+Q^;?vk8E4%pJqkcQlH-7S)@ZwhursD{o&VQ6+x<#70;ciW1T_B=fGoNd%S| z&O>`!t_8$}wd)?Ib9LT2^tj)$bz$H#WM~fGEt3~Wy|J(|EnRBJvOrx-eZyux1nQkp z$&;<4{4{nsMeXx?mS%BmFAoPcT5PMzFfJU=lihnPSnArML@Rb6LYp^j1hbm<2w7Ib zL?6BPD%k(|hGyPm`&eJ6n10G!`c6;Jae|?K^q)Hp<`H~Llm2{uzxECIu*fCjM_xJf zZhgZ-)MsV-pa)Njlf(qTq03uSLWD`C$BU75 z2}!Y<(|Yog$|ad@Ao*+>;a2b4veswv%|JiKt0$Vva_xz^Lo|KTfEm0w8K|9;Cb&SBMYiEn? 
z>9CLEKb-nfLN-K%;8fTzB+|#;3KuJ~Q7LTb`1VR*E7vpcm^fmc2DPyK#raTWZ8F9(W$!b7@Qy(-!a^3nSS<*jJSF%piRK!an+RSVHuotwC@mg&;xG^jV>ink z^*=<>JGIr_7KFOJoD-2$HOv}no+^0d(gJSLoUZf|SCWn0i0Y(4b!RLFeYB`-ge@bt zj~H8Ev0^$n2?CAcp|R@r5W|(XH7S}TX!}XiMHv4JCG&1)1eOw(X)Gx(s7|yU*qXTX zftr-CY<_$pL^2`&b{zw=Vs#QsE|d%^p|28Zv#Dn?w?ksrEBU|&yuc@V7K?nUglgU2 zCva^4gtlKFA4fu~!7fbM6P+W25CxLVaKWlW8HD=IE#-Z z2+5^I4%yg|$$_#v)Qq)r))~Ni#2o7VZLyQzzRwDEqi&J)I4Ec+qCwavzQ%6ZQ1IbcCr2QNsk*D;OQZ7&}!jN;(uohhHbJ zn1?z)fivOTwL-GBylEL}hu>p*eJQ^l@iBXj|9(ID3rq(&1j)JGWO#%}xCyvnCOTCS zSw;0lvW)k==2PrQaQXNoU!72-d{U+MoGK$J`h%x0>rv!ifrgU#*oj4(YGbpmjmrKU z(trwTmh|G!I{dqN5xMFtz_cG5#?3OqEg9+tO;kwGi(7OVpvZkYcnVh?QHJ_xlBy@;JC zFo+GBP~dmnzyx>$cX5i0jOR~u>nK|&he{P)HE1E#SCVNVBP=^MVm8}-)#L}yRz=pw zl2vpEZnD(SO*nV$P3-$)n zl_}K`b>;9GYKybjM2h7d(xgp;2EDAuY*BO|y-o$@|BQRVsG5)zJ_=Z-IF3tAnI9@Y zWSWd=fsvq>#;MhYo&t8@h}ZAuc$k4;;Ew*vGGD z4-H7rA0>Hoq??j3S*$HkGL)2MT7f3oER9<6}I+(N>PcRH4xwKk`5;5 z3>7Tmhq_Vm-YrGlfreI!3`|rW|7BzoquQ7ecQJpyeG|`hlz}K4#YdDP7vWF+%qqMhh@12}uKSO(LBE7&}&NT1J2m8uM`XK;)Hy zYh{1YfGHyL0W!>l2bSs8E!Gfb+VSxwMu+x6Y^D7~)sjBW@(vhxUnp_pI;M~Fbjiue zZAwZcQJfNF*6-3I7TCJvRjq|KTp7RGJw|bX4UWZrtQ9td-mv*HAr`8l8lNCbrA(VP z>m(5>MJE#UT&G}-lpe-4XQSBes>Db)D}c4h9UiE?!J-A4Yr;laXQmZ1qO(rY=z@?2 zPBL_dNyRDGV!(#RLDzYKpxsRyxTSbWp{N78!=w5H@N(t=c?5rCNp5y9YH{1(A|_Z> zkJ~-3=%#@jsy}3f9t}V>h;#%S@OoSZ-m5ikX0Uld$w?i*%!=ZgWL^~yx0{&0IF-^a z(Z0$REP=H26xNclGRV%t(jiTBwq*FuLhn#+D=eUxwJ_zGo0(vMHjInm%9RIrRYiGS zc5p(Q1%QI{eEE0bmenV8#{7YJ!{U7gQzrm-dKbCg=JvwoQ!2Vf8!b-mu7ys^XigMM z$~=bTiPwsicD7nJT{X6CNH*xw9R)%qLcoaO zdQqBhubglabi{H{LZn>-twrxuClD>F-fxC!-)7UfNrI-1l2UtXxDqYGkxy;RKgbIe zG}`8>J^p<>mZ79R8b~@nY9~{6HTAwtnZ|m&8tQbOMY}YJ#Xr37Z?EstQ^(3FnYQtX z<`%3XrCVDgS$erNQ{o=JGn*liL+p05AMOcLDNd1~GubiNeNG;v0R_hO z9wYQQ3<9}5pz&34rr%VFmYec*Bhjso0Vw=R^*idJ?+ZZ9ddI98*2~193jpSoOd<;^ zkor#!Oeg?APOy0zZK)Rj?O67)tu~ep+_*hR%zcS(eId5@88OrD)XKrP!lEM0uan2| zM9tqRWK-&QdS0#PW|CW_rKPRMs24xmSr&TsVDh>6+DM57zs%C-Jidp^0PVRg$DH;a ze=#8i@H@drg3z86Px3Y6EoBWvP?1m!$D&N~uLVe@R_#{W|IO;@XE+IjoUa5=0*?ApL79V-*3y-tVNjt+l&kNZmIf8EyjtA9j%pg z`R``_5lM#DNsVcfpi8qRb7JWrZQ+y2P8F*izxl!;_h{q?Ju%U;>CuxezidRgQWAh+ z!ZJx+77~#eE{i5gWO{^~iM-;+hq_jn)0Cz=K;l_rx^V@ty0f=P<4*p3`JxV8DGPAg`<}?WrhuyqJ4^4~RuIrUC^2{Xl`Gg@eHS z;V#MGsrqN%?%@735SP9BQy+g50(~siQd3hiYW|Apt8?RN%>fv>#Ssz(Sck~|ngt7e zk-e0ZNVkz)+7oVgsLf};AbgT?ORp7>hwZBC7ikdnEt4iAQYqw6!t98nZ{ zIcs7UP;Cd*=XJ=S|5n#phQelG?QC8Y9Go1a9DDp^KeESE#EB`U1k48 zNG|XS^?%~k<1jo=&B2uCOqg>}LwxKcL*{z8Wu5Jo;$=0hD5~HaT5P-_%xLZp%8lL$uMB1Rc6bSDL(HLN+7^ zRs~FjgNGu9E(FD_D_g~T0ey-84crY1LW|966Qoaf%htOe%-A*_PkB26pFjLgF5`7x zllUzPfAEgwc29A`(VQ+O+POgHXeDp`hsb$U@>1jZ@h#k9LTV1K6gXl%hdE&NbK>ac z?DzQx+BsZbP^zUMH^ue_(`ytCXJIrCg|j!vwR zv~H(IGCXir*s4|W?RJ7_yjkDrtz*Pu#TJ(BYD3R^@&j4XW$gU>D|@xwViOt#TcQp# z)^xbg3M@e%@Atimgl^Eh*nxlJ{$3`_qwAYfKYE88Ub(PmE1pW)r;Dd1_id?CZzG?E zpiWXWkD7&<|NH>>#k83tl`snrnX&?9=!hi}>V$kR*$Y zCMQMn&j0fl7YRYb4FRw{6{Mu`%vQ{MyVbFcsj_5eU-Q8f^R)u(dRh7ucIkF~^pWhj zyX?yWO@mU)@AfX*S(SzXJE=1^2BuqWD>nCntg|sDWxy#2Ioa!VOp_96D(&ezMwMDZ z{%cxd4%1_O~u~Pxe%G1-cE+vYZIp8GE zaXHP?+TwUT(P6tgX^UU-PhHsPV&Zvpn%CTFHFA7n!eM)P15oVP7ka%(&}RoQ92eJH z9(n@JP~*w>K(l4_igGwBv=c|0<&f|iu)GWY{SgS(y`3IvBLUF$_D(+LBD z>Bq$Z$5&5PT{2wq2CsTAkbJdp`{4WN8m5m}sC|SW*1=m}CQFfj_Z!H~<29bAts_x4 z7}ekfD`viY4R#Et{N|^3{;kvA%Tv6oSgR2`dUG#u>O-t2H6W67aG#5r=d~NK$sDhP zwivTlIqx$uF@@_(eSGzMb=^JvIMQDHJ~BUqvq26K_j*B<8psMBW`x+si3S67w5dbq ziC}R)Nm;iM%wFqLp^uMRM_B4>{)Y=;TFY$Z^ zr36Mn0wwn6%tp5qDz%PL1arU1$ae5i=&lOG_Pwhs0y_@Ncl*f}r#FVl@(b2|k6 zpG1i*u+^=i_oqDwgY^}p5D1P50*tRDEdkrgwqJDrC}f1;Y>wmPJP^Oq7}*Sgy9xAf zo&#ujE;AXYS1(qeGcl={~xSkJOmx<|Js?rj_G}lNEo{xbX>vH$a 
zHV4hgVhB4&{u@Icf{sj%B-2+5H$w7R!D@2b9HfEDK#F5@vl*uP_#+4CQjg7=#Xwds z^jP<;Lj*#8(9U9E=w|Gi6wE(OWoEmJ#k=B7<2K2mJ2Mu~V$nWG6zg!I<;wE|Nxrw} z8+?*Vd&y2P%KwnPMC?)o@fILlPNRN}U!iS`*}}igrRLDSZ|SxH45{Ev11D)`to3Fh zcSKa~bvTBIBQgfi`!DjWw-=-R&xDK;#3}C`!+)vw$G#7;&!TCkhS=3T=P?-;mFx*N z$r(7Teh4w9^!R3&sHsux5ncY2{3wB`r>5|$Ma`oWA!00E$|?asrq& zy{hCQD|mdiuuJVyC@oHS=N3-+8jm0<=z{@9q-+ThydCTC{k154WW#5M+}XLGLcj$$ z(lD1Q8JBi36uZI!Laq@BWQq{zVyI?dR_L!g$ei2FFv$-d zMwdeQ;E>zn2r5EK2>O|*@x{XMXnaRmxjEhXvNYFM3B4+6di%uY)|b{fDv)SX`D0qH z5iqL*^v%UkXERu#kn6zL7Mo$+ayRMeFH&$sl|1GE4y-eHe)*0kaJy?(W@5tC!%F>H z;wvUEAFh=W2CUveHULA|3HLDmx?s$$xnsk2G1M+V>N4Zlh0r`RC;ed;Gwk$mt)7cZj1}=|M-!&Ws*ZoF&^yaliC%^0CRRCf`Kx| zzl!sa#5xe;K}m`;yjObV^$)rTUMR%|qe6e)w)ws=PUWRkc6nj_4!?56hXDA^wV&ec zkc;7;yl@^+GH# zd+2)tw5X1XYwZwCR|@PT0l(v z4VoFtemt^6T&g$q=N<`K4{Gj9DiG{LV1}hmGUa>g9(U8P>tb#;6Yo4E9-{;?$q7B9 zj$I?%ylt(-|Kf^SA4j0F(({N?Hn0R?2}0}=daxMQR3^fc;nnWSRDIEhmhbAvvRseL zRP5H7S>E9dmF8O#0q3U2)A7COsJQb`Wei_=`;7q<6ma*gd(=%Vo?w@d3t$l!tJDl5 z#?xuS5V}OmJdAJ*`(~)^r7TIuNLD(=_7&CIk*e|RG$@UZk<-#OwL9P{rY{1Nf}=L3 zbb>^wFx;!&Z)c#^lb?xsy`V#V(8X@IpnDB_k2uOMN4R?b2b=hiJpT`Zg+3$+nps>S zgEts;q2B<)4Y24s^9p*uNOgfnFk)xd%(`my1)ni(yc-MfbptqokE!|X|DlQUgUwsx z&YFqGUa?3qGoB5K1=wm!2RuJvs??g;LKVgXbHz{3G5`Q!rU7Jx)UQC8k)`)5a4J1# zy;d-4)znuyw4{{x9{HIfn;DSub@x^5FF}X7aNjl6w`eZ?|k2|NK%(!bf8Z4;hkeJiu@D( zy^szM0x$B?J79^|#RzIx=EcGR_5ds#gaWh7fa2NBaw(-qlzKPRE}67#DGc66;%MPnwoEXy6!{^-g8sYJKHp z4)%)+HDxv^ucdneDHhM*!}2jJ$&OTy$k0MRBn180G#Vq=9J|LCT}4@;BeiS+x9ipB=j0tUDxgibb69W~Ixv3O+=3y(SO-z( zVmFLS^_>!1`m-nVOLyqu7|^d@00!&ZE#Kk3xUd&vgXX)s#{)5e&Rewha{TriMAiIM zl&IETzq8oBu4m4<4V3Q<2w?2ovu$<&j8puwuK>UromybJVp7~*FotRztg3EDT@(jv zfY|u|qEZ+Gn+L46$F^ItwpAjq6>_`B6F}RZH}ttc8R-H*LmL(^&~(#)z$>Bb4%qm* zIsP{A8v^5=2tx#e7XeVF2Yq#qqWj0|FXsdV1zU@1%h5tSPn&W^JM~OfU*PcHjlb_$>APRe*Bz z6)ot@5W6;a!ztl)gUtm~-8680k5iN5FIO^-WYQ?!dtR>$L z(XD&({Ek0b+eCmW_)2`M2khdiQiKxXxAW+wA0?>l@ZjYoy!KUsgO3I&bEzPn6DW_O zq+Z)@=~RKW6f;d?3^ueWOb9R=p-+7JfZ1FCGhp@rFoVyY5G}I;2zqtpng);^T>1E} z@WQS6QUK2->Cq8uhN=9d>%N>FUNKthG6i7%8+ShoVLx}-4UwWv9Nli4xa>N=df{+(N{&G0jTC|!_yZOsF5&+fh?qhF3q#la_#&e$mS+J~Dq4@RxrtxmEDaz8 z7d=EI*n43S7Rx}@<3IZn)_gDNpT!16g(@Wg4SYaUa2S3$G%0fvBTUe} z5WfO@eQb9H*4jY~fsJIdWhuY_d4D&x-m4Z2$J-MGqx47gmY(BY+SrvV!KKcAEBkevVXyf^?&9zvE|73LJe3TH6$K?h9A(tMO;J+j_ zzYwB4-*3Ii!dK-Vn+?|=O+jA0Bi4EbDbyP4%;MC*+N2{q~B0*nL9W4H$e0pg+VU-MV&!VmbDgzQB9TPyp z0{hgkNv0;>3cE>;62#*4$=(8A@f(H{VIPB(9Z*1?d9wnn5R5XuXMC3kmPz+xJRD$T z3dql&E{PFeH<_|7&o zlg~gZaK~(tJh1f=9}E!}_;882EfY7??mc3?3#gX0;YEhkJh;)}VP@G(Y?-^p3Gxes zr-l@m;yy0CS0Zh!!ynTaBevZl4XNq0z{Wav)$e3%U@^t{MIZ{-zfk|Kc6l{LN`juQ z<|u;&qmsSqBpEK*i5ZO|N|6v~4**E-aD|)vhb)U-(^mPu9R}>75^Fi@FZ$CZ`*$54 z?scX)$}?^oZiQoG-kd09)~$3oV7db$ zp~Y-mKyM|0+kRatCD3A;lc6NEiyhFlc7c5Ns(Y5^Cybh5mjF)*VJzF)x&g;Tnj=kf zF4nKL)rr7P3h4CcyUGnULgC*cV!i6rB04GzezzChIMNrqQ$qdK!n~Tj?EXLV2c%V= zf*pmFa7!-c5=v0SnsdHxtaDSbwV?!wY3`%(*Pfy)PP8d!-nqyedRH~ZM$ zw*`rdAW3yGqw_~{h|4$w2%kQmUDE)U5)gp)q)9#)8UQ}_W(%AzY6Ix_+R~Bl&L|B_ ztw}RkC0?!@;x=#-M^A+Zv)JAwTKFexCg1-&>-}Lf=xY-SG~Wr##C&XF$|o0_zO6sE zPZra)Apm%Ox!^;YE{Cg$$`x-LA<Qtw&54Tx7j|04P1GQ_q84C%}s ziP;)5u+VgJ>`D}}b8>QW9Q|UMx9o}+BK*^E?7Uvk)1%eM`+j_oYUTHDTlGXin@p9+ zV#%PZXHu=keertM$2R!@l1;;hwm*`pA`hR64052aw7|x5 z11xvaue74hj)O;{+F;=zajHgHVZq%A*^Y5b(y7;d$ujs;pQM`kbi-67-;a8TR5Dqk@r8;pwa!CUqbge!9<~A!C zMu!126ix~Guh>cl4CewilT01HA&MxQVGg+<#<`1j080R10TT4qD3yu?J^mOX#Pk1; zG2Q|q5~44oN*E}B^>}5zeKSYYZt?E=O}H0>hHFB$`X|;JcnjJ42Ah=fkK0tBOL}+c z@bl{%ua!!HpVt6w68w6ZmG2)4bqBbx6X%OWF*<%BFu!hW) z$?Gfs=I1pL(BMKH6-D%KM zL;EfA)ee=Xmfwd-;ovQu2c;b>bISf+JKdEn3gLzk3?iX7Z;vL56{HVZgI|{Vp3DZZ zCipYv?@Yu3y|kigzs_hqnM0tp6g| 
zJ$vgqnuSa2wgt^RkAXhR&(ED%v&cj4$~3W$iu?<4_Ojl`*^Newj`tD(d&P0RDZsMB zEzyj>ULqV1dL;iS?_@XJYNN3zKkP4Xnf_pjZ{QOlq&SII{!Q@9V6-us!S@Rig>_|4J8 zj`c_ea2fgHj~8NrhXm{KN@LR%Z#fJXS`xFIoM*gQdbR%b>ff-ISw@!=hlNJ>(Hxn* zj)!>~GgMt!f4nz76%>SRRE!Wf9_=D2AvBvcwYBqJFW0duUzj{68#Fses z7li`YQiA_BFY6Z>w=^6LO=VbFg#X<+Qq_FJ@@L^N$-g;S<}_ebM}_HQf^@sph(!SC zc1en#oKsP3+G~p#P^7Cred{MKhJ7*MF= zAU@L;j8cV%*Pk6RI;Q)P+t&WB5t(qVEgEBwl^mX}l=9u^4_) zs~j@8MbCBj5UcIC5vAbIEOh;oIQ`5$!3B=VfM3Dxx9FAq!&#qQCD@Rs_nx|U6(^nSsl=ZwPKtXjEo{`O}fzcSdeO@Hoa zvGA?0G4ATu7A}y5_AU?`JVDwoF7fcbEkjMk>>J`wiJq8 zs#$Z+m)cUSx!GsVGn4n{C+fU+_|ngI zo$26$1D;+oTOuV){1y#$rGFY^gS+T=P$$yHs&p+_V!rhAlM$-`>AgV_kB9!}$6>s^ z(tqJouKse_>6_&zPg~oB8l`I8epl0P^M4C?!ZK!)cy1QrZ=F|aSnuOu1N?EM-y`Ck zi#gAK6WQ2)a0~k-VLZ)q+T^e=(%+oMC1Z$CbRLb+(_TEDIak%>r&W($<%av5NtZCO zg593fr+;wYb`fu34Mj&s(VaVWGV*oq#u|6gCxace{{O0ROzFIm(<>-N>vKj6b)ms|91)c$_cK5M(97K!FQHkvK> z1Pi~-I;t3a=wcMfGW+ynruP0t_bbx8728Avw28bY^9Wo}zUoR{lEK;0@icyAWO1;A z;O2|{vE@%{FKW|u0e|+0M{Zs16HpL->MD1~ytLg^N^7P%y??v&OQLElSTRq_bH>kt z)A(CV;ghaEDtuG$=(E+F^diNA$9(jro69KDGr;eG5kx=ROkPvRE73(OJdFv|*ZJ*oYcU<;=;_q+#J)KtS`(ZrV7b3Vk2KPD4)ieu(rMJThEh+>-~`x~~Fl;v&UkHv+s+`S+V>+g>ZD%!Z-J%by_nCfKbfB;2dvCD%m|Lge4=Ni^rOzr7 zu0O%Jbl^0kg84$gS(JoKFM zKc33tQ(yg~@uI+doiAA?Dp8r!JSH-BgL}d7kyCv0OqQH>+uhbJx~Asv)*BC|UYPiv z0BuT?AniKWWi&e_NIVaozL2BBd}wFJ)>kGv@1-F7X%S6l({@9Z{l1Jcf8l5FR6jFm zh#z0=D*v7Ugl}6${)cHe6BWq8ujOyMEOfti16MmITh=h{{lpAb59qo%X`fFLEXZ$r&4-B13+VGoFAw@dDg2AnI*P1vqbFl%1v*PnfU z-oNeaI^R}yqQ$3WeZsC)OfO_kt*y3l3#V3|fjt{GkQhc>qaghbJ*ZnsVRSz>h$+#K z{515+_qn>$&-{Yo$WuCLUs)s5sAIUv(iKDn_g>>la!NeV9Fx~X{6TVKcd;{u)1@U= zGTFbWYd-zHu&$%*t^#cL_Ee{3&}shCOL24a-1L)Sx|GSzeFB=Tm@}==4PxxQdZn|S zbnRB*AUIPCtMQP+=~{xx@7%|}8I6)0Sr%>`bgl&VPdSjPH~^%$I`)1KWRm`_nh$_u zl6-fXKtif5;$y-J{rTaMRG`?wqkSam)$aO0)y&B+gL5+`P|ZbH**{c1QI`G1?6V>S zKYtuPw-wa?%##F@7cleC)FPko6Po%Y?uH`0I%Ns1#_)b#%9JmC0>6=b zkh@GFtg39Pt$!kA&*F3^K8e8!p0@vi-c_Z^Zm9{{P5XOoD`oXo5v|t5!OvJ{*{(H< zj+6Tvuk{57Sf18szSeK+7x~yfu35VNcz$b2;z& zTH609vulLRFaU+~e&O>peWCt^jl!~?$kV;QjpytU0p6}!FMmYU5jHn*L@({Dl4Or6 zRC@K#cp9r}29%hh0kd8^Rq4(C%))(0<{~pZ4*dc>Il621diSma*LO;T)?h zAtT;95Z``vYlM=O`MDZc&U7$5FmdI8O2LMM?Sn;CXvb2j2Fp*lWctsox?R=9 z{8sgCVL0Yd*&j)1g%EwgehPR;qmo~qedE@|tj&?CQ_pnUg{9`@ZVdcV_938=&J-5v z>4;#65slswhFiuMADvm8x%v72RX-Y=&dhED;%3F}Dlr{((MkkKL*IdXxDF6E@Bfxn zrOj^C;~3pR|F%ZH;HK7ANWVoN3Z3+K;p(w89bMxiK|4=4sv9rE*l*6oFs5ZrT)Vr% z^&sVO-_Lta>;%6sLCk1wGb1Fk?`E{u&+)>4to1t23y{%xRuY#Bhg#o4iehki;yzy7 zM>+Gr76#{>_pI5Aa569i1apzs%|cbpQHW+_~){o+0* zg7u;XK-@g+Xnt(Zx69>rwDo>H`jfWM4KnY{Hfg;5gBq0S2NFt}F`1|lZQVJ}(= za}<*Z$0_H$RdbV!{b0kP{ZT_^{mja3Gzh7ef0>#T860#T13lzL+g%nPDP_YF%)clG zOsxJ+ko;Z6INxM!t@U>$$p0wSqhGbLNF&Y1biG7+e^iLsVt~R{#IC2Q3ZEBr6uy?i zJ}^l#51W*j8Y$h>dXI|%Sh_rXXZ@{sXO1zxisBG3F~zO^`Avwp*c#x zWkes*Xg)`x9=Xj^xFxPiN(lwbMrcT~&Kmg&QZ8a9W)F7oUdb5$Y*KXjFIFd|J1r{P z>@89N8YlOE4Hjcyd3sD0pp-+b|8yq9&J9(ibIiGPqQA;rm;U55aCz}81~6l`oZ(|t_}u42bE8+A^U^*9 z;G!>yt5@$MtEie`S{c>a5c(&Lvz4=<)kr!DX=>DXWe=A9kBPN+8_W zIxXiq>zOR%%e@wqCEKl4Um_FNTz0Q60b3998}7{XMIJ+Ca+ci&yu(3eukuD3ip5KR zB)ze40Zw1 z$8)g+Pd>1jyf`Mm`7#y;O#>k_CoG$nYc?IyWlleOQOm^=G)%eoUfmMtZZ^&S^SEI_ z5unl>+#|Zz_dKXeyk@ekFwRJ)WPNL$nGBh>2gdd4ThWbEPqWh>n;Tav^h#%HwzeTR zfm?ofV8tk6*Nb@jjfN~nDj20G@0ZCUjHufL7`h&aZGV!UBd~p! 
zAQ@^6wEKq|t%4dEWx2Jlq_2(n zc2D#kurw@i60EiWic-*rzPs$&JGi|`M1Kn zO5upiefQJ7%B&--@pF(rM#ddrp~DAqyFmveuM1_q_APwIG{6FAfDMg@+W@<6U)`7C zj`sgb>&M~WxZ7Va*Zd-E`EJe!qu2qYu!o?j!p`o^e+0&yTqw_cvB*n$@${-jCDXNNlPPSvMCnt{wUUcxP21 z-(#oWcP5Jg+YMI@n@`=}{ls*~z4i;h{bd{YaSrwA0w*hyxh({z;~tk>$7!N!a_N%) zuDPaF|9HFQ9?9J2^WJ{sQsJ!^+q`#u?fzFk_5#QBHaRx4X+CvV!!hy+8WG*knZnh3QF_fH_9^uu>*KBz zN^UtdG0yM8i-mWrau)F5(fOMtsP)*@)#01J->;v4(aTL(dTox3Ws%D3`}_BIJ_=OB zF_sD(V{J%fDs_5%X{q;3;KrErrBc8#$s}Nk*qp@ehHIo3D1Jr|IAw4D;{o$G;4tH? z&C`ND0e5HI>=0BAI`=>Tw`C5I93ic4QAa&iOUCAGSv4`tJc$|CP%xu_hI(T}M#;uW zhMnBn(vw~A#PL(b3u1dicO|`^*;5(u;mk1`CkxyOqE0Adwt0Ts(dDavgOPmsi@%*z hT)~ag%^m_j{$D>_p`_y4`IrF+JYD@<);T3K0RYmf`2PR^ diff --git a/research/tcn/generate_videos.py b/research/tcn/generate_videos.py deleted file mode 100644 index 2b2ecba84..000000000 --- a/research/tcn/generate_videos.py +++ /dev/null @@ -1,426 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Generates imitation videos. - -Generate single pairwise imitation videos: -blaze build -c opt --config=cuda --copt=-mavx \ -learning/brain/research/tcn/generate_videos && \ -blaze-bin/learning/brain/research/tcn/generate_videos \ ---logtostderr \ ---config_paths $config_paths \ ---checkpointdir $checkpointdir \ ---checkpoint_iter $checkpoint_iter \ ---query_records_dir $query_records_dir \ ---target_records_dir $target_records_dir \ ---outdir $outdir \ ---mode single \ ---num_query_sequences 1 \ ---num_target_sequences -1 - -# Generate imitation videos with multiple sequences in the target set: -query_records_path -blaze build -c opt --config=cuda --copt=-mavx \ -learning/brain/research/tcn/generate_videos && \ -blaze-bin/learning/brain/research/tcn/generate_videos \ ---logtostderr \ ---config_paths $config_paths \ ---checkpointdir $checkpointdir \ ---checkpoint_iter $checkpoint_iter \ ---query_records_dir $query_records_dir \ ---target_records_dir $target_records_dir \ ---outdir $outdir \ ---num_multi_targets 1 \ -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import cv2 -import tensorflow as tf -import os -import matplotlib -matplotlib.use("pdf") -import matplotlib.animation as animation -import matplotlib.pyplot as plt -import numpy as np -from estimators.get_estimator import get_estimator -from utils import util -tf.logging.set_verbosity(tf.logging.INFO) - -tf.flags.DEFINE_string( - 'config_paths', '', - """ - Path to a YAML configuration files defining FLAG values. Multiple files - can be separated by the `#` symbol. Files are merged recursively. Setting - a key in these files is equivalent to setting the FLAG value with - the same name. 
- """) -tf.flags.DEFINE_string( - 'model_params', '{}', 'YAML configuration string for the model parameters.') -tf.app.flags.DEFINE_string( - 'checkpointdir', '/tmp/tcn', 'Path to model checkpoints.') -tf.app.flags.DEFINE_string( - 'checkpoint_iter', '', 'Checkpoint iter to use.') -tf.app.flags.DEFINE_integer( - 'num_multi_targets', -1, - 'Number of imitation vids in the target set per imitation video.') -tf.app.flags.DEFINE_string( - 'outdir', '/tmp/tcn', 'Path to write embeddings to.') -tf.app.flags.DEFINE_string( - 'mode', 'single', 'single | multi. Single means generate imitation vids' - 'where query is being imitated by single sequence. Multi' - 'means generate imitation vids where query is being' - 'imitated by multiple.') -tf.app.flags.DEFINE_string('query_records_dir', '', - 'Directory of image tfrecords.') -tf.app.flags.DEFINE_string('target_records_dir', '', - 'Directory of image tfrecords.') -tf.app.flags.DEFINE_integer('query_view', 1, - 'Viewpoint of the query video.') -tf.app.flags.DEFINE_integer('target_view', 0, - 'Viewpoint of the imitation video.') -tf.app.flags.DEFINE_integer('smoothing_window', 5, - 'Number of frames to smooth over.') -tf.app.flags.DEFINE_integer('num_query_sequences', -1, - 'Number of query sequences to embed.') -tf.app.flags.DEFINE_integer('num_target_sequences', -1, - 'Number of target sequences to embed.') -FLAGS = tf.app.flags.FLAGS - - -def SmoothEmbeddings(embs): - """Temporally smoothes a sequence of embeddings.""" - new_embs = [] - window = int(FLAGS.smoothing_window) - for i in range(len(embs)): - min_i = max(i-window, 0) - max_i = min(i+window, len(embs)) - new_embs.append(np.mean(embs[min_i:max_i, :], axis=0)) - return np.array(new_embs) - - -def MakeImitationVideo( - outdir, vidname, query_im_strs, knn_im_strs, height=640, width=360): - """Creates a KNN imitation video. - - For each frame in vid0, pair with the frame at index in knn_indices in - vids1. Write video to disk. - - Args: - outdir: String, directory to write videos. - vidname: String, name of video. - query_im_strs: Numpy array holding query image strings. - knn_im_strs: Numpy array holding knn image strings. - height: Int, height of raw images. - width: Int, width of raw images. - """ - if not tf.gfile.Exists(outdir): - tf.gfile.MakeDirs(outdir) - vid_path = os.path.join(outdir, vidname) - combined = zip(query_im_strs, knn_im_strs) - - # Create and write the video. - fig = plt.figure() - ax = fig.add_subplot(111) - ax.set_aspect('equal') - ax.get_xaxis().set_visible(False) - ax.get_yaxis().set_visible(False) - im = ax.imshow( - np.zeros((height, width*2, 3)), cmap='gray', interpolation='nearest') - im.set_clim([0, 1]) - plt.tight_layout(pad=0, w_pad=0, h_pad=0) - # pylint: disable=invalid-name - def update_img(pair): - """Decode pairs of image strings, update a video.""" - im_i, im_j = pair - nparr_i = np.fromstring(str(im_i), np.uint8) - img_np_i = cv2.imdecode(nparr_i, 1) - img_np_i = img_np_i[..., [2, 1, 0]] - nparr_j = np.fromstring(str(im_j), np.uint8) - img_np_j = cv2.imdecode(nparr_j, 1) - img_np_j = img_np_j[..., [2, 1, 0]] - - # Optionally reshape the images to be same size. 
- frame = np.concatenate([img_np_i, img_np_j], axis=1) - im.set_data(frame) - return im - ani = animation.FuncAnimation(fig, update_img, combined, interval=15) - writer = animation.writers['ffmpeg'](fps=15) - dpi = 100 - tf.logging.info('Writing video to:\n %s \n' % vid_path) - ani.save('%s.mp4' % vid_path, writer=writer, dpi=dpi) - - -def GenerateImitationVideo( - vid_name, query_ims, query_embs, target_ims, target_embs, height, width): - """Generates a single cross-sequence imitation video. - - For each frame in some query sequence, find the nearest neighbor from - some target sequence in embedding space. - - Args: - vid_name: String, the name of the video. - query_ims: Numpy array of shape [query sequence length, height, width, 3]. - query_embs: Numpy array of shape [query sequence length, embedding size]. - target_ims: Numpy array of shape [target sequence length, height, width, - 3]. - target_embs: Numpy array of shape [target sequence length, embedding - size]. - height: Int, height of the raw image. - width: Int, width of the raw image. - """ - # For each query frame, find the index of the nearest neighbor in the - # target video. - knn_indices = [util.KNNIds(q, target_embs, k=1)[0] for q in query_embs] - - # Create and write out the video. - assert knn_indices - knn_ims = np.array([target_ims[k] for k in knn_indices]) - MakeImitationVideo(FLAGS.outdir, vid_name, query_ims, knn_ims, height, width) - - -def SingleImitationVideos( - query_records, target_records, config, height, width): - """Generates pairwise imitation videos. - - This creates all pairs of target imitating query videos, where each frame - on the left is matched to a nearest neighbor coming a single - embedded target video. - - Args: - query_records: List of Strings, paths to tfrecord datasets to use as - queries. - target_records: List of Strings, paths to tfrecord datasets to use as - targets. - config: A T object describing training config. - height: Int, height of the raw image. - width: Int, width of the raw image. - """ - # Embed query and target data. - (query_sequences_to_data, - target_sequences_to_data) = EmbedQueryTargetData( - query_records, target_records, config) - - qview = FLAGS.query_view - tview = FLAGS.target_view - - # Loop over query videos. - for task_i, data_i in query_sequences_to_data.iteritems(): - for task_j, data_j in target_sequences_to_data.iteritems(): - i_ims = data_i['images'] - i_embs = data_i['embeddings'] - query_embs = SmoothEmbeddings(i_embs[qview]) - query_ims = i_ims[qview] - - j_ims = data_j['images'] - j_embs = data_j['embeddings'] - target_embs = SmoothEmbeddings(j_embs[tview]) - target_ims = j_ims[tview] - - tf.logging.info('Generating %s imitating %s video.' % (task_j, task_i)) - vid_name = 'q%sv%s_im%sv%s' % (task_i, qview, task_j, tview) - vid_name = vid_name.replace('/', '_') - GenerateImitationVideo(vid_name, query_ims, query_embs, - target_ims, target_embs, height, width) - - -def MultiImitationVideos( - query_records, target_records, config, height, width): - """Creates multi-imitation videos. - - This creates videos where every frame on the left is matched to a nearest - neighbor coming from a set of multiple embedded target videos. - - Args: - query_records: List of Strings, paths to tfrecord datasets to use as - queries. - target_records: List of Strings, paths to tfrecord datasets to use as - targets. - config: A T object describing training config. - height: Int, height of the raw image. - width: Int, width of the raw image. - """ - # Embed query and target data. 
-  (query_sequences_to_data,
-   target_sequences_to_data) = EmbedQueryTargetData(
-       query_records, target_records, config)
-
-  qview = FLAGS.query_view
-  tview = FLAGS.target_view
-
-  # Loop over query videos.
-  for task_i, data_i in query_sequences_to_data.iteritems():
-    i_ims = data_i['images']
-    i_embs = data_i['embeddings']
-    query_embs = SmoothEmbeddings(i_embs[qview])
-    query_ims = i_ims[qview]
-
-    all_target_embs = []
-    all_target_ims = []
-
-    # If num_multi_targets is -1, add all seq embeddings to the target set.
-    if FLAGS.num_multi_targets == -1:
-      num_multi_targets = len(target_sequences_to_data)
-    else:
-      # Else, add some specified number of seq embeddings to the target set.
-      num_multi_targets = FLAGS.num_multi_targets
-    for j in range(num_multi_targets):
-      task_j = target_sequences_to_data.keys()[j]
-      data_j = target_sequences_to_data[task_j]
-      print('Adding %s to target set' % task_j)
-      j_ims = data_j['images']
-      j_embs = data_j['embeddings']
-
-      target_embs = SmoothEmbeddings(j_embs[tview])
-      target_ims = j_ims[tview]
-      all_target_embs.extend(target_embs)
-      all_target_ims.extend(target_ims)
-
-    # Generate an "all imitating i" video.
-    tf.logging.info('Generating all imitating %s video.' % task_i)
-    vid_name = 'q%sv%s_multiv%s' % (task_i, qview, tview)
-    vid_name = vid_name.replace('/', '_')
-    GenerateImitationVideo(vid_name, query_ims, query_embs,
-                           all_target_ims, all_target_embs, height, width)
-
-
-def SameSequenceVideos(query_records, config, height, width):
-  """Generate same sequence, cross-view imitation videos."""
-  batch_size = config.data.embed_batch_size
-
-  # Choose an estimator based on training strategy.
-  estimator = get_estimator(config, FLAGS.checkpointdir)
-
-  # Choose a checkpoint path to restore.
-  checkpointdir = FLAGS.checkpointdir
-  checkpoint_path = os.path.join(checkpointdir,
-                                 'model.ckpt-%s' % FLAGS.checkpoint_iter)
-
-  # Embed num_sequences query sequences, store embeddings and image strings in
-  # query_sequences_to_data.
-  sequences_to_data = {}
-  for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference(
-      query_records, checkpoint_path, batch_size,
-      num_sequences=FLAGS.num_query_sequences):
-    sequences_to_data[seqname] = {
-        'embeddings': view_embeddings,
-        'images': view_raw_image_strings,
-    }
-
-  # Loop over query videos.
-  qview = FLAGS.query_view
-  tview = FLAGS.target_view
-  for task_i, data_i in sequences_to_data.iteritems():
-    ims = data_i['images']
-    embs = data_i['embeddings']
-    query_embs = SmoothEmbeddings(embs[qview])
-    query_ims = ims[qview]
-
-    target_embs = SmoothEmbeddings(embs[tview])
-    target_ims = ims[tview]
-
-    tf.logging.info('Generating %s imitating %s video.' % (task_i, task_i))
-    vid_name = 'q%sv%s_im%sv%s' % (task_i, qview, task_i, tview)
-    vid_name = vid_name.replace('/', '_')
-    GenerateImitationVideo(vid_name, query_ims, query_embs,
-                           target_ims, target_embs, height, width)
-
-
-def EmbedQueryTargetData(query_records, target_records, config):
-  """Embeds the full set of query_records and target_records.
-
-  Args:
-    query_records: List of Strings, paths to tfrecord datasets to use as
-      queries.
-    target_records: List of Strings, paths to tfrecord datasets to use as
-      targets.
-    config: A LuaTable object describing the training config.
-
-  Returns:
-    query_sequences_to_data: A dict holding 'embeddings' and 'images'.
-    target_sequences_to_data: A dict holding 'embeddings' and 'images'.
-  """
-  batch_size = config.data.embed_batch_size
-
-  # Choose an estimator based on training strategy.
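-  # Note: when the query and target record sets (and sequence counts) are
-  # identical, the code below reuses the query embeddings as the target
-  # embeddings instead of running inference a second time.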
- estimator = get_estimator(config, FLAGS.checkpointdir) - - # Choose a checkpoint path to restore. - checkpointdir = FLAGS.checkpointdir - checkpoint_path = os.path.join(checkpointdir, - 'model.ckpt-%s' % FLAGS.checkpoint_iter) - - # Embed num_sequences query sequences, store embeddings and image strings in - # query_sequences_to_data. - num_query_sequences = FLAGS.num_query_sequences - num_target_sequences = FLAGS.num_target_sequences - query_sequences_to_data = {} - for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference( - query_records, checkpoint_path, batch_size, - num_sequences=num_query_sequences): - query_sequences_to_data[seqname] = { - 'embeddings': view_embeddings, - 'images': view_raw_image_strings, - } - - if (query_records == target_records) and ( - num_query_sequences == num_target_sequences): - target_sequences_to_data = query_sequences_to_data - else: - # Embed num_sequences target sequences, store embeddings and image strings - # in sequences_to_data. - target_sequences_to_data = {} - for (view_embeddings, view_raw_image_strings, - seqname) in estimator.inference( - target_records, checkpoint_path, batch_size, - num_sequences=num_target_sequences): - target_sequences_to_data[seqname] = { - 'embeddings': view_embeddings, - 'images': view_raw_image_strings, - } - return query_sequences_to_data, target_sequences_to_data - - -def main(_): - # Parse config dict from yaml config files / command line flags. - config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params) - - # Get tables to embed. - query_records_dir = FLAGS.query_records_dir - query_records = util.GetFilesRecursively(query_records_dir) - - target_records_dir = FLAGS.target_records_dir - target_records = util.GetFilesRecursively(target_records_dir) - - height = config.data.raw_height - width = config.data.raw_width - mode = FLAGS.mode - if mode == 'multi': - # Generate videos where target set is composed of multiple videos. - MultiImitationVideos(query_records, target_records, config, - height, width) - elif mode == 'single': - # Generate videos where target set is a single video. - SingleImitationVideos(query_records, target_records, config, - height, width) - elif mode == 'same': - # Generate videos where target set is the same as query, but diff view. - SameSequenceVideos(query_records, config, height, width) - else: - raise ValueError('Unknown mode %s' % mode) - -if __name__ == '__main__': - tf.app.run() diff --git a/research/tcn/labeled_eval.py b/research/tcn/labeled_eval.py deleted file mode 100644 index a28856a13..000000000 --- a/research/tcn/labeled_eval.py +++ /dev/null @@ -1,309 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Generates test Recall@K statistics on labeled classification problems."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import defaultdict
-import os
-import numpy as np
-from sklearn.metrics.pairwise import pairwise_distances
-from six.moves import xrange
-import data_providers
-from estimators.get_estimator import get_estimator
-from utils import util
-import tensorflow as tf
-tf.logging.set_verbosity(tf.logging.INFO)
-
-
-tf.flags.DEFINE_string(
-    'config_paths', '',
-    """
-    Paths to YAML configuration files defining FLAG values. Multiple files
-    can be separated by the `#` symbol. Files are merged recursively. Setting
-    a key in these files is equivalent to setting the FLAG value with
-    the same name.
-    """)
-tf.flags.DEFINE_string(
-    'model_params', '{}', 'YAML configuration string for the model parameters.')
-tf.app.flags.DEFINE_string(
-    'mode', 'validation',
-    'Which dataset to evaluate: `validation` | `test`.')
-tf.app.flags.DEFINE_string('master', 'local',
-                           'BNS name of the TensorFlow master to use')
-tf.app.flags.DEFINE_string(
-    'checkpoint_iter', '', 'Evaluate this specific checkpoint.')
-tf.app.flags.DEFINE_string(
-    'checkpointdir', '/tmp/tcn', 'Path to model checkpoints.')
-tf.app.flags.DEFINE_string('outdir', '/tmp/tcn', 'Path to write summaries to.')
-FLAGS = tf.app.flags.FLAGS
-
-
-def nearest_cross_sequence_neighbors(data, tasks, n_neighbors=1):
-  """Computes the n_neighbors nearest neighbors for every row in data.
-
-  Args:
-    data: A np.float32 array of shape [num_data, embedding size] holding
-      an embedded validation / test dataset.
-    tasks: A list of strings of size [num_data] holding the task or sequence
-      name that each row belongs to.
-    n_neighbors: The number of knn indices to return for each row.
-  Returns:
-    indices: an np.int32 array of size [num_data, n_neighbors] holding the
-      n_neighbors nearest indices for every row in data. These are
-      restricted to be from different named sequences (as defined in `tasks`).
-  """
-
-  # Compute the pairwise sequence adjacency matrix from `tasks`.
-  num_data = data.shape[0]
-  tasks = np.array(tasks)
-  tasks = np.reshape(tasks, (num_data, 1))
-  assert len(tasks.shape) == 2
-  not_adjacent = (tasks != tasks.T)
-
-  # Compute the symmetric pairwise distance matrix.
-  pdist = pairwise_distances(data, metric='sqeuclidean')
-
-  # For every row in the pairwise distance matrix, only consider
-  # cross-sequence columns.
-  indices = np.zeros((num_data, n_neighbors), dtype=np.int32)
-  for idx in range(num_data):
-    # Restrict to cross_sequence neighbors.
-    distances = [(
-        pdist[idx][i], i) for i in xrange(num_data) if not_adjacent[idx][i]]
-    _, nearest_indices = zip(*sorted(
-        distances, key=lambda x: x[0])[:n_neighbors])
-    indices[idx] = nearest_indices
-  return indices
-
-
-def compute_cross_sequence_recall_at_k(retrieved_labels, labels, k_list):
-  """Compute recall@k for a given list of k values.
-
-  Recall is one if an example of the same class is retrieved among the
-  top k nearest neighbors given a query example and zero otherwise.
-  Counting the recall for all examples and averaging the counts returns
-  the recall@k score.
-
-  Args:
-    retrieved_labels: 2-D Numpy array of KNN labels for every embedding.
-    labels: 1-D Numpy array of shape [number of data].
-    k_list: List of k values to evaluate recall@k.
-
-  Returns:
-    recall_list: List of recall@k values.
- """ - kvalue_to_recall = dict(zip(k_list, np.zeros(len(k_list)))) - - # For each value of K. - for k in k_list: - matches = defaultdict(float) - counts = defaultdict(float) - # For each (row index, label value) in the query labels. - for i, label_value in enumerate(labels): - # Loop over the K nearest retrieved labels. - if label_value in retrieved_labels[i][:k]: - matches[label_value] += 1. - # Increment the denominator. - counts[label_value] += 1. - kvalue_to_recall[k] = np.mean( - [matches[l]/counts[l] for l in matches]) - return [kvalue_to_recall[i] for i in k_list] - - -def compute_cross_sequence_recalls_at_k( - embeddings, labels, label_attr_keys, tasks, k_list, summary_writer, - training_step): - """Computes and reports the recall@k for each classification problem. - - This takes an embedding matrix and an array of multiclass labels - with size [num_data, number of classification problems], then - computes the average recall@k for each classification problem - as well as the average across problems. - - Args: - embeddings: A np.float32 array of size [num_data, embedding_size] - representing the embedded validation or test dataset. - labels: A np.int32 array of size [num_data, num_classification_problems] - holding multiclass labels for each embedding for each problem. - label_attr_keys: List of strings, holds the names of the classification - problems. - tasks: A list of strings describing the video sequence each row - belongs to. This is used to restrict the recall@k computation - to cross-sequence examples. - k_list: A list of ints, the k values to evaluate recall@k. - summary_writer: A tf.summary.FileWriter. - training_step: Int, the current training step we're evaluating. - """ - num_data = float(embeddings.shape[0]) - assert labels.shape[0] == num_data - - # Compute knn indices. - indices = nearest_cross_sequence_neighbors( - embeddings, tasks, n_neighbors=max(k_list)) - retrieved_labels = labels[indices] - - # Compute the recall@k for each classification problem. - recall_lists = [] - for idx, label_attr in enumerate(label_attr_keys): - problem_labels = labels[:, idx] - # Take all indices, all k labels for the problem indexed by idx. - problem_retrieved = retrieved_labels[:, :, idx] - recall_list = compute_cross_sequence_recall_at_k( - retrieved_labels=problem_retrieved, - labels=problem_labels, - k_list=k_list) - recall_lists.append(recall_list) - for (k, recall) in zip(k_list, recall_list): - recall_error = 1-recall - summ = tf.Summary(value=[tf.Summary.Value( - tag='validation/classification/%s error@top%d' % ( - label_attr, k), - simple_value=recall_error)]) - print('%s recall@K=%d' % (label_attr, k), recall_error) - summary_writer.add_summary(summ, int(training_step)) - - # Report an average recall@k across problems. - recall_lists = np.array(recall_lists) - for i in range(recall_lists.shape[1]): - average_recall = np.mean(recall_lists[:, i]) - recall_error = 1 - average_recall - summ = tf.Summary(value=[tf.Summary.Value( - tag='validation/classification/average error@top%d' % k_list[i], - simple_value=recall_error)]) - print('Average recall@K=%d' % k_list[i], recall_error) - summary_writer.add_summary(summ, int(training_step)) - - -def evaluate_once( - estimator, input_fn_by_view, batch_size, checkpoint_path, - label_attr_keys, embedding_size, num_views, k_list): - """Compute the recall@k for a given checkpoint path. - - Args: - estimator: an `Estimator` object to evaluate. - input_fn_by_view: An input_fn to an `Estimator's` predict method. 
Takes - a view index and returns a dict holding ops for getting raw images for - the view. - batch_size: Int, size of the labeled eval batch. - checkpoint_path: String, path to the specific checkpoint being evaluated. - label_attr_keys: A list of Strings, holding each attribute name. - embedding_size: Int, the size of the embedding. - num_views: Int, number of views in the dataset. - k_list: List of ints, list of K values to compute recall at K for. - """ - feat_matrix = np.zeros((0, embedding_size)) - label_vect = np.zeros((0, len(label_attr_keys))) - tasks = [] - eval_tensor_keys = ['embeddings', 'tasks', 'classification_labels'] - - # Iterate all views in the dataset. - for view_index in range(num_views): - # Set up a graph for embedding entire dataset. - predictions = estimator.inference( - input_fn_by_view(view_index), checkpoint_path, - batch_size, predict_keys=eval_tensor_keys) - - # Enumerate predictions. - for i, p in enumerate(predictions): - if i % 100 == 0: - tf.logging.info('Embedded %d images for view %d' % (i, view_index)) - - label = p['classification_labels'] - task = p['tasks'] - embedding = p['embeddings'] - - # Collect (embedding, label, task) data. - feat_matrix = np.append(feat_matrix, [embedding], axis=0) - label_vect = np.append(label_vect, [label], axis=0) - tasks.append(task) - - # Compute recall statistics. - ckpt_step = int(checkpoint_path.split('-')[-1]) - summary_dir = os.path.join(FLAGS.outdir, 'labeled_eval_summaries') - summary_writer = tf.summary.FileWriter(summary_dir) - compute_cross_sequence_recalls_at_k( - feat_matrix, label_vect, label_attr_keys, tasks, k_list, - summary_writer, ckpt_step) - - -def get_labeled_tables(config): - """Gets either labeled test or validation tables, based on flags.""" - # Get a list of filenames corresponding to labeled data. - mode = FLAGS.mode - if mode == 'validation': - labeled_tables = util.GetFilesRecursively(config.data.labeled.validation) - elif mode == 'test': - labeled_tables = util.GetFilesRecursively(config.data.labeled.test) - else: - raise ValueError('Unknown dataset: %s' % mode) - return labeled_tables - - -def main(_): - """Runs main labeled eval loop.""" - # Parse config dict from yaml config files / command line flags. - config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params) - - # Choose an estimator based on training strategy. - checkpointdir = FLAGS.checkpointdir - estimator = get_estimator(config, checkpointdir) - - # Get data configs. - image_attr_keys = config.data.labeled.image_attr_keys - label_attr_keys = config.data.labeled.label_attr_keys - embedding_size = config.embedding_size - num_views = config.data.num_views - k_list = config.val.recall_at_k_list - batch_size = config.data.batch_size - - # Get either labeled validation or test tables. - labeled_tables = get_labeled_tables(config) - - def input_fn_by_view(view_index): - """Returns an input_fn for use with a tf.Estimator by view.""" - def input_fn(): - # Get raw labeled images. - (preprocessed_images, labels, - tasks) = data_providers.labeled_data_provider( - labeled_tables, - estimator.preprocess_data, view_index, image_attr_keys, - label_attr_keys, batch_size=batch_size) - return { - 'batch_preprocessed': preprocessed_images, - 'tasks': tasks, - 'classification_labels': labels, - }, None - return input_fn - - # If evaluating a specific checkpoint, do that. 
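-  # Run either a one-shot evaluation of a named checkpoint or a continuous
-  # evaluation loop that wakes up whenever a new checkpoint appears in
-  # checkpointdir.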
-  if FLAGS.checkpoint_iter:
-    checkpoint_path = os.path.join(
-        checkpointdir, 'model.ckpt-%s' % FLAGS.checkpoint_iter)
-    evaluate_once(
-        estimator, input_fn_by_view, batch_size, checkpoint_path,
-        label_attr_keys, embedding_size, num_views, k_list)
-  else:
-    for checkpoint_path in tf.contrib.training.checkpoints_iterator(
-        checkpointdir):
-      evaluate_once(
-          estimator, input_fn_by_view, batch_size, checkpoint_path,
-          label_attr_keys, embedding_size, num_views, k_list)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/tcn/labeled_eval_test.py b/research/tcn/labeled_eval_test.py
deleted file mode 100644
index e586e2181..000000000
--- a/research/tcn/labeled_eval_test.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Tests for tcn.labeled_eval."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import labeled_eval
-import tensorflow as tf
-
-
-class LabeledEvalTest(tf.test.TestCase):
-
-  def testNearestCrossSequenceNeighbors(self):
-    # Generate embeddings.
-    num_data = 64
-    embedding_size = 4
-    num_tasks = 8
-    n_neighbors = 2
-    data = np.random.randn(num_data, embedding_size)
-    tasks = np.repeat(range(num_tasks), num_data // num_tasks)
-
-    # Get nearest cross-sequence indices.
-    indices = labeled_eval.nearest_cross_sequence_neighbors(
-        data, tasks, n_neighbors=n_neighbors)
-
-    # Assert that no nearest neighbor indices come from the same task.
-    repeated_tasks = np.tile(np.reshape(tasks, (num_data, 1)), n_neighbors)
-    self.assertTrue(np.all(np.not_equal(repeated_tasks, tasks[indices])))
-
-  def testPerfectCrossSequenceRecall(self):
-    # Make sure cross-sequence recall@k returns 1.0 for near-duplicate features.
-    embeddings = np.random.randn(10, 2)
-    embeddings[5:, :] = 0.00001 + embeddings[:5, :]
-    tasks = np.repeat([0, 1], 5)
-    labels = np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
-    # find k=1, k=2 nearest neighbors.
-    k_list = [1, 2]
-
-    # Compute knn indices.
-    indices = labeled_eval.nearest_cross_sequence_neighbors(
-        embeddings, tasks, n_neighbors=max(k_list))
-    retrieved_labels = labels[indices]
-    recall_list = labeled_eval.compute_cross_sequence_recall_at_k(
-        retrieved_labels=retrieved_labels,
-        labels=labels,
-        k_list=k_list)
-    self.assertTrue(np.allclose(
-        np.array(recall_list), np.array([1.0, 1.0])))
-
-  def testRelativeRecall(self):
-    # Make sure cross-sequence recall@k is non-decreasing in k.
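-    # (Each retrieved-label list for k is a prefix of the list for any larger
-    # k, so a match found at k is still a match at every k' > k.)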
- num_data = 100 - num_tasks = 10 - embeddings = np.random.randn(100, 5) - tasks = np.repeat(range(num_tasks), num_data // num_tasks) - labels = np.random.randint(0, 5, 100) - - k_list = [1, 2, 4, 8, 16, 32, 64] - indices = labeled_eval.nearest_cross_sequence_neighbors( - embeddings, tasks, n_neighbors=max(k_list)) - retrieved_labels = labels[indices] - recall_list = labeled_eval.compute_cross_sequence_recall_at_k( - retrieved_labels=retrieved_labels, - labels=labels, - k_list=k_list) - recall_list_sorted = sorted(recall_list) - self.assertTrue(np.allclose( - np.array(recall_list), np.array(recall_list_sorted))) - -if __name__ == "__main__": - tf.test.main() diff --git a/research/tcn/model.py b/research/tcn/model.py deleted file mode 100644 index 91db1b3e1..000000000 --- a/research/tcn/model.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Model implementations.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from abc import ABCMeta -from abc import abstractmethod -import tensorflow as tf -import tensorflow.contrib.slim as slim -from tensorflow.contrib.slim.python.slim.nets import inception -from tensorflow.contrib.slim.python.slim.nets import resnet_v2 as resnet_v2 -from tensorflow.contrib.slim.python.slim.nets import resnet_utils as resnet_utils - - -def get_embedder( - embedder_strategy, config, images, is_training, reuse=False, - l2_normalize_embedding=True): - """Returns an embedder based on config. - - Args: - embedder_strategy: String, name of embedder version to return. - config: LuaTable object, training config. - images: 4-D float `Tensor` containing batch images. - is_training: Boolean or placeholder for boolean, - indicator for whether or not we're training. - reuse: Boolean: Reuse embedder variable scope. - l2_normalize_embedding: Boolean, whether or not to l2 normalize the - embedding. - Returns: - embedder: An `Embedder` object. - Raises: - ValueError: if unknown embedder_strategy specified. 
- """ - if embedder_strategy == 'inception_baseline': - pretrained_ckpt = config.inception_conv_ss_fc.pretrained_checkpoint - return InceptionBaselineEmbedder( - images, - pretrained_ckpt, - config.random_projection, - config.random_projection_dim) - - strategy_to_embedder = { - 'inception_conv_ss_fc': InceptionConvSSFCEmbedder, - 'resnet': ResnetEmbedder, - } - if embedder_strategy not in strategy_to_embedder: - raise ValueError('unknown embedder_strategy', embedder_strategy) - - embedding_size = config.embedding_size - l2_reg_weight = config.learning.l2_reg_weight - embedder = strategy_to_embedder[embedder_strategy]( - config[embedder_strategy], images, embedding_size, - is_training, embedding_l2=l2_normalize_embedding, - l2_reg_weight=l2_reg_weight, reuse=reuse) - return embedder - - -def build_inceptionv3_graph(images, endpoint, is_training, checkpoint, - reuse=False): - """Builds an InceptionV3 model graph. - - Args: - images: A 4-D float32 `Tensor` of batch images. - endpoint: String, name of the InceptionV3 endpoint. - is_training: Boolean, whether or not to build a training or inference graph. - checkpoint: String, path to the pretrained model checkpoint. - reuse: Boolean, whether or not we are reusing the embedder. - Returns: - inception_output: `Tensor` holding the InceptionV3 output. - inception_variables: List of inception variables. - init_fn: Function to initialize the weights (if not reusing, then None). - """ - with slim.arg_scope(inception.inception_v3_arg_scope()): - _, endpoints = inception.inception_v3( - images, num_classes=1001, is_training=is_training) - inception_output = endpoints[endpoint] - inception_variables = slim.get_variables_to_restore() - inception_variables = [ - i for i in inception_variables if 'global_step' not in i.name] - if is_training and not reuse: - init_saver = tf.train.Saver(inception_variables) - def init_fn(scaffold, sess): - del scaffold - init_saver.restore(sess, checkpoint) - else: - init_fn = None - return inception_output, inception_variables, init_fn - - -class InceptionBaselineEmbedder(object): - """Produces pre-trained InceptionV3 embeddings.""" - - def __init__(self, images, pretrained_ckpt, reuse=False, - random_projection=False, random_projection_dim=32): - # Build InceptionV3 graph. - (inception_output, - self.inception_variables, - self.init_fn) = build_inceptionv3_graph( - images, 'Mixed_7c', False, pretrained_ckpt, reuse) - - # Pool 8x8x2048 -> 1x1x2048. - embedding = slim.avg_pool2d(inception_output, [8, 8], stride=1) - embedding = tf.squeeze(embedding, [1, 2]) - - if random_projection: - embedding = tf.matmul( - embedding, tf.random_normal( - shape=[2048, random_projection_dim], seed=123)) - self.embedding = embedding - - -class PretrainedEmbedder(object): - """Base class for embedders that take pre-trained networks as input.""" - __metaclass__ = ABCMeta - - def __init__(self, config, images, embedding_size, is_training, - embedding_l2=True, l2_reg_weight=1e-6, reuse=False): - """Constructor. - - Args: - config: A T object holding training config. - images: A 4-D float32 `Tensor` holding images to embed. - embedding_size: Int, the size of the embedding. - is_training: Boolean, whether or not this is a training or inference-time - graph. - embedding_l2: Boolean, whether or not to l2 normalize the embedding. - l2_reg_weight: Float, weight applied to l2 weight regularization. - reuse: Boolean, whether or not we're reusing this graph. - """ - # Pull out all the embedder hyperparameters. 
- self._config = config - self._embedding_size = embedding_size - self._l2_reg_weight = l2_reg_weight - self._embedding_l2 = embedding_l2 - self._is_training = is_training - self._reuse = reuse - - # Pull out pretrained hparams. - pretrained_checkpoint = config.pretrained_checkpoint - pretrained_layer = config.pretrained_layer - pretrained_keep_prob = config.dropout.keep_pretrained - - # Build pretrained graph. - (pretrained_output, - self._pretrained_variables, - self.init_fn) = self.build_pretrained_graph( - images, pretrained_layer, pretrained_checkpoint, is_training, reuse) - - # Optionally drop out the activations. - pretrained_output = slim.dropout( - pretrained_output, keep_prob=pretrained_keep_prob, - is_training=is_training) - self._pretrained_output = pretrained_output - - @abstractmethod - def build_pretrained_graph(self, images, layer, pretrained_checkpoint, - is_training, reuse): - """Builds the graph for the pre-trained network. - - Method to be overridden by implementations. - - Args: - images: A 4-D tf.float32 `Tensor` holding images to embed. - layer: String, defining which pretrained layer to take as input - to adaptation layers. - pretrained_checkpoint: String, path to a checkpoint used to load - pretrained weights. - is_training: Boolean, whether or not we're in training mode. - reuse: Boolean, whether or not to reuse embedder weights. - - Returns: - pretrained_output: A 2 or 3-d tf.float32 `Tensor` holding pretrained - activations. - """ - pass - - @abstractmethod - def construct_embedding(self): - """Builds an embedding function on top of images. - - Method to be overridden by implementations. - - Returns: - embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size] - holding the embedded images. - """ - pass - - def get_trainable_variables(self): - """Gets a list of variables to optimize.""" - if self._config.finetune: - return tf.trainable_variables() - else: - adaptation_only_vars = tf.get_collection( - tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._adaptation_scope) - return adaptation_only_vars - - -class ResnetEmbedder(PretrainedEmbedder): - """Resnet TCN. - - ResnetV2 -> resnet adaptation layers -> optional l2 normalize -> embedding. - """ - - def __init__(self, config, images, embedding_size, is_training, - embedding_l2=True, l2_reg_weight=1e-6, reuse=False): - super(ResnetEmbedder, self).__init__( - config, images, embedding_size, is_training, embedding_l2, - l2_reg_weight, reuse) - - def build_pretrained_graph( - self, images, resnet_layer, checkpoint, is_training, reuse=False): - """See baseclass.""" - with slim.arg_scope(resnet_v2.resnet_arg_scope()): - _, endpoints = resnet_v2.resnet_v2_50( - images, is_training=is_training, reuse=reuse) - resnet_layer = 'resnet_v2_50/block%d' % resnet_layer - resnet_output = endpoints[resnet_layer] - resnet_variables = slim.get_variables_to_restore() - resnet_variables = [ - i for i in resnet_variables if 'global_step' not in i.name] - if is_training and not reuse: - init_saver = tf.train.Saver(resnet_variables) - def init_fn(scaffold, sess): - del scaffold - init_saver.restore(sess, checkpoint) - else: - init_fn = None - - return resnet_output, resnet_variables, init_fn - - def construct_embedding(self): - """Builds an embedding function on top of images. - - Method to be overridden by implementations. - - Returns: - embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size] - holding the embedded images. 
- """ - with tf.variable_scope('tcn_net', reuse=self._reuse) as vs: - self._adaptation_scope = vs.name - net = self._pretrained_output - - # Define some adaptation blocks on top of the pre-trained resnet output. - adaptation_blocks = [] - adaptation_block_params = [map( - int, i.split('_')) for i in self._config.adaptation_blocks.split('-')] - for i, (depth, num_units) in enumerate(adaptation_block_params): - block = resnet_v2.resnet_v2_block( - 'adaptation_block_%d' % i, base_depth=depth, num_units=num_units, - stride=1) - adaptation_blocks.append(block) - - # Stack them on top of the resent output. - net = resnet_utils.stack_blocks_dense( - net, adaptation_blocks, output_stride=None) - - # Average pool the output. - net = tf.reduce_mean(net, [1, 2], name='adaptation_pool', keep_dims=True) - - if self._config.emb_connection == 'fc': - # Use fully connected layer to project to embedding layer. - fc_hidden_sizes = self._config.fc_hidden_sizes - if fc_hidden_sizes == 'None': - fc_hidden_sizes = [] - else: - fc_hidden_sizes = map(int, fc_hidden_sizes.split('_')) - fc_hidden_keep_prob = self._config.dropout.keep_fc - net = tf.squeeze(net) - for fc_hidden_size in fc_hidden_sizes: - net = slim.layers.fully_connected(net, fc_hidden_size) - if fc_hidden_keep_prob < 1.0: - net = slim.dropout(net, keep_prob=fc_hidden_keep_prob, - is_training=self._is_training) - - # Connect last FC layer to embedding. - embedding = slim.layers.fully_connected(net, self._embedding_size, - activation_fn=None) - else: - # Use 1x1 conv layer to project to embedding layer. - embedding = slim.conv2d( - net, self._embedding_size, [1, 1], activation_fn=None, - normalizer_fn=None, scope='embedding') - embedding = tf.squeeze(embedding) - - # Optionally L2 normalize the embedding. - if self._embedding_l2: - embedding = tf.nn.l2_normalize(embedding, dim=1) - - return embedding - - def get_trainable_variables(self): - """Gets a list of variables to optimize.""" - if self._config.finetune: - return tf.trainable_variables() - else: - adaptation_only_vars = tf.get_collection( - tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._adaptation_scope) - return adaptation_only_vars - - -class InceptionEmbedderBase(PretrainedEmbedder): - """Base class for embedders that take pre-trained InceptionV3 activations.""" - - def __init__(self, config, images, embedding_size, is_training, - embedding_l2=True, l2_reg_weight=1e-6, reuse=False): - super(InceptionEmbedderBase, self).__init__( - config, images, embedding_size, is_training, embedding_l2, - l2_reg_weight, reuse) - - def build_pretrained_graph( - self, images, inception_layer, checkpoint, is_training, reuse=False): - """See baseclass.""" - # Build InceptionV3 graph. - inception_output, inception_variables, init_fn = build_inceptionv3_graph( - images, inception_layer, is_training, checkpoint, reuse) - return inception_output, inception_variables, init_fn - - -class InceptionConvSSFCEmbedder(InceptionEmbedderBase): - """TCN Embedder V1. - - InceptionV3 (mixed_5d) -> conv layers -> spatial softmax -> - fully connected -> optional l2 normalize -> embedding. - """ - - def __init__(self, config, images, embedding_size, is_training, - embedding_l2=True, l2_reg_weight=1e-6, reuse=False): - super(InceptionConvSSFCEmbedder, self).__init__( - config, images, embedding_size, is_training, embedding_l2, - l2_reg_weight, reuse) - - # Pull out all the hyperparameters specific to this embedder. 
- self._additional_conv_sizes = config.additional_conv_sizes - self._conv_hidden_keep_prob = config.dropout.keep_conv - self._fc_hidden_sizes = config.fc_hidden_sizes - self._fc_hidden_keep_prob = config.dropout.keep_fc - - def construct_embedding(self): - """Builds a conv -> spatial softmax -> FC adaptation network.""" - is_training = self._is_training - normalizer_params = {'is_training': is_training} - with tf.variable_scope('tcn_net', reuse=self._reuse) as vs: - self._adaptation_scope = vs.name - with slim.arg_scope( - [slim.layers.conv2d], - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params, - weights_regularizer=slim.regularizers.l2_regularizer( - self._l2_reg_weight), - biases_regularizer=slim.regularizers.l2_regularizer( - self._l2_reg_weight)): - with slim.arg_scope( - [slim.layers.fully_connected], - activation_fn=tf.nn.relu, - normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params, - weights_regularizer=slim.regularizers.l2_regularizer( - self._l2_reg_weight), - biases_regularizer=slim.regularizers.l2_regularizer( - self._l2_reg_weight)): - - # Input to embedder is pre-trained inception output. - net = self._pretrained_output - - # Optionally add more conv layers. - for num_filters in self._additional_conv_sizes: - net = slim.layers.conv2d( - net, num_filters, kernel_size=[3, 3], stride=[1, 1]) - net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob, - is_training=is_training) - - # Take the spatial soft arg-max of the last convolutional layer. - # This is a form of spatial attention over the activations. - # See more here: http://arxiv.org/abs/1509.06113. - net = tf.contrib.layers.spatial_softmax(net) - self.spatial_features = net - - # Add fully connected layers. - net = slim.layers.flatten(net) - for fc_hidden_size in self._fc_hidden_sizes: - net = slim.layers.fully_connected(net, fc_hidden_size) - if self._fc_hidden_keep_prob < 1.0: - net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob, - is_training=is_training) - - # Connect last FC layer to embedding. - net = slim.layers.fully_connected(net, self._embedding_size, - activation_fn=None) - - # Optionally L2 normalize the embedding. - if self._embedding_l2: - net = tf.nn.l2_normalize(net, dim=1) - - return net diff --git a/research/tcn/preprocessing.py b/research/tcn/preprocessing.py deleted file mode 100644 index 707625aaa..000000000 --- a/research/tcn/preprocessing.py +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Image preprocessing helpers."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import cv2
-from scipy import ndimage
-import tensorflow as tf
-from tensorflow.python.ops import control_flow_ops
-
-
-def apply_with_random_selector(x, func, num_cases):
-  """Computes func(x, sel), with sel sampled from [0...num_cases-1].
-
-  TODO(coreylynch): add as a dependency, when slim or tensorflow/models are
-  pipfied.
-  Source:
-  https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py
-
-  Args:
-    x: input Tensor.
-    func: Python function to apply.
-    num_cases: Python int32, number of cases to sample sel from.
-  Returns:
-    The result of func(x, sel), where func receives the value of the
-    selector as a python integer, but sel is sampled dynamically.
-  """
-  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
-  # Pass the real x only to one of the func calls.
-  return control_flow_ops.merge([
-      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
-      for case in range(num_cases)])[0]
-
-
-def distorted_bounding_box_crop(image,
-                                bbox,
-                                min_object_covered=0.1,
-                                aspect_ratio_range=(0.75, 1.33),
-                                area_range=(0.05, 1.0),
-                                max_attempts=100,
-                                scope=None):
-  """Generates cropped_image using one of the bboxes randomly distorted.
-
-  TODO(coreylynch): add as a dependency, when slim or tensorflow/models are
-  pipfied.
-  Source:
-  https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py
-
-  See `tf.image.sample_distorted_bounding_box` for more documentation.
-
-  Args:
-    image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
-    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
-      where each coordinate is [0, 1) and the coordinates are arranged
-      as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then the whole image
-      is used.
-    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
-      area of the image must contain at least this fraction of any bounding box
-      supplied.
-    aspect_ratio_range: An optional list of `floats`. The cropped area of the
-      image must have an aspect ratio = width / height within this range.
-    area_range: An optional list of `floats`. The cropped area of the image
-      must contain a fraction of the supplied image within this range.
-    max_attempts: An optional `int`. Number of attempts at generating a cropped
-      region of the image of the specified constraints. After `max_attempts`
-      failures, return the entire image.
-    scope: Optional scope for name_scope.
-  Returns:
-    A tuple, a 3-D Tensor cropped_image and the distorted bbox.
-  """
-  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
-    # Each bounding box has shape [1, num_boxes, box coords] and
-    # the coordinates are ordered [ymin, xmin, ymax, xmax].
-
-    # A large fraction of image datasets contain a human-annotated bounding
-    # box delineating the region of the image containing the object of interest.
-    # We choose to create a new bounding box for the object which is a randomly
-    # distorted version of the human-annotated bounding box that obeys an
-    # allowed range of aspect ratios, sizes and overlap with the human-annotated
-    # bounding box. If no box is supplied, then we assume the bounding box is
-    # the entire image.
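-    # With the defaults above, the sampled crop covers anywhere from 5% to
-    # 100% of the image area, keeps its aspect ratio between 3:4 and 4:3,
-    # and must overlap at least 10% of the supplied bounding box.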
- sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - tf.shape(image), - bounding_boxes=bbox, - min_object_covered=min_object_covered, - aspect_ratio_range=aspect_ratio_range, - area_range=area_range, - max_attempts=max_attempts, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box - - # Crop the image to the specified bounding box. - cropped_image = tf.slice(image, bbox_begin, bbox_size) - return cropped_image, distort_bbox - - -def distort_color(image, color_ordering=0, fast_mode=True, scope=None): - """Distort the color of a Tensor image. - - TODO(coreylynch): add as a dependency, when slim or tensorflow/models are - pipfied. - Source: - https://raw.githubusercontent.com/tensorflow/models/a9d0e6e8923a4/slim/preprocessing/inception_preprocessing.py - - Each color distortion is non-commutative and thus ordering of the color ops - matters. Ideally we would randomly permute the ordering of the color ops. - Rather than adding that level of complication, we select a distinct ordering - of color ops for each preprocessing thread. - Args: - image: 3-D Tensor containing single image in [0, 1]. - color_ordering: Python int, a type of distortion (valid values: 0-3). - fast_mode: Avoids slower ops (random_hue and random_contrast) - scope: Optional scope for name_scope. - Returns: - 3-D Tensor color-distorted image on range [0, 1] - Raises: - ValueError: if color_ordering not in [0, 3] - """ - with tf.name_scope(scope, 'distort_color', [image]): - if fast_mode: - if color_ordering == 0: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - else: - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_brightness(image, max_delta=32. / 255.) - else: - if color_ordering == 0: - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.2) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - elif color_ordering == 1: - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.2) - elif color_ordering == 2: - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.2) - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - elif color_ordering == 3: - image = tf.image.random_hue(image, max_delta=0.2) - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - image = tf.image.random_brightness(image, max_delta=32. / 255.) - else: - raise ValueError('color_ordering must be in [0, 3]') - - # The random_* ops do not necessarily clamp. 
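-    # (e.g. random_brightness adds a delta that can push values outside
-    # [0, 1]), so clip back to the valid range before returning.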
-    return tf.clip_by_value(image, 0.0, 1.0)
-
-
-def crop_center(image):
-  """Returns a cropped square image."""
-  shape = tf.shape(image)
-  new_shape = tf.minimum(shape[0], shape[1])
-  offset_y = tf.maximum(shape[0] - shape[1], 0) // 2
-  offset_x = tf.maximum(shape[1] - shape[0], 0) // 2
-  image = tf.image.crop_to_bounding_box(
-      image, offset_y, offset_x, new_shape, new_shape)
-  return image
-
-
-def pad(image):
-  """Returns an image padded to be square."""
-  shape = tf.shape(image)
-  new_shape = tf.maximum(shape[0], shape[1])
-  height = shape[0]
-  width = shape[1]
-  offset_x = tf.maximum((height-width), 0) // 2
-  offset_y = tf.maximum((width-height), 0) // 2
-  image = tf.image.pad_to_bounding_box(
-      image, offset_y, offset_x, new_shape, new_shape)
-  return image
-
-
-def pad_200(image):
-  """Returns an image width-padded with 200 pixels on each side, then
-  center-cropped to a square."""
-  shape = tf.shape(image)
-  image = tf.image.pad_to_bounding_box(
-      image, 0, 200, shape[0], shape[1]+400)
-  shape = tf.shape(image)
-  new_shape = tf.minimum(shape[0], shape[1])
-  offset_y = tf.maximum(shape[0] - shape[1], 0) // 2
-  offset_x = tf.maximum(shape[1] - shape[0], 0) // 2
-  image = tf.image.crop_to_bounding_box(
-      image, offset_y, offset_x, new_shape, new_shape)
-  return image
-
-
-def pad_crop_central(image, central_fraction=0.875):
-  """Pads the image to the maximum length, crops the central fraction."""
-  # Pad the image to be square.
-  image = pad(image)
-  # Crop the central region of the image, keeping `central_fraction`
-  # (87.5% by default) of each dimension.
-  image = tf.image.central_crop(image, central_fraction=central_fraction)
-  return image
-
-
-def crop_image_by_strategy(image, cropping):
-  """Crops an image according to a strategy defined in config.
-
-  Args:
-    image: 3-d image tensor.
-    cropping: str, name of cropping strategy.
-  Returns:
-    image: cropped image.
-  Raises:
-    ValueError: When unknown cropping strategy is specified.
-  """
-  strategy_to_method = {
-      'crop_center': crop_center,
-      'pad': pad,
-      'pad200': pad_200,
-      'pad_crop_central': pad_crop_central
-  }
-  tf.logging.info('Cropping strategy: %s.' % cropping)
-  if cropping not in strategy_to_method:
-    raise ValueError('Unknown cropping strategy: %s' % cropping)
-  return strategy_to_method[cropping](image)
-
-
-def scale_augment_crop(image, central_bbox, area_range, min_object_covered):
-  """Training time scale augmentation.
-
-  Args:
-    image: 3-d float tensor.
-    central_bbox: Bounding box defining the central region of interest.
-    area_range: Range of allowed areas for the augmented bounding box.
-    min_object_covered: Constraint for the fraction of original image in
-      augmented bounding box.
-  Returns:
-    distorted_image: The scaled, cropped image.
-  """
-  (distorted_image, _) = distorted_bounding_box_crop(
-      image, central_bbox, area_range=area_range,
-      aspect_ratio_range=(1.0, 1.0),
-      min_object_covered=min_object_covered)
-  # Restore the shape since the dynamic slice based upon the bbox_size loses
-  # the third dimension.
-  distorted_image.set_shape([None, None, 3])
-  return distorted_image
-
-
-def scale_to_inception_range(image):
-  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
-  # Assert that incoming images have been properly scaled to [0,1].
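-  # The mapping is x -> (x - 0.5) * 2, so 0.0 -> -1.0, 0.5 -> 0.0 and
-  # 1.0 -> 1.0.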
-  with tf.control_dependencies(
-      [tf.assert_less_equal(tf.reduce_max(image), 1.),
-       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
-    image = tf.subtract(image, 0.5)
-    image = tf.multiply(image, 2.0)
-  return image
-
-
-def resize_image(image, height, width):
-  """Resizes an image to a target height and width."""
-  image = tf.expand_dims(image, 0)
-  image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
-  image = tf.squeeze(image, [0])
-  return image
-
-
-def crop_or_pad(image, curr_height, curr_width, new, height=True, crop=True):
-  """Crops or pads an image.
-
-  Args:
-    image: 3-D float32 `Tensor` image.
-    curr_height: Int, current height.
-    curr_width: Int, current width.
-    new: Int, new width or height.
-    height: Boolean, cropping or padding for height.
-    crop: Boolean, True if we're cropping, False if we're padding.
-  Returns:
-    image: 3-D float32 `Tensor` image.
-  """
-  # Crop the image to fit the new shape.
-  abs_diff = tf.abs(new-curr_height)//2 if height else tf.abs(new-curr_width)//2
-  offset_x = 0 if height else abs_diff
-  offset_y = abs_diff if height else 0
-
-  # We process height first, so always pad/crop to new height.
-  target_height = new
-  # We process height first, so pad/crop to new width only if not doing height.
-  target_width = curr_width if height else new
-
-  if crop:
-    image = tf.image.crop_to_bounding_box(
-        image, offset_y, offset_x, target_height, target_width)
-  else:
-    image = tf.image.pad_to_bounding_box(
-        image, offset_y, offset_x, target_height, target_width)
-  return image
-
-
-def get_central_bbox(min_side, new_size):
-  """Gets the central bounding box for an image.
-
-  If image is square, returns bounding box [0,0,1,1].
-  Otherwise, returns the bounding box containing the central
-  smallest side x smallest side square.
-
-  Args:
-    min_side: Int, size of smallest side in pixels.
-    new_size: Int, resize image to a square of new_size x new_size pixels.
-  Returns:
-    bbox: A float32 `Tensor` of shape [1, 1, 4], holding the coordinates of
-      the central bounding box.
-  """
-  max_shape = tf.cast(new_size, tf.float32)
-  min_shape = tf.cast(min_side, tf.float32)
-  top_xy = ((max_shape-min_shape)/2)/max_shape
-  bottom_xy = (min_shape+(max_shape-min_shape)/2)/max_shape
-  # Create a bbox for the center region of interest.
-  bbox = tf.stack([[[top_xy, top_xy, bottom_xy, bottom_xy]]])
-  bbox.set_shape([1, 1, 4])
-  return bbox
-
-
-def pad_to_max(image, max_scale):
-  """Pads an image to max_scale times the current center crop size.
-
-  E.g.: For an image with dimensions 1920x1080 and a max_scale of 1.5,
-  returns an image that is 1.5 * (1080x1080).
-
-  Args:
-    image: 3-D float32 `Tensor` image.
-    max_scale: Float, maximum scale of the image, as a multiplier on the
-      central bounding box.
-  Returns:
-    image: 3-D float32 `Tensor` image.
-  """
-  orig_shape = tf.shape(image)
-  orig_height = orig_shape[0]
-  orig_width = orig_shape[1]
-
-  # Find the smallest side and corresponding new size.
-  min_side = tf.cast(tf.minimum(orig_height, orig_width), tf.float32)
-  new_shape = tf.cast(tf.sqrt(max_scale*min_side*min_side), tf.int32)
-
-  # Crop or pad height.
-  # pylint: disable=g-long-lambda
-  image = tf.cond(
-      orig_height >= new_shape,
-      lambda: crop_or_pad(
-          image, orig_height, orig_width, new_shape, height=True, crop=True),
-      lambda: crop_or_pad(
-          image, orig_height, orig_width, new_shape, height=True, crop=False))
-
-  # Crop or pad width.
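-  # In the docstring example (1920x1080 input, max_scale=1.5), new_shape is
-  # sqrt(1.5 * 1080^2) ~= 1322, so the height is padded from 1080 up to 1322
-  # and the width is cropped from 1920 down to 1322.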
-  image = tf.cond(
-      orig_width >= new_shape,
-      lambda: crop_or_pad(
-          image, orig_height, orig_width, new_shape, height=False, crop=True),
-      lambda: crop_or_pad(
-          image, orig_height, orig_width, new_shape, height=False, crop=False))
-
-  # Get the bounding box of the original centered box in the new resized image.
-  original_bounding_box = get_central_bbox(min_side, new_shape)
-  return image, original_bounding_box
-
-
-def scale_up_augmentation(image, max_scale):
-  """Scales an image randomly >100% up to some max scale."""
-  # Pad to max size.
-  image, original_central_bbox = pad_to_max(image, max_scale)
-
-  # Determine area range of the augmented crop, as a percentage of the
-  # new max area.
-  # aug_max == 100% of new max area.
-  aug_max = 1.0
-  # aug_min == original_area/new_area == original_area/(max_scale*original_area)
-  # == 1/max_scale.
-  aug_min = 1.0/max_scale
-  area_range = (aug_min, aug_max)
-  # Since we're doing >100% scale, always have the full original crop in frame.
-  min_object_covered = 1.0
-  # Get a random scaled, cropped image.
-  image = scale_augment_crop(image, original_central_bbox, area_range,
-                             min_object_covered)
-  return image
-
-
-def scale_down_augmentation(image, min_scale):
-  """Scales an image randomly <100% down to some min scale."""
-  # Crop the center, and consider the whole image the bounding box ROI.
-  image = crop_center(image)
-  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
-  # Determine area range of the augmented crop, as a percentage of the
-  # original crop center area.
-  # aug_max == 100% of original area.
-  area_range = (min_scale, 1.0)
-  # Get a random scaled, cropped image.
-  image = scale_augment_crop(image, bbox, area_range, min_scale)
-  return image
-
-
-def augment_image_scale(image, min_scale, max_scale, p_scale_up):
-  """Training time scale augmentation.
-
-  Args:
-    image: 3-d float tensor representing image.
-    min_scale: minimum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    max_scale: maximum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    p_scale_up: Fraction of images scaled up.
-  Returns:
-    image: The scale-augmented image.
-  """
-  assert max_scale >= 1.0
-  assert min_scale <= 1.0
-  if min_scale == max_scale == 1.0:
-    tf.logging.info('Min and max scale are 1.0, do not augment.')
-    # Do no augmentation, just crop the center.
-    return crop_center(image)
-  elif (max_scale == 1.0) and (min_scale < 1.0):
-    tf.logging.info('Max scale is 1.0, only scale down augment.')
-    # Always do <100% augmentation.
-    return scale_down_augmentation(image, min_scale)
-  elif (min_scale == 1.0) and (max_scale > 1.0):
-    tf.logging.info('Min scale is 1.0, only scale up augment.')
-    # Always do >100% augmentation.
-    return scale_up_augmentation(image, max_scale)
-  else:
-    tf.logging.info('Sample both augmentations.')
-    # Choose to scale the image up or down. Scale up with probability
-    # p_scale_up, matching the documented meaning of the argument.
-    rn = tf.random_uniform([], minval=0., maxval=1., dtype=tf.float32)
-    image = tf.cond(rn < p_scale_up,
-                    lambda: scale_up_augmentation(image, max_scale),
-                    lambda: scale_down_augmentation(image, min_scale))
-    return image
-
-
-def decode_image(image_str):
-  """Decodes a jpeg-encoded image string into an image in range [0,1]."""
-  # Decode jpeg string into np.uint8 tensor.
-  image = tf.image.decode_jpeg(image_str, channels=3)
-  # Convert the image to range [0,1].
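-  # tf.image.convert_image_dtype rescales integer inputs when converting to
-  # float, so uint8 pixel values in [0, 255] come out as floats in [0, 1].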
-  if image.dtype != tf.float32:
-    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
-  return image
-
-
-def decode_images(image_strs):
-  """Decodes a tensor of image strings."""
-  return tf.map_fn(decode_image, image_strs, dtype=tf.float32)
-
-
-def preprocess_training_images(images, height, width, min_scale, max_scale,
-                               p_scale_up, aug_color=True, fast_mode=True):
-  """Preprocesses a batch of images for training.
-
-  This applies training-time scale and color augmentation, crops/resizes,
-  and scales images to the [-1,1] range expected by pre-trained Inception nets.
-
-  Args:
-    images: A 4-D float32 `Tensor` holding raw images to be preprocessed.
-    height: Int, height in pixels to resize image to.
-    width: Int, width in pixels to resize image to.
-    min_scale: Float, minimum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    max_scale: Float, maximum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    p_scale_up: Float, fraction of images scaled up.
-    aug_color: Whether or not to do color augmentation.
-    fast_mode: Boolean, avoids slower ops (random_hue and random_contrast).
-  Returns:
-    preprocessed_images: A 4-D float32 `Tensor` holding preprocessed images.
-  """
-  def _prepro_train(im):
-    """Map this preprocessing function over each image in the batch."""
-    return preprocess_training_image(
-        im, height, width, min_scale, max_scale, p_scale_up,
-        aug_color=aug_color, fast_mode=fast_mode)
-  return tf.map_fn(_prepro_train, images)
-
-
-def preprocess_training_image(
-    image, height, width, min_scale, max_scale, p_scale_up,
-    aug_color=True, fast_mode=True):
-  """Preprocesses an image for training.
-
-  Args:
-    image: A 3-d float tensor representing the image.
-    height: Target image height.
-    width: Target image width.
-    min_scale: Minimum scale of bounding box (as a percentage of full
-      bounding box) used to crop image during scale augmentation.
-    max_scale: Maximum scale of bounding box (as a percentage of full
-      bounding box) used to crop image during scale augmentation.
-    p_scale_up: Fraction of images to scale >100%.
-    aug_color: Whether or not to do color augmentation.
-    fast_mode: Avoids slower ops (random_hue and random_contrast).
-  Returns:
-    scaled_image: A scaled image tensor in the range [-1,1].
-  """
-  # Get a random scaled, cropped image.
-  image = augment_image_scale(image, min_scale, max_scale, p_scale_up)
-
-  # Resize image to desired height, width.
-  image = tf.expand_dims(image, 0)
-  image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
-  image = tf.squeeze(image, [0])
-
-  # Optionally augment the color.
-  # pylint: disable=g-long-lambda
-  if aug_color:
-    image = apply_with_random_selector(
-        image,
-        lambda x, ordering: distort_color(
-            x, ordering, fast_mode=fast_mode), num_cases=4)
-
-  # Scale to [-1,1] range as expected by inception.
-  scaled_image = scale_to_inception_range(image)
-  return scaled_image
-
-
-def preprocess_test_image(image, height, width, crop_strategy):
-  """Preprocesses an image for test/inference.
-
-  Args:
-    image: A 3-d float tensor representing the image.
-    height: Target image height.
-    width: Target image width.
-    crop_strategy: String, name of the strategy used to crop test-time images.
-      Can be: 'crop_center', 'pad', 'pad200', 'pad_crop_central'.
-  Returns:
-    scaled_image: A scaled image tensor in the range [-1,1].
-  """
-  image = crop_image_by_strategy(image, crop_strategy)
-  # Resize.
-  image = resize_image(image, height, width)
-  # Scale the input range to [-1,1] as expected by inception.
-  image = scale_to_inception_range(image)
-  return image
-
-
-def preprocess_test_images(images, height, width, crop_strategy):
-  """Apply test-time preprocessing to a batch of images.
-
-  This crops images (given a named strategy for doing so), resizes them,
-  and scales them to the [-1,1] range expected by pre-trained Inception nets.
-
-  Args:
-    images: A 4-D float32 `Tensor` holding raw images to be preprocessed.
-    height: Int, height in pixels to resize image to.
-    width: Int, width in pixels to resize image to.
-    crop_strategy: String, name of the strategy used to crop test-time images.
-      Can be: 'crop_center', 'pad', 'pad_200', 'pad_crop_central'.
-  Returns:
-    preprocessed_images: A 4-D float32 `Tensor` holding preprocessed images.
-  """
-  def _prepro_test(im):
-    """Map this preprocessing function over each image in the batch."""
-    return preprocess_test_image(im, height, width, crop_strategy)
-  if len(images.shape) == 3:
-    return _prepro_test(images)
-  else:
-    return tf.map_fn(_prepro_test, images)
-
-
-def preprocess_images(
-    images, is_training, height, width,
-    min_scale=1.0, max_scale=1.0, p_scale_up=0.0,
-    aug_color=True, fast_mode=True,
-    crop_strategy='pad_crop_central'):
-  """Preprocess a batch of images.
-
-  Args:
-    images: A 4-D float32 `Tensor` holding raw images to be preprocessed.
-    is_training: Boolean, whether to preprocess them for training or test.
-    height: Int, height in pixels to resize image to.
-    width: Int, width in pixels to resize image to.
-    min_scale: Float, minimum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    max_scale: Float, maximum scale augmentation allowed, as a fraction of the
-      central min_side * min_side area of the original image.
-    p_scale_up: Float, fraction of images scaled up.
-    aug_color: Whether or not to do color augmentation.
-    fast_mode: Boolean, avoids slower ops (random_hue and random_contrast).
-    crop_strategy: String, name of the strategy used to crop test-time images.
-      Can be: 'crop_center', 'pad', 'pad_200', 'pad_crop_central'.
-  Returns:
-    preprocessed_images: A 4-D float32 `Tensor` holding preprocessed images.
-  """
-  if is_training:
-    return preprocess_training_images(
-        images, height, width, min_scale, max_scale,
-        p_scale_up, aug_color, fast_mode)
-  else:
-    return preprocess_test_images(
-        images, height, width, crop_strategy)
-
-
-def cv2rotateimage(image, angle):
-  """Efficient rotation for 90-degree rotations; slow otherwise.
-
-  Not a tensorflow function; uses cv2 and scipy on numpy arrays.
-
-  Args:
-    image: a numpy array with shape [height, width, channels].
-    angle: the rotation angle in degrees in the range [-180, 180].
-  Returns:
-    The rotated image.
-  """
-  # Limit angle to [-180, 180] degrees.
-  assert angle <= 180 and angle >= -180
-  if angle == 0:
-    return image
-  # Efficient rotations.
-  if angle == -90:
-    image = cv2.transpose(image)
-    image = cv2.flip(image, 0)
-  elif angle == 90:
-    image = cv2.transpose(image)
-    image = cv2.flip(image, 1)
-  elif angle == 180 or angle == -180:
-    image = cv2.flip(image, 0)
-    image = cv2.flip(image, 1)
-  else:  # Slow rotation.
-    image = ndimage.interpolation.rotate(image, angle)
-  return image
-
-
-def cv2resizeminedge(image, min_edge_size):
-  """Resize smallest edge of image to min_edge_size."""
-  assert min_edge_size >= 0
-  height, width = (image.shape[0], image.shape[1])
-  new_height, new_width = (0, 0)
-  if height > width:
-    new_width = min_edge_size
-    new_height = int(height * new_width / float(width))
-  else:
-    new_height = min_edge_size
-    new_width = int(width * new_height / float(height))
-  return cv2.resize(image, (new_width, new_height),
-                    interpolation=cv2.INTER_AREA)
-
-
-def shapestring(array):
-  """Returns a compact string describing shape of an array."""
-  shape = array.shape
-  s = str(shape[0])
-  for i in range(1, len(shape)):
-    s += 'x' + str(shape[i])
-  return s
-
-
-def unscale_jpeg_encode(ims):
-  """Unscales pixel values and jpeg encodes preprocessed image.
-
-  Args:
-    ims: A 4-D float32 `Tensor` holding preprocessed images.
-  Returns:
-    im_strings: A 1-D string `Tensor` holding images that have been unscaled
-      (reversing the inception [-1,1] scaling), and jpeg encoded.
-  """
-  ims /= 2.0
-  ims += 0.5
-  ims *= 255.0
-  ims = tf.clip_by_value(ims, 0, 255)
-  ims = tf.cast(ims, tf.uint8)
-  im_strings = tf.map_fn(
-      lambda x: tf.image.encode_jpeg(x, format='rgb', quality=100),
-      ims, dtype=tf.string)
-  return im_strings
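preprocess_images above is the single entry point for both training and eval preprocessing. A minimal usage sketch under stated assumptions: the functions are imported from the preprocessing module above, the input is a batch of jpeg strings, and the sizes and scale values shown are illustrative, not taken from this patch.

```python
import tensorflow as tf

# Hypothetical input: a batch of jpeg-encoded image strings.
raw_image_strings = tf.placeholder(tf.string, shape=[None])
raw_images = decode_images(raw_image_strings)  # float32 in [0, 1]

# Training: random scale augmentation in [0.8x, 1.2x]; half the images
# are scaled up, the other half scaled down.
train_images = preprocess_images(
    raw_images, is_training=True, height=299, width=299,
    min_scale=0.8, max_scale=1.2, p_scale_up=0.5)

# Eval: deterministic crop strategy only, no random augmentation.
eval_images = preprocess_images(
    raw_images, is_training=False, height=299, width=299,
    crop_strategy='pad_crop_central')
```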
diff --git a/research/tcn/train.py b/research/tcn/train.py
deleted file mode 100644
index f35cb4c6f..000000000
--- a/research/tcn/train.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Trains TCN models (and baseline comparisons)."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from estimators.get_estimator import get_estimator
-from utils import util
-import tensorflow as tf
-tf.logging.set_verbosity(tf.logging.INFO)
-
-tf.flags.DEFINE_string(
-    'config_paths', '',
-    """
-    Paths to YAML configuration files defining FLAG values. Multiple files
-    can be separated by the `#` symbol. Files are merged recursively. Setting
-    a key in these files is equivalent to setting the FLAG value with
-    the same name.
-    """)
-tf.flags.DEFINE_string(
-    'model_params', '{}', 'YAML configuration string for the model parameters.')
-tf.app.flags.DEFINE_string('master', 'local',
-                           'BNS name of the TensorFlow master to use')
-tf.app.flags.DEFINE_string(
-    'logdir', '/tmp/tcn', 'Directory where to write event logs.')
-tf.app.flags.DEFINE_integer(
-    'task', 0, 'Task id of the replica running the training.')
-tf.app.flags.DEFINE_integer(
-    'ps_tasks', 0, 'Number of tasks in the ps job. If 0 no ps job is used.')
-FLAGS = tf.app.flags.FLAGS
-
-
-def main(_):
-  """Runs main training loop."""
-  # Parse config dict from yaml config files / command line flags.
-  config = util.ParseConfigsToLuaTable(
-      FLAGS.config_paths, FLAGS.model_params, save=True, logdir=FLAGS.logdir)
-
-  # Choose an estimator based on training strategy.
-  estimator = get_estimator(config, FLAGS.logdir)
-
-  # Run training.
-  estimator.train()
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/tcn/utils/luatables.py b/research/tcn/utils/luatables.py
deleted file mode 100644
index 565d03862..000000000
--- a/research/tcn/utils/luatables.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# pylint: disable=line-too-long,g-explicit-length-test
-"""A convenience class replicating some lua table syntax with a python dict.
-
-In general, should behave like a dictionary except that we can use dot notation
-  to access keys. Users should be careful to only provide keys suitable for
-  instance variable names.
-
-Nota bene: do not use the key "keys" since it will collide with the method keys.
-
-Usage example:
-
->>> t = T(a=5,b='kaw', c=T(v=[],x=33))
->>> t.a
-5
->>> t.z = None
->>> print t
-T(a=5, z=None, c=T(x=33, v=[]), b='kaw')
-
->>> t2 = T({'h':'f','x':4})
->>> t2
-T(h='f', x=4)
->>> t2['x']
-4
-"""
-
-
-class T(object):
-  """Class for emulating lua tables."""
-
-  def __init__(self, *args, **kwargs):
-    if len(args) > 1 or (len(args) == 1 and len(kwargs) > 0):
-      errmsg = '''constructor only allows a single dict as a positional
-      argument or keyword arguments'''
-      raise ValueError(errmsg)
-    if len(args) == 1 and isinstance(args[0], dict):
-      self.__dict__.update(args[0])
-    else:
-      self.__dict__.update(kwargs)
-
-  def __repr__(self):
-    fmt = ', '.join('%s=%s' for i in range(len(self.__dict__)))
-    kwargstr = fmt % tuple(
-        x for tup in self.__dict__.items() for x in [str(tup[0]), repr(tup[1])])
-    return 'T(' + kwargstr + ')'
-
-  def __getitem__(self, key):
-    return self.__dict__[key]
-
-  def __setitem__(self, key, val):
-    self.__dict__[key] = val
-
-  def __delitem__(self, key):
-    del self.__dict__[key]
-
-  def __iter__(self):
-    return iter(self.__dict__)
-
-  def __len__(self):
-    return len(self.__dict__)
-
-  def keys(self):  # Needed for dict(T( ... )) to work.
-    return self.__dict__.keys()
-
-  def iteritems(self):
-    return self.__dict__.iteritems()
diff --git a/research/tcn/utils/progress.py b/research/tcn/utils/progress.py
deleted file mode 100644
index 1043261b5..000000000
--- a/research/tcn/utils/progress.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""A utility class for reporting processing progress."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import datetime
-
-
-class Progress(object):
-  """A utility class for reporting processing progress."""
-
-  def __init__(self, target_size):
-    self.target_size = target_size
-    self.current_size = 0
-    self.start_time = datetime.datetime.now()
-
-  def Update(self, current_size):
-    """Sets the internal progress counter to current_size."""
-    self.current_size = current_size
-
-  def Add(self, size):
-    """Increments the internal progress counter by size."""
-    self.current_size += size
-
-  def __str__(self):
-    # The 1e-5 avoids division by zero before any items are processed.
-    processed = 1e-5 + self.current_size / float(self.target_size)
-    current_time = datetime.datetime.now()
-    elapsed = current_time - self.start_time
-    eta = datetime.timedelta(
-        seconds=elapsed.total_seconds() / processed - elapsed.total_seconds())
-    return "%d / %d (elapsed %s eta %s)" % (
-        self.current_size, self.target_size,
-        str(elapsed).split(".")[0],
-        str(eta).split(".")[0])
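Progress is consumed by printing it: __str__ extrapolates an ETA linearly from the fraction processed so far. A small usage sketch; the loop bounds and do_work are placeholders, not from this patch.

```python
from utils.progress import Progress


def do_work(i):
  pass  # placeholder for the real per-item processing


progress = Progress(target_size=1000)
for i in range(1000):
  do_work(i)
  progress.Add(1)  # or equivalently: progress.Update(i + 1)
  if i % 100 == 0:
    # Prints e.g. "300 / 1000 (elapsed 0:00:12 eta 0:00:28)".
    print(progress)
```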
- """ - assert topdir - topdir = os.path.expanduser(topdir) - allpaths = [] - for path, _, leaffiles in tf.gfile.Walk(topdir): - if leaffiles: - allpaths.extend([os.path.join(path, i) for i in leaffiles]) - if not allpaths: - raise ValueError('No files found for top directory %s' % topdir) - return allpaths - - -def NoDuplicatesConstructor(loader, node, deep=False): - """Check for duplicate keys.""" - mapping = {} - for key_node, value_node in node.value: - key = loader.construct_object(key_node, deep=deep) - value = loader.construct_object(value_node, deep=deep) - if key in mapping: - raise ConstructorError('while constructing a mapping', node.start_mark, - 'found duplicate key (%s)' % key, - key_node.start_mark) - mapping[key] = value - return loader.construct_mapping(node, deep) - - -def WriteConfigAsYaml(config, logdir, filename): - """Writes a config dict as yaml to logdir/experiment.yml.""" - if not tf.gfile.Exists(logdir): - tf.gfile.MakeDirs(logdir) - config_filename = os.path.join(logdir, filename) - with tf.gfile.GFile(config_filename, 'w') as f: - f.write(yaml.dump(config)) - tf.logging.info('wrote config to %s', config_filename) - - -def LoadConfigDict(config_paths, model_params): - """Loads config dictionary from specified yaml files or command line yaml.""" - - # Ensure that no duplicate keys can be loaded (causing pain). - yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - NoDuplicatesConstructor) - - # Handle either ',' or '#' separated config lists, since borg will only - # accept '#'. - sep = ',' if ',' in config_paths else '#' - - # Load flags from config file. - final_config = {} - if config_paths: - for config_path in config_paths.split(sep): - config_path = config_path.strip() - if not config_path: - continue - config_path = os.path.abspath(config_path) - tf.logging.info('Loading config from %s', config_path) - with tf.gfile.GFile(config_path.strip()) as config_file: - config_flags = yaml.load(config_file) - final_config = DeepMergeDict(final_config, config_flags) - if model_params: - model_params = MaybeLoadYaml(model_params) - final_config = DeepMergeDict(final_config, model_params) - tf.logging.info('Final Config:\n%s', yaml.dump(final_config)) - return final_config - - -def MaybeLoadYaml(item): - """Parses item if it's a string. If it's a dictionary it's returned as-is.""" - if isinstance(item, six.string_types): - return yaml.load(item) - elif isinstance(item, dict): - return item - else: - raise ValueError('Got {}, expected YAML string or dict', type(item)) - - -def DeepMergeDict(dict_x, dict_y, path=None): - """Recursively merges dict_y into dict_x.""" - if path is None: path = [] - for key in dict_y: - if key in dict_x: - if isinstance(dict_x[key], dict) and isinstance(dict_y[key], dict): - DeepMergeDict(dict_x[key], dict_y[key], path + [str(key)]) - elif dict_x[key] == dict_y[key]: - pass # same leaf value - else: - dict_x[key] = dict_y[key] - else: - dict_x[key] = dict_y[key] - return dict_x - - -def ParseConfigsToLuaTable(config_paths, extra_model_params=None, - save=False, save_name='final_training_config.yml', - logdir=None): - """Maps config_paths and extra_model_params to a Luatable-like object.""" - # Parse config dict from yaml config files / command line flags. - config = LoadConfigDict(config_paths, extra_model_params) - if save: - WriteConfigAsYaml(config, logdir, save_name) - # Convert config dictionary to T object with dot notation. 
-  config = RecursivelyConvertToLuatable(config)
-  return config
-
-
-def SetNestedValue(d, keys, value):
-  """Sets a value in a nested dictionary.
-
-  Example:
-    d = {}, keys = ['data','augmentation','minscale'], value = 1.0.
-    returns {'data': {'augmentation' : {'minscale': 1.0 }}}
-
-  Args:
-    d: A dictionary to set a nested value in.
-    keys: list of dict keys nesting left to right.
-    value: the nested value to set.
-  Returns:
-    None
-  """
-  for key in keys[:-1]:
-    d = d.setdefault(key, {})
-  d[keys[-1]] = value
-
-
-def RecursivelyConvertToLuatable(yaml_dict):
-  """Converts a dictionary to a LuaTable-like T object."""
-  if isinstance(yaml_dict, dict):
-    yaml_dict = T(yaml_dict)
-  for key, item in yaml_dict.iteritems():
-    if isinstance(item, dict):
-      yaml_dict[key] = RecursivelyConvertToLuatable(item)
-  return yaml_dict
-
-
-def KNNIds(query_vec, target_seq, k=1):
-  """Gets the knn ids to the query vec from the target sequence."""
-  sorted_distances = KNNIdsWithDistances(query_vec, target_seq, k)
-  return [i[0] for i in sorted_distances]
-
-
-def KNNIdsWithDistances(query_vec, target_seq, k=1):
-  """Gets the knn ids to the query vec from the target sequence."""
-  if not isinstance(target_seq, np.ndarray):
-    target_seq = np.array(target_seq)
-  assert np.shape(query_vec) == np.shape(target_seq[0])
-  distances = [(i, np.linalg.norm(query_vec-target_vec)) for (
-      i, target_vec) in enumerate(target_seq)]
-  sorted_distances = sorted(distances, key=lambda x: x[1])
-  return sorted_distances[:k]
-
-
-def CopyLocalConfigsToCNS(outdir, configs, gfs_user):
-  """Copies experiment yaml config files to the job_logdir on /cns."""
-  assert configs
-  assert outdir
-  conf_files = configs.split(',')
-  for conf_file in conf_files:
-    copy_command = 'fileutil --gfs_user %s cp -f %s %s' % (
-        gfs_user, conf_file, outdir)
-    tf.logging.info(copy_command)
-    os.system(copy_command)
-
-
-def pairwise_distances(feature, squared=True):
-  """Computes the pairwise distance matrix in numpy.
-
-  Args:
-    feature: 2-D numpy array of size [number of data, feature dimension]
-    squared: Boolean. If true, output is the pairwise squared euclidean
-      distance matrix; else, output is the pairwise euclidean distance matrix.
-
-  Returns:
-    pdists: 2-D numpy array of size
-      [number of data, number of data].
-  """
-  triu = np.triu_indices(feature.shape[0], 1)
-  upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
-  if squared:
-    upper_tri_pdists **= 2.
-  num_data = feature.shape[0]
-  pdists = np.zeros((num_data, num_data))
-  pdists[np.triu_indices(num_data, 1)] = upper_tri_pdists
-  # Make symmetrical.
-  pdists = pdists + pdists.T - np.diag(
-      pdists.diagonal())
-  return pdists
-
-
-def is_tfrecord_input(inp):
-  """Checks if input is a TFRecord or list of TFRecords."""
-  def _is_tfrecord(inp):
-    if not isinstance(inp, str):
-      return False
-    _, extension = os.path.splitext(inp)
-    return extension == '.tfrecord'
-  if isinstance(inp, str):
-    return _is_tfrecord(inp)
-  if isinstance(inp, list):
-    return all(map(_is_tfrecord, inp))
-  return False
-
-
-def is_np_array(inp):
-  if isinstance(inp, np.ndarray):
-    return True
-  if isinstance(inp, list):
-    return all([isinstance(i, np.ndarray) for i in inp])
-  return False
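pairwise_distances above fills only the strict upper triangle and mirrors it, which does roughly half the work of the naive double loop. A quick numpy sanity check of that equivalence; the array size here is arbitrary and the import path assumes the research/tcn layout.

```python
import numpy as np

from utils import util

# The upper-triangle construction should match a direct O(n^2)
# computation of squared euclidean distances.
feature = np.random.rand(5, 3)
pdists = util.pairwise_distances(feature, squared=True)

direct = np.zeros((5, 5))
for i in range(5):
  for j in range(5):
    direct[i, j] = np.sum((feature[i] - feature[j]) ** 2)

assert np.allclose(pdists, direct)
```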
diff --git a/research/tcn/visualize_embeddings.py b/research/tcn/visualize_embeddings.py
deleted file mode 100644
index 298c1ab11..000000000
--- a/research/tcn/visualize_embeddings.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""Visualizes embeddings in tensorboard.
-
-Usage:
-root=experimental/users/sermanet/imitation/mirror && \
-blaze build -c opt --copt=-mavx --config=cuda $root:visualize_embeddings && \
-blaze-bin/$root/visualize_embeddings \
---checkpointdir $checkpointdir \
---checkpoint_iter $checkpoint_iter \
---embedding_records $embedding_records \
---outdir $outdir \
---num_embed 1000 \
---sprite_dim 64 \
---config_paths $configs \
---logtostderr
-
-blaze build third_party/tensorboard && \
-blaze-bin/third_party/tensorboard/tensorboard --logdir=$outdir
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-import random
-import cv2
-import numpy as np
-from scipy.misc import imresize
-from scipy.misc import imsave
-from estimators.get_estimator import get_estimator
-from utils import util
-import tensorflow as tf
-from tensorflow.contrib.tensorboard.plugins import projector
-tf.logging.set_verbosity(tf.logging.INFO)
-
-tf.flags.DEFINE_string(
-    'config_paths', '',
-    """
-    Paths to YAML configuration files defining FLAG values. Multiple files
-    can be separated by the `#` symbol. Files are merged recursively. Setting
-    a key in these files is equivalent to setting the FLAG value with
-    the same name.
-    """)
-tf.flags.DEFINE_string(
-    'model_params', '{}', 'YAML configuration string for the model parameters.')
-tf.app.flags.DEFINE_string(
-    'checkpoint_iter', '', 'Evaluate this specific checkpoint.')
-tf.app.flags.DEFINE_string(
-    'checkpointdir', '/tmp/tcn', 'Path to model checkpoints.')
-tf.app.flags.DEFINE_string(
-    'outdir', '/tmp/tcn', 'Path to write tensorboard info to.')
-tf.app.flags.DEFINE_integer(
-    'num_embed', 4000, 'Number of embeddings.')
-tf.app.flags.DEFINE_integer(
-    'num_sequences', -1, 'Number of sequences, -1 for all.')
-tf.app.flags.DEFINE_integer(
-    'sprite_dim', 64, 'Height, width of the square sprite image.')
-tf.app.flags.DEFINE_string(
-    'embedding_records', None, 'Path to embedding records.')
-FLAGS = tf.app.flags.FLAGS
-
-
-def images_to_sprite(data):
-  """Creates the sprite image along with any necessary padding.
-
-  Taken from: https://github.com/tensorflow/tensorflow/issues/6322
-
-  Args:
-    data: NxHxW[x3] tensor containing the images.
-
-  Returns:
-    data: Properly shaped HxWx3 image with any necessary padding.
- """ - if len(data.shape) == 3: - data = np.tile(data[..., np.newaxis], (1, 1, 1, 3)) - data = data.astype(np.float32) - min_v = np.min(data.reshape((data.shape[0], -1)), axis=1) - data = (data.transpose(1, 2, 3, 0) - min_v).transpose(3, 0, 1, 2) - max_v = np.max(data.reshape((data.shape[0], -1)), axis=1) - data = (data.transpose(1, 2, 3, 0) / max_v).transpose(3, 0, 1, 2) - n = int(np.ceil(np.sqrt(data.shape[0]))) - padding = ((0, n ** 2 - data.shape[0]), (0, 0), - (0, 0)) + ((0, 0),) * (data.ndim - 3) - data = np.pad(data, padding, mode='constant', - constant_values=0) - # Tile the individual thumbnails into an image. - data = data.reshape((n, n) + data.shape[1:]).transpose( - (0, 2, 1, 3) + tuple(range(4, data.ndim + 1))) - data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:]) - data = (data * 255).astype(np.uint8) - return data - - -def main(_): - """Runs main labeled eval loop.""" - # Parse config dict from yaml config files / command line flags. - config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params) - - # Choose an estimator based on training strategy. - checkpointdir = FLAGS.checkpointdir - checkpoint_path = os.path.join( - '%s/model.ckpt-%s' % (checkpointdir, FLAGS.checkpoint_iter)) - estimator = get_estimator(config, checkpointdir) - - # Get records to embed. - validation_dir = FLAGS.embedding_records - validation_records = util.GetFilesRecursively(validation_dir) - - sequences_to_data = {} - for (view_embeddings, view_raw_image_strings, seqname) in estimator.inference( - validation_records, checkpoint_path, config.data.embed_batch_size, - num_sequences=FLAGS.num_sequences): - sequences_to_data[seqname] = { - 'embeddings': view_embeddings, - 'images': view_raw_image_strings, - } - - all_embeddings = np.zeros((0, config.embedding_size)) - all_ims = [] - all_seqnames = [] - - num_embeddings = FLAGS.num_embed - # Concatenate all views from all sequences into a big flat list. - for seqname, data in sequences_to_data.iteritems(): - embs = data['embeddings'] - ims = data['images'] - for v in range(config.data.num_views): - for (emb, im) in zip(embs[v], ims[v]): - all_embeddings = np.append(all_embeddings, [emb], axis=0) - all_ims.append(im) - all_seqnames.append(seqname) - - # Choose N indices uniformly from all images. - random_indices = range(all_embeddings.shape[0]) - random.shuffle(random_indices) - viz_indices = random_indices[:num_embeddings] - - # Extract embs. - viz_embs = np.array(all_embeddings[viz_indices]) - - # Extract and decode ims. - viz_ims = list(np.array(all_ims)[viz_indices]) - decoded_ims = [] - - sprite_dim = FLAGS.sprite_dim - for i, im in enumerate(viz_ims): - if i % 100 == 0: - print('Decoding image %d/%d.' % (i, num_embeddings)) - nparr_i = np.fromstring(str(im), np.uint8) - img_np = cv2.imdecode(nparr_i, 1) - img_np = img_np[..., [2, 1, 0]] - - img_np = imresize(img_np, [sprite_dim, sprite_dim, 3]) - decoded_ims.append(img_np) - decoded_ims = np.array(decoded_ims) - - # Extract sequence names. - outdir = FLAGS.outdir - - # The embedding variable, which needs to be stored - # Note this must a Variable not a Tensor! 
-  embedding_var = tf.Variable(viz_embs, name='viz_embs')
-
-  with tf.Session() as sess:
-    sess.run(embedding_var.initializer)
-    summary_writer = tf.summary.FileWriter(outdir)
-    config = projector.ProjectorConfig()
-    embedding = config.embeddings.add()
-    embedding.tensor_name = embedding_var.name
-
-    # Comment out if you don't want sprites.
-    embedding.sprite.image_path = os.path.join(outdir, 'sprite.png')
-    embedding.sprite.single_image_dim.extend(
-        [decoded_ims.shape[1], decoded_ims.shape[1]])
-
-    projector.visualize_embeddings(summary_writer, config)
-    saver = tf.train.Saver([embedding_var])
-    saver.save(sess, os.path.join(outdir, 'model2.ckpt'), 1)
-
-  sprite = images_to_sprite(decoded_ims)
-  imsave(os.path.join(outdir, 'sprite.png'), sprite)
-
-if __name__ == '__main__':
-  tf.app.run(main)
diff --git a/research/textsum/BUILD b/research/textsum/BUILD
deleted file mode 100644
index ea0628639..000000000
--- a/research/textsum/BUILD
+++ /dev/null
@@ -1,64 +0,0 @@
-package(default_visibility = [":internal"])
-
-licenses(["notice"])  # Apache 2.0
-
-exports_files(["LICENSE"])
-
-package_group(
-    name = "internal",
-    packages = [
-        "//textsum/...",
-    ],
-)
-
-py_library(
-    name = "seq2seq_attention_model",
-    srcs = ["seq2seq_attention_model.py"],
-    deps = [
-        ":seq2seq_lib",
-    ],
-)
-
-py_library(
-    name = "seq2seq_lib",
-    srcs = ["seq2seq_lib.py"],
-)
-
-py_binary(
-    name = "seq2seq_attention",
-    srcs = ["seq2seq_attention.py"],
-    deps = [
-        ":batch_reader",
-        ":data",
-        ":seq2seq_attention_decode",
-        ":seq2seq_attention_model",
-    ],
-)
-
-py_library(
-    name = "batch_reader",
-    srcs = ["batch_reader.py"],
-    deps = [
-        ":data",
-        ":seq2seq_attention_model",
-    ],
-)
-
-py_library(
-    name = "beam_search",
-    srcs = ["beam_search.py"],
-)
-
-py_library(
-    name = "seq2seq_attention_decode",
-    srcs = ["seq2seq_attention_decode.py"],
-    deps = [
-        ":beam_search",
-        ":data",
-    ],
-)
-
-py_library(
-    name = "data",
-    srcs = ["data.py"],
-)
diff --git a/research/textsum/README.md b/research/textsum/README.md
deleted file mode 100644
index ac2f55dd5..000000000
--- a/research/textsum/README.md
+++ /dev/null
@@ -1,171 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-Sequence-to-Sequence with Attention Model for Text Summarization.
-
-Authors:
-
-Xin Pan
-Peter Liu (peterjliu@google.com, github:peterjliu)
-
-Introduction
-
-The core model is the traditional sequence-to-sequence model with attention.
-It is customized (mostly inputs/outputs) for the text summarization task. The
-model has been trained on the Gigaword dataset and achieved state-of-the-art
-results (as of June 2016).
-
-The results described below are based on a model trained in multi-GPU and
-multi-machine settings. It has been simplified to run on a single machine
-for open-source purposes.
-
-Dataset
-
-We used the Gigaword dataset described in [Rush et al. A Neural Attention Model
-for Sentence Summarization](https://arxiv.org/abs/1509.00685).
-
-We cannot provide the dataset due to the license. See ExampleGen in data.py
-for the data format. data/data contains a toy example. Also see data/vocab
-for an example vocabulary format. In How To Run below, users can run the
-training with the toy data and vocab provided in the data/ directory by
-replacing the data directory flag.
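For reference, the on-disk format that ExampleGen (in data.py, later in this patch) reads is a simple length-prefixed stream of serialized tf.Example protos: an 8-byte length packed with struct format 'q', followed by that many bytes of proto. A minimal sketch of writing one toy record; the file name and the token-decorated text are illustrative, not copied from data/data.

```python
import struct

import tensorflow as tf

# One record: 8-byte length ('q' struct format, matching ExampleGen's
# unpack), then the serialized tf.Example with 'article' and 'abstract'
# text features (the keys the README's flags use).
example = tf.train.Example()
example.features.feature['article'].bytes_list.value.append(
    b'<d> <p> <s> novell ceo eric schmidt named google chairman . </s> </p> </d>')
example.features.feature['abstract'].bytes_list.value.append(
    b'<d> <p> <s> novell ceo named google chairman . </s> </p> </d>')

serialized = example.SerializeToString()
with open('data/training-0', 'wb') as writer:
  writer.write(struct.pack('q', len(serialized)))
  writer.write(serialized)
```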
-
-data_convert_example.py contains an example of converting between binary and
-text.
-
-
-Experiment Result
-
-8,000 examples sampled from the test set are used to generate summaries, and
-ROUGE scores are calculated for the generated summaries. Here are the best
-ROUGE scores on the Gigaword dataset:
-
-ROUGE-1 Average_R: 0.38272 (95%-conf.int. 0.37774 - 0.38755)
-
-ROUGE-1 Average_P: 0.50154 (95%-conf.int. 0.49509 - 0.50780)
-
-ROUGE-1 Average_F: 0.42568 (95%-conf.int. 0.42016 - 0.43099)
-
-ROUGE-2 Average_R: 0.20576 (95%-conf.int. 0.20060 - 0.21112)
-
-ROUGE-2 Average_P: 0.27565 (95%-conf.int. 0.26851 - 0.28257)
-
-ROUGE-2 Average_F: 0.23126 (95%-conf.int. 0.22539 - 0.23708)
-
-Configuration:
-
-The following is the configuration for the best trained model on Gigaword:
-
-batch_size: 64
-
-bidirectional encoding layer: 4
-
-article length: first 2 sentences, total words within 120.
-
-summary length: total words within 30.
-
-word embedding size: 128
-
-LSTM hidden units: 256
-
-Sampled softmax: 4096
-
-vocabulary size: Most frequent 200k words from dataset's articles and
-summaries.
-
-How To Run
-
-Prerequisite: install TensorFlow and Bazel.
-
-```shell
-# cd to your workspace
-# 1. Clone the textsum code to your workspace 'textsum' directory.
-# 2. Create an empty 'WORKSPACE' file in your workspace.
-# 3. Move the train/eval/test data to your workspace 'data' directory.
-#    In the following example, I named the data training-*, test-*, etc.
-#    If your data files have different names, update the --data_path.
-#    If you don't have data but want to try out the model, copy the toy
-#    data from the textsum/data/data to the data/ directory in the workspace.
-$ ls -R
-.:
-data textsum WORKSPACE
-
-./data:
-vocab test-0 training-0 training-1 validation-0 ...(omitted)
-
-./textsum:
-batch_reader.py beam_search.py BUILD README.md seq2seq_attention_model.py data
-data.py seq2seq_attention_decode.py seq2seq_attention.py seq2seq_lib.py
-
-./textsum/data:
-data vocab
-
-$ bazel build -c opt --config=cuda textsum/...
-
-# Run the training.
-$ bazel-bin/textsum/seq2seq_attention \
-    --mode=train \
-    --article_key=article \
-    --abstract_key=abstract \
-    --data_path=data/training-* \
-    --vocab_path=data/vocab \
-    --log_root=textsum/log_root \
-    --train_dir=textsum/log_root/train
-
-# Run the eval. Try to avoid running on the same machine as training.
-$ bazel-bin/textsum/seq2seq_attention \
-    --mode=eval \
-    --article_key=article \
-    --abstract_key=abstract \
-    --data_path=data/validation-* \
-    --vocab_path=data/vocab \
-    --log_root=textsum/log_root \
-    --eval_dir=textsum/log_root/eval
-
-# Run the decode. Run it when the model is mostly converged.
-$ bazel-bin/textsum/seq2seq_attention \
-    --mode=decode \
-    --article_key=article \
-    --abstract_key=abstract \
-    --data_path=data/test-* \
-    --vocab_path=data/vocab \
-    --log_root=textsum/log_root \
-    --decode_dir=textsum/log_root/decode \
-    --beam_size=8
-```
-
-
-Examples:
-
-The following are some text summarization examples, including experiments
-using datasets other than Gigaword.
-
-article: novell inc. chief executive officer eric schmidt has been named chairman of the internet search-engine company google .
-
-human: novell ceo named google chairman
-
-machine: novell chief executive named to head internet company
-
-======================================
-
-article: gulf newspapers voiced skepticism thursday over whether newly re - elected us president bill clinton could help revive the troubled middle east peace process but saw a glimmer of hope .
- -human: gulf skeptical about whether clinton will revive peace process - -machine: gulf press skeptical over clinton 's prospects for peace process - -====================================== - -article: the european court of justice ( ecj ) recently ruled in lock v british gas trading ltd that eu law requires a worker 's statutory holiday pay to take commission payments into account - it should not be based solely on basic salary . the case is not over yet , but its outcome could potentially be costly for employers with workers who are entitled to commission . mr lock , an energy salesman for british gas , was paid a basic salary and sales commission on a monthly basis . his sales commission made up around 60 % of his remuneration package . when he took two weeks ' annual leave in december 2012 , he was paid his basic salary and also received commission from previous sales that fell due during that period . lock obviously did not generate new sales while he was on holiday , which meant that in the following period he suffered a reduced income through lack of commission . he brought an employment tribunal claim asserting that this amounted to a breach of the working time regulations 1998 .....deleted rest for readability... - -abstract: will british gas ecj ruling fuel holiday pay hike ? - -decode: eu law requires worker 's statutory holiday pay - -====================================== - -article: the junior all whites have been eliminated from the fifa u - 20 world cup in colombia with results on the final day of pool play confirming their exit . sitting on two points , new zealand needed results in one of the final two groups to go their way to join the last 16 as one of the four best third place teams . but while spain helped the kiwis ' cause with a 5 - 1 thrashing of australia , a 3 - 0 win for ecuador over costa rica saw the south americans climb to second in group c with costa rica 's three points also good enough to progress in third place . that left the junior all whites hopes hanging on the group d encounter between croatia and honduras finishing in a draw . a stalemate - and a place in the knockout stages for new zealand - appeared on the cards until midfielder marvin ceballos netted an 81st minute winner that sent guatemala through to the second round and left the junior all whites packing their bags . new zealand finishes the 24 - nation tournament in 17th place , having claimed their first ever points at this level in just their second appearance at the finals . - -abstract: junior all whites exit world cup - -decoded: junior all whites eliminated from u- 20 world cup - diff --git a/research/textsum/batch_reader.py b/research/textsum/batch_reader.py deleted file mode 100644 index 918551b4c..000000000 --- a/research/textsum/batch_reader.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Batch reader for the seq2seq attention model, with bucketing support."""
-
-from collections import namedtuple
-from random import shuffle
-from threading import Thread
-import time
-
-import numpy as np
-import six
-from six.moves import queue as Queue
-from six.moves import xrange
-import tensorflow as tf
-
-import data
-
-ModelInput = namedtuple('ModelInput',
-                        'enc_input dec_input target enc_len dec_len '
-                        'origin_article origin_abstract')

-BUCKET_CACHE_BATCH = 100
-QUEUE_NUM_BATCH = 100
-
-
-class Batcher(object):
-  """Batch reader with shuffling and bucketing support."""
-
-  def __init__(self, data_path, vocab, hps,
-               article_key, abstract_key, max_article_sentences,
-               max_abstract_sentences, bucketing=True, truncate_input=False):
-    """Batcher constructor.
-
-    Args:
-      data_path: tf.Example filepattern.
-      vocab: Vocabulary.
-      hps: Seq2SeqAttention model hyperparameters.
-      article_key: article feature key in tf.Example.
-      abstract_key: abstract feature key in tf.Example.
-      max_article_sentences: Max number of sentences used from article.
-      max_abstract_sentences: Max number of sentences used from abstract.
-      bucketing: Whether to bucket articles of similar length into the same
-        batch.
-      truncate_input: Whether to truncate input that is too long. Alternative
-        is to discard such examples.
-    """
-    self._data_path = data_path
-    self._vocab = vocab
-    self._hps = hps
-    self._article_key = article_key
-    self._abstract_key = abstract_key
-    self._max_article_sentences = max_article_sentences
-    self._max_abstract_sentences = max_abstract_sentences
-    self._bucketing = bucketing
-    self._truncate_input = truncate_input
-    self._input_queue = Queue.Queue(QUEUE_NUM_BATCH * self._hps.batch_size)
-    self._bucket_input_queue = Queue.Queue(QUEUE_NUM_BATCH)
-    self._input_threads = []
-    for _ in xrange(16):
-      self._input_threads.append(Thread(target=self._FillInputQueue))
-      self._input_threads[-1].daemon = True
-      self._input_threads[-1].start()
-    self._bucketing_threads = []
-    for _ in xrange(4):
-      self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
-      self._bucketing_threads[-1].daemon = True
-      self._bucketing_threads[-1].start()
-
-    self._watch_thread = Thread(target=self._WatchThreads)
-    self._watch_thread.daemon = True
-    self._watch_thread.start()
-
-  def NextBatch(self):
-    """Returns a batch of inputs for seq2seq attention model.
-
-    Returns:
-      enc_batch: A batch of encoder inputs [batch_size, hps.enc_timesteps].
-      dec_batch: A batch of decoder inputs [batch_size, hps.dec_timesteps].
-      target_batch: A batch of targets [batch_size, hps.dec_timesteps].
-      enc_input_len: encoder input lengths of the batch.
-      dec_input_len: decoder input lengths of the batch.
-      loss_weights: weights for loss function, 1 if not padded, 0 if padded.
-      origin_articles: original article words.
-      origin_abstracts: original abstract words.
-    """
-    enc_batch = np.zeros(
-        (self._hps.batch_size, self._hps.enc_timesteps), dtype=np.int32)
-    enc_input_lens = np.zeros(
-        (self._hps.batch_size), dtype=np.int32)
-    dec_batch = np.zeros(
-        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
-    dec_output_lens = np.zeros(
-        (self._hps.batch_size), dtype=np.int32)
-    target_batch = np.zeros(
-        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
-    loss_weights = np.zeros(
-        (self._hps.batch_size, self._hps.dec_timesteps), dtype=np.float32)
-    origin_articles = ['None'] * self._hps.batch_size
-    origin_abstracts = ['None'] * self._hps.batch_size
-
-    buckets = self._bucket_input_queue.get()
-    for i in xrange(self._hps.batch_size):
-      (enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len,
-       article, abstract) = buckets[i]
-
-      origin_articles[i] = article
-      origin_abstracts[i] = abstract
-      enc_input_lens[i] = enc_input_len
-      dec_output_lens[i] = dec_output_len
-      enc_batch[i, :] = enc_inputs[:]
-      dec_batch[i, :] = dec_inputs[:]
-      target_batch[i, :] = targets[:]
-      for j in xrange(dec_output_len):
-        loss_weights[i][j] = 1
-    return (enc_batch, dec_batch, target_batch, enc_input_lens,
-            dec_output_lens, loss_weights, origin_articles, origin_abstracts)
-
-  def _FillInputQueue(self):
-    """Fill input queue with ModelInput."""
-    start_id = self._vocab.WordToId(data.SENTENCE_START)
-    end_id = self._vocab.WordToId(data.SENTENCE_END)
-    pad_id = self._vocab.WordToId(data.PAD_TOKEN)
-    input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
-    while True:
-      (article, abstract) = six.next(input_gen)
-      article_sentences = [sent.strip() for sent in
-                           data.ToSentences(article, include_token=False)]
-      abstract_sentences = [sent.strip() for sent in
-                            data.ToSentences(abstract, include_token=False)]
-
-      enc_inputs = []
-      # Use the <s> token as the start symbol for decoder inputs.
-      dec_inputs = [start_id]
-
-      # Convert first N sentences to word IDs, stripping existing <s> and </s>.
-      for i in xrange(min(self._max_article_sentences,
-                          len(article_sentences))):
-        enc_inputs += data.GetWordIds(article_sentences[i], self._vocab)
-      for i in xrange(min(self._max_abstract_sentences,
-                          len(abstract_sentences))):
-        dec_inputs += data.GetWordIds(abstract_sentences[i], self._vocab)
-
-      # Filter out too-short input.
-      if (len(enc_inputs) < self._hps.min_input_len or
-          len(dec_inputs) < self._hps.min_input_len):
-        tf.logging.warning('Drop an example - too short.\nenc:%d\ndec:%d',
-                           len(enc_inputs), len(dec_inputs))
-        continue
-
-      # If we're not truncating input, throw out too-long input.
-      if not self._truncate_input:
-        if (len(enc_inputs) > self._hps.enc_timesteps or
-            len(dec_inputs) > self._hps.dec_timesteps):
-          tf.logging.warning('Drop an example - too long.\nenc:%d\ndec:%d',
-                             len(enc_inputs), len(dec_inputs))
-          continue
-      # If we are truncating input, do so if necessary.
-      else:
-        if len(enc_inputs) > self._hps.enc_timesteps:
-          enc_inputs = enc_inputs[:self._hps.enc_timesteps]
-        if len(dec_inputs) > self._hps.dec_timesteps:
-          dec_inputs = dec_inputs[:self._hps.dec_timesteps]
-
-      # targets is dec_inputs without <s> at the beginning, plus </s> at
-      # the end.
-      targets = dec_inputs[1:]
-      targets.append(end_id)
-
-      # Now len(enc_inputs) should be <= enc_timesteps, and
-      # len(targets) = len(dec_inputs) should be <= dec_timesteps.
-      enc_input_len = len(enc_inputs)
-      dec_output_len = len(targets)
-
-      # Pad if necessary.
-      while len(enc_inputs) < self._hps.enc_timesteps:
-        enc_inputs.append(pad_id)
-      while len(dec_inputs) < self._hps.dec_timesteps:
-        dec_inputs.append(end_id)
-      while len(targets) < self._hps.dec_timesteps:
-        targets.append(end_id)
-
-      element = ModelInput(enc_inputs, dec_inputs, targets, enc_input_len,
-                           dec_output_len, ' '.join(article_sentences),
-                           ' '.join(abstract_sentences))
-      self._input_queue.put(element)
-
-  def _FillBucketInputQueue(self):
-    """Fill bucketed batches into the bucket_input_queue."""
-    while True:
-      inputs = []
-      for _ in xrange(self._hps.batch_size * BUCKET_CACHE_BATCH):
-        inputs.append(self._input_queue.get())
-      if self._bucketing:
-        inputs = sorted(inputs, key=lambda inp: inp.enc_len)
-
-      batches = []
-      for i in xrange(0, len(inputs), self._hps.batch_size):
-        batches.append(inputs[i:i+self._hps.batch_size])
-      shuffle(batches)
-      for b in batches:
-        self._bucket_input_queue.put(b)
-
-  def _WatchThreads(self):
-    """Watch the daemon input threads and restart if dead."""
-    while True:
-      time.sleep(60)
-      input_threads = []
-      for t in self._input_threads:
-        if t.is_alive():
-          input_threads.append(t)
-        else:
-          tf.logging.error('Found input thread dead.')
-          new_t = Thread(target=self._FillInputQueue)
-          input_threads.append(new_t)
-          input_threads[-1].daemon = True
-          input_threads[-1].start()
-      self._input_threads = input_threads
-
-      bucketing_threads = []
-      for t in self._bucketing_threads:
-        if t.is_alive():
-          bucketing_threads.append(t)
-        else:
-          tf.logging.error('Found bucketing thread dead.')
-          new_t = Thread(target=self._FillBucketInputQueue)
-          bucketing_threads.append(new_t)
-          bucketing_threads[-1].daemon = True
-          bucketing_threads[-1].start()
-      self._bucketing_threads = bucketing_threads
-
-  def _TextGenerator(self, example_gen):
-    """Generates article and abstract text from tf.Example."""
-    while True:
-      e = six.next(example_gen)
-      try:
-        article_text = self._GetExFeatureText(e, self._article_key)
-        abstract_text = self._GetExFeatureText(e, self._abstract_key)
-      except ValueError:
-        tf.logging.error('Failed to get article or abstract from example')
-        continue
-
-      yield (article_text, abstract_text)
-
-  def _GetExFeatureText(self, ex, key):
-    """Extract text for a feature from tf.Example.
-
-    Args:
-      ex: tf.Example.
-      key: key of the feature to be extracted.
-    Returns:
-      feature: a feature text extracted.
-    """
-    return ex.features.feature[key].bytes_list.value[0]
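Batcher exposes a single consumer-facing call, NextBatch, which blocks on the bucketed queue that the daemon threads keep full. A minimal sketch of driving it from a training loop; the paths, keys, and sentence limits are illustrative, and hps stands in for the seq2seq hyperparameters object referenced above (batch_size, enc_timesteps, dec_timesteps, min_input_len).

```python
# Illustrative only: wiring a Batcher into a training loop.
import data
from batch_reader import Batcher

vocab = data.Vocab('data/vocab', 1000000)
batcher = Batcher(
    'data/training-*', vocab, hps,  # hps: assumed hyperparameters object
    article_key='article', abstract_key='abstract',
    max_article_sentences=2, max_abstract_sentences=100,
    bucketing=True, truncate_input=False)

while True:
  (enc_batch, dec_batch, target_batch, enc_lens, dec_lens,
   loss_weights, origin_articles, origin_abstracts) = batcher.NextBatch()
  # Feed these arrays into the seq2seq attention model's train step here.
```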
diff --git a/research/textsum/beam_search.py b/research/textsum/beam_search.py
deleted file mode 100644
index 446799caa..000000000
--- a/research/textsum/beam_search.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Beam search module.
-
-Beam search takes the top K results from the model, predicts the K results for
-each of the previous K results, getting K*K results. It picks the top K results
-from the K*K results, and starts over again until a certain number of results
-are fully decoded.
-"""
-
-from six.moves import xrange
-import tensorflow as tf
-
-FLAGS = tf.flags.FLAGS
-tf.flags.DEFINE_bool('normalize_by_length', True, 'Whether to normalize')
-
-
-class Hypothesis(object):
-  """Defines a hypothesis during beam search."""
-
-  def __init__(self, tokens, log_prob, state):
-    """Hypothesis constructor.
-
-    Args:
-      tokens: start tokens for decoding.
-      log_prob: log prob of the start tokens, usually 0.
-      state: decoder initial states.
-    """
-    self.tokens = tokens
-    self.log_prob = log_prob
-    self.state = state
-
-  def Extend(self, token, log_prob, new_state):
-    """Extend the hypothesis with result from latest step.
-
-    Args:
-      token: latest token from decoding.
-      log_prob: log prob of the latest decoded tokens.
-      new_state: decoder output state. Fed to the decoder for next step.
-    Returns:
-      New Hypothesis with the results from latest step.
-    """
-    return Hypothesis(self.tokens + [token], self.log_prob + log_prob,
-                      new_state)
-
-  @property
-  def latest_token(self):
-    return self.tokens[-1]
-
-  def __str__(self):
-    return ('Hypothesis(log prob = %.4f, tokens = %s)' % (self.log_prob,
-                                                          self.tokens))
-
-
-class BeamSearch(object):
-  """Beam search."""
-
-  def __init__(self, model, beam_size, start_token, end_token, max_steps):
-    """Creates BeamSearch object.
-
-    Args:
-      model: Seq2SeqAttentionModel.
-      beam_size: int.
-      start_token: int, id of the token to start decoding with.
-      end_token: int, id of the token that completes a hypothesis.
-      max_steps: int, upper limit on the size of the hypothesis.
-    """
-    self._model = model
-    self._beam_size = beam_size
-    self._start_token = start_token
-    self._end_token = end_token
-    self._max_steps = max_steps
-
-  def BeamSearch(self, sess, enc_inputs, enc_seqlen):
-    """Performs beam search for decoding.
-
-    Args:
-      sess: tf.Session, session
-      enc_inputs: ndarray of shape (enc_length, 1), the document ids to encode
-      enc_seqlen: ndarray of shape (1), the length of the sequence
-
-    Returns:
-      hyps: list of Hypothesis, the best hypotheses found by beam search,
-          ordered by score
-    """
-
-    # Run the encoder and extract the outputs and final state.
-    enc_top_states, dec_in_state = self._model.encode_top_state(
-        sess, enc_inputs, enc_seqlen)
-    # Replicate the initial states K times for the first step.
-    hyps = [Hypothesis([self._start_token], 0.0, dec_in_state)
-           ] * self._beam_size
-    results = []
-
-    steps = 0
-    while steps < self._max_steps and len(results) < self._beam_size:
-      latest_tokens = [h.latest_token for h in hyps]
-      states = [h.state for h in hyps]
-
-      topk_ids, topk_log_probs, new_states = self._model.decode_topk(
-          sess, latest_tokens, enc_top_states, states)
-      # Extend each hypothesis.
-      all_hyps = []
-      # The first step takes the best K results from first hyps. Following
-      # steps take the best K results from K*K hyps.
-      num_beam_source = 1 if steps == 0 else len(hyps)
-      for i in xrange(num_beam_source):
-        h, ns = hyps[i], new_states[i]
-        for j in xrange(self._beam_size*2):
-          all_hyps.append(h.Extend(topk_ids[i, j], topk_log_probs[i, j], ns))
-
-      # Filter and collect any hypotheses that have the end token.
-      hyps = []
-      for h in self._BestHyps(all_hyps):
-        if h.latest_token == self._end_token:
-          # Pull the hypothesis off the beam if the end token is reached.
-          results.append(h)
-        else:
-          # Otherwise continue to extend the hypothesis.
-          hyps.append(h)
-        if len(hyps) == self._beam_size or len(results) == self._beam_size:
-          break
-
-      steps += 1
-
-    if steps == self._max_steps:
-      results.extend(hyps)
-
-    return self._BestHyps(results)
-
-  def _BestHyps(self, hyps):
-    """Sort the hyps based on log probs and length.
-
-    Args:
-      hyps: A list of hypothesis.
-    Returns:
-      hyps: A list of sorted hypothesis in reverse log_prob order.
-    """
-    # This length normalization is only effective for the final results.
-    if FLAGS.normalize_by_length:
-      return sorted(hyps, key=lambda h: h.log_prob/len(h.tokens), reverse=True)
-    else:
-      return sorted(hyps, key=lambda h: h.log_prob, reverse=True)
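A usage sketch for the BeamSearch class above, decoding one encoded article. It assumes a trained Seq2SeqAttentionModel instance, a tf.Session, and encoder inputs shaped as the docstring describes; model, sess, enc_inputs, and enc_seqlen are placeholders, not defined in this patch.

```python
# Illustrative only: running beam search decode for one article.
import beam_search
import data

bs = beam_search.BeamSearch(
    model, beam_size=8,
    start_token=vocab.WordToId(data.SENTENCE_START),
    end_token=vocab.WordToId(data.SENTENCE_END),
    max_steps=30)

# Returns hypotheses sorted best-first; drop the leading <s> token.
best_beams = bs.BeamSearch(sess, enc_inputs, enc_seqlen)
decoded_ids = [int(t) for t in best_beams[0].tokens[1:]]
summary_words = data.Ids2Words(decoded_ids, vocab)
```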
diff --git a/research/textsum/data.py b/research/textsum/data.py
deleted file mode 100644
index 2baad0a12..000000000
--- a/research/textsum/data.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Data batchers for data described in ..//data_prep/README.md."""
-
-import glob
-import random
-import struct
-import sys
-
-from tensorflow.core.example import example_pb2
-
-
-# Special tokens
-PARAGRAPH_START = '<p>'
-PARAGRAPH_END = '</p>'
-SENTENCE_START = '<s>'
-SENTENCE_END = '</s>'
-UNKNOWN_TOKEN = '<UNK>'
-PAD_TOKEN = '<PAD>'
-DOCUMENT_START = '<d>'
-DOCUMENT_END = '</d>'
-
-
-class Vocab(object):
-  """Vocabulary class for mapping words and ids."""
-
-  def __init__(self, vocab_file, max_size):
-    self._word_to_id = {}
-    self._id_to_word = {}
-    self._count = 0
-
-    with open(vocab_file, 'r') as vocab_f:
-      for line in vocab_f:
-        pieces = line.split()
-        if len(pieces) != 2:
-          sys.stderr.write('Bad line: %s\n' % line)
-          continue
-        if pieces[0] in self._word_to_id:
-          raise ValueError('Duplicated word: %s.' % pieces[0])
-        self._word_to_id[pieces[0]] = self._count
-        self._id_to_word[self._count] = pieces[0]
-        self._count += 1
-        if self._count > max_size:
-          raise ValueError('Too many words: >%d.' % max_size)
-
-  def CheckVocab(self, word):
-    if word not in self._word_to_id:
-      return None
-    return self._word_to_id[word]
-
-  def WordToId(self, word):
-    if word not in self._word_to_id:
-      return self._word_to_id[UNKNOWN_TOKEN]
-    return self._word_to_id[word]
-
-  def IdToWord(self, word_id):
-    if word_id not in self._id_to_word:
-      raise ValueError('id not found in vocab: %d.' % word_id)
-    return self._id_to_word[word_id]
-
-  def NumIds(self):
-    return self._count
-
-
-def ExampleGen(data_path, num_epochs=None):
-  """Generates tf.Examples from path of data files.
-
-  Binary data format: <length><blob>. <length> represents the byte size
-  of <blob>. <blob> is a serialized tf.Example proto. The tf.Example
-  contains the tokenized article text and summary.
-
-  Args:
-    data_path: path to tf.Example data files.
-    num_epochs: Number of times to go through the data. None means infinite.
-
-  Yields:
-    Deserialized tf.Example.
-
-  If there are multiple files specified, they are accessed in random order.
-  """
-  epoch = 0
-  while True:
-    if num_epochs is not None and epoch >= num_epochs:
-      break
-    filelist = glob.glob(data_path)
-    assert filelist, 'Empty filelist.'
-    random.shuffle(filelist)
-    for f in filelist:
-      reader = open(f, 'rb')
-      while True:
-        len_bytes = reader.read(8)
-        if not len_bytes: break
-        str_len = struct.unpack('q', len_bytes)[0]
-        example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
-        yield example_pb2.Example.FromString(example_str)
-
-    epoch += 1
-
-
-def Pad(ids, pad_id, length):
-  """Pad or trim list to len length.
-
-  Args:
-    ids: list of ints to pad
-    pad_id: what to pad with
-    length: length to pad or trim to
-
-  Returns:
-    ids trimmed or padded with pad_id
-  """
-  assert pad_id is not None
-  assert length is not None
-
-  if len(ids) < length:
-    a = [pad_id] * (length - len(ids))
-    return ids + a
-  else:
-    return ids[:length]
-
-
-def GetWordIds(text, vocab, pad_len=None, pad_id=None):
-  """Get ids corresponding to words in text.
-
-  Assumes tokens separated by space.
-
-  Args:
-    text: a string
-    vocab: TextVocabularyFile object
-    pad_len: int, length to pad to
-    pad_id: int, word id for pad symbol
-
-  Returns:
-    A list of ints representing word ids.
-  """
-  ids = []
-  for w in text.split():
-    i = vocab.WordToId(w)
-    if i >= 0:
-      ids.append(i)
-    else:
-      ids.append(vocab.WordToId(UNKNOWN_TOKEN))
-  if pad_len is not None:
-    return Pad(ids, pad_id, pad_len)
-  return ids
-
-
-def Ids2Words(ids_list, vocab):
-  """Get words from ids.
-
-  Args:
-    ids_list: list of int32
-    vocab: TextVocabulary object
-
-  Returns:
-    List of words corresponding to ids.
- """ - assert isinstance(ids_list, list), '%s is not a list' % ids_list - return [vocab.IdToWord(i) for i in ids_list] - - -def SnippetGen(text, start_tok, end_tok, inclusive=True): - """Generates consecutive snippets between start and end tokens. - - Args: - text: a string - start_tok: a string denoting the start of snippets - end_tok: a string denoting the end of snippets - inclusive: Whether include the tokens in the returned snippets. - - Yields: - String snippets - """ - cur = 0 - while True: - try: - start_p = text.index(start_tok, cur) - end_p = text.index(end_tok, start_p + 1) - cur = end_p + len(end_tok) - if inclusive: - yield text[start_p:cur] - else: - yield text[start_p+len(start_tok):end_p] - except ValueError as e: - raise StopIteration('no more snippets in text: %s' % e) - - -def GetExFeatureText(ex, key): - return ex.features.feature[key].bytes_list.value[0] - - -def ToSentences(paragraph, include_token=True): - """Takes tokens of a paragraph and returns list of sentences. - - Args: - paragraph: string, text of paragraph - include_token: Whether include the sentence separation tokens result. - - Returns: - List of sentence strings. - """ - s_gen = SnippetGen(paragraph, SENTENCE_START, SENTENCE_END, include_token) - return [s for s in s_gen] diff --git a/research/textsum/data/data b/research/textsum/data/data deleted file mode 100644 index b554873a62ad4a6504f596f498c368b6bfe4eb12..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33582 zcmb82Tc~VXde?={xy^k}j`V3;@Tp)woR?L*pG4AWTSPjsy`@dGy(c}ZYF5=;tEy(5 zIjh#%Yd>|+*$Rx$U~$FuQ9{+gZEcH>;18AH-}lh zsoTSFnw@ICn+?se?(0)KoQ7F5EbAQutY+=OvmUyhftgclUb37M1HG7i?lWv7Z)I68 z+Cy_HR=#SM?MmzJX7%c}W)FU!kDL9X>1W$+*RsQ5wzy}@&1~Ntm}Su{>+|3p);!-g z^=Y>5yS?AtwY_(etM{|{u3yY&yA!*g_xfYrWsqpL+_t+_-yCLF z(4-{!dfhJD8amYNs@&RFzM_etc+EfU;neJQOsu{1%@MK;e9a0)n6#ojOcQD*X9$ij}Whi!Hr zM;VE)h}Epl%V5dAJs>{})3?iQ-LGcH`m}BP`cThq+o6>}4GmHbnG4d7!aIumqFHyW z!@`#2U9vQY?RQA?slI`O59F^H(S+FZ(5&9EoP%_s?pepCk$y2iC*fo1+BoEF`u(hV zi5T%HlZd8y+ruM?XzG5~vdKwBuV800AnWKZ8$Yc2_NJcQocGeG^I=oR7}}2|QxXE5 zjA5qqw);ToI4Wf42u=p8dF;%+5rfUzINvT;aqR_(nyDhYz z4NSE{jeGQEB?F*Q7wu-#pzvbBj5UU!h?xG`;>(oC3pBZ|*K7FpVkGltWVSxD`h4}} z>g{?locels`s5#~{$TY3rBNFV+Dw+JfqKa_v4*?4A5F{fc-&7_?;OvIUCY}0Prg;X zQGMd~|G<}i$D7HZe)HYxuRog%>d!u_{_3;pFFz~iHs$(u-(XEKX1lhwHLX|OQKDDR z4$U1pwMX~%Ef!`8r%&zaY;#g_Gwdex+1*P-L}~Q2#i)q zNq;E-7tDwWM)KL_U_OR*)Ax*J8un*+kB92UY2n0k{ulms)2@@>LTUH#V(4yL+~cmP zWoQrW4H`bX*)q&_xovKltR1kqN5n;(#-gkc(?fkYA%*b0Ic*QnIi|%#-EA>qGWJjb zL1|Vzwrcj!fw%XvpNUP0=Qk~rh)Nr|R7tXALav}2mfc;+m&KW&p%Cw|6{5nhJfBWG zCaBjMVv=d*+9F0C6S>e1#4J>8o>pAg$on(yao-Hvo9G$9=6DB)Q1j8b z-m$AmTIUc%+bbDYF5FX2nm?Qu1A}0e86^~m*@%Ti9mZG^6V*sRQ$WHSHgq++Z*Fn# zd~!ba=fnN^#!}FQjt#S$ra8(Uc&1Ukq0DQJ4{Me>MYc!nlNoL z2U6hh;071pJdr1+R z0ja{3r(rHESl4gR0oj>j$ATbycEO`>_)~>%_`}r~s~;@wbKc=Y+dL%0Tv+2r13&2; zzVudd4*%+VtAG98bd*mv!&H9tM^VA=Wam9U`@tiKu6*4r4}G zb0!vC;V_B5l55Upc~ieaGfsVbRFr<&o_h!B4#=Rer4tj-4t?F=%4Sj}jVeC31jn{a zxK_pG;!}EXhT88G3C5^Mh#d8T%wVK3_A;&bD#TTCusZkQovw)oPFAr&JHk*|Hk=R; zCu0_74WD;=6fZ|<8)*iF=$n^)w>lx{j^|j_3V(JWXeD}M1zKkWC~5sG4D)?jx?CGR zo*+KNxNzR_!E|$K9c(cQYc_l-~A~E(XNHev} zhykl-*WH;Y4|+pkw51+yMD9peu%pwWi25;q`jQ3k|K$=tJR3-d;xlX0OmX6CMpcVW- zg}~DCc3>=A&UL%3S6O*bnQcpaS>Mz*tw!4uW0T_4$|SQo;I3@Rmlpfb1^JU62Z-w3 zDzB(9vxMTsUdKIH4AW*c^%@eer{Qcbk!3Mlkm##4ON3<2d11w{aE52lMJLWR&P{cHfDl8GHiskUCJ~@hWYa(MJEHV)5ml-g! 
diff --git a/research/textsum/data/data b/research/textsum/data/data
deleted file mode 100644
index b554873a62ad4a6504f596f498c368b6bfe4eb12..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 33582
[33582 bytes of base85-encoded binary payload of the deleted toy data file elided]
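The vocab file removed below is plain text with one `token count` pair per line, ordered by corpus frequency; this is exactly the format that Vocab.__init__ above parses. A minimal sketch of how such a file could be regenerated from a whitespace-tokenized corpus (the helper name WriteVocab and the `documents` iterable are assumptions, not part of the original code):

    import collections

    def WriteVocab(documents, vocab_path, max_size=10000):
      """Writes a 'token count' vocab file in the format Vocab.__init__ parses."""
      counter = collections.Counter()
      for doc in documents:
        counter.update(doc.split())  # Vocab assumes whitespace-tokenized text.
      with open(vocab_path, 'w') as f:
        for word, count in counter.most_common(max_size):
          f.write('%s %d\n' % (word, count))

Counter.most_common keeps the file frequency-sorted, and truncating at max_size respects the `Too many words` guard in Vocab.__init__. Note that Vocab.WordToId assumes UNKNOWN_TOKEN is present in the file, so special tokens such as <UNK> and <PAD> would also need entries.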
diff --git a/research/textsum/data/vocab b/research/textsum/data/vocab
deleted file mode 100644
index 315740c28..000000000
--- a/research/textsum/data/vocab
+++ /dev/null
@@ -1,10003 +0,0 @@
-the 135597564 -, 121400181 -. 
98868076 -to 58429764 -of 56269484 -in 49820911 -a 49701084 -and 49378364 -'s 23787251 -'' 23227828 -`` 23116499 -that 21577263 -for 20998230 -said 20858620 -on 19106851 -## 16627320 -is 15661835 -was 14607055 -with 14265376 -he 13755120 -it 13588190 - 12263923 -at 12221539 -as 11657129 -by 11105584 - 11090708 - 11090708 -from 10275933 -his 9090323 -be 8939486 -have 8930288 -has 8880930 -but 8213981 -an 8035012 -fourmile 60 -zwart 60 -post-baby 60 -diasporas 60 -herzeg-bosna 60 -younkers 60 -rolfing 60 -cyclades 60 -lovas 60 -super-cheap 60 -johnsonglobe.com 60 -incarnates 60 -candis 60 -luzira 60 -toyota\/lola\/bridgestone 60 -caohc 60 -flatbeds 60 -pairat 60 -stubborness 60 -mogaka 60 -march-past 60 -alaba 60 -extravehicular 60 -conolly 60 -shelford 60 -snowblowers 60 -excoriation 60 -langoliers 60 -ayios 60 -rsm\/rw## 60 -ultralow 60 -kassire 60 -kirikkale 60 -rutaca 60 -hardys 60 -latigo 60 -aggressive-growth 60 -shankland 60 -tetrault 60 -ntf 60 -british-u.s. 60 -rbk 60 -hannagan 60 -pro-french 60 -macero 60 -bahrenburg 60 -recanvass 60 -hayrunisa 60 -educap 60 -tuoh 60 -x-#-#-#-# 60 -cravenly 60 -jent 60 -britain-farm-animals-disease 60 -white-power 60 -pongsidhirak 60 -fbc-ohiostate 60 -euro-arab 60 -prds 60 -sinkinson 60 -baugher 60 -fpd 60 -sakombi 60 -holyhead 60 -virusscan 60 -niboro 60 -cliffsnotes 60 -inhibitory 60 -nontariff 60 -huaraz 60 -qera 60 -icomos 60 -off-handed 60 -lumbee 60 -kututwa 60 -besra 60 -ownit 60 -out-of-area 60 -petrozuata 60 -wielinga 60 -roecker 60 -jeanneret 60 -ryukyus 60 -chocked 60 -syda 60 -rearers 60 -especialistas 60 -stoeltje 60 -tag-along 60 -pendulums 60 -land-hungry 60 -male-pattern 60 -ogbulafor 60 -jemil 60 -singlehanded 60 -bogaert 60 -brawnier 60 -picardie 60 -patsalides 60 -zvimba 60 -talamoni 60 -aristos 60 -reductionist 60 -sung-han 60 -ebby 60 -tcambanisglobe.com 60 -well-populated 60 -boguinskaia 60 -golfen 60 -fossmo 60 -leches 60 -madtv 60 -mirzapur 60 -dromey 60 -makowski 60 -bearzot 60 -fifth-day 60 -togoimi 60 -ethopia 60 -espoo-based 60 -cuppa 60 -cristin 60 -lambrechts 60 -eurosystem 60 -november-january 60 -home-security 60 -vengerov 60 -major-market 60 -obviates 60 -horster 60 -three-and-half 60 -sinyong 60 -cizek 60 -issyk-kul 60 -granatino 60 -ketones 60 -jenista 60 -mvovo 60 -shuttlecocks 60 -behounek 60 -nonscholarship 60 -mutaz 60 -hermandad 60 -engelmayer 60 -mussallam 60 -lutein 60 -drag-queen 60 -independent-film 60 -u.s.-asia 60 -jiantang 60 -pinkins 60 -uplifts 60 -lifsher 60 -gree 60 -suit-clad 60 -donavan 60 -stracke 60 -hard-to-read 60 -merco 60 -clanks 60 -asvat 60 -kloof 60 -marsland 60 -caipirinhas 60 -army-style 60 -benhuri 60 -mokotedi 60 -toader 60 -hanarotelecom 60 -guodong 60 -cottone 60 -dominicis 60 -osuntokun 60 -planetall 60 -ik-rjm 60 -serhan 60 -delimiting 60 -chiaro 60 -optimizes 60 -kopay 60 -adakhan 60 -cretz 60 -liberato 60 -kandie 60 -abdur-raheem 60 -vodichkova 60 -schawlow 60 -kulivan 60 -tagtop 60 -stefanini 60 -wcec 60 -kabocha 60 -okresek 60 -martin-in-the-fields 60 -lawn-mowing 60 -cpds 60 -raso 60 -ciubuc 60 -match-winners 60 -eye-pleasing 60 -elorriaga 60 -marusa 60 -ford@globe.com 60 -dreger 60 -#x#k 60 -wsm 60 -hocutt 60 -macconnell 60 -out-of-service 60 -deep-fat 60 -body-builder 60 -streamline.com 60 -piatas 60 -scolavino 60 -technobabble 60 -war-ridden 60 -much-used 60 -colisee 60 -itty 60 -bhw 60 -undersubscribed 60 -cargraphics 60 -tourist-related 60 -fada 60 -sibomana 60 -shuger 60 -megawatt-hours 60 -webpad 60 -visine 60 -rewriteable 60 -madore 60 -fasanos 60 -weinhauer 60 
-anti-mormon 60 -loudmouths 60 -broadness 60 -quixtar 60 -fancy-schmancy 60 -gangways 60 -aversive 60 -clingendael 60 -jonathans 60 -n#k 60 -schwald 60 -puxi 60 -abrahamic 60 -casebook 60 -clear-the-air 60 -islamised 60 -messeria 60 -vacation-home 60 -pro-tutsi 60 -kishkovsky 60 -portee 60 -awardee 60 -batsh 60 -latika 60 -bristol-meyers 60 -collum 60 -aprils 60 -telephia 60 -cloud-shrouded 60 -child-abusing 60 -hott 60 -bacteriophages 60 -ghosananda 60 -x-original-to 60 -aubuchon 60 -maldon 60 -owei 60 -stumper 60 -ghanaian-born 60 -cincinnatti 60 -spelich 60 -guoxing 60 -regulation-time 60 -scotiamcleod 60 -tesana 60 -seung-youn 60 -wen-ko 60 -stadt 60 -schroeders 60 -norin 60 -nung 60 -bank\/schroder 60 -relased 60 -ea-lm 60 -rubey 60 -cfi 60 -kavaja 60 -bourgault 60 -behrakis 60 -suraphong 60 -homesteader 60 -wbur-fm 60 -whee 60 -afghan-kidnappings 60 -rybin 60 -near-starvation 60 -crippa 60 -sanlitun 60 -pro-monarchist 60 -mortalities 60 -anticoagulants 60 -unsual 60 -kaha 60 -kamte 60 -harakah 60 -scarfe 60 -kenoy 60 -sendov 60 -depasquale 60 -maddaloni 60 -jointly-funded 60 -nokwe 60 -liff 60 -kickstarter 60 -khakpour 60 -cribiore 60 -husbanded 60 -pedauye 60 -perrella 60 -far-post 60 -boruchowitz 60 -interational 60 -close-to-home 60 -sophoan 60 -luqa 60 -legislator-elect 60 -forgy 60 -leutar 60 -uosukainen 60 -mirisch 60 -havin 60 -scorekeeping 60 -englishness 60 -philippines-landslide 60 -economides 60 -etonian 60 -randburg 60 -aew 60 -maillet 60 -abogado 60 -well-aware 60 -stunners 60 -laras 60 -#-tatiana 60 -cicciaro 60 -ski-in 60 -##-billion-u.s. 60 -keteyian 60 -first-in-the-south 60 -full-rate 60 -palepoi 60 -self-deluded 60 -hiv-infection 60 -rios-martinez 60 -turbojet 60 -yike 60 -radmacher 60 -stir-crazy 60 -bucuane 60 -unidentifed 60 -richardo 60 -zygi 60 -spritely 60 -vaynerchuk 60 -meniere 60 -special-ed 60 -teenybopper 60 -jmckim@globe.com 60 -xade 60 -danish-owned 60 -ikuko 60 -#-chris 60 -kayapo 60 -zeckendorf 60 -steamrollers 60 -giroir 60 -jaidev 60 -eraserhead 60 -japanese-chinese 60 -reprocesses 60 -muros 60 -must-buy 60 -corsiglia 60 -yanqui 60 -basanez 60 -unplayed 60 -mimika 60 -electric-only 60 -scerbatihs 60 -hitzig 60 -zero-interest-rate 60 -populares 60 -iphigenia 60 -nutricia 60 -sape 60 -kopec 60 -iencsi 60 -alvan 60 -ho-nyoun 60 -mob-style 60 -frivolities 60 -post-ups 60 -percentiles 60 -coote 60 -sodowsky 60 -caglar 60 -sahaviriya 60 -leggat 60 -xueju 60 -compagnia 60 -zomax 60 -inter-school 60 -hugeness 60 -sleaziness 60 -langenhan 60 -business-news 60 -armenteros 60 -cup-clinching 60 -yelavich 60 -chao-ching 60 -salvadori 60 -ameronline 60 -sex-starved 60 -alifereti 60 -trosch 60 -recombined 60 -dwek 60 -emission-control 60 -pozdniakov 60 -shalaan 60 -uzelac 60 -imzouren 60 -obolensk 60 -moshen 60 -commodified 60 -aleady 60 -barakaldo 60 -tumwesigye 60 -greiff 60 -walp 60 -r-palm 60 -julkipli 60 -managerless 60 -yabucoa 60 -zandl 60 -self-doubting 60 -kherman@statesman.com 60 -amol 60 -erck 60 -nitc 60 -hutcheon 60 -hornbills 60 -all-win 60 -heavy-weighted 60 -publick 60 -b&h 60 -kirchler 60 -cornella-el 60 -seefeldt 60 -sasse 60 -http://www.nra.org 60 -ball-striker 60 -fip 60 -concentra 60 -graywolf 60 -debbouze 60 -snowfields 60 -rabushka 60 -well-trimmed 60 -sofri 60 -jungle-shrouded 60 -beynon 60 -kotil 60 -snarly 60 -##-cent-per-gallon 60 -then-boss 60 -wakeling 60 -haft-e-tir 60 -majoros 60 -guglielminpietro 60 -hasnawi 60 -plachkov 60 -cabarrus 60 -consumer-advocacy 60 -xinhuanet 60 -karakul 60 -eight-iron 60 -rallis 60 -lighthizer 60 -vugar 
60 -eighth-generation 60 -unitd 60 -x-anthony 60 -mwonzora 60 -prestart 60 -nadkarni 60 -phyapon 60 -tshuva 60 -kakuryu 60 -ist###-### 60 -golf-ryder 60 -delivered-to 60 -excelle 60 -###.#-yard 60 -less-visible 60 -elopement 60 -khenthong 60 -controllability 60 -venzuela 60 -soutine 60 -pongpen 60 -xhelili 60 -four-homer 60 -his-name 60 -dunnigan 60 -austalia 60 -djalma 60 -uher 60 -yaman 60 -kipng 60 -debt-related 60 -volgyes 60 -liposome 60 -diamoutene 60 -spragg 60 -sumahadi 60 -lebov 60 -blackly 60 -mutineering 60 -overstocking 60 -ryynaenen 60 -beraja 60 -hoklo 60 -uniphoenix 60 -cumbal 60 -redmayne 60 -sesana 60 -inu 60 -daresay 60 -salvinia 60 -egoistic 60 -nellies 60 -deep-frozen 60 -labor-relations 60 -weegen 60 -sino-kenyan 60 -cup-winner 60 -gimlet-eyed 60 -lendus 60 -rwisereka 60 -hugh-jones 60 -shenzhen-listed 60 -sunmonu 60 -zmago 60 -latonya 60 -tv\/ji 60 -troshin 60 -analista 60 -hysteric 60 -lindsay-hogg 60 -marmaro 60 -purvey 60 -kilovolt 60 -val-kill 60 -partership 60 -prostrating 60 -bhairahawa 60 -foc 60 -lyonpo 60 -yeardley 60 -detalles 60 -camas 60 -web-search 60 -kosminsky 60 -defund 60 -cowman 60 -al-kebir 60 -kasher 60 -kapolei 60 -fiords 60 -brentjens 60 -teen-pregnancy 60 -qawi 60 -orquesta 60 -genotyping 60 -nonato 60 -nssa 60 -twyford 60 -yuling 60 -pearcy 60 -sisic 60 -defraying 60 -eisteddfod 60 -benson-pope 60 -barflies 60 -one-arm 60 -unitech 60 -wheats 60 -mid-town 60 -non-tour 60 -fiord 60 -jnr. 60 -bge 60 -mixmaster 60 -glanton 60 -oner 60 -tax-return 60 -embolisms 60 -willse 60 -ribon 60 -turbodiesel 60 -higher-performing 60 -cotswold 60 -early-summer 60 -romasko 60 -rulfo 60 -moad 60 -paradyne 60 -##-sided 60 -galili 60 -nabatean 60 -haarlemmermeer 60 -reanimated 60 -supo 60 -varujan 60 -demystifies 60 -gilgamesh 60 -bangabhaban 60 -ekin 60 -ctl 60 -bmv 60 -ipolito 60 -ovsyannikov 60 -archaeologically 60 -x-michael 60 -hko 60 -stollen 60 -veloute 60 -sung-young 60 -hydroplaned 60 -coogee 60 -tammert 60 -stambouli 60 -makama 60 -kilted 60 -tuqan 60 -dutch-german 60 -mids 60 -two-to-four 60 -gavan 60 -doorstops 60 -pazira 60 -halifa 60 -melwood 60 -techonology 60 -montecatini 60 -throat-clearing 60 -avakian 60 -hirschbiegel 60 -anticonvulsant 60 -disadvantaging 60 -sieger 60 -sealink 60 --##.### 60 -rabelo 60 -web-mail 60 -maulud 60 -inchworm 60 -tabtabai 60 -relata 60 -ktt 60 -dae-hyun 60 -witchdoctor 60 -astres 60 -yannett 60 -fixed-dose 60 -cadle 60 -tianfu 60 -necesitamos 60 -slemrod 60 -tamil-speaking 60 -rsm\/br## 60 -preisdent 60 -eitzmann 60 -akuffo 60 -blefary 60 -cyclophosphamide 60 -aprilla 60 -share-the-wealth 60 -yagmurdereli 60 -maumere 60 -embarek 60 -monday-thursday 60 -ostiglia 60 -seree 60 -smokefree 60 -nerio 60 -lactase 60 -harbour-felax 60 -belue 60 -al-jaz 60 -tamisuke 60 -twahir 60 -setian 60 -dmp 60 -kowske 60 -supermercados 60 -decalogue 60 -outpolls 60 -pinballed 60 -hezbullah 60 -daluwatte 60 -nintendogs 60 -mediocrities 60 -cercelletta 60 -lulzim 60 -youngstars 60 -mavrou 60 -gorgonio 60 -manie 60 -occassionally 60 -chikelu 60 -mcgettigan 60 -sinduhije 60 -produjo 60 -charvat 60 -once-communist 60 -good-tasting 60 -palexpo 60 -izak 60 -comedy\/drama 60 -brusqueness 60 -anky 60 -#rd-#th 60 -ergasias 60 -jaxon 60 -miletti 60 -wangui 60 -heb 60 -exurb 60 -chalices 60 -sentimentalists 60 -###-turbine 60 -vermeers 60 -wipeouts 60 -rolff 60 -deguardia 60 -grosbard 60 -zegra 60 -kunder 60 -street-racing 60 -bullington 60 -wunderteam 60 -non-disabled 60 -leese 60 -weishan 60 -ten-time 60 -lactobacillus 60 -funkadelic 60 -zhengyu 60 
-state-subsidised 60 -existen 60 -nakivubo 60 -dorinda 60 -popularly-elected 60 -wmf 60 -zrenjanin 60 -al-momen 60 -cesspools 60 -yacub 60 -zhuoru 60 -zouk 60 -tarnower 60 -sweet-potato 60 -lashonda 60 -fcdu 60 -tantalus 60 -nordling 60 -heidrun 60 -swigged 60 -tramontano 60 -spa\/fas 60 -invicta 60 -fenzl 60 -al-najar 60 -vinyl-coated 60 -stategy 60 -enewetak 60 -frost-free 60 -dstanford 60 -elwin 60 -kainer 60 -yugi 60 -cl### 60 -petitclerc 60 -margriet 60 -afterworld 60 -sirbu 60 -non-tradeable 60 -co-signing 60 -isiro 60 -jelacic 60 -tonghai 60 -narisetti 60 -whisperings 60 -pembangunan 60 -clatters 60 -kalashnikova 60 -health-flu-europe 60 -cross-license 60 -sarees 60 -minal 60 -retirement-age 60 -nitrogen-based 60 -sun-yup 60 -naief 60 -#,###-day 60 -rovs 60 -konia 60 -parasitology 60 -riz 60 -hankyu 60 -ebulliently 60 -franzblau 60 -apuuli 60 -muan 60 -ghoulishly 60 -six-gun 60 -recondite 60 -dukker 60 -zemmouri 60 -yumei 60 -http://www.centcom.mil 60 -one-night-only 60 -kovanda 60 -akhundzadeh 60 -worcester\/eng 60 -missiroli 60 -zhenyuan 60 -quarterpipe 60 -salix 60 -petrouchka 60 -mini-treaty 60 -epicure 60 -maintainance 60 -day-nighter 60 -ghengis 60 -pro-hitler 60 -hurleys 60 -editorialize 60 -henle 60 -sonson 60 -meals-ready-to-eat 60 -pratts 60 -pisetsky 60 -thonglao 60 -testings 60 -narcisa 60 -eight-night 60 -noster 60 -technology-stock 60 -dsquared 60 -nain 60 -bbn# 60 -explosively-formed 60 -b.l. 60 -zucchero 60 -dulay 60 -mid-spring 60 -ionides 60 -prochazkova 60 -twinsburg 60 -jhollis 60 -pre-####s 60 -bouyabes 60 -safe-conduct 60 -drug-resistance 60 -payphones 60 -yongwei 60 -labor-law 60 -tualatin 60 -ehf 60 -hirooka 60 -mortgage-bond 60 -gaztelu 60 -mitidja 60 -kondratiev 60 -christain 60 -http://www.alcoa.com 60 -hand-raised 60 -shaner 60 -medeski 60 -scansoft 60 -multi-millionaires 60 -reder 60 -htin 60 -paillettes 60 -intershop 60 -carfagna 60 -beyazit 60 -annakin 60 -scaglione 60 -esdi 60 -mikie 60 -inferiors 60 -student-on-student 60 -regionalize 60 -luff 60 -demitasse 60 -cal-bred 60 -calzado 60 -agita 60 -balin 60 -highest-end 60 -sinyani 60 -all-industries 60 -reuses 60 -lagrimas 60 -herberts 60 -koves 60 -sub-surface 60 -al-anzi 60 -longer-running 60 -gallimore 60 -pluralities 60 -benedetta 60 -jiddah-based 60 -tomaselli 60 -kallestad 60 -souheil 60 -kanso 60 -privett 60 -koronka 60 -nonmonetary 60 -rangsan 60 -esztergom 60 -zettler 60 -land-management 60 -indie-film 60 -learysptimes.com 60 -demokratikong 60 -vishakhapatnam 60 -rutba 60 -pluss 60 -africare 60 -#.##-acre 60 -shabib 60 -florins 60 -jujiya 60 -compaign 60 -demant 60 -josiane 60 -d'andre 60 -skagerrak 60 -azapo 60 -http://www.ladieseuropeantour.com 60 -whisperers 60 -studdert 60 -overindulgent 60 -qabazard 60 -norcross-based 60 -ex-gurkha 60 -x-seattle 60 -hejiang 60 -mcglaughlin 60 -preciously 60 -geetha 60 -gardezi 60 -ikuta 60 -attaullah 60 -hensler 60 -tree-shaped 60 -subarctic 60 -starman 60 -hussar 60 -verburg 60 -tupurkovski 60 -jeelani 60 -avidan 60 -sancha 60 -poliakoff 60 -room-sized 60 -instable 60 -pejsek 60 -el-falali 60 -chachoengsao 60 -chinese-produced 60 -al-kinani 60 -knuth 60 -bunner 60 -shinholster 60 -sabiston 60 -dvdirect 60 -douroux 60 -zebley 60 -salicylic 60 -etian 60 -co-organised 60 -osmanagic 60 -khotang 60 -doswell 60 -zfa 60 -proscribing 60 -bbn-dodgernotes 60 -domzale 60 -chinguetti 60 -joseph-beth 60 -stirrer 60 -deviatovski 60 -wright-designed 60 -spin-doctoring 60 -un-sudan 60 -contratos 60 -decimalization 60 -murugesu 60 -cleeman 60 -unbolted 60 -delphin 60 
-twitter.com/gregauman 60 -depravation 60 -goen 60 -meckel 60 -kerr\/john 60 -nariaki 60 -co-ranked 60 -double-gold 60 -_______ 60 -binet 60 -perpere 60 -mahbuhbullah 60 -gendun 60 -zilla 60 -godowsky 60 -lycett 60 -ceramist 60 -damia 60 -oradell 60 -karimi-rad 60 -kreisler 60 -hard-hatted 60 -wahpeton 60 -christianne 60 -vesselin 60 -kivumbi 60 -life-supporting 60 -writing-directing 60 -valances 60 -chlorinate 60 -na-young 60 -nuclear-bomb 60 -decomissioning 60 -payam 60 -tuleh 60 -innova 60 -chih-hao 60 -dejohn 60 -democratic-farmer-labor 60 -lbs. 60 -small-school 60 -nur-pashi 60 -bollaert 60 -crookedly 60 -talloires 60 -kringen 60 -dopfer 60 -#-#-year 60 -defeatists 60 -madiot 60 -full-member 60 -vyachorka 60 -haixi 60 -sydenham 60 -#-emilie 60 -acuvue 60 -telser 60 -cumpston 60 -sokhina 60 -bancolombia 60 -crisis-prone 60 -gmhc 60 -pbf 60 -nisi 60 -revolutionizes 60 -azzahar 60 -money-draining 60 -double-bogeying 60 -gauvreau 60 -chabris 60 -leg-break 60 -tecbud 60 -ginnifer 60 -osoria 60 -nigeria-unrest 60 -turjanzadeh 60 -self-affirmation 60 -minuto 60 -private-property 60 -wambui 60 -bodao 60 -gwalia 60 -vladimirs 60 -natural-foods 60 -prolapsed 60 -vdis 60 -cullerton 60 -egawa 60 -aspl 60 -windu 60 -recession-fighting 60 -riss 60 -on-land 60 -ngap 60 -perjure 60 -cochaired 60 -mpumelelo 60 -fixed-wireless 60 -poots 60 -luneville 60 -hands-only 60 -bacashihua 60 -chancing 60 -mazrouei 60 -nabiyev 60 -mideast-summit 60 -baler 60 -genii 60 -bleustein 60 -unmixed 60 -phrenology 60 -iica 60 -shevaun 60 -orch 60 -zaramba 60 -china-latin 60 -ujc 60 -heeler 60 -sparseness 60 -percudani 60 -islam#uk 60 -rbos 60 -gb-lak 60 -cobbled-together 60 -half-owner 60 -d'entremont 60 -ation 60 -translucence 60 -luxgen 60 -ahhs 60 -posibles 60 -kulyab 60 -stalement 60 -anti-car 60 -o'crowley 60 -austria-crime-incest 60 -zuazo 60 -sugarbaker 60 -rougemont 60 -brugmann 60 -seidensticker 60 -rambla 60 -pensonic 60 -gimbal 60 -naugahyde 60 -longneck 60 -bumpings 60 -puffball 60 -selolwane 60 -anti-climatic 60 -crucibles 60 -penetta 60 -fatehpur 60 -baixada 60 -ukrop 60 -udoto 60 -yulieski 60 -chidren 60 -karwan 60 -zonca 60 -rapid-ascent 60 -sayle 60 -actuator 60 -multireligious 60 -gotchas 60 -torossian 60 -bonici 60 -fellmeth 60 -smatterings 60 -glycerol 60 -batigol 60 -adamany 60 -longball 60 -duck-and-cover 60 -boxford 60 -flounce 60 -seabeds 60 -shui-tsai 60 -sreerema 60 -wentzville 60 -mckeel 60 -ecla 60 -cheol 60 -tosta 60 -stayaways 60 -qazigund 60 -zolkin 60 -lyuboslav 60 -ka-## 60 -bellaart 60 -marcovicci 60 -dream-come-true 60 -winnemucca 60 -delbarton 60 -filmtec 60 -no-drive 60 -election-reform 60 -escoto 60 -###,###-capacity 60 -supertyphoon 60 -wakaazuma 60 -unsurmountable 60 -empcar 60 -members-in-waiting 60 -virola 60 -g\/h 60 -manch 60 -ducker 60 -kornilov 60 -metsa-botnia 60 -moistening 60 -coequal 60 -acga 60 -rupesh 60 -wilkinsburg 60 -brashest 60 -khoramshahi 60 -inverter 60 -co-offensive 60 -oscar-night 60 -etymological 60 -tranter 60 -eckenrode 60 -sireta 60 -smtp 60 -dadey 60 -rehak 60 -gooneratne 60 -jurie 60 -amte 60 -troyens 60 -nametag 60 -lops 60 -petach 60 -soliev 60 -#,###-peso 60 -re-sentenced 60 -berdiyev 60 -phang-nga 60 -ptuj 60 -navickas 60 -glassmakers 60 -single-ticket 60 -holderman 60 -stratcom 60 -bhartiya 60 -portentously 60 -horlock 60 -##\/#-year-old 60 -hertog 60 -krissoff 60 -pinschers 60 -coverlets 60 -galaxie 60 -barmaids 60 -harmetz 60 -communist-bloc 60 -cavallier 60 -relizane 60 -stauring 60 -kousa 60 -#,###-car 60 -thobela 60 -disavowals 60 -bistrong 60 
-relevent 60 -vibert 60 -return-path 60 -lamagna 60 -atthe 60 -satrio 60 -leebove 60 -sallai 60 -health-oriented 60 -isra 60 -bidvest 60 -gasperoni 60 -crispo 60 -gortat 60 -garavani 60 -torgau 60 -ikeyama 60 -kocian 60 -zhihe 60 -catapano 60 -taxmen 60 -quittner 60 -israelson 60 -troi 60 -toevs 60 -germany\/milram 60 -odintsov 60 -chango 60 -latag 60 -balcytis 60 -antolini 60 -xingyi 60 -nienaber 60 -shiso 60 -boskov 60 -resource-hungry 60 -rouland 60 -mpe 60 -hindu-christian 60 -dodt 60 -koenigstein 60 -once-rich 60 -generator-powered 60 -bidemi 60 -khi 60 -kniazkov 60 -taborsky 60 -lehya 60 -zylstra 60 -schosberg 60 -emergency-management 60 -panne 60 -closedown 60 -freemason 60 -higginbottom 60 -erogenous 60 -plaatjes 60 -solyndra 60 -commerciality 60 -gorky-# 60 -i-opener 60 -#.##-carat 60 -alispahic 60 -porzio 60 -cummerbunds 60 -gaku 60 -borght 60 -valicevic 60 -icier 60 -otwell 60 -alighieri 60 -milsteins 60 -machi 60 -one-stop-shop 60 -herperger 60 -hypergrowth 60 -chaffey 60 -henare 60 -peterbilt 60 -sieben 60 -escarre 60 -cross-disciplinary 60 -tahj 60 -homestands 60 -ellenson 60 -perons 60 -vanoy 60 -hafed 60 -mingshan 60 -in-migration 60 -ivancic 60 -dramani 60 -asia-middle 60 -magoffin 60 -hertzfeld 60 -####\/### 60 -larena 60 -njue 60 -hela 60 -nigerian-registered 60 -chraidi 60 -saint-jean-de-maurienne 60 -onsale 60 -taneski 60 -blagoj 60 -pcij 60 -bomb-thrower 60 -tai-shan 60 -fertel 60 -trasvina 60 -shtml 60 -sakaiminato 60 -kilar 60 -sifang 60 -snorkeled 60 -gentian 60 -dito 60 -transbourse 60 -machinga 60 -growth-enhancing 60 -reclogging 60 -#-roque 60 -cmf 60 -putaway 60 -klinge 60 -pitching-rich 59 -alights 59 -#-mariano 59 -shatz 59 -koskoff 59 -sesssion 59 -dowthitt 59 -kriens 59 -revering 59 -nanhu 59 -menduh 59 -issers 59 -gabali 59 -aristolochia 59 -magrino 59 -sb# 59 -piledriver 59 -skywalks 59 -czapiewski 59 -centerstage 59 -blandest 59 -prescriptives 59 -heartlessly 59 -vitalia 59 -anchorages 59 -http://www.nrlc.org 59 -pittston 59 -high-payroll 59 -makahs 59 -shobokshi 59 -bandwagons 59 -krivenik 59 -gamey 59 -year-plus 59 -##-billion-yen 59 -antwun 59 -haniel 59 -longer-lived 59 -saheli 59 -minutewomen 59 -gray-and-white 59 -afran 59 -dysphonia 59 -anau 59 -flemmons 59 -kabushenga 59 -anti-france 59 -simecek 59 -greenwalt 59 -wavelet 59 -zarai 59 -schweickart 59 -iicd 59 -trupin 59 -nuez 59 -tobiass 59 -outofthebox 59 -o'scannlain 59 -haqq 59 -wrcf##a 59 -holberton 59 -germplasm 59 -halide 59 -wine-colored 59 -authories 59 -harpal 59 -high-emission 59 -kombu 59 -cde 59 -horakova 59 -kinderhook 59 -ocksman 59 -ayd 59 -ubdina 59 -subhan 59 -moistness 59 -kampmeier 59 -turyk-wawrynowicz 59 -kawachi 59 -northerns 59 -mangga 59 -adv##-cox 59 -bukvich 59 -budhi 59 -d-word 59 -phap 59 -glimmerings 59 -recondition 59 -stuhlbarg 59 -obfuscated 59 -collingswood 59 -thiew 59 -varberg 59 -california-grown 59 -akbank 59 -#.#-feet 59 -esmie 59 -amanya 59 -re-filed 59 -bonani 59 -bednarz 59 -economatica 59 -tuf 59 -water-recycling 59 -athanasia 59 -jennekvist 59 -huidong 59 -issues-oriented 59 -woong-bae 59 -kochis 59 -air-dried 59 -brunken 59 -http://www.enron.com 59 -handgrenades 59 -deaf-mutes 59 -esops 59 -darulaman 59 -chanachai 59 -aponavicius 59 -upper-hand 59 -baszczynski 59 -culley 59 -ncaer 59 -calcagni 59 -bolarinwa 59 -brumos 59 -velha 59 -lathem 59 -sadoff 59 -volumen 59 -pm-elect 59 -town-house 59 -backpacked 59 -##-yarders 59 -borrelia 59 -agostinelli 59 -rundles 59 -sentir 59 -asjylyn 59 -chimalapas 59 -tetherow 59 -kinan 59 -tegan 59 -gjenero 59 -dl-hla 59 
-estigarribia 59 -once-moribund 59 -biersack 59 -khitan 59 -koury 59 -cricket-ind-aus 59 -http://blogs.timesunion.com/mcguire 59 -muzahim 59 -woul 59 -sck 59 -dongzhimen 59 -fanciulla 59 -arianne 59 -koszics 59 -meadwestvaco 59 -staveley 59 -timb 59 -levs 59 -super-spy 59 -yaish 59 -araz 59 -tabman 59 -basanti 59 -peace-enforcement 59 -cesaria 59 -neopolitan 59 -mispronunciations 59 -counter-cyclical 59 -disarmement 59 -chavhanga 59 -gayler 59 -mercedez-benz 59 -recollects 59 -rundall 59 -chen-chung 59 -malusa 59 -gereida 59 -decha 59 -lemberger 59 -bakoyiannis 59 -gogi 59 -grinda 59 -jimson 59 -bradberry 59 -mervat 59 -bvg 59 -jingzhong 59 -endel 59 -allers 59 -zuoyun 59 -loevinger 59 -zntb 59 -mirnawan 59 -igniter 59 -x-shaped 59 -carlsson-paige 59 -cobia 59 -veillette 59 -weed-killer 59 -larbaa 59 -prn 59 -victorinox 59 -household-name 59 -##-dnp 59 -xiangning 59 -minasian 59 -frappe 59 -ozone-friendly 59 -smita 59 -non-biological 59 -syllabuses 59 -twe 59 -northwick 59 -ivashkevich 59 -un-proposed 59 -muharrem 59 -calorie-free 59 -kolwezi 59 -barbados-born 59 -eu-serbia 59 -lish 59 -polyak 59 -http://www.nifc.gov/ 59 -kenro 59 -iran-unrest 59 -cothran 59 -basak 59 -beezer 59 -three-strike 59 -wilkomirski 59 -coovadia 59 -serreqi 59 -bythe 59 -noncompetition 59 -post-columbine 59 -minnix 59 -mickell 59 -spyro 59 -boire 59 -topcu 59 -boureij 59 -fengyang 59 -suntrajarn 59 -etre 59 -tarsy 59 -decors 59 -yerger 59 -non-internet 59 -stuntz 59 -lazer 59 -cahaba 59 -sabtu 59 -mut 59 -fadden 59 -eckstine 59 -wawrzyniak 59 -hip-swiveling 59 -plateaux 59 -shih-fang 59 -varah 59 -schwertner 59 -malfi 59 -scheeren 59 -rousselot 59 -bircher 59 -goldsberry 59 -charteau 59 -sullenness 59 -omoro 59 -alipov 59 -tax-cutter 59 -gayer 59 -cortinovis 59 -kettleman 59 -remodeler 59 -hard-punching 59 -magomedali 59 -telesleuth 59 -smajlovic 59 -nkem 59 -sea-green 59 -morobe 59 -nodia 59 -biss 59 -dawan 59 -abolfazl 59 -alberton 59 -faial 59 -picardi 59 -muchall 59 -no-change 59 -enfranchisement 59 -kijevo 59 -salpigidis 59 -imjingak 59 -anfrel 59 -mubarek 59 -chudy 59 -broadway-notes 59 -near-miraculous 59 -lefse 59 -cacace 59 -coffield 59 -out-of-hand 59 -nuhanovic 59 -chelule 59 -jumagulov 59 -elasticized 59 -non-existing 59 -sperl 59 -junri 59 -manders 59 -substructure 59 -cahaya 59 -segamat 59 -kharas 59 -dds 59 -snitched 59 -monforts 59 -marantha 59 -green-blue 59 -sukawaty 59 -troutt 59 -asics-cga 59 -part-ownership 59 -oline 59 -leanna 59 -merl 59 -rockhopper 59 -shakiba 59 -parlez-vous 59 -costilla 59 -industry.net 59 -logvinenko 59 -kaforey 59 -finnish-born 59 -error-plagued 59 -rodnina 59 -steel-mesh 59 -thrushes 59 -nesvig 59 -arcapita 59 -wellstream 59 -naushad 59 -enad 59 -marxist-leninists 59 -optimising 59 -jl-bg 59 -pecina 59 -calamine 59 -brinner 59 -keion 59 -hatra 59 -sobota 59 -fifita 59 -akayeva 59 -#-million-acre 59 -##-marcelo 59 -curto 59 -battallion 59 -socko 59 -parviainen 59 -all-southeastern 59 -ekland 59 -chinotimba 59 -escamillo 59 -wartelle 59 -barely-there 59 -bookbuilding 59 -brightly-lit 59 -kopeck 59 -pno 59 -pelvises 59 -scuffs 59 -terminix 59 -snow\/cloudy 59 -perraud 59 -introversion 59 -thielemans 59 -ayalew 59 -sardo 59 -debunkers 59 -snow-topped 59 -inuktitut 59 -straberg 59 -keech 59 -voter-id 59 -leoncavallo 59 -harmes 59 -clay.robison@chron.com 59 -sujeeva 59 -image-maker 59 -leet 59 -wister 59 -four-tournament 59 -two-games-to-none 59 -consuela 59 -prokhorova 59 -okin 59 -cholakis 59 -policharki 59 -golf-ball 59 -bischof 59 -languorously 59 -bracanov 59 -joellen 
59 -coryo 59 -party-hopping 59 -hangchow 59 -th# 59 -loetschberg 59 -yuxia 59 -anthrax-related 59 -radio-ready 59 -fethiye 59 -shak 59 -current-generation 59 -grotowski 59 -pakorn 59 -garonne 59 -mknobler@ajc.com 59 -buryak 59 -green-minded 59 -barril 59 -yamaki 59 -macheyo 59 -work\/life 59 -fessler 59 -rheingau 59 -wahdan 59 -hugel 59 -conoley 59 -hellcat 59 -poydras 59 -kazoos 59 -gollogly 59 -discoursing 59 -pungue 59 -twilson 59 -aivars 59 -torrelavega 59 -assadollah 59 -ornskoldsvik 59 -syaifudin 59 -doshisha 59 -difazio 59 -portioned 59 -esap 59 -pop\/contemporary 59 -#the 59 -belluscio 59 -afficionados 59 -shargin 59 -caulks 59 -barnesville 59 -al-mahmud 59 -topalli 59 -avowal 59 -skil 59 -jeschke 59 -pieke 59 -p&j 59 -matillano 59 -soft-loan 59 -soon-to-be-former 59 -pittle 59 -transposing 59 -parmelee 59 -fessel 59 -obudu 59 -island-republic 59 -coutry 59 -golpe 59 -vakhayev 59 -vllaznia 59 -karzai-appointed 59 -invs 59 -post-abortion 59 -chianwala 59 -ennoble 59 -neradko 59 -pujobroto 59 -chikowi 59 -ansah 59 -litang 59 -palaghiaccio 59 -euroepan 59 -y-net 59 -cnpp 59 -steroid-distribution 59 -maulawi 59 -grandison 59 -kamungozi 59 -tonneau 59 -torchings 59 -four-candidate 59 -pallium 59 -kljajevic 59 -#.##-liter 59 -buy-to-let 59 -fanzhi 59 -sunanda 59 -taymour 59 -remanufactured 59 -schopenhauer 59 -anti-syrians 59 -anti-affirmative-action 59 -petrochemia 59 -kptc 59 -lobohombo 59 -mongomo 59 -janss 59 -sabharwal 59 -landholding 59 -##-per-month 59 -aftertax 59 -sardjoe 59 -placings\/standings 59 -garrec 59 -johnson-freese 59 -yablonsky 59 -stojkov 59 -wenzao 59 -water-conservation 59 -lawner 59 -front-loader 59 -anaheim-based 59 -doulos 59 -dimapur 59 -vashchuk 59 -yorked 59 -mewar 59 -five-pointer 59 -warrell 59 -ndiwa 59 -shrage 59 -poblanos 59 -black-run 59 -auslese 59 -banducci 59 -carless 59 -suhardjono 59 -extroverts 59 -bavarian-style 59 -calorie-laden 59 -sportbild 59 -robo-signers 59 -bucchi 59 -bourne\/victor 59 -oesterle 59 -renda 59 -mortgage-market 59 -hasibul 59 -bohren 59 -human-computer 59 -half-moons 59 -al-kadhimi 59 -reguera 59 -achike 59 -schnee 59 -atje 59 -esp\/eus 59 -lamplight 59 -solimoes 59 -loua 59 -elkan 59 -vitamin-enriched 59 -deskovic 59 -burgmans 59 -miaa 59 -ascorbic 59 -spain\/festina 59 -#-##-### 59 -agro-industries 59 -negociaciones 59 -skirt-chasing 59 -gph##bb 59 -minister-counselor 59 -penhall 59 -hat-in-hand 59 -autograph-seeking 59 -temodar 59 -othmani 59 -calida 59 -panichgul 59 -blameworthy 59 -burkitt 59 -foppert 59 -optionsxpress 59 -r-salem 59 -onassio 59 -teruyuki 59 -calstart 59 -fitzcarraldo 59 -tetrazzini 59 -bohme 59 -carinii 59 -devilliers 59 -artemyev 59 -bozkurt 59 -pacificor 59 -komisar 59 -weijun 59 -zehri 59 -challe 59 -kingi 59 -zurcher 59 -svansicklersptimes.com 59 -ziso 59 -personal-foul 59 -myanmar-language 59 -italian-french 59 -yung-ping 59 -sadbhawana 59 -waywardness 59 -lockard 59 -monib 59 -controlee 59 -maroga 59 -public-listed 59 -embrey 59 -ched 59 -five-power 59 -saynudin 59 -qasemi 59 -nuzum 59 -microdrive 59 -re-investigation 59 -conflicto 59 -dasanayake 59 -xynthia 59 -wormholes 59 -baoming 59 -rappleyea 59 -ecotech 59 -pre-oscars 59 -minx 59 -yator 59 -jobbed 59 -peccadillos 59 -societa 59 -sadriya 59 -wackness 59 -a.b 59 -dychtwald 59 -libancell 59 -two-hundred 59 -egs 59 -contrada 59 -interferring 59 -shajiao 59 -zeidane 59 -tengger 59 -srinagar-muzaffarabad 59 -kurosaki 59 -yidong 59 -nchito 59 -abdille 59 -tid 59 -raw-vid###### 59 -ski-world 59 -blockings 59 -gedeck 59 -localhost 59 -dooky 59 
-khafagy 59 -coppock 59 -non-gay 59 -simec 59 -#,###-yuan 59 -dementias 59 -galatioto 59 -titter 59 -nagyvary 59 -ayahuasca 59 -gordon-conwell 59 -alberto-culver 59 -houtan 59 -waspish 59 -unimaginatively 59 -dead-serious 59 -controvery 59 -ingels 59 -maxton 59 -forefingers 59 -ibata 59 -ticknor 59 -lazne 59 -gidel 59 -blache 59 -schormann 59 -drop-by 59 -nationalbank 59 -shuanghe 59 -freetel 59 -###-###-#-###-#### 59 -averitt 59 -anti-earthquake 59 -macefield 59 -trali 59 -witbooi 59 -patru 59 -unhappiest 59 -pervomaysk 59 -realy 59 -applera 59 -est\/a#r 59 -utada 59 -oil-trading 59 -al-sammarai 59 -muria 59 -trapasso 59 -diefenderfer 59 -movie-quality 59 -maslova 59 -gedz 59 -bright-yellow 59 -al-fahal 59 -razziq 59 -corbelli 59 -decanted 59 -black-and-silver 59 -cubanos 59 -towelettes 59 -http://www.weforum.org 59 -iia 59 -sirba 59 -cads 59 -kapetanos 59 -disses 59 -ltc 59 -debbasch 59 -ft.com 59 -graito 59 -staletovich 59 -debreu 59 -merisant 59 -near-collisions 59 -lokuge 59 -software-maker 59 -stephanus 59 -duba 59 -novik 59 -mayombo 59 -salvato 59 -mets-yankees 59 -oskars 59 -titmice 59 -nikolovski 59 -deanda 59 -restrictionist 59 -khadduri 59 -onterrio 59 -thodey 59 -perroud 59 -foping 59 -agboville 59 -isin 59 -wednesday-thursday 59 -ohmori 59 -jong-chan 59 -acocella 59 -buggery 59 -houphouet 59 -sanofi-pasteur 59 -xxxxxxxxend 59 -ohel 59 -suburbanized 59 -tekeda 59 -well-proportioned 59 -counter-punch 59 -selyem 59 -dawlat 59 -egas 59 -three-punch 59 -fouke 59 -gas-to-liquids 59 -edan 59 -hollopeter 59 -hintsa 59 -addenbrooke 59 -mountaga 59 -zogu 59 -bxe# 59 -bushinsky 59 -martinage 59 -kaufer 59 -pingeon 59 -mazahir 59 -shao-haei 59 -briner 59 -nemea 59 -york-headquartered 59 -anytown 59 -kleptocratic 59 -non-supervisory 59 -aix-les-bains 59 -conocer 59 -heit 59 -reinhilt 59 -clear-cuts 59 -atong 59 -haefner 59 -al-taweel 59 -sholokhov 59 -nezavisne 59 -kamoga 59 -monsalve 59 -zanussi 59 -kaller 59 -fidal 59 -moloi 59 -neck-to-ankle 59 -shirvani 59 -non-saudis 59 -kharg 59 -c-murder 59 -fingernail-sized 59 -gjomle 59 -piccioli 59 -theater-in-the-round 59 -honnold 59 -shingirai 59 -vending-machine 59 -mini-applications 59 -krusty 59 -e-services 59 -adressing 59 -taht 59 -zacharatos 59 -coupler 59 -aparri 59 -edenton 59 -dicke 59 -one-###th 59 -flamme 59 -gladiolas 59 -mcnee 59 -carnero 59 -www.caiso.com 59 -grabar-kitarovic 59 -felsen 59 -memes 59 -reinjected 59 -edwar 59 -favier 59 -yokomine 59 -slamed 59 -poll-watchers 59 -azizulhasni 59 -gracin 59 -tank-automotive 59 -halvari 59 -gloversville 59 -aleksejs 59 -louisianan 59 -clerkships 59 -sharapov 59 -ill-chosen 59 -clerestory 59 -nasa-funded 59 -minella 59 -karnaugh 59 -phoned-in 59 -taibe 59 -schondelmeyer 59 -d'arby 59 -u.s-china 59 -one-hitters 59 -chephren 59 -caru 59 -bachleda 59 -nategh-nuri 59 -gaitley 59 -kiecolt-glaser 59 -watmore 59 -orangeman 59 -arch-villain 59 -http://www.amd.com 59 -powderject 59 -toker 59 -stubbled 59 -ohnishi 59 -requesters 59 -helicoptering 59 -tahoes 59 -morong 59 -rahmonov 59 -ledanois 59 -borodulina 59 -springmann 59 -keynotes 59 -saith 59 -pallidotomy 59 -lusso 59 -shabaa 59 -byrdak 59 -vance-owen 59 -ibertsberger 59 -massih 59 -artyukhov 59 -lip-synch 59 -benziman 59 -schaer 59 -yormark 59 -zuanazzi 59 -dejohnette 59 -maternally 59 -gyurkovics 59 -haleiwa 59 -unprecented 59 -totus 59 -dshir 59 -yeakel 59 -lavecchia 59 -turbo-generators 59 -sanoma 59 -simiyu 59 -hmnzs 59 -sabagh 59 -decoratifs 59 -lehder 59 -suryo 59 -samaraneftegaz 59 -kingfishers 59 -quasi-socialist 59 -jurnee 59 
-fischbein 59 -maotai 59 -turgot 59 -karimirad 59 -osnes 59 -margarian 59 -krul 59 -raho 59 -bibring 59 -shaktoolik 59 -wendler 59 -baim 59 -hellwig 59 -sugarcane-based 59 -al-shaar 59 -suomi 59 -guittard 59 -amirov 59 -chafer 59 -reconnoitering 59 -re-forming 59 -thweatt 59 -tax-writers 59 -oladapo 59 -sidelocks 59 -bedaux 59 -aravind 59 -fruit-growing 59 -koror 59 -light-year 59 -adat 59 -utm 59 -keenans 59 -grecia 59 -palestinian-jordanian 59 -pro-choicers 59 -liuhua 59 -sujit 59 -milchan 59 -weather-philippines-typhoon 59 -additonal 59 -leukemias 59 -http://www.continental.com 59 -parrothead 59 -afghanistan-unrest-taliban 59 -re-submitted 59 -cup-champion 59 -jitloff 59 -gungoren 59 -al-shall 59 -photochemical 59 -grandee 59 -chi-chao 59 -cash-raising 59 -nanri 59 -pro-prosecution 59 -maicosuel 59 -plourde 59 -tarell 59 -laquidara 59 -harkonen 59 -microdermabrasion 59 -shootarounds 59 -cryopreservation 59 -cockfield 59 -yb\/jh 59 -phromphan 59 -maxa 59 -flatliners 59 -extranjera 59 -calaio 59 -tjh-rjm 59 -post-performance 59 -apriantono 59 -rawn 59 -humadi 59 -lupsha 59 -bikita 59 -road-rail 59 -mantes-la-jolie 59 -jianyang 59 -shaweesh 59 -migliaro 59 -larrikin 59 -muju 59 -delahunty 59 -no\/okla 59 -gracen 59 -penalty-filled 59 -kitui 59 -ernsting-krienke 59 -retela 59 -ntust 59 -cordel 59 -verkhovsky 59 -hatha 59 -meat-eater 59 -musker 59 -less-publicized 59 -cityu 59 -tla 59 -fancying 59 -pankin 59 -simasiku 59 -spinaway 59 -hedija 59 -mersenne 59 -dunshee 59 -dalipi 59 -ducale 59 -bruene 59 -army-owned 59 -greatorex 59 -karsiyaka 59 -martinu 59 -shamala 59 -brimfield 59 -atlantics 59 -utuado 59 -conneally 59 -hye-youn 59 -sharpstown 59 -signore 59 -atlanta-journal 59 -negoesco 59 -plotnikov 59 -linscott 59 -despereaux 59 -ligang 59 -survivals 59 -okolski 59 -fang-yue 59 -distrigaz 59 -borana 59 -conable 59 -sorest 59 -rompers 59 -batubara 59 -kencell 59 -ibi 59 -tin-plated 59 -shlain 59 -soybean-based 59 -tallberg 59 -fibras 59 -selda 59 -inter-dependence 59 -chunkier 59 -college-jewish 59 -chiune 59 -bredahl 59 -half-starved 59 -dymchenko 59 -suspectedly 59 -dimitrakos 59 -jf-## 59 -dury 59 -mcgivney 59 -cakl 59 -###-storey 59 -kishor 59 -vionnet 59 -torlen 59 -cciced 59 -car-bombs 59 -momsen 59 -coach-class 59 -badmouthed 59 -liebermans 59 -smain 59 -sun-loving 59 -squeaky-voiced 59 -three-pack 59 -primex 59 -remixer 59 -well-articulated 59 -run-first 59 -secret-agent 59 -wismar 59 -pendulous 59 -agro-technicians 59 -supercedes 59 -concepto 59 -back-to-the-land 59 -shamoon 59 -isnt 59 -vergina 59 -kolesar 59 -delpierre 59 -volchkov 59 -baixa 59 -pentacostal 59 -mpca 59 -rwindi 59 -technimetrics 59 -beijing-hangzhou 59 -dapa 59 -divisionism 59 -burcham 59 -bean-counters 59 -serravalle 59 -agni-ii 59 -texana 59 -two-masted 59 -yumen 59 -man-of-war 59 -well-nourished 59 -ktvu 59 -brialy 59 -vlashi 59 -palanggoon 59 -essi 59 -dreamlife 59 -culbreath 59 -gosingan 59 -tejinder 59 -million-to-one 59 -ferozabad 59 -movsisyan 59 -birdie-eagle 59 -taeb 59 -biomolecules 59 -cynara 59 -mogas 59 -halbrook 59 -finstrom 59 -upmc 59 -activist-journalist 59 -state-guided 59 -scitrek 59 -newyork 59 -rosenbauer 59 -braker 59 -bds 59 -chain-smokes 59 -divyang 59 -cinemania 59 -norwegian-based 59 -bowlegged 59 -tosoh 59 -isbin 59 -stamatis 59 -###zx 59 -stroem-erichsen 59 -re-organized 59 -moughniyah 59 -mulpuru 59 -naeringsliv 59 -zitko 59 -caudillos 59 -t-systems 59 -al-shayea 59 -awesomeness 59 -cityvote 59 -desanctis 59 -luckenbill 59 -www.tsa.gov 59 -quatrano 59 -tetuan 59 -seven-million-dollar 
59 -bienville 59 -abou-treika 59 -akutagawa 59 -lasn 59 -ouseph 59 -brand-conscious 59 -ultra-cool 59 -supporting-acting 59 -vivar 59 -afanasyeva 59 -raes 59 -faliron 59 -soru 59 -jobski 59 -dodonnell@nycap.rr.com 59 -scadden 59 -arditi 59 -doornekamp 59 -nexabit 59 -livetv 59 -duplin 59 -endurance-booster 59 -radionuclides 59 -volmar 59 -gittings 59 -coile 59 -overtown 59 -lesi 59 -agarwalla 59 -brooksley 59 -trifonov 59 -bizera 59 -sino-chilean 59 -ip-based 59 -condren 59 -half-size 59 -kwu 59 -kangol 59 -arlys 59 -binstock 59 -libyan-backed 59 -rayed 59 -alimentary 59 -grained 59 -atoki 59 -negations 59 -oasen 59 -arab-mediated 59 -mozena 59 -serioux 59 -bjervig 59 -hot-spring 59 -gruet 59 -#-inch-deep 59 -linker 59 -glycine 59 -chimere 59 -hamoked 59 -smyrek 59 -monkish 59 -peraliya 59 -gorospe 59 -eight-month-long 59 -galwey 59 -mastitis 59 -goupil 59 -megaresort 59 -nutraceuticals 59 -ransford 59 -subterfuges 59 -pronostica 59 -reinspection 59 -ho-yeol 59 -apperson 59 -thanomsak 59 -jadideh 59 -wevill 59 -cepelova 59 -aera 59 -kuehbacher 59 -ntumba 59 -iphitos 59 -yanis 59 -isolda 59 -o'shanter 59 -scheuring 59 -black-listed 59 -tan-colored 59 -kayhian 59 -pro-german 59 -stiletto-heeled 59 -mastan 59 -dhamar 59 -creisson 59 -tarasenko 59 -getahun 59 -pordenone 59 -hentunen 59 -underlayment 59 -al-muslimeen 59 -gardy 59 -leukocyte 59 -thermography 59 -matchpoint 59 -hongyi 59 -obertan 59 -primeeast 59 -castellvi 59 -isitolo 59 -pro-competition 59 -laryngeal 59 -xianghe 59 -eurozone-imf 59 -nigeria-oil-unrest-kidnap 59 -nader-camejo 59 -kyrastas 59 -seltsovsky 59 -spain\/astana 59 -beran 59 -foudre 59 -gaudier 59 -france\/cofidis 59 -interenergoservice 59 -gaar 59 -amberleigh 59 -shoora 59 -stehr 59 -dehoyos 59 -wait-listed 59 -meuleman 59 -kennerly 59 -yue\/niu 59 -ananiev 59 -komrskova 59 -jmh 59 -xianfeng 59 -xg### 59 -rane 59 -iturriaga 59 -bely 59 -sekondi 59 -gentex 59 -aleisha 59 -clenched-fist 59 -baraybar 59 -angolite 59 -camp-out 59 -quandry 59 -phymatrix 59 -sidecars 59 -henoch 59 -subandi 59 -baldin 59 -macia 59 -oynes 59 -pre-registered 59 -lanne 59 -garitano 59 -debarked 59 -duval-scott 59 -malambo 59 -hernreich 59 -sankofa 59 -pracharaj 59 -busies 59 -cly 59 -rassul 59 -keobounphanh 59 -per-year 59 -covenas 59 -on-wine 59 -nederlandse 59 -nontitle 59 -goave 59 -fuchsova 59 -monroes 59 -nadzmi 59 -joerres 59 -bogere 59 -shanhua 59 -szalay 59 -duf 59 -gate-crashers 59 -chidamabaram 59 -kirchpaytv 59 -ruweished 59 -andjelko 59 -ostrager 59 -atp-monte 59 -bridgers 59 -p#c 59 -damba 59 -daisey 59 -non-insurance 59 -frappuccinos 59 -missile-equipped 59 -cunneyworth 59 -kostiantyn 59 -zubr 59 -kimmi 59 -gatorland 59 -waxen 59 -sonke 59 -gramm-rudman 59 -rabon 59 -cumani 59 -hirshson 59 -harouna 59 -multi-user 59 -mccleave 59 -nemerov 59 -ejegayehu 59 -rivenbark 59 -less-privileged 59 -rotundo 59 -duangchalerm 59 -speechmaker 59 -ching-piao 59 -underachieve 59 -bakala 59 -suweidi 59 -adriaenssens 59 -autobytel 59 -willimon 59 -clean-out 59 -mazdas 59 -mochida 59 -volkogonov 59 -jasen 59 -waple 59 -podlodowski 59 -cardia 59 -traffick 59 -carpentaria 59 -harrad 59 -foradil 59 -zaveryukha 59 -rueda-denvers 59 -esperan 59 -davis-monthan 59 -over-aggressive 59 -huracan-tres 59 -redaction 59 -begles 59 -kupusovic 59 -goskomstat 59 -cents-per-share 59 -slopped 59 -#-paolo 59 -#-paola 59 -shafiullah 59 -gold-digger 59 -rawle 59 -garnishment 59 -aguta 59 -chartchai 59 -debevec 59 -first-wave 59 -haidt 59 -neider 59 -bsheets 59 -sheja'eya 59 -omofuma 59 -jk-hla 59 -jila 59 -fomca 59 
-kozel 59 -phuntsog 59 -soc-intlnotes 59 -neils 59 -jebril 59 -xiaojie 59 -enrica 59 -zhare 59 -sainvil 59 -fsh 59 -fsg 59 -dbu 59 -bocalandro 59 -terror-stricken 59 -nine-plus 59 -health-guestworkout 59 -adie 59 -sissinghurst 59 -subglacial 59 -praefcke 59 -over-the-head 59 -zerlentes 59 -geosystems 59 -mescheriakova 59 -apolloni 59 -batat 59 -shortest-serving 59 -red-dirt 59 -naziunalista 59 -pellucida 59 -near-deserted 59 -olanzapine 59 -once-beautiful 59 -woon-kwong 59 -fixit 59 -reekers 59 -surgically-repaired 59 -hatipoglu 59 -mannichl 59 -latessa 59 -naturists 59 -berquist 59 -veldakova 59 -detabali 59 -sleep-related 59 -asean-us 59 -hiscock 59 -much-reviled 59 -air-ground 59 -once-taboo 59 -romilly 59 -bossem-levy 59 -health-sars-taiwan 59 -dymock 59 -lynge 59 -piranesi 59 -consistencies 59 -nelson-bond 59 -shusaku 59 -vax 59 -rhythmical 59 -all-premier 59 -jorquera 59 -shikhar 59 -camel-colored 59 -philomene 59 -vancomycin-resistant 59 -einaugler 59 -mutrif 59 -martitegi 59 -neustar 59 -ganzuri 59 -eufaula 59 -quichua 59 -thundercloud 59 -ex-christian 59 -havisham 59 -curcic 59 -talledega 59 -sung-man 59 -tr\/vls 59 -malgieri 59 -college-entrance 59 -bourkoff 59 -gambol 59 -swick 59 -calvinism 59 -ghettoization 59 -anti-rollover 59 -kamark 59 -forded 59 -repurpose 59 -gupte 59 -athans 59 -polhemus 59 -keshia 59 -ramachandra 59 -kl-gm 59 -guolla 59 -brazil-plane 59 -porato 59 -poms 59 -post-trauma 59 -ubud 59 -semi-circular 59 -blowhole 59 -inflation-related 59 -flori 59 -barhoumi 59 -barolos 59 -madritsch 59 -kulis 59 -weerts 59 -sardis 59 -saravanan 59 -kfda 59 -grapefruit-sized 59 -mucke 59 -saifudin 59 -time-slot 59 -xinli 59 -downwinders 59 -rotcheva 59 -hemdan 59 -brasses 59 -cowpox 59 -mushota 59 -bounder 59 -ex-lax 59 -tibs 59 -narcocorrido 59 -siok 59 -final-lap 59 -holczer 59 -kresse 59 -ibtisam 59 -emy 59 -al-kurd 59 -outside-the-beltway 59 -legitimises 59 -llamo 59 -incendiaries 59 -topoff 59 -asloum 59 -sabelli 59 -turkey-based 59 -krupnikovic 59 -lezion 59 -outswinging 59 -pesach 59 -mercury-free 59 -baerbel 59 -pranav 59 -khokhlova\/sergei 59 -primecap 59 -lukko 59 -bttb 59 -semprun 59 -zohur 59 -fbc-usc 59 -ulrica 59 -round-ups 59 -maastrict 59 -charbroiled 59 -son-in 59 -segat 59 -transfield 59 -government-rescued 59 -tryggve 59 -odling-smee 59 -canelas 59 -swd 59 -bapu 59 -xcp 59 -keulder 59 -haipe 59 -al-hedoud 59 -efthymiou 59 -yalman 59 -ratsirahonana 59 -too-strong 59 -polonaise 59 -aramburuzabala 59 -najafabad 59 -plaited 59 -jailbait 59 -itagui 59 -co-rookie 59 -thonier 59 -iason 59 -low-probability 59 -ligocka 59 -tetzlaff 59 -glassblowers 59 -everard 59 -ettienne 59 -granlund 59 -bever 59 -carematrix 59 -votevets.org 59 -ghinwa 59 -amoussouga 59 -brigade-size 59 -manats 59 -mrazova 59 -kupelo 59 -dekay 59 -vanzekin 59 -calixte 59 -paktin 59 -givry 59 -chocolatey 59 -poverty-eradication 59 -scandal-prone 59 -el#l 59 -good-for-nothing 59 -sailosi 59 -tuguegarao 59 -struk 59 -vancura 59 -jean-mary 59 -reemployed 59 -durling 59 -xinhua-run 59 -chadd 59 -over-expansion 59 -miyamura 59 -diani 59 -blinov 59 -stick-swinging 59 -sparxxx 59 -blagoi 59 -flohr 59 -casselman 59 -magnifico 59 -temerlin 59 -kirm 59 -bambu 59 -gohlke 59 -maniglia 59 -kamphuis 59 -moodily 59 -kilak 59 -anti-church 59 -leat 59 -serap 59 -danish-based 59 -bagless 59 -bluford 59 -tsygurov 59 -tahmasebi 59 -egomania 59 -u.n.-demarcated 59 -dm-pyg 59 -crianza 59 -ghafari 59 -mottus 58 -djamil 58 -razor-close 58 -setola 58 -cortulua 58 -turvy 58 -gechev 58 -gessel 58 -ayamas 58 -eberl 58 -leys 
58 -beating-heart 58 -khadjiev 58 -vbac 58 -steber 58 -o'liney 58 -chizuko 58 -transgressor 58 -balcavage 58 -non-voters 58 -goligoski 58 -uddi 58 -philadelphia-born 58 -schlomo 58 -gamila 58 -herranz 58 -d-ram 58 -aerating 58 -heathwood 58 -jazz-pop 58 -swindlehurst 58 -dabagh 58 -sportfive 58 -jitter 58 -peelle 58 -geste 58 -tuitele 58 -limited-liability 58 -h### 58 -heilmann 58 -onyancha 58 -holobyte 58 -ppt 58 -icelike 58 -boru 58 -ayala-cornejo 58 -singer-bassist 58 -seifullah 58 -sonrisa 58 -bogacheva 58 -dassey 58 -x-muttiah 58 -immigrant-friendly 58 -post-all-star 58 -physically-unable-to-perform 58 -albergo 58 -causado 58 -triple-triples 58 -kalhammer 58 -branquinho 58 -whang 58 -cogbill 58 -rossier 58 -pitztal 58 -breadline 58 -dollar-pegged 58 -ruffing 58 -mchinji 58 -upperhands 58 -stanely 58 -banpro 58 -stereophonic 58 -spaz 58 -dumeisi 58 -soissons 58 -evaluative 58 -vashti 58 -transouth 58 -alinea 58 -just-retired 58 -##-norandrosterone 58 -ethanol-based 58 -blauensteiner 58 -mwaba 58 -chep 58 -intercutting 58 -equal-pay 58 -valencia-based 58 -chambeshi 58 -absorptive 58 -bramson 58 -hallo 58 -diatoms 58 -al-nassiri 58 -leviton 58 -tachi 58 -miscount 58 -anthuenis 58 -brockington 58 -giuffrida 58 -screenvision 58 -limandri 58 -kaliopate 58 -weather-stripping 58 -pauwels 58 -tangaroa 58 -melanogaster 58 -bewilderingly 58 -boasso 58 -deputise 58 -chinese-north 58 -uriri 58 -shibutani 58 -ranya 58 -aramin 58 -antonacci 58 -arab-kurdish 58 -sazanovich 58 -massachusetts-amherst 58 -etti 58 -tandil 58 -electrotechnical 58 -riqueza 58 -chudasama 58 -leksand 58 -two-pointer 58 -mariangela 58 -keye 58 -nikolaev 58 -sl# 58 -jamat-ud-dawa 58 -liener 58 -ashford.com 58 -parknshop 58 -quapaw 58 -two-pack-a-day 58 -aderhold 58 -buring 58 -unrolls 58 -two-iron 58 -r-miss. 58 -fazilah 58 -okam 58 -kabuli 58 -regia 58 -sekula 58 -re-deployed 58 -v.v. 
[diff hunk: several thousand removed vocabulary entries, one "-<token> <count>" per line, frequency-sorted with counts descending from 58 to 56, from "-sporleder 58" through "-k.p.s. 56"]
56 -jarwan 56 -yamasoto 56 -l'carriere 56 -ruttenberg 56 -revver 56 -#-gisela 56 -long-shuttered 56 -westshore 56 -rendina 56 -moscardi 56 -yoani 56 -dungu 56 -http://www.mtv.com 56 -lupa 56 -money-lending 56 -futagawa 56 -al-majd 56 -sarokin 56 -masefield 56 -hatemongers 56 -#####-#####-# 56 -ci-### 56 -guderzo 56 -connoting 56 -galavision 56 -loggans 56 -fiorucci 56 -re-inspected 56 -lemenager 56 -covadonga 56 -chronometer 56 -prodon 56 -chandraswamy 56 -gatlif 56 -moevenpick 56 -giaccone 56 -charanga 56 -arti 56 -clinton-appointed 56 -extrasensory 56 -martaban 56 -essaye 56 -florentines 56 -mctague 56 -derozan 56 -bmet 56 -akher 56 -counter-corruption 56 -notepaper 56 -luzinski 56 -skanky 56 -unharvested 56 -kadosh 56 -taimyr 56 -martial-law 56 -shoreditch 56 -somersworth 56 -mass-scale 56 -lobotomies 56 -frates 56 -al-gailani 56 -cuito 56 -biters 56 -fiszmann 56 -murati 56 -vouilloz 56 -arscott 56 -goshutes 56 -fouhami 56 -palestinian-palestinian 56 -hellmer 56 -maryinsky 56 -g-class 56 -cvid 56 -hilman 56 -us-japan-space-shuttle 56 -vallop 56 -pripyat 56 -genencor 56 -nightshade 56 -kielar 56 -ginanjar 56 -botan 56 -sukamdani 56 -temizkanoglu 56 -berezovksy 56 -lithographic 56 -colbie 56 -courchesne 56 -newsbreak 56 -kwamie 56 -characterising 56 -joyriding 56 -ghilarducci 56 -mortham 56 -shaken-baby 56 -glorieta 56 -fengyun-# 56 -leibacher 56 -gardening@nytimes.com 56 -christenings 56 -discombobulating 56 -ambion 56 -stolyarov 56 -lastuvka 56 -ultra-tight 56 -xingye 56 -terrariums 56 -treaty-based 56 -edmon 56 -mangena 56 -scopolamine 56 -nobes 56 -long-separated 56 -acls 56 -skamania 56 -egx## 56 -dahduli 56 -motech 56 -intitial 56 -caffita 56 -girl-group 56 -half-expected 56 -linette 56 -soldati 56 -pend 56 -hopoate 56 -pre-castro 56 -lulin 56 -http://www.usccb.org/ 56 -non-convertible 56 -abu-zayyad 56 -step-brother 56 -artux 56 -yusanto 56 -bestiary 56 -debrum 56 -dingers 56 -khamzat 56 -ridpath 56 -adex 56 -anthracnose 56 -ten-week 56 -helbrans 56 -shoehorning 56 -:####### 56 -paride 56 -boosterish 56 -goldwin 56 -pigeonholes 56 -raimunda 56 -howden 56 -evangelized 56 -mixups 56 -panter 56 -o.h. 
56 -rockrose 56 -england-born 56 -augustow 56 -lessel 56 -kasane 56 -sudeikis 56 -contentville 56 -cleaning-up 56 -natan-zada 56 -manawatu 56 -unreviewable 56 -witticism 56 -ternus 56 -uscirf 56 -rijsbergen 56 -rakad 56 -cruciferous 56 -dundes 56 -pirozzi 56 -kreegel 56 -budgie 56 -clijster 56 -brou 56 -shaywitz 56 -lopano 56 -helissio 56 -bolshunov 56 -tennstedt 56 -anhua 56 -dimiter 56 -mosquito-control 56 -hearts-and-minds 56 -pretre 56 -tvind 56 -abednego 56 -born-and-bred 56 -zingre-graf 56 -longhai 56 -inniger 56 -diaper-changing 56 -corleones 56 -runge-metzger 56 -tangdhar 56 -thibadeau 56 -financieele 56 -chassin 56 -polarities 56 -chi-x 56 -helvey 56 -vetters 56 -pazzi 56 -univerity 56 -jail-house 56 -pegasystems 56 -chengshan 56 -ofakim 56 -a-lister 56 -jackfruit 56 -kissy 56 -ruocco 56 -paleobiologist 56 -bobosikova 56 -al-rayes 56 -dornod 56 -as-expected 56 -zadari 56 -javadi 56 -dynatech 56 -silantiev 56 -sonaecom 56 -kerosine 56 -akito 55 -benedictions 55 -mamounia 55 -charco 55 -blue-colored 55 -val-de-marne 55 -antipov 55 -nagu 55 -mueller-wohlfahrt 55 -pratapkumar 55 -multirole 55 -squibbed 55 -seroczynski 55 -satcom 55 -yaacobi 55 -weihong 55 -whitesnake 55 -uninflected 55 -pabbo 55 -runyonesque 55 -sloviter 55 -sperrazza 55 -rimland 55 -standen 55 -echouafni 55 -thanakorn 55 -okutan 55 -olim 55 -nadeam 55 -dolla 55 -chaudhri 55 -hediger 55 -ibwc 55 -craigslist.com 55 -sub-office 55 -al-luhaibi 55 -boutris 55 -harshman 55 -buhain 55 -nurhadi 55 -food-loving 55 -pluots 55 -geometrics 55 -pedercini 55 -couvrette 55 -rlopez 55 -greystoke 55 -standbridge 55 -afilias 55 -ex-strongman 55 -fator 55 -somerwill 55 -worthley 55 -karbanenko 55 -amchitka 55 -thadeus 55 -etemad-e-melli 55 -safecracker 55 -once-elegant 55 -gonyea 55 -anti-islamist 55 -dabanovic 55 -aljabr 55 -ozcelik 55 -smutny-jones 55 -fa'asavalu 55 -searby 55 -pengrowth 55 -cuecat 55 -mininum 55 -shkirko 55 -golf-club 55 -muzzafarabad 55 -bozsik 55 -discerns 55 -rothbaum 55 -ayila 55 -scandanavia 55 -lewmar 55 -jelloun 55 -dragnea 55 -pole-dancing 55 -takanyi 55 -customshouse 55 -kritzer 55 -gomelauri 55 -y.v. 55 -off-airport 55 -delfs 55 -points-scoring 55 -jupin 55 -eliska 55 -rainman 55 -renovator 55 -laroussi 55 -osmus 55 -gegamian 55 -bantadtan 55 -voletta 55 -yussupova 55 -tottenville 55 -dascalu 55 -#-los 55 -metabolizing 55 -kremlin-orchestrated 55 -gastro 55 -khayyat 55 -talara 55 -zizzo 55 -sumaysim 55 -b-level 55 -secularisation 55 -foreign-ownership 55 -puning 55 -stoos 55 -moun 55 -sidener 55 -footmen 55 -kamioka 55 -nazy 55 -valentierra 55 -lebovitz 55 -palanga 55 -english-khmer 55 -secularize 55 -tousignant 55 -development-related 55 -joram 55 -positano 55 -hand-crank 55 -laser-beam 55 -outfalls 55 -gps-enabled 55 -maranda 55 -ewatch 55 -executive-secretary 55 -saira 55 -radom 55 -wagoneer 55 -romack 55 -biomolecular 55 -marsudi 55 -vernier 55 -casualization 55 -non-intrusive 55 -idefense 55 -o.b. 
55 -credit-monitoring 55 -munante 55 -molehills 55 -seelbach 55 -swiger 55 -bado 55 -dppc 55 -gremolata 55 -non-parents 55 -ikenson 55 -wanniarachchi 55 -wednesdy 55 -lilas 55 -chengliang 55 -krauter 55 -blast-furnace 55 -polinard 55 -oscar-winners 55 -oddar 55 -qingcheng 55 -gellin 55 -castmate 55 -empedocle 55 -wse 55 -comerci 55 -octuplet 55 -gristmill 55 -calcium-fortified 55 -aeron 55 -xiuli 55 -powerine 55 -near-default 55 -spangles 55 -zarafshan 55 -turkish-based 55 -posptoned 55 -guyana-based 55 -agroforestry 55 -heavily-protected 55 -egis 55 -baoqing 55 -niezabitowska 55 -assignee 55 -taddeo 55 -xiaolan 55 -wechsel-bank 55 -still-living 55 -bolingbroke 55 -fusari 55 -padnos 55 -feistiest 55 -heumann 55 -nalle 55 -umbridge 55 -anthocyanins 55 -vampirism 55 -fuel-related 55 -katari 55 -newly-married 55 -funderburg 55 -compunctions 55 -mi-jung 55 -parrotheads 55 -kail 55 -jinhao 55 -ganciclovir 55 -central-east 55 -m\/a-com 55 -ggagbo 55 -haendel 55 -cholamandalam 55 -emina 55 -thiamin 55 -autorities 55 -thrill-seeker 55 -onu 55 -serb-majority 55 -mandylor 55 -kenesei 55 -soffa 55 -ghozlan 55 -puffinburger 55 -autotrader.com 55 -janowska 55 -luxenberg 55 -liddick 55 -stelmakh 55 -internationally-known 55 -oil-pipeline 55 -lolli 55 -shiang-nung 55 -matovu 55 -sablefish 55 -untradable 55 -ninth-placed 55 -minidv 55 -byronic 55 -tesema 55 -nightime 55 -isea 55 -florida-alabama 55 -bruininks 55 -gronke 55 -chearavanont 55 -##-by-##-centimeter 55 -latwp 55 -trouville 55 -allover 55 -switz 55 -notman 55 -mickler 55 -bouchenaki 55 -beirendonck 55 -sharkboy 55 -anahuac 55 -frale 55 -voter-rich 55 -livingood 55 -bailis 55 -catsup 55 -finnish-german 55 -chalupas 55 -genri 55 -abdulatif 55 -episiotomy 55 -rumbaut 55 -mullein 55 -bursted 55 -crye 55 -philoctetes 55 -beji 55 -kreidenko 55 -brianderson 55 -gitex 55 -supoj 55 -redmen 55 -road-testing 55 -rac-ns 55 -el-arab 55 -tahsi 55 -lactose-intolerant 55 -out-aced 55 -sherifi 55 -angat 55 -patarroyo 55 -czuma 55 -arsi 55 -tongue-and-groove 55 -sudol 55 -punkers 55 -frayre 55 -performance-wise 55 -#-xavier 55 -cantel 55 -ndiaye-diatta 55 -junes 55 -comegys 55 -nyuk 55 -bachelorettes 55 -creepy-crawly 55 -risner 55 -nassi 55 -teall 55 -outraising 55 -mini-concert 55 -ever-lasting 55 -habboush 55 -garrigue 55 -trelleborgs 55 -elisangela 55 -task-oriented 55 -rueppel 55 -bone-thin 55 -cremi 55 -hamat 55 -long-faced 55 -cjh\/rr 55 -finisar 55 -malaysia-vote 55 -pene 55 -dovid 55 -deal-killer 55 -enestam 55 -dedes 55 -dingiri 55 -russia-chechnya-vote 55 -genis 55 -shaabiya 55 -chelyabinsk-## 55 -selecta 55 -short-dated 55 -mahadhesi 55 -death-knell 55 -gotzsche 55 -ilaga 55 -lanegan 55 -kouk 55 -mahle 55 -per-mile 55 -corruption-busting 55 -alipui 55 -gorali 55 -jiazhen 55 -yu-ting 55 -isely 55 -medaire 55 -haggui 55 -p#-plus-one 55 -ntn 55 -mkalavishvili 55 -off-farm 55 -chromatis 55 -sharia-compliant 55 -highest-volume 55 -boner 55 -kannell 55 -tottenham\/eng 55 -helsingoer 55 -coluccio 55 -aliyeva 55 -steier 55 -koers 55 -exceso 55 -desaulniers 55 -comcast-spectacor 55 -jersey-born 55 -rock-music 55 -al-mazidi 55 -kilim 55 -povera 55 -iksanov 55 -tele-ventures 55 -primor 55 -civlians 55 -r.i.-based 55 -orania 55 -year-around 55 -small-to-medium 55 -dravecky 55 -bontecou 55 -the# 55 -amsterdam-schiphol 55 -workrate 55 -advocator 55 -demouge 55 -ei-ichi 55 -aeberhard 55 -bridi 55 -picacho 55 -dinant 55 -slashers 55 -judaea 55 -kayama 55 -delavekouras 55 -fuli 55 -dreno 55 -##-year-plus 55 -anang 55 -lerche 55 -abessole 55 -##-robert 55 -khidr 55 
-meheganglobe.com 55 -dubuc 55 -budiardjo 55 -#-state 55 -spit-roasted 55 -asbill 55 -balvino 55 -munyenyembe 55 -balzaretti 55 -ivelin 55 -aproximadamente 55 -pockmark 55 -ulleval 55 -moneycentral 55 -houssine 55 -gagoc 55 -jong-ho 55 -nonlawyer 55 -str\/lp## 55 -ship-to-air 55 -yanakiev 55 -gimbels 55 -concow 55 -moschetti 55 -zonghuai 55 -ottenhoff 55 -no-bake 55 -rutschow-stomporowski 55 -tampabay.com 55 -tzeltal 55 -c-note 55 -vebjoern 55 -fuel-cycle 55 -colombian-owned 55 -rescigno 55 -cropsey 55 -triple-x 55 -palmbeachpost.com/depression 55 -khanjani 55 -tousle-haired 55 -gulled 55 -tullia 55 -bideau 55 -#-and-a-half 55 -waytha 55 -aido 55 -###ci 55 -huels 55 -mychael 55 -madrigali 55 -trygg 55 -ciccolo 55 -kotscho 55 -levinstein 55 -taie 55 -adcb 55 -kry 55 -ethers 55 -cheaptickets.com 55 -lehrmann 55 -dendur 55 -antipodean 55 -#-gao 55 -keret 55 -srilanka-unrest-blast 55 -swamis 55 -horse-breeding 55 -dinsmoor 55 -barysch 55 -junior-level 55 -geode 55 -hoffmann-laroche 55 -nones 55 -wijdan 55 -square-shouldered 55 -white-shirted 55 -coldiron 55 -chartis 55 -kuskokwim 55 -stepfret 55 -gopendra 55 -nefertari 55 -klsx-fm 55 -westhusing 55 -revault 55 -luxar 55 -giancola 55 -gartnerg# 55 -lakemba 55 -organizaciones 55 -camerman 55 -amoussou 55 -last-hour 55 -pastrik 55 -datsakorn 55 -duxford 55 -brown-brick 55 -fortul 55 -zainy 55 -wamidh 55 -hanel 55 -feda 55 -reminyl 55 -thoeni 55 -sportcenter 55 -white-fleshed 55 -parchments 55 -seafrance 55 -nordmark 55 -aristobulo 55 -ligonier 55 -rmf 55 -torchy 55 -butcheries 55 -kurfuerstendamm 55 -http://www.cdc.gov/h#n#flu 55 -isleta 55 -letter-bombs 55 -hadrosaur 55 -nourizadeh 55 -kujawa 55 -memling 55 -mtd 55 -parents-to-be 55 -kinyu 55 -completo 55 -alsatians 55 -itta 55 -baoliu 55 -pravit 55 -siping 55 -korea-eu 55 -fulminate 55 -guandu 55 -yili\/zhao 55 -demott 55 -pakistan-militant 55 -lpu 55 -petrowski 55 -sondashi 55 -lindbom 55 -gatton 55 -dubelier 55 -hsin-hsing 55 -ghazl 55 -chad-unrest 55 -amcc 55 -lemon-yellow 55 -aniek 55 -danshuei 55 -tidmore 55 -chin-up 55 -scarpaci 55 -fresnel 55 -mecum 55 -rosebush 55 -consumer-confidence 55 -vainshtok 55 -kateri 55 -hungiapuko 55 -qibao 55 -camon 55 -ruhlmann 55 -claunch 55 -audible.com 55 -falic 55 -bardales 55 -lithographer 55 -ebrima 55 -szot 55 -ambulance-chasing 55 -watoto 55 -vigan 55 -komineft 55 -miklikova 55 -tbl 55 -egidius 55 -srikkanth 55 -senlin 55 -suizhong 55 -saltpeter 55 -shitreet 55 -laxton 55 -cspc 55 -elisdottir 55 -perrins 55 -uniglory 55 -emmick 55 -skulk 55 -earwax 55 -benaroya 55 -rushwaya 55 -discomforted 55 -ohalete 55 -panigoro 55 -mosquito-born 55 -safe-house 55 -excelerate 55 -mattino 55 -masoudi 55 -sonon 55 -bn.com 55 -ostergaard 55 -kamya 55 -kyeung-ran 55 -kitayama 55 -fu-hsing 55 -clip-art 55 -trillion-won 55 -ascender 55 -spritzing 55 -jonesing 55 -policy-based 55 -flomo 55 -globalist 55 -acamprosate 55 -redwall 55 -ljudmila 55 -korade 55 -lubo 55 -highhandedness 55 -anad 55 -#.#-meter-deep 55 -brownshirts 55 -attaboy 55 -traynham 55 -europeanized 55 -adjustables 55 -lumberyards 55 -hotel-like 55 -elektroprivreda 55 -kirilova 55 -dual-layer 55 -errett 55 -racinos 55 -sigou 55 -spindletop 55 -steidl 55 -hilderbrand 55 -man-of-the 55 -unconditioned 55 -budget-strapped 55 -bodart 55 -danic 55 -plushest 55 -jalaleddin 55 -fna 55 -to-# 55 -burhannudin 55 -godward 55 -greece-fires 55 -transfat 55 -lovelife 55 -myomectomy 55 -delgada 55 -ictv 55 -u.s-mexico 55 -lloreda 55 -moebius 55 -barkow 55 -madox 55 -liang-jen 55 -g+j 55 -cgnpc 55 -punchier 55 -wrsa 55 
-rospars 55 -overbid 55 -lokichoggio 55 -fukuura 55 -yuegu 55 -tarkett 55 -snocountry 55 -inui 55 -moved.wits-end-column 55 -partita 55 -overfilling 55 -hectarage 55 -okwiri 55 -#-zheng 55 -eidinger 55 -tengan 55 -kouris 55 -cbuchholz 55 -hafnarfjordur 55 -owasi 55 -iza 55 -spungen 55 -tepix 55 -sial 55 -keepin 55 -paint-by-number 55 -jeanene 55 -natural-food 55 -korniyenko 55 -hartadi 55 -ananova 55 -trend-setters 55 -nohilly 55 -pasteurize 55 -www.blogs.tampabay.com/food 55 -advertising-driven 55 -nalumango 55 -ruxton 55 -jalrez 55 -miyar 55 -dmelvin@coxnews.com 55 -overvalue 55 -slane 55 -hankerson 55 -harpswell 55 -wassell 55 -bishan 55 -fried-chicken 55 -galvanic 55 -ylli 55 -cusip 55 -dogeared 55 -goofy-looking 55 -rhys-meyers 55 -lined-up 55 -heeren 55 -hyslop 55 -yanovsky 55 -www.aa.com 55 -seagren 55 -wiklund 55 -art-making 55 -non-lawyer 55 -saurashtra 55 -michoacana 55 -##-###-## 55 -shabnam 55 -roadworker 55 -shetler 55 -quistelli 55 -jacksonville-based 55 -galekovic 55 -badruddin 55 -voorhis 55 -three-song 55 -geordies 55 -decane 55 -kilrea 55 -epfl 55 -gentamicin 55 -chewed-up 55 -severgazprom 55 -bolona 55 -chandos 55 -rafflesia 55 -mercutio 55 -pipebomb 55 -ruthann 55 -makhenkesi 55 -sweatband 55 -built-ins 55 -donata 55 -ramonet 55 -meehl 55 -curico 55 -alibux 55 -commentary\/oped 55 -slifkin 55 -bason 55 -lawsky 55 -sabal 55 -isaksen 55 -cheng-kung 55 -labruno 55 -musics 55 -virgets 55 -qidwa 55 -export-heavy 55 -out-compete 55 -nettuno 55 -ruhanie 55 -shailendra 55 -ogallaga 55 -time-being 55 -mashego 55 -bangladesh-based 55 -####b 55 -####g 55 -school-prayer 55 -necked 55 -luzern 55 -wasbir 55 -pro-royalist 55 -montelongo 55 -sudan-darfur-un 55 -internationally-sponsored 55 -buyenzi 55 -b&l 55 -paragliders 55 -producer\/director 55 -saalbach 55 -dillons 55 -subleasing 55 -unifrance 55 -fleeces 55 -chu-huan 55 -kanchi 55 -bihan 55 -agrama 55 -white-sided 55 -hansack 55 -jin-woo 55 -hileman 55 -handwoven 55 -stouter 55 -leaf-shaped 55 -osby 55 -stamen 55 -pleasurably 55 -chimeras 55 -film-related 55 -jotspot 55 -baitzel 55 -palmar 55 -ilegal 55 -hiaa 55 -eeeee 55 -alcobendas 55 -prewett 55 -knkt 55 -berkous 55 -woe-is-me 55 -krinkie 55 -magdelena 55 -post-hussein 55 -hardline-controlled 55 -narrow-bodied 55 -aqueous 55 -burukina 55 -nozoe 55 -awartani 55 -kalapani 55 -guenot 55 -tuipulotu 55 -iovino 55 -vosper 55 -##-flavia 55 -rrodriguez 55 -tranh 55 -d'amuro 55 -weert 55 -brosh 55 -europewide 55 -kovals 55 -pie-shaped 55 -nahda 55 -siboni 55 -vedenkin 55 -squillacote 55 -gittes 55 -jaakkola 55 -khattabi 55 -vatican-affiliated 55 -fewer-than-expected 55 -subschinski 55 -sherawat 55 -galadari 55 -salvors 55 -virdi 55 -jayasundara 55 -giraudet 55 -longjing 55 -thum 55 -ahorros 55 -pro-franco 55 -plutonium-making 55 -environmental-protection 55 -escuredo 55 -zhiping 55 -worktable 55 -sediq 55 -tree-hugger 55 -sahlman 55 -fadeout 55 -zizhou 55 -martes 55 -u.s.-arranged 55 -kymco 55 -nxc# 55 -kopin 55 -astbury 55 -marzan 55 -dongfanghong 55 -montecore 55 -cpap 55 -yamase 55 -assiri 55 -mbandjock 55 -plutocrat 55 -###-billion-baht 55 -unmerciful 55 -british-chinese 55 -re-gifting 55 -sexually-explicit 55 -switch-off 55 -svan 55 -abeywardene 55 -ansin 55 -cold-shoulder 55 -terron 55 -lizard-like 55 -aquarian 55 -solove 55 -water-carrying 55 -celebrityhood 55 -foreign-produced 55 -ganascia 55 -nyepi 55 -popmart 55 -ustream 55 -rosalio 55 -outdrawing 55 -vladimirovna 55 -gbissau 55 -ersoy 55 -golovlyov 55 -biserko 55 -glicken 55 -fbl-esp-cup 55 -briskman 55 -solvberg 55 
-wehr-hasler 55 -yokel 55 -koplovitz 55 -likeminded 55 -berkey 55 -infrasound 55 -al-adil 55 -quinley 55 -boym 55 -boxier 55 -tutuila 55 -stair-climbing 55 -vike 55 -a.f.m. 55 -belgrad 55 -nucleaire 55 -makubuya 55 -silverchair 55 -prawiro 55 -kluzak 55 -decepticons 55 -chappy 55 -eeriest 55 -paycut 55 -pinit 55 -tech-dominant 55 -charbonnet 55 -mid-innings 55 -watermarking 55 -unshelled 55 -bpa-free 55 -faughnan 55 -vanins 55 -sirena 55 -militello 55 -officier 55 -fras 55 -marudai 55 -hypnotherapy 55 -pasada 55 -laser-cut 55 -fuji-servetto 55 -butar 55 -vajna 55 -miguez 55 -cseries 55 -schlow 55 -hamkyong 55 -top-hatted 55 -midf 55 -union-mandated 55 -hanssens 55 -exhorbitant 55 -most-famous 55 -ruesch 55 -dexedrine 55 -baracoa 55 -maricela 55 -appropriator 55 -bronis 55 -vachagayev 55 -maseratis 55 -growth-stock 55 -westerngeco 55 -fuel-injection 55 -jpletz 55 -edun 55 -pinmanee 55 -swarts 55 -gauntlets 55 -bovelander 55 -azpurua 55 -honka 55 -edge-of-your-seat 55 -mcspaden 55 -aacc 55 -underripe 55 -helipads 55 -intra-shiite 55 -corazzin 55 -copperheads 55 -mumuni 55 -hruby 55 -d'alba 55 -goodland 55 -verrill 55 -hitoki 55 -forswore 55 -griddles 55 -spintronics 55 -sunncomm 55 -tongsalee 55 -anti-roll 55 -drogoul 55 -ecologic 55 -two-states 55 -acto 55 -coral-colored 55 -juliusz 55 -traesch 55 -ulaanbaatar 55 -##-grade 55 -scorelines 55 -rainswept 55 -mandisi 55 -dually 55 -wiant 55 -essig 55 -zambeze 55 -ortigas 55 -crewson 55 -assurer 55 -yellow-legged 55 -pirouetting 55 -gotschall 55 -injury-forced 55 -chemins 55 -aerator 55 -musalo 55 -kaijuka 55 -radivoje 55 -rennet 55 -half-seriously 55 -bulwer-lytton 55 -ashrafiyeh 55 -smaller-market 55 -mukarram 55 -one-and-only 55 -bojkov 55 -mosab 55 -remmel 55 -capellini 55 -lemel 55 -marostica 55 -gurfein 55 -kilicdaroglu 55 -config 55 -xxxend 55 -well-chilled 55 -meatiest 55 -###,###-rupee 55 -infinitis 55 -fiddlehead 55 -simpanan 55 -bat-winged 55 -banyoles 55 -neistat 55 -ragas 55 -philologist 55 -autostick 55 -nessler 55 -##-people 55 -tr#s 55 -tent-pole 55 -algarabawi 55 -prakarn 55 -ts### 55 -pierre-henry 55 -dumbfounding 55 -http://www.chinapntr.gov 55 -bnu 55 -step-grandfather 55 -mcnamaraglobe.com 55 -mlm 55 -pepitone 55 -americium 55 -phased-out 55 -mitchellville 55 -india-weather 55 -ilion 55 -sapan 55 -land-to-air 55 -aerators 55 -bacho 55 -gerwig 55 -richart 55 -dethrones 55 -runcorn 55 -hinzpeter 55 -spoon-feeding 55 -krey 55 -artform 55 -kva 55 -macmullanglobe.com 55 -nonde 55 -hispanico 55 -danke 55 -sevene 55 -arqam 55 -mbatista 55 -rphilpotstar-telegram 55 -lauridsen 55 -estrogen-only 55 -clow 55 -ki-### 55 -foucher 55 -zetec 55 -autograph-signing 55 -africanus 55 -ex-israeli 55 -psoriatic 55 -hualing 55 -unibet.com 55 -khoshchehreh 55 -matanog 55 -peace\/def 55 -rashid-merem 55 -kostyuk 55 -niantic 55 -dressen 55 -neinas 55 -in-process 55 -kazeem 55 -pude 55 -arria 55 -funing 55 -self-destructiveness 55 -shergold 55 -batiuk 55 -petrosa 55 -staser 55 -seebaran 55 -nesmachny 55 -#rd\/tv 55 -altona 55 -macarounas 55 -urad 55 -arenes 55 -zinovy 55 -javelins 55 -carpet-cleaning 55 -al-gumhuriya 55 -mogelonsky 55 -tattle 55 -afaq 55 -balie 55 -jayesh 55 -mccawley 55 -manganaro 55 -breitsprecher 55 -charice 55 -abdiqassim 55 -tantaquidgeon 55 -yung-san 55 -meirav 55 -###km\/h 55 -anouma 55 -el-ayoun 55 -wherehouse 55 -alloudi 55 -moshtarak 55 -ktvt 55 -gholikhan 55 -gogele 55 -mithoff 55 -intourist 55 -x-milwaukee 55 -never-used 55 -#,###-square-kilometre 55 -darkman 55 -moulvibazar 55 -reher 55 -sewa 55 -krulwich 55 -kajiwara 55 
-tunmore 55 -#-gong 55 -guille 55 -wrong-doers 55 -cirone 55 -adinolfi 55 -kil-seung 55 -charge-card 55 -tumpel-gugerell 55 -guliev 55 -glenmore 55 -flagcarrier 55 -zakum 55 -centromin 55 -earnings-driven 55 -pavoni 55 -machination 55 -flyertalk 55 -all-too-real 55 -maidenhead 55 -celtic\/sco 55 -rawod 55 -co-world 55 -sekhar 55 -gendel 55 -szymanowski 55 -domer 55 -tailandia 55 -three-panel 55 -vereker 55 -kuchinsky 55 -yongqiang 55 -fremont-based 55 -imouraren 55 -kotschau 55 -gebre-egziabher 55 -consolatory 55 -already-fragile 55 -mabini 55 -forehander 55 -whomping 55 -test-drove 55 -opensocial 55 -bacteria-killing 55 -kanko 55 -yakovleva 55 -bergert 55 -munari 55 -soccer-only 55 -maniatis 55 -helgesson 55 -nippert 55 -magnee 55 -hamisah 55 -non-war 55 -klaasen 55 -rubai 55 -saint-lazare 55 -shafir 55 -jianlin 55 -koppers 55 -lamberton 55 -metta 55 -algoma 55 -kessar 55 -kozue 55 -swiss-registered 55 -moderate-led 55 -desarrollar 55 -adenoids 55 -oberhofen 55 -tregubova 55 -##-cents 55 -adesina 55 -nedo 55 -trunzo 55 -nasaw 55 -vinters 55 -avx 55 -zanna 55 -suspender 55 -glutting 55 -objet 55 -medicina 55 -latka 55 -elegante 55 -kilner 55 -government-certified 55 -rc-# 55 -al-thawadi 55 -flip-out 55 -polish-language 55 -stationmaster 55 -fire-bombs 55 -seemo 55 -practical-minded 55 -tank-top 55 -hodari 55 -saj 55 -ger\/sae 55 -spinbaldak 55 -bikker 55 -volpenhein 55 -rom-com 55 -funches 55 -haacke 55 -al-hajiri 55 -tc-gb 55 -latium 55 -bhansali 55 -sterols 55 -mulliken 55 -plutarco 55 -voorsanger 55 -thomas-keprta 55 -bieksa 55 -loikaw 55 -matsepe-casaburri 55 -warty 55 -chen-wei 55 -www.mcdonalds.com 55 -hondora 55 -m.t.b. 55 -off-grid 55 -home-design 55 -lipovsky 55 -bergsson 55 -kii 55 -strakhov 55 -songphon 55 -zahab 55 -allauddin 55 -well-tolerated 55 -scolese 55 -cucbm 55 -drayson 55 -eiken 55 -hfi 55 -zoia 55 -edgecomb 55 -saaremaa 55 -annisu-r 55 -allaf 55 -eye-view 55 -chambermaids 55 -hirosawa 55 -vladivostock 55 -trutv 55 -orrorin 55 -wistron 55 -kik 55 -pretentions 55 -sciullo 55 -handwriting-recognition 55 -majles 55 -motoshima 55 -tanginye 55 -hellawell 55 -extractable 55 -end-year 55 -soll 55 -caronna 55 -vampiric 55 -xigui 55 -hit-and-runs 55 -duba-yurt 55 -saur 55 -koltsov 55 -blue-helmet 55 -hyuga 55 -kifri 55 -najja 55 -wible 55 -matloha 55 -idrissi 55 -tamarindo 55 -just-the-facts 55 -bulaong 55 -meddein 55 -klinefelter 55 -satz 55 -ushpizin 55 -overnighting 55 -handbrake 55 -dalley 55 -prampero 55 -hss 55 -nonhybrid 55 -mutebusi 55 -jingqian 55 -cryptology 55 -anupama 55 -yakshina 55 -eeas 55 -pottengal 55 -right-to-carry 55 -shampooed 55 -sonejee 55 -ozyurek 55 -kinrara 55 -munyua 55 -traboulsi 55 -contar 55 -lerone 55 -roundtrips 55 -chelanga 55 -limited-field 55 -wenming 55 -kreisberg 55 -bertino 55 -zhongxiao 55 -acsi 55 -levance 55 -eocene 55 -#-feliciano 55 -tarantella 55 -jaehnig 55 -al-awadhi 55 -anti-socialist 55 -hornak 55 -darkhovin 55 -perimetre 55 -abramian 55 -caricola 55 -anti-hungarian 55 -maneiro 55 -size-# 55 -farglory 55 -petermann 55 -meiwa 55 -phelpses 55 -porgras 55 -samiya 55 -texoma 55 -huffines 55 -third-placer 55 -al-badran 55 -babyhood 55 -seamounts 55 -aasen 55 -casulties 55 -bodong 55 -shamar 55 -destablising 55 -curado 55 -shangai 55 -svedka 55 - 83845866 - 83845866 - 5 diff --git a/research/textsum/data_convert_example.py b/research/textsum/data_convert_example.py deleted file mode 100644 index 9328936dd..000000000 --- a/research/textsum/data_convert_example.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Example of Converting TextSum model 
data.
-Usage:
-python data_convert_example.py --command binary_to_text --in_file data/data --out_file data/text_data
-python data_convert_example.py --command text_to_binary --in_file data/text_data --out_file data/binary_data
-python data_convert_example.py --command binary_to_text --in_file data/binary_data --out_file data/text_data2
-diff data/text_data2 data/text_data
-"""
-
-import struct
-import sys
-
-import tensorflow as tf
-from tensorflow.core.example import example_pb2
-
-FLAGS = tf.app.flags.FLAGS
-tf.app.flags.DEFINE_string('command', 'binary_to_text',
-                           'Either binary_to_text or text_to_binary. '
-                           'Specify FLAGS.in_file accordingly.')
-tf.app.flags.DEFINE_string('in_file', '', 'path to file')
-tf.app.flags.DEFINE_string('out_file', '', 'path to file')
-
-def _binary_to_text():
-  reader = open(FLAGS.in_file, 'rb')
-  writer = open(FLAGS.out_file, 'w')
-  while True:
-    len_bytes = reader.read(8)
-    if not len_bytes:
-      sys.stderr.write('Done reading\n')
-      break
-    str_len = struct.unpack('q', len_bytes)[0]
-    tf_example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
-    tf_example = example_pb2.Example.FromString(tf_example_str)
-    examples = []
-    for key in tf_example.features.feature:
-      examples.append('%s=%s' % (key, tf_example.features.feature[key].bytes_list.value[0]))
-    writer.write('%s\n' % '\t'.join(examples))
-  reader.close()
-  writer.close()
-
-
-def _text_to_binary():
-  inputs = open(FLAGS.in_file, 'r').readlines()
-  writer = open(FLAGS.out_file, 'wb')
-  for inp in inputs:
-    tf_example = example_pb2.Example()
-    for feature in inp.strip().split('\t'):
-      (k, v) = feature.split('=')
-      tf_example.features.feature[k].bytes_list.value.extend([v])
-    tf_example_str = tf_example.SerializeToString()
-    str_len = len(tf_example_str)
-    writer.write(struct.pack('q', str_len))
-    writer.write(struct.pack('%ds' % str_len, tf_example_str))
-  writer.close()
-
-
-def main(unused_argv):
-  assert FLAGS.command and FLAGS.in_file and FLAGS.out_file
-  if FLAGS.command == 'binary_to_text':
-    _binary_to_text()
-  elif FLAGS.command == 'text_to_binary':
-    _text_to_binary()
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/textsum/seq2seq_attention.py b/research/textsum/seq2seq_attention.py
deleted file mode 100644
index 33d1b4fed..000000000
--- a/research/textsum/seq2seq_attention.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Trains a seq2seq model.
-
-WORK IN PROGRESS.
-
-Implements "Abstractive Text Summarization using Sequence-to-sequence RNNs and
-Beyond."
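-(Nallapati et al., 2016; arXiv:1602.06023)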
- -""" -import sys -import time - -import tensorflow as tf -import batch_reader -import data -import seq2seq_attention_decode -import seq2seq_attention_model - -FLAGS = tf.app.flags.FLAGS -tf.app.flags.DEFINE_string('data_path', - '', 'Path expression to tf.Example.') -tf.app.flags.DEFINE_string('vocab_path', - '', 'Path expression to text vocabulary file.') -tf.app.flags.DEFINE_string('article_key', 'article', - 'tf.Example feature key for article.') -tf.app.flags.DEFINE_string('abstract_key', 'headline', - 'tf.Example feature key for abstract.') -tf.app.flags.DEFINE_string('log_root', '', 'Directory for model root.') -tf.app.flags.DEFINE_string('train_dir', '', 'Directory for train.') -tf.app.flags.DEFINE_string('eval_dir', '', 'Directory for eval.') -tf.app.flags.DEFINE_string('decode_dir', '', 'Directory for decode summaries.') -tf.app.flags.DEFINE_string('mode', 'train', 'train/eval/decode mode') -tf.app.flags.DEFINE_integer('max_run_steps', 10000000, - 'Maximum number of run steps.') -tf.app.flags.DEFINE_integer('max_article_sentences', 2, - 'Max number of first sentences to use from the ' - 'article') -tf.app.flags.DEFINE_integer('max_abstract_sentences', 100, - 'Max number of first sentences to use from the ' - 'abstract') -tf.app.flags.DEFINE_integer('beam_size', 4, - 'beam size for beam search decoding.') -tf.app.flags.DEFINE_integer('eval_interval_secs', 60, 'How often to run eval.') -tf.app.flags.DEFINE_integer('checkpoint_secs', 60, 'How often to checkpoint.') -tf.app.flags.DEFINE_bool('use_bucketing', False, - 'Whether bucket articles of similar length.') -tf.app.flags.DEFINE_bool('truncate_input', False, - 'Truncate inputs that are too long. If False, ' - 'examples that are too long are discarded.') -tf.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used.') -tf.app.flags.DEFINE_integer('random_seed', 111, 'A seed value for randomness.') - - -def _RunningAvgLoss(loss, running_avg_loss, summary_writer, step, decay=0.999): - """Calculate the running average of losses.""" - if running_avg_loss == 0: - running_avg_loss = loss - else: - running_avg_loss = running_avg_loss * decay + (1 - decay) * loss - running_avg_loss = min(running_avg_loss, 12) - loss_sum = tf.Summary() - loss_sum.value.add(tag='running_avg_loss', simple_value=running_avg_loss) - summary_writer.add_summary(loss_sum, step) - sys.stdout.write('running_avg_loss: %f\n' % running_avg_loss) - return running_avg_loss - - -def _Train(model, data_batcher): - """Runs model training.""" - with tf.device('/cpu:0'): - model.build_graph() - saver = tf.train.Saver() - # Train dir is different from log_root to avoid summary directory - # conflict with Supervisor. 
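-    # The Supervisor below is created with summary_op=None, so summaries are
-    # written explicitly through this FileWriter rather than automatically by
-    # the Supervisor itself.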
- summary_writer = tf.summary.FileWriter(FLAGS.train_dir) - sv = tf.train.Supervisor(logdir=FLAGS.log_root, - is_chief=True, - saver=saver, - summary_op=None, - save_summaries_secs=60, - save_model_secs=FLAGS.checkpoint_secs, - global_step=model.global_step) - sess = sv.prepare_or_wait_for_session(config=tf.ConfigProto( - allow_soft_placement=True)) - running_avg_loss = 0 - step = 0 - while not sv.should_stop() and step < FLAGS.max_run_steps: - (article_batch, abstract_batch, targets, article_lens, abstract_lens, - loss_weights, _, _) = data_batcher.NextBatch() - (_, summaries, loss, train_step) = model.run_train_step( - sess, article_batch, abstract_batch, targets, article_lens, - abstract_lens, loss_weights) - - summary_writer.add_summary(summaries, train_step) - running_avg_loss = _RunningAvgLoss( - running_avg_loss, loss, summary_writer, train_step) - step += 1 - if step % 100 == 0: - summary_writer.flush() - sv.Stop() - return running_avg_loss - - -def _Eval(model, data_batcher, vocab=None): - """Runs model eval.""" - model.build_graph() - saver = tf.train.Saver() - summary_writer = tf.summary.FileWriter(FLAGS.eval_dir) - sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) - running_avg_loss = 0 - step = 0 - while True: - time.sleep(FLAGS.eval_interval_secs) - try: - ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) - except tf.errors.OutOfRangeError as e: - tf.logging.error('Cannot restore checkpoint: %s', e) - continue - - if not (ckpt_state and ckpt_state.model_checkpoint_path): - tf.logging.info('No model to eval yet at %s', FLAGS.train_dir) - continue - - tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path) - saver.restore(sess, ckpt_state.model_checkpoint_path) - - (article_batch, abstract_batch, targets, article_lens, abstract_lens, - loss_weights, _, _) = data_batcher.NextBatch() - (summaries, loss, train_step) = model.run_eval_step( - sess, article_batch, abstract_batch, targets, article_lens, - abstract_lens, loss_weights) - tf.logging.info( - 'article: %s', - ' '.join(data.Ids2Words(article_batch[0][:].tolist(), vocab))) - tf.logging.info( - 'abstract: %s', - ' '.join(data.Ids2Words(abstract_batch[0][:].tolist(), vocab))) - - summary_writer.add_summary(summaries, train_step) - running_avg_loss = _RunningAvgLoss( - running_avg_loss, loss, summary_writer, train_step) - if step % 100 == 0: - summary_writer.flush() - - -def main(unused_argv): - vocab = data.Vocab(FLAGS.vocab_path, 1000000) - # Check for presence of required special tokens. - assert vocab.CheckVocab(data.PAD_TOKEN) > 0 - assert vocab.CheckVocab(data.UNKNOWN_TOKEN) >= 0 - assert vocab.CheckVocab(data.SENTENCE_START) > 0 - assert vocab.CheckVocab(data.SENTENCE_END) > 0 - - batch_size = 4 - if FLAGS.mode == 'decode': - batch_size = FLAGS.beam_size - - hps = seq2seq_attention_model.HParams( - mode=FLAGS.mode, # train, eval, decode - min_lr=0.01, # min learning rate. - lr=0.15, # learning rate - batch_size=batch_size, - enc_layers=4, - enc_timesteps=120, - dec_timesteps=30, - min_input_len=2, # discard articles/summaries < than this - num_hidden=256, # for rnn cell - emb_dim=128, # If 0, don't use embedding - max_grad_norm=2, - num_softmax_samples=4096) # If 0, no sampled softmax. 
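-
-  # Note: in decode mode, batch_size was set to FLAGS.beam_size above; beam
-  # search keeps one partial hypothesis per batch row, so the two must match.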
- - batcher = batch_reader.Batcher( - FLAGS.data_path, vocab, hps, FLAGS.article_key, - FLAGS.abstract_key, FLAGS.max_article_sentences, - FLAGS.max_abstract_sentences, bucketing=FLAGS.use_bucketing, - truncate_input=FLAGS.truncate_input) - tf.set_random_seed(FLAGS.random_seed) - - if hps.mode == 'train': - model = seq2seq_attention_model.Seq2SeqAttentionModel( - hps, vocab, num_gpus=FLAGS.num_gpus) - _Train(model, batcher) - elif hps.mode == 'eval': - model = seq2seq_attention_model.Seq2SeqAttentionModel( - hps, vocab, num_gpus=FLAGS.num_gpus) - _Eval(model, batcher, vocab=vocab) - elif hps.mode == 'decode': - decode_mdl_hps = hps - # Only need to restore the 1st step and reuse it since - # we keep and feed in state for each step's output. - decode_mdl_hps = hps._replace(dec_timesteps=1) - model = seq2seq_attention_model.Seq2SeqAttentionModel( - decode_mdl_hps, vocab, num_gpus=FLAGS.num_gpus) - decoder = seq2seq_attention_decode.BSDecoder(model, batcher, hps, vocab) - decoder.DecodeLoop() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/textsum/seq2seq_attention_decode.py b/research/textsum/seq2seq_attention_decode.py deleted file mode 100644 index 54b569194..000000000 --- a/research/textsum/seq2seq_attention_decode.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Module for decoding.""" - -import os -import time - -import beam_search -import data -from six.moves import xrange -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS -tf.app.flags.DEFINE_integer('max_decode_steps', 1000000, - 'Number of decoding steps.') -tf.app.flags.DEFINE_integer('decode_batches_per_ckpt', 8000, - 'Number of batches to decode before restoring next ' - 'checkpoint') - -DECODE_LOOP_DELAY_SECS = 60 -DECODE_IO_FLUSH_INTERVAL = 100 - - -class DecodeIO(object): - """Writes the decoded and references to RKV files for Rouge score. - - See nlp/common/utils/internal/rkv_parser.py for detail about rkv file. - """ - - def __init__(self, outdir): - self._cnt = 0 - self._outdir = outdir - if not os.path.exists(self._outdir): - os.mkdir(self._outdir) - self._ref_file = None - self._decode_file = None - - def Write(self, reference, decode): - """Writes the reference and decoded outputs to RKV files. - - Args: - reference: The human (correct) result. - decode: The machine-generated result - """ - self._ref_file.write('output=%s\n' % reference) - self._decode_file.write('output=%s\n' % decode) - self._cnt += 1 - if self._cnt % DECODE_IO_FLUSH_INTERVAL == 0: - self._ref_file.flush() - self._decode_file.flush() - - def ResetFiles(self): - """Resets the output files. 
Must be called once before Write().""" - if self._ref_file: self._ref_file.close() - if self._decode_file: self._decode_file.close() - timestamp = int(time.time()) - self._ref_file = open( - os.path.join(self._outdir, 'ref%d'%timestamp), 'w') - self._decode_file = open( - os.path.join(self._outdir, 'decode%d'%timestamp), 'w') - - -class BSDecoder(object): - """Beam search decoder.""" - - def __init__(self, model, batch_reader, hps, vocab): - """Beam search decoding. - - Args: - model: The seq2seq attentional model. - batch_reader: The batch data reader. - hps: Hyperparamters. - vocab: Vocabulary - """ - self._model = model - self._model.build_graph() - self._batch_reader = batch_reader - self._hps = hps - self._vocab = vocab - self._saver = tf.train.Saver() - self._decode_io = DecodeIO(FLAGS.decode_dir) - - def DecodeLoop(self): - """Decoding loop for long running process.""" - sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) - step = 0 - while step < FLAGS.max_decode_steps: - time.sleep(DECODE_LOOP_DELAY_SECS) - if not self._Decode(self._saver, sess): - continue - step += 1 - - def _Decode(self, saver, sess): - """Restore a checkpoint and decode it. - - Args: - saver: Tensorflow checkpoint saver. - sess: Tensorflow session. - Returns: - If success, returns true, otherwise, false. - """ - ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root) - if not (ckpt_state and ckpt_state.model_checkpoint_path): - tf.logging.info('No model to decode yet at %s', FLAGS.log_root) - return False - - tf.logging.info('checkpoint path %s', ckpt_state.model_checkpoint_path) - ckpt_path = os.path.join( - FLAGS.log_root, os.path.basename(ckpt_state.model_checkpoint_path)) - tf.logging.info('renamed checkpoint path %s', ckpt_path) - saver.restore(sess, ckpt_path) - - self._decode_io.ResetFiles() - for _ in xrange(FLAGS.decode_batches_per_ckpt): - (article_batch, _, _, article_lens, _, _, origin_articles, - origin_abstracts) = self._batch_reader.NextBatch() - for i in xrange(self._hps.batch_size): - bs = beam_search.BeamSearch( - self._model, self._hps.batch_size, - self._vocab.WordToId(data.SENTENCE_START), - self._vocab.WordToId(data.SENTENCE_END), - self._hps.dec_timesteps) - - article_batch_cp = article_batch.copy() - article_batch_cp[:] = article_batch[i:i+1] - article_lens_cp = article_lens.copy() - article_lens_cp[:] = article_lens[i:i+1] - best_beam = bs.BeamSearch(sess, article_batch_cp, article_lens_cp)[0] - decode_output = [int(t) for t in best_beam.tokens[1:]] - self._DecodeBatch( - origin_articles[i], origin_abstracts[i], decode_output) - return True - - def _DecodeBatch(self, article, abstract, output_ids): - """Convert id to words and writing results. - - Args: - article: The original article string. - abstract: The human (correct) abstract string. - output_ids: The abstract word ids output by machine. - """ - decoded_output = ' '.join(data.Ids2Words(output_ids, self._vocab)) - end_p = decoded_output.find(data.SENTENCE_END, 0) - if end_p != -1: - decoded_output = decoded_output[:end_p] - tf.logging.info('article: %s', article) - tf.logging.info('abstract: %s', abstract) - tf.logging.info('decoded: %s', decoded_output) - self._decode_io.Write(abstract, decoded_output.strip()) diff --git a/research/textsum/seq2seq_attention_model.py b/research/textsum/seq2seq_attention_model.py deleted file mode 100644 index 618d72fa2..000000000 --- a/research/textsum/seq2seq_attention_model.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Sequence-to-Sequence with attention model for text summarization. -""" -from collections import namedtuple - -import numpy as np -import seq2seq_lib -from six.moves import xrange -import tensorflow as tf - -HParams = namedtuple('HParams', - 'mode, min_lr, lr, batch_size, ' - 'enc_layers, enc_timesteps, dec_timesteps, ' - 'min_input_len, num_hidden, emb_dim, max_grad_norm, ' - 'num_softmax_samples') - - -def _extract_argmax_and_embed(embedding, output_projection=None, - update_embedding=True): - """Get a loop_function that extracts the previous symbol and embeds it. - - Args: - embedding: embedding tensor for symbols. - output_projection: None or a pair (W, B). If provided, each fed previous - output will first be multiplied by W and added B. - update_embedding: Boolean; if False, the gradients will not propagate - through the embeddings. - - Returns: - A loop function. - """ - def loop_function(prev, _): - """function that feed previous model output rather than ground truth.""" - if output_projection is not None: - prev = tf.nn.xw_plus_b( - prev, output_projection[0], output_projection[1]) - prev_symbol = tf.argmax(prev, 1) - # Note that gradients will not propagate through the second parameter of - # embedding_lookup. 
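-    # (prev_symbol comes from argmax, which is non-differentiable, and the
-    # stop_gradient below additionally freezes the embedding itself when
-    # update_embedding is False.)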
- emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol) - if not update_embedding: - emb_prev = tf.stop_gradient(emb_prev) - return emb_prev - return loop_function - - -class Seq2SeqAttentionModel(object): - """Wrapper for Tensorflow model graph for text sum vectors.""" - - def __init__(self, hps, vocab, num_gpus=0): - self._hps = hps - self._vocab = vocab - self._num_gpus = num_gpus - self._cur_gpu = 0 - - def run_train_step(self, sess, article_batch, abstract_batch, targets, - article_lens, abstract_lens, loss_weights): - to_return = [self._train_op, self._summaries, self._loss, self.global_step] - return sess.run(to_return, - feed_dict={self._articles: article_batch, - self._abstracts: abstract_batch, - self._targets: targets, - self._article_lens: article_lens, - self._abstract_lens: abstract_lens, - self._loss_weights: loss_weights}) - - def run_eval_step(self, sess, article_batch, abstract_batch, targets, - article_lens, abstract_lens, loss_weights): - to_return = [self._summaries, self._loss, self.global_step] - return sess.run(to_return, - feed_dict={self._articles: article_batch, - self._abstracts: abstract_batch, - self._targets: targets, - self._article_lens: article_lens, - self._abstract_lens: abstract_lens, - self._loss_weights: loss_weights}) - - def run_decode_step(self, sess, article_batch, abstract_batch, targets, - article_lens, abstract_lens, loss_weights): - to_return = [self._outputs, self.global_step] - return sess.run(to_return, - feed_dict={self._articles: article_batch, - self._abstracts: abstract_batch, - self._targets: targets, - self._article_lens: article_lens, - self._abstract_lens: abstract_lens, - self._loss_weights: loss_weights}) - - def _next_device(self): - """Round robin the gpu device. (Reserve last gpu for expensive op).""" - if self._num_gpus == 0: - return '' - dev = '/gpu:%d' % self._cur_gpu - if self._num_gpus > 1: - self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1) - return dev - - def _get_gpu(self, gpu_id): - if self._num_gpus <= 0 or gpu_id >= self._num_gpus: - return '' - return '/gpu:%d' % gpu_id - - def _add_placeholders(self): - """Inputs to be fed to the graph.""" - hps = self._hps - self._articles = tf.placeholder(tf.int32, - [hps.batch_size, hps.enc_timesteps], - name='articles') - self._abstracts = tf.placeholder(tf.int32, - [hps.batch_size, hps.dec_timesteps], - name='abstracts') - self._targets = tf.placeholder(tf.int32, - [hps.batch_size, hps.dec_timesteps], - name='targets') - self._article_lens = tf.placeholder(tf.int32, [hps.batch_size], - name='article_lens') - self._abstract_lens = tf.placeholder(tf.int32, [hps.batch_size], - name='abstract_lens') - self._loss_weights = tf.placeholder(tf.float32, - [hps.batch_size, hps.dec_timesteps], - name='loss_weights') - - def _add_seq2seq(self): - hps = self._hps - vsize = self._vocab.NumIds() - - with tf.variable_scope('seq2seq'): - encoder_inputs = tf.unstack(tf.transpose(self._articles)) - decoder_inputs = tf.unstack(tf.transpose(self._abstracts)) - targets = tf.unstack(tf.transpose(self._targets)) - loss_weights = tf.unstack(tf.transpose(self._loss_weights)) - article_lens = self._article_lens - - # Embedding shared by the input and outputs. 
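-      # A single embedding matrix, placed on the CPU, is looked up for both
-      # the encoder inputs and the decoder inputs below.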
- with tf.variable_scope('embedding'), tf.device('/cpu:0'): - embedding = tf.get_variable( - 'embedding', [vsize, hps.emb_dim], dtype=tf.float32, - initializer=tf.truncated_normal_initializer(stddev=1e-4)) - emb_encoder_inputs = [tf.nn.embedding_lookup(embedding, x) - for x in encoder_inputs] - emb_decoder_inputs = [tf.nn.embedding_lookup(embedding, x) - for x in decoder_inputs] - - for layer_i in xrange(hps.enc_layers): - with tf.variable_scope('encoder%d'%layer_i), tf.device( - self._next_device()): - cell_fw = tf.contrib.rnn.LSTMCell( - hps.num_hidden, - initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=123), - state_is_tuple=False) - cell_bw = tf.contrib.rnn.LSTMCell( - hps.num_hidden, - initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113), - state_is_tuple=False) - (emb_encoder_inputs, fw_state, _) = tf.contrib.rnn.static_bidirectional_rnn( - cell_fw, cell_bw, emb_encoder_inputs, dtype=tf.float32, - sequence_length=article_lens) - encoder_outputs = emb_encoder_inputs - - with tf.variable_scope('output_projection'): - w = tf.get_variable( - 'w', [hps.num_hidden, vsize], dtype=tf.float32, - initializer=tf.truncated_normal_initializer(stddev=1e-4)) - w_t = tf.transpose(w) - v = tf.get_variable( - 'v', [vsize], dtype=tf.float32, - initializer=tf.truncated_normal_initializer(stddev=1e-4)) - - with tf.variable_scope('decoder'), tf.device(self._next_device()): - # When decoding, use model output from the previous step - # for the next step. - loop_function = None - if hps.mode == 'decode': - loop_function = _extract_argmax_and_embed( - embedding, (w, v), update_embedding=False) - - cell = tf.contrib.rnn.LSTMCell( - hps.num_hidden, - initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113), - state_is_tuple=False) - - encoder_outputs = [tf.reshape(x, [hps.batch_size, 1, 2*hps.num_hidden]) - for x in encoder_outputs] - self._enc_top_states = tf.concat(axis=1, values=encoder_outputs) - self._dec_in_state = fw_state - # During decoding, follow up _dec_in_state are fed from beam_search. - # dec_out_state are stored by beam_search for next step feeding. - initial_state_attention = (hps.mode == 'decode') - decoder_outputs, self._dec_out_state = tf.contrib.legacy_seq2seq.attention_decoder( - emb_decoder_inputs, self._dec_in_state, self._enc_top_states, - cell, num_heads=1, loop_function=loop_function, - initial_state_attention=initial_state_attention) - - with tf.variable_scope('output'), tf.device(self._next_device()): - model_outputs = [] - for i in xrange(len(decoder_outputs)): - if i > 0: - tf.get_variable_scope().reuse_variables() - model_outputs.append( - tf.nn.xw_plus_b(decoder_outputs[i], w, v)) - - if hps.mode == 'decode': - with tf.variable_scope('decode_output'), tf.device('/cpu:0'): - best_outputs = [tf.argmax(x, 1) for x in model_outputs] - tf.logging.info('best_outputs%s', best_outputs[0].get_shape()) - self._outputs = tf.concat( - axis=1, values=[tf.reshape(x, [hps.batch_size, 1]) for x in best_outputs]) - - self._topk_log_probs, self._topk_ids = tf.nn.top_k( - tf.log(tf.nn.softmax(model_outputs[-1])), hps.batch_size*2) - - with tf.variable_scope('loss'), tf.device(self._next_device()): - def sampled_loss_func(inputs, labels): - with tf.device('/cpu:0'): # Try gpu. 
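-          # tf.nn.sampled_softmax_loss expects labels of shape
-          # [batch_size, num_true], hence the reshape to a column vector below.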
- labels = tf.reshape(labels, [-1, 1]) - return tf.nn.sampled_softmax_loss( - weights=w_t, biases=v, labels=labels, inputs=inputs, - num_sampled=hps.num_softmax_samples, num_classes=vsize) - - if hps.num_softmax_samples != 0 and hps.mode == 'train': - self._loss = seq2seq_lib.sampled_sequence_loss( - decoder_outputs, targets, loss_weights, sampled_loss_func) - else: - self._loss = tf.contrib.legacy_seq2seq.sequence_loss( - model_outputs, targets, loss_weights) - tf.summary.scalar('loss', tf.minimum(12.0, self._loss)) - - def _add_train_op(self): - """Sets self._train_op, op to run for training.""" - hps = self._hps - - self._lr_rate = tf.maximum( - hps.min_lr, # min_lr_rate. - tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98)) - - tvars = tf.trainable_variables() - with tf.device(self._get_gpu(self._num_gpus-1)): - grads, global_norm = tf.clip_by_global_norm( - tf.gradients(self._loss, tvars), hps.max_grad_norm) - tf.summary.scalar('global_norm', global_norm) - optimizer = tf.train.GradientDescentOptimizer(self._lr_rate) - tf.summary.scalar('learning rate', self._lr_rate) - self._train_op = optimizer.apply_gradients( - zip(grads, tvars), global_step=self.global_step, name='train_step') - - def encode_top_state(self, sess, enc_inputs, enc_len): - """Return the top states from encoder for decoder. - - Args: - sess: tensorflow session. - enc_inputs: encoder inputs of shape [batch_size, enc_timesteps]. - enc_len: encoder input length of shape [batch_size] - Returns: - enc_top_states: The top level encoder states. - dec_in_state: The decoder layer initial state. - """ - results = sess.run([self._enc_top_states, self._dec_in_state], - feed_dict={self._articles: enc_inputs, - self._article_lens: enc_len}) - return results[0], results[1][0] - - def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states): - """Return the topK results and new decoder states.""" - feed = { - self._enc_top_states: enc_top_states, - self._dec_in_state: - np.squeeze(np.array(dec_init_states)), - self._abstracts: - np.transpose(np.array([latest_tokens])), - self._abstract_lens: np.ones([len(dec_init_states)], np.int32)} - - results = sess.run( - [self._topk_ids, self._topk_log_probs, self._dec_out_state], - feed_dict=feed) - - ids, probs, states = results[0], results[1], results[2] - new_states = [s for s in states] - return ids, probs, new_states - - def build_graph(self): - self._add_placeholders() - self._add_seq2seq() - self.global_step = tf.Variable(0, name='global_step', trainable=False) - if self._hps.mode == 'train': - self._add_train_op() - self._summaries = tf.summary.merge_all() diff --git a/research/textsum/seq2seq_lib.py b/research/textsum/seq2seq_lib.py deleted file mode 100644 index de233895a..000000000 --- a/research/textsum/seq2seq_lib.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""seq2seq library codes copied from elsewhere for customization.""" - -import tensorflow as tf - - -# Adapted to support sampled_softmax loss function, which accepts activations -# instead of logits. -def sequence_loss_by_example(inputs, targets, weights, loss_function, - average_across_timesteps=True, name=None): - """Sampled softmax loss for a sequence of inputs (per example). - - Args: - inputs: List of 2D Tensors of shape [batch_size x hid_dim]. - targets: List of 1D batch-sized int32 Tensors of the same length as logits. - weights: List of 1D batch-sized float-Tensors of the same length as logits. - loss_function: Sampled softmax function (inputs, labels) -> loss - average_across_timesteps: If set, divide the returned cost by the total - label weight. - name: Optional name for this operation, default: 'sequence_loss_by_example'. - - Returns: - 1D batch-sized float Tensor: The log-perplexity for each sequence. - - Raises: - ValueError: If len(inputs) is different from len(targets) or len(weights). - """ - if len(targets) != len(inputs) or len(weights) != len(inputs): - raise ValueError('Lengths of logits, weights, and targets must be the same ' - '%d, %d, %d.' % (len(inputs), len(weights), len(targets))) - with tf.name_scope(values=inputs + targets + weights, name=name, - default_name='sequence_loss_by_example'): - log_perp_list = [] - for inp, target, weight in zip(inputs, targets, weights): - crossent = loss_function(inp, target) - log_perp_list.append(crossent * weight) - log_perps = tf.add_n(log_perp_list) - if average_across_timesteps: - total_size = tf.add_n(weights) - total_size += 1e-12 # Just to avoid division by 0 for all-0 weights. - log_perps /= total_size - return log_perps - - -def sampled_sequence_loss(inputs, targets, weights, loss_function, - average_across_timesteps=True, - average_across_batch=True, name=None): - """Weighted cross-entropy loss for a sequence of logits, batch-collapsed. - - Args: - inputs: List of 2D Tensors of shape [batch_size x hid_dim]. - targets: List of 1D batch-sized int32 Tensors of the same length as inputs. - weights: List of 1D batch-sized float-Tensors of the same length as inputs. - loss_function: Sampled softmax function (inputs, labels) -> loss - average_across_timesteps: If set, divide the returned cost by the total - label weight. - average_across_batch: If set, divide the returned cost by the batch size. - name: Optional name for this operation, defaults to 'sequence_loss'. - - Returns: - A scalar float Tensor: The average log-perplexity per symbol (weighted). - - Raises: - ValueError: If len(inputs) is different from len(targets) or len(weights). - """ - with tf.name_scope(values=inputs + targets + weights, name=name, - default_name='sampled_sequence_loss'): - cost = tf.reduce_sum(sequence_loss_by_example( - inputs, targets, weights, loss_function, - average_across_timesteps=average_across_timesteps)) - if average_across_batch: - batch_size = tf.shape(targets[0])[0] - return cost / tf.cast(batch_size, tf.float32) - else: - return cost - - -def linear(args, output_size, bias, bias_start=0.0, scope=None): - """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. - - Args: - args: a 2D Tensor or a list of 2D, batch x n, Tensors. - output_size: int, second dimension of W[i]. - bias: boolean, whether to add a bias term or not. - bias_start: starting value to initialize the bias; 0 by default. 
- scope: VariableScope for the created subgraph; defaults to "Linear". - - Returns: - A 2D Tensor with shape [batch x output_size] equal to - sum_i(args[i] * W[i]), where W[i]s are newly created matrices. - - Raises: - ValueError: if some of the arguments has unspecified or wrong shape. - """ - if args is None or (isinstance(args, (list, tuple)) and not args): - raise ValueError('`args` must be specified') - if not isinstance(args, (list, tuple)): - args = [args] - - # Calculate the total size of arguments on dimension 1. - total_arg_size = 0 - shapes = [a.get_shape().as_list() for a in args] - for shape in shapes: - if len(shape) != 2: - raise ValueError('Linear is expecting 2D arguments: %s' % str(shapes)) - if not shape[1]: - raise ValueError('Linear expects shape[1] of arguments: %s' % str(shapes)) - else: - total_arg_size += shape[1] - - # Now the computation. - with tf.variable_scope(scope or 'Linear'): - matrix = tf.get_variable('Matrix', [total_arg_size, output_size]) - if len(args) == 1: - res = tf.matmul(args[0], matrix) - else: - res = tf.matmul(tf.concat(axis=1, values=args), matrix) - if not bias: - return res - bias_term = tf.get_variable( - 'Bias', [output_size], - initializer=tf.constant_initializer(bias_start)) - return res + bias_term diff --git a/research/transformer/README.md b/research/transformer/README.md deleted file mode 100644 index 0acad0005..000000000 --- a/research/transformer/README.md +++ /dev/null @@ -1,63 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Spatial Transformer Network - -The Spatial Transformer Network [1] allows the spatial manipulation of data within the network. - -
-### API
-
-A Spatial Transformer Network implemented in TensorFlow 1.0, based on [2].
-
-#### How to use
-
-```python
-transformer(U, theta, out_size)
-```
-
-#### Parameters
-
-    U : float
-        The output of a convolutional net should have the
-        shape [num_batch, height, width, num_channels].
-    theta: float
-        The output of the
-        localisation network should be [num_batch, 6].
-    out_size: tuple of two ints
-        The size of the output of the network (height, width)
-
-
-#### Notes
-
-To initialize the network to the identity transform, initialize ``theta`` to:
-
-```python
-identity = np.array([[1., 0., 0.],
-                     [0., 1., 0.]])
-identity = identity.flatten()
-theta = tf.Variable(initial_value=identity)
-```
-
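-For example, a minimal sketch of calling the layer on random data with the
-identity transform (the batch size and image shape here are arbitrary, chosen
-only for illustration):
-
-```python
-import numpy as np
-import tensorflow as tf
-from spatial_transformer import transformer
-
-# Three 40x40 single-channel images; shapes are illustrative only.
-U = tf.constant(np.random.rand(3, 40, 40, 1), dtype=tf.float32)
-# One identity transform per image, flattened to [num_batch, 6].
-identity = np.array([[1., 0., 0.],
-                     [0., 1., 0.]], dtype=np.float32)
-theta = tf.constant(np.tile(identity.flatten(), (3, 1)))
-h_trans = transformer(U, theta, out_size=(40, 40))
-
-with tf.Session() as sess:
-    out = sess.run(h_trans)  # shape (3, 40, 40, 1); ~input, up to resampling
-```
-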
-#### Experiments
-
-We used cluttered MNIST. The left column shows the input images; the right
-column shows the parts of each image attended to by the STN.
-
-All experiments were run in TensorFlow 0.7.
-
-### References
-
-[1] Jaderberg, Max, et al. "Spatial Transformer Networks." arXiv preprint arXiv:1506.02025 (2015)
-
-[2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
diff --git a/research/transformer/cluttered_mnist.py b/research/transformer/cluttered_mnist.py
deleted file mode 100644
index ec00c751b..000000000
--- a/research/transformer/cluttered_mnist.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-import tensorflow as tf
-from spatial_transformer import transformer
-import numpy as np
-from tf_utils import weight_variable, bias_variable, dense_to_one_hot
-
-# %% Load data
-mnist_cluttered = np.load('./data/mnist_sequence1_sample_5distortions5x5.npz')
-
-X_train = mnist_cluttered['X_train']
-y_train = mnist_cluttered['y_train']
-X_valid = mnist_cluttered['X_valid']
-y_valid = mnist_cluttered['y_valid']
-X_test = mnist_cluttered['X_test']
-y_test = mnist_cluttered['y_test']
-
-# %% Turn from dense to one-hot representation.
-Y_train = dense_to_one_hot(y_train, n_classes=10)
-Y_valid = dense_to_one_hot(y_valid, n_classes=10)
-Y_test = dense_to_one_hot(y_test, n_classes=10)
-
-# %% Graph representation of our network
-
-# %% Placeholders for 40x40 resolution
-x = tf.placeholder(tf.float32, [None, 1600])
-y = tf.placeholder(tf.float32, [None, 10])
-
-# %% Since x is currently [batch, height*width], we need to reshape to a
-# 4-D tensor to use it in a convolutional graph. If one component of
-# `shape` is the special value -1, the size of that dimension is
-# computed so that the total size remains constant. Since we haven't
-# defined the batch dimension's shape yet, we use -1 to denote this
-# dimension should not change size.
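-# Here that yields a [batch, 40, 40, 1] tensor for the 40x40 grayscale input.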
-x_tensor = tf.reshape(x, [-1, 40, 40, 1]) - -# %% We'll setup the two-layer localisation network to figure out the -# %% parameters for an affine transformation of the input -# %% Create variables for fully connected layer -W_fc_loc1 = weight_variable([1600, 20]) -b_fc_loc1 = bias_variable([20]) - -W_fc_loc2 = weight_variable([20, 6]) -# Use identity transformation as starting point -initial = np.array([[1., 0, 0], [0, 1., 0]]) -initial = initial.astype('float32') -initial = initial.flatten() -b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2') - -# %% Define the two layer localisation network -h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1) -# %% We can add dropout for regularizing and to reduce overfitting like so: -keep_prob = tf.placeholder(tf.float32) -h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob) -# %% Second layer -h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2) - -# %% We'll create a spatial transformer module to identify discriminative -# %% patches -out_size = (40, 40) -h_trans = transformer(x_tensor, h_fc_loc2, out_size) - -# %% We'll setup the first convolutional layer -# Weight matrix is [height x width x input_channels x output_channels] -filter_size = 3 -n_filters_1 = 16 -W_conv1 = weight_variable([filter_size, filter_size, 1, n_filters_1]) - -# %% Bias is [output_channels] -b_conv1 = bias_variable([n_filters_1]) - -# %% Now we can build a graph which does the first layer of convolution: -# we define our stride as batch x height x width x channels -# instead of pooling, we use strides of 2 and more layers -# with smaller filters. - -h_conv1 = tf.nn.relu( - tf.nn.conv2d(input=h_trans, - filter=W_conv1, - strides=[1, 2, 2, 1], - padding='SAME') + - b_conv1) - -# %% And just like the first layer, add additional layers to create -# a deep net -n_filters_2 = 16 -W_conv2 = weight_variable([filter_size, filter_size, n_filters_1, n_filters_2]) -b_conv2 = bias_variable([n_filters_2]) -h_conv2 = tf.nn.relu( - tf.nn.conv2d(input=h_conv1, - filter=W_conv2, - strides=[1, 2, 2, 1], - padding='SAME') + - b_conv2) - -# %% We'll now reshape so we can connect to a fully-connected layer: -h_conv2_flat = tf.reshape(h_conv2, [-1, 10 * 10 * n_filters_2]) - -# %% Create a fully-connected layer: -n_fc = 1024 -W_fc1 = weight_variable([10 * 10 * n_filters_2, n_fc]) -b_fc1 = bias_variable([n_fc]) -h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1) - -h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) - -# %% And finally our softmax layer: -W_fc2 = weight_variable([n_fc, 10]) -b_fc2 = bias_variable([10]) -y_logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 - -# %% Define loss/eval/training functions -cross_entropy = tf.reduce_mean( - tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y)) -opt = tf.train.AdamOptimizer() -optimizer = opt.minimize(cross_entropy) -grads = opt.compute_gradients(cross_entropy, [b_fc_loc2]) - -# %% Monitor accuracy -correct_prediction = tf.equal(tf.argmax(y_logits, 1), tf.argmax(y, 1)) -accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float')) - -# %% We now create a new session to actually perform the initialization the -# variables: -sess = tf.Session() -sess.run(tf.global_variables_initializer()) - - -# %% We'll now train in minibatches and report accuracy, loss: -iter_per_epoch = 100 -n_epochs = 500 -train_size = 10000 - -indices = np.linspace(0, 10000 - 1, iter_per_epoch) -indices = indices.astype('int') - -for epoch_i in range(n_epochs): - for iter_i in range(iter_per_epoch - 1): - batch_xs = 
X_train[indices[iter_i]:indices[iter_i+1]] - batch_ys = Y_train[indices[iter_i]:indices[iter_i+1]] - - if iter_i % 10 == 0: - loss = sess.run(cross_entropy, - feed_dict={ - x: batch_xs, - y: batch_ys, - keep_prob: 1.0 - }) - print('Iteration: ' + str(iter_i) + ' Loss: ' + str(loss)) - - sess.run(optimizer, feed_dict={ - x: batch_xs, y: batch_ys, keep_prob: 0.8}) - - print('Accuracy (%d): ' % epoch_i + str(sess.run(accuracy, - feed_dict={ - x: X_valid, - y: Y_valid, - keep_prob: 1.0 - }))) - # theta = sess.run(h_fc_loc2, feed_dict={ - # x: batch_xs, keep_prob: 1.0}) - # print(theta[0]) diff --git a/research/transformer/data/README.md b/research/transformer/data/README.md deleted file mode 100644 index c2a9581fd..000000000 --- a/research/transformer/data/README.md +++ /dev/null @@ -1,20 +0,0 @@ -### How to get the data - -#### Cluttered MNIST - -The cluttered MNIST dataset can be found here [1] or can be generated via [2]. - -Settings used for `cluttered_mnist.py` : - -```python - -ORG_SHP = [28, 28] -OUT_SHP = [40, 40] -NUM_DISTORTIONS = 8 -dist_size = (5, 5) - -``` - -[1] https://github.com/daviddao/spatial-transformer-tensorflow - -[2] https://github.com/skaae/recurrent-spatial-transformer-code/blob/master/MNIST_SEQUENCE/create_mnist_sequence.py \ No newline at end of file diff --git a/research/transformer/example.py b/research/transformer/example.py deleted file mode 100644 index 19ca64d14..000000000 --- a/research/transformer/example.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -from scipy import ndimage -import tensorflow as tf -from spatial_transformer import transformer -import numpy as np -import matplotlib.pyplot as plt - -# %% Create a batch of three images (1600 x 1200) -# %% Image retrieved from: -# %% https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg -im = ndimage.imread('cat.jpg') -im = im / 255. -im = im.reshape(1, 1200, 1600, 3) -im = im.astype('float32') - -# %% Let the output size of the transformer be half the image size. 
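-# (1200, 1600) -> (600, 800), in (height, width) order.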
-out_size = (600, 800) - -# %% Simulate batch -batch = np.append(im, im, axis=0) -batch = np.append(batch, im, axis=0) -num_batch = 3 - -x = tf.placeholder(tf.float32, [None, 1200, 1600, 3]) -x = tf.cast(batch, 'float32') - -# %% Create localisation network and convolutional layer -with tf.variable_scope('spatial_transformer_0'): - - # %% Create a fully-connected layer with 6 output nodes - n_fc = 6 - W_fc1 = tf.Variable(tf.zeros([1200 * 1600 * 3, n_fc]), name='W_fc1') - - # %% Zoom into the image - initial = np.array([[0.5, 0, 0], [0, 0.5, 0]]) - initial = initial.astype('float32') - initial = initial.flatten() - - b_fc1 = tf.Variable(initial_value=initial, name='b_fc1') - h_fc1 = tf.matmul(tf.zeros([num_batch, 1200 * 1600 * 3]), W_fc1) + b_fc1 - h_trans = transformer(x, h_fc1, out_size) - -# %% Run session -sess = tf.Session() -sess.run(tf.global_variables_initializer()) -y = sess.run(h_trans, feed_dict={x: batch}) - -# plt.imshow(y[0]) diff --git a/research/transformer/spatial_transformer.py b/research/transformer/spatial_transformer.py deleted file mode 100644 index 47014fe27..000000000 --- a/research/transformer/spatial_transformer.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from six.moves import xrange -import tensorflow as tf - - -def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs): - """Spatial Transformer Layer - - Implements a spatial transformer layer as described in [1]_. - Based on [2]_ and edited by David Dao for Tensorflow. - - Parameters - ---------- - U : float - The output of a convolutional net should have the - shape [num_batch, height, width, num_channels]. - theta: float - The output of the - localisation network should be [num_batch, 6]. - out_size: tuple of two ints - The size of the output of the network (height, width) - - References - ---------- - .. [1] Spatial Transformer Networks - Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu - Submitted on 5 Jun 2015 - .. 
[2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py - - Notes - ----- - To initialize the network to the identity transform init - ``theta`` to : - identity = np.array([[1., 0., 0.], - [0., 1., 0.]]) - identity = identity.flatten() - theta = tf.Variable(initial_value=identity) - - """ - - def _repeat(x, n_repeats): - with tf.variable_scope('_repeat'): - rep = tf.transpose( - tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0]) - rep = tf.cast(rep, 'int32') - x = tf.matmul(tf.reshape(x, (-1, 1)), rep) - return tf.reshape(x, [-1]) - - def _interpolate(im, x, y, out_size): - with tf.variable_scope('_interpolate'): - # constants - num_batch = tf.shape(im)[0] - height = tf.shape(im)[1] - width = tf.shape(im)[2] - channels = tf.shape(im)[3] - - x = tf.cast(x, 'float32') - y = tf.cast(y, 'float32') - height_f = tf.cast(height, 'float32') - width_f = tf.cast(width, 'float32') - out_height = out_size[0] - out_width = out_size[1] - zero = tf.zeros([], dtype='int32') - max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') - max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') - - # scale indices from [-1, 1] to [0, width/height] - x = (x + 1.0)*(width_f) / 2.0 - y = (y + 1.0)*(height_f) / 2.0 - - # do sampling - x0 = tf.cast(tf.floor(x), 'int32') - x1 = x0 + 1 - y0 = tf.cast(tf.floor(y), 'int32') - y1 = y0 + 1 - - x0 = tf.clip_by_value(x0, zero, max_x) - x1 = tf.clip_by_value(x1, zero, max_x) - y0 = tf.clip_by_value(y0, zero, max_y) - y1 = tf.clip_by_value(y1, zero, max_y) - dim2 = width - dim1 = width*height - base = _repeat(tf.range(num_batch)*dim1, out_height*out_width) - base_y0 = base + y0*dim2 - base_y1 = base + y1*dim2 - idx_a = base_y0 + x0 - idx_b = base_y1 + x0 - idx_c = base_y0 + x1 - idx_d = base_y1 + x1 - - # use indices to lookup pixels in the flat image and restore - # channels dim - im_flat = tf.reshape(im, tf.stack([-1, channels])) - im_flat = tf.cast(im_flat, 'float32') - Ia = tf.gather(im_flat, idx_a) - Ib = tf.gather(im_flat, idx_b) - Ic = tf.gather(im_flat, idx_c) - Id = tf.gather(im_flat, idx_d) - - # and finally calculate interpolated values - x0_f = tf.cast(x0, 'float32') - x1_f = tf.cast(x1, 'float32') - y0_f = tf.cast(y0, 'float32') - y1_f = tf.cast(y1, 'float32') - wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1) - wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1) - wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1) - wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1) - output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id]) - return output - - def _meshgrid(height, width): - with tf.variable_scope('_meshgrid'): - # This should be equivalent to: - # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width), - # np.linspace(-1, 1, height)) - # ones = np.ones(np.prod(x_t.shape)) - # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) - x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), - tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) - y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), - tf.ones(shape=tf.stack([1, width]))) - - x_t_flat = tf.reshape(x_t, (1, -1)) - y_t_flat = tf.reshape(y_t, (1, -1)) - - ones = tf.ones_like(x_t_flat) - grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones]) - return grid - - def _transform(theta, input_dim, out_size): - with tf.variable_scope('_transform'): - num_batch = tf.shape(input_dim)[0] - height = tf.shape(input_dim)[1] - width = tf.shape(input_dim)[2] - num_channels = tf.shape(input_dim)[3] - theta = tf.reshape(theta, (-1, 2, 3)) - theta = tf.cast(theta, 'float32') - - # grid of (x_t, y_t, 
1), eq (1) in ref [1] - height_f = tf.cast(height, 'float32') - width_f = tf.cast(width, 'float32') - out_height = out_size[0] - out_width = out_size[1] - grid = _meshgrid(out_height, out_width) - grid = tf.expand_dims(grid, 0) - grid = tf.reshape(grid, [-1]) - grid = tf.tile(grid, tf.stack([num_batch])) - grid = tf.reshape(grid, tf.stack([num_batch, 3, -1])) - - # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s) - T_g = tf.matmul(theta, grid) - x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) - y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) - x_s_flat = tf.reshape(x_s, [-1]) - y_s_flat = tf.reshape(y_s, [-1]) - - input_transformed = _interpolate( - input_dim, x_s_flat, y_s_flat, - out_size) - - output = tf.reshape( - input_transformed, tf.stack([num_batch, out_height, out_width, num_channels])) - return output - - with tf.variable_scope(name): - output = _transform(theta, U, out_size) - return output - - -def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'): - """Batch Spatial Transformer Layer - - Parameters - ---------- - - U : float - tensor of inputs [num_batch,height,width,num_channels] - thetas : float - a set of transformations for each input [num_batch,num_transforms,6] - out_size : int - the size of the output [out_height,out_width] - - Returns: float - Tensor of size [num_batch*num_transforms,out_height,out_width,num_channels] - """ - with tf.variable_scope(name): - num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) - indices = [[i]*num_transforms for i in xrange(num_batch)] - input_repeated = tf.gather(U, tf.reshape(indices, [-1])) - return transformer(input_repeated, thetas, out_size) diff --git a/research/transformer/tf_utils.py b/research/transformer/tf_utils.py deleted file mode 100644 index 3cdac28bc..000000000 --- a/research/transformer/tf_utils.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# %% Borrowed utils from here: https://github.com/pkmital/tensorflow_tutorials/ -import tensorflow as tf -import numpy as np - -def conv2d(x, n_filters, - k_h=5, k_w=5, - stride_h=2, stride_w=2, - stddev=0.02, - activation=lambda x: x, - bias=True, - padding='SAME', - name="Conv2D"): - """2D Convolution with options for kernel size, stride, and init deviation. - Parameters - ---------- - x : Tensor - Input tensor to convolve. - n_filters : int - Number of filters to apply. - k_h : int, optional - Kernel height. - k_w : int, optional - Kernel width. - stride_h : int, optional - Stride in rows. - stride_w : int, optional - Stride in cols. - stddev : float, optional - Initialization's standard deviation. - activation : arguments, optional - Function which applies a nonlinearity - padding : str, optional - 'SAME' or 'VALID' - name : str, optional - Variable scope to use. - Returns - ------- - x : Tensor - Convolved input. 
- """ - with tf.variable_scope(name): - w = tf.get_variable( - 'w', [k_h, k_w, x.get_shape()[-1], n_filters], - initializer=tf.truncated_normal_initializer(stddev=stddev)) - conv = tf.nn.conv2d( - x, w, strides=[1, stride_h, stride_w, 1], padding=padding) - if bias: - b = tf.get_variable( - 'b', [n_filters], - initializer=tf.truncated_normal_initializer(stddev=stddev)) - conv = conv + b - return conv - -def linear(x, n_units, scope=None, stddev=0.02, - activation=lambda x: x): - """Fully-connected network. - Parameters - ---------- - x : Tensor - Input tensor to the network. - n_units : int - Number of units to connect to. - scope : str, optional - Variable scope to use. - stddev : float, optional - Initialization's standard deviation. - activation : arguments, optional - Function which applies a nonlinearity - Returns - ------- - x : Tensor - Fully-connected output. - """ - shape = x.get_shape().as_list() - - with tf.variable_scope(scope or "Linear"): - matrix = tf.get_variable("Matrix", [shape[1], n_units], tf.float32, - tf.random_normal_initializer(stddev=stddev)) - return activation(tf.matmul(x, matrix)) - -# %% -def weight_variable(shape): - '''Helper function to create a weight variable initialized with - a normal distribution - Parameters - ---------- - shape : list - Size of weight variable - ''' - #initial = tf.random_normal(shape, mean=0.0, stddev=0.01) - initial = tf.zeros(shape) - return tf.Variable(initial) - -# %% -def bias_variable(shape): - '''Helper function to create a bias variable initialized with - a constant value. - Parameters - ---------- - shape : list - Size of weight variable - ''' - initial = tf.random_normal(shape, mean=0.0, stddev=0.01) - return tf.Variable(initial) - -# %% -def dense_to_one_hot(labels, n_classes=2): - """Convert class labels from scalars to one-hot vectors.""" - labels = np.array(labels) - n_labels = labels.shape[0] - index_offset = np.arange(n_labels) * n_classes - labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32) - labels_one_hot.flat[index_offset + labels.ravel()] = 1 - return labels_one_hot diff --git a/research/video_prediction/README.md b/research/video_prediction/README.md deleted file mode 100644 index 89ea9e285..000000000 --- a/research/video_prediction/README.md +++ /dev/null @@ -1,102 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Video Prediction with Neural Advection - -*A TensorFlow implementation of the models described in [Unsupervised Learning for Physical Interaction through Video Prediction (Finn et al., 2016)](https://arxiv.org/abs/1605.07157).* - -This video prediction model, which is optionally conditioned on actions, -predicts future video by internally predicting how to transform the last -image (which may have been predicted) into the next image. As a result, it can -reuse apperance information from previous frames and can better generalize to -objects not seen in the training set. 
-Some example predictions on novel objects are shown below:
-
-![Animation](https://storage.googleapis.com/push_gens/novelgengifs9/16_70.gif)
-![Animation](https://storage.googleapis.com/push_gens/novelgengifs9/2_96.gif)
-![Animation](https://storage.googleapis.com/push_gens/novelgengifs9/1_38.gif)
-![Animation](https://storage.googleapis.com/push_gens/novelgengifs9/11_10.gif)
-![Animation](https://storage.googleapis.com/push_gens/novelgengifs9/3_34.gif)
-
-When the model is conditioned on actions, it changes its predictions based on
-the action passed in. Here we show the model's predictions in response to
-varying the magnitude of the passed-in actions, from small to large:
-
-![Animation](https://storage.googleapis.com/push_gens/webgifs/0xact_0.gif)
-![Animation](https://storage.googleapis.com/push_gens/05xact_0.gif)
-![Animation](https://storage.googleapis.com/push_gens/webgifs/1xact_0.gif)
-![Animation](https://storage.googleapis.com/push_gens/webgifs/15xact_0.gif)
-
-![Animation](https://storage.googleapis.com/push_gens/webgifs/0xact_17.gif)
-![Animation](https://storage.googleapis.com/push_gens/webgifs/05xact_17.gif)
-![Animation](https://storage.googleapis.com/push_gens/webgifs/1xact_17.gif)
-![Animation](https://storage.googleapis.com/push_gens/webgifs/15xact_17.gif)
-
-
-Because the model is trained with an L2 objective, it represents uncertainty as
-blur.
-
-## Requirements
-* TensorFlow (see tensorflow.org for installation instructions)
-* The spatial_transformer model in tensorflow/models, for the spatial
-  transformer predictor (STP).
-
-## Data
-The data used to train this model is located
-[here](https://sites.google.com/site/brainrobotdata/home/push-dataset).
-
-To download the robot data, run the following:
-```shell
-./download_data.sh
-```
-
-## Training the model
-
-To train the model, run the prediction_train.py file.
-```shell
-python prediction_train.py
-```
-
-There are several flags which can control the model that is trained, which are
-exemplified below:
-```shell
-python prediction_train.py \
-  --data_dir=push/push_train \ # path to the training set.
-  --model=CDNA \ # the model type to use - DNA, CDNA, or STP
-  --output_dir=./checkpoints \ # where to save model checkpoints
-  --event_log_dir=./summaries \ # where to save training statistics
-  --num_iterations=100000 \ # number of training iterations
-  --pretrained_model=model \ # path to model to initialize from, random if empty
-  --sequence_length=10 \ # the number of total frames in a sequence
-  --context_frames=2 \ # the number of ground truth frames to pass in at start
-  --use_state=1 \ # whether or not to condition on actions and the initial state
-  --num_masks=10 \ # the number of transformations and corresponding masks
-  --schedsamp_k=900.0 \ # the constant used for scheduled sampling or -1
-  --train_val_split=0.95 \ # the percentage of training data for validation
-  --batch_size=32 \ # the training batch size
-  --learning_rate=0.001 \ # the initial learning rate for the Adam optimizer
-```
-
-If the dynamic neural advection (DNA) model is being used, the `--num_masks`
-option should be set to one.
-
-The `--context_frames` option defines both the number of initial ground truth
-frames to pass in, as well as when to start penalizing the model's predictions.
-
-The data directory `--data_dir` should contain tfrecord files with the format
-used in the released push dataset. See
-[here](https://sites.google.com/site/brainrobotdata/home/push-dataset) for
-details.
-If the `--use_state` option is not set, then the data only needs to
-contain image sequences, not states and actions.
-
-
-## Contact
-
-To ask questions or report issues, please open an issue on the tensorflow/models
-[issues tracker](https://github.com/tensorflow/models/issues).
-Please assign issues to @cbfinn.
-
-## Credits
-
-This code was written by Chelsea Finn.
diff --git a/research/video_prediction/download_data.sh b/research/video_prediction/download_data.sh
deleted file mode 100755
index 4928add5f..000000000
--- a/research/video_prediction/download_data.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-
-# Example:
-#
-#   download_data.sh datafiles.txt ./tmp
-#
-# will download all of the files listed in the file, datafiles.txt, into
-# a directory, "./tmp".
-#
-# Each line of the datafiles.txt file should contain the path from the
-# bucket root to a file.
-
-ARGC="$#"
-LISTING_FILE=push_datafiles.txt
-if [ "${ARGC}" -ge 1 ]; then
-  LISTING_FILE=$1
-fi
-OUTPUT_DIR="./"
-if [ "${ARGC}" -ge 2 ]; then
-  OUTPUT_DIR=$2
-fi
-
-echo "OUTPUT_DIR=$OUTPUT_DIR"
-
-mkdir -p "${OUTPUT_DIR}"
-
-function download_file {
-  FILE=$1
-  BUCKET="https://storage.googleapis.com/brain-robotics-data"
-  URL="${BUCKET}/${FILE}"
-  OUTPUT_FILE="${OUTPUT_DIR}/${FILE}"
-  DIRECTORY=`dirname ${OUTPUT_FILE}`
-  echo DIRECTORY=$DIRECTORY
-  mkdir -p "${DIRECTORY}"
-  curl --output "${OUTPUT_FILE}" "${URL}"
-}
-
-while read filename; do
-  download_file "$filename"
-done <${LISTING_FILE}
diff --git a/research/video_prediction/lstm_ops.py b/research/video_prediction/lstm_ops.py
deleted file mode 100644
index 1f8c8d97a..000000000
--- a/research/video_prediction/lstm_ops.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Convolutional LSTM implementation."""
-
-import tensorflow as tf
-
-from tensorflow.contrib.slim import add_arg_scope
-from tensorflow.contrib.slim import layers
-
-
-def init_state(inputs,
-               state_shape,
-               state_initializer=tf.zeros_initializer(),
-               dtype=tf.float32):
-  """Helper function to create an initial state given inputs.
-
-  Args:
-    inputs: input Tensor, at least 2D, the first dimension being batch_size.
-    state_shape: the shape of the state.
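-      (a list of ints, excluding the batch dimension, which is inferred
-      from `inputs` when it is provided).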
-    state_initializer: Initializer(shape, dtype) for state Tensor.
-    dtype: Optional dtype, needed when inputs is None.
-  Returns:
-    A tensor representing the initial state.
-  """
-  if inputs is not None:
-    # Handle both the dynamic shape as well as the inferred shape.
-    inferred_batch_size = inputs.get_shape().with_rank_at_least(1)[0]
-    dtype = inputs.dtype
-  else:
-    inferred_batch_size = 0
-  initial_state = state_initializer(
-      [inferred_batch_size] + state_shape, dtype=dtype)
-  return initial_state
-
-
-@add_arg_scope
-def basic_conv_lstm_cell(inputs,
-                         state,
-                         num_channels,
-                         filter_size=5,
-                         forget_bias=1.0,
-                         scope=None,
-                         reuse=None):
-  """Basic LSTM recurrent network cell, with 2D convolution connections.
-
-  We add forget_bias (default: 1) to the biases of the forget gate in order to
-  reduce the scale of forgetting in the beginning of the training.
-
-  It does not allow cell clipping or a projection layer, and it does not
-  use peephole connections: it is the basic baseline.
-
-  Args:
-    inputs: input Tensor, 4D, batch x height x width x channels.
-    state: state Tensor, 4D, batch x height x width x channels.
-    num_channels: the number of output channels in the layer.
-    filter_size: the shape of each convolution filter.
-    forget_bias: the initial value of the forget biases.
-    scope: Optional scope for variable_scope.
-    reuse: whether or not the layer and the variables should be reused.
-
-  Returns:
-    A tuple of tensors representing output and the new state.
-  """
-  spatial_size = inputs.get_shape()[1:3]
-  if state is None:
-    state = init_state(inputs, list(spatial_size) + [2 * num_channels])
-  with tf.variable_scope(scope,
-                         'BasicConvLstmCell',
-                         [inputs, state],
-                         reuse=reuse):
-    inputs.get_shape().assert_has_rank(4)
-    state.get_shape().assert_has_rank(4)
-    c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
-    inputs_h = tf.concat(axis=3, values=[inputs, h])
-    # Parameters of gates are concatenated into one conv for efficiency.
-    i_j_f_o = layers.conv2d(inputs_h,
-                            4 * num_channels, [filter_size, filter_size],
-                            stride=1,
-                            activation_fn=None,
-                            scope='Gates')
-
-    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
-    i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=i_j_f_o)
-
-    new_c = c * tf.sigmoid(f + forget_bias) + tf.sigmoid(i) * tf.tanh(j)
-    new_h = tf.tanh(new_c) * tf.sigmoid(o)
-
-    return new_h, tf.concat(axis=3, values=[new_c, new_h])
-
-
-
diff --git a/research/video_prediction/prediction_input.py b/research/video_prediction/prediction_input.py
deleted file mode 100644
index e35b9daed..000000000
--- a/research/video_prediction/prediction_input.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Code for building the input for the prediction model.""" - -import os - -import numpy as np -import tensorflow as tf - -from tensorflow.python.platform import flags -from tensorflow.python.platform import gfile - - -FLAGS = flags.FLAGS - -# Original image dimensions -ORIGINAL_WIDTH = 640 -ORIGINAL_HEIGHT = 512 -COLOR_CHAN = 3 - -# Default image dimensions. -IMG_WIDTH = 64 -IMG_HEIGHT = 64 - -# Dimension of the state and action. -STATE_DIM = 5 - - -def build_tfrecord_input(training=True): - """Create input tfrecord tensors. - - Args: - training: training or validation data. - Returns: - list of tensors corresponding to images, actions, and states. The images - tensor is 5D, batch x time x height x width x channels. The state and - action tensors are 3D, batch x time x dimension. - Raises: - RuntimeError: if no files found. - """ - filenames = gfile.Glob(os.path.join(FLAGS.data_dir, '*')) - if not filenames: - raise RuntimeError('No data files found.') - index = int(np.floor(FLAGS.train_val_split * len(filenames))) - if training: - filenames = filenames[:index] - else: - filenames = filenames[index:] - filename_queue = tf.train.string_input_producer(filenames, shuffle=True) - reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) - - image_seq, state_seq, action_seq = [], [], [] - - for i in range(FLAGS.sequence_length): - image_name = 'move/' + str(i) + '/image/encoded' - action_name = 'move/' + str(i) + '/commanded_pose/vec_pitch_yaw' - state_name = 'move/' + str(i) + '/endeffector/vec_pitch_yaw' - if FLAGS.use_state: - features = {image_name: tf.FixedLenFeature([1], tf.string), - action_name: tf.FixedLenFeature([STATE_DIM], tf.float32), - state_name: tf.FixedLenFeature([STATE_DIM], tf.float32)} - else: - features = {image_name: tf.FixedLenFeature([1], tf.string)} - features = tf.parse_single_example(serialized_example, features=features) - - image_buffer = tf.reshape(features[image_name], shape=[]) - image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN) - image.set_shape([ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN]) - - if IMG_HEIGHT != IMG_WIDTH: - raise ValueError('Unequal height and width unsupported') - - crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH) - image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size) - image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN]) - image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH]) - image = tf.cast(image, tf.float32) / 255.0 - image_seq.append(image) - - if FLAGS.use_state: - state = tf.reshape(features[state_name], shape=[1, STATE_DIM]) - state_seq.append(state) - action = tf.reshape(features[action_name], shape=[1, STATE_DIM]) - action_seq.append(action) - - image_seq = tf.concat(axis=0, values=image_seq) - - if FLAGS.use_state: - state_seq = tf.concat(axis=0, values=state_seq) - action_seq = tf.concat(axis=0, values=action_seq) - [image_batch, action_batch, state_batch] = tf.train.batch( - [image_seq, action_seq, state_seq], - FLAGS.batch_size, - num_threads=FLAGS.batch_size, - capacity=100 * FLAGS.batch_size) - return image_batch, action_batch, state_batch - else: - image_batch = tf.train.batch( - [image_seq], - FLAGS.batch_size, - num_threads=FLAGS.batch_size, - capacity=100 * FLAGS.batch_size) - zeros_batch = tf.zeros([FLAGS.batch_size, FLAGS.sequence_length, STATE_DIM]) - return image_batch, zeros_batch, zeros_batch - diff --git 
a/research/video_prediction/prediction_model.py b/research/video_prediction/prediction_model.py
deleted file mode 100644
index ebdc15d7c..000000000
--- a/research/video_prediction/prediction_model.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Model architecture for predictive model, including CDNA, DNA, and STP."""
-
-import numpy as np
-import tensorflow as tf
-
-import tensorflow.contrib.slim as slim
-from tensorflow.contrib.layers.python import layers as tf_layers
-from lstm_ops import basic_conv_lstm_cell
-
-# Amount to use when lower bounding tensors.
-RELU_SHIFT = 1e-12
-
-# Kernel size for DNA and CDNA.
-DNA_KERN_SIZE = 5
-
-
-def construct_model(images,
-                    actions=None,
-                    states=None,
-                    iter_num=-1.0,
-                    k=-1,
-                    use_state=True,
-                    num_masks=10,
-                    stp=False,
-                    cdna=True,
-                    dna=False,
-                    context_frames=2):
-  """Build convolutional LSTM video predictor using STP, CDNA, or DNA.
-
-  Args:
-    images: tensor of ground truth image sequences
-    actions: tensor of action sequences
-    states: tensor of ground truth state sequences
-    iter_num: tensor of the current training iteration (for sched. sampling)
-    k: constant used for scheduled sampling. -1 to feed in own prediction.
-    use_state: True to include state and action in prediction
-    num_masks: the number of different pixel motion predictions (and
-      the number of masks for each of those predictions)
-    stp: True to use Spatial Transformer Predictor (STP)
-    cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
-    dna: True to use Dynamic Neural Advection (DNA)
-    context_frames: number of ground truth frames to pass in before
-      feeding in own predictions
-  Returns:
-    gen_images: predicted future image frames
-    gen_states: predicted future states
-
-  Raises:
-    ValueError: if more than one network option specified or more than 1 mask
-      specified for DNA model.
-  """
-  if stp + cdna + dna != 1:
-    raise ValueError('More than one, or no network option specified.')
-  batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
-  lstm_func = basic_conv_lstm_cell
-
-  # Generated robot states and images.
-  gen_states, gen_images = [], []
-  current_state = states[0]
-
-  if k == -1:
-    feedself = True
-  else:
-    # Scheduled sampling:
-    # Calculate number of ground-truth frames to pass in.
-    num_ground_truth = tf.to_int32(
-        tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
-    feedself = False
-
-  # LSTM state sizes and states.
-  lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
-  lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
-  lstm_state5, lstm_state6, lstm_state7 = None, None, None
-
-  for image, action in zip(images[:-1], actions[:-1]):
-    # Reuse variables after the first timestep.
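-    # gen_images is empty on the first timestep, so reuse is False once and
-    # True afterwards; all layer variables are created once, then shared.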
- reuse = bool(gen_images) - - done_warm_start = len(gen_images) > context_frames - 1 - with slim.arg_scope( - [lstm_func, slim.layers.conv2d, slim.layers.fully_connected, - tf_layers.layer_norm, slim.layers.conv2d_transpose], - reuse=reuse): - - if feedself and done_warm_start: - # Feed in generated image. - prev_image = gen_images[-1] - elif done_warm_start: - # Scheduled sampling - prev_image = scheduled_sample(image, gen_images[-1], batch_size, - num_ground_truth) - else: - # Always feed in ground_truth - prev_image = image - - # Predicted state is always fed back in - state_action = tf.concat(axis=1, values=[action, current_state]) - - enc0 = slim.layers.conv2d( - prev_image, - 32, [5, 5], - stride=2, - scope='scale1_conv1', - normalizer_fn=tf_layers.layer_norm, - normalizer_params={'scope': 'layer_norm1'}) - - hidden1, lstm_state1 = lstm_func( - enc0, lstm_state1, lstm_size[0], scope='state1') - hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2') - hidden2, lstm_state2 = lstm_func( - hidden1, lstm_state2, lstm_size[1], scope='state2') - hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3') - enc1 = slim.layers.conv2d( - hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2') - - hidden3, lstm_state3 = lstm_func( - enc1, lstm_state3, lstm_size[2], scope='state3') - hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4') - hidden4, lstm_state4 = lstm_func( - hidden3, lstm_state4, lstm_size[3], scope='state4') - hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5') - enc2 = slim.layers.conv2d( - hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3') - - # Pass in state and action. - smear = tf.reshape( - state_action, - [int(batch_size), 1, 1, int(state_action.get_shape()[1])]) - smear = tf.tile( - smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1]) - if use_state: - enc2 = tf.concat(axis=3, values=[enc2, smear]) - enc3 = slim.layers.conv2d( - enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4') - - hidden5, lstm_state5 = lstm_func( - enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8 - hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6') - enc4 = slim.layers.conv2d_transpose( - hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1') - - hidden6, lstm_state6 = lstm_func( - enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16 - hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7') - # Skip connection. - hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16 - - enc5 = slim.layers.conv2d_transpose( - hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2') - hidden7, lstm_state7 = lstm_func( - enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32 - hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8') - - # Skip connection. - hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32 - - enc6 = slim.layers.conv2d_transpose( - hidden7, - hidden7.get_shape()[3], 3, stride=2, scope='convt3', - normalizer_fn=tf_layers.layer_norm, - normalizer_params={'scope': 'layer_norm9'}) - - if dna: - # Using largest hidden state for predicting untied conv kernels. - enc7 = slim.layers.conv2d_transpose( - enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4') - else: - # Using largest hidden state for predicting a new image layer. - enc7 = slim.layers.conv2d_transpose( - enc6, color_channels, 1, stride=1, scope='convt4') - # This allows the network to also generate one image from scratch, - # which is useful when regions of the image become unoccluded. 
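-      # The sigmoid keeps this extra generated-from-scratch layer in [0, 1],
-      # matching the range of the transformed image layers it is merged with.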
-      transformed = [tf.nn.sigmoid(enc7)]
-
-      if stp:
-        stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
-        stp_input1 = slim.layers.fully_connected(
-            stp_input0, 100, scope='fc_stp')
-        transformed += stp_transformation(prev_image, stp_input1, num_masks)
-      elif cdna:
-        cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
-        transformed += cdna_transformation(prev_image, cdna_input, num_masks,
-                                           int(color_channels))
-      elif dna:
-        # Only one mask is supported (more should be unnecessary).
-        if num_masks != 1:
-          raise ValueError('Only one mask is supported for DNA model.')
-        transformed = [dna_transformation(prev_image, enc7)]
-
-      masks = slim.layers.conv2d_transpose(
-          enc6, num_masks + 1, 1, stride=1, scope='convt7')
-      masks = tf.reshape(
-          tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
-          [int(batch_size), int(img_height), int(img_width), num_masks + 1])
-      mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
-      output = mask_list[0] * prev_image
-      for layer, mask in zip(transformed, mask_list[1:]):
-        output += layer * mask
-      gen_images.append(output)
-
-      current_state = slim.layers.fully_connected(
-          state_action,
-          int(current_state.get_shape()[1]),
-          scope='state_pred',
-          activation_fn=None)
-      gen_states.append(current_state)
-
-  return gen_images, gen_states
-
-
-## Utility functions
-def stp_transformation(prev_image, stp_input, num_masks):
-  """Apply spatial transformer predictor (STP) to previous image.
-
-  Args:
-    prev_image: previous image to be transformed.
-    stp_input: hidden layer to be used for computing STN parameters.
-    num_masks: number of masks and hence the number of STP transformations.
-  Returns:
-    List of images transformed by the predicted STP parameters.
-  """
-  # Only import spatial transformer if needed.
-  from spatial_transformer import transformer
-
-  identity_params = tf.convert_to_tensor(
-      np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
-  transformed = []
-  for i in range(num_masks - 1):
-    params = slim.layers.fully_connected(
-        stp_input, 6, scope='stp_params' + str(i),
-        activation_fn=None) + identity_params
-    transformed.append(transformer(prev_image, params))
-
-  return transformed
-
-
-def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
-  """Apply convolutional dynamic neural advection to previous image.
-
-  Args:
-    prev_image: previous image to be transformed.
-    cdna_input: hidden layer to be used for computing CDNA kernels.
-    num_masks: the number of masks and hence the number of CDNA transformations.
-    color_channels: the number of color channels in the images.
-  Returns:
-    List of images transformed by the predicted CDNA kernels.
-  """
-  batch_size = int(cdna_input.get_shape()[0])
-  height = int(prev_image.get_shape()[1])
-  width = int(prev_image.get_shape()[2])
-
-  # Predict kernels using linear function of last hidden layer.
-  cdna_kerns = slim.layers.fully_connected(
-      cdna_input,
-      DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
-      scope='cdna_params',
-      activation_fn=None)
-
-  # Reshape and normalize.
-  cdna_kerns = tf.reshape(
-      cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
-  cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
-  norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
-  cdna_kerns /= norm_factor
-
-  # Treat the color channel dimension as the batch dimension since the same
-  # transformation is applied to each color channel.
-  # Treat the batch dimension as the channel dimension so that
-  # depthwise_conv2d can apply a different transformation to each sample.
-  cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
-  cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
-  # Swap the batch and channel dimensions.
-  prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
-
-  # Transform image.
-  transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
-
-  # Transpose the dimensions to where they belong.
-  transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
-  transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
-  transformed = tf.unstack(transformed, axis=-1)
-  return transformed
-
-
-def dna_transformation(prev_image, dna_input):
-  """Apply dynamic neural advection to previous image.
-
-  Args:
-    prev_image: previous image to be transformed.
-    dna_input: hidden layer to be used for computing DNA transformation.
-  Returns:
-    Image transformed by the predicted DNA kernels.
-  """
-  # Construct translated images.
-  prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
-  image_height = int(prev_image.get_shape()[1])
-  image_width = int(prev_image.get_shape()[2])
-
-  inputs = []
-  for xkern in range(DNA_KERN_SIZE):
-    for ykern in range(DNA_KERN_SIZE):
-      inputs.append(
-          tf.expand_dims(
-              tf.slice(prev_image_pad, [0, xkern, ykern, 0],
-                       [-1, image_height, image_width, -1]), [3]))
-  inputs = tf.concat(axis=3, values=inputs)
-
-  # Normalize channels to 1.
-  kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
-  kernel = tf.expand_dims(
-      kernel / tf.reduce_sum(
-          kernel, [3], keep_dims=True), [4])
-  return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
-
-
-def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
-  """Sample batch with specified mix of ground truth and generated data points.
-
-  Args:
-    ground_truth_x: tensor of ground-truth data points.
-    generated_x: tensor of generated data points.
-    batch_size: batch size.
-    num_ground_truth: number of ground-truth examples to include in batch.
-  Returns:
-    New batch with num_ground_truth sampled from ground_truth_x and the rest
-    from generated_x.
-  """
-  idx = tf.random_shuffle(tf.range(int(batch_size)))
-  ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
-  generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
-
-  ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
-  generated_examps = tf.gather(generated_x, generated_idx)
-  return tf.dynamic_stitch([ground_truth_idx, generated_idx],
-                           [ground_truth_examps, generated_examps])
diff --git a/research/video_prediction/prediction_train.py b/research/video_prediction/prediction_train.py
deleted file mode 100644
index dfc7ab6c2..000000000
--- a/research/video_prediction/prediction_train.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Code for training the prediction model.""" - -import numpy as np -import tensorflow as tf - -from tensorflow.python.platform import app -from tensorflow.python.platform import flags - -from prediction_input import build_tfrecord_input -from prediction_model import construct_model - -# How often to record tensorboard summaries. -SUMMARY_INTERVAL = 40 - -# How often to run a batch through the validation model. -VAL_INTERVAL = 200 - -# How often to save a model checkpoint -SAVE_INTERVAL = 2000 - -# EPSILON to avoid NAN -EPSILON = 1e-9 - -# tf record data location: -DATA_DIR = 'push/push_train' - -# local output directory -OUT_DIR = '/tmp/data' - -FLAGS = flags.FLAGS - -flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.') -flags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.') -flags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.') -flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.') -flags.DEFINE_string('pretrained_model', '', - 'filepath of a pretrained model to initialize from.') - -flags.DEFINE_integer('sequence_length', 10, - 'sequence length, including context frames.') -flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.') -flags.DEFINE_integer('use_state', 1, - 'Whether or not to give the state+action to the model') - -flags.DEFINE_string('model', 'CDNA', - 'model architecture to use - CDNA, DNA, or STP') - -flags.DEFINE_integer('num_masks', 10, - 'number of masks, usually 1 for DNA, 10 for CDNA, STP.') -flags.DEFINE_float('schedsamp_k', 900.0, - 'The k hyperparameter for scheduled sampling,' - '-1 for no scheduled sampling.') -flags.DEFINE_float('train_val_split', 0.95, - 'The percentage of files to use for the training set,' - ' vs. the validation set.') - -flags.DEFINE_integer('batch_size', 32, 'batch size for training') -flags.DEFINE_float('learning_rate', 0.001, - 'the base learning rate of the generator') - - -## Helper functions -def peak_signal_to_noise_ratio(true, pred): - """Image quality metric based on maximal signal power vs. power of the noise. - - Args: - true: the ground truth image. - pred: the predicted image. - Returns: - peak signal to noise ratio (PSNR) - """ - return 10.0 * (- tf.log(tf.maximum(mean_squared_error(true, pred), EPSILON))) / tf.log(10.0) - - -def mean_squared_error(true, pred): - """L2 distance between tensors true and pred. - - Args: - true: the ground truth image. - pred: the predicted image. - Returns: - mean squared error between ground truth and predicted image. - """ - return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred)) - - -class Model(object): - - def __init__(self, - images=None, - actions=None, - states=None, - sequence_length=None, - reuse_scope=None, - prefix=None): - - if sequence_length is None: - sequence_length = FLAGS.sequence_length - - if prefix is None: - prefix = tf.placeholder(tf.string, []) - self.prefix = prefix - self.iter_num = tf.placeholder(tf.float32, []) - summaries = [] - - # Split into timesteps. 
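-    # actions, states and images arrive as [batch, time, ...]; splitting on
-    # axis 1 and squeezing yields one tensor per timestep.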
- actions = tf.split(axis=1, num_or_size_splits=int(actions.get_shape()[1]), value=actions) - actions = [tf.squeeze(act) for act in actions] - states = tf.split(axis=1, num_or_size_splits=int(states.get_shape()[1]), value=states) - states = [tf.squeeze(st) for st in states] - images = tf.split(axis=1, num_or_size_splits=int(images.get_shape()[1]), value=images) - images = [tf.squeeze(img) for img in images] - - if reuse_scope is None: - gen_images, gen_states = construct_model( - images, - actions, - states, - iter_num=self.iter_num, - k=FLAGS.schedsamp_k, - use_state=FLAGS.use_state, - num_masks=FLAGS.num_masks, - cdna=FLAGS.model == 'CDNA', - dna=FLAGS.model == 'DNA', - stp=FLAGS.model == 'STP', - context_frames=FLAGS.context_frames) - else: # If it's a validation or test model. - with tf.variable_scope(reuse_scope, reuse=True): - gen_images, gen_states = construct_model( - images, - actions, - states, - iter_num=self.iter_num, - k=FLAGS.schedsamp_k, - use_state=FLAGS.use_state, - num_masks=FLAGS.num_masks, - cdna=FLAGS.model == 'CDNA', - dna=FLAGS.model == 'DNA', - stp=FLAGS.model == 'STP', - context_frames=FLAGS.context_frames) - - # L2 loss, PSNR for eval. - loss, psnr_all = 0.0, 0.0 - for i, x, gx in zip( - range(len(gen_images)), images[FLAGS.context_frames:], - gen_images[FLAGS.context_frames - 1:]): - recon_cost = mean_squared_error(x, gx) - psnr_i = peak_signal_to_noise_ratio(x, gx) - psnr_all += psnr_i - summaries.append( - tf.summary.scalar(prefix + '_recon_cost' + str(i), recon_cost)) - summaries.append(tf.summary.scalar(prefix + '_psnr' + str(i), psnr_i)) - loss += recon_cost - - for i, state, gen_state in zip( - range(len(gen_states)), states[FLAGS.context_frames:], - gen_states[FLAGS.context_frames - 1:]): - state_cost = mean_squared_error(state, gen_state) * 1e-4 - summaries.append( - tf.summary.scalar(prefix + '_state_cost' + str(i), state_cost)) - loss += state_cost - summaries.append(tf.summary.scalar(prefix + '_psnr_all', psnr_all)) - self.psnr_all = psnr_all - - self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames) - - summaries.append(tf.summary.scalar(prefix + '_loss', loss)) - - self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ()) - - self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss) - self.summ_op = tf.summary.merge(summaries) - - -def main(unused_argv): - - print('Constructing models and inputs.') - with tf.variable_scope('model', reuse=None) as training_scope: - images, actions, states = build_tfrecord_input(training=True) - model = Model(images, actions, states, FLAGS.sequence_length, - prefix='train') - - with tf.variable_scope('val_model', reuse=None): - val_images, val_actions, val_states = build_tfrecord_input(training=False) - val_model = Model(val_images, val_actions, val_states, - FLAGS.sequence_length, training_scope, prefix='val') - - print('Constructing saver.') - # Make saver. - saver = tf.train.Saver( - tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=0) - - # Make training session. - sess = tf.InteractiveSession() - sess.run(tf.global_variables_initializer()) - - summary_writer = tf.summary.FileWriter( - FLAGS.event_log_dir, graph=sess.graph, flush_secs=10) - - if FLAGS.pretrained_model: - saver.restore(sess, FLAGS.pretrained_model) - - tf.train.start_queue_runners(sess) - - tf.logging.info('iteration number, cost') - - # Run training. - for itr in range(FLAGS.num_iterations): - # Generate new batch of data. 
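-    # Input batches come from the tf.train queue runners, so only the
-    # iteration number (for scheduled sampling) and learning rate are fed.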
- feed_dict = {model.iter_num: np.float32(itr), - model.lr: FLAGS.learning_rate} - cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op], - feed_dict) - - # Print info: iteration #, cost. - tf.logging.info(str(itr) + ' ' + str(cost)) - - if (itr) % VAL_INTERVAL == 2: - # Run through validation set. - feed_dict = {val_model.lr: 0.0, - val_model.iter_num: np.float32(itr)} - _, val_summary_str = sess.run([val_model.train_op, val_model.summ_op], - feed_dict) - summary_writer.add_summary(val_summary_str, itr) - - if (itr) % SAVE_INTERVAL == 2: - tf.logging.info('Saving model.') - saver.save(sess, FLAGS.output_dir + '/model' + str(itr)) - - if (itr) % SUMMARY_INTERVAL: - summary_writer.add_summary(summary_str, itr) - - tf.logging.info('Saving model.') - saver.save(sess, FLAGS.output_dir + '/model') - tf.logging.info('Training complete') - tf.logging.flush() - - -if __name__ == '__main__': - app.run() diff --git a/research/video_prediction/push_datafiles.txt b/research/video_prediction/push_datafiles.txt deleted file mode 100644 index 48da04633..000000000 --- a/research/video_prediction/push_datafiles.txt +++ /dev/null @@ -1,274 +0,0 @@ -push/push_testnovel/push_testnovel.tfrecord-00000-of-00005 -push/push_testnovel/push_testnovel.tfrecord-00001-of-00005 -push/push_testnovel/push_testnovel.tfrecord-00002-of-00005 -push/push_testnovel/push_testnovel.tfrecord-00003-of-00005 -push/push_testnovel/push_testnovel.tfrecord-00004-of-00005 -push/push_testseen/push_testseen.tfrecord-00000-of-00005 -push/push_testseen/push_testseen.tfrecord-00001-of-00005 -push/push_testseen/push_testseen.tfrecord-00002-of-00005 -push/push_testseen/push_testseen.tfrecord-00003-of-00005 -push/push_testseen/push_testseen.tfrecord-00004-of-00005 -push/push_train/push_train.tfrecord-00000-of-00264 -push/push_train/push_train.tfrecord-00001-of-00264 -push/push_train/push_train.tfrecord-00002-of-00264 -push/push_train/push_train.tfrecord-00003-of-00264 -push/push_train/push_train.tfrecord-00004-of-00264 -push/push_train/push_train.tfrecord-00005-of-00264 -push/push_train/push_train.tfrecord-00006-of-00264 -push/push_train/push_train.tfrecord-00007-of-00264 -push/push_train/push_train.tfrecord-00008-of-00264 -push/push_train/push_train.tfrecord-00009-of-00264 -push/push_train/push_train.tfrecord-00010-of-00264 -push/push_train/push_train.tfrecord-00011-of-00264 -push/push_train/push_train.tfrecord-00012-of-00264 -push/push_train/push_train.tfrecord-00013-of-00264 -push/push_train/push_train.tfrecord-00014-of-00264 -push/push_train/push_train.tfrecord-00015-of-00264 -push/push_train/push_train.tfrecord-00016-of-00264 -push/push_train/push_train.tfrecord-00017-of-00264 -push/push_train/push_train.tfrecord-00018-of-00264 -push/push_train/push_train.tfrecord-00019-of-00264 -push/push_train/push_train.tfrecord-00020-of-00264 -push/push_train/push_train.tfrecord-00021-of-00264 -push/push_train/push_train.tfrecord-00022-of-00264 -push/push_train/push_train.tfrecord-00023-of-00264 -push/push_train/push_train.tfrecord-00024-of-00264 -push/push_train/push_train.tfrecord-00025-of-00264 -push/push_train/push_train.tfrecord-00026-of-00264 -push/push_train/push_train.tfrecord-00027-of-00264 -push/push_train/push_train.tfrecord-00028-of-00264 -push/push_train/push_train.tfrecord-00029-of-00264 -push/push_train/push_train.tfrecord-00030-of-00264 -push/push_train/push_train.tfrecord-00031-of-00264 -push/push_train/push_train.tfrecord-00032-of-00264 -push/push_train/push_train.tfrecord-00033-of-00264 
-push/push_train/push_train.tfrecord-00034-of-00264 -push/push_train/push_train.tfrecord-00035-of-00264 -push/push_train/push_train.tfrecord-00036-of-00264 -push/push_train/push_train.tfrecord-00037-of-00264 -push/push_train/push_train.tfrecord-00038-of-00264 -push/push_train/push_train.tfrecord-00039-of-00264 -push/push_train/push_train.tfrecord-00040-of-00264 -push/push_train/push_train.tfrecord-00041-of-00264 -push/push_train/push_train.tfrecord-00042-of-00264 -push/push_train/push_train.tfrecord-00043-of-00264 -push/push_train/push_train.tfrecord-00044-of-00264 -push/push_train/push_train.tfrecord-00045-of-00264 -push/push_train/push_train.tfrecord-00046-of-00264 -push/push_train/push_train.tfrecord-00047-of-00264 -push/push_train/push_train.tfrecord-00048-of-00264 -push/push_train/push_train.tfrecord-00049-of-00264 -push/push_train/push_train.tfrecord-00050-of-00264 -push/push_train/push_train.tfrecord-00051-of-00264 -push/push_train/push_train.tfrecord-00052-of-00264 -push/push_train/push_train.tfrecord-00053-of-00264 -push/push_train/push_train.tfrecord-00054-of-00264 -push/push_train/push_train.tfrecord-00055-of-00264 -push/push_train/push_train.tfrecord-00056-of-00264 -push/push_train/push_train.tfrecord-00057-of-00264 -push/push_train/push_train.tfrecord-00058-of-00264 -push/push_train/push_train.tfrecord-00059-of-00264 -push/push_train/push_train.tfrecord-00060-of-00264 -push/push_train/push_train.tfrecord-00061-of-00264 -push/push_train/push_train.tfrecord-00062-of-00264 -push/push_train/push_train.tfrecord-00063-of-00264 -push/push_train/push_train.tfrecord-00064-of-00264 -push/push_train/push_train.tfrecord-00065-of-00264 -push/push_train/push_train.tfrecord-00066-of-00264 -push/push_train/push_train.tfrecord-00067-of-00264 -push/push_train/push_train.tfrecord-00068-of-00264 -push/push_train/push_train.tfrecord-00069-of-00264 -push/push_train/push_train.tfrecord-00070-of-00264 -push/push_train/push_train.tfrecord-00071-of-00264 -push/push_train/push_train.tfrecord-00072-of-00264 -push/push_train/push_train.tfrecord-00073-of-00264 -push/push_train/push_train.tfrecord-00074-of-00264 -push/push_train/push_train.tfrecord-00075-of-00264 -push/push_train/push_train.tfrecord-00076-of-00264 -push/push_train/push_train.tfrecord-00077-of-00264 -push/push_train/push_train.tfrecord-00078-of-00264 -push/push_train/push_train.tfrecord-00079-of-00264 -push/push_train/push_train.tfrecord-00080-of-00264 -push/push_train/push_train.tfrecord-00081-of-00264 -push/push_train/push_train.tfrecord-00082-of-00264 -push/push_train/push_train.tfrecord-00083-of-00264 -push/push_train/push_train.tfrecord-00084-of-00264 -push/push_train/push_train.tfrecord-00085-of-00264 -push/push_train/push_train.tfrecord-00086-of-00264 -push/push_train/push_train.tfrecord-00087-of-00264 -push/push_train/push_train.tfrecord-00088-of-00264 -push/push_train/push_train.tfrecord-00089-of-00264 -push/push_train/push_train.tfrecord-00090-of-00264 -push/push_train/push_train.tfrecord-00091-of-00264 -push/push_train/push_train.tfrecord-00092-of-00264 -push/push_train/push_train.tfrecord-00093-of-00264 -push/push_train/push_train.tfrecord-00094-of-00264 -push/push_train/push_train.tfrecord-00095-of-00264 -push/push_train/push_train.tfrecord-00096-of-00264 -push/push_train/push_train.tfrecord-00097-of-00264 -push/push_train/push_train.tfrecord-00098-of-00264 -push/push_train/push_train.tfrecord-00099-of-00264 -push/push_train/push_train.tfrecord-00100-of-00264 -push/push_train/push_train.tfrecord-00101-of-00264 
-push/push_train/push_train.tfrecord-00102-of-00264 -push/push_train/push_train.tfrecord-00103-of-00264 -push/push_train/push_train.tfrecord-00104-of-00264 -push/push_train/push_train.tfrecord-00105-of-00264 -push/push_train/push_train.tfrecord-00106-of-00264 -push/push_train/push_train.tfrecord-00107-of-00264 -push/push_train/push_train.tfrecord-00108-of-00264 -push/push_train/push_train.tfrecord-00109-of-00264 -push/push_train/push_train.tfrecord-00110-of-00264 -push/push_train/push_train.tfrecord-00111-of-00264 -push/push_train/push_train.tfrecord-00112-of-00264 -push/push_train/push_train.tfrecord-00113-of-00264 -push/push_train/push_train.tfrecord-00114-of-00264 -push/push_train/push_train.tfrecord-00115-of-00264 -push/push_train/push_train.tfrecord-00116-of-00264 -push/push_train/push_train.tfrecord-00117-of-00264 -push/push_train/push_train.tfrecord-00118-of-00264 -push/push_train/push_train.tfrecord-00119-of-00264 -push/push_train/push_train.tfrecord-00120-of-00264 -push/push_train/push_train.tfrecord-00121-of-00264 -push/push_train/push_train.tfrecord-00122-of-00264 -push/push_train/push_train.tfrecord-00123-of-00264 -push/push_train/push_train.tfrecord-00124-of-00264 -push/push_train/push_train.tfrecord-00125-of-00264 -push/push_train/push_train.tfrecord-00126-of-00264 -push/push_train/push_train.tfrecord-00127-of-00264 -push/push_train/push_train.tfrecord-00128-of-00264 -push/push_train/push_train.tfrecord-00129-of-00264 -push/push_train/push_train.tfrecord-00130-of-00264 -push/push_train/push_train.tfrecord-00131-of-00264 -push/push_train/push_train.tfrecord-00132-of-00264 -push/push_train/push_train.tfrecord-00133-of-00264 -push/push_train/push_train.tfrecord-00134-of-00264 -push/push_train/push_train.tfrecord-00135-of-00264 -push/push_train/push_train.tfrecord-00136-of-00264 -push/push_train/push_train.tfrecord-00137-of-00264 -push/push_train/push_train.tfrecord-00138-of-00264 -push/push_train/push_train.tfrecord-00139-of-00264 -push/push_train/push_train.tfrecord-00140-of-00264 -push/push_train/push_train.tfrecord-00141-of-00264 -push/push_train/push_train.tfrecord-00142-of-00264 -push/push_train/push_train.tfrecord-00143-of-00264 -push/push_train/push_train.tfrecord-00144-of-00264 -push/push_train/push_train.tfrecord-00145-of-00264 -push/push_train/push_train.tfrecord-00146-of-00264 -push/push_train/push_train.tfrecord-00147-of-00264 -push/push_train/push_train.tfrecord-00148-of-00264 -push/push_train/push_train.tfrecord-00149-of-00264 -push/push_train/push_train.tfrecord-00150-of-00264 -push/push_train/push_train.tfrecord-00151-of-00264 -push/push_train/push_train.tfrecord-00152-of-00264 -push/push_train/push_train.tfrecord-00153-of-00264 -push/push_train/push_train.tfrecord-00154-of-00264 -push/push_train/push_train.tfrecord-00155-of-00264 -push/push_train/push_train.tfrecord-00156-of-00264 -push/push_train/push_train.tfrecord-00157-of-00264 -push/push_train/push_train.tfrecord-00158-of-00264 -push/push_train/push_train.tfrecord-00159-of-00264 -push/push_train/push_train.tfrecord-00160-of-00264 -push/push_train/push_train.tfrecord-00161-of-00264 -push/push_train/push_train.tfrecord-00162-of-00264 -push/push_train/push_train.tfrecord-00163-of-00264 -push/push_train/push_train.tfrecord-00164-of-00264 -push/push_train/push_train.tfrecord-00165-of-00264 -push/push_train/push_train.tfrecord-00166-of-00264 -push/push_train/push_train.tfrecord-00167-of-00264 -push/push_train/push_train.tfrecord-00168-of-00264 -push/push_train/push_train.tfrecord-00169-of-00264 
-push/push_train/push_train.tfrecord-00170-of-00264 -push/push_train/push_train.tfrecord-00171-of-00264 -push/push_train/push_train.tfrecord-00172-of-00264 -push/push_train/push_train.tfrecord-00173-of-00264 -push/push_train/push_train.tfrecord-00174-of-00264 -push/push_train/push_train.tfrecord-00175-of-00264 -push/push_train/push_train.tfrecord-00176-of-00264 -push/push_train/push_train.tfrecord-00177-of-00264 -push/push_train/push_train.tfrecord-00178-of-00264 -push/push_train/push_train.tfrecord-00179-of-00264 -push/push_train/push_train.tfrecord-00180-of-00264 -push/push_train/push_train.tfrecord-00181-of-00264 -push/push_train/push_train.tfrecord-00182-of-00264 -push/push_train/push_train.tfrecord-00183-of-00264 -push/push_train/push_train.tfrecord-00184-of-00264 -push/push_train/push_train.tfrecord-00185-of-00264 -push/push_train/push_train.tfrecord-00186-of-00264 -push/push_train/push_train.tfrecord-00187-of-00264 -push/push_train/push_train.tfrecord-00188-of-00264 -push/push_train/push_train.tfrecord-00189-of-00264 -push/push_train/push_train.tfrecord-00190-of-00264 -push/push_train/push_train.tfrecord-00191-of-00264 -push/push_train/push_train.tfrecord-00192-of-00264 -push/push_train/push_train.tfrecord-00193-of-00264 -push/push_train/push_train.tfrecord-00194-of-00264 -push/push_train/push_train.tfrecord-00195-of-00264 -push/push_train/push_train.tfrecord-00196-of-00264 -push/push_train/push_train.tfrecord-00197-of-00264 -push/push_train/push_train.tfrecord-00198-of-00264 -push/push_train/push_train.tfrecord-00199-of-00264 -push/push_train/push_train.tfrecord-00200-of-00264 -push/push_train/push_train.tfrecord-00201-of-00264 -push/push_train/push_train.tfrecord-00202-of-00264 -push/push_train/push_train.tfrecord-00203-of-00264 -push/push_train/push_train.tfrecord-00204-of-00264 -push/push_train/push_train.tfrecord-00205-of-00264 -push/push_train/push_train.tfrecord-00206-of-00264 -push/push_train/push_train.tfrecord-00207-of-00264 -push/push_train/push_train.tfrecord-00208-of-00264 -push/push_train/push_train.tfrecord-00209-of-00264 -push/push_train/push_train.tfrecord-00210-of-00264 -push/push_train/push_train.tfrecord-00211-of-00264 -push/push_train/push_train.tfrecord-00212-of-00264 -push/push_train/push_train.tfrecord-00213-of-00264 -push/push_train/push_train.tfrecord-00214-of-00264 -push/push_train/push_train.tfrecord-00215-of-00264 -push/push_train/push_train.tfrecord-00216-of-00264 -push/push_train/push_train.tfrecord-00217-of-00264 -push/push_train/push_train.tfrecord-00218-of-00264 -push/push_train/push_train.tfrecord-00219-of-00264 -push/push_train/push_train.tfrecord-00220-of-00264 -push/push_train/push_train.tfrecord-00221-of-00264 -push/push_train/push_train.tfrecord-00222-of-00264 -push/push_train/push_train.tfrecord-00223-of-00264 -push/push_train/push_train.tfrecord-00224-of-00264 -push/push_train/push_train.tfrecord-00225-of-00264 -push/push_train/push_train.tfrecord-00226-of-00264 -push/push_train/push_train.tfrecord-00227-of-00264 -push/push_train/push_train.tfrecord-00228-of-00264 -push/push_train/push_train.tfrecord-00229-of-00264 -push/push_train/push_train.tfrecord-00230-of-00264 -push/push_train/push_train.tfrecord-00231-of-00264 -push/push_train/push_train.tfrecord-00232-of-00264 -push/push_train/push_train.tfrecord-00233-of-00264 -push/push_train/push_train.tfrecord-00234-of-00264 -push/push_train/push_train.tfrecord-00235-of-00264 -push/push_train/push_train.tfrecord-00236-of-00264 -push/push_train/push_train.tfrecord-00237-of-00264 
-push/push_train/push_train.tfrecord-00238-of-00264 -push/push_train/push_train.tfrecord-00239-of-00264 -push/push_train/push_train.tfrecord-00240-of-00264 -push/push_train/push_train.tfrecord-00241-of-00264 -push/push_train/push_train.tfrecord-00242-of-00264 -push/push_train/push_train.tfrecord-00243-of-00264 -push/push_train/push_train.tfrecord-00244-of-00264 -push/push_train/push_train.tfrecord-00245-of-00264 -push/push_train/push_train.tfrecord-00246-of-00264 -push/push_train/push_train.tfrecord-00247-of-00264 -push/push_train/push_train.tfrecord-00248-of-00264 -push/push_train/push_train.tfrecord-00249-of-00264 -push/push_train/push_train.tfrecord-00250-of-00264 -push/push_train/push_train.tfrecord-00251-of-00264 -push/push_train/push_train.tfrecord-00252-of-00264 -push/push_train/push_train.tfrecord-00253-of-00264 -push/push_train/push_train.tfrecord-00254-of-00264 -push/push_train/push_train.tfrecord-00255-of-00264 -push/push_train/push_train.tfrecord-00256-of-00264 -push/push_train/push_train.tfrecord-00257-of-00264 -push/push_train/push_train.tfrecord-00258-of-00264 -push/push_train/push_train.tfrecord-00259-of-00264 -push/push_train/push_train.tfrecord-00260-of-00264 -push/push_train/push_train.tfrecord-00261-of-00264 -push/push_train/push_train.tfrecord-00262-of-00264 -push/push_train/push_train.tfrecord-00263-of-00264 -- GitLab From 24501315d2a559876215b62b080397cea3a66b97 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Thu, 30 Jul 2020 22:50:24 -0700 Subject: [PATCH 085/128] Internal change PiperOrigin-RevId: 324155447 --- orbit/__init__.py | 1 - orbit/controller.py | 1 - orbit/controller_test.py | 1 - orbit/runner.py | 1 - orbit/standard_runner.py | 1 - orbit/standard_runner_test.py | 1 - orbit/utils.py | 1 - orbit/utils_test.py | 1 - 8 files changed, 8 deletions(-) diff --git a/orbit/__init__.py b/orbit/__init__.py index ef8c00c23..81a046aac 100644 --- a/orbit/__init__.py +++ b/orbit/__init__.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/controller.py b/orbit/controller.py index dac71d15f..3370e556c 100644 --- a/orbit/controller.py +++ b/orbit/controller.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/controller_test.py b/orbit/controller_test.py index 2e4a815b0..6751e9025 100644 --- a/orbit/controller_test.py +++ b/orbit/controller_test.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/runner.py b/orbit/runner.py index 3ea3422c7..967133db5 100644 --- a/orbit/runner.py +++ b/orbit/runner.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/standard_runner.py b/orbit/standard_runner.py index 2eb077141..1d37f2cc9 100644 --- a/orbit/standard_runner.py +++ b/orbit/standard_runner.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/standard_runner_test.py b/orbit/standard_runner_test.py index 2721b680e..fb98a715d 100644 --- a/orbit/standard_runner_test.py +++ b/orbit/standard_runner_test.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/utils.py b/orbit/utils.py index c084ca247..a8b231121 100644 --- a/orbit/utils.py +++ b/orbit/utils.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/orbit/utils_test.py b/orbit/utils_test.py index 0fc33aecc..329139f62 100644 --- a/orbit/utils_test.py +++ b/orbit/utils_test.py @@ -1,4 +1,3 @@ -# Lint as: python3 # Copyright 2020 The Orbit Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); -- GitLab From 0e9d5840c1c9466b68cbcf709af28504907a86a4 Mon Sep 17 00:00:00 2001 From: Srihari Humbarwadi Date: Fri, 31 Jul 2020 20:02:08 +0530 Subject: [PATCH 086/128] Fixed typos in losses.py --- official/vision/detection/modeling/losses.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/official/vision/detection/modeling/losses.py b/official/vision/detection/modeling/losses.py index 4b993061b..3d6afe8ad 100644 --- a/official/vision/detection/modeling/losses.py +++ b/official/vision/detection/modeling/losses.py @@ -449,7 +449,7 @@ class RetinanetBoxLoss(object): num_positives: number of positive examples in the minibatch. Returns: - an integar tensor representing total box regression loss. + an integer tensor representing total box regression loss. """ # Sums all positives in a batch for normalization and avoids zero # num_positives_sum, which would lead to inf loss during training @@ -457,7 +457,6 @@ class RetinanetBoxLoss(object): box_losses = [] for level in box_outputs.keys(): - # Onehot encoding for classification labels. 
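For context on the loss documented in the hunk above: each FPN level's box regression loss (a Huber-style term in this codebase) is normalized by the number of positive anchors summed over the batch, with the guard against zero positives that the comment describes. A schematic NumPy version, not the actual implementation:

    import numpy as np

    def normalized_box_loss(per_level_losses, num_positives):
        # Add 1 to the positive count so a batch with no positives cannot
        # produce an infinite normalized loss early in training.
        num_positives_sum = np.sum(num_positives) + 1.0
        return sum(np.sum(l) for l in per_level_losses) / num_positives_sum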
       box_targets_l = labels[level]
       box_losses.append(
           self.box_loss(box_outputs[level], box_targets_l, num_positives_sum))
-- 
GitLab


From 0a2312b82863ddffa0237a11d8869d0ec33310aa Mon Sep 17 00:00:00 2001
From: Kaushik Shivakumar
Date: Fri, 31 Jul 2020 18:59:48 +0000
Subject: [PATCH 087/128] bugfix

---
 .../dataset_tools/context_rcnn/add_context_to_examples.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py
index 334feb765..a5b8b0ab7 100644
--- a/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py
+++ b/research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py
@@ -926,6 +926,7 @@ def main(argv=None, save_main_session=True):
           args.context_features_score_threshold,
           args.keep_only_positives_gt,
           args.max_num_elements_in_context_features,
+          args.num_shards,
           args.output_type,
           args.max_clip_length,
           args.context_feature_length)
-- 
GitLab


From 007a619a1295b870d348ebe53b31172a93a4da44 Mon Sep 17 00:00:00 2001
From: Hongkun Yu
Date: Fri, 31 Jul 2020 12:16:53 -0700
Subject: [PATCH 088/128] PY3 migration

PiperOrigin-RevId: 324258617
---
 official/vision/detection/evaluation/coco_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/official/vision/detection/evaluation/coco_utils.py b/official/vision/detection/evaluation/coco_utils.py
index 8155d1fbb..a4f366850 100644
--- a/official/vision/detection/evaluation/coco_utils.py
+++ b/official/vision/detection/evaluation/coco_utils.py
@@ -237,7 +237,7 @@ def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
               (boxes[j, k, 3] - boxes[j, k, 1]) *
               (boxes[j, k, 2] - boxes[j, k, 0]))
         if 'masks' in groundtruths:
-          mask = Image.open(six.StringIO(groundtruths['masks'][i][j, k]))
+          mask = Image.open(six.BytesIO(groundtruths['masks'][i][j, k]))
           width, height = mask.size
           np_mask = (
               np.array(mask.getdata()).reshape(height, width).astype(np.uint8))
-- 
GitLab


From 43fb33412c637f8b638e3d6504de26986884b0d4 Mon Sep 17 00:00:00 2001
From: xinliupitt
Date: Fri, 31 Jul 2020 17:33:32 -0400
Subject: [PATCH 089/128] OnDeviceEmbedding

---
 .../modeling/layers/on_device_embedding.py    |  8 ++
 .../layers/on_device_embedding_test.py        | 77 +++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/official/nlp/modeling/layers/on_device_embedding.py b/official/nlp/modeling/layers/on_device_embedding.py
index 739cdb7e4..e20f47b1e 100644
--- a/official/nlp/modeling/layers/on_device_embedding.py
+++ b/official/nlp/modeling/layers/on_device_embedding.py
@@ -38,6 +38,9 @@ class OnDeviceEmbedding(tf.keras.layers.Layer):
       lookup. Defaults to False (that is, using tf.gather). Setting this option
       to True may improve performance, especially on small vocabulary sizes,
       but will generally require more memory.
+    use_scale: Whether to scale the output embeddings. Defaults to False
+      (that is, no scaling). Setting this option to True multiplies the
+      output embeddings by self._embedding_width ** 0.5.
""" def __init__(self, @@ -45,6 +48,7 @@ class OnDeviceEmbedding(tf.keras.layers.Layer): embedding_width, initializer="glorot_uniform", use_one_hot=False, + use_scale=False, **kwargs): super(OnDeviceEmbedding, self).__init__(**kwargs) @@ -52,6 +56,7 @@ class OnDeviceEmbedding(tf.keras.layers.Layer): self._embedding_width = embedding_width self._initializer = initializer self._use_one_hot = use_one_hot + self._use_scale = use_scale def get_config(self): config = { @@ -59,6 +64,7 @@ class OnDeviceEmbedding(tf.keras.layers.Layer): "embedding_width": self._embedding_width, "initializer": self._initializer, "use_one_hot": self._use_one_hot, + "use_scale": self._use_scale, } base_config = super(OnDeviceEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) @@ -85,4 +91,6 @@ class OnDeviceEmbedding(tf.keras.layers.Layer): # Work around b/142213824: prefer concat to shape over a Python list. tf.concat([tf.shape(inputs), [self._embedding_width]], axis=0)) embeddings.set_shape(inputs.shape.as_list() + [self._embedding_width]) + if self._use_scale: + embeddings *= self._embedding_width ** 0.5 return embeddings diff --git a/official/nlp/modeling/layers/on_device_embedding_test.py b/official/nlp/modeling/layers/on_device_embedding_test.py index e2b9b98f1..4907234a6 100644 --- a/official/nlp/modeling/layers/on_device_embedding_test.py +++ b/official/nlp/modeling/layers/on_device_embedding_test.py @@ -193,6 +193,83 @@ class OnDeviceEmbeddingTest(keras_parameterized.TestCase): output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) + def test_use_scale_layer_creation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, use_scale=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float32) + + def test_use_scale_layer_creation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, dtype=policy, + use_scale=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # The output should be the same as the input, save that it has an extra + # embedding_width dimension on the end. + expected_output_shape = [None, sequence_length, embedding_width] + self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) + self.assertEqual(output_tensor.dtype, tf.float16) + + def test_use_scale_layer_invocation(self): + vocab_size = 31 + embedding_width = 27 + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, use_scale=True) + # Create a 2-dimensional input (the first dimension is implicit). 
+ sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float32, output.dtype) + + def test_use_scale_layer_invocation_with_mixed_precision(self): + vocab_size = 31 + embedding_width = 27 + policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") + test_layer = on_device_embedding.OnDeviceEmbedding( + vocab_size=vocab_size, embedding_width=embedding_width, + dtype=policy, use_scale=True) + # Create a 2-dimensional input (the first dimension is implicit). + sequence_length = 23 + input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) + output_tensor = test_layer(input_tensor) + + # Create a model from the test layer. + model = tf.keras.Model(input_tensor, output_tensor) + + # Invoke the model on test data. We can't validate the output data itself + # (the NN is too complex) but this will rule out structural runtime errors. + batch_size = 3 + input_data = np.random.randint( + vocab_size, size=(batch_size, sequence_length)) + output = model.predict(input_data) + self.assertEqual(tf.float16, output.dtype) if __name__ == "__main__": tf.test.main() -- GitLab From 237a543595c68ba2bc2bbdb64c11e4a811879b21 Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Fri, 31 Jul 2020 20:02:14 -0400 Subject: [PATCH 090/128] remove layer creation --- .../layers/on_device_embedding_test.py | 34 ------------------- 1 file changed, 34 deletions(-) diff --git a/official/nlp/modeling/layers/on_device_embedding_test.py b/official/nlp/modeling/layers/on_device_embedding_test.py index 4907234a6..90c40f160 100644 --- a/official/nlp/modeling/layers/on_device_embedding_test.py +++ b/official/nlp/modeling/layers/on_device_embedding_test.py @@ -193,40 +193,6 @@ class OnDeviceEmbeddingTest(keras_parameterized.TestCase): output = model.predict(input_data) self.assertEqual(tf.float16, output.dtype) - def test_use_scale_layer_creation(self): - vocab_size = 31 - embedding_width = 27 - test_layer = on_device_embedding.OnDeviceEmbedding( - vocab_size=vocab_size, embedding_width=embedding_width, use_scale=True) - # Create a 2-dimensional input (the first dimension is implicit). - sequence_length = 23 - input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) - output_tensor = test_layer(input_tensor) - - # The output should be the same as the input, save that it has an extra - # embedding_width dimension on the end. - expected_output_shape = [None, sequence_length, embedding_width] - self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) - self.assertEqual(output_tensor.dtype, tf.float32) - - def test_use_scale_layer_creation_with_mixed_precision(self): - vocab_size = 31 - embedding_width = 27 - policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") - test_layer = on_device_embedding.OnDeviceEmbedding( - vocab_size=vocab_size, embedding_width=embedding_width, dtype=policy, - use_scale=True) - # Create a 2-dimensional input (the first dimension is implicit). 
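The use_scale option exercised by these tests follows the Transformer convention of multiplying embedding outputs by the square root of the embedding width. A minimal usage sketch (the vocab size and shapes here are arbitrary):

    import numpy as np
    import tensorflow as tf

    from official.nlp.modeling.layers import on_device_embedding

    layer = on_device_embedding.OnDeviceEmbedding(
        vocab_size=100, embedding_width=64, use_scale=True)
    ids = tf.constant(np.random.randint(100, size=(2, 8)))
    embeddings = layer(ids)  # shape (2, 8, 64); values scaled by 64 ** 0.5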
- sequence_length = 23 - input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) - output_tensor = test_layer(input_tensor) - - # The output should be the same as the input, save that it has an extra - # embedding_width dimension on the end. - expected_output_shape = [None, sequence_length, embedding_width] - self.assertEqual(expected_output_shape, output_tensor.shape.as_list()) - self.assertEqual(output_tensor.dtype, tf.float16) - def test_use_scale_layer_invocation(self): vocab_size = 31 embedding_width = 27 -- GitLab From 03a6d6c8e79b426231a4d5ba0cf45be9afc8bad5 Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Fri, 31 Jul 2020 21:57:32 -0700 Subject: [PATCH 091/128] Pin to tf-models-official 2.2.0 since 2.3.0 pulls TF 2.3 which is incompatible with numpy 1.19.1 PiperOrigin-RevId: 324352004 --- .../object_detection/packages/tf2/setup.py | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/research/object_detection/packages/tf2/setup.py b/research/object_detection/packages/tf2/setup.py index 1067b0ecf..73c270b3e 100644 --- a/research/object_detection/packages/tf2/setup.py +++ b/research/object_detection/packages/tf2/setup.py @@ -6,11 +6,23 @@ from setuptools import setup # Note: adding apache-beam to required packages causes conflict with # tf-models-offical requirements. These packages request for incompatible # oauth2client package. -REQUIRED_PACKAGES = ['avro-python3==1.8.1', 'apache-beam', - 'pillow', 'lxml', - 'matplotlib', 'Cython', 'contextlib2', - 'tf-slim', 'six', 'pycocotools', 'scipy', 'pandas', - 'tf-models-official'] +REQUIRED_PACKAGES = [ + # Required for apache-beam with PY3 + 'avro-python3==1.8.1', + 'apache-beam', + 'pillow', + 'lxml', + 'matplotlib', + 'Cython', + 'contextlib2', + 'tf-slim', + 'six', + 'pycocotools', + 'scipy', + 'pandas', + # Required to avoid Numpy 1.19.1 conflict with TF 2.3 + 'tf-models-official==2.2.2' +] setup( name='object_detection', -- GitLab From 227e58b7a9c4c47d9de017b9850947c50970980f Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Sat, 1 Aug 2020 12:15:04 -0400 Subject: [PATCH 092/128] remove mixed precision --- .../layers/on_device_embedding_test.py | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/official/nlp/modeling/layers/on_device_embedding_test.py b/official/nlp/modeling/layers/on_device_embedding_test.py index 90c40f160..1151def08 100644 --- a/official/nlp/modeling/layers/on_device_embedding_test.py +++ b/official/nlp/modeling/layers/on_device_embedding_test.py @@ -214,28 +214,5 @@ class OnDeviceEmbeddingTest(keras_parameterized.TestCase): output = model.predict(input_data) self.assertEqual(tf.float32, output.dtype) - def test_use_scale_layer_invocation_with_mixed_precision(self): - vocab_size = 31 - embedding_width = 27 - policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") - test_layer = on_device_embedding.OnDeviceEmbedding( - vocab_size=vocab_size, embedding_width=embedding_width, - dtype=policy, use_scale=True) - # Create a 2-dimensional input (the first dimension is implicit). - sequence_length = 23 - input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32) - output_tensor = test_layer(input_tensor) - - # Create a model from the test layer. - model = tf.keras.Model(input_tensor, output_tensor) - - # Invoke the model on test data. We can't validate the output data itself - # (the NN is too complex) but this will rule out structural runtime errors. 
- batch_size = 3 - input_data = np.random.randint( - vocab_size, size=(batch_size, sequence_length)) - output = model.predict(input_data) - self.assertEqual(tf.float16, output.dtype) - if __name__ == "__main__": tf.test.main() -- GitLab From ea8cc8cf69212df23b98333c28e6c4a2c4dcf279 Mon Sep 17 00:00:00 2001 From: Yiming Shi <44748569+syiming@users.noreply.github.com> Date: Mon, 3 Aug 2020 10:08:27 +0800 Subject: [PATCH 093/128] Update faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py Fix typo --- .../faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py index fdd597d84..91e5fb0bf 100644 --- a/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py +++ b/research/object_detection/models/faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py @@ -77,8 +77,7 @@ class _ResnetFPN(tf.keras.layers.Layer): feature_maps: A list of tensors with shape [batch, height, width, depth] represent extracted features. """ - inputs = - .pad_to_multiple(inputs, self.pad_to_multiple) + inputs = ops.pad_to_multiple(inputs, self.pad_to_multiple) backbone_outputs = self.classification_backbone(inputs) feature_block_list = [] -- GitLab From c07b073e657ab2bca3e4a84e672d8d289f7fd045 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Mon, 3 Aug 2020 05:17:57 -0700 Subject: [PATCH 094/128] adding config files for models with keypoints PiperOrigin-RevId: 324584977 --- ...urglass104_1024x1024_coco17_tpu-32.config} | 0 ...ass104_1024x1024_kpts_coco17_tpu-32.config | 374 +++++++++++++++++ ..._hourglass104_512x512_coco17_tpu-8.config} | 0 ...glass104_512x512_kpts_coco17_tpu-32.config | 395 ++++++++++++++++++ ...net101_v1_fpn_512x512_coco17_tpu-8.config} | 0 ...50_v1_fpn_512x512_kpts_coco17_tpu-8.config | 392 +++++++++++++++++ ...snet50_v2_512x512_kpts_coco17_tpu-8.config | 393 +++++++++++++++++ 7 files changed, 1554 insertions(+) rename research/object_detection/configs/tf2/{center_net_hourglass104_1024x1024_coco17_tpu-32.config => centernet_hourglass104_1024x1024_coco17_tpu-32.config} (100%) create mode 100644 research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config rename research/object_detection/configs/tf2/{center_net_hourglass104_512x512_coco17_tpu-8.config => centernet_hourglass104_512x512_coco17_tpu-8.config} (100%) create mode 100644 research/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config rename research/object_detection/configs/tf2/{center_net_resnet101_v1_fpn_512x512_coco17_tpu-8.config => centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config} (100%) create mode 100644 research/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config create mode 100644 research/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config diff --git a/research/object_detection/configs/tf2/center_net_hourglass104_1024x1024_coco17_tpu-32.config b/research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_coco17_tpu-32.config similarity index 100% rename from research/object_detection/configs/tf2/center_net_hourglass104_1024x1024_coco17_tpu-32.config rename to research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_coco17_tpu-32.config diff --git 
a/research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config b/research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config new file mode 100644 index 000000000..da7136f15 --- /dev/null +++ b/research/object_detection/configs/tf2/centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config @@ -0,0 +1,374 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. This config achieves an mAP of 42.8/64.5 +/- 0.16 on +# COCO 17 (averaged over 5 runs). This config is TPU compatible. +# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + channel_means: 104.01361846923828 + channel_means: 114.03422546386719 + channel_means: 119.91659545898438 + channel_stds: 73.60276794433594 + channel_stds: 69.89082336425781 + channel_stds: 70.91507720947266 + bgr_ordering: true + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 1024 + max_dimension: 1024 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.10000000149011612 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + min_box_overlap_iou: 0.699999988079071 + max_box_predictions: 100 + } + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.3499999940395355 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.7200000286102295 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.0700000524520874 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.7900000214576721 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.6200000047683716 + } + keypoint_label_to_std { + key: "nose" + value: 0.25999999046325684 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.3499999940395355 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.7200000286102295 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.0700000524520874 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.8899999856948853 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.7900000214576721 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.6200000047683716 + } + keypoint_regression_loss_weight: 0.10000000149011612 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} +train_config { + batch_size: 128 + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + 
keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + data_augmentation_options { + random_adjust_hue { + } + } + data_augmentation_options { + random_adjust_contrast { + } + } + data_augmentation_options { + random_adjust_saturation { + } + } + data_augmentation_options { + random_adjust_brightness { + } + } + data_augmentation_options { + random_square_crop_by_scale { + scale_min: 0.6000000238418579 + scale_max: 1.2999999523162842 + } + } + optimizer { + adam_optimizer { + learning_rate { + cosine_decay_learning_rate { + learning_rate_base: 0.0010000000474974513 + total_steps: 250000 + warmup_learning_rate: 0.0002500000118743628 + warmup_steps: 5000 + } + } + epsilon: 1.0000000116860974e-07 + } + use_moving_average: false + } + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + num_steps: 250000 + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + fine_tune_checkpoint_type: "detection" + fine_tune_checkpoint_version: V2 +} +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} +eval_config { + num_visualizations: 10 + metrics_set: "coco_detection_metrics" + use_moving_averages: false + min_score_threshold: 0.20000000298023224 + max_num_boxes_to_visualize: 20 + batch_size: 1 + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.08900000154972076 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.03500000014901161 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.07199999690055847 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.02500000037252903 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.10700000077486038 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.08699999749660492 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.07900000363588333 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.06199999898672104 + } + keypoint_label_to_sigmas { + key: "nose" + value: 0.026000000536441803 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.08900000154972076 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.03500000014901161 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.07199999690055847 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.02500000037252903 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.10700000077486038 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.08699999749660492 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.07900000363588333 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.06199999898672104 + } + } + } + keypoint_edge { + start: 0 + end: 1 + } + keypoint_edge { + start: 0 + end: 2 + } + keypoint_edge { + start: 1 + end: 3 + } + keypoint_edge { + start: 2 + end: 4 + } + keypoint_edge { + start: 0 + end: 5 + } + keypoint_edge { + start: 0 + end: 6 + } + keypoint_edge { + start: 5 + end: 7 + } + keypoint_edge { + start: 7 + end: 9 + 
} + keypoint_edge { + start: 6 + end: 8 + } + keypoint_edge { + start: 8 + end: 10 + } + keypoint_edge { + start: 5 + end: 6 + } + keypoint_edge { + start: 5 + end: 11 + } + keypoint_edge { + start: 6 + end: 12 + } + keypoint_edge { + start: 11 + end: 12 + } + keypoint_edge { + start: 11 + end: 13 + } + keypoint_edge { + start: 13 + end: 15 + } + keypoint_edge { + start: 12 + end: 14 + } + keypoint_edge { + start: 14 + end: 16 + } +} +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + num_keypoints: 17 +} diff --git a/research/object_detection/configs/tf2/center_net_hourglass104_512x512_coco17_tpu-8.config b/research/object_detection/configs/tf2/centernet_hourglass104_512x512_coco17_tpu-8.config similarity index 100% rename from research/object_detection/configs/tf2/center_net_hourglass104_512x512_coco17_tpu-8.config rename to research/object_detection/configs/tf2/centernet_hourglass104_512x512_coco17_tpu-8.config diff --git a/research/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config b/research/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config new file mode 100644 index 000000000..ce5652895 --- /dev/null +++ b/research/object_detection/configs/tf2/centernet_hourglass104_512x512_kpts_coco17_tpu-32.config @@ -0,0 +1,395 @@ +# CenterNet meta-architecture from the "Objects as Points" [2] paper with the +# hourglass[1] backbone. This config achieves an mAP of 40.0/61.4 +/- 0.16 on +# COCO 17 (averaged over 5 runs). This config is TPU compatible. +# [1]: https://arxiv.org/abs/1603.06937 +# [2]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "hourglass_104" + bgr_ordering: true + channel_means: [104.01362025, 114.03422265, 119.9165958 ] + channel_stds: [73.6027665 , 69.89082075, 70.9150767 ] + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + 
keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "detection" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
+  keypoint_edge {  # nose-left eye
+    start: 0
+    end: 1
+  }
+  keypoint_edge {  # nose-right eye
+    start: 0
+    end: 2
+  }
+  keypoint_edge {  # left eye-left ear
+    start: 1
+    end: 3
+  }
+  keypoint_edge {  # right eye-right ear
+    start: 2
+    end: 4
+  }
+  keypoint_edge {  # nose-left shoulder
+    start: 0
+    end: 5
+  }
+  keypoint_edge {  # nose-right shoulder
+    start: 0
+    end: 6
+  }
+  keypoint_edge {  # left shoulder-left elbow
+    start: 5
+    end: 7
+  }
+  keypoint_edge {  # left elbow-left wrist
+    start: 7
+    end: 9
+  }
+  keypoint_edge {  # right shoulder-right elbow
+    start: 6
+    end: 8
+  }
+  keypoint_edge {  # right elbow-right wrist
+    start: 8
+    end: 10
+  }
+  keypoint_edge {  # left shoulder-right shoulder
+    start: 5
+    end: 6
+  }
+  keypoint_edge {  # left shoulder-left hip
+    start: 5
+    end: 11
+  }
+  keypoint_edge {  # right shoulder-right hip
+    start: 6
+    end: 12
+  }
+  keypoint_edge {  # left hip-right hip
+    start: 11
+    end: 12
+  }
+  keypoint_edge {  # left hip-left knee
+    start: 11
+    end: 13
+  }
+  keypoint_edge {  # left knee-left ankle
+    start: 13
+    end: 15
+  }
+  keypoint_edge {  # right hip-right knee
+    start: 12
+    end: 14
+  }
+  keypoint_edge {  # right knee-right ankle
+    start: 14
+    end: 16
+  }
+}
+
+eval_input_reader: {
+  label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
+  tf_record_input_reader {
+    input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
+  }
+  num_keypoints: 17
+}
+
diff --git a/research/object_detection/configs/tf2/center_net_resnet101_v1_fpn_512x512_coco17_tpu-8.config b/research/object_detection/configs/tf2/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config
similarity index 100%
rename from research/object_detection/configs/tf2/center_net_resnet101_v1_fpn_512x512_coco17_tpu-8.config
rename to research/object_detection/configs/tf2/centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config
diff --git a/research/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config b/research/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config
new file mode 100644
index 000000000..ad25d5c34
--- /dev/null
+++ b/research/object_detection/configs/tf2/centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config
@@ -0,0 +1,392 @@
+# CenterNet meta-architecture from the "Objects as Points" [1] paper
+# with the ResNet-v1-50 backbone. The ResNet backbone has a few differences
+# as compared to the one mentioned in the paper, hence the performance is
+# slightly worse. This config is TPU compatible.
+# [1]: https://arxiv.org/abs/1904.07850 +# + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "resnet_v1_50_fpn" + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "classification" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
+  keypoint_edge {  # nose-left eye
+    start: 0
+    end: 1
+  }
+  keypoint_edge {  # nose-right eye
+    start: 0
+    end: 2
+  }
+  keypoint_edge {  # left eye-left ear
+    start: 1
+    end: 3
+  }
+  keypoint_edge {  # right eye-right ear
+    start: 2
+    end: 4
+  }
+  keypoint_edge {  # nose-left shoulder
+    start: 0
+    end: 5
+  }
+  keypoint_edge {  # nose-right shoulder
+    start: 0
+    end: 6
+  }
+  keypoint_edge {  # left shoulder-left elbow
+    start: 5
+    end: 7
+  }
+  keypoint_edge {  # left elbow-left wrist
+    start: 7
+    end: 9
+  }
+  keypoint_edge {  # right shoulder-right elbow
+    start: 6
+    end: 8
+  }
+  keypoint_edge {  # right elbow-right wrist
+    start: 8
+    end: 10
+  }
+  keypoint_edge {  # left shoulder-right shoulder
+    start: 5
+    end: 6
+  }
+  keypoint_edge {  # left shoulder-left hip
+    start: 5
+    end: 11
+  }
+  keypoint_edge {  # right shoulder-right hip
+    start: 6
+    end: 12
+  }
+  keypoint_edge {  # left hip-right hip
+    start: 11
+    end: 12
+  }
+  keypoint_edge {  # left hip-left knee
+    start: 11
+    end: 13
+  }
+  keypoint_edge {  # left knee-left ankle
+    start: 13
+    end: 15
+  }
+  keypoint_edge {  # right hip-right knee
+    start: 12
+    end: 14
+  }
+  keypoint_edge {  # right knee-right ankle
+    start: 14
+    end: 16
+  }
+}
+eval_input_reader: {
+  label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt"
+  shuffle: false
+  num_epochs: 1
+  tf_record_input_reader {
+    input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord"
+  }
+  num_keypoints: 17
+}
diff --git a/research/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config b/research/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config
new file mode 100644
index 000000000..3067ed417
--- /dev/null
+++ b/research/object_detection/configs/tf2/centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config
@@ -0,0 +1,393 @@
+# CenterNet meta-architecture from the "Objects as Points" [1] paper
+# with the ResNet-v2-50 backbone. The ResNet backbone has a few differences
+# as compared to the one mentioned in the paper, hence the performance is
+# slightly worse. This config is TPU compatible.
+# [1]: https://arxiv.org/abs/1904.07850 + +model { + center_net { + num_classes: 90 + feature_extractor { + type: "resnet_v2_50" + } + image_resizer { + keep_aspect_ratio_resizer { + min_dimension: 512 + max_dimension: 512 + pad_to_max_dimension: true + } + } + object_detection_task { + task_loss_weight: 1.0 + offset_loss_weight: 1.0 + scale_loss_weight: 0.1 + localization_loss { + l1_localization_loss { + } + } + } + object_center_params { + object_center_loss_weight: 1.0 + min_box_overlap_iou: 0.7 + max_box_predictions: 100 + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + + keypoint_label_map_path: "PATH_TO_BE_CONFIGURED" + keypoint_estimation_task { + task_name: "human_pose" + task_loss_weight: 1.0 + loss { + localization_loss { + l1_localization_loss { + } + } + classification_loss { + penalty_reduced_logistic_focal_loss { + alpha: 2.0 + beta: 4.0 + } + } + } + keypoint_class_name: "/m/01g317" + keypoint_label_to_std { + key: "left_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "left_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "left_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "left_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "left_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "left_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "left_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "left_wrist" + value: 0.62 + } + keypoint_label_to_std { + key: "nose" + value: 0.26 + } + keypoint_label_to_std { + key: "right_ankle" + value: 0.89 + } + keypoint_label_to_std { + key: "right_ear" + value: 0.35 + } + keypoint_label_to_std { + key: "right_elbow" + value: 0.72 + } + keypoint_label_to_std { + key: "right_eye" + value: 0.25 + } + keypoint_label_to_std { + key: "right_hip" + value: 1.07 + } + keypoint_label_to_std { + key: "right_knee" + value: 0.89 + } + keypoint_label_to_std { + key: "right_shoulder" + value: 0.79 + } + keypoint_label_to_std { + key: "right_wrist" + value: 0.62 + } + keypoint_regression_loss_weight: 0.1 + keypoint_heatmap_loss_weight: 1.0 + keypoint_offset_loss_weight: 1.0 + offset_peak_radius: 3 + per_keypoint_offset: true + } + } +} + +train_config: { + + batch_size: 128 + num_steps: 250000 + + data_augmentation_options { + random_horizontal_flip { + keypoint_flip_permutation: 0 + keypoint_flip_permutation: 2 + keypoint_flip_permutation: 1 + keypoint_flip_permutation: 4 + keypoint_flip_permutation: 3 + keypoint_flip_permutation: 6 + keypoint_flip_permutation: 5 + keypoint_flip_permutation: 8 + keypoint_flip_permutation: 7 + keypoint_flip_permutation: 10 + keypoint_flip_permutation: 9 + keypoint_flip_permutation: 12 + keypoint_flip_permutation: 11 + keypoint_flip_permutation: 14 + keypoint_flip_permutation: 13 + keypoint_flip_permutation: 16 + keypoint_flip_permutation: 15 + } + } + + data_augmentation_options { + random_crop_image { + min_aspect_ratio: 0.5 + max_aspect_ratio: 1.7 + random_coef: 0.25 + } + } + + + data_augmentation_options { + random_adjust_hue { + } + } + + data_augmentation_options { + random_adjust_contrast { + } + } + + data_augmentation_options { + random_adjust_saturation { + } + } + + data_augmentation_options { + random_adjust_brightness { + } + } + + data_augmentation_options { + random_absolute_pad_image { + max_height_padding: 200 + max_width_padding: 200 + pad_color: [0, 0, 0] + } + } + + optimizer { + adam_optimizer: { + epsilon: 1e-7 # Match tf.keras.optimizers.Adam's default. 
+ learning_rate: { + cosine_decay_learning_rate { + learning_rate_base: 1e-3 + total_steps: 250000 + warmup_learning_rate: 2.5e-4 + warmup_steps: 5000 + } + } + } + use_moving_average: false + } + max_number_of_boxes: 100 + unpad_groundtruth_tensors: false + + fine_tune_checkpoint_version: V2 + fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED" + fine_tune_checkpoint_type: "classification" +} + +train_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/train2017-?????-of-00256.tfrecord" + } + num_keypoints: 17 +} + +eval_config: { + metrics_set: "coco_detection_metrics" + use_moving_averages: false + num_visualizations: 10 + max_num_boxes_to_visualize: 20 + min_score_threshold: 0.2 + batch_size: 1; + parameterized_metric { + coco_keypoint_metrics { + class_label: "person" + keypoint_label_to_sigmas { + key: "nose" + value: 0.026 + } + keypoint_label_to_sigmas { + key: "left_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "right_eye" + value: 0.025 + } + keypoint_label_to_sigmas { + key: "left_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "right_ear" + value: 0.035 + } + keypoint_label_to_sigmas { + key: "left_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "right_shoulder" + value: 0.079 + } + keypoint_label_to_sigmas { + key: "left_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "right_elbow" + value: 0.072 + } + keypoint_label_to_sigmas { + key: "left_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "right_wrist" + value: 0.062 + } + keypoint_label_to_sigmas { + key: "left_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "right_hip" + value: 0.107 + } + keypoint_label_to_sigmas { + key: "left_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "right_knee" + value: 0.087 + } + keypoint_label_to_sigmas { + key: "left_ankle" + value: 0.089 + } + keypoint_label_to_sigmas { + key: "right_ankle" + value: 0.089 + } + } + } + # Provide the edges to connect the keypoints. The setting is suitable for + # COCO's 17 human pose keypoints. 
+ keypoint_edge { # nose-left eye + start: 0 + end: 1 + } + keypoint_edge { # nose-right eye + start: 0 + end: 2 + } + keypoint_edge { # left eye-left ear + start: 1 + end: 3 + } + keypoint_edge { # right eye-right ear + start: 2 + end: 4 + } + keypoint_edge { # nose-left shoulder + start: 0 + end: 5 + } + keypoint_edge { # nose-right shoulder + start: 0 + end: 6 + } + keypoint_edge { # left shoulder-left elbow + start: 5 + end: 7 + } + keypoint_edge { # left elbow-left wrist + start: 7 + end: 9 + } + keypoint_edge { # right shoulder-right elbow + start: 6 + end: 8 + } + keypoint_edge { # right elbow-right wrist + start: 8 + end: 10 + } + keypoint_edge { # left shoulder-right shoulder + start: 5 + end: 6 + } + keypoint_edge { # left shoulder-left hip + start: 5 + end: 11 + } + keypoint_edge { # right shoulder-right hip + start: 6 + end: 12 + } + keypoint_edge { # left hip-right hip + start: 11 + end: 12 + } + keypoint_edge { # left hip-left knee + start: 11 + end: 13 + } + keypoint_edge { # left knee-left ankle + start: 13 + end: 15 + } + keypoint_edge { # right hip-right knee + start: 12 + end: 14 + } + keypoint_edge { # right knee-right ankle + start: 14 + end: 16 + } +} + +eval_input_reader: { + label_map_path: "PATH_TO_BE_CONFIGURED/label_map.txt" + shuffle: false + num_epochs: 1 + tf_record_input_reader { + input_path: "PATH_TO_BE_CONFIGURED/val2017-?????-of-00032.tfrecord" + } + num_keypoints: 17 +} -- GitLab From d577681028e5f6298776a8c55859bfaf40cd4391 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 3 Aug 2020 10:05:11 -0700 Subject: [PATCH 095/128] Internal change PiperOrigin-RevId: 324625967 --- official/vision/detection/modeling/retinanet_model.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/official/vision/detection/modeling/retinanet_model.py b/official/vision/detection/modeling/retinanet_model.py index aa6e38c1e..a894f060f 100644 --- a/official/vision/detection/modeling/retinanet_model.py +++ b/official/vision/detection/modeling/retinanet_model.py @@ -59,11 +59,8 @@ class RetinanetModel(base_model.Model): self._transpose_input = params.train.transpose_input assert not self._transpose_input, 'Transpose input is not supported.' # Input layer. - input_shape = ( - params.retinanet_parser.output_size + - [params.retinanet_parser.num_channels]) self._input_layer = tf.keras.layers.Input( - shape=input_shape, name='', + shape=(None, None, params.retinanet_parser.num_channels), name='', dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32) def build_outputs(self, inputs, mode): -- GitLab From db87dd5b06dddbc8254d6eabeae26ec0eb70afd9 Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Mon, 3 Aug 2020 10:05:11 -0700 Subject: [PATCH 096/128] Enable input spec checking for Functional models. 
PiperOrigin-RevId: 324625967
---
 .../models/keras_models/resnet_v1_tf2_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/research/object_detection/models/keras_models/resnet_v1_tf2_test.py b/research/object_detection/models/keras_models/resnet_v1_tf2_test.py
index 9063efb24..4566bc8dd 100644
--- a/research/object_detection/models/keras_models/resnet_v1_tf2_test.py
+++ b/research/object_detection/models/keras_models/resnet_v1_tf2_test.py
@@ -202,9 +202,9 @@ class ResnetShapeTest(test_case.TestCase, parameterized.TestCase):
       })

   def test_output_shapes(self, resnet_type, output_layer_names):
     if resnet_type == 'resnet_v1_34':
-      model = resnet_v1.resnet_v1_34(weights=None)
+      model = resnet_v1.resnet_v1_34(input_shape=(64, 64, 3), weights=None)
     else:
-      model = resnet_v1.resnet_v1_18(weights=None)
+      model = resnet_v1.resnet_v1_18(input_shape=(64, 64, 3), weights=None)
     outputs = [
         model.get_layer(output_layer_name).output
         for output_layer_name in output_layer_names
-- GitLab

From c3b9742baf3a80e2d1a19d80c0c3137b0c4d9d61 Mon Sep 17 00:00:00 2001
From: Vivek Rathod
Date: Mon, 3 Aug 2020 10:21:58 -0700
Subject: [PATCH 097/128] 1. Modify GenerateEmbeddingDataFn to consume
 key_value tuple to support file formats with (key, value) entries. 2. Copy
 all fields from input tf examples before adding context fields.

PiperOrigin-RevId: 324629673
---
 .../context_rcnn/generate_embedding_data.py   | 89 +++++--------------
 .../generate_embedding_data_tf2_test.py      | 12 +--
 2 files changed, 30 insertions(+), 71 deletions(-)

diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py
index 30ea62835..6ec654e6c 100644
--- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py
+++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py
@@ -63,6 +63,15 @@ except ModuleNotFoundError:
   pass


+def add_keys(serialized_example):
+  key = hash(serialized_example)
+  return key, serialized_example
+
+
+def drop_keys(key_value_tuple):
+  return key_value_tuple[1]
+
+
 class GenerateEmbeddingDataFn(beam.DoFn):
   """Generates embedding data for camera trap images.
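The two helpers added above are the heart of this patch: each serialized example is keyed on entry, carried through the DoFn as a (key, value) tuple, and unkeyed again before writing. A minimal sketch of that pipeline shape, assuming Apache Beam is available and reusing the patch's `add_keys`/`drop_keys` (the element values are placeholders; note that Python's `hash` of bytes is seeded per process, so the keys are only stable within a single run):

```python
import apache_beam as beam

with beam.Pipeline() as pipeline:
  _ = (pipeline
       | 'Create' >> beam.Create([b'example-one', b'example-two'])
       | 'AddKeys' >> beam.Map(add_keys)     # bytes -> (hash(bytes), bytes)
       | 'Reshuffle' >> beam.Reshuffle()     # rebalances keyed records
       | 'DropKeys' >> beam.Map(drop_keys))  # (key, value) -> value
```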
@@ -97,11 +106,12 @@ class GenerateEmbeddingDataFn(beam.DoFn): with self.session_lock: self._detect_fn = tf.saved_model.load(self._model_dir) - def process(self, tfrecord_entry): - return self._run_inference_and_generate_embedding(tfrecord_entry) + def process(self, tfexample_key_value): + return self._run_inference_and_generate_embedding(tfexample_key_value) - def _run_inference_and_generate_embedding(self, tfrecord_entry): - input_example = tf.train.Example.FromString(tfrecord_entry) + def _run_inference_and_generate_embedding(self, tfexample_key_value): + key, tfexample = tfexample_key_value + input_example = tf.train.Example.FromString(tfexample) # Convert date_captured datetime string to unix time integer and store def get_date_captured(example): @@ -161,11 +171,12 @@ class GenerateEmbeddingDataFn(beam.DoFn): (date_captured - datetime.datetime.fromtimestamp(0)).total_seconds()) example = tf.train.Example() + example.CopyFrom(input_example) example.features.feature['image/unix_time'].float_list.value.extend( [unix_time]) detections = self._detect_fn.signatures['serving_default']( - (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0))) + (tf.expand_dims(tf.convert_to_tensor(tfexample), 0))) detection_features = detections['detection_features'] detection_boxes = detections['detection_boxes'] num_detections = detections['num_detections'] @@ -230,60 +241,8 @@ class GenerateEmbeddingDataFn(beam.DoFn): example.features.feature['image/embedding_count'].int64_list.value.append( embedding_count) - # Add other essential example attributes - example.features.feature['image/encoded'].bytes_list.value.extend( - input_example.features.feature['image/encoded'].bytes_list.value) - example.features.feature['image/height'].int64_list.value.extend( - input_example.features.feature['image/height'].int64_list.value) - example.features.feature['image/width'].int64_list.value.extend( - input_example.features.feature['image/width'].int64_list.value) - example.features.feature['image/source_id'].bytes_list.value.extend( - input_example.features.feature['image/source_id'].bytes_list.value) - example.features.feature['image/location'].bytes_list.value.extend( - input_example.features.feature['image/location'].bytes_list.value) - - example.features.feature['image/date_captured'].bytes_list.value.extend( - input_example.features.feature['image/date_captured'].bytes_list.value) - - example.features.feature['image/class/text'].bytes_list.value.extend( - input_example.features.feature['image/class/text'].bytes_list.value) - example.features.feature['image/class/label'].int64_list.value.extend( - input_example.features.feature['image/class/label'].int64_list.value) - - example.features.feature['image/seq_id'].bytes_list.value.extend( - input_example.features.feature['image/seq_id'].bytes_list.value) - example.features.feature['image/seq_num_frames'].int64_list.value.extend( - input_example.features.feature['image/seq_num_frames'].int64_list.value) - example.features.feature['image/seq_frame_num'].int64_list.value.extend( - input_example.features.feature['image/seq_frame_num'].int64_list.value) - - example.features.feature['image/object/bbox/ymax'].float_list.value.extend( - input_example.features.feature[ - 'image/object/bbox/ymax'].float_list.value) - example.features.feature['image/object/bbox/ymin'].float_list.value.extend( - input_example.features.feature[ - 'image/object/bbox/ymin'].float_list.value) - example.features.feature['image/object/bbox/xmax'].float_list.value.extend( - 
input_example.features.feature[ - 'image/object/bbox/xmax'].float_list.value) - example.features.feature['image/object/bbox/xmin'].float_list.value.extend( - input_example.features.feature[ - 'image/object/bbox/xmin'].float_list.value) - example.features.feature[ - 'image/object/class/score'].float_list.value.extend( - input_example.features.feature[ - 'image/object/class/score'].float_list.value) - example.features.feature[ - 'image/object/class/label'].int64_list.value.extend( - input_example.features.feature[ - 'image/object/class/label'].int64_list.value) - example.features.feature[ - 'image/object/class/text'].bytes_list.value.extend( - input_example.features.feature[ - 'image/object/class/text'].bytes_list.value) - self._num_examples_processed.inc(1) - return [example] + return [(key, example)] def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir, @@ -303,16 +262,17 @@ def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir, """ input_collection = ( pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord( - input_tfrecord, - coder=beam.coders.BytesCoder())) + input_tfrecord, coder=beam.coders.BytesCoder()) + | 'AddKeys' >> beam.Map(add_keys)) output_collection = input_collection | 'ExtractEmbedding' >> beam.ParDo( GenerateEmbeddingDataFn(model_dir, top_k_embedding_count, bottom_k_embedding_count)) output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle() - _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( - output_tfrecord, - num_shards=num_shards, - coder=beam.coders.ProtoCoder(tf.train.Example)) + _ = output_collection | 'DropKeys' >> beam.Map( + drop_keys) | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord( + output_tfrecord, + num_shards=num_shards, + coder=beam.coders.ProtoCoder(tf.train.Example)) def parse_args(argv): @@ -395,4 +355,3 @@ def main(argv=None, save_main_session=True): if __name__ == '__main__': main() - diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py index 5c8503c98..921ef9c4a 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py @@ -258,8 +258,8 @@ class GenerateEmbeddingData(tf.test.TestCase): self.assertAllEqual(tf.train.Example.FromString( generated_example).features.feature['image/object/class/text'] .bytes_list.value, [b'hyena']) - output = inference_fn.process(generated_example) - output_example = output[0] + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] self.assert_expected_example(output_example) def test_generate_embedding_data_with_top_k_boxes(self): @@ -276,8 +276,8 @@ class GenerateEmbeddingData(tf.test.TestCase): self.assertAllEqual( tf.train.Example.FromString(generated_example).features .feature['image/object/class/text'].bytes_list.value, [b'hyena']) - output = inference_fn.process(generated_example) - output_example = output[0] + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] self.assert_expected_example(output_example, topk=True) def test_generate_embedding_data_with_bottom_k_boxes(self): @@ -294,8 +294,8 @@ class GenerateEmbeddingData(tf.test.TestCase): self.assertAllEqual( tf.train.Example.FromString(generated_example).features 
.feature['image/object/class/text'].bytes_list.value, [b'hyena']) - output = inference_fn.process(generated_example) - output_example = output[0] + output = inference_fn.process(('dummy_key', generated_example)) + output_example = output[0][1] self.assert_expected_example(output_example, botk=True) def test_beam_pipeline(self): -- GitLab From 507a8d3c01a0c492c371807b7014e8c423ecf2b9 Mon Sep 17 00:00:00 2001 From: Chen Chen Date: Mon, 3 Aug 2020 10:25:17 -0700 Subject: [PATCH 098/128] Add the link to BERT-Base, Multilingual Cased checkpoint in the README file. PiperOrigin-RevId: 324630399 --- official/nlp/bert/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/official/nlp/bert/README.md b/official/nlp/bert/README.md index c26a87df5..62d1e2581 100644 --- a/official/nlp/bert/README.md +++ b/official/nlp/bert/README.md @@ -46,6 +46,8 @@ The new checkpoints are:** 12-layer, 768-hidden, 12-heads , 110M parameters * **[`BERT-Large, Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-24_H-1024_A-16.tar.gz)**: 24-layer, 1024-hidden, 16-heads, 340M parameters +* **[`BERT-Base, Multilingual Cased`](https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/multi_cased_L-12_H-768_A-12.tar.gz)**: + 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters We recommend to host checkpoints on Google Cloud storage buckets when you use Cloud GPU/TPU. -- GitLab From eab781187e1b3de41301f87434fca025f3c4bf10 Mon Sep 17 00:00:00 2001 From: Vighnesh Birodkar Date: Mon, 3 Aug 2020 12:05:04 -0700 Subject: [PATCH 099/128] Handle label_confidences in random_square_crop_by_scale. PiperOrigin-RevId: 324652818 --- .../object_detection/core/preprocessor.py | 19 +++++++++++++------ .../core/preprocessor_test.py | 9 ++++++--- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/research/object_detection/core/preprocessor.py b/research/object_detection/core/preprocessor.py index 6cebfd991..adb720ca9 100644 --- a/research/object_detection/core/preprocessor.py +++ b/research/object_detection/core/preprocessor.py @@ -3971,9 +3971,10 @@ def _get_crop_border(border, size): def random_square_crop_by_scale(image, boxes, labels, label_weights, - masks=None, keypoints=None, max_border=128, - scale_min=0.6, scale_max=1.3, num_scales=8, - seed=None, preprocess_vars_cache=None): + label_confidences=None, masks=None, + keypoints=None, max_border=128, scale_min=0.6, + scale_max=1.3, num_scales=8, seed=None, + preprocess_vars_cache=None): """Randomly crop a square in proportion to scale and image size. Extract a square sized crop from an image whose side length is sampled by @@ -3993,6 +3994,8 @@ def random_square_crop_by_scale(image, boxes, labels, label_weights, labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. @@ -4021,6 +4024,8 @@ def random_square_crop_by_scale(image, boxes, labels, label_weights, Boxes are in normalized form. labels: new labels. label_weights: rank 1 float32 tensor with shape [num_instances]. + label_confidences: (optional) float32 tensor of shape [num_instances] + representing the confidence for each box. 
masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. @@ -4110,6 +4115,9 @@ def random_square_crop_by_scale(image, boxes, labels, label_weights, tf.gather(labels, indices), tf.gather(label_weights, indices)] + if label_confidences is not None: + return_values.append(tf.gather(label_confidences, indices)) + if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = new_masks[:, ymin:ymax, xmin:xmax] @@ -4483,8 +4491,8 @@ def get_default_func_arg_map(include_label_weights=True, (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, - groundtruth_label_weights, groundtruth_instance_masks, - groundtruth_keypoints), + groundtruth_label_weights, groundtruth_label_confidences, + groundtruth_instance_masks, groundtruth_keypoints), random_scale_crop_and_pad_to_square: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, @@ -4541,7 +4549,6 @@ def preprocess(tensor_dict, """ if func_arg_map is None: func_arg_map = get_default_func_arg_map() - # changes the images to image (rank 4 to rank 3) since the functions # receive rank 3 tensor for image if fields.InputDataFields.image in tensor_dict: diff --git a/research/object_detection/core/preprocessor_test.py b/research/object_detection/core/preprocessor_test.py index 396ff96da..e963787f5 100644 --- a/research/object_detection/core/preprocessor_test.py +++ b/research/object_detection/core/preprocessor_test.py @@ -3814,21 +3814,23 @@ class PreprocessorTest(test_case.TestCase, parameterized.TestCase): boxes = tf.constant([[0.25, .25, .75, .75]]) labels = tf.constant([[1]]) + label_confidences = tf.constant([0.75]) label_weights = tf.constant([[1.]]) - (new_image, new_boxes, _, _, new_masks, + (new_image, new_boxes, _, _, new_confidences, new_masks, new_keypoints) = preprocessor.random_square_crop_by_scale( image, boxes, labels, label_weights, + label_confidences, masks=masks, keypoints=keypoints, max_border=256, scale_min=scale, scale_max=scale) - return new_image, new_boxes, new_masks, new_keypoints - image, boxes, masks, keypoints = self.execute_cpu(graph_fn, []) + return new_image, new_boxes, new_confidences, new_masks, new_keypoints + image, boxes, confidences, masks, keypoints = self.execute_cpu(graph_fn, []) ymin, xmin, ymax, xmax = boxes[0] self.assertAlmostEqual(ymax - ymin, 0.5 / scale) self.assertAlmostEqual(xmax - xmin, 0.5 / scale) @@ -3842,6 +3844,7 @@ class PreprocessorTest(test_case.TestCase, parameterized.TestCase): self.assertAlmostEqual(scale * 256.0, size) self.assertAllClose(image[:, :, 0], masks[0, :, :]) + self.assertAllClose(confidences, [0.75]) @parameterized.named_parameters(('scale_0_1', 0.1), ('scale_1_0', 1.0), ('scale_2_0', 2.0)) -- GitLab From f1b3f22c75b996b8c62fdeb937875e3219d925a5 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Mon, 3 Aug 2020 12:18:22 -0700 Subject: [PATCH 100/128] Monitor the Keras' global batch size. 
PiperOrigin-RevId: 324656070 --- official/utils/misc/keras_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/official/utils/misc/keras_utils.py b/official/utils/misc/keras_utils.py index 2cca51f1d..15e519f75 100644 --- a/official/utils/misc/keras_utils.py +++ b/official/utils/misc/keras_utils.py @@ -26,6 +26,12 @@ from absl import logging import tensorflow as tf +from tensorflow.python.eager import monitoring + +global_batch_size_gauge = monitoring.IntGauge( + '/tensorflow/training/global_batch_size', 'TF training global batch size') + + class BatchTimestamp(object): """A structure to store batch time stamp.""" @@ -60,6 +66,8 @@ class TimeHistory(tf.keras.callbacks.Callback): self.steps_in_epoch = 0 self.start_time = None + global_batch_size_gauge.get_cell().set(batch_size) + if logdir: self.summary_writer = tf.summary.create_file_writer(logdir) else: -- GitLab From 5dc781149d92b1c388599f899d701a9fa6297115 Mon Sep 17 00:00:00 2001 From: Chen Chen Date: Mon, 3 Aug 2020 12:22:07 -0700 Subject: [PATCH 101/128] Change the version of tf models nightly pip to 2.3.0 and the required tensorflow package for non-nightly pip to tensorflow>=2.3.0. PiperOrigin-RevId: 324656938 --- official/pip_package/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/official/pip_package/setup.py b/official/pip_package/setup.py index 760314600..3552a357b 100644 --- a/official/pip_package/setup.py +++ b/official/pip_package/setup.py @@ -20,7 +20,7 @@ import sys from setuptools import find_packages from setuptools import setup -version = '2.2.0' +version = '2.3.0' project_name = 'tf-models-official' @@ -60,7 +60,7 @@ if project_name == 'tf-models-nightly': version += '.dev' + datetime.datetime.now().strftime('%Y%m%d') install_requires.append('tf-nightly') else: - install_requires.append('tensorflow>=2.2.0') + install_requires.append('tensorflow>=2.3.0') print('install_requires: ', install_requires) print('dependency_links: ', dependency_links) -- GitLab From 76b4d0e774d6b744a0d0d38a18e216237b1fae71 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Mon, 3 Aug 2020 12:33:44 -0700 Subject: [PATCH 102/128] Introduce the abstract class for dataloaders. load() is required. PiperOrigin-RevId: 324659665 --- official/nlp/data/data_loader.py | 48 +++++++++++++++++++ official/nlp/data/pretrain_dataloader.py | 3 +- .../nlp/data/question_answering_dataloader.py | 3 +- .../data/sentence_prediction_dataloader.py | 3 +- 4 files changed, 54 insertions(+), 3 deletions(-) create mode 100644 official/nlp/data/data_loader.py diff --git a/official/nlp/data/data_loader.py b/official/nlp/data/data_loader.py new file mode 100644 index 000000000..f5cf47409 --- /dev/null +++ b/official/nlp/data/data_loader.py @@ -0,0 +1,48 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""An abstraction that NLP models define input pipelines.""" + +import abc +from typing import Optional + +import tensorflow as tf + + +class DataLoader(metaclass=abc.ABCMeta): + """An abstract class defining the APIs for tf.data input pipeline.""" + + @abc.abstractmethod + def load( + self, + input_context: Optional[tf.distribute.InputContext] = None + ) -> tf.data.Dataset: + """Implements DataLoader load method. + + Builds the entire input pipeline inside the load method. Users can define + states inside the DataLoader class and returns a tf.data dataset + object. + + Args: + input_context: This is a context class that is passed to the user's input + function and contains information about the compute replicas and input + pipelines. This object is used for multi-host inputs and passed by + the distribution strategy. + + Returns: + A per-host tf.data dataset. Note that, we usually create the distributed + dataset through the load method, so we should not directly return a + distributed dataset here. + """ + pass diff --git a/official/nlp/data/pretrain_dataloader.py b/official/nlp/data/pretrain_dataloader.py index 985a7a5cc..e8db35e1b 100644 --- a/official/nlp/data/pretrain_dataloader.py +++ b/official/nlp/data/pretrain_dataloader.py @@ -21,6 +21,7 @@ import tensorflow as tf from official.core import input_reader from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.data import data_loader from official.nlp.data import data_loader_factory @@ -37,7 +38,7 @@ class BertPretrainDataConfig(cfg.DataConfig): @data_loader_factory.register_data_loader_cls(BertPretrainDataConfig) -class BertPretrainDataLoader: +class BertPretrainDataLoader(data_loader.DataLoader): """A class to load dataset for bert pretraining task.""" def __init__(self, params): diff --git a/official/nlp/data/question_answering_dataloader.py b/official/nlp/data/question_answering_dataloader.py index 08c7047e4..25dbc3b17 100644 --- a/official/nlp/data/question_answering_dataloader.py +++ b/official/nlp/data/question_answering_dataloader.py @@ -20,6 +20,7 @@ import tensorflow as tf from official.core import input_reader from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.data import data_loader from official.nlp.data import data_loader_factory @@ -42,7 +43,7 @@ class QADataConfig(cfg.DataConfig): @data_loader_factory.register_data_loader_cls(QADataConfig) -class QuestionAnsweringDataLoader: +class QuestionAnsweringDataLoader(data_loader.DataLoader): """A class to load dataset for sentence prediction (classification) task.""" def __init__(self, params): diff --git a/official/nlp/data/sentence_prediction_dataloader.py b/official/nlp/data/sentence_prediction_dataloader.py index 57c068c86..ddd0ebcca 100644 --- a/official/nlp/data/sentence_prediction_dataloader.py +++ b/official/nlp/data/sentence_prediction_dataloader.py @@ -20,6 +20,7 @@ import tensorflow as tf from official.core import input_reader from official.modeling.hyperparams import config_definitions as cfg +from official.nlp.data import data_loader from official.nlp.data import data_loader_factory @@ -37,7 +38,7 @@ class SentencePredictionDataConfig(cfg.DataConfig): @data_loader_factory.register_data_loader_cls(SentencePredictionDataConfig) -class SentencePredictionDataLoader: +class SentencePredictionDataLoader(data_loader.DataLoader): """A class to load dataset for sentence prediction (classification) task.""" def 
__init__(self, params): -- GitLab From a3ae12584f55254e13e97344a51487a1e0a1cb94 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 19:44:35 +0000 Subject: [PATCH 103/128] add files for exporter --- .../object_detection/exporter_lib_tf2_test.py | 50 +++++++++++++++- research/object_detection/exporter_lib_v2.py | 60 +++++++++++++++---- research/object_detection/exporter_main_v2.py | 24 +++++++- 3 files changed, 119 insertions(+), 15 deletions(-) diff --git a/research/object_detection/exporter_lib_tf2_test.py b/research/object_detection/exporter_lib_tf2_test.py index 99cbf263b..d89761479 100644 --- a/research/object_detection/exporter_lib_tf2_test.py +++ b/research/object_detection/exporter_lib_tf2_test.py @@ -49,13 +49,18 @@ class FakeModel(model.DetectionModel): filters=1, kernel_size=1, strides=(1, 1), padding='valid', kernel_initializer=tf.keras.initializers.Constant( value=conv_weight_scalar)) + #self._conv(tf.ones([1, 10, 10, 3])) def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. return tf.identity(inputs), true_image_shapes - def predict(self, preprocessed_inputs, true_image_shapes): - return {'image': self._conv(preprocessed_inputs)} + def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): + return_dict = {'image': self._conv(preprocessed_inputs)} + print("SIDE INPUTS: ", side_inputs) + if 'side_inp' in side_inputs: + return_dict['image'] += side_inputs['side_inp'] + return return_dict def postprocess(self, prediction_dict, true_image_shapes): predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) @@ -189,7 +194,7 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) image = self.get_dummy_input(input_type) - detections = detect_fn(image) + detections = detect_fn.signatures['serving_default'](tf.constant(image)) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], @@ -203,6 +208,45 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): [[1, 2], [2, 1]]) self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) + def test_export_saved_model_and_run_inference_with_side_inputs( + self, input_type='image_tensor'): + tmp_dir = self.get_temp_dir() + self._save_checkpoint_from_mock_model(tmp_dir) + with mock.patch.object( + model_builder, 'build', autospec=True) as mock_builder: + mock_builder.return_value = FakeModel() + output_directory = os.path.join(tmp_dir, 'output') + pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() + exporter_lib_v2.export_inference_graph( + input_type=input_type, + pipeline_config=pipeline_config, + trained_checkpoint_dir=tmp_dir, + output_directory=output_directory, + use_side_inputs=True, + side_input_shapes="1", + side_input_names="side_inp", + side_input_types="tf.float32") + + saved_model_path = os.path.join(output_directory, 'saved_model') + detect_fn = tf.saved_model.load(saved_model_path) + detect_fn_sig = detect_fn.signatures['serving_default'] + image = tf.constant(self.get_dummy_input(input_type)) + side_input = np.ones((1,), dtype=np.float32) + #detections_one = tf.saved_model.load(saved_model_path)(image, side_input) + detections = detect_fn_sig(input_tensor=image, side_inp=tf.constant(side_input)) + + detection_fields = fields.DetectionResultFields + self.assertAllClose(detections[detection_fields.detection_boxes], + [[[0.0, 0.0, 
0.5, 0.5], + [0.5, 0.5, 0.8, 0.8]], + [[0.5, 0.5, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0]]]) + self.assertAllClose(detections[detection_fields.detection_scores], + [[400.7, 400.6], [400.9, 400.0]]) + self.assertAllClose(detections[detection_fields.detection_classes], + [[1, 2], [2, 1]]) + self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) + def test_export_checkpoint_and_run_inference_with_image(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0) diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index a7ecb45ad..fcf0479b6 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -40,7 +40,11 @@ def _decode_tf_example(tf_example_string_tensor): class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" - def __init__(self, detection_model): + def __init__(self, detection_model, + use_side_inputs=False, + side_input_shapes=None, + side_input_types=None, + side_input_names=None): """Initializes a module for detection. Args: @@ -48,7 +52,7 @@ class DetectionInferenceModule(tf.Module): """ self._model = detection_model - def _run_inference_on_images(self, image): + def _run_inference_on_images(self, image, **kwargs): """Cast image to float and run inference. Args: @@ -60,7 +64,7 @@ class DetectionInferenceModule(tf.Module): image = tf.cast(image, tf.float32) image, shapes = self._model.preprocess(image) - prediction_dict = self._model.predict(image, shapes) + prediction_dict = self._model.predict(image, shapes, **kwargs) detections = self._model.postprocess(prediction_dict, shapes) classes_field = fields.DetectionResultFields.detection_classes detections[classes_field] = ( @@ -71,15 +75,39 @@ class DetectionInferenceModule(tf.Module): return detections - class DetectionFromImageModule(DetectionInferenceModule): """Detection Inference Module for image inputs.""" - @tf.function( - input_signature=[ - tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)]) - def __call__(self, input_tensor): - return self._run_inference_on_images(input_tensor) + def __init__(self, detection_model, + use_side_inputs=False, + side_input_shapes="", + side_input_types="", + side_input_names=""): + """Initializes a module for detection. + + Args: + detection_model: The detection model to use for inference. + """ + self.side_input_names = side_input_names + sig = [tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)] + if use_side_inputs: + for info in zip(side_input_shapes.split("/"), + side_input_types.split(","), + side_input_names.split(",")): + sig.append(tf.TensorSpec(shape=eval("[" + info[0] + "]"), + dtype=eval(info[1]), + name=info[2])) + + def __call__(input_tensor, *side_inputs): + kwargs = dict(zip(self.side_input_names.split(","), side_inputs)) + return self._run_inference_on_images(input_tensor, **kwargs) + + self.__call__ = tf.function(__call__, input_signature=sig) + + super(DetectionFromImageModule, self).__init__(detection_model, + side_input_shapes, + side_input_types, + side_input_names) class DetectionFromFloatImageModule(DetectionInferenceModule): @@ -133,7 +161,11 @@ DETECTION_MODULE_MAP = { def export_inference_graph(input_type, pipeline_config, trained_checkpoint_dir, - output_directory): + output_directory, + use_side_inputs=False, + side_input_shapes="", + side_input_types="", + side_input_names=""): """Exports inference graph for the model specified in the pipeline config. 
This function creates `output_directory` if it does not already exist, @@ -164,7 +196,13 @@ def export_inference_graph(input_type, if input_type not in DETECTION_MODULE_MAP: raise ValueError('Unrecognized `input_type`') - detection_module = DETECTION_MODULE_MAP[input_type](detection_model) + if use_side_inputs and input_type != 'image_tensor': + raise ValueError('Side inputs supported for image_tensor input type only.') + detection_module = DETECTION_MODULE_MAP[input_type](detection_model, + use_side_inputs, + side_input_shapes, + side_input_types, + side_input_names) # Getting the concrete function traces the graph and forces variables to # be constructed --- only after this can we save the checkpoint and # saved model. diff --git a/research/object_detection/exporter_main_v2.py b/research/object_detection/exporter_main_v2.py index a2ba84560..c3dce5d71 100644 --- a/research/object_detection/exporter_main_v2.py +++ b/research/object_detection/exporter_main_v2.py @@ -106,6 +106,27 @@ flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string('config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') +flags.DEFINE_boolean('use_side_inputs', False, + 'If True, uses side inputs as well as image inputs.') +flags.DEFINE_string('side_input_shapes', "", + 'If use_side_inputs is True, this explicitly sets ' + 'the shape of the side input tensors to a fixed size. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of integers. A value of -1 can be used for unknown ' + 'dimensions. A `/` denotes a break, starting the shape of ' + 'the next side input tensor. This flag is required if ' + 'using side inputs.') +flags.DEFINE_string('side_input_types', "", + 'If use_side_inputs is True, this explicitly sets ' + 'the type of the side input tensors. The ' + 'dimensions are to be provided as a comma-separated list ' + 'of types, each of `string`, `integer`, or `float`. ' + 'This flag is required if using side inputs.') +flags.DEFINE_string('side_input_names', "", + 'If use_side_inputs is True, this explicitly sets ' + 'the names of the side input tensors required by the model ' + 'assuming the names will be a comma-separated list of ' + 'strings. 
This flag is required if using side inputs.') flags.mark_flag_as_required('pipeline_config_path') flags.mark_flag_as_required('trained_checkpoint_dir') @@ -119,7 +140,8 @@ def main(_): text_format.Merge(FLAGS.config_override, pipeline_config) exporter_lib_v2.export_inference_graph( FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir, - FLAGS.output_directory) + FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes, + FLAGS.side_input_types, FLAGS.side_input_names) if __name__ == '__main__': -- GitLab From f6bf56d77094164efab7d1dcd241e7cf1584e8d0 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 20:41:22 +0000 Subject: [PATCH 104/128] clean and update exporter --- .../object_detection/exporter_lib_tf2_test.py | 6 +- research/object_detection/exporter_lib_v2.py | 59 +++++++++++++------ 2 files changed, 44 insertions(+), 21 deletions(-) diff --git a/research/object_detection/exporter_lib_tf2_test.py b/research/object_detection/exporter_lib_tf2_test.py index d89761479..fa6ba3aa4 100644 --- a/research/object_detection/exporter_lib_tf2_test.py +++ b/research/object_detection/exporter_lib_tf2_test.py @@ -194,7 +194,7 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) image = self.get_dummy_input(input_type) - detections = detect_fn.signatures['serving_default'](tf.constant(image)) + detections = detect_fn(tf.constant(image)) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], @@ -232,8 +232,8 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): detect_fn_sig = detect_fn.signatures['serving_default'] image = tf.constant(self.get_dummy_input(input_type)) side_input = np.ones((1,), dtype=np.float32) - #detections_one = tf.saved_model.load(saved_model_path)(image, side_input) - detections = detect_fn_sig(input_tensor=image, side_inp=tf.constant(side_input)) + detections = detect_fn_sig(input_tensor=image, + side_inp=tf.constant(side_input)) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index fcf0479b6..2fdd6576f 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -36,15 +36,33 @@ def _decode_tf_example(tf_example_string_tensor): image_tensor = tensor_dict[fields.InputDataFields.image] return image_tensor +def _zip_side_inputs(side_input_shapes="", + side_input_types="", + side_input_names=""): + """Zips the side inputs together. + + Args: + side_input_shapes: forward-slash-separated list of comma-separated lists + describing input shapes. + side_input_types: comma-separated list of the types of the inputs. + side_input_names: comma-separated list of the names of the inputs. + + Returns: + a zipped list of side input tuples. 
+ """ + side_input_shapes = list(map(lambda x: eval('[' + x + ']'), + side_input_shapes.split("/"))) + side_input_types = list(map(eval, side_input_types.split(","))) + return zip(side_input_shapes, + side_input_types, + side_input_names.split(",")) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" def __init__(self, detection_model, use_side_inputs=False, - side_input_shapes=None, - side_input_types=None, - side_input_names=None): + zipped_side_inputs=None: """Initializes a module for detection. Args: @@ -75,27 +93,26 @@ class DetectionInferenceModule(tf.Module): return detections + class DetectionFromImageModule(DetectionInferenceModule): """Detection Inference Module for image inputs.""" def __init__(self, detection_model, use_side_inputs=False, - side_input_shapes="", - side_input_types="", - side_input_names=""): + zipped_side_inputs=None): """Initializes a module for detection. Args: detection_model: The detection model to use for inference. + """ - self.side_input_names = side_input_names + self.side_input_names = [] sig = [tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)] if use_side_inputs: - for info in zip(side_input_shapes.split("/"), - side_input_types.split(","), - side_input_names.split(",")): - sig.append(tf.TensorSpec(shape=eval("[" + info[0] + "]"), - dtype=eval(info[1]), + for info in zipped_side_inputs: + self.side_input_names.append(info[2]) + sig.append(tf.TensorSpec(shape=info[0], + dtype=info[1], name=info[2])) def __call__(input_tensor, *side_inputs): @@ -105,9 +122,8 @@ class DetectionFromImageModule(DetectionInferenceModule): self.__call__ = tf.function(__call__, input_signature=sig) super(DetectionFromImageModule, self).__init__(detection_model, - side_input_shapes, - side_input_types, - side_input_names) + use_side_inputs, + zipped_side_inputs) class DetectionFromFloatImageModule(DetectionInferenceModule): @@ -179,6 +195,12 @@ def export_inference_graph(input_type, pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. trained_checkpoint_dir: Path to the trained checkpoint file. output_directory: Path to write outputs. + use_side_inputs: boolean that determines whether side inputs should be + included in the input signature. + side_input_shapes: forward-slash-separated list of comma-separated lists + describing input shapes. + side_input_types: comma-separated list of the types of the inputs. + side_input_names: comma-separated list of the names of the inputs. Raises: ValueError: if input_type is invalid. """ @@ -198,11 +220,12 @@ def export_inference_graph(input_type, raise ValueError('Unrecognized `input_type`') if use_side_inputs and input_type != 'image_tensor': raise ValueError('Side inputs supported for image_tensor input type only.') + zipped_side_inputs = _zip_side_inputs(side_input_shapes, + side_input_types, + side_input_names) detection_module = DETECTION_MODULE_MAP[input_type](detection_model, use_side_inputs, - side_input_shapes, - side_input_types, - side_input_names) + zipped_side_inputs) # Getting the concrete function traces the graph and forces variables to # be constructed --- only after this can we save the checkpoint and # saved model. 
-- GitLab From 68868227279f4258f593f3d99957b6bdc9caa2af Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 20:44:21 +0000 Subject: [PATCH 105/128] remove print --- research/object_detection/exporter_lib_tf2_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/research/object_detection/exporter_lib_tf2_test.py b/research/object_detection/exporter_lib_tf2_test.py index fa6ba3aa4..986b0da0e 100644 --- a/research/object_detection/exporter_lib_tf2_test.py +++ b/research/object_detection/exporter_lib_tf2_test.py @@ -57,7 +57,6 @@ class FakeModel(model.DetectionModel): def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): return_dict = {'image': self._conv(preprocessed_inputs)} - print("SIDE INPUTS: ", side_inputs) if 'side_inp' in side_inputs: return_dict['image'] += side_inputs['side_inp'] return return_dict -- GitLab From 26ba72bbb708c18d72f8b0c8f058429c1f6a7064 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 20:45:15 +0000 Subject: [PATCH 106/128] remove comment --- research/object_detection/exporter_lib_tf2_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/research/object_detection/exporter_lib_tf2_test.py b/research/object_detection/exporter_lib_tf2_test.py index 986b0da0e..51609dfc6 100644 --- a/research/object_detection/exporter_lib_tf2_test.py +++ b/research/object_detection/exporter_lib_tf2_test.py @@ -49,7 +49,6 @@ class FakeModel(model.DetectionModel): filters=1, kernel_size=1, strides=(1, 1), padding='valid', kernel_initializer=tf.keras.initializers.Constant( value=conv_weight_scalar)) - #self._conv(tf.ones([1, 10, 10, 3])) def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. -- GitLab From f9ff935a45cfa66efa77e1617f1f43efef7cc497 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 21:33:50 +0000 Subject: [PATCH 107/128] exporter changes --- research/object_detection/exporter_lib_v2.py | 34 ++++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index 2fdd6576f..43760c0cb 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -39,30 +39,30 @@ def _decode_tf_example(tf_example_string_tensor): def _zip_side_inputs(side_input_shapes="", side_input_types="", side_input_names=""): - """Zips the side inputs together. + """Zips the side inputs together. - Args: - side_input_shapes: forward-slash-separated list of comma-separated lists - describing input shapes. - side_input_types: comma-separated list of the types of the inputs. - side_input_names: comma-separated list of the names of the inputs. + Args: + side_input_shapes: forward-slash-separated list of comma-separated lists + describing input shapes. + side_input_types: comma-separated list of the types of the inputs. + side_input_names: comma-separated list of the names of the inputs. - Returns: - a zipped list of side input tuples. - """ - side_input_shapes = list(map(lambda x: eval('[' + x + ']'), - side_input_shapes.split("/"))) - side_input_types = list(map(eval, side_input_types.split(","))) - return zip(side_input_shapes, - side_input_types, - side_input_names.split(",")) + Returns: + a zipped list of side input tuples. 
+ """ + side_input_shapes = list(map(lambda x: eval('[' + x + ']'), + side_input_shapes.split("/"))) + side_input_types = list(map(eval, side_input_types.split(","))) + return zip(side_input_shapes, + side_input_types, + side_input_names.split(",")) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" def __init__(self, detection_model, use_side_inputs=False, - zipped_side_inputs=None: + zipped_side_inputs=None): """Initializes a module for detection. Args: @@ -116,7 +116,7 @@ class DetectionFromImageModule(DetectionInferenceModule): name=info[2])) def __call__(input_tensor, *side_inputs): - kwargs = dict(zip(self.side_input_names.split(","), side_inputs)) + kwargs = dict(zip(self.side_input_names, side_inputs)) return self._run_inference_on_images(input_tensor, **kwargs) self.__call__ = tf.function(__call__, input_signature=sig) -- GitLab From 510736bab45a22e9ee5d6ba75d2b7944782f1062 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Mon, 3 Aug 2020 15:13:06 -0700 Subject: [PATCH 108/128] Internal change PiperOrigin-RevId: 324693165 --- official/modeling/optimization/configs/optimizer_config.py | 1 + official/nlp/optimization.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/official/modeling/optimization/configs/optimizer_config.py b/official/modeling/optimization/configs/optimizer_config.py index 5e7ca2d0c..3cb2a0963 100644 --- a/official/modeling/optimization/configs/optimizer_config.py +++ b/official/modeling/optimization/configs/optimizer_config.py @@ -106,6 +106,7 @@ class AdamWeightDecayConfig(base_config.Config): weight_decay_rate: float = 0.0 include_in_weight_decay: Optional[List[str]] = None exclude_from_weight_decay: Optional[List[str]] = None + gradient_clip_norm: float = 1.0 @dataclasses.dataclass diff --git a/official/nlp/optimization.py b/official/nlp/optimization.py index 51289a535..47483e3c2 100644 --- a/official/nlp/optimization.py +++ b/official/nlp/optimization.py @@ -130,13 +130,16 @@ class AdamWeightDecay(tf.keras.optimizers.Adam): weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, + gradient_clip_norm=1.0, name='AdamWeightDecay', **kwargs): super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs) self.weight_decay_rate = weight_decay_rate + self.gradient_clip_norm = gradient_clip_norm self._include_in_weight_decay = include_in_weight_decay self._exclude_from_weight_decay = exclude_from_weight_decay + logging.info('gradient_clip_norm=%f', gradient_clip_norm) @classmethod def from_config(cls, config): @@ -165,7 +168,7 @@ class AdamWeightDecay(tf.keras.optimizers.Adam): name=None, experimental_aggregate_gradients=True): grads, tvars = list(zip(*grads_and_vars)) - if experimental_aggregate_gradients: + if experimental_aggregate_gradients and self.gradient_clip_norm > 0.0: # when experimental_aggregate_gradients = False, apply_gradients() no # longer implicitly allreduce gradients, users manually allreduce gradient # and passed the allreduced grads_and_vars. 
For now, the -- GitLab From 7ebdee5f43cc74734968b909bbb43c8a27e9fa21 Mon Sep 17 00:00:00 2001 From: Chen Chen Date: Mon, 3 Aug 2020 15:54:07 -0700 Subject: [PATCH 109/128] Internal change PiperOrigin-RevId: 324700979 --- official/core/base_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/official/core/base_task.py b/official/core/base_task.py index 76ebd8e14..704a34adf 100644 --- a/official/core/base_task.py +++ b/official/core/base_task.py @@ -76,7 +76,7 @@ class Task(tf.Module): return ckpt = tf.train.Checkpoint(**model.checkpoint_items) - status = ckpt.restore(ckpt_dir_or_file) + status = ckpt.read(ckpt_dir_or_file) status.expect_partial().assert_existing_objects_matched() logging.info("Finished loading pretrained checkpoint from %s", ckpt_dir_or_file) -- GitLab From d73569c595a3380962774d9dc84b2e735768bce1 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Mon, 3 Aug 2020 22:59:29 +0000 Subject: [PATCH 110/128] exporter --- research/object_detection/exporter_lib_v2.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index 43760c0cb..96e465b65 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -50,12 +50,12 @@ def _zip_side_inputs(side_input_shapes="", Returns: a zipped list of side input tuples. """ - side_input_shapes = list(map(lambda x: eval('[' + x + ']'), - side_input_shapes.split("/"))) - side_input_types = list(map(eval, side_input_types.split(","))) - return zip(side_input_shapes, - side_input_types, - side_input_names.split(",")) + if (side_input_shapes) + side_input_shapes = list(map(lambda x: eval('[' + x + ']'), side_input_shapes.split("/"))) + side_input_types = map(eval, side_input_types.split(",")) + print(list(side_input_types)) + #side_input_types = list(map(eval, side_input_types.split(","))) + return zip(side_input_shapes, side_input_types, side_input_names.split(",")) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" -- GitLab From e93afea875a544bd4b887b9f093985dd4d23ec1e Mon Sep 17 00:00:00 2001 From: Chen Chen Date: Mon, 3 Aug 2020 16:08:02 -0700 Subject: [PATCH 111/128] Support to run prediction on question answering (SQuAD) task. 
PiperOrigin-RevId: 324703765 --- official/nlp/tasks/question_answering.py | 53 +++++++++++++++++-- official/nlp/tasks/question_answering_test.py | 23 ++++++++ official/nlp/tasks/sentence_prediction.py | 41 ++++++-------- official/nlp/tasks/tagging.py | 50 ++++++++--------- official/nlp/tasks/utils.py | 34 ++++++++++++ 5 files changed, 143 insertions(+), 58 deletions(-) diff --git a/official/nlp/tasks/question_answering.py b/official/nlp/tasks/question_answering.py index aaa20e854..f73cdc102 100644 --- a/official/nlp/tasks/question_answering.py +++ b/official/nlp/tasks/question_answering.py @@ -17,8 +17,10 @@ import collections import json import os + from absl import logging import dataclasses +import orbit import tensorflow as tf import tensorflow_hub as hub @@ -84,6 +86,10 @@ class QuestionAnsweringTask(base_task.Task): self._tf_record_input_path, self._eval_examples, self._eval_features = ( self._preprocess_eval_data(params.validation_data)) + def set_preprocessed_eval_input_path(self, eval_input_path): + """Sets the path to the preprocessed eval data.""" + self._tf_record_input_path = eval_input_path + def build_model(self): if self._hub_module: encoder_network = utils.get_encoder_from_hub(self._hub_module) @@ -242,10 +248,6 @@ class QuestionAnsweringTask(base_task.Task): step_outputs['end_logits']): u_ids, s_logits, e_logits = ( unique_ids.numpy(), start_logits.numpy(), end_logits.numpy()) - if u_ids.size == 1: - u_ids = [u_ids] - s_logits = [s_logits] - e_logits = [e_logits] for values in zip(u_ids, s_logits, e_logits): state.append(self.raw_aggregated_result( unique_id=values[0], @@ -291,3 +293,46 @@ class QuestionAnsweringTask(base_task.Task): eval_metrics = {'exact_match': eval_metrics['exact_match'], 'final_f1': eval_metrics['final_f1']} return eval_metrics + + +def predict(task: QuestionAnsweringTask, params: cfg.DataConfig, + model: tf.keras.Model): + """Predicts on the input data. + + Args: + task: A `QuestionAnsweringTask` object. + params: A `cfg.DataConfig` object. + model: A keras.Model. + + Returns: + A tuple of `all_predictions`, `all_nbest` and `scores_diff`, which + are dict and can be written to json files including prediction json file, + nbest json file and null_odds json file. + """ + tf_record_input_path, eval_examples, eval_features = ( + task._preprocess_eval_data(params)) # pylint: disable=protected-access + + # `tf_record_input_path` will overwrite `params.input_path`, + # when `task.buid_inputs()` is called. 
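A toy, NumPy-only illustration of the aggregation contract behind the change above: per-step outputs arrive as per-replica tuples of batched arrays (two pretend replicas below), so the unique ids are always arrays and the old scalar special case is unnecessary. This is also why the test that follows wraps its logs in one-element tuples.

import numpy as np

step_outputs = {
    'unique_ids': (np.array([101, 102]), np.array([103])),
    'start_logits': (np.zeros((2, 4)), np.zeros((1, 4))),
    'end_logits': (np.zeros((2, 4)), np.zeros((1, 4))),
}
state = []
for u_ids, s_logits, e_logits in zip(step_outputs['unique_ids'],
                                     step_outputs['start_logits'],
                                     step_outputs['end_logits']):
  for unique_id, start, end in zip(u_ids, s_logits, e_logits):
    state.append(int(unique_id))
print(state)  # [101, 102, 103]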
+ task.set_preprocessed_eval_input_path(tf_record_input_path) + + def predict_step(inputs): + """Replicated prediction calculation.""" + return task.validation_step(inputs, model) + + dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(), + task.build_inputs, params) + aggregated_outputs = utils.predict(predict_step, task.aggregate_logs, dataset) + + all_predictions, all_nbest, scores_diff = ( + task.squad_lib.postprocess_output( + eval_examples, + eval_features, + aggregated_outputs, + task.task_config.n_best_size, + task.task_config.max_answer_length, + task.task_config.validation_data.do_lower_case, + version_2_with_negative=(params.version_2_with_negative), + null_score_diff_threshold=task.task_config.null_score_diff_threshold, + verbose=False)) + return all_predictions, all_nbest, scores_diff diff --git a/official/nlp/tasks/question_answering_test.py b/official/nlp/tasks/question_answering_test.py index 626c3cadb..ff601b0f9 100644 --- a/official/nlp/tasks/question_answering_test.py +++ b/official/nlp/tasks/question_answering_test.py @@ -81,6 +81,8 @@ class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase): val_dataset = task.build_inputs(config.validation_data) val_iterator = iter(val_dataset) logs = task.validation_step(next(val_iterator), model, metrics=metrics) + # Mock that `logs` is from one replica. + logs = {x: (logs[x],) for x in logs} logs = task.aggregate_logs(step_outputs=logs) metrics = task.reduce_aggregated_logs(logs) self.assertIn("final_f1", metrics) @@ -160,6 +162,27 @@ class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase): validation_data=self._get_validation_data_config()) self._run_task(config) + @parameterized.named_parameters(("squad1", False), ("squad2", True)) + def test_predict(self, version_2_with_negative): + validation_data = self._get_validation_data_config( + version_2_with_negative=version_2_with_negative) + + config = question_answering.QuestionAnsweringConfig( + model=question_answering.ModelConfig(encoder=self._encoder_config), + train_data=self._train_data_config, + validation_data=validation_data) + task = question_answering.QuestionAnsweringTask(config) + model = task.build_model() + + all_predictions, all_nbest, scores_diff = question_answering.predict( + task, validation_data, model) + self.assertLen(all_predictions, 1) + self.assertLen(all_nbest, 1) + if version_2_with_negative: + self.assertLen(scores_diff, 1) + else: + self.assertEmpty(scores_diff) + if __name__ == "__main__": tf.test.main() diff --git a/official/nlp/tasks/sentence_prediction.py b/official/nlp/tasks/sentence_prediction.py index f8cfefdb1..3e7cb46f3 100644 --- a/official/nlp/tasks/sentence_prediction.py +++ b/official/nlp/tasks/sentence_prediction.py @@ -245,34 +245,25 @@ def predict(task: SentencePredictionTask, params: cfg.DataConfig, """ is_regression = task.task_config.model.num_classes == 1 - @tf.function - def predict_step(iterator): - """Predicts on distributed devices.""" - - def _replicated_step(inputs): - """Replicated prediction calculation.""" - x, _ = inputs - outputs = task.inference_step(x, model) - if is_regression: - return outputs - else: - return tf.argmax(outputs, axis=-1) - - outputs = tf.distribute.get_strategy().run( - _replicated_step, args=(next(iterator),)) - return tf.nest.map_structure( - tf.distribute.get_strategy().experimental_local_results, outputs) - - def reduce_fn(state, outputs): + def predict_step(inputs): + """Replicated prediction calculation.""" + x, _ = inputs + outputs = 
task.inference_step(x, model) + if is_regression: + return outputs + else: + return tf.argmax(outputs, axis=-1) + + def aggregate_fn(state, outputs): """Concatenates model's outputs.""" + if state is None: + state = {'predictions': []} + for per_replica_batch_predictions in outputs: - state.extend(per_replica_batch_predictions) + state['predictions'].extend(per_replica_batch_predictions) return state - loop_fn = orbit.utils.create_loop_fn(predict_step) dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(), task.build_inputs, params) - # Set `num_steps` to -1 to exhaust the dataset. - predictions = loop_fn( - iter(dataset), num_steps=-1, state=[], reduce_fn=reduce_fn) - return predictions + outputs = utils.predict(predict_step, aggregate_fn, dataset) + return outputs['predictions'] diff --git a/official/nlp/tasks/tagging.py b/official/nlp/tasks/tagging.py index d1a63a610..10b5423c1 100644 --- a/official/nlp/tasks/tagging.py +++ b/official/nlp/tasks/tagging.py @@ -232,30 +232,25 @@ def predict(task: TaggingTask, params: cfg.DataConfig, sentence id of the corresponding example. """ - @tf.function - def predict_step(iterator): - """Predicts on distributed devices.""" - - def _replicated_step(inputs): - """Replicated prediction calculation.""" - x, y = inputs - sentence_ids = x.pop('sentence_id') - outputs = task.inference_step(x, model) - predict_ids = outputs['predict_ids'] - label_mask = tf.greater_equal(y, 0) - return dict( - predict_ids=predict_ids, - label_mask=label_mask, - sentence_ids=sentence_ids) - - outputs = tf.distribute.get_strategy().run( - _replicated_step, args=(next(iterator),)) - return tf.nest.map_structure( - tf.distribute.get_strategy().experimental_local_results, outputs) - - def reduce_fn(state, outputs): + def predict_step(inputs): + """Replicated prediction calculation.""" + x, y = inputs + sentence_ids = x.pop('sentence_id') + outputs = task.inference_step(x, model) + predict_ids = outputs['predict_ids'] + label_mask = tf.greater_equal(y, 0) + return dict( + predict_ids=predict_ids, + label_mask=label_mask, + sentence_ids=sentence_ids) + + def aggregate_fn(state, outputs): """Concatenates model's outputs.""" - cur_predict_ids, cur_sentence_ids = state + if state is None: + state = {'predict_ids': [], 'sentence_ids': []} + + cur_predict_ids = state['predict_ids'] + cur_sentence_ids = state['sentence_ids'] for batch_predict_ids, batch_label_mask, batch_sentence_ids in zip( outputs['predict_ids'], outputs['label_mask'], outputs['sentence_ids']): @@ -269,12 +264,9 @@ def predict(task: TaggingTask, params: cfg.DataConfig, # Skip the padding label. if tmp_label_mask[i]: cur_predict_ids[-1].append(tmp_predict_ids[i]) - return cur_predict_ids, cur_sentence_ids + return state - loop_fn = orbit.utils.create_loop_fn(predict_step) dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(), task.build_inputs, params) - # Set `num_steps` to -1 to exhaust the dataset. - predict_ids, sentence_ids = loop_fn( - iter(dataset), num_steps=-1, state=([], []), reduce_fn=reduce_fn) - return predict_ids, sentence_ids + outputs = utils.predict(predict_step, aggregate_fn, dataset) + return outputs['predict_ids'], outputs['sentence_ids'] diff --git a/official/nlp/tasks/utils.py b/official/nlp/tasks/utils.py index 467dafe31..caeb74143 100644 --- a/official/nlp/tasks/utils.py +++ b/official/nlp/tasks/utils.py @@ -14,6 +14,9 @@ # limitations under the License. 
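The sentence-prediction and tagging refactors in this commit both delegate to one shared helper; here is a plain-Python stand-in for that predict/aggregate loop (fake data; the real orbit loop adds tf.function wrapping and distribution):

import tensorflow as tf

def run_predict_loop(predict_step_fn, aggregate_fn, dataset):
  state = None
  for batch in dataset:
    state = aggregate_fn(state, predict_step_fn(batch))
  return state

def step(batch):
  return batch * 2

def aggregate(state, outputs):
  state = state if state is not None else {'predictions': []}
  state['predictions'].extend(outputs.numpy().tolist())
  return state

print(run_predict_loop(step, aggregate, tf.data.Dataset.range(6).batch(3)))
# {'predictions': [0, 2, 4, 6, 8, 10]}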
# ============================================================================== """Common utils for tasks.""" +from typing import Any, Callable + +import orbit import tensorflow as tf import tensorflow_hub as hub @@ -32,3 +35,34 @@ def get_encoder_from_hub(hub_module: str) -> tf.keras.Model: return tf.keras.Model( inputs=[input_word_ids, input_mask, input_type_ids], outputs=[sequence_output, pooled_output]) + + +def predict(predict_step_fn: Callable[[Any], Any], + aggregate_fn: Callable[[Any, Any], Any], + dataset: tf.data.Dataset): + """Runs prediction. + + Args: + predict_step_fn: A callable such as `def predict_step(inputs)`, where + `inputs` are input tensors. + aggregate_fn: A callable such as `def aggregate_fn(state, value)`, where + `value` is the outputs from `predict_step_fn`. + dataset: A `tf.data.Dataset` object. + + Returns: + The aggregated predictions. + """ + + @tf.function + def predict_step(iterator): + """Predicts on distributed devices.""" + outputs = tf.distribute.get_strategy().run( + predict_step_fn, args=(next(iterator),)) + return tf.nest.map_structure( + tf.distribute.get_strategy().experimental_local_results, outputs) + + loop_fn = orbit.utils.create_loop_fn(predict_step) + # Set `num_steps` to -1 to exhaust the dataset. + outputs = loop_fn( + iter(dataset), num_steps=-1, state=None, reduce_fn=aggregate_fn) # pytype: disable=wrong-arg-types + return outputs -- GitLab From dc03c0434d95f1aec757416935bb872ceba3deb8 Mon Sep 17 00:00:00 2001 From: xinliupitt Date: Mon, 3 Aug 2020 19:17:42 -0400 Subject: [PATCH 112/128] intermediate dropout --- official/nlp/modeling/layers/transformer.py | 34 +++++++++++++++++-- .../nlp/modeling/layers/transformer_test.py | 12 ++++--- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/official/nlp/modeling/layers/transformer.py b/official/nlp/modeling/layers/transformer.py index 8fc8fad47..019891296 100644 --- a/official/nlp/modeling/layers/transformer.py +++ b/official/nlp/modeling/layers/transformer.py @@ -55,6 +55,10 @@ class Transformer(tf.keras.layers.Layer): layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. + intermediate_dropout: Dropout probability for intermediate_dropout_layer. If + larger than 0.0, intermediate_dropout_layer is created and used after + intermediate_activation_layer. Otherwise, intermediate_dropout_layer is + None. 
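A minimal sketch of a feed-forward sublayer with the optional intermediate dropout just described, assuming the same create-only-when-positive convention (an illustrative class, not the library layer):

import tensorflow as tf

class FeedForwardSketch(tf.keras.layers.Layer):

  def __init__(self, hidden_size, intermediate_size,
               intermediate_dropout=0.0, **kwargs):
    super(FeedForwardSketch, self).__init__(**kwargs)
    self._intermediate = tf.keras.layers.Dense(
        intermediate_size, activation='relu')
    # Mirror the patch: only build the dropout layer for a positive rate.
    self._intermediate_dropout_layer = (
        tf.keras.layers.Dropout(rate=intermediate_dropout)
        if intermediate_dropout > 0.0 else None)
    self._output = tf.keras.layers.Dense(hidden_size)

  def call(self, inputs, training=False):
    x = self._intermediate(inputs)
    if self._intermediate_dropout_layer is not None:
      x = self._intermediate_dropout_layer(x, training=training)
    return self._output(x)

layer = FeedForwardSketch(hidden_size=8, intermediate_size=16,
                          intermediate_dropout=0.1)
print(layer(tf.ones([2, 4, 8]), training=True).shape)  # (2, 4, 8)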
""" def __init__(self, @@ -74,6 +78,7 @@ class Transformer(tf.keras.layers.Layer): use_bias=True, norm_first=False, norm_epsilon=1e-12, + intermediate_dropout=0.0, **kwargs): super(Transformer, self).__init__(**kwargs) @@ -93,6 +98,7 @@ class Transformer(tf.keras.layers.Layer): self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon + self._intermediate_dropout = intermediate_dropout def build(self, input_shape): input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape @@ -155,6 +161,11 @@ class Transformer(tf.keras.layers.Layer): policy = tf.float32 self._intermediate_activation_layer = tf.keras.layers.Activation( self._intermediate_activation, dtype=policy) + if self._intermediate_dropout > 0.0: + self.intermediate_dropout_layer = tf.keras.layers.Dropout( + rate=self._intermediate_dropout) + else: + self.intermediate_dropout_layer = None self._output_dense = tf.keras.layers.experimental.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), @@ -204,7 +215,9 @@ class Transformer(tf.keras.layers.Layer): "norm_first": self._norm_first, "norm_epsilon": - self._norm_epsilon + self._norm_epsilon, + "intermediate_dropout": + self._intermediate_dropout } base_config = super(Transformer, self).get_config() return dict(list(base_config.items()) + list(config.items())) @@ -238,6 +251,8 @@ class Transformer(tf.keras.layers.Layer): intermediate_output = self._intermediate_dense(attention_output) intermediate_output = self._intermediate_activation_layer( intermediate_output) + if self.intermediate_dropout_layer: + intermediate_output = self.intermediate_dropout_layer(intermediate_output) layer_output = self._output_dense(intermediate_output) layer_output = self._output_dropout(layer_output) # During mixed precision training, attention_output is from layer norm and @@ -291,6 +306,10 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): layers. If set False, output of attention and intermediate dense layers is normalized. norm_epsilon: Epsilon value to initialize normalization layers. + intermediate_dropout: Dropout probability for intermediate_dropout_layer. If + larger than 0.0, intermediate_dropout_layer is created and used after + intermediate_activation_layer. Otherwise, intermediate_dropout_layer is + None. 
""" def __init__(self, @@ -310,6 +329,7 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): use_bias=True, norm_first=False, norm_epsilon=1e-12, + intermediate_dropout=0.0, **kwargs): super(TransformerDecoderLayer, self).__init__(**kwargs) self.num_attention_heads = num_attention_heads @@ -329,6 +349,7 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): self._use_bias = use_bias self._norm_first = norm_first self._norm_epsilon = norm_epsilon + self._intermediate_dropout = intermediate_dropout if self.multi_channel_cross_attention: self._cross_attention_cls = multi_channel_attention.MultiChannelAttention else: @@ -401,6 +422,11 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): **common_kwargs) self.intermediate_activation_layer = tf.keras.layers.Activation( self.intermediate_activation) + if self._intermediate_dropout > 0.0: + self.intermediate_dropout_layer = tf.keras.layers.Dropout( + rate=self._intermediate_dropout) + else: + self.intermediate_dropout_layer = None self.output_dense = tf.keras.layers.experimental.EinsumDense( "abc,cd->abd", output_shape=(None, hidden_size), @@ -445,7 +471,9 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): "norm_first": self._norm_first, "norm_epsilon": - self._norm_epsilon + self._norm_epsilon, + "intermediate_dropout": + self._intermediate_dropout } base_config = super(TransformerDecoderLayer, self).get_config() return dict(list(base_config.items()) + list(config.items())) @@ -508,6 +536,8 @@ class TransformerDecoderLayer(tf.keras.layers.Layer): intermediate_output = self.intermediate_dense(attention_output) intermediate_output = self.intermediate_activation_layer( intermediate_output) + if self.intermediate_dropout_layer: + intermediate_output = self.intermediate_dropout_layer(intermediate_output) layer_output = self.output_dense(intermediate_output) layer_output = self.output_dropout(layer_output) if self._norm_first: diff --git a/official/nlp/modeling/layers/transformer_test.py b/official/nlp/modeling/layers/transformer_test.py index f83fc4007..eac78b500 100644 --- a/official/nlp/modeling/layers/transformer_test.py +++ b/official/nlp/modeling/layers/transformer_test.py @@ -230,7 +230,8 @@ class TransformerArgumentTest(keras_parameterized.TestCase): attention_dropout_rate=0.1, use_bias=False, norm_first=True, - norm_epsilon=1e-6) + norm_epsilon=1e-6, + intermediate_dropout=0.1) # Forward path. dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) @@ -248,7 +249,8 @@ class TransformerArgumentTest(keras_parameterized.TestCase): attention_dropout_rate=0.1, use_bias=False, norm_first=True, - norm_epsilon=1e-6) + norm_epsilon=1e-6, + intermediate_dropout=0.1) encoder_block_config = encoder_block.get_config() new_encoder_block = transformer.Transformer.from_config( encoder_block_config) @@ -299,7 +301,8 @@ class TransformerDecoderLayerTest(keras_parameterized.TestCase): attention_dropout_rate=0.1, use_bias=False, norm_first=True, - norm_epsilon=1e-6) + norm_epsilon=1e-6, + intermediate_dropout=0.1) # Forward path. 
dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32) dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32) @@ -317,7 +320,8 @@ class TransformerDecoderLayerTest(keras_parameterized.TestCase): attention_dropout_rate=0.1, use_bias=False, norm_first=True, - norm_epsilon=1e-6) + norm_epsilon=1e-6, + intermediate_dropout=0.1) decoder_block_config = decoder_block.get_config() new_decoder_block = transformer.TransformerDecoderLayer.from_config( decoder_block_config) -- GitLab From 15a3aa2bc795c57e85d9aa0553b391dc4291be18 Mon Sep 17 00:00:00 2001 From: Chen Chen Date: Mon, 3 Aug 2020 16:10:21 -0700 Subject: [PATCH 113/128] Recommend to use the stable tf-models-official pip packages in tensorflow_models' colabs. PiperOrigin-RevId: 324704216 --- official/README.md | 15 +++++++++++---- official/colab/fine_tuning_bert.ipynb | 6 +++--- official/colab/nlp/customize_encoder.ipynb | 6 +++--- .../colab/nlp/nlp_modeling_library_intro.ipynb | 6 +++--- 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/official/README.md b/official/README.md index 77e43ea9c..ca21423d8 100644 --- a/official/README.md +++ b/official/README.md @@ -98,17 +98,24 @@ pip3 install tf-nightly #### Method 1: Install the TensorFlow Model Garden pip package -**tf-models-nightly** is the nightly Model Garden package -created daily automatically. pip will install all models -and dependencies automatically. +**tf-models-official** is the stable Model Garden package. +pip will install all models and dependencies automatically. ```shell -pip install tf-models-nightly +pip install tf-models-official ``` Please check out our [example](colab/fine_tuning_bert.ipynb) to learn how to use a PIP package. +Note that **tf-models-official** may not include the latest changes in this +github repo. To include latest changes, you may install **tf-models-nightly**, +which is the nightly Model Garden package created daily automatically. + +```shell +pip install tf-models-nightly +``` + #### Method 2: Clone the source 1. Clone the GitHub repository: diff --git a/official/colab/fine_tuning_bert.ipynb b/official/colab/fine_tuning_bert.ipynb index b63c9a3f6..28526d128 100644 --- a/official/colab/fine_tuning_bert.ipynb +++ b/official/colab/fine_tuning_bert.ipynb @@ -98,7 +98,8 @@ "source": [ "### Install the TensorFlow Model Garden pip package\n", "\n", - "* `tf-models-nightly` is the nightly Model Garden package created daily automatically.\n", + "* `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`,\n", + "which is the nightly Model Garden package created daily automatically.\n", "* pip will install all models and dependencies automatically." ] }, @@ -112,8 +113,7 @@ }, "outputs": [], "source": [ - "!pip install -q tf-nightly\n", - "!pip install -q tf-models-nightly" + "!pip install -q tf-models-official==2.3.0" ] }, { diff --git a/official/colab/nlp/customize_encoder.ipynb b/official/colab/nlp/customize_encoder.ipynb index 18b45d3a6..53362191e 100644 --- a/official/colab/nlp/customize_encoder.ipynb +++ b/official/colab/nlp/customize_encoder.ipynb @@ -100,7 +100,8 @@ "source": [ "### Install the TensorFlow Model Garden pip package\n", "\n", - "* `tf-models-nightly` is the nightly Model Garden package created daily automatically.\n", + "* `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. 
To include latest changes, you may install `tf-models-nightly`,\n", + "which is the nightly Model Garden package created daily automatically.\n", "* `pip` will install all models and dependencies automatically." ] }, @@ -114,8 +115,7 @@ }, "outputs": [], "source": [ - "!pip install -q tf-nightly\n", - "!pip install -q tf-models-nightly" + "!pip install -q tf-models-official==2.3.0" ] }, { diff --git a/official/colab/nlp/nlp_modeling_library_intro.ipynb b/official/colab/nlp/nlp_modeling_library_intro.ipynb index f5ffcef96..3e86f41a9 100644 --- a/official/colab/nlp/nlp_modeling_library_intro.ipynb +++ b/official/colab/nlp/nlp_modeling_library_intro.ipynb @@ -98,7 +98,8 @@ "source": [ "### Install the TensorFlow Model Garden pip package\n", "\n", - "* `tf-models-nightly` is the nightly Model Garden package created daily automatically.\n", + "* `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`,\n", + "which is the nightly Model Garden package created daily automatically.\n", "* `pip` will install all models and dependencies automatically." ] }, @@ -112,8 +113,7 @@ }, "outputs": [], "source": [ - "!pip install -q tf-nightly\n", - "!pip install -q tf-models-nightly" + "!pip install -q tf-models-official==2.3.0" ] }, { -- GitLab From 2db4cc8f8e4e51ef101a8e1b060288c20c3d4bb1 Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Tue, 4 Aug 2020 09:26:59 -0700 Subject: [PATCH 114/128] Make timestamp optional in `generate_embedding_data.py` PiperOrigin-RevId: 324826632 --- .../context_rcnn/generate_embedding_data.py | 146 +++++++++--------- 1 file changed, 72 insertions(+), 74 deletions(-) diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py index 6ec654e6c..d58b24c5d 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -72,6 +72,67 @@ def drop_keys(key_value_tuple): return key_value_tuple[1] +def get_date_captured(example): + date_captured = datetime.datetime.strptime( + six.ensure_str( + example.features.feature['image/date_captured'].bytes_list.value[0]), + '%Y-%m-%d %H:%M:%S') + return date_captured + + +def embed_date_captured(date_captured): + """Encodes the datetime of the image.""" + embedded_date_captured = [] + month_max = 12.0 + day_max = 31.0 + hour_max = 24.0 + minute_max = 60.0 + min_year = 1990.0 + max_year = 2030.0 + + year = (date_captured.year - min_year) / float(max_year - min_year) + embedded_date_captured.append(year) + + month = (date_captured.month - 1) / month_max + embedded_date_captured.append(month) + + day = (date_captured.day - 1) / day_max + embedded_date_captured.append(day) + + hour = date_captured.hour / hour_max + embedded_date_captured.append(hour) + + minute = date_captured.minute / minute_max + embedded_date_captured.append(minute) + + return np.asarray(embedded_date_captured) + + +def embed_position_and_size(box): + """Encodes the bounding box of the object of interest.""" + ymin = box[0] + xmin = box[1] + ymax = box[2] + xmax = box[3] + w = xmax - xmin + h = ymax - ymin + x = xmin + w / 2.0 + y = ymin + h / 2.0 + return np.asarray([x, y, w, h]) + + +def get_bb_embedding(detection_features, detection_boxes, detection_scores, + index): + embedding = 
detection_features[0][index] + pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0) + + box = detection_boxes[0][index] + position_embedding = embed_position_and_size(box) + + score = detection_scores[0][index] + return np.concatenate((pooled_embedding, position_embedding)), score + + class GenerateEmbeddingDataFn(beam.DoFn): """Generates embedding data for camera trap images. @@ -112,68 +173,18 @@ class GenerateEmbeddingDataFn(beam.DoFn): def _run_inference_and_generate_embedding(self, tfexample_key_value): key, tfexample = tfexample_key_value input_example = tf.train.Example.FromString(tfexample) - # Convert date_captured datetime string to unix time integer and store - - def get_date_captured(example): - date_captured = datetime.datetime.strptime( - six.ensure_str( - example.features.feature[ - 'image/date_captured'].bytes_list.value[0]), - '%Y-%m-%d %H:%M:%S') - return date_captured + example = tf.train.Example() + example.CopyFrom(input_example) try: date_captured = get_date_captured(input_example) + unix_time = ((date_captured - + datetime.datetime.fromtimestamp(0)).total_seconds()) + example.features.feature['image/unix_time'].float_list.value.extend( + [unix_time]) + temporal_embedding = embed_date_captured(date_captured) except Exception: # pylint: disable=broad-except - # we require date_captured to be available for all images - return [] - - def embed_date_captured(date_captured): - """Encodes the datetime of the image.""" - embedded_date_captured = [] - month_max = 12.0 - day_max = 31.0 - hour_max = 24.0 - minute_max = 60.0 - min_year = 1990.0 - max_year = 2030.0 - - year = (date_captured.year-min_year)/float(max_year-min_year) - embedded_date_captured.append(year) - - month = (date_captured.month-1)/month_max - embedded_date_captured.append(month) - - day = (date_captured.day-1)/day_max - embedded_date_captured.append(day) - - hour = date_captured.hour/hour_max - embedded_date_captured.append(hour) - - minute = date_captured.minute/minute_max - embedded_date_captured.append(minute) - - return np.asarray(embedded_date_captured) - - def embed_position_and_size(box): - """Encodes the bounding box of the object of interest.""" - ymin = box[0] - xmin = box[1] - ymax = box[2] - xmax = box[3] - w = xmax - xmin - h = ymax - ymin - x = xmin + w / 2.0 - y = ymin + h / 2.0 - return np.asarray([x, y, w, h]) - - unix_time = ( - (date_captured - datetime.datetime.fromtimestamp(0)).total_seconds()) - - example = tf.train.Example() - example.CopyFrom(input_example) - example.features.feature['image/unix_time'].float_list.value.extend( - [unix_time]) + pass detections = self._detect_fn.signatures['serving_default']( (tf.expand_dims(tf.convert_to_tensor(tfexample), 0))) @@ -188,25 +199,12 @@ class GenerateEmbeddingDataFn(beam.DoFn): detection_features = np.asarray(detection_features) - def get_bb_embedding(detection_features, detection_boxes, detection_scores, - index): - embedding = detection_features[0][index] - pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0) - - box = detection_boxes[0][index] - position_embedding = embed_position_and_size(box) - - score = detection_scores[0][index] - return np.concatenate((pooled_embedding, position_embedding)), score - - temporal_embedding = embed_date_captured(date_captured) - embedding_count = 0 for index in range(min(num_detections, self._top_k_embedding_count)): bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, index) embed_all.extend(bb_embedding) - 
embed_all.extend(temporal_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) embedding_count += 1 @@ -216,7 +214,7 @@ class GenerateEmbeddingDataFn(beam.DoFn): bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, index) embed_all.extend(bb_embedding) - embed_all.extend(temporal_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) embedding_count += 1 @@ -224,7 +222,7 @@ class GenerateEmbeddingDataFn(beam.DoFn): bb_embedding, score = get_bb_embedding( detection_features, detection_boxes, detection_scores, 0) embed_all.extend(bb_embedding) - embed_all.extend(temporal_embedding) + if temporal_embedding is not None: embed_all.extend(temporal_embedding) score_all.append(score) # Takes max in case embedding_count is 0. -- GitLab From 739a7f32e42c2a0658fa62114d8034998e531807 Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Tue, 4 Aug 2020 11:04:40 -0700 Subject: [PATCH 115/128] Set temporal_embedding to None when `data_captured` isn't available. PiperOrigin-RevId: 324848229 --- .../dataset_tools/context_rcnn/generate_embedding_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py index d58b24c5d..ff282bbdc 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -184,7 +184,7 @@ class GenerateEmbeddingDataFn(beam.DoFn): [unix_time]) temporal_embedding = embed_date_captured(date_captured) except Exception: # pylint: disable=broad-except - pass + temporal_embedding = None detections = self._detect_fn.signatures['serving_default']( (tf.expand_dims(tf.convert_to_tensor(tfexample), 0))) -- GitLab From d08a1c6698b2fd3f5a0a9b78e4cff07d587f91ab Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Tue, 4 Aug 2020 20:02:23 +0000 Subject: [PATCH 116/128] fix exporters --- research/object_detection/exporter_lib_v2.py | 22 +++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index 96e465b65..d1c7bbdc2 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -21,7 +21,7 @@ from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.utils import config_util - +import ast def _decode_image(encoded_image_string_tensor): image_tensor = tf.image.decode_image(encoded_image_string_tensor, @@ -50,12 +50,11 @@ def _zip_side_inputs(side_input_shapes="", Returns: a zipped list of side input tuples. 
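For context on the embedding commits above, a NumPy sketch of the box position/size encoding (the same arithmetic as the hoisted helper, standalone):

import numpy as np

def embed_position_and_size_sketch(box):
  # [ymin, xmin, ymax, xmax] -> [x_center, y_center, width, height].
  ymin, xmin, ymax, xmax = box
  w = xmax - xmin
  h = ymax - ymin
  return np.asarray([xmin + w / 2.0, ymin + h / 2.0, w, h])

print(embed_position_and_size_sketch([0.2, 0.1, 0.6, 0.5]))
# [0.3 0.4 0.4 0.4]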
""" - if (side_input_shapes) - side_input_shapes = list(map(lambda x: eval('[' + x + ']'), side_input_shapes.split("/"))) - side_input_types = map(eval, side_input_types.split(",")) - print(list(side_input_types)) - #side_input_types = list(map(eval, side_input_types.split(","))) - return zip(side_input_shapes, side_input_types, side_input_names.split(",")) + side_input_shapes = list(map(lambda x: ast.literal_eval('[' + x + ']'), + side_input_shapes.split('/'))) + side_input_types = eval('[' + side_input_types + ']') + side_input_names = side_input_names.split(',') + return zip(side_input_shapes, side_input_types, side_input_names) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" @@ -220,9 +219,12 @@ def export_inference_graph(input_type, raise ValueError('Unrecognized `input_type`') if use_side_inputs and input_type != 'image_tensor': raise ValueError('Side inputs supported for image_tensor input type only.') - zipped_side_inputs = _zip_side_inputs(side_input_shapes, - side_input_types, - side_input_names) + + zipped_side_inputs = None + if use_side_inputs: + zipped_side_inputs = _zip_side_inputs(side_input_shapes, + side_input_types, + side_input_names) detection_module = DETECTION_MODULE_MAP[input_type](detection_model, use_side_inputs, zipped_side_inputs) -- GitLab From eb795bf7f9f9ae8afb069a96711d38320aa08c5a Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Tue, 4 Aug 2020 20:07:35 +0000 Subject: [PATCH 117/128] progress on PR --- .../object_detection/core/box_list_ops.py | 44 +++++++++- .../core/box_list_ops_test.py | 26 +++++- .../object_detection/exporter_lib_tf2_test.py | 48 +---------- research/object_detection/exporter_lib_v2.py | 85 +++---------------- research/object_detection/exporter_main_v2.py | 24 +----- 5 files changed, 83 insertions(+), 144 deletions(-) diff --git a/research/object_detection/core/box_list_ops.py b/research/object_detection/core/box_list_ops.py index 159845b69..555a0a3b1 100644 --- a/research/object_detection/core/box_list_ops.py +++ b/research/object_detection/core/box_list_ops.py @@ -303,6 +303,49 @@ def iou(boxlist1, boxlist2, scope=None): tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) +def l1(boxlist1, boxlist2, scope=None): + """Computes l1 loss (pairwise) between two boxlists. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing the pairwise L1 loss. + """ + with tf.name_scope(scope, 'PairwiseL1'): + ycenter1, xcenter1, h1, w1 = boxlist1.get_center_coordinates_and_sizes() + ycenter2, xcenter2, h2, w2 = boxlist2.get_center_coordinates_and_sizes() + ycenters = tf.abs(tf.expand_dims(ycenter2, axis=0) - tf.expand_dims( + tf.transpose(ycenter1), axis=1)) + xcenters = tf.abs(tf.expand_dims(xcenter2, axis=0) - tf.expand_dims( + tf.transpose(xcenter1), axis=1)) + heights = tf.abs(tf.expand_dims(h2, axis=0) - tf.expand_dims( + tf.transpose(h1), axis=1)) + widths = tf.abs(tf.expand_dims(w2, axis=0) - tf.expand_dims( + tf.transpose(w1), axis=1)) + return ycenters + xcenters + heights + widths + +def giou_loss(boxlist1, boxlist2, scope=None): + """ + Computes generalized IOU loss between two boxlists pairwise, + as described at giou.stanford.edu. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + scope: name scope. + + Returns: + a tensor with shape [N, M] representing the pairwise GIoU loss. 
+ """ + with tf.name_scope(scope, "PairwiseGIoU"): + N = boxlist1.num_boxes() + M = boxlist2.num_boxes() + boxes1 = tf.repeat(boxlist1.get(), repeats=M, axis=0) + boxes2 = tf.tile(boxlist2.get(), multiples=[N, 1]) + return tf.reshape(1.0 - ops.giou(boxes1, boxes2), [N, M]) def matched_iou(boxlist1, boxlist2, scope=None): """Compute intersection-over-union between corresponding boxes in boxlists. @@ -324,7 +367,6 @@ def matched_iou(boxlist1, boxlist2, scope=None): tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) - def ioa(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-area between box collections. diff --git a/research/object_detection/core/box_list_ops_test.py b/research/object_detection/core/box_list_ops_test.py index b572dff9e..6e6990b91 100644 --- a/research/object_detection/core/box_list_ops_test.py +++ b/research/object_detection/core/box_list_ops_test.py @@ -217,7 +217,6 @@ class BoxListOpsTest(test_case.TestCase): def test_iou(self): def graph_fn(): - corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) @@ -229,6 +228,31 @@ class BoxListOpsTest(test_case.TestCase): iou_output = self.execute(graph_fn, []) self.assertAllClose(iou_output, exp_output) + def test_l1(self): + def graph_fn(): + corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) + corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], + [0.0, 0.0, 20.0, 20.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + l1 = box_list_ops.l1(boxes1, boxes2) + return l1 + exp_output = [[5.0, 22.5, 45.5], [8.5, 19.0, 40.0]] + l1_output = self.execute(graph_fn, []) + self.assertAllClose(l1_output, exp_output) + + def test_giou(self): + def graph_fn(): + corners1 = tf.constant([[5.0, 7.0, 7.0, 9.0]]) + corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]]) + boxes1 = box_list.BoxList(corners1) + boxes2 = box_list.BoxList(corners2) + giou = box_list_ops.giou_loss(boxes1, boxes2) + return giou + exp_output = [[0.0, 4.0 / 3.0]] + giou_output = self.execute(graph_fn, []) + self.assertAllClose(giou_output, exp_output) + def test_matched_iou(self): def graph_fn(): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) diff --git a/research/object_detection/exporter_lib_tf2_test.py b/research/object_detection/exporter_lib_tf2_test.py index 51609dfc6..99cbf263b 100644 --- a/research/object_detection/exporter_lib_tf2_test.py +++ b/research/object_detection/exporter_lib_tf2_test.py @@ -54,11 +54,8 @@ class FakeModel(model.DetectionModel): true_image_shapes = [] # Doesn't matter for the fake model. 
return tf.identity(inputs), true_image_shapes - def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs): - return_dict = {'image': self._conv(preprocessed_inputs)} - if 'side_inp' in side_inputs: - return_dict['image'] += side_inputs['side_inp'] - return return_dict + def predict(self, preprocessed_inputs, true_image_shapes): + return {'image': self._conv(preprocessed_inputs)} def postprocess(self, prediction_dict, true_image_shapes): predict_tensor_sum = tf.reduce_sum(prediction_dict['image']) @@ -192,7 +189,7 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): saved_model_path = os.path.join(output_directory, 'saved_model') detect_fn = tf.saved_model.load(saved_model_path) image = self.get_dummy_input(input_type) - detections = detect_fn(tf.constant(image)) + detections = detect_fn(image) detection_fields = fields.DetectionResultFields self.assertAllClose(detections[detection_fields.detection_boxes], @@ -206,45 +203,6 @@ class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase): [[1, 2], [2, 1]]) self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) - def test_export_saved_model_and_run_inference_with_side_inputs( - self, input_type='image_tensor'): - tmp_dir = self.get_temp_dir() - self._save_checkpoint_from_mock_model(tmp_dir) - with mock.patch.object( - model_builder, 'build', autospec=True) as mock_builder: - mock_builder.return_value = FakeModel() - output_directory = os.path.join(tmp_dir, 'output') - pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() - exporter_lib_v2.export_inference_graph( - input_type=input_type, - pipeline_config=pipeline_config, - trained_checkpoint_dir=tmp_dir, - output_directory=output_directory, - use_side_inputs=True, - side_input_shapes="1", - side_input_names="side_inp", - side_input_types="tf.float32") - - saved_model_path = os.path.join(output_directory, 'saved_model') - detect_fn = tf.saved_model.load(saved_model_path) - detect_fn_sig = detect_fn.signatures['serving_default'] - image = tf.constant(self.get_dummy_input(input_type)) - side_input = np.ones((1,), dtype=np.float32) - detections = detect_fn_sig(input_tensor=image, - side_inp=tf.constant(side_input)) - - detection_fields = fields.DetectionResultFields - self.assertAllClose(detections[detection_fields.detection_boxes], - [[[0.0, 0.0, 0.5, 0.5], - [0.5, 0.5, 0.8, 0.8]], - [[0.5, 0.5, 1.0, 1.0], - [0.0, 0.0, 0.0, 0.0]]]) - self.assertAllClose(detections[detection_fields.detection_scores], - [[400.7, 400.6], [400.9, 400.0]]) - self.assertAllClose(detections[detection_fields.detection_classes], - [[1, 2], [2, 1]]) - self.assertAllClose(detections[detection_fields.num_detections], [2, 1]) - def test_export_checkpoint_and_run_inference_with_image(self): tmp_dir = self.get_temp_dir() self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0) diff --git a/research/object_detection/exporter_lib_v2.py b/research/object_detection/exporter_lib_v2.py index d1c7bbdc2..a7ecb45ad 100644 --- a/research/object_detection/exporter_lib_v2.py +++ b/research/object_detection/exporter_lib_v2.py @@ -21,7 +21,7 @@ from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.utils import config_util -import ast + def _decode_image(encoded_image_string_tensor): image_tensor = tf.image.decode_image(encoded_image_string_tensor, @@ -36,32 +36,11 @@ def _decode_tf_example(tf_example_string_tensor): 
image_tensor = tensor_dict[fields.InputDataFields.image] return image_tensor -def _zip_side_inputs(side_input_shapes="", - side_input_types="", - side_input_names=""): - """Zips the side inputs together. - - Args: - side_input_shapes: forward-slash-separated list of comma-separated lists - describing input shapes. - side_input_types: comma-separated list of the types of the inputs. - side_input_names: comma-separated list of the names of the inputs. - - Returns: - a zipped list of side input tuples. - """ - side_input_shapes = list(map(lambda x: ast.literal_eval('[' + x + ']'), - side_input_shapes.split('/'))) - side_input_types = eval('[' + side_input_types + ']') - side_input_names = side_input_names.split(',') - return zip(side_input_shapes, side_input_types, side_input_names) class DetectionInferenceModule(tf.Module): """Detection Inference Module.""" - def __init__(self, detection_model, - use_side_inputs=False, - zipped_side_inputs=None): + def __init__(self, detection_model): """Initializes a module for detection. Args: @@ -69,7 +48,7 @@ class DetectionInferenceModule(tf.Module): """ self._model = detection_model - def _run_inference_on_images(self, image, **kwargs): + def _run_inference_on_images(self, image): """Cast image to float and run inference. Args: @@ -81,7 +60,7 @@ class DetectionInferenceModule(tf.Module): image = tf.cast(image, tf.float32) image, shapes = self._model.preprocess(image) - prediction_dict = self._model.predict(image, shapes, **kwargs) + prediction_dict = self._model.predict(image, shapes) detections = self._model.postprocess(prediction_dict, shapes) classes_field = fields.DetectionResultFields.detection_classes detections[classes_field] = ( @@ -96,33 +75,11 @@ class DetectionInferenceModule(tf.Module): class DetectionFromImageModule(DetectionInferenceModule): """Detection Inference Module for image inputs.""" - def __init__(self, detection_model, - use_side_inputs=False, - zipped_side_inputs=None): - """Initializes a module for detection. - - Args: - detection_model: The detection model to use for inference. - - """ - self.side_input_names = [] - sig = [tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)] - if use_side_inputs: - for info in zipped_side_inputs: - self.side_input_names.append(info[2]) - sig.append(tf.TensorSpec(shape=info[0], - dtype=info[1], - name=info[2])) - - def __call__(input_tensor, *side_inputs): - kwargs = dict(zip(self.side_input_names, side_inputs)) - return self._run_inference_on_images(input_tensor, **kwargs) - - self.__call__ = tf.function(__call__, input_signature=sig) - - super(DetectionFromImageModule, self).__init__(detection_model, - use_side_inputs, - zipped_side_inputs) + @tf.function( + input_signature=[ + tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)]) + def __call__(self, input_tensor): + return self._run_inference_on_images(input_tensor) class DetectionFromFloatImageModule(DetectionInferenceModule): @@ -176,11 +133,7 @@ DETECTION_MODULE_MAP = { def export_inference_graph(input_type, pipeline_config, trained_checkpoint_dir, - output_directory, - use_side_inputs=False, - side_input_shapes="", - side_input_types="", - side_input_names=""): + output_directory): """Exports inference graph for the model specified in the pipeline config. This function creates `output_directory` if it does not already exist, @@ -194,12 +147,6 @@ def export_inference_graph(input_type, pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto. trained_checkpoint_dir: Path to the trained checkpoint file. 
output_directory: Path to write outputs. - use_side_inputs: boolean that determines whether side inputs should be - included in the input signature. - side_input_shapes: forward-slash-separated list of comma-separated lists - describing input shapes. - side_input_types: comma-separated list of the types of the inputs. - side_input_names: comma-separated list of the names of the inputs. Raises: ValueError: if input_type is invalid. """ @@ -217,17 +164,7 @@ def export_inference_graph(input_type, if input_type not in DETECTION_MODULE_MAP: raise ValueError('Unrecognized `input_type`') - if use_side_inputs and input_type != 'image_tensor': - raise ValueError('Side inputs supported for image_tensor input type only.') - - zipped_side_inputs = None - if use_side_inputs: - zipped_side_inputs = _zip_side_inputs(side_input_shapes, - side_input_types, - side_input_names) - detection_module = DETECTION_MODULE_MAP[input_type](detection_model, - use_side_inputs, - zipped_side_inputs) + detection_module = DETECTION_MODULE_MAP[input_type](detection_model) # Getting the concrete function traces the graph and forces variables to # be constructed --- only after this can we save the checkpoint and # saved model. diff --git a/research/object_detection/exporter_main_v2.py b/research/object_detection/exporter_main_v2.py index c3dce5d71..a2ba84560 100644 --- a/research/object_detection/exporter_main_v2.py +++ b/research/object_detection/exporter_main_v2.py @@ -106,27 +106,6 @@ flags.DEFINE_string('output_directory', None, 'Path to write outputs.') flags.DEFINE_string('config_override', '', 'pipeline_pb2.TrainEvalPipelineConfig ' 'text proto to override pipeline_config_path.') -flags.DEFINE_boolean('use_side_inputs', False, - 'If True, uses side inputs as well as image inputs.') -flags.DEFINE_string('side_input_shapes', "", - 'If use_side_inputs is True, this explicitly sets ' - 'the shape of the side input tensors to a fixed size. The ' - 'dimensions are to be provided as a comma-separated list ' - 'of integers. A value of -1 can be used for unknown ' - 'dimensions. A `/` denotes a break, starting the shape of ' - 'the next side input tensor. This flag is required if ' - 'using side inputs.') -flags.DEFINE_string('side_input_types', "", - 'If use_side_inputs is True, this explicitly sets ' - 'the type of the side input tensors. The ' - 'dimensions are to be provided as a comma-separated list ' - 'of types, each of `string`, `integer`, or `float`. ' - 'This flag is required if using side inputs.') -flags.DEFINE_string('side_input_names', "", - 'If use_side_inputs is True, this explicitly sets ' - 'the names of the side input tensors required by the model ' - 'assuming the names will be a comma-separated list of ' - 'strings. 
This flag is required if using side inputs.') flags.mark_flag_as_required('pipeline_config_path') flags.mark_flag_as_required('trained_checkpoint_dir') @@ -140,8 +119,7 @@ def main(_): text_format.Merge(FLAGS.config_override, pipeline_config) exporter_lib_v2.export_inference_graph( FLAGS.input_type, pipeline_config, FLAGS.trained_checkpoint_dir, - FLAGS.output_directory, FLAGS.use_side_inputs, FLAGS.side_input_shapes, - FLAGS.side_input_types, FLAGS.side_input_names) + FLAGS.output_directory) if __name__ == '__main__': -- GitLab From e0e440e13f3b1902b57ca8ba6d516ae772e4b96c Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Tue, 4 Aug 2020 20:08:46 +0000 Subject: [PATCH 118/128] fix PR --- research/object_detection/core/losses.py | 34 ++++++++ research/object_detection/core/losses_test.py | 40 ++++++++++ research/object_detection/utils/ops.py | 57 ++++++++++++++ research/object_detection/utils/ops_test.py | 77 +++++++++++++++++++ 4 files changed, 208 insertions(+) diff --git a/research/object_detection/core/losses.py b/research/object_detection/core/losses.py index c4d499e7e..d00000513 100644 --- a/research/object_detection/core/losses.py +++ b/research/object_detection/core/losses.py @@ -36,6 +36,7 @@ import tensorflow.compat.v1 as tf from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.utils import ops +from object_detection.utils import shape_utils class Loss(six.with_metaclass(abc.ABCMeta, object)): @@ -210,6 +211,39 @@ class WeightedIOULocalizationLoss(Loss): return tf.reshape(weights, [-1]) * per_anchor_iou_loss + +class WeightedGIOULocalizationLoss(Loss): + """GIOU localization loss function. + + Sums the GIOU loss for corresponding pairs of predicted/groundtruth boxes + and for each pair assign a loss of 1 - GIOU. We then compute a weighted + sum over all pairs which is returned as the total loss. + """ + + def _compute_loss(self, prediction_tensor, target_tensor, weights): + """Compute loss function. + + Args: + prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded predicted boxes + target_tensor: A float tensor of shape [batch_size, num_anchors, 4] + representing the decoded target boxes + weights: a float tensor of shape [batch_size, num_anchors] + + Returns: + loss: a float tensor of shape [batch_size, num_anchors] tensor + representing the value of the loss function. 
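To make the new loss concrete, a hand-check of the total that its unit test (later in this patch) expects from three weighted anchor pairs:

# Two matched pairs contribute zero loss. The degenerate prediction
# [0, 0, 0, 0] against [5, 5, 10, 10] has IoU 0, union 25 and hull 100,
# so GIoU = -(100 - 25) / 100 = -0.75 and the per-anchor loss is 1.75.
weights = [1.0, 0.5, 2.0]
per_anchor_loss = [0.0, 0.0, 1.75]
print(sum(w * l for w, l in zip(weights, per_anchor_loss)))  # 3.5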
+ """ + batch_size, num_anchors, _ = shape_utils.combined_static_and_dynamic_shape( + prediction_tensor) + predicted_boxes = tf.reshape(prediction_tensor, [-1, 4]) + target_boxes = tf.reshape(target_tensor, [-1, 4]) + + per_anchor_iou_loss = 1 - ops.giou(predicted_boxes, target_boxes) + return tf.reshape(tf.reshape(weights, [-1]) * per_anchor_iou_loss, + [batch_size, num_anchors]) + + class WeightedSigmoidClassificationLoss(Loss): """Sigmoid cross entropy classification loss function.""" diff --git a/research/object_detection/core/losses_test.py b/research/object_detection/core/losses_test.py index 5957052ee..f52fd6320 100644 --- a/research/object_detection/core/losses_test.py +++ b/research/object_detection/core/losses_test.py @@ -197,6 +197,46 @@ class WeightedIOULocalizationLossTest(test_case.TestCase): loss_output = self.execute(graph_fn, []) self.assertAllClose(loss_output, exp_loss) +class WeightedGIOULocalizationLossTest(test_case.TestCase): + + def testReturnsCorrectLoss(self): + def graph_fn(): + prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], + [0, 0, 1, 1], + [0, 0, 0, 0]]]) + target_tensor = tf.constant([[[1.5, 0, 2.4, 1], + [0, 0, 1, 1], + [5, 5, 10, 10]]]) + weights = [[1.0, .5, 2.0]] + loss_op = losses.WeightedGIOULocalizationLoss() + loss = loss_op(prediction_tensor, + target_tensor, + weights=weights) + loss = tf.reduce_sum(loss) + return loss + exp_loss = 3.5 + loss_output = self.execute(graph_fn, []) + self.assertAllClose(loss_output, exp_loss) + + def testReturnsCorrectLossWithNoLabels(self): + def graph_fn(): + prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], + [0, 0, 1, 1], + [0, 0, .5, .25]]]) + target_tensor = tf.constant([[[1.5, 0, 2.4, 1], + [0, 0, 1, 1], + [50, 50, 500.5, 100.25]]]) + weights = [[1.0, .5, 2.0]] + losses_mask = tf.constant([False], tf.bool) + loss_op = losses.WeightedGIOULocalizationLoss() + loss = loss_op(prediction_tensor, target_tensor, weights=weights, + losses_mask=losses_mask) + loss = tf.reduce_sum(loss) + return loss + exp_loss = 0.0 + loss_output = self.execute(graph_fn, []) + self.assertAllClose(loss_output, exp_loss) + class WeightedSigmoidClassificationLossTest(test_case.TestCase): diff --git a/research/object_detection/utils/ops.py b/research/object_detection/utils/ops.py index f345fa388..344fbd94e 100644 --- a/research/object_detection/utils/ops.py +++ b/research/object_detection/utils/ops.py @@ -1134,3 +1134,60 @@ def decode_image(tensor_dict): tensor_dict[fields.InputDataFields.image], channels=3) tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3]) return tensor_dict + +def giou(boxes1, boxes2): + """ + Computes generalized IOU between two tensors. Each box should be + represented as [ymin, xmin, ymax, xmax]. 
+ + Args: + boxes1: a tensor with shape [num_boxes, 4] + boxes2: a tensor with shape [num_boxes, 4] + + Returns: + a tensor of shape [num_boxes] containing GIoUs + + """ + def two_boxes_giou(boxes): + boxes1, boxes2 = boxes + + pred_ymin, pred_xmin, pred_ymax, pred_xmax = tf.unstack(boxes1) + gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.unstack(boxes2) + + gt_area = (gt_ymax - gt_ymin) * (gt_xmax - gt_xmin) + pred_area = (pred_ymax - pred_ymin) * (pred_xmax - pred_xmin) + + x1I = tf.maximum(pred_xmin, gt_xmin) + x2I = tf.minimum(pred_xmax, gt_xmax) + y1I = tf.maximum(pred_ymin, gt_ymin) + y2I = tf.minimum(pred_ymax, gt_ymax) + intersection_area = tf.maximum(0.0, y2I - y1I) * tf.maximum(0.0, x2I - x1I) + + x1C = tf.minimum(pred_xmin, gt_xmin) + x2C = tf.maximum(pred_xmax, gt_xmax) + y1C = tf.minimum(pred_ymin, gt_ymin) + y2C = tf.maximum(pred_ymax, gt_ymax) + hull_area = (y2C - y1C) * (x2C - x1C) + + union_area = gt_area + pred_area - intersection_area + IoU = tf.where(tf.equal(union_area, 0.0), 0.0, + intersection_area/union_area) + gIoU = IoU - tf.where(hull_area > 0.0, + (hull_area - union_area) / hull_area, IoU) + + return gIoU + + return shape_utils.static_or_dynamic_map_fn(two_boxes_giou, [boxes1, boxes2]) + +def center_to_corner_coordinate(input_tensor): + """Converts input boxes from center to corner representation.""" + reshaped_encodings = tf.reshape(input_tensor, [-1, 4]) + ycenter = tf.gather(reshaped_encodings, [0], axis=1) + xcenter = tf.gather(reshaped_encodings, [1], axis=1) + h = tf.gather(reshaped_encodings, [2], axis=1) + w = tf.gather(reshaped_encodings, [3], axis=1) + ymin = ycenter - h / 2. + xmin = xcenter - w / 2. + ymax = ycenter + h / 2. + xmax = xcenter + w / 2. + return tf.squeeze(tf.stack([ymin, xmin, ymax, xmax], axis=1)) \ No newline at end of file diff --git a/research/object_detection/utils/ops_test.py b/research/object_detection/utils/ops_test.py index c5252d644..d13285df8 100644 --- a/research/object_detection/utils/ops_test.py +++ b/research/object_detection/utils/ops_test.py @@ -1630,8 +1630,85 @@ class TestGatherWithPaddingValues(test_case.TestCase): self.assertAllClose(expected_gathered_tensor, gathered_tensor_np) +class TestGIoU(test_case.TestCase): + def test_giou_general(self): + expected_giou_tensor = [ + 0, -1/3, 1/25, -3/4, 0, -98/100 + ] + + def graph_fn(): + boxes1 = tf.constant([[3, 4, 5, 6], [3, 3, 5, 5], + [2, 1, 7, 6], [0, 0, 0, 0], + [3, 3, 5, 5], [9, 9, 10, 10]], + dtype=tf.float32) + boxes2 = tf.constant([[3, 2, 5, 4], [3, 7, 5, 9], + [4, 3, 5, 4], [5, 5, 10, 10], + [3, 5, 5, 7], [0, 0, 1, 1]], dtype=tf.float32) + + giou = ops.giou(boxes1, boxes2) + self.assertEqual(giou.dtype, tf.float32) + + return giou + + giou = self.execute(graph_fn, []) + self.assertAllClose(expected_giou_tensor, giou) + + def test_giou_edge_cases(self): + expected_giou_tensor = [ + 1, 0 + ] + + def graph_fn(): + boxes1 = tf.constant([[3, 3, 5, 5], [1, 1, 1, 1]], + dtype=tf.float32) + boxes2 = tf.constant([[3, 3, 5, 5], [1, 1, 1, 1]], + dtype=tf.float32) + + giou = ops.giou(boxes1, boxes2) + self.assertEqual(giou.dtype, tf.float32) + + return giou + + giou = self.execute(graph_fn, []) + self.assertAllClose(expected_giou_tensor, giou) + + def test_giou_l1_same(self): + expected_giou_tensor = [ + 2/3, 3/5 + ] + + def graph_fn(): + boxes1 = tf.constant([[3, 3, 5, 5], [3, 3, 5, 5]], + dtype=tf.float32) + boxes2 = tf.constant([[3, 2.5, 5, 5.5], [3, 2.5, 5, 4.5]], + dtype=tf.float32) + + giou = ops.giou(boxes1, boxes2) + self.assertEqual(giou.dtype, tf.float32) + + return 
giou + + giou = self.execute(graph_fn, []) + self.assertAllClose(expected_giou_tensor, giou) + +class TestCoordinateConversion(test_case.TestCase): + + def test_coord_conv(self): + expected_box_tensor = [ + [0.5, 0.5, 5.5, 5.5], [2, 1, 4, 7], [0, 0, 0, 0] + ] + + def graph_fn(): + boxes = tf.constant([[3, 3, 5, 5], [3, 4, 2, 6], [0, 0, 0, 0]], + dtype=tf.float32) + + converted = ops.center_to_corner_coordinate(boxes) + self.assertEqual(converted.dtype, tf.float32) + return converted + converted = self.execute(graph_fn, []) + self.assertAllClose(expected_box_tensor, converted) -- GitLab From d192f78a4e0ab45c800433133d3af96482ba7329 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 4 Aug 2020 13:27:13 -0700 Subject: [PATCH 119/128] For object detection models, removing rpn_head.anchors_per_location from config and using anchor.num_scales * len(anchor.aspect_ratios) to compute anchors_per_location PiperOrigin-RevId: 324878153 --- official/vision/detection/configs/maskrcnn_config.py | 1 - official/vision/detection/configs/retinanet_config.py | 1 - official/vision/detection/configs/shapemask_config.py | 1 - .../vision/detection/modeling/architecture/factory.py | 8 ++++++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/official/vision/detection/configs/maskrcnn_config.py b/official/vision/detection/configs/maskrcnn_config.py index 70c9b3144..c62756274 100644 --- a/official/vision/detection/configs/maskrcnn_config.py +++ b/official/vision/detection/configs/maskrcnn_config.py @@ -52,7 +52,6 @@ MASKRCNN_CFG.override({ 'anchor_size': 8, }, 'rpn_head': { - 'anchors_per_location': 3, 'num_convs': 2, 'num_filters': 256, 'use_separable_conv': False, diff --git a/official/vision/detection/configs/retinanet_config.py b/official/vision/detection/configs/retinanet_config.py index 579e30d08..7e89163b4 100644 --- a/official/vision/detection/configs/retinanet_config.py +++ b/official/vision/detection/configs/retinanet_config.py @@ -39,7 +39,6 @@ RETINANET_CFG.override({ 'max_num_instances': 100, }, 'retinanet_head': { - 'anchors_per_location': 9, 'num_convs': 4, 'num_filters': 256, 'use_separable_conv': False, diff --git a/official/vision/detection/configs/shapemask_config.py b/official/vision/detection/configs/shapemask_config.py index 0914c492e..071567933 100644 --- a/official/vision/detection/configs/shapemask_config.py +++ b/official/vision/detection/configs/shapemask_config.py @@ -62,7 +62,6 @@ SHAPEMASK_CFG.override({ 'upsample_factor': 4, }, 'retinanet_head': { - 'anchors_per_location': 9, 'num_convs': 4, 'num_filters': 256, 'use_separable_conv': False, diff --git a/official/vision/detection/modeling/architecture/factory.py b/official/vision/detection/modeling/architecture/factory.py index 403d815ea..60ed4d51f 100644 --- a/official/vision/detection/modeling/architecture/factory.py +++ b/official/vision/detection/modeling/architecture/factory.py @@ -77,11 +77,13 @@ def multilevel_features_generator(params): def retinanet_head_generator(params): """Generator function for RetinaNet head architecture.""" head_params = params.retinanet_head + anchors_per_location = params.anchor.num_scales * len( + params.anchor.aspect_ratios) return heads.RetinanetHead( params.architecture.min_level, params.architecture.max_level, params.architecture.num_classes, - head_params.anchors_per_location, + anchors_per_location, head_params.num_convs, head_params.num_filters, head_params.use_separable_conv, @@ -91,10 +93,12 @@ def retinanet_head_generator(params): def rpn_head_generator(params): 
"""Generator function for RPN head architecture.""" head_params = params.rpn_head + anchors_per_location = params.anchor.num_scales * len( + params.anchor.aspect_ratios) return heads.RpnHead( params.architecture.min_level, params.architecture.max_level, - head_params.anchors_per_location, + anchors_per_location, head_params.num_convs, head_params.num_filters, head_params.use_separable_conv, -- GitLab From 367486482c5fe6fc896868edf9bbde7519deb52d Mon Sep 17 00:00:00 2001 From: Ruoxin Sang Date: Tue, 4 Aug 2020 16:54:16 -0700 Subject: [PATCH 120/128] Internal change PiperOrigin-RevId: 324919232 --- orbit/runner.py | 2 +- orbit/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/orbit/runner.py b/orbit/runner.py index 967133db5..2aadc918d 100644 --- a/orbit/runner.py +++ b/orbit/runner.py @@ -34,7 +34,7 @@ class AbstractTrainer(tf.Module, metaclass=abc.ABCMeta): large in Eager mode. It is usually encouraged to create a host training loop (e.g. using a `tf.range` wrapping `strategy.run` inside a `tf.function`) in the TPU case. For the cases that don't require host - training loop to acheive peak performance, users can just implement a simple + training loop to achieve peak performance, users can just implement a simple python loop to drive each step. Args: diff --git a/orbit/utils.py b/orbit/utils.py index a8b231121..55b549d9a 100644 --- a/orbit/utils.py +++ b/orbit/utils.py @@ -432,7 +432,7 @@ def train_function_with_summaries(*args, **kwargs): return decorator -def get_value(x) -> np.ndarray: +def get_value(x) -> np.number: """Returns the value of a variable/tensor. Args: -- GitLab From 29d45e889b7695333d45171fc3b7e70d4629d067 Mon Sep 17 00:00:00 2001 From: Hongkun Yu Date: Wed, 5 Aug 2020 12:20:15 -0700 Subject: [PATCH 121/128] Internal change PiperOrigin-RevId: 325073831 --- official/nlp/configs/bert.py | 37 +----- official/nlp/configs/bert_test.py | 66 ----------- official/nlp/configs/electra.py | 62 +--------- official/nlp/configs/electra_test.py | 49 -------- official/nlp/configs/encoders.py | 112 ++++++++++++------ official/nlp/modeling/models/__init__.py | 2 +- official/nlp/tasks/electra_task.py | 51 ++++++-- official/nlp/tasks/electra_task_test.py | 18 +-- official/nlp/tasks/masked_lm.py | 27 +++-- official/nlp/tasks/masked_lm_test.py | 6 +- official/nlp/tasks/question_answering.py | 9 +- official/nlp/tasks/question_answering_test.py | 58 +++++---- official/nlp/tasks/sentence_prediction.py | 10 +- .../nlp/tasks/sentence_prediction_test.py | 17 +-- official/nlp/tasks/tagging.py | 6 +- official/nlp/tasks/tagging_test.py | 6 +- 16 files changed, 219 insertions(+), 317 deletions(-) delete mode 100644 official/nlp/configs/bert_test.py delete mode 100644 official/nlp/configs/electra_test.py diff --git a/official/nlp/configs/bert.py b/official/nlp/configs/bert.py index fad49e29d..e0a64b78b 100644 --- a/official/nlp/configs/bert.py +++ b/official/nlp/configs/bert.py @@ -20,13 +20,9 @@ Includes configurations and instantiation methods. 
from typing import List, Optional, Text import dataclasses -import tensorflow as tf -from official.modeling import tf_utils from official.modeling.hyperparams import base_config from official.nlp.configs import encoders -from official.nlp.modeling import layers -from official.nlp.modeling.models import bert_pretrainer @dataclasses.dataclass @@ -40,32 +36,9 @@ class ClsHeadConfig(base_config.Config): @dataclasses.dataclass -class BertPretrainerConfig(base_config.Config): - """BERT encoder configuration.""" - encoder: encoders.TransformerEncoderConfig = ( - encoders.TransformerEncoderConfig()) +class PretrainerConfig(base_config.Config): + """Pretrainer configuration.""" + encoder: encoders.EncoderConfig = encoders.EncoderConfig() cls_heads: List[ClsHeadConfig] = dataclasses.field(default_factory=list) - - -def instantiate_classification_heads_from_cfgs( - cls_head_configs: List[ClsHeadConfig]) -> List[layers.ClassificationHead]: - return [ - layers.ClassificationHead(**cfg.as_dict()) for cfg in cls_head_configs - ] if cls_head_configs else [] - - -def instantiate_pretrainer_from_cfg( - config: BertPretrainerConfig, - encoder_network: Optional[tf.keras.Model] = None -) -> bert_pretrainer.BertPretrainerV2: - """Instantiates a BertPretrainer from the config.""" - encoder_cfg = config.encoder - if encoder_network is None: - encoder_network = encoders.instantiate_encoder_from_cfg(encoder_cfg) - return bert_pretrainer.BertPretrainerV2( - mlm_activation=tf_utils.get_activation(encoder_cfg.hidden_activation), - mlm_initializer=tf.keras.initializers.TruncatedNormal( - stddev=encoder_cfg.initializer_range), - encoder_network=encoder_network, - classification_heads=instantiate_classification_heads_from_cfgs( - config.cls_heads)) + mlm_activation: str = "gelu" + mlm_initializer_range: float = 0.02 diff --git a/official/nlp/configs/bert_test.py b/official/nlp/configs/bert_test.py deleted file mode 100644 index 871ab4537..000000000 --- a/official/nlp/configs/bert_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for BERT configurations and models instantiation.""" - -import tensorflow as tf - -from official.nlp.configs import bert -from official.nlp.configs import encoders - - -class BertModelsTest(tf.test.TestCase): - - def test_network_invocation(self): - config = bert.BertPretrainerConfig( - encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1)) - _ = bert.instantiate_pretrainer_from_cfg(config) - - # Invokes with classification heads. 
- config = bert.BertPretrainerConfig( - encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1), - cls_heads=[ - bert.ClsHeadConfig( - inner_dim=10, num_classes=2, name="next_sentence") - ]) - _ = bert.instantiate_pretrainer_from_cfg(config) - - with self.assertRaises(ValueError): - config = bert.BertPretrainerConfig( - encoder=encoders.TransformerEncoderConfig( - vocab_size=10, num_layers=1), - cls_heads=[ - bert.ClsHeadConfig( - inner_dim=10, num_classes=2, name="next_sentence"), - bert.ClsHeadConfig( - inner_dim=10, num_classes=2, name="next_sentence") - ]) - _ = bert.instantiate_pretrainer_from_cfg(config) - - def test_checkpoint_items(self): - config = bert.BertPretrainerConfig( - encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1), - cls_heads=[ - bert.ClsHeadConfig( - inner_dim=10, num_classes=2, name="next_sentence") - ]) - encoder = bert.instantiate_pretrainer_from_cfg(config) - self.assertSameElements( - encoder.checkpoint_items.keys(), - ["encoder", "masked_lm", "next_sentence.pooler_dense"]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/official/nlp/configs/electra.py b/official/nlp/configs/electra.py index 61fd82db7..527cde521 100644 --- a/official/nlp/configs/electra.py +++ b/official/nlp/configs/electra.py @@ -14,21 +14,17 @@ # limitations under the License. # ============================================================================== """ELECTRA model configurations and instantiation methods.""" -from typing import List, Optional +from typing import List import dataclasses -import tensorflow as tf -from official.modeling import tf_utils from official.modeling.hyperparams import base_config from official.nlp.configs import bert from official.nlp.configs import encoders -from official.nlp.modeling import layers -from official.nlp.modeling.models import electra_pretrainer @dataclasses.dataclass -class ELECTRAPretrainerConfig(base_config.Config): +class ElectraPretrainerConfig(base_config.Config): """ELECTRA pretrainer configuration.""" num_masked_tokens: int = 76 sequence_length: int = 512 @@ -36,56 +32,6 @@ class ELECTRAPretrainerConfig(base_config.Config): discriminator_loss_weight: float = 50.0 tie_embeddings: bool = True disallow_correct: bool = False - generator_encoder: encoders.TransformerEncoderConfig = ( - encoders.TransformerEncoderConfig()) - discriminator_encoder: encoders.TransformerEncoderConfig = ( - encoders.TransformerEncoderConfig()) + generator_encoder: encoders.EncoderConfig = encoders.EncoderConfig() + discriminator_encoder: encoders.EncoderConfig = encoders.EncoderConfig() cls_heads: List[bert.ClsHeadConfig] = dataclasses.field(default_factory=list) - - -def instantiate_classification_heads_from_cfgs( - cls_head_configs: List[bert.ClsHeadConfig] -) -> List[layers.ClassificationHead]: - if cls_head_configs: - return [ - layers.ClassificationHead(**cfg.as_dict()) for cfg in cls_head_configs - ] - else: - return [] - - -def instantiate_pretrainer_from_cfg( - config: ELECTRAPretrainerConfig, - generator_network: Optional[tf.keras.Model] = None, - discriminator_network: Optional[tf.keras.Model] = None, - ) -> electra_pretrainer.ElectraPretrainer: - """Instantiates ElectraPretrainer from the config.""" - generator_encoder_cfg = config.generator_encoder - discriminator_encoder_cfg = config.discriminator_encoder - # Copy discriminator's embeddings to generator for easier model serialization. 
- if discriminator_network is None: - discriminator_network = encoders.instantiate_encoder_from_cfg( - discriminator_encoder_cfg) - if generator_network is None: - if config.tie_embeddings: - embedding_layer = discriminator_network.get_embedding_layer() - generator_network = encoders.instantiate_encoder_from_cfg( - generator_encoder_cfg, embedding_layer=embedding_layer) - else: - generator_network = encoders.instantiate_encoder_from_cfg( - generator_encoder_cfg) - - return electra_pretrainer.ElectraPretrainer( - generator_network=generator_network, - discriminator_network=discriminator_network, - vocab_size=config.generator_encoder.vocab_size, - num_classes=config.num_classes, - sequence_length=config.sequence_length, - num_token_predictions=config.num_masked_tokens, - mlm_activation=tf_utils.get_activation( - generator_encoder_cfg.hidden_activation), - mlm_initializer=tf.keras.initializers.TruncatedNormal( - stddev=generator_encoder_cfg.initializer_range), - classification_heads=instantiate_classification_heads_from_cfgs( - config.cls_heads), - disallow_correct=config.disallow_correct) diff --git a/official/nlp/configs/electra_test.py b/official/nlp/configs/electra_test.py deleted file mode 100644 index d06d64a95..000000000 --- a/official/nlp/configs/electra_test.py +++ /dev/null @@ -1,49 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for ELECTRA configurations and models instantiation.""" - -import tensorflow as tf - -from official.nlp.configs import bert -from official.nlp.configs import electra -from official.nlp.configs import encoders - - -class ELECTRAModelsTest(tf.test.TestCase): - - def test_network_invocation(self): - config = electra.ELECTRAPretrainerConfig( - generator_encoder=encoders.TransformerEncoderConfig( - vocab_size=10, num_layers=1), - discriminator_encoder=encoders.TransformerEncoderConfig( - vocab_size=10, num_layers=2), - ) - _ = electra.instantiate_pretrainer_from_cfg(config) - - # Invokes with classification heads. - config = electra.ELECTRAPretrainerConfig( - generator_encoder=encoders.TransformerEncoderConfig( - vocab_size=10, num_layers=1), - discriminator_encoder=encoders.TransformerEncoderConfig( - vocab_size=10, num_layers=2), - cls_heads=[ - bert.ClsHeadConfig( - inner_dim=10, num_classes=2, name="next_sentence") - ]) - _ = electra.instantiate_pretrainer_from_cfg(config) - -if __name__ == "__main__": - tf.test.main() diff --git a/official/nlp/configs/encoders.py b/official/nlp/configs/encoders.py index b7467634a..722785904 100644 --- a/official/nlp/configs/encoders.py +++ b/official/nlp/configs/encoders.py @@ -15,20 +15,23 @@ # ============================================================================== """Transformer Encoders. -Includes configurations and instantiation methods. +Includes configurations and factory methods. 
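+
+`EncoderConfig` is a one-of config: its `type` field selects which sub-config
+(currently only `bert`) is active, and `build_encoder` dispatches on that type.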
""" from typing import Optional + +from absl import logging import dataclasses +import gin import tensorflow as tf +from official.modeling import hyperparams from official.modeling import tf_utils -from official.modeling.hyperparams import base_config from official.nlp.modeling import layers from official.nlp.modeling import networks @dataclasses.dataclass -class TransformerEncoderConfig(base_config.Config): +class BertEncoderConfig(hyperparams.Config): """BERT encoder configuration.""" vocab_size: int = 30522 hidden_size: int = 768 @@ -44,55 +47,86 @@ class TransformerEncoderConfig(base_config.Config): embedding_size: Optional[int] = None -def instantiate_encoder_from_cfg( - config: TransformerEncoderConfig, - encoder_cls=networks.TransformerEncoder, - embedding_layer: Optional[layers.OnDeviceEmbedding] = None): - """Instantiate a Transformer encoder network from TransformerEncoderConfig.""" +@dataclasses.dataclass +class EncoderConfig(hyperparams.OneOfConfig): + """Encoder configuration.""" + type: Optional[str] = "bert" + bert: BertEncoderConfig = BertEncoderConfig() + + +ENCODER_CLS = { + "bert": networks.TransformerEncoder, +} + + +@gin.configurable +def build_encoder(config: EncoderConfig, + embedding_layer: Optional[layers.OnDeviceEmbedding] = None, + encoder_cls=None, + bypass_config: bool = False): + """Instantiate a Transformer encoder network from EncoderConfig. + + Args: + config: the one-of encoder config, which provides encoder parameters of a + chosen encoder. + embedding_layer: an external embedding layer passed to the encoder. + encoder_cls: an external encoder cls not included in the supported encoders, + usually used by gin.configurable. + bypass_config: whether to ignore config instance to create the object with + `encoder_cls`. + + Returns: + An encoder instance. 
+ """ + encoder_type = config.type + encoder_cfg = config.get() + encoder_cls = encoder_cls or ENCODER_CLS[encoder_type] + logging.info("Encoder class: %s to build...", encoder_cls.__name__) + if bypass_config: + return encoder_cls() if encoder_cls.__name__ == "EncoderScaffold": embedding_cfg = dict( - vocab_size=config.vocab_size, - type_vocab_size=config.type_vocab_size, - hidden_size=config.hidden_size, - max_seq_length=config.max_position_embeddings, + vocab_size=encoder_cfg.vocab_size, + type_vocab_size=encoder_cfg.type_vocab_size, + hidden_size=encoder_cfg.hidden_size, + max_seq_length=encoder_cfg.max_position_embeddings, initializer=tf.keras.initializers.TruncatedNormal( - stddev=config.initializer_range), - dropout_rate=config.dropout_rate, + stddev=encoder_cfg.initializer_range), + dropout_rate=encoder_cfg.dropout_rate, ) hidden_cfg = dict( - num_attention_heads=config.num_attention_heads, - intermediate_size=config.intermediate_size, + num_attention_heads=encoder_cfg.num_attention_heads, + intermediate_size=encoder_cfg.intermediate_size, intermediate_activation=tf_utils.get_activation( - config.hidden_activation), - dropout_rate=config.dropout_rate, - attention_dropout_rate=config.attention_dropout_rate, + encoder_cfg.hidden_activation), + dropout_rate=encoder_cfg.dropout_rate, + attention_dropout_rate=encoder_cfg.attention_dropout_rate, kernel_initializer=tf.keras.initializers.TruncatedNormal( - stddev=config.initializer_range), + stddev=encoder_cfg.initializer_range), ) kwargs = dict( embedding_cfg=embedding_cfg, hidden_cfg=hidden_cfg, - num_hidden_instances=config.num_layers, - pooled_output_dim=config.hidden_size, + num_hidden_instances=encoder_cfg.num_layers, + pooled_output_dim=encoder_cfg.hidden_size, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal( - stddev=config.initializer_range)) + stddev=encoder_cfg.initializer_range)) return encoder_cls(**kwargs) - if encoder_cls.__name__ != "TransformerEncoder": - raise ValueError("Unknown encoder network class. %s" % str(encoder_cls)) - encoder_network = encoder_cls( - vocab_size=config.vocab_size, - hidden_size=config.hidden_size, - num_layers=config.num_layers, - num_attention_heads=config.num_attention_heads, - intermediate_size=config.intermediate_size, - activation=tf_utils.get_activation(config.hidden_activation), - dropout_rate=config.dropout_rate, - attention_dropout_rate=config.attention_dropout_rate, - max_sequence_length=config.max_position_embeddings, - type_vocab_size=config.type_vocab_size, + # Uses the default BERTEncoder configuration schema to create the encoder. + # If it does not match, please add a switch branch by the encoder type. 
+ return encoder_cls( + vocab_size=encoder_cfg.vocab_size, + hidden_size=encoder_cfg.hidden_size, + num_layers=encoder_cfg.num_layers, + num_attention_heads=encoder_cfg.num_attention_heads, + intermediate_size=encoder_cfg.intermediate_size, + activation=tf_utils.get_activation(encoder_cfg.hidden_activation), + dropout_rate=encoder_cfg.dropout_rate, + attention_dropout_rate=encoder_cfg.attention_dropout_rate, + max_sequence_length=encoder_cfg.max_position_embeddings, + type_vocab_size=encoder_cfg.type_vocab_size, initializer=tf.keras.initializers.TruncatedNormal( - stddev=config.initializer_range), - embedding_width=config.embedding_size, + stddev=encoder_cfg.initializer_range), + embedding_width=encoder_cfg.embedding_size, embedding_layer=embedding_layer) - return encoder_network diff --git a/official/nlp/modeling/models/__init__.py b/official/nlp/modeling/models/__init__.py index a072f36b7..63529aa3f 100644 --- a/official/nlp/modeling/models/__init__.py +++ b/official/nlp/modeling/models/__init__.py @@ -14,7 +14,7 @@ # ============================================================================== """Models package definition.""" from official.nlp.modeling.models.bert_classifier import BertClassifier -from official.nlp.modeling.models.bert_pretrainer import BertPretrainer +from official.nlp.modeling.models.bert_pretrainer import * from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier from official.nlp.modeling.models.electra_pretrainer import ElectraPretrainer diff --git a/official/nlp/tasks/electra_task.py b/official/nlp/tasks/electra_task.py index a34cabf08..e84504ec0 100644 --- a/official/nlp/tasks/electra_task.py +++ b/official/nlp/tasks/electra_task.py @@ -19,16 +19,20 @@ import tensorflow as tf from official.core import base_task from official.core import task_factory +from official.modeling import tf_utils from official.modeling.hyperparams import config_definitions as cfg from official.nlp.configs import bert from official.nlp.configs import electra +from official.nlp.configs import encoders from official.nlp.data import pretrain_dataloader +from official.nlp.modeling import layers +from official.nlp.modeling import models @dataclasses.dataclass -class ELECTRAPretrainConfig(cfg.TaskConfig): +class ElectraPretrainConfig(cfg.TaskConfig): """The model config.""" - model: electra.ELECTRAPretrainerConfig = electra.ELECTRAPretrainerConfig( + model: electra.ElectraPretrainerConfig = electra.ElectraPretrainerConfig( cls_heads=[ bert.ClsHeadConfig( inner_dim=768, @@ -40,13 +44,44 @@ class ELECTRAPretrainConfig(cfg.TaskConfig): validation_data: cfg.DataConfig = cfg.DataConfig() -@task_factory.register_task_cls(ELECTRAPretrainConfig) -class ELECTRAPretrainTask(base_task.Task): +def _build_pretrainer( + config: electra.ElectraPretrainerConfig) -> models.ElectraPretrainer: + """Instantiates ElectraPretrainer from the config.""" + generator_encoder_cfg = config.generator_encoder + discriminator_encoder_cfg = config.discriminator_encoder + # Copy discriminator's embeddings to generator for easier model serialization. 
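+  # This weight tying mirrors the ELECTRA setup, in which the generator and
+  # discriminator share token embeddings whenever `tie_embeddings` is True.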
+ discriminator_network = encoders.build_encoder(discriminator_encoder_cfg) + if config.tie_embeddings: + embedding_layer = discriminator_network.get_embedding_layer() + generator_network = encoders.build_encoder( + generator_encoder_cfg, embedding_layer=embedding_layer) + else: + generator_network = encoders.build_encoder(generator_encoder_cfg) + + generator_encoder_cfg = generator_encoder_cfg.get() + return models.ElectraPretrainer( + generator_network=generator_network, + discriminator_network=discriminator_network, + vocab_size=generator_encoder_cfg.vocab_size, + num_classes=config.num_classes, + sequence_length=config.sequence_length, + num_token_predictions=config.num_masked_tokens, + mlm_activation=tf_utils.get_activation( + generator_encoder_cfg.hidden_activation), + mlm_initializer=tf.keras.initializers.TruncatedNormal( + stddev=generator_encoder_cfg.initializer_range), + classification_heads=[ + layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads + ], + disallow_correct=config.disallow_correct) + + +@task_factory.register_task_cls(ElectraPretrainConfig) +class ElectraPretrainTask(base_task.Task): """ELECTRA Pretrain Task (Masked LM + Replaced Token Detection).""" def build_model(self): - return electra.instantiate_pretrainer_from_cfg( - self.task_config.model) + return _build_pretrainer(self.task_config.model) def build_losses(self, labels, @@ -70,9 +105,7 @@ class ELECTRAPretrainTask(base_task.Task): sentence_outputs = tf.cast( model_outputs['sentence_outputs'], dtype=tf.float32) sentence_loss = tf.keras.losses.sparse_categorical_crossentropy( - sentence_labels, - sentence_outputs, - from_logits=True) + sentence_labels, sentence_outputs, from_logits=True) metrics['next_sentence_loss'].update_state(sentence_loss) total_loss = mlm_loss + sentence_loss else: diff --git a/official/nlp/tasks/electra_task_test.py b/official/nlp/tasks/electra_task_test.py index c0978c01d..061ee35af 100644 --- a/official/nlp/tasks/electra_task_test.py +++ b/official/nlp/tasks/electra_task_test.py @@ -24,15 +24,17 @@ from official.nlp.data import pretrain_dataloader from official.nlp.tasks import electra_task -class ELECTRAPretrainTaskTest(tf.test.TestCase): +class ElectraPretrainTaskTest(tf.test.TestCase): def test_task(self): - config = electra_task.ELECTRAPretrainConfig( - model=electra.ELECTRAPretrainerConfig( - generator_encoder=encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1), - discriminator_encoder=encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1), + config = electra_task.ElectraPretrainConfig( + model=electra.ElectraPretrainerConfig( + generator_encoder=encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, + num_layers=1)), + discriminator_encoder=encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, + num_layers=1)), num_masked_tokens=20, sequence_length=128, cls_heads=[ @@ -44,7 +46,7 @@ class ELECTRAPretrainTaskTest(tf.test.TestCase): max_predictions_per_seq=20, seq_length=128, global_batch_size=1)) - task = electra_task.ELECTRAPretrainTask(config) + task = electra_task.ElectraPretrainTask(config) model = task.build_model() metrics = task.build_metrics() dataset = task.build_inputs(config.train_data) diff --git a/official/nlp/tasks/masked_lm.py b/official/nlp/tasks/masked_lm.py index b42c95192..8a6ff5dd8 100644 --- a/official/nlp/tasks/masked_lm.py +++ b/official/nlp/tasks/masked_lm.py @@ -19,15 +19,19 @@ import tensorflow as tf from official.core import base_task from official.core import 
task_factory
+from official.modeling import tf_utils
 from official.modeling.hyperparams import config_definitions as cfg
 from official.nlp.configs import bert
+from official.nlp.configs import encoders
 from official.nlp.data import data_loader_factory
+from official.nlp.modeling import layers
+from official.nlp.modeling import models
 
 
 @dataclasses.dataclass
 class MaskedLMConfig(cfg.TaskConfig):
   """The model config."""
-  model: bert.BertPretrainerConfig = bert.BertPretrainerConfig(cls_heads=[
+  model: bert.PretrainerConfig = bert.PretrainerConfig(cls_heads=[
       bert.ClsHeadConfig(
           inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence')
   ])
@@ -37,11 +41,21 @@ class MaskedLMConfig(cfg.TaskConfig):
 
 @task_factory.register_task_cls(MaskedLMConfig)
 class MaskedLMTask(base_task.Task):
-  """Mock task object for testing."""
+  """Task object for masked language modeling."""
 
   def build_model(self, params=None):
-    params = params or self.task_config.model
-    return bert.instantiate_pretrainer_from_cfg(params)
+    config = params or self.task_config.model
+    encoder_cfg = config.encoder
+    encoder_network = encoders.build_encoder(encoder_cfg)
+    cls_heads = [
+        layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
+    ] if config.cls_heads else []
+    return models.BertPretrainerV2(
+        mlm_activation=tf_utils.get_activation(config.mlm_activation),
+        mlm_initializer=tf.keras.initializers.TruncatedNormal(
+            stddev=config.mlm_initializer_range),
+        encoder_network=encoder_network,
+        classification_heads=cls_heads)
 
   def build_losses(self,
                    labels,
@@ -63,9 +77,8 @@
       sentence_outputs = tf.cast(
           model_outputs['next_sentence'], dtype=tf.float32)
       sentence_loss = tf.reduce_mean(
-          tf.keras.losses.sparse_categorical_crossentropy(sentence_labels,
-                                                          sentence_outputs,
-                                                          from_logits=True))
+          tf.keras.losses.sparse_categorical_crossentropy(
+              sentence_labels, sentence_outputs, from_logits=True))
       metrics['next_sentence_loss'].update_state(sentence_loss)
       total_loss = mlm_loss + sentence_loss
     else:
diff --git a/official/nlp/tasks/masked_lm_test.py b/official/nlp/tasks/masked_lm_test.py
index 38970a378..7c62506b3 100644
--- a/official/nlp/tasks/masked_lm_test.py
+++ b/official/nlp/tasks/masked_lm_test.py
@@ -28,8 +28,10 @@ class MLMTaskTest(tf.test.TestCase):
   def test_task(self):
     config = masked_lm.MaskedLMConfig(
         init_checkpoint=self.get_temp_dir(),
-        model=bert.BertPretrainerConfig(
-            encoders.TransformerEncoderConfig(vocab_size=30522, num_layers=1),
+        model=bert.PretrainerConfig(
+            encoders.EncoderConfig(
+                bert=encoders.BertEncoderConfig(vocab_size=30522,
+                                                num_layers=1)),
             cls_heads=[
                 bert.ClsHeadConfig(
                     inner_dim=10, num_classes=2, name="next_sentence")
diff --git a/official/nlp/tasks/question_answering.py b/official/nlp/tasks/question_answering.py
index f73cdc102..35bec1472 100644
--- a/official/nlp/tasks/question_answering.py
+++ b/official/nlp/tasks/question_answering.py
@@ -42,8 +42,7 @@
 @dataclasses.dataclass
 class ModelConfig(base_config.Config):
   """A base span labeler configuration."""
-  encoder: encoders.TransformerEncoderConfig = (
-      encoders.TransformerEncoderConfig())
+  encoder: encoders.EncoderConfig = encoders.EncoderConfig()
 
 
 @dataclasses.dataclass
@@ -94,13 +93,13 @@ class QuestionAnsweringTask(base_task.Task):
     if self._hub_module:
       encoder_network = utils.get_encoder_from_hub(self._hub_module)
     else:
-      encoder_network = encoders.instantiate_encoder_from_cfg(
-          self.task_config.model.encoder)
+      encoder_network = 
encoders.build_encoder(self.task_config.model.encoder) + encoder_cfg = self.task_config.model.encoder.get() # Currently, we only supports bert-style question answering finetuning. return models.BertSpanLabeler( network=encoder_network, initializer=tf.keras.initializers.TruncatedNormal( - stddev=self.task_config.model.encoder.initializer_range)) + stddev=encoder_cfg.initializer_range)) def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: start_positions = labels['start_positions'] diff --git a/official/nlp/tasks/question_answering_test.py b/official/nlp/tasks/question_answering_test.py index ff601b0f9..10c9656f3 100644 --- a/official/nlp/tasks/question_answering_test.py +++ b/official/nlp/tasks/question_answering_test.py @@ -25,6 +25,7 @@ from official.nlp.bert import export_tfhub from official.nlp.configs import bert from official.nlp.configs import encoders from official.nlp.data import question_answering_dataloader +from official.nlp.tasks import masked_lm from official.nlp.tasks import question_answering @@ -32,21 +33,37 @@ class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(QuestionAnsweringTaskTest, self).setUp() - self._encoder_config = encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1) + self._encoder_config = encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)) self._train_data_config = question_answering_dataloader.QADataConfig( - input_path="dummy", - seq_length=128, - global_batch_size=1) - - val_data = {"version": "1.1", - "data": [{"paragraphs": [ - {"context": "Sky is blue.", - "qas": [{"question": "What is blue?", "id": "1234", - "answers": [{"text": "Sky", "answer_start": 0}, - {"text": "Sky", "answer_start": 0}, - {"text": "Sky", "answer_start": 0}] - }]}]}]} + input_path="dummy", seq_length=128, global_batch_size=1) + + val_data = { + "version": + "1.1", + "data": [{ + "paragraphs": [{ + "context": + "Sky is blue.", + "qas": [{ + "question": + "What is blue?", + "id": + "1234", + "answers": [{ + "text": "Sky", + "answer_start": 0 + }, { + "text": "Sky", + "answer_start": 0 + }, { + "text": "Sky", + "answer_start": 0 + }] + }] + }] + }] + } self._val_input_path = os.path.join(self.get_temp_dir(), "val_data.json") with tf.io.gfile.GFile(self._val_input_path, "w") as writer: writer.write(json.dumps(val_data, indent=4) + "\n") @@ -87,19 +104,20 @@ class QuestionAnsweringTaskTest(tf.test.TestCase, parameterized.TestCase): metrics = task.reduce_aggregated_logs(logs) self.assertIn("final_f1", metrics) - @parameterized.parameters(itertools.product( - (False, True), - ("WordPiece", "SentencePiece"), - )) + @parameterized.parameters( + itertools.product( + (False, True), + ("WordPiece", "SentencePiece"), + )) def test_task(self, version_2_with_negative, tokenization): # Saves a checkpoint. 
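+    # The checkpoint comes from a PretrainerConfig-built model (constructed
+    # through MaskedLMTask below) and seeds the fine-tuning task.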
- pretrain_cfg = bert.BertPretrainerConfig( + pretrain_cfg = bert.PretrainerConfig( encoder=self._encoder_config, cls_heads=[ bert.ClsHeadConfig( inner_dim=10, num_classes=3, name="next_sentence") ]) - pretrain_model = bert.instantiate_pretrainer_from_cfg(pretrain_cfg) + pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg) ckpt = tf.train.Checkpoint( model=pretrain_model, **pretrain_model.checkpoint_items) saved_path = ckpt.save(self.get_temp_dir()) diff --git a/official/nlp/tasks/sentence_prediction.py b/official/nlp/tasks/sentence_prediction.py index 3e7cb46f3..dbd5b6db1 100644 --- a/official/nlp/tasks/sentence_prediction.py +++ b/official/nlp/tasks/sentence_prediction.py @@ -44,8 +44,7 @@ class ModelConfig(base_config.Config): """A classifier/regressor configuration.""" num_classes: int = 0 use_encoder_pooler: bool = False - encoder: encoders.TransformerEncoderConfig = ( - encoders.TransformerEncoderConfig()) + encoder: encoders.EncoderConfig = encoders.EncoderConfig() @dataclasses.dataclass @@ -85,15 +84,14 @@ class SentencePredictionTask(base_task.Task): if self._hub_module: encoder_network = utils.get_encoder_from_hub(self._hub_module) else: - encoder_network = encoders.instantiate_encoder_from_cfg( - self.task_config.model.encoder) - + encoder_network = encoders.build_encoder(self.task_config.model.encoder) + encoder_cfg = self.task_config.model.encoder.get() # Currently, we only support bert-style sentence prediction finetuning. return models.BertClassifier( network=encoder_network, num_classes=self.task_config.model.num_classes, initializer=tf.keras.initializers.TruncatedNormal( - stddev=self.task_config.model.encoder.initializer_range), + stddev=encoder_cfg.initializer_range), use_encoder_pooler=self.task_config.model.use_encoder_pooler) def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: diff --git a/official/nlp/tasks/sentence_prediction_test.py b/official/nlp/tasks/sentence_prediction_test.py index 7357d6636..10cb2a869 100644 --- a/official/nlp/tasks/sentence_prediction_test.py +++ b/official/nlp/tasks/sentence_prediction_test.py @@ -26,6 +26,7 @@ from official.nlp.bert import export_tfhub from official.nlp.configs import bert from official.nlp.configs import encoders from official.nlp.data import sentence_prediction_dataloader +from official.nlp.tasks import masked_lm from official.nlp.tasks import sentence_prediction @@ -68,8 +69,8 @@ class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase): def get_model_config(self, num_classes): return sentence_prediction.ModelConfig( - encoder=encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1), + encoder=encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)), num_classes=num_classes) def _run_task(self, config): @@ -102,14 +103,14 @@ class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase): task.validation_step(next(iterator), model, metrics=metrics) # Saves a checkpoint. 
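+    # Same flow as the question-answering test: pretrain, save a checkpoint,
+    # then restore it into the fine-tuning task.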
- pretrain_cfg = bert.BertPretrainerConfig( - encoder=encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1), + pretrain_cfg = bert.PretrainerConfig( + encoder=encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)), cls_heads=[ bert.ClsHeadConfig( inner_dim=10, num_classes=3, name="next_sentence") ]) - pretrain_model = bert.instantiate_pretrainer_from_cfg(pretrain_cfg) + pretrain_model = masked_lm.MaskedLMTask(None).build_model(pretrain_cfg) ckpt = tf.train.Checkpoint( model=pretrain_model, **pretrain_model.checkpoint_items) ckpt.save(config.init_checkpoint) @@ -136,8 +137,8 @@ class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase): if num_classes == 1: self.assertIsInstance(metrics[0], tf.keras.metrics.MeanSquaredError) else: - self.assertIsInstance( - metrics[0], tf.keras.metrics.SparseCategoricalAccuracy) + self.assertIsInstance(metrics[0], + tf.keras.metrics.SparseCategoricalAccuracy) dataset = task.build_inputs(config.train_data) iterator = iter(dataset) diff --git a/official/nlp/tasks/tagging.py b/official/nlp/tasks/tagging.py index 10b5423c1..ef1859167 100644 --- a/official/nlp/tasks/tagging.py +++ b/official/nlp/tasks/tagging.py @@ -37,8 +37,7 @@ from official.nlp.tasks import utils @dataclasses.dataclass class ModelConfig(base_config.Config): """A base span labeler configuration.""" - encoder: encoders.TransformerEncoderConfig = ( - encoders.TransformerEncoderConfig()) + encoder: encoders.EncoderConfig = encoders.EncoderConfig() head_dropout: float = 0.1 head_initializer_range: float = 0.02 @@ -102,8 +101,7 @@ class TaggingTask(base_task.Task): if self._hub_module: encoder_network = utils.get_encoder_from_hub(self._hub_module) else: - encoder_network = encoders.instantiate_encoder_from_cfg( - self.task_config.model.encoder) + encoder_network = encoders.build_encoder(self.task_config.model.encoder) return models.BertTokenClassifier( network=encoder_network, diff --git a/official/nlp/tasks/tagging_test.py b/official/nlp/tasks/tagging_test.py index a3ea999c4..c45b52556 100644 --- a/official/nlp/tasks/tagging_test.py +++ b/official/nlp/tasks/tagging_test.py @@ -53,8 +53,8 @@ class TaggingTest(tf.test.TestCase): def setUp(self): super(TaggingTest, self).setUp() - self._encoder_config = encoders.TransformerEncoderConfig( - vocab_size=30522, num_layers=1) + self._encoder_config = encoders.EncoderConfig( + bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)) self._train_data_config = tagging_data_loader.TaggingDataConfig( input_path="dummy", seq_length=128, global_batch_size=1) @@ -74,7 +74,7 @@ class TaggingTest(tf.test.TestCase): def test_task(self): # Saves a checkpoint. 
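+    # Only a bare encoder (no pretraining heads) is checkpointed for tagging.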
- encoder = encoders.instantiate_encoder_from_cfg(self._encoder_config) + encoder = encoders.build_encoder(self._encoder_config) ckpt = tf.train.Checkpoint(encoder=encoder) saved_path = ckpt.save(self.get_temp_dir()) -- GitLab From 0edeca544b5c0d65ff0fca3d56b22e2eee7d6919 Mon Sep 17 00:00:00 2001 From: Dan Holtmann-Rice Date: Wed, 5 Aug 2020 13:25:05 -0700 Subject: [PATCH 122/128] Internal change PiperOrigin-RevId: 325088513 --- .../resnet/resnet_runnable.py | 16 ++-- orbit/controller_test.py | 5 +- orbit/standard_runner.py | 74 +++++++++---------- orbit/standard_runner_test.py | 66 +++++++++-------- 4 files changed, 86 insertions(+), 75 deletions(-) diff --git a/official/vision/image_classification/resnet/resnet_runnable.py b/official/vision/image_classification/resnet/resnet_runnable.py index e3b49200f..5b898593e 100644 --- a/official/vision/image_classification/resnet/resnet_runnable.py +++ b/official/vision/image_classification/resnet/resnet_runnable.py @@ -107,9 +107,12 @@ class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator): .datasets_num_private_threads, dtype=self.dtype, drop_remainder=True) - orbit.StandardTrainer.__init__(self, train_dataset, - flags_obj.use_tf_while_loop, - flags_obj.use_tf_function) + orbit.StandardTrainer.__init__( + self, + train_dataset, + options=orbit.StandardTrainerOptions( + use_tf_while_loop=flags_obj.use_tf_while_loop, + use_tf_function=flags_obj.use_tf_function)) if not flags_obj.skip_eval: eval_dataset = orbit.utils.make_distributed_dataset( self.strategy, @@ -119,8 +122,11 @@ class ResnetRunnable(orbit.StandardTrainer, orbit.StandardEvaluator): batch_size=self.batch_size, parse_record_fn=imagenet_preprocessing.parse_record, dtype=self.dtype) - orbit.StandardEvaluator.__init__(self, eval_dataset, - flags_obj.use_tf_function) + orbit.StandardEvaluator.__init__( + self, + eval_dataset, + options=orbit.StandardEvaluatorOptions( + use_tf_function=flags_obj.use_tf_function)) def train_loop_begin(self): """See base class.""" diff --git a/orbit/controller_test.py b/orbit/controller_test.py index 6751e9025..8472de4fb 100644 --- a/orbit/controller_test.py +++ b/orbit/controller_test.py @@ -221,7 +221,10 @@ class TestTrainerWithSummaries(standard_runner.StandardTrainer): self.strategy.experimental_distribute_datasets_from_function(dataset_fn) ) standard_runner.StandardTrainer.__init__( - self, train_dataset, use_tpu_summary_optimization=True) + self, + train_dataset, + options=standard_runner.StandardTrainerOptions( + use_tpu_summary_optimization=True)) def build_train_dataset(self): return self.strategy.experimental_distribute_datasets_from_function( diff --git a/orbit/standard_runner.py b/orbit/standard_runner.py index 1d37f2cc9..8d3099b38 100644 --- a/orbit/standard_runner.py +++ b/orbit/standard_runner.py @@ -23,20 +23,22 @@ import tensorflow as tf @dataclasses.dataclass(frozen=True) -class TrainerOverrides: - """Advanced overrides for Orbit trainers. +class StandardTrainerOptions: + """Advanced options for `orbit.StandardTrainer`. Attributes: - use_tf_while_loop: A boolean indicates whether to wrap the train step with - a `tf.while_loop`. - use_tf_function: A boolean indicates whether a `tf.function` will be used. - If False, training will run on pure eager mode. - use_tpu_summary_optimization: A boolean indicates whether to enable the - performance optimization for summaries in TPUs. In TPUs, writing - summaries with outside compilation inside train step is slow. 
If True, - it creates two `tf.function` with two XLA programs: one with summaries - and one without, and run the program with summaries (slow one) only if - necessary. + use_tf_while_loop: A boolean indicating whether to run the training loop + using a `tf.while_loop`. If `True`, `use_tf_function` must also be `True`. + use_tf_function: A boolean indicating whether to apply `tf.function` to the + training loop. This will only affect the body of the loop (involving + `train_step`); `train_loop_begin` and `train_loop_end` will always be run + in eager mode. + use_tpu_summary_optimization: A boolean indicating whether to enable a + performance optimization for summaries in TPUs. Writing summaries + conditionally with outside compilation on TPUs can be extremely slow. If + `True`, this optimization creates two `tf.function`s with two XLA programs + (one with summary calls, and one without). The program with summaries runs + only for one step when summaries should be recorded. """ use_tf_while_loop: bool = True use_tf_function: bool = True @@ -46,39 +48,29 @@ class TrainerOverrides: class StandardTrainer(runner.AbstractTrainer, metaclass=abc.ABCMeta): """Implements the standard functionality of AbstractTrainer APIs.""" - def __init__(self, - train_dataset, - use_tf_while_loop=True, - use_tf_function=True, - use_tpu_summary_optimization=False): + def __init__(self, train_dataset, options: StandardTrainerOptions = None): """Construct a `StandardTrainer` object. Args: train_dataset: A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset. - use_tf_while_loop: A boolean indicates whether to wrap the train step with - a `tf.while_loop`. - use_tf_function: A boolean indicates whether a `tf.function` will be used. - If False, training will run on pure eager mode. - use_tpu_summary_optimization: A boolean indicates whether to enable the - performance optimization for summaries in TPUs. In TPUs, writing - summaries with outside compilation inside train step is slow. If True, - it creates two `tf.function` with two XLA programs: one with summaries - and one without, and run the program with summaries (slow one) only if - necessary. + options: An `orbit.StandardTrainerOptions` instance. """ - if use_tf_while_loop and not use_tf_function: + options = options or StandardTrainerOptions() + if options.use_tf_while_loop and not options.use_tf_function: raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` " "is not supported") - if use_tpu_summary_optimization and not use_tf_while_loop: + if options.use_tpu_summary_optimization and not options.use_tf_while_loop: raise ValueError("`use_tpu_summary_optimization=True` and " "`use_tf_while_loop=False` is not supported") - self._use_tf_while_loop = use_tf_while_loop - self._use_tf_function = use_tf_function + + self._use_tf_while_loop = options.use_tf_while_loop + self._use_tf_function = options.use_tf_function + self._use_tpu_summary_optimization = options.use_tpu_summary_optimization + self._train_dataset = train_dataset self._train_iter = None self._train_loop_fn = None - self._use_tpu_summary_optimization = use_tpu_summary_optimization def train(self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: @@ -168,12 +160,14 @@ class StandardTrainer(runner.AbstractTrainer, metaclass=abc.ABCMeta): @dataclasses.dataclass(frozen=True) -class EvaluatorOverrides: - """Advanced overrides for Orbit evaluators. +class StandardEvaluatorOptions: + """Advanced options for the `orbit.StandardEvaluator`. 
Attributes:
-    use_tf_function: A boolean indicates whether a `tf.function` will be used.
-      If False, training will run on pure eager mode.
+    use_tf_function: A boolean indicating whether to apply `tf.function` to
+      the evaluation loop. This will only affect the body of the loop
+      (involving `eval_step`); `eval_begin` and `eval_end` will always be run
+      in eager mode.
   """
   use_tf_function: bool = True
 
 
 class StandardEvaluator(runner.AbstractEvaluator, metaclass=abc.ABCMeta):
   """Implements the standard functionality of AbstractEvaluator APIs."""
 
-  def __init__(self, eval_dataset, use_tf_function=True):
+  def __init__(self, eval_dataset, options: StandardEvaluatorOptions = None):
     """Construct a `StandardEvaluator` object.
 
     Args:
       eval_dataset: A tf.nest-compatible structure of tf.data.Dataset or
         DistributedDataset.
-      use_tf_function: A boolean indicates whether a `tf.function` will be used.
-        If False, evaluation will run on pure eager mode.
+      options: An `orbit.StandardEvaluatorOptions` instance.
     """
-    self._eval_use_tf_function = use_tf_function
+    options = options or StandardEvaluatorOptions()
+    self._eval_use_tf_function = options.use_tf_function
     self._eval_dataset = eval_dataset
     self._eval_loop_fn = None
diff --git a/orbit/standard_runner_test.py b/orbit/standard_runner_test.py
index fb98a715d..7adf65bc7 100644
--- a/orbit/standard_runner_test.py
+++ b/orbit/standard_runner_test.py
@@ -15,6 +15,7 @@
 """Tests for orbit.standard_runner."""
 
 from orbit import standard_runner
+from orbit import utils
 
 import tensorflow as tf
 
@@ -32,46 +33,49 @@ def dataset_fn(input_context=None):
   return dataset
 
 
-class TestRunner(standard_runner.StandardTrainer,
-                 standard_runner.StandardEvaluator):
-  """Implements the training and evaluation APIs for tests."""
+class TestTrainer(standard_runner.StandardTrainer):
+  """A StandardTrainer subclass for tests."""
 
-  def __init__(self):
+  def __init__(self, options=None):
     self.strategy = tf.distribute.get_strategy()
-    self.global_step = tf.Variable(
-        0,
-        trainable=False,
-        dtype=tf.int64,
-        name='global_step',
-        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
-    standard_runner.StandardTrainer.__init__(self, train_dataset=None)
-    standard_runner.StandardEvaluator.__init__(self, eval_dataset=None)
+    self.global_step = utils.create_global_step()
+    distribute = self.strategy.experimental_distribute_datasets_from_function
+    dataset = distribute(dataset_fn)
+    super().__init__(train_dataset=dataset, options=options)
 
   def train_loop_begin(self):
-    self.train_dataset = (
-        self.strategy.experimental_distribute_datasets_from_function(dataset_fn)
-    )
+    self.global_step.assign(0)
 
   def train_step(self, iterator):
 
-    def _replicated_step(_):
+    def replica_step(_):
       self.global_step.assign_add(1)
 
-    self.strategy.run(_replicated_step, args=(next(iterator),))
+    self.strategy.run(replica_step, args=(next(iterator),))
 
   def train_loop_end(self):
     return self.global_step.numpy()
 
+
+class TestEvaluator(standard_runner.StandardEvaluator):
+  """A StandardEvaluator subclass for tests."""
+
+  def __init__(self, options=None):
+    self.strategy = tf.distribute.get_strategy()
+    self.global_step = utils.create_global_step()
+    distribute = self.strategy.experimental_distribute_datasets_from_function
+    dataset = distribute(dataset_fn)
+    super().__init__(eval_dataset=dataset, options=options)
+
   def eval_begin(self):
-    self.eval_dataset = self.strategy.experimental_distribute_datasets_from_function(
-        dataset_fn)
+    self.global_step.assign(0)
 
   def 
eval_step(self, iterator): - def _replicated_step(_): + def replica_step(_): self.global_step.assign_add(1) - self.strategy.run(_replicated_step, args=(next(iterator),)) + self.strategy.run(replica_step, args=(next(iterator),)) def eval_end(self): return self.global_step.numpy() @@ -79,15 +83,19 @@ class TestRunner(standard_runner.StandardTrainer, class StandardRunnerTest(tf.test.TestCase): - def test_train(self): - test_runner = TestRunner() - self.assertEqual( - test_runner.train(tf.convert_to_tensor(10, dtype=tf.int32)), 10) + def test_default_trainer(self): + trainer = TestTrainer() + self.assertEqual(trainer.train(tf.constant(10)), 10) + + def test_trainer_with_tpu_summary_optimization(self): + options = standard_runner.StandardTrainerOptions( + use_tpu_summary_optimization=True) + trainer = TestTrainer(options) + self.assertEqual(trainer.train(tf.constant(10)), 10) - def test_eval(self): - test_runner = TestRunner() - self.assertEqual( - test_runner.evaluate(tf.convert_to_tensor(10, dtype=tf.int32)), 10) + def test_default_evaluator(self): + evaluator = TestEvaluator() + self.assertEqual(evaluator.evaluate(tf.constant(10)), 10) if __name__ == '__main__': -- GitLab From b3677ae2936d0457cb8c6213ea83bd89610eb3f0 Mon Sep 17 00:00:00 2001 From: Allen Wang Date: Wed, 5 Aug 2020 13:51:03 -0700 Subject: [PATCH 123/128] Remove piecewise decay with warmup and recompose it with stepwise + warmupdecay. PiperOrigin-RevId: 325093611 --- .../configs/examples/resnet/imagenet/gpu.yaml | 2 - .../configs/examples/resnet/imagenet/tpu.yaml | 2 - .../image_classification/learning_rate.py | 74 ++++--------------- .../learning_rate_test.py | 38 ---------- .../image_classification/optimizer_factory.py | 31 ++++---- .../optimizer_factory_test.py | 1 - .../resnet/resnet_config.py | 23 +++--- 7 files changed, 38 insertions(+), 133 deletions(-) diff --git a/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml b/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml index 56844b81d..2037d6b5d 100644 --- a/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml +++ b/official/vision/image_classification/configs/examples/resnet/imagenet/gpu.yaml @@ -40,8 +40,6 @@ model: momentum: 0.9 decay: 0.9 epsilon: 0.001 - learning_rate: - name: 'piecewise_constant_with_warmup' loss: label_smoothing: 0.1 train: diff --git a/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml b/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml index ae975c162..0a3030333 100644 --- a/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml +++ b/official/vision/image_classification/configs/examples/resnet/imagenet/tpu.yaml @@ -43,8 +43,6 @@ model: epsilon: 0.001 moving_average_decay: 0. 
lookahead: False - learning_rate: - name: 'piecewise_constant_with_warmup' loss: label_smoothing: 0.1 train: diff --git a/official/vision/image_classification/learning_rate.py b/official/vision/image_classification/learning_rate.py index 1c78b04bc..67d6ec9f6 100644 --- a/official/vision/image_classification/learning_rate.py +++ b/official/vision/image_classification/learning_rate.py @@ -18,7 +18,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -from typing import Any, List, Mapping +from typing import Any, Mapping, Optional import numpy as np import tensorflow as tf @@ -32,23 +32,33 @@ class WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__( self, lr_schedule: tf.keras.optimizers.schedules.LearningRateSchedule, - warmup_steps: int): + warmup_steps: int, + warmup_lr: Optional[float] = None): """Add warmup decay to a learning rate schedule. Args: lr_schedule: base learning rate scheduler warmup_steps: number of warmup steps + warmup_lr: an optional field for the final warmup learning rate. This + should be provided if the base `lr_schedule` does not contain this + field. """ super(WarmupDecaySchedule, self).__init__() self._lr_schedule = lr_schedule self._warmup_steps = warmup_steps + self._warmup_lr = warmup_lr def __call__(self, step: int): lr = self._lr_schedule(step) if self._warmup_steps: - initial_learning_rate = tf.convert_to_tensor( - self._lr_schedule.initial_learning_rate, name="initial_learning_rate") + if self._warmup_lr is not None: + initial_learning_rate = tf.convert_to_tensor( + self._warmup_lr, name="initial_learning_rate") + else: + initial_learning_rate = tf.convert_to_tensor( + self._lr_schedule.initial_learning_rate, + name="initial_learning_rate") dtype = initial_learning_rate.dtype global_step_recomp = tf.cast(step, dtype) warmup_steps = tf.cast(self._warmup_steps, dtype) @@ -62,65 +72,11 @@ class WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule): config = self._lr_schedule.get_config() config.update({ "warmup_steps": self._warmup_steps, + "warmup_lr": self._warmup_lr, }) return config -# TODO(b/149030439) - refactor this with -# tf.keras.optimizers.schedules.PiecewiseConstantDecay + WarmupDecaySchedule. -class PiecewiseConstantDecayWithWarmup( - tf.keras.optimizers.schedules.LearningRateSchedule): - """Piecewise constant decay with warmup schedule.""" - - def __init__(self, - batch_size: int, - epoch_size: int, - warmup_epochs: int, - boundaries: List[int], - multipliers: List[float]): - """Piecewise constant decay with warmup. - - Args: - batch_size: The training batch size used in the experiment. - epoch_size: The size of an epoch, or the number of examples in an epoch. - warmup_epochs: The number of warmup epochs to apply. - boundaries: The list of floats with strictly increasing entries. - multipliers: The list of multipliers/learning rates to use for the - piecewise portion. The length must be 1 less than that of boundaries. 
- - """ - super(PiecewiseConstantDecayWithWarmup, self).__init__() - if len(boundaries) != len(multipliers) - 1: - raise ValueError("The length of boundaries must be 1 less than the " - "length of multipliers") - - base_lr_batch_size = 256 - steps_per_epoch = epoch_size // batch_size - - self._rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size - self._step_boundaries = [float(steps_per_epoch) * x for x in boundaries] - self._lr_values = [self._rescaled_lr * m for m in multipliers] - self._warmup_steps = warmup_epochs * steps_per_epoch - - def __call__(self, step: int): - """Compute learning rate at given step.""" - def warmup_lr(): - return self._rescaled_lr * ( - step / tf.cast(self._warmup_steps, tf.float32)) - def piecewise_lr(): - return tf.compat.v1.train.piecewise_constant( - tf.cast(step, tf.float32), self._step_boundaries, self._lr_values) - return tf.cond(step < self._warmup_steps, warmup_lr, piecewise_lr) - - def get_config(self) -> Mapping[str, Any]: - return { - "rescaled_lr": self._rescaled_lr, - "step_boundaries": self._step_boundaries, - "lr_values": self._lr_values, - "warmup_steps": self._warmup_steps, - } - - class CosineDecayWithWarmup(tf.keras.optimizers.schedules.LearningRateSchedule): """Class to generate learning rate tensor.""" diff --git a/official/vision/image_classification/learning_rate_test.py b/official/vision/image_classification/learning_rate_test.py index 272d2935f..b35b8904c 100644 --- a/official/vision/image_classification/learning_rate_test.py +++ b/official/vision/image_classification/learning_rate_test.py @@ -46,44 +46,6 @@ class LearningRateTests(tf.test.TestCase): self.assertAllClose(self.evaluate(lr(step)), step / warmup_steps * initial_lr) - def test_piecewise_constant_decay_with_warmup(self): - """Basic computational test for piecewise constant decay with warmup.""" - boundaries = [1, 2, 3] - warmup_epochs = boundaries[0] - learning_rate_multipliers = [1.0, 0.1, 0.001] - expected_keys = [ - 'rescaled_lr', 'step_boundaries', 'lr_values', 'warmup_steps', - ] - - expected_lrs = [0.0, 0.1, 0.1] - - lr = learning_rate.PiecewiseConstantDecayWithWarmup( - batch_size=256, - epoch_size=256, - warmup_epochs=warmup_epochs, - boundaries=boundaries[1:], - multipliers=learning_rate_multipliers) - - step = 0 - - config = lr.get_config() - self.assertAllInSet(list(config.keys()), expected_keys) - - for boundary, expected_lr in zip(boundaries, expected_lrs): - for _ in range(step, boundary): - self.assertAllClose(self.evaluate(lr(step)), expected_lr) - step += 1 - - def test_piecewise_constant_decay_invalid_boundaries(self): - with self.assertRaisesRegex(ValueError, - 'The length of boundaries must be 1 less '): - learning_rate.PiecewiseConstantDecayWithWarmup( - batch_size=256, - epoch_size=256, - warmup_epochs=1, - boundaries=[1, 2], - multipliers=[1, 2]) - def test_cosine_decay_with_warmup(self): """Basic computational test for cosine decay with warmup.""" expected_lrs = [0.0, 0.1, 0.05, 0.0] diff --git a/official/vision/image_classification/optimizer_factory.py b/official/vision/image_classification/optimizer_factory.py index 29b19e22d..de619e0b2 100644 --- a/official/vision/image_classification/optimizer_factory.py +++ b/official/vision/image_classification/optimizer_factory.py @@ -370,29 +370,26 @@ def build_learning_rate(params: base_configs.LearningRateConfig, decay_steps=decay_steps, decay_rate=decay_rate, staircase=params.staircase) - elif decay_type == 'piecewise_constant_with_warmup': - logging.info('Using Piecewise constant decay with 
warmup. ' - 'Parameters: batch_size: %d, epoch_size: %d, ' - 'warmup_epochs: %d, boundaries: %s, multipliers: %s', - batch_size, params.examples_per_epoch, - params.warmup_epochs, params.boundaries, - params.multipliers) - lr = learning_rate.PiecewiseConstantDecayWithWarmup( - batch_size=batch_size, - epoch_size=params.examples_per_epoch, - warmup_epochs=params.warmup_epochs, - boundaries=params.boundaries, - multipliers=params.multipliers) + elif decay_type == 'stepwise': + steps_per_epoch = params.examples_per_epoch // batch_size + boundaries = [boundary * steps_per_epoch for boundary in params.boundaries] + multipliers = [batch_size * multiplier for multiplier in params.multipliers] + logging.info('Using stepwise learning rate. Parameters: ' + 'boundaries: %s, values: %s', + boundaries, multipliers) + lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay( + boundaries=boundaries, + values=multipliers) elif decay_type == 'cosine_with_warmup': lr = learning_rate.CosineDecayWithWarmup( batch_size=batch_size, total_steps=train_epochs * train_steps, warmup_steps=warmup_steps) if warmup_steps > 0: - if decay_type not in [ - 'piecewise_constant_with_warmup', 'cosine_with_warmup' - ]: + if decay_type not in ['cosine_with_warmup']: logging.info('Applying %d warmup steps to the learning rate', warmup_steps) - lr = learning_rate.WarmupDecaySchedule(lr, warmup_steps) + lr = learning_rate.WarmupDecaySchedule(lr, + warmup_steps, + warmup_lr=base_lr) return lr diff --git a/official/vision/image_classification/optimizer_factory_test.py b/official/vision/image_classification/optimizer_factory_test.py index a62072848..422031173 100644 --- a/official/vision/image_classification/optimizer_factory_test.py +++ b/official/vision/image_classification/optimizer_factory_test.py @@ -93,7 +93,6 @@ class OptimizerFactoryTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters( ('exponential', 'exponential'), - ('piecewise_constant_with_warmup', 'piecewise_constant_with_warmup'), ('cosine_with_warmup', 'cosine_with_warmup')) def test_learning_rate_with_decay_and_warmup(self, lr_decay_type): """Basic smoke test for syntax.""" diff --git a/official/vision/image_classification/resnet/resnet_config.py b/official/vision/image_classification/resnet/resnet_config.py index a746257f0..4fb6ddf79 100644 --- a/official/vision/image_classification/resnet/resnet_config.py +++ b/official/vision/image_classification/resnet/resnet_config.py @@ -18,22 +18,12 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -from typing import Any, Mapping - import dataclasses from official.modeling.hyperparams import base_config from official.vision.image_classification.configs import base_configs -_RESNET_LR_SCHEDULE = [ # (multiplier, epoch to start) tuples - (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80) -] -_RESNET_LR_BOUNDARIES = list(p[1] for p in _RESNET_LR_SCHEDULE[1:]) -_RESNET_LR_MULTIPLIERS = list(p[0] for p in _RESNET_LR_SCHEDULE) -_RESNET_LR_WARMUP_EPOCHS = _RESNET_LR_SCHEDULE[0][1] - - @dataclasses.dataclass class ResNetModelConfig(base_configs.ModelConfig): """Configuration for the ResNet model.""" @@ -56,8 +46,13 @@ class ResNetModelConfig(base_configs.ModelConfig): moving_average_decay=None) learning_rate: base_configs.LearningRateConfig = ( base_configs.LearningRateConfig( - name='piecewise_constant_with_warmup', + name='stepwise', + initial_lr=0.1, examples_per_epoch=1281167, - warmup_epochs=_RESNET_LR_WARMUP_EPOCHS, - 
boundaries=_RESNET_LR_BOUNDARIES,
-          multipliers=_RESNET_LR_MULTIPLIERS))
+          boundaries=[30, 60, 80],
+          warmup_epochs=5,
+          scale_by_batch_size=1. / 256.,
+          multipliers=[0.1 / 256,
+                       0.01 / 256,
+                       0.001 / 256,
+                       0.0001 / 256]))
-- GitLab

From f8d6d99d1d8e9311e4cc062d353f4ee50cea61bd Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" 
Date: Wed, 5 Aug 2020 15:31:53 -0700
Subject: [PATCH 124/128] Update orbit.Controller: do not write training
 summary when input summary_dir is None.

PiperOrigin-RevId: 325115012
---
 .../resnet/resnet_ctl_imagenet_main.py        |  1 +
 orbit/controller.py                           | 10 ++--
 orbit/controller_test.py                      | 53 ++++++++++++++++++-
 3 files changed, 57 insertions(+), 7 deletions(-)

diff --git a/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py b/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py
index ca0ccd9fd..7d77c2c2b 100644
--- a/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py
+++ b/official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py
@@ -167,6 +167,7 @@ def run(flags_obj):
       steps_per_loop=steps_per_loop,
       checkpoint_manager=checkpoint_manager,
       summary_interval=summary_interval,
+      summary_dir=flags_obj.model_dir,
       eval_summary_dir=os.path.join(flags_obj.model_dir, 'eval'))

   time_callback.on_train_begin()
diff --git a/orbit/controller.py b/orbit/controller.py
index 3370e556c..aca78c0ff 100644
--- a/orbit/controller.py
+++ b/orbit/controller.py
@@ -71,9 +71,11 @@ class Controller:
         `trainer.train` function will always be enabled. If set, the value
         should be divisible by steps_per_loop.
       summary_dir: The directory to restore and write checkpoints and summaries.
-        If None, it will be set to `checkpoint_manager.directory`.
+        For example, you can set it to `checkpoint_manager.directory`.
+        If None, it will not write training summaries.
       eval_summary_dir: The directory to write eval summaries. If None, it will
-        be set to `summary_dir`.
+        be set to `summary_dir`. If both `summary_dir` and `eval_summary_dir`
+        are None, it will not write evaluation summaries.

     Raises:
       ValueError: If both `trainer` and `evaluator` are None.
@@ -108,9 +110,6 @@ class Controller:
     self.global_step = global_step
     self.checkpoint_manager = checkpoint_manager

-    if summary_dir is None and checkpoint_manager:
-      summary_dir = checkpoint_manager.directory
-
     if self.trainer is not None:
       self.step_timer = None
       self.steps_per_loop = steps_per_loop
@@ -118,7 +117,6 @@ class Controller:
       self.summary_manager = utils.SummaryManager(
           summary_dir, tf.summary.scalar, global_step=self.global_step)

-    eval_summary_writer = None
     if self.evaluator is not None:
       eval_summary_dir = eval_summary_dir or summary_dir
       if eval_summary_dir == summary_dir and self.trainer is not None:
diff --git a/orbit/controller_test.py b/orbit/controller_test.py
index 8472de4fb..d0b5ea9ac 100644
--- a/orbit/controller_test.py
+++ b/orbit/controller_test.py
@@ -294,6 +294,56 @@ class ControllerTest(tf.test.TestCase, parameterized.TestCase):
         train_steps=10, eval_steps=2, eval_interval=6)
     self.assertEqual(test_runner.global_step, 10)

+  def test_has_checkpoint_no_summaries(self):
+    test_runner = TestRunner()
+    # Has checkpoint, but no summary directories.
+    checkpoint = tf.train.Checkpoint(model=test_runner.model)
+    checkpoint_manager = tf.train.CheckpointManager(
+        checkpoint,
+        self.model_dir,
+        max_to_keep=None,
+        step_counter=test_runner.global_step)
+    test_controller = controller.Controller(
+        trainer=test_runner,
+        evaluator=test_runner,
+        global_step=test_runner.global_step,
+        checkpoint_manager=checkpoint_manager,
+        steps_per_loop=2)
+    test_controller.train_and_evaluate(
+        train_steps=10, eval_steps=2, eval_interval=6)
+    self.assertEqual(test_runner.global_step, 10)
+
+    # No summaries are saved.
+    self.assertEmpty(tf.io.gfile.glob(
+        os.path.join(checkpoint_manager.directory, "events.*")))
+
+  def test_has_checkpoint_eval_summary_only(self):
+    test_runner = TestRunner()
+    # Has checkpoint and eval summary dir, but no train summary dir.
+    checkpoint = tf.train.Checkpoint(model=test_runner.model)
+    checkpoint_manager = tf.train.CheckpointManager(
+        checkpoint,
+        self.model_dir,
+        max_to_keep=None,
+        step_counter=test_runner.global_step)
+    test_controller = controller.Controller(
+        trainer=test_runner,
+        evaluator=test_runner,
+        global_step=test_runner.global_step,
+        checkpoint_manager=checkpoint_manager,
+        eval_summary_dir=os.path.join(self.model_dir, "summaries/eval"),
+        steps_per_loop=2)
+    test_controller.train_and_evaluate(
+        train_steps=10, eval_steps=2, eval_interval=6)
+    self.assertEqual(test_runner.global_step, 10)
+
+    # Training summaries are not saved.
+    self.assertEmpty(tf.io.gfile.glob(
+        os.path.join(checkpoint_manager.directory, "events.*")))
+    # Evaluation summaries are saved.
+    self.assertNotEmpty(tf.io.gfile.glob(
+        os.path.join(self.model_dir, "summaries/eval/events.*")))
+
   @parameterized.named_parameters(("return_numpy", True),
                                   ("return_tensor", False))
   def test_train_and_evaluate(self, return_numpy):
@@ -612,7 +662,8 @@ class ControllerTest(tf.test.TestCase, parameterized.TestCase):
         evaluator=test_runner,
         global_step=test_runner.global_step,
         steps_per_loop=10,
-        checkpoint_manager=checkpoint_manager)
+        checkpoint_manager=checkpoint_manager,
+        summary_dir=self.model_dir)
     test_controller.train_and_evaluate(
         train_steps=10, eval_steps=2, eval_interval=5)
-- GitLab

From cbbba228150140a14eea2361868bf66700feff4b Mon Sep 17 00:00:00 2001
From: Hongkun Yu 
Date: Thu, 6 Aug 2020 11:41:36 -0700
Subject: [PATCH 125/128] Move trainers to core/ Move mock_task to
 utils/testing/

PiperOrigin-RevId: 325275356
---
 official/core/base_task_test.py     | 100 ++++++++++++
 official/core/base_trainer.py       | 239 ++++++++++++++++++++++++++++
 official/core/base_trainer_test.py  | 107 +++++++++++++
 official/utils/testing/mock_task.py |  95 +++++++++++
 4 files changed, 541 insertions(+)
 create mode 100644 official/core/base_task_test.py
 create mode 100644 official/core/base_trainer.py
 create mode 100644 official/core/base_trainer_test.py
 create mode 100644 official/utils/testing/mock_task.py

diff --git a/official/core/base_task_test.py b/official/core/base_task_test.py
new file mode 100644
index 000000000..1a407c5ca
--- /dev/null
+++ b/official/core/base_task_test.py
@@ -0,0 +1,100 @@
+# Lint as: python3
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for tensorflow_models.core.base_task.""" + +import functools + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.utils.testing import mock_task + + +def all_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + ], + mode='eager', + ) + + +class TaskKerasTest(tf.test.TestCase, parameterized.TestCase): + + @combinations.generate(all_strategy_combinations()) + def test_task_with_step_override(self, distribution): + with distribution.scope(): + task = mock_task.MockTask() + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3), + metrics=task.build_metrics(), + train_step=task.train_step, + validation_step=task.validation_step) + + dataset = task.build_inputs(params=None) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn('loss', logs.history) + self.assertIn('acc', logs.history) + + # Without specifying metrics through compile. + with distribution.scope(): + train_metrics = task.build_metrics(training=True) + val_metrics = task.build_metrics(training=False) + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3), + train_step=functools.partial(task.train_step, metrics=train_metrics), + validation_step=functools.partial( + task.validation_step, metrics=val_metrics)) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn('loss', logs.history) + self.assertIn('acc', logs.history) + + def test_task_with_fit(self): + task = mock_task.MockTask() + model = task.build_model() + model = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3), + loss=tf.keras.losses.CategoricalCrossentropy(), + metrics=task.build_metrics()) + dataset = task.build_inputs(params=None) + logs = model.fit(dataset, epochs=1, steps_per_epoch=2) + self.assertIn('loss', logs.history) + self.assertIn('acc', logs.history) + self.assertLen(model.evaluate(dataset, steps=1), 2) + + def test_task_invalid_compile(self): + task = mock_task.MockTask() + model = task.build_model() + with self.assertRaises(ValueError): + _ = task.compile_model( + model, + optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3), + loss=tf.keras.losses.CategoricalCrossentropy(), + metrics=task.build_metrics(), + train_step=task.train_step) + + +if __name__ == '__main__': + tf.test.main() diff --git a/official/core/base_trainer.py b/official/core/base_trainer.py new file mode 100644 index 000000000..5e70abbff --- /dev/null +++ b/official/core/base_trainer.py @@ -0,0 +1,239 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Standard Trainer implementation.
+
+The base trainer implements the Orbit `StandardTrainable` and
+`StandardEvaluable` interfaces. Trainers inside this project should be
+interchangeable and independent of model architectures and tasks.
+"""
+import gin
+import orbit
+import tensorflow as tf
+
+from official.core import base_task
+from official.modeling import optimization
+from official.modeling import performance
+from official.modeling.hyperparams import config_definitions
+
+
+ExperimentConfig = config_definitions.ExperimentConfig
+
+
+@gin.configurable
+class Trainer(orbit.StandardTrainer, orbit.StandardEvaluator):
+  """Implements the common trainer shared by TensorFlow models."""
+
+  def __init__(self,
+               config: ExperimentConfig,
+               task: base_task.Task,
+               train: bool = True,
+               evaluate: bool = True,
+               model=None,
+               optimizer=None):
+    """Initializes the common trainer for TensorFlow models.
+
+    Args:
+      config: An `ExperimentConfig` instance specifying experiment config.
+      task: A base_task.Task instance.
+      train: bool, whether or not this trainer will be used for training.
+        Defaults to True.
+      evaluate: bool, whether or not this trainer will be used for evaluation.
+        Defaults to True.
+      model: tf.keras.Model instance. If provided, it will be used instead
+        of building the model via task.build_model(). Defaults to None.
+      optimizer: tf.keras.optimizers.Optimizer instance. If provided, it will
+        be used instead of the optimizer from config. Defaults to None.
+    """
+    # Gets the current distribution strategy. If not inside any strategy scope,
+    # it gets a single-replica no-op strategy.
+    self._strategy = tf.distribute.get_strategy()
+    self._config = config
+    self._task = task
+
+    self._model = model or task.build_model()
+
+    if optimizer is None:
+      opt_factory = optimization.OptimizerFactory(
+          config.trainer.optimizer_config)
+      self._optimizer = opt_factory.build_optimizer(
+          opt_factory.build_learning_rate())
+    else:
+      self._optimizer = optimizer
+
+    # Configures the optimizer when loss_scale is set in the runtime config.
+    # This helps avoid overflow/underflow for float16 computations.
+    if config.runtime.loss_scale:
+      self._optimizer = performance.configure_optimizer(
+          self._optimizer,
+          use_float16=config.runtime.mixed_precision_dtype == 'float16',
+          loss_scale=config.runtime.loss_scale)
+
+    # global_step increases by 1 after each training iteration.
+    # We should have global_step.numpy() == self.optimizer.iterations.numpy()
+    # when there is only 1 optimizer.
+    self._global_step = orbit.utils.create_global_step()
+    if hasattr(self.model, 'checkpoint_items'):
+      checkpoint_items = self.model.checkpoint_items
+    else:
+      checkpoint_items = {}
+    self._checkpoint = tf.train.Checkpoint(
+        global_step=self.global_step, model=self.model,
+        optimizer=self.optimizer, **checkpoint_items)
+
+    self._train_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
+    self._validation_loss = tf.keras.metrics.Mean(
+        'validation_loss', dtype=tf.float32)
+    self._train_metrics = self.task.build_metrics(
+        training=True) + self.model.metrics
+    self._validation_metrics = self.task.build_metrics(
+        training=False) + self.model.metrics
+
+    if train:
+      train_dataset = orbit.utils.make_distributed_dataset(
+          self.strategy, self.task.build_inputs, self.config.task.train_data)
+      orbit.StandardTrainer.__init__(
+          self,
+          train_dataset,
+          options=orbit.StandardTrainerOptions(
+              use_tf_while_loop=config.trainer.train_tf_while_loop,
+              use_tf_function=config.trainer.train_tf_function,
+              use_tpu_summary_optimization=config.trainer.allow_tpu_summary))
+
+    if evaluate:
+      eval_dataset = orbit.utils.make_distributed_dataset(
+          self.strategy, self.task.build_inputs,
+          self.config.task.validation_data)
+      orbit.StandardEvaluator.__init__(
+          self,
+          eval_dataset,
+          options=orbit.StandardEvaluatorOptions(
+              use_tf_function=config.trainer.eval_tf_function))
+
+  @property
+  def strategy(self):
+    return self._strategy
+
+  @property
+  def config(self):
+    return self._config
+
+  @property
+  def task(self):
+    return self._task
+
+  @property
+  def model(self):
+    return self._model
+
+  @property
+  def optimizer(self):
+    return self._optimizer
+
+  @property
+  def global_step(self):
+    return self._global_step
+
+  @property
+  def train_loss(self):
+    """Accesses the training loss metric object."""
+    return self._train_loss
+
+  @property
+  def validation_loss(self):
+    """Accesses the validation loss metric object."""
+    return self._validation_loss
+
+  @property
+  def train_metrics(self):
+    """Accesses all training metric objects."""
+    return self._train_metrics
+
+  @property
+  def validation_metrics(self):
+    """Accesses all validation metric objects."""
+    return self._validation_metrics
+
+  def initialize(self):
+    """A callback function.
+
+    This function will be called when no checkpoint is found for the model.
+    If there is a checkpoint, the checkpoint will be loaded and this function
+    will not be called. Tasks may use this callback function to load a
+    pretrained checkpoint, saved under a directory other than the model_dir.
+ """ + self.task.initialize(self.model) + + @property + def checkpoint(self): + """Accesses the training checkpoint.""" + return self._checkpoint + + def train_loop_end(self): + """See base class.""" + logs = {} + for metric in self.train_metrics + [self.train_loss]: + logs[metric.name] = metric.result() + metric.reset_states() + if callable(self.optimizer.learning_rate): + logs['learning_rate'] = self.optimizer.learning_rate(self.global_step) + else: + logs['learning_rate'] = self.optimizer.learning_rate + return logs + + def train_step(self, iterator): + """See base class.""" + + def step_fn(inputs): + logs = self.task.train_step( + inputs, + model=self.model, + optimizer=self.optimizer, + metrics=self.train_metrics) + self._train_loss.update_state(logs[self.task.loss]) + self.global_step.assign_add(1) + + self.strategy.run(step_fn, args=(next(iterator),)) + + def eval_begin(self): + """Sets up metrics.""" + for metric in self.validation_metrics + [self.validation_loss]: + metric.reset_states() + + def eval_step(self, iterator): + """See base class.""" + + def step_fn(inputs): + logs = self.task.validation_step( + inputs, model=self.model, metrics=self.validation_metrics) + self._validation_loss.update_state(logs[self.task.loss]) + return logs + + distributed_outputs = self.strategy.run(step_fn, args=(next(iterator),)) + return tf.nest.map_structure(self.strategy.experimental_local_results, + distributed_outputs) + + def eval_end(self, aggregated_logs=None): + """Processes evaluation results.""" + logs = {} + for metric in self.validation_metrics + [self.validation_loss]: + logs[metric.name] = metric.result() + if aggregated_logs: + metrics = self.task.reduce_aggregated_logs(aggregated_logs) + logs.update(metrics) + return logs + + def eval_reduce(self, state=None, step_outputs=None): + return self.task.aggregate_logs(state, step_outputs) diff --git a/official/core/base_trainer_test.py b/official/core/base_trainer_test.py new file mode 100644 index 000000000..366da4eea --- /dev/null +++ b/official/core/base_trainer_test.py @@ -0,0 +1,107 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tensorflow_models.core.trainers.trainer.""" +# pylint: disable=g-direct-tensorflow-import +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from official.core import base_trainer as trainer_lib +from official.modeling.hyperparams import config_definitions as cfg +from official.utils.testing import mock_task + + +def all_strategy_combinations(): + return combinations.combine( + distribution=[ + strategy_combinations.default_strategy, + strategy_combinations.tpu_strategy, + strategy_combinations.one_device_strategy_gpu, + ], + mode='eager', + ) + + +class TrainerTest(tf.test.TestCase, parameterized.TestCase): + + def setUp(self): + super().setUp() + self._config = cfg.ExperimentConfig( + trainer=cfg.TrainerConfig( + optimizer_config=cfg.OptimizationConfig( + {'optimizer': { + 'type': 'sgd' + }, + 'learning_rate': { + 'type': 'constant' + }}))) + + def create_test_trainer(self): + task = mock_task.MockTask() + trainer = trainer_lib.Trainer(self._config, task) + return trainer + + @combinations.generate(all_strategy_combinations()) + def test_trainer_train(self, distribution): + with distribution.scope(): + trainer = self.create_test_trainer() + logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) + self.assertIn('training_loss', logs) + self.assertIn('learning_rate', logs) + + @combinations.generate(all_strategy_combinations()) + def test_trainer_validate(self, distribution): + with distribution.scope(): + trainer = self.create_test_trainer() + logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32)) + self.assertIn('validation_loss', logs) + self.assertEqual(logs['acc'], 5. * distribution.num_replicas_in_sync) + + @combinations.generate( + combinations.combine( + mixed_precision_dtype=['float32', 'bfloat16', 'float16'], + loss_scale=[None, 'dynamic', 128, 256], + )) + def test_configure_optimizer(self, mixed_precision_dtype, loss_scale): + config = cfg.ExperimentConfig( + runtime=cfg.RuntimeConfig( + mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale), + trainer=cfg.TrainerConfig( + optimizer_config=cfg.OptimizationConfig( + {'optimizer': { + 'type': 'sgd' + }, + 'learning_rate': { + 'type': 'constant' + }}))) + task = mock_task.MockTask() + trainer = trainer_lib.Trainer(config, task) + if mixed_precision_dtype != 'float16': + self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD) + elif mixed_precision_dtype == 'float16' and loss_scale is None: + self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD) + else: + self.assertIsInstance( + trainer.optimizer, + tf.keras.mixed_precision.experimental.LossScaleOptimizer) + + metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32)) + self.assertIn('training_loss', metrics) + + +if __name__ == '__main__': + tf.test.main() diff --git a/official/utils/testing/mock_task.py b/official/utils/testing/mock_task.py new file mode 100644 index 000000000..5b4b65c16 --- /dev/null +++ b/official/utils/testing/mock_task.py @@ -0,0 +1,95 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mock task for testing.""" + +import dataclasses +import numpy as np +import tensorflow as tf + +from official.core import base_task +from official.core import exp_factory +from official.core import task_factory +from official.modeling.hyperparams import config_definitions as cfg + + +class MockModel(tf.keras.Model): + + def __init__(self, network): + super().__init__() + self.network = network + + def call(self, inputs): + outputs = self.network(inputs) + self.add_loss(tf.reduce_mean(outputs)) + return outputs + + +@dataclasses.dataclass +class MockTaskConfig(cfg.TaskConfig): + pass + + +@task_factory.register_task_cls(MockTaskConfig) +class MockTask(base_task.Task): + """Mock task object for testing.""" + + def __init__(self, params=None, logging_dir=None): + super().__init__(params=params, logging_dir=logging_dir) + + def build_model(self, *arg, **kwargs): + inputs = tf.keras.layers.Input(shape=(2,), name="random", dtype=tf.float32) + outputs = tf.keras.layers.Dense(1)(inputs) + network = tf.keras.Model(inputs=inputs, outputs=outputs) + return MockModel(network) + + def build_metrics(self, training: bool = True): + del training + return [tf.keras.metrics.Accuracy(name="acc")] + + def build_inputs(self, params): + + def generate_data(_): + x = tf.zeros(shape=(2,), dtype=tf.float32) + label = tf.zeros([1], dtype=tf.int32) + return x, label + + dataset = tf.data.Dataset.range(1) + dataset = dataset.repeat() + dataset = dataset.map( + generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE) + return dataset.prefetch(buffer_size=1).batch(2, drop_remainder=True) + + def aggregate_logs(self, state, step_outputs): + if state is None: + state = {} + for key, value in step_outputs.items(): + if key not in state: + state[key] = [] + state[key].append( + np.concatenate([np.expand_dims(v.numpy(), axis=0) for v in value])) + return state + + def reduce_aggregated_logs(self, aggregated_logs): + for k, v in aggregated_logs.items(): + aggregated_logs[k] = np.sum(np.stack(v, axis=0)) + return aggregated_logs + + +@exp_factory.register_config_factory("mock") +def mock_experiment() -> cfg.ExperimentConfig: + config = cfg.ExperimentConfig( + task=MockTaskConfig(), trainer=cfg.TrainerConfig()) + return config -- GitLab From 94a2302adce96c316d31c820df721a6bba1cc609 Mon Sep 17 00:00:00 2001 From: Vivek Rathod Date: Thu, 6 Aug 2020 11:43:52 -0700 Subject: [PATCH 126/128] Load model in `setup` method to only do it once per thread/worker. This is the likely reason for jobs that take forever. 
PiperOrigin-RevId: 325275789 --- .../dataset_tools/context_rcnn/generate_detection_data.py | 2 +- .../context_rcnn/generate_detection_data_tf2_test.py | 2 +- .../dataset_tools/context_rcnn/generate_embedding_data.py | 2 +- .../context_rcnn/generate_embedding_data_tf2_test.py | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py index eb04cc8cd..c82687380 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py @@ -78,7 +78,7 @@ class GenerateDetectionDataFn(beam.DoFn): self._num_examples_processed = beam.metrics.Metrics.counter( 'detection_data_generation', 'num_tf_examples_processed') - def start_bundle(self): + def setup(self): self._load_inference_model() def _load_inference_model(self): diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py index db5a716dd..89f743dd9 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py @@ -212,7 +212,7 @@ class GenerateDetectionDataTest(tf.test.TestCase): confidence_threshold = 0.8 inference_fn = generate_detection_data.GenerateDetectionDataFn( saved_model_path, confidence_threshold) - inference_fn.start_bundle() + inference_fn.setup() generated_example = self._create_tf_example() self.assertAllEqual(tf.train.Example.FromString( generated_example).features.feature['image/object/class/label'] diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py index ff282bbdc..02e1382c0 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py @@ -157,7 +157,7 @@ class GenerateEmbeddingDataFn(beam.DoFn): self._top_k_embedding_count = top_k_embedding_count self._bottom_k_embedding_count = bottom_k_embedding_count - def start_bundle(self): + def setup(self): self._load_inference_model() def _load_inference_model(self): diff --git a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py index 921ef9c4a..677adaf92 100644 --- a/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py +++ b/research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py @@ -250,7 +250,7 @@ class GenerateEmbeddingData(tf.test.TestCase): bottom_k_embedding_count = 0 inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( saved_model_path, top_k_embedding_count, bottom_k_embedding_count) - inference_fn.start_bundle() + inference_fn.setup() generated_example = self._create_tf_example() self.assertAllEqual(tf.train.Example.FromString( generated_example).features.feature['image/object/class/label'] @@ -268,7 +268,7 @@ class GenerateEmbeddingData(tf.test.TestCase): bottom_k_embedding_count = 0 inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( saved_model_path, top_k_embedding_count, 
bottom_k_embedding_count) - inference_fn.start_bundle() + inference_fn.setup() generated_example = self._create_tf_example() self.assertAllEqual( tf.train.Example.FromString(generated_example).features @@ -286,7 +286,7 @@ class GenerateEmbeddingData(tf.test.TestCase): bottom_k_embedding_count = 2 inference_fn = generate_embedding_data.GenerateEmbeddingDataFn( saved_model_path, top_k_embedding_count, bottom_k_embedding_count) - inference_fn.start_bundle() + inference_fn.setup() generated_example = self._create_tf_example() self.assertAllEqual( tf.train.Example.FromString(generated_example).features -- GitLab From cabd4d169dd744dec3fbb895049753aebea3f009 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 6 Aug 2020 18:49:16 +0000 Subject: [PATCH 127/128] clean up and improve tests --- .../object_detection/core/box_list_ops.py | 4 +- .../core/box_list_ops_test.py | 4 +- research/object_detection/utils/ops_test.py | 57 +++++++++++++++---- 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/research/object_detection/core/box_list_ops.py b/research/object_detection/core/box_list_ops.py index 555a0a3b1..e00dddb9c 100644 --- a/research/object_detection/core/box_list_ops.py +++ b/research/object_detection/core/box_list_ops.py @@ -327,7 +327,7 @@ def l1(boxlist1, boxlist2, scope=None): tf.transpose(w1), axis=1)) return ycenters + xcenters + heights + widths -def giou_loss(boxlist1, boxlist2, scope=None): +def giou(boxlist1, boxlist2, scope=None): """ Computes generalized IOU loss between two boxlists pairwise, as described at giou.stanford.edu. @@ -345,7 +345,7 @@ def giou_loss(boxlist1, boxlist2, scope=None): M = boxlist2.num_boxes() boxes1 = tf.repeat(boxlist1.get(), repeats=M, axis=0) boxes2 = tf.tile(boxlist2.get(), multiples=[N, 1]) - return tf.reshape(1.0 - ops.giou(boxes1, boxes2), [N, M]) + return tf.reshape(ops.giou(boxes1, boxes2), [N, M]) def matched_iou(boxlist1, boxlist2, scope=None): """Compute intersection-over-union between corresponding boxes in boxlists. 
diff --git a/research/object_detection/core/box_list_ops_test.py b/research/object_detection/core/box_list_ops_test.py index 6e6990b91..da0837d07 100644 --- a/research/object_detection/core/box_list_ops_test.py +++ b/research/object_detection/core/box_list_ops_test.py @@ -247,9 +247,9 @@ class BoxListOpsTest(test_case.TestCase): corners2 = tf.constant([[5.0, 7.0, 7.0, 9.0], [5.0, 11.0, 7.0, 13.0]]) boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) - giou = box_list_ops.giou_loss(boxes1, boxes2) + giou = box_list_ops.giou(boxes1, boxes2) return giou - exp_output = [[0.0, 4.0 / 3.0]] + exp_output = [[1.0, -1.0 / 3.0]] giou_output = self.execute(graph_fn, []) self.assertAllClose(giou_output, exp_output) diff --git a/research/object_detection/utils/ops_test.py b/research/object_detection/utils/ops_test.py index d13285df8..1ebe627d7 100644 --- a/research/object_detection/utils/ops_test.py +++ b/research/object_detection/utils/ops_test.py @@ -1631,19 +1631,19 @@ class TestGatherWithPaddingValues(test_case.TestCase): class TestGIoU(test_case.TestCase): - def test_giou_general(self): + def test_giou_with_no_overlap(self): expected_giou_tensor = [ - 0, -1/3, 1/25, -3/4, 0, -98/100 + 0, -1/3, -3/4, 0, -98/100 ] def graph_fn(): boxes1 = tf.constant([[3, 4, 5, 6], [3, 3, 5, 5], - [2, 1, 7, 6], [0, 0, 0, 0], - [3, 3, 5, 5], [9, 9, 10, 10]], + [0, 0, 0, 0], [3, 3, 5, 5], + [9, 9, 10, 10]], dtype=tf.float32) boxes2 = tf.constant([[3, 2, 5, 4], [3, 7, 5, 9], - [4, 3, 5, 4], [5, 5, 10, 10], - [3, 5, 5, 7], [0, 0, 1, 1]], dtype=tf.float32) + [5, 5, 10, 10], [3, 5, 5, 7], + [0, 0, 1, 1]], dtype=tf.float32) giou = ops.giou(boxes1, boxes2) self.assertEqual(giou.dtype, tf.float32) @@ -1653,15 +1653,50 @@ class TestGIoU(test_case.TestCase): giou = self.execute(graph_fn, []) self.assertAllClose(expected_giou_tensor, giou) - def test_giou_edge_cases(self): + def test_giou_with_overlaps(self): expected_giou_tensor = [ - 1, 0 + 1/25, 1/4, 1/3, 1/7 - 2/9 ] def graph_fn(): - boxes1 = tf.constant([[3, 3, 5, 5], [1, 1, 1, 1]], + boxes1 = tf.constant([[2, 1, 7, 6], [2, 2, 4, 4], + [2, 2, 4, 4], [2, 2, 4, 4]], + dtype=tf.float32) + boxes2 = tf.constant([[4, 3, 5, 4], [3, 3, 4, 4], + [2, 3, 4, 5], [3, 3, 5, 5]], dtype=tf.float32) + + giou = ops.giou(boxes1, boxes2) + self.assertEqual(giou.dtype, tf.float32) + + return giou + + giou = self.execute(graph_fn, []) + self.assertAllClose(expected_giou_tensor, giou) + + def test_giou_with_perfect_overlap(self): + expected_giou_tensor = [1] + + def graph_fn(): + boxes1 = tf.constant([[3, 3, 5, 5]], + dtype=tf.float32) + boxes2 = tf.constant([[3, 3, 5, 5]], + dtype=tf.float32) + + giou = ops.giou(boxes1, boxes2) + self.assertEqual(giou.dtype, tf.float32) + + return giou + + giou = self.execute(graph_fn, []) + self.assertAllClose(expected_giou_tensor, giou) + + def test_giou_with_zero_area_boxes(self): + expected_giou_tensor = [0] + + def graph_fn(): + boxes1 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32) - boxes2 = tf.constant([[3, 3, 5, 5], [1, 1, 1, 1]], + boxes2 = tf.constant([[1, 1, 1, 1]], dtype=tf.float32) giou = ops.giou(boxes1, boxes2) @@ -1672,7 +1707,7 @@ class TestGIoU(test_case.TestCase): giou = self.execute(graph_fn, []) self.assertAllClose(expected_giou_tensor, giou) - def test_giou_l1_same(self): + def test_giou_correlates_with_same_l1(self): expected_giou_tensor = [ 2/3, 3/5 ] -- GitLab From f364daf0d2df6f96855c0ac3f43ac3a1d5f2b0d6 Mon Sep 17 00:00:00 2001 From: Kaushik Shivakumar Date: Thu, 6 Aug 2020 18:57:12 +0000 Subject: [PATCH 128/128] 
change test name --- research/object_detection/utils/ops_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/research/object_detection/utils/ops_test.py b/research/object_detection/utils/ops_test.py index 1ebe627d7..94b2634ab 100644 --- a/research/object_detection/utils/ops_test.py +++ b/research/object_detection/utils/ops_test.py @@ -1707,7 +1707,7 @@ class TestGIoU(test_case.TestCase): giou = self.execute(graph_fn, []) self.assertAllClose(expected_giou_tensor, giou) - def test_giou_correlates_with_same_l1(self): + def test_giou_different_with_l1_same(self): expected_giou_tensor = [ 2/3, 3/5 ] -- GitLab
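A note on the GIoU values asserted in the tests above (patches 127 and 128): for boxes in [ymin, xmin, ymax, xmax] order, generalized IoU equals plain IoU minus the fraction of the smallest enclosing box covered by neither input, so it lies in [-1, 1] and reduces to plain IoU when one box contains the other. The sketch below is illustrative only; it is not part of the patch series, and giou_single is a hypothetical helper, not the ops.giou implementation. It reproduces two of the expected test values by hand:

def giou_single(box_a, box_b):
  """Generalized IoU for one pair of [ymin, xmin, ymax, xmax] boxes."""
  inter_h = max(0.0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
  inter_w = max(0.0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
  intersection = inter_h * inter_w

  area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
  area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
  union = area_a + area_b - intersection
  iou = intersection / union if union > 0 else 0.0

  # Smallest axis-aligned box enclosing both inputs.
  enclosing = ((max(box_a[2], box_b[2]) - min(box_a[0], box_b[0])) *
               (max(box_a[3], box_b[3]) - min(box_a[1], box_b[1])))
  if enclosing <= 0:
    # Degenerate (zero-area) boxes: return plain IoU, matching the
    # expected value of 0 in test_giou_with_zero_area_boxes.
    return iou
  return iou - (enclosing - union) / enclosing

# Expected values from test_giou_with_overlaps and test_giou_with_no_overlap:
assert abs(giou_single([2, 2, 4, 4], [3, 3, 4, 4]) - 0.25) < 1e-6
assert abs(giou_single([3, 3, 5, 5], [3, 7, 5, 9]) + 1.0 / 3.0) < 1e-6

For the second pair the boxes are disjoint, so IoU is 0, the enclosing box has area 12 against a union of 8, and GIoU comes out to -(12 - 8) / 12 = -1/3, which is exactly what the test asserts.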

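The 'stepwise' learning-rate branch introduced in patch 123 can be sanity-checked by hand in the same way. With the ResNet defaults from resnet_config.py above (examples_per_epoch=1281167, batch_size=256), steps_per_epoch comes out to 5004, the epoch boundaries [30, 60, 80] are converted to step boundaries, and the stored multipliers, which are pre-divided by the base batch size of 256, scale back up to the classic 0.1/0.01/0.001/0.0001 ResNet schedule. Below is a minimal standalone sketch of that arithmetic, assuming TF2 eager mode; it is not part of the patch, and warmup is layered on separately via learning_rate.WarmupDecaySchedule as in the optimizer_factory.py diff:

import tensorflow as tf

# Defaults from ResNetModelConfig above.
examples_per_epoch = 1281167
batch_size = 256
epoch_boundaries = [30, 60, 80]
multipliers = [0.1 / 256, 0.01 / 256, 0.001 / 256, 0.0001 / 256]

# Mirrors the 'stepwise' branch of build_learning_rate in patch 123.
steps_per_epoch = examples_per_epoch // batch_size            # 5004
boundaries = [b * steps_per_epoch for b in epoch_boundaries]  # [150120, 300240, 400320]
values = [batch_size * m for m in multipliers]                # [0.1, 0.01, 0.001, 0.0001]

lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=boundaries, values=values)
print(float(lr(0)))       # 0.1, before epoch 30.
print(float(lr(200000)))  # 0.01, between epochs 30 and 60.

Storing the multipliers pre-divided by 256 keeps the effective schedule linearly scaled with batch size: at any other batch size, values = batch_size * multiplier yields the standard linear-scaling-rule learning rates without further configuration changes.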
zvskP~jw>#~QLRkZvS0ALuM5P74Mkcnzm>H@KO1Y26poL-35(~juMG1b>QDmU3sYev zXISV>n7;ZXdiyYm>EByciHh)4)A$KqvMqo|-*Pea@|WOdTV(hnAO7iy-P1K#hd zWgRcoAT~CYYlF9u`n}z{A58htrhUWA2_|C)M37Zpdy0j(oRKk2#_?c66(De;Iv77Hn6#m7GoC)^lZ8{fm_*4B2km zi#P|lL$4TzlgZ7dHk_Y%>BdH31Q&l5mZ?zFHuPUM_1wN%>UrO+UoQ8M1E+~XmR>6s zXN0eM4Ge2Sz34O2Db&(hDa|<1j!x#nWRGyDBAG`V;8ea3ATTZKZY&-VdEx2l+ts$DE zNa=6Nmdg|*vXP2eitAnr6K{k&Avzn5Bt$*t)YO3=0W#_rs}_VXnl1;$ht-Cbpu}oF z6DP4KS$u&({`>CHvijy_J8UF zLN!}5VfH~eEf@b1q(UKLP_ZynZ%OV}mR-RmPI(eoH<6ymjmt+HWM#duwtHL(_j z)H+4#rJ!{(3TAl3ues%aefB$+ZOzK-!pf=Eerp@3`oo&sQcE%y)BI^sw=cuhpSrMipt$YWRID#e)IL-*$nXn$kM}cxE`RlfE`L90NA->_ zrnaVo^&4Bw2 z=rwDbX6($~VSvm%^7Z@0pxv=4hFD@r<#GNgFUkz$eot;kADHBMlsh%bLJ6E4LJ`Bv7WY6I0ufkHFc-ZMos67BoB87iOkLw?(5XVXAd{$nGfhR~#?tl(?-3mQvNL&Q*OFv(Oh0e;%CA-Cg5{z z-BG)P(JLIwluF!so^>RHh=mt@jjJ)?G>ig?_1v-)6XszeF+8P;>n!C6|EN~A9BV?> zD;XS^k!_xSQQ;yjC(}T4o7_xVl5@hvI^hXb1i=p9PXZ6s<9G4wgsB@32_r%bsLi0O zYHTU#9*CTFG{5n?rZook%=Pn5I`PL9Src=xnraL;p=A;Jwsf~<7Pfs7WXvL3vj55! z=d|mJni>Zxb*4;NngC`RIMkLV5<6=nO-vXpaS+#`y9z~f^mntQ;kV0T3@Nvko2PKR zzT(Rx*u%h**>ObD(7W9nY1|MD-jcn-NaqIa#N&9W>rGSnVO&L{R?Bi*&kbhiidGv6 z8@&!^xont%6|c6X6hl+b6 zB+;v|we7IDZV%%|LXJCl$M`5V_930d?9qrkwP6$}yrUZYP94{?kKD(^LYtrX3S;6p z&d}Ldf7%GxQbs;hDhwNn^_4?Jr=c?V1wh0j^Knp%8~hc9!`SVc{GJt%P!C;o`RM9> zgmvpiEwJJ8hGdErb`bHfw=X1ogA#17(D^^M(w_)G64^RIzj;Wp;3y4$j+4`IrgfaH_s z?tQOYYC_{fIZ^MiIH!$F_x(mXT=R)!H}i{d+RrbW-k#@+q5k-XHX;e!0Y??T6#)(4 zDF~k5iXjsY{doCwRC*&1bj4VfKA;;09Wf z>$i_Pc>IyjrNd+_d#VYmGDM*LnrV-~+9Fmx-l!ZV zUUj~{ zh%kLMoqcK{dN2@27cdi8`4aB1>{>fkbvrUPp-pcMaGQQ4ywau8$ zhHGLVnka(5=4)B?Z1~sNj+A<90-k> zCR7EFn({nHQ=BV;Al7#4uS;ANwP5nOvjfM-U$ZLFktCUJVg2@7s!AJsXSP=+V11K` zezL3F@RH?fiY27$8>Jk(`5fak>m5tVA=fyz^Sq1N73ht zC&U!Tz3`o>V=Jr1oXkkuiO`l(790=c){yl%n_Scp@~T1hwsOCA_qw`yYzy9mc|DnW z3s<->aJpiA_8t#lT^?d5q)pjsG(4Y=B9PK|;*WyS`xf;8zO+P0SI{0dWRRjM7X~}5 z+0D$dQBAK|PB|X<^xQIf?IkE7dB@X((}72!J4=heO5(RJi*r0-p`vdSBdW0q*KtMkEiQc6Q^ZzyH&bdKLetBiWZFYGnNPhKQl4I+)P@XJAat%(7r+DPp~Z zD+pzXyA9XabG#2tuK3c}1V0eL*(OKAmFOsCce>D)>BR zWGWPfJP#+OC^RT^A2NGHztRLHIUxUyjSgwlx>fSM&S95UT-If{Bms=i%-=6OV~a&C z(B}`Pr+6usd_!FU&%?VB*4t-fAP%dX`b8ChM6CllY__%wq{tl#i6uZp4(&57(WOX; zIR?-kJJBz|x~@&nGX%1&S7%XU)BH9$EoeNO z3OeM|4(6NgU9or|Un5zknWx|F{1tNMyKuNvbO?DHMuj|HCE+=e8n-1PEUNE`d6(M- z?p#0PIf)Gi78|Zve9zYbLq=Ta> zz{v5N)GrYU^1&1sje80|_p)3#R+~R4BOU@NqShtmBA3m;y$dCj-x0p3FT%$1wTd-y zfB)q{T~k`UPpZ~kC)tVhhQ86fEGqa!e4x4J`|irQF-7u^)2SR}eX;xZg9TZ60W+Ta zi^6v2YPSv1(TNFIX2ad?R3HOgb=*C=d^&(4l4n9mQ&FgXiP+mYm+wXAGXR`26`l}a zLnYIS#<`8(?x?6`S)%(Ci(oxc){MMGvm*<$P~b&-DIsb!Fn_ax4d$5bCv49ydQCeu1)P_kMlqRFybzv z#DP3$*!0lEBf$vJF@+LqVaV(*IV37SJJ8a>`!x1;-^=4;Mn&ivoZbv3I3Zso`g0^d zWP;5TcmbIbQug~buvb>pPQtsQ^F(E} zvlUr4Xe_Kl9e;}5d{!9D$QZ>gL_8N@EE_G%6kyKp0%xxhM=IiiH|3>2} zCKDv?lh{rB?+cySN_j^|_e-OmU>)+z{No<)Q6xhAVA9G9lXs=A)B5;agu5w3)D#uk zIIVa&fDd8-4-8DtQ%9hBKXv|@VE53l3=>-NMmyIW{#C+8)70xi{$Ztw1%biJ7W+-B zni7cR0AEKjeI7YTQ|EMdYEHJViZ{nW$EumXR_#G&oWsr?RmE+Yoc@g)1twGcH=6sgp^(02raW7{ULSGZF zpA)Nq$^FrT>d|YY5Sun%mTQ?c!t94YXyfytZIvwq=w}*O61e*>t*1VGAt_!HAER_q zNrcc_Ss>rxpAY#2&l?4R``dYSMTZMr<+@CYI|B!>PL@6e=mxXvwLu~{->rPINbU<` zZ{=xVjRLUsD-j~*5YFVeZDjU~vxpFAWw(Jb=@wu67liyc( zqc#wnyuFsho03nn(vt0Ku#6gI6Q91*bM#((WeS|t@B?(~Pyhz&QwIp=F0w=!^Y97Q zhB@iGG>(t(QV$>7Gia$iGxAFlleg9@_j4g3Z?LzvcAd7Er!PH|9FKMHvcOHkmEk?f z@j{zV-6vnjii-3H{i{sy)w$D)Y`JW*GL%KbM)6auGQ@N_r}zkhDuHgAk|M^4l~NP8 z*$y(BiJF;>h<{yAFW<0!yX6It?}HvHrm^gbjc@89hIH?jKgS8$B}T?UVgD-v94h}S z0!UasXWRb?N0y&O;63wYFWn0PsT2(gYU+D&AeT#@7HFj0$gX*QF==&YH~ERe6P3tx z2NOcU6#A~#BsG8L7&jId8$VMsXgb}to}O)dVz^Fz<8@SIWF-}@($uH|5&Bm>SsE4$ 
zjd4wJ^|V@B-TH-(5zicH8pam=7Amte=pr-(=NBB$2Vt!ZnXd~=aJwBv7)y4P=gG)} zJ=CvQ00f}8YbifXn1FEUAgXBjzJ-AuV4g1X4m0!hQBICwz+l@CH~U(WhNzw%*})ai zghrEvXsrAT?kSm+&31uRQ$NqI8rHrm*X{dpY=I1o-=XlgeHr`Su%gYiHiF4q{Az%y zB(KQl)hTZ?WQy0pS zar_bp5R3c6F31f?$YAJ|ly>lNAggnA=|O;B|1z0tZu{kH8;-)1WZ!Ai42!Kq*k;qw z(xkgdZAlLjd(HPMQJuwbRCP%#=klX!Qo$4N43Qv5x!9dKgvH=kMa=YLZMVX63yTIN zGGrixXaU5~Z)I49+4?{T=?Mc1R?z4?L{Us25+r>R(G%mMCZ0RQKl-nPCL=KD7}q9k zqyJzGsE8pupMrj3Q8Hk}c#hZ~jG%Zw3+N!PK-1?{IqXeD4FQZL-C)<`j;tEctsfcp zrlvRS80@WiP`J615|jf2pwdy7hFu}Di=v6${gWnm48GzfMlyQsBH(n9XP!Qx7BNim zQ?iX7F!3=p9ovAP$J%`$38ZS$Js!HL(uNQGE#&SL>sHYtNvX^c>IA8$Uot%ni zAzF{0?N%6kbl|oK2Q(?EFk<`LsJi&dMsT*Mka(~ErMT=^02SnCI0(p3^f-%d1qxRT$`i_CdyZAmg_^a)}#t6zH2EACTf6r)l z327k5SBu;hHl7hZj%yq5V=k*xD~N>ix*ah3jVKRrZSf!7`w<}%eWU0=cX7mI)y6_S z`}@C#ruRIgb1{Uu4|1sEh=->WP;B-9bux&W$KB@To)skEQ=t1%>v1-$s-5N1T}+2J zEhnm&P3jlK8Hu!Ey!CUwsbU$5_1eL+YoYCGiFDYcch;3fd-@N4z6>8bmm9}(F*lGX zk+0jaurprFX;?J8aCu{Y@iDx-ug5Kk=ZV6OJV$2skw^3+SH^uAnKwnIbnF#g<8BxV zIF~$ZC0iz;~Q$08gdpX<@aR-698OhOqhPLVjFd(wfncjYNUCq^4fT#|mDn}wgRjX**X zj-Xd85%!QCm!f(e*jV0zyr!V#tVLyJ&JW6WZV}Z0U)f%k` z_gk|YmWsxr{cned<9}PnYPp>`X-ilb+m?>2JRHd+YM1_B(^^skpc6>fZG1c= zVf_w8|4~daN}EL!#^06tmQXtzg)3H8>?xhxzB<}sdRM7K=yBB&%*rBflfNM-1zs%<`@ZA6Pl4Ig#xkA zEMgL+7X7FAGUgdb0Fj8!KnZ?rf^wHYCxU&2*MT~_+uF~Z@mA~I3qE$|V;WA}8JksN zZ%eM-2%D2Vw;cC+lewiKgRODm$o@Bi*irCCdg??3xC$QHsI5~F3 zSzjPkB;Q=;36cFr`QNZ?V*^ANX=N?7oc(wS0tbiY4qPPDq@^jiaiUekgnvy}a{|Vh zc^kq20g$py^QKg~b2moQD%6=>1 z2=^Vn#WDNnrs?sC$jR=(ryb_*H#N0o)y)}+RQc={M#bXa`N2!HRsk;LGB2ZKWX;<6 zVl$18`gy(|74{8|*z@`a+q6=eM@bJ#}5QnOr( zWLa#3Z|{lJEiih5(!^Zn2IO8l43p$+{52W$p?}VIw3y*8W#Ij#;zjwmbjkX*K!2sv z`wU$gniGeUB6PAV~OCd|5GsK8|nr{_sb1bo0q zf|jl93fb1zNKjw=!|Y1qF8xFwT!CGAFaE)#u+X`yV^v!9bohbaN$5RD+~L8}bx&kv zcia$t;oSxNqB$}C z3g#xjshq8TUxq6SAKGGhQf5e$z1r(joh-YSXd%lVx|FXi@KYQ8dRc!~3`F0n7%z2g zjaN@Vcl5OMv~Qt#K9OKT-Vi+iOgWqHF5d0SswqxegIq3P_slMXlfK`t8!*9xhsDKQ zV+6;0Ib60W%e!LB9e5ys`QPbFGrV47C~7W>H)kj8m$(hS+Q`4qc3I6MfNg6xtR&rb z+w?Ig*D~Nb4N29y_GP?qehYGKs8do1CH8RA;1#`l?&He4kZ5y#nbP)A=jR()8FYve z>An2FDUU?56PQJQO0`gL4v=pQC}>A0~0cd?{C887f)C?^S;7pb!^i6OHFPJq-i8s$92A% zU{Gft7)eE!9iGIUDZX;{h?wC2pWaSUqC$Nl%hjss{?p@` z$!3A=uGP(39qXs%-XCgks^U5LY=#$j2UZtkMop!)<552uETW(cWaK2P{ev!Vi?Up0 z&a2Ye{JZkH1W|o#9EYpnD5@{EiH#H?ln`S2v}OjPWM)~snT7vV0kN2FtxVH60pzE? 
za|G1=ocNGB-7`1vKF}g$p$&+F^vl=TF@F#PhOork;p+n@xCk!QJxIO7EaNbr2a zyhoXEt(*>t2BSn8jPLB2nPXtK+Q~AZBg1Btf}`yf@Gy2n6Pxpf&UfX!3-y_L$r$;A zPM5&q0waujX=GklEI4Fj8plI23Du=TGhMet$S4W8){;o?zpc1yDS!g3Bfc(XfH=csFFPg8fBoF z>~X8j>M5>&qa&(4u>oZxnHz|Sl=N3$YbYL@fOwr+C;=T77|MJDcWU(i@?O` z_z6Uzpu|^kfc)-KLU=#cLS~#D<>&-|os>_6j%r*o&<$W01hcHb{2PEvp#hX?@}4MoqbT6$&kBXJjzWyx=gK}{1W2v|;sqM2dZ%q9dToQ4xNK{!4@E3;tMlf zqAGn3!L(t7$@;t%uk|M#q|V+(?xB2rJtq+=HVO#YmQBUZ)VgJ+V3f%e9;@Zz7fig~E^9KJ1DQz~D z90vIl%(3+}v$${)l_4-6;tntsGWMSGiGA2+^e^>#--A@n80LaUO#b^WyeR3NCt0{} zaq|cN%s}+78SCJQN$mo~YE$RD?dz`E>C6VvdX&qqx_rZLq^4f+ZbcvMU&%1XCz)wW zj<^;mu;YcP_*P{lZlLq>_3%ocspzy06*91@1mB8C&k|59o-$f=0H+*v1~>OLyr3_) zo2K$WL+FkAgEr!Xnpz$ZT-tGjf*UdCW7f#G)hP`eNcY;czu4rrLw1xl1nvjxR9cJZQy)3k$z6V^@FRGWf2N0!+oIj!l#tzX`6sCj)n}1mrX8 z14|T)ppdPlqYWcz3Uy;0zWgIe8J55}(yg@(Lb_NF+|8K`ACxDt1bwkKS%wcSr&i@+ zjjQ>QMJdsU$m!u!J39?^v$}e+bw{F@x#TWto+H0B;YHtm@ts@Bc_AGen5f;Tu}vM> zn!zs3z&+qTU7k^6qb9=#-izMlwt0mol>AiS%U2AEKZewc)Dwx7)c~Fb>rUy2j=z1M zRVm_q3E+GvwBOzG_}H>HVK(SmZjJTZzBNn#&EoAA9HbRx$F0sIAHhTiQ%f8ID8)h$ zLf2ZKr8BD${$d-5bL-&$^%F4C7+oUp-JPE?TYl!TZA4=F)G9VeLHwrS3=};-v7Un~ zqUBEN(7cVE zwWoWoXwZ*Uqz4mr4lGX664+}P1ZJ#cd+bv3l`SkH^;A1TQ9K~FTwTlCyvz0sp34~8 zEJU1!#mtPsi{<_7UjQIh2A2h;hE;fa(DF76(AcpD;yynOrv0AVr^esuH?)sEO3~6< z+rEASSi*wem^kG60pw zVKkc!R=|3^c8Ke2uJG(p`?H6<^8~RI$xYNXOn8@C9{Vc^ozs}?N;^s>^GB^_dRY93 zy*gG~?}M$hEQmyN7>W~r+P(zWBaamMDXH{+6get)e5TA?lBNBb71 z2H29x=~vc_RnlvooN@TZ*BpyPs9$J2i$Hf&Xpt--|c9k0&+M0E^ z`-3r^*ICS34wM3d&~Cl?Hm|uv;L9J*t21XjUa?y&LQ5(* zU)5v4`S_qXR+6XGrf1V(DYYjiK^A$_VE^vs9hvS7T3!2AdJ>1{7YU@5loge{R;ZPh zpA$&T`l3k;sxD_d6U5DrY#u)M3!}rQ_PXvCF(KgxK6@b!g5pgccpq!LOXCt{kuR3^lVE<1O8XH&gV%H!Q+1d;b{H z6w--=_(?$Fi*IYucW51pWe4SRTPrKS&gE`J!ynfT zX9);*CL9C@1Ri64o*kb8Y5JAN{S{FF~+`|BMoN_ zEm3UNUpdSPEi{u-K`52(1?OvZ!*T)+)${6SRfoiXHiU~Etd}@I?Rm;>?4*| z6#+8^lOqqR+L(em4F!V333ERWXEWSZ$Xd@=KH00@N$2F2>gB>z&@Hz^7w7XMMUjV(-aM9RiB3_X9sPNb@noh>)H_r#= zCSAE=({9Ixo<50ps1`hemp8JHCLcfx$wzpZ54$`2-a3*3=7stMNGI zOfx@rsLuRhL{-1vFfXc-+P*%t#r#Cm=3#ePX`mv4&|zrM;q22sNiDB$o!7DlB&t_u zYrChWSAk8s2E4WJhI-ip$HL8xkMT59=83vPU>8)qkdSJ}RIw5U z^%n^XhBpgXe62_~GFknaM*ma3<(E7ZNl?RbYsXWF2<_;p*K{(!Bl5&g0^=cB5Nb)h zv>aOEm*>D*Ry;32qJbOzjRL!Q&4*aVi ze*D57$x*qg^lBZg-6ymJz_kuj-;&Gq*m8CgGR5I+d5=LCCi`>N^T62W=( zMu`kBM}K(*?B=AGgUi8k5HQgIkITR2liEbD-`<4M-%cu=)uxw#cr=m~@;StUoXDb5 zat>uXoM!JoJuY9Ea*)MjvA{O&t|>Vlv@Lria5Wv!#6?4{p~q`(`5h$SW9CXw(jvEa+ID#LW81Z6GWE~ z>TWr-Q0ejf-$jn_cjLnA?*Q#NTwidD4Qf}c4{v#`jU^yXk9A$=KMOh z$q}zV9L=qY>dV?Z?cyzy6$v(-{=Rp@KtA>&-`tE%GVXu9k;ltBy!f+-NBkN93YdYp zE(q0@;sL}8*)D<1(FNdh=Wbhvk1f; z6D(q-?yrfz5{;aO>ijzR`-yw`dyq_yNhZ|C#+CPW_aoydR~WWWW2ZcHGkB+;9UJ>r!OX<}WIY$cuk{pZjNx z+e|}0E(0w{{f#3jzfG%U^1GeA=;)*BS#?r%t75h!8-|ZxWV`QnxTx0$+(aMQ7l66! z0blCG;kif>*ZU(>+{#r~JrURNYlaAh>kQbfg)tc8$Du|XQqIB-gg%Ulc{o!5jFYbZ zA9e$F_&}4oD=L!jOED71@MF+j(_&ipCHRj^3ZMGxy_QR)!|3f!)Ave~y7BZ*aM$f2 z;llz{gS@s2!L*34%C_~(ImhO{VgO_^I`f=qAPqV_q#za(Q$Ws;iNdelv#`U(RYCb$ zvF(+N+M{#@tg`Db6BFTl>|Q5`WJl;_o`)`EnsjA5Z1Ge3Krb_9ZHqbad=tcd(95#4cD)m;Xeow6h&e zWqFw>%~uptq`uuZC$tFr(TzWV6dHOhD`7QET_6;S3kJw5 z=VY@X^Qw^4ig(_m%9s6_aZn+C-rc+`}U2`fYm^r zXZkV>*}C+8OL$VES{YlW=7}<2t@Sf6`<&0^=IKx&S<7q2G|I->cSzsNJ0r2jR41k! 
z2?*h=4Ju0xy`ToTE;$FWLo9uAsa7Yv7|Lx#16;3k?&L+sl@HyGq%6_5{%qN!_K)v# z@H=WO4B-tDpW^GxBx}9CA=; zd+N_n^*kG~c@qHfm58>t zuF4p74wp;qnTKv#>wx?i0wD$u^`&9r&iP8OTs;@tC2ta=g%Iw&{6yhZ;>Yl}t!Nx; zWzF2n&KN=OeC=E*glz`7hnDGoh41-A6ywplHPFje`haHw@RX^H{`p%p#qnDi0k*Z{ zsbZ!jm9OKFjt5j|Ova#W8k}gB^ABaG1WCU(JDm^v7oJU_)zlUtw7->@gmB7nNyi5&zt)pn=3qK>yZ$uREO?@9MM z&7ISFEl)TK9V)x#>UH#VqZ$r@?mL)2v@>*#-V{LK?Wx6*(!Dy-my=E(B6qUql{uN8 zXx=95{btW19TLf^h;$ix7M+hzlb^v`_!-Tk>2_Lpl+ho78NcW_dBL0`Z5iPdCc%Cftf2IfqhH&+F+bYo(qZa%}2Xyt$XO z$Lj~mJ5+K=`>ts3Nh!yM*) z{~!LKZT){b`{u8$e?;OJOeSro4y=g3?*lqpZUn#~=(bXX_ysClew|YbO}m zEPkBl!d(8N9DFll+)3&9#o%l9-e4nO&8UE!PBDDw@!YUu6SA?TNC%uOm|nuKUb$iB zMd(q^NwxeOJTo45quGsb7mx6R!A=D&%PIuwbEG5-L62G{ILGdLfAB0EhcypJ(3ghM zB%})4r8@i7l98DQ#tt46ZHisA&x?4GgUv-7dlFy$AJkD+Xt%D*{9(uLFTx|_r+rvI zjw91APzu~6xpG;)04p03JnIsQDI~9L4tp4Lz9}Tu*!VRpz=JS^3Hs;|g*Yp2zOJ06 zzXmjRSTGRPS06vMVx7ZaP+qPMKIn)XwjqOR7l$I`=N6`T=lkJ+$5}=DX}SF{0gvgy za8g{7B~m$qTD?al8%DJkfuhPMmA!*)VR30Z^Bpw}Z@r?wY*IH zT!$r}vHeUhvRa?PYle-}mZ)-@W%_jVL3oh=upn}0a&7se3O2(pO($=p98fM^bIH7UO5BycOi!m_lPF{`iN;xlpGcuC-s zH>rB;!hqPq%BJ%+<f34_+AXrgw9Pm{UvMIEe8FerqrTS z*X+r2YKmhQ6Q}}@-M-$QKGoQ)vlCnGA7PY_khJ8@bl!sd%N)a3@~({`Kw&Q8jaNO2 z(%t{|vKj{HxdZWL#h0Hhxzm56pLfG$bfE?O_y#m^P|QAG>n?%3NUX5jTp`EC)I2^? zxo5zwD_G!X-^kw_aQhdVVeK?fV^o}t!A%1uSf;AEwj(Pg>={1hM4mw$it9)xN;|y2 zLq^|UvnaMWF6)r$Ek>(PUa+mAjQD%wt}tJWlA!mfK~|;g#r!n&@AQqk((xySVqJ4B zaFwD+lq?cS2$NR5s522V7GIrg@jJ7!T5XzFKk%-Xlz4TK-~aLG(ye3GZ#%E^gigS$ zUkH?*K5u--V;>V|Q4?zG?4~>!LY6RNi=ArBY$NDe1hbcOHsrwC_l^WbpB%@)9jFvYG_EG(=QPk7t&rVN4HdQ*Nu+ojQjo8u~S|ASV(o9kEJCBcmw)9)ZUZLkn-Nzo}UCZ?-B*d&fTAc62+x}YB*c}fCO5w@yaqXW0 zKCe#lp@57^ZY!IT`mr_058b!Dz2zSvmG0*oL>&&$35jdeIV&i}Wxs9r8^5yZF=xyxoQq~IHmp*pfw1-tw$8T5$Xa$a$4R&-DW>lg zuyIg^KeuRDlCMK$er+UjBxewPS3I#1J>U#|f4yYV-qoVzW#x1QsD|gHlaVhzC)c9 zMty>8>x4%I4Bn9$#G4m4317fo7wzFfkR+Rm3+u|-T%R{}oyT4foohlQllwkx%)s}= zZ>KQIJY6>jGX&&~oopTr9hZNDpoKI?mrY?&iMbPd`l9OXoxU!TJyJ3yJkFCb)hdM`YxY}fkq-ayh>3#o+JRnb=Db}!pO(%m$ z4h7@g)+GAc$_m`#&*8%6_0S$i22^jH>?aEvCNuJYL2yg`=iwN6A^{puQukYz_GE1; zjX>=$6Ur@;GUmEM#3&MXhCO+b$J z-^{)VdH?xiji1HEU%Qe;qi=sm;#zlJRo%$K{Ue#!J+0E;66(tkP-AjpJsFW-N?f4) zYYTPQ(Ux4Yh`d(A5@Pt!()3U^^5G-RqAf-ta^OW-T|W;2jL!(|sAqY}K6#SyB)>kO zULjLzWya8oq>n%Fx8-3~Qsi$pY^oosPn^l=_BEIM3r5aMKdzyMh>XE=#?H=-stXIN zY-p8rXFC`&l&sY*Hsm7^I=c$qL#8PZW(8U;-7~BhUPMzEFj9g!o*e14Rhq3~l8YoU z1u--BqGGMWg8AbQA-^j+4z^`ORgxE9^2!B2q}nfQixsIdLWk*LnA!TB?ricDF@JK; z6h&*c^r(T_XS!Sl!ql34gU?NrhYt==+h2b9!opfn{qkfA{hqh}psqG6w{yPZo^5#P zyZLhTGXGJ|IIYM3o%=TyPEbi!TF9@AIaZ5tpX3{)6m%nlnyn6@Vd&?G7g9~rQ!d+^ zcfL%pgpem!T*LB|M9F#hie&k(9X#u<>umj8-`Jnt-X2-kLYu%X?JrR!L1KJmmCdF9 z!i#`AOgS=k+>j0@cLJPUEz_<)Cb2E>ztU+%|H6#sAC=BA!ibVXMBN}MVRj0$!V!xf zENfu5h-=vS_3rWS$j6-qD=N!=oYB^8Cj)_#I@{}~8QgiHK2(lyjPj9;lZGsYT$q+I z5W{-7I|b4E%L2aXEXaY)P7T@`ql%kDtE5yB~^ zT05Yc>99R~czU#G4h&V%WfU0|3KR$gD|;JTM5jq+$D&e6VQhL&zvzvGJHJ&-&485b zijawHrP*#0(Y(Ece{#%-`@a)J-d~!iINdeLA&zEtKdnF0@Z3yfXQ8XuDs=lb3Y>5y zF3(;a9vy5E$cJL(cR(kW*=fG(p@tC{MZgUJ*l6R zed1*`sDX3QBBIQcCN|-OvEyHJ#nJ!dqrCXsGYZ9Cx8?$9@QKfw>XSPX45-C8zuQhr zKr@dmQdkBo+Ti^Wcu$OD&=|XEHc+8&<1`tUWxVR()ZXvv{IvL5LgBTV>${0u!RM)q z49^7GTo?Y99$}k-{WB6`0cnj@OC4YtdsM8~b0L5qit0o2EGv!(p%I`Lg~B0fbJ;|a z2Xeh6t0PtAojza^oq!|?iTafrvUD=yB!*gDLMsu*$++Pl>~Z9kSG#qu4RY8cXtE~WTM?_#4YiSSBH;-isa z)VXpj+Z|bAN=h5olHb3>$HV^IC`6z`q|?wCD{KFn$8b_*ifdTgp7%Sa1q{Wq>c&MJ`fmIu~{`Sd+;wFCc%qu)J<*sW^P!)vh#Pg#$135n-5eYg1 zl`e_XL^k4@Y&n$}>dG@^2XHsi3IA)gW+C*sHyklXdG{V!kdWf%L&}k7Z$fs63;RN- zPKhbL^C2h;oy{C$6|NlI>cCZqgaha< z6MHc-5kCd+MCttCa3IXeJe+Vju)yPX*vId7Ons=~)iUk}L)9xXc~-Y6c>6ECPTbGy z4m+|^*QVQ?&sU4ph0Ei->0!lKzSBb6eW!PcHircwW<#a@g7DB7Z^>x2yzKm0(53;W 
zghnYfl!pJ7;w;Nh4+H4#WfhX?a%;r*c2-ZMq1b65hl#%5PQ>0lZ3^u&EBi{2lHbu} zxq7`(ltQlGRsN2bp6zLf+c5w%GLa|3YMTX9M!$a`}gHF2a~ zdpGwvJE&>2-uLjL({0iBfoEO~Z|n40Mlhn8qX8S*3*<2`1(Z4L_FumK*O#c8kF;+Z zfS(_AjQ{X{JlSMO(O!Uu7DV@s_P03s>zEQz(qBH1z2BXgFIV zin^TYe4s7BZb;$qy%;b$!p#DIC4ckK$|VrK2<*{KWar>TsYQ}$|9+ugAHqt;HW+KT z*HBCw#O20otbk)`mT^W(Ef*40H@I52FT-L7RYkE#<4OYIqWgRCu0k^@S+>SNF#~#I zk(G7ih6OK0TYI&#%nFSK$<3n!HL8AeRnt7nuzAz9oBw)gca5+CcV9LXAPkWtEu6@( zoH}y9c5n4|TNV>#eIbO3;MQk<*(}Hp?Yr>XLY3KTHkuW|LI?j`5v2t}G^=H$w1GSk z#nb8ADJPi3IKzA*7Ob!-!SdTGW#y4u@2^L!MCV7no^e-#u+QYZxxVYheP@mDB}sKP zbkCaw7F5~;!V27FUqH27DQwAcJF!QRNj`X7*y7}GuQJ9hcw$=nt%^s3IyH`6-@13W zqoCDKM~LZ^C0s7m6h2jhAPz^+Yhg}+qy5bLM^GWbgG;8M`h4N42Hfa8d8DwzPd9eo zjSgS?bzrvtjeW6#*-BWC@4KaMnLr+KFso5nq=0={hsOqAgKTOLM}{f$e9`Ne&S6aC zJ^3*)CXC$v{9*)yED)sT$yqQ#=ue1v)zF={K18#FaKXErvr-ezxfeF z5)r-84@mzQXimZQZwdlFU+79uR^UKl>-iei$`ixo{}#KozSNh@mvIBa@~4c?8lOYx z0<>||Ub8aw&2A1z2frr!w`I8;GWsrVCKRf7+_%wp1J2>HrscSD4|5&rncM@?2O4n3 zkCZZCZ^gHjNt~1e+NlR z_!o@rD8$P^bW+Dm6#D^UCH_{->_;&Q$T@;eMFkt6&S}Sz7!AEU^8du~mFo97@Gow9 z=2KzsYhsYtYPNdPAUY<82Qi5;3FjanTFHL}4dj~8a2BlP#a>4aHhr@W2p0w9&jmt% z@-$^)T!A?r0NH)v)mAjY)3=0WpVf})){jG)7XXPz_k`Imq%&1+z^_Y z>kQ-|`)zzKHK|7FyPp>Yoy*B8sM-bPLryYu6*eoq$#Mp5DnfdWI1&(8FE4x*2Lt96 z)x`*=pf5e%OLawA|86WvDf2Fvq4lpr2&0w3_$c@dy}7>u;R+><9E?-cm* z!oi4?e0)%8nI1>fE5q`0Giad5i{d(|%7Gr}NAk(b93Y}yT&~1ZVaDxjOAGlmqyqoi zVADPWJwt!QTxZ`A>2SE393fT@vzg%A0I^zQ^CYMJvK`W)T>eMQYWy$f3Y=w}AEh;D znGaG~^in0+ma82qD?+X*tYw1ufW-PO0)gowSKZ&7Wqz^h?-O}nOIu%*p1);T5~Ne& zp2%%@LH5nztXW5G&Y{9=NM@yzc8#qX(b)exVEk9h4brf0Yo2UUabI=+q*i<75-!*I z7LV)sB>sx8!z;tEJ&lN23LC?D=KdOOiDGqvbVAR1bshWN{n z{hCFLKzC}J=MAw0&@;t#8)i-7H5fod096p8pB^ZeHDd@0;m|Ako9;U>Fr+nKReAqL zNK$L-`j$*yMiY)Toc5)6UzS-9`8V7@hWZ%aoxWj41eQ`p%BEiZsa1{pl1z4-+b5 z{09CNDN(QdGY-$+~;n=tRCxWb^JRos>B&G<_ zuh34iwR(ePohY($5i>(o*|0$ZN_1HqMvJd|-FhstZwfbiK-&Bjdi|e4$M}1PEwjK{ zpx!f2q}Q?A$J>LZt!<@i&mj|<*%{n0HU&{4HY5%v*^#|jM%P)SF~h%!WL-YD)W~X9 z$Xj>!Wq@gQW$A-rfsJ@;5;i6gTGqQ9_j|k8ry{|p_~?_BrA@#`%&?}mXexUitfrxY z1qjBg!j}BQ$Tdtq()D&;tqIw#@8Y355}eq!-~HJuQRa^>L;cnf;hKp|^elV5z%8vv zMLQ!~D+7GNIRwPwZ?eNtPr)VtJmv-BEk9i+BtgLJKTz)ctrOXA(m=iieh~pYt~;{# z2bWniVE4odb8HN&z%WgzA0NrD5Q{{ip5T5S1KA$9P1i5D(s=?2ne@Em!j;O#Egkx$ zG;YuR3Gy)cbJ3*duhTz@uS0j|lQC?Oj0ZmUk4mcU--}w!BA?-(QOZS59RA#s3~M+~ zi&E^7`$hS`jqv~*SbJ#UiY$#0MK z-zwS;+As0)^wp+COU)44a|9`J-^#nN)(V|_>u+r?O`gM)OQ$|O`KPnYwrjrd zyw2FY)vRi*H;3vh{F!K!2V{ZNq*7!S8@JbxH-mO2X>IZ}KI z3rtq09J=kDWdXPF^}akQ0Wx{9l0%rx_*<0p^t0FMFh0dE2m46^g07qA7o8laU(qps z)93=kiaN_i??Eo40S63(0ncBt_bFgv+2RcB;(7%LdqhBt)F@@4Iu8s)_=DC&z=mxI zk0c1$JcaCA;mYX<^=!yIsvQ<;APb`Cvtdu1BH!`Y2cHI5+S89M8+VaftBu|-KTbD* zgx^Lz)El>zjyZR)e9OkyN#x~_%abr%rpeHzC@ry(gJWs?2BWgci}0FjFWZdPLxIX! 
z-bq|wa$&(MOK@%9R$sgK$!a!Nc20#Me<>DKg`__fCc=!0I1nc=EuV_SDtg;0Xg_%*CU z_dsbe*0KV7ZCT-kQ$xGV%32R0Hyoh{$k_{&LUWa`%Zk%kt?3%9dh7MqR9EiYmt}0i zmO-oaltFFYm&g>ocjceVIU^tqx-9smM=!RBR5EFG`6>DHzRJUdU{+L7EUCZzlmA== z{0d1a{NX<`ypP?ZNnoed3ku3BYJG+NOh-OnUA>(HtLk1BTmrf%wLHzj?`FTj!IQX+ zq)ar{mBMfaxZ5NWvv1$(^eBT~@$(U#zz62=E&@E%aiv0N3xovhZ*;Fr_irNCLNgLu?8m4BMP_x7X!Kg?<)&Ttm|<3sycc7XQ*(x zkaq~qM)(k4#42OO{iXW=Jj77)o?>5m>LXz_5KG4H(g|&-eWuAx79+z{C{4EHK> zN`nFb8ln0^eM2oLC*75rc5a|hsFDAXoz?^)JXVh+6YV&@nKnyQ_)j4wFd8|bXk26t51+cs*N1}CSfn@F|RSAYbdp1zRbk7h<+ z$ekA3<@a;al+!qqd2&}RT5ZXb5)jneUKKC8vb=;c!|&uWUU-mN(D-9azHV@Qz!pjs zp-Efq+C`#2ia*_HqGt|O=e%dI2DyeWKtk5zFImuAnQvHAA`V`-ygYMNZQm<$X_Q2^ z=?7B7+&a^`@|uwf*e6Aa^JeVwA+F0OawsJdb1mUA`-CtA3lI>`^W&+MEQW;O1a+W# z#f~vpSf{Oe*$|!|6<|hxlA5B{V|PNg7Z;b9Iy9c<+J4NLLdG_quUM})r1ZX%zAftYVNwLd;ZYYU$H7{5w-lP)k#KAoQAZbrR6z9 zBP76){|JPaH{IHPd9|b|KuZwT-OXA8Tq87S&qvi&vM6i}rb&SKb<4fL`ko|j%6cy& zmUCczo^QP?Bkt>`?;9ORXKHWO>)b*Fv-(ic=ci19fpt?CkYJBao^!l9#nm`NIX(na z=9xC_dw3ozkIcCC`4~0r@g}R6XNhaugWen4U8MQQ;Pd1W?oA>y;;WsU^K}*jDpN~N zQ!%PJ%BLo45tLmu<_SyUglHB}+~F|!et}F7$2Mh7C3y4De&fv{wM6r%-AYJ;%vl$g z=mgVBVh+;ZuTY;rr&>MpV=R~c+vUE10Pp!f6EB^%%Y9y5Nga&&n8-V9XPkGc(xu`L zAcD$!tfTGDr&uN7Gki4kU%_`(>_2A1xl%%{Q$J-*Hf7EGadFT+)NK2_KuND{8dOdBu z{+FmEq!v4Hg9bukM#?6=6&S>OOAg>5;7_CaQo|m3;Y{nlci&@nkYG{l^*KE1GA=+CuHX28NIvN`r(I zWmOOWGzalffe-lE5JLmNt{9gs1Uh0&HfDTRpMO z%KK|JxN9Tg2-+{l25Bp2<9?0rSmOep=MsSX)z0vDd4RVG9O5#{YRrzPWEezz^g zhko4r8jj!9*xmkR6bo_}!NYE?zE6qXY*|CDlxmmQZ?p~{PiM&wwc8Dz0QO<-nq4xXG;QLeFv$s(5n1=y<;Oe z)2r*IU*_*MnM(E~ecInfB`>LL0UFbL(=3CNlmHUCV(VM4#oz)gXJv4rk+0_vQTydH z^{DK}BB#p;VA(T(H=w5Acu3fpeZbvY^y_-yT8RV%dgGOtpxdteS==2SvheG7Kz@Ee zW{TdMEwW+hupq!|?wzA;*g8Fw*^a{d2sebqBl5c?xriQyPB|4en^d;e@zVFz-0Z~Q;iZHEQ!r=1z`9WJQ_pBo8-=0d0vm@O?qs`k<_ zk)WL;#W8xx&lO_bd|f?a4n+{%&ueSGkKTwP-p+w4-ShjMgK3dgP~|Bu#~j;jQ@>gnle9@;C7UGxPM!NPYC17**;L_ zrWBWRN}O9PJ2}*QdJDx?h2e94f(59VJhyL3U*ue#kX&K{T`TYv)M1fr!V4`MkK-NpjpklX7BR>u;hnJy7afNA^|S;R5le zd=?d0#XTnIv{hm?0fn5x0yQqkt4htyLrNnvB;Gajq#Xl;4-3tP)7-x@c~t|8qV zw`G6JR^Tvh9nJG$*bq~VTVz(|`InWj9j~cmyyx9G{XFg~h)lSVF+9Cl0&Ir6Z_?PB9TQlr#N42ZT z_jnx++#~h6Vy=;dLzxoP=y+fw{d5hbN%9ma!paZ-oBOKdG-}Q%{_~GCh(FbbNA(A^E2UM%m$anhUGy$S!I8Xbr7flsP$aW6k&k<`|*U-x4HRO4@Ij_H-{aDAfw3@_fA&!=E2<=47 zL5BH8hbV*pM)8xO=i^p#OjhO>hKhcPTHBo&7n288A3MKV9QxnDPW~-Wlu*qB%eYd+ z$r8}sYPyyi*F^_=Ax<23bjf$U*q33a*w^4(kv??V?JT4ei=<~rnqPbISGQ1R7Y095 zLX?`?G6_pOn-%|r`1AD+_YoE%*d{W zWyn~uyM*d}^hM@DCb_!%#3>Q8x1ADgi((W8!mg`H!VuqZ8@S~Stp$Gz^DvRg;Y+Yq z%%7Z=#^fsFo~Q6TJzlKgT5=kO-S3rNuYedfkTO z?evN~hE0cthR^iAn(n4hYM}zTwn-Ym1N2kfgez+;7_i1JH%3d-kb0x8A@0VKrK{;d zhlz|4m>ZBBJJ;bvva zLG9mQqJDM<(%qfJ{yi!J?We##G4Lh8Rz84D#%xc$5aq=VlCx^}I0^8ZizKGl`sBwc zmC!Mh&<6~)4zw;007p-M#f~IK94i}k>tYhZKv*siE<+~j@y=fG_5! zoZe>>n@d4Z%9SPy+f92vAK*v=6oA|cdEkh9JlMzUV0LJOBTZDL*pi?E1j%W<>D6hQ zK0Kbe8JNA}W>wNw)|Lm^d#~-6$+~lo0^UsM{n(V+X6=>SGwK~1E(^)q%Uk8>+T>&s z@7L{6%f&xN>;h8~TD)zQ=rKltfs(0DWL9vyFs(SzN81Wa0GBRZm;3MKfR5;Bae(CZ zuMnmD_Lz0n$nsrQ4D$A$ln_d18R(<1Q@qGP;_ebiWy^u=im2fp^ zYQ1-?>zn;@|HnFZK6j90IeZV7sMTfbReExE<|m)EHo(BuFx_1!H_2ECuf)$UFH`Pc zHYD!T;nr8$vQEC(9|UqQLcR!lYfrCQr(`tek3!@CK9UW~fWMhgZbGIVvbtVauBlQz z8y=^`p`rR~V8D){g*XO$M*ID?w#{M{f%o1oGjBh4HgUK}G}g7arrB8qf${AVLBW>m zh%02=M~?zWU2AOEt+3i~WT%NhzB^3P>RW#9hxq2EL4Ut_FY1gx<{x+Ugc;?Xq+NC} zVadG~@YOG(0f%}P`q#OhyhfH(DqjfKjkQ4sJBAPI-y^FnHGkn4+Oh^1=9b6sgubW! 
zc=>VQ2ZM8Q7PXYD+s=q-cOT+rn-HL54&fJBz50( z&YoZ7s)NlNpXq{r4Of!AWfl;R%{rmwsb8vkntn)HDi>(t2mg`R391Y}W4^zV;*KTJRt*$X-Jo%H0^OJZ)SOA5 zu=T2zuzY%xNMmgGfB|}U)tvO29$Dy-G{(2ufHbyh7Wc3^$=%<`EQ%|6$c+e^Y_=no zxSc20-784h?e{t@o3lslXC;P}bfaHT2=^BD$&X=rv1vQ^avFo4(d(WET+uLTS{6^z z+AEzM{_$#HM1HnIBUz8Z6mIY2_?-1$7jc;M;bFAssFIdbS-rK3%skLmV97jnn>je{ zmcHQn%uZT&GA~bfVr2)vbO5iGKT6k+>oLGF9Wn<@`Ww2#4N~LgL|Xo(F(p+lau20) zE*AWSP>wGgT2Ro(WyH{nC%)G-3^S+%lL6T36PX}lZ+{d@g(nJp^dJgYCwb|w_ay%d zcZvOPBxY1Oa82}yE20TBFRZCKO19eCNb$iO2<&07-A^*2aTQ*y`FZ#21dn7*tEY)` zUTZz;oW>zAUPH>EE4=hZbHpk)A}G))ALeF;;vr$^J}Kp8M`IMjE8icdyCAVtrzw1> zwq|lZ*@spY5>M9e3G-kYa zE@r6DkJd54FI9rusptaQ@TC==MQT~wCUMN21Cn$9IcO&*s;rRH%ZoX}uns*687g9Z z=Y)UQ!$0i2R6V?@bTZt$2raRYj=^YjJEu^HL-trrFjvFljeTU6W_K+$Fnt4v^8?mW z@huf1ZnX$VWN8CxX>GLSdieZxgmOUIU`XO#g#d!1@LYglqh{AmF26yl2XFX?8{So3 zhp5U|1c1KT;`^f(VK}nL`81?#WK-WRKU%dFmQ^Z}fLC%ewx_a;WAf;?0a;%%WT8Ua zUL^*YMAP18dNKK&0n8YIs4rBh#4xTN>6qNdM-NMhkdLg&1G{M^uIGJC^=vTAe$~FW zYY9jB01gv%#QNJF^yAX(vX5uM0RDaVpR6HZRF$oc-i>oq3V3KN+{&tizqB=Nh0eoH zH`jppCDhF5;yr`@ri~YwbjRCbggqR*I1emU^npISaPkZ1Hdr^2$9tnJ(6?p>zdtI` z!4C!?Mf`#K$}S$)g@$J7`c>f5n(HENY%}N@9qq1?7$*2?esA&v&|?Vz+~2!QCFexu zptjOjP~cIoG5>w!_?hBBXXmB)gni*yU2Bs8#xL@a={?n0YfBIcUM$brJ?Ehb3Ly6E z9vtflD)v}i@XoN;yCpe+L)!3N_M<(0m)lF(e2!dy>)BPCmHsHoggr;X2gduIc+HPw?`d_#Wx3_R6BSyx&8nk)f08p%`E zLxt20%YZo6jDaAh>hCUN=gu|El{XY3?WWYr8(Dl4)t9}&Squ&{!Zm}cZ4LxIaNO6oc{0H z;(9`tJo~A%#64&0>@}Xpnq9ZACRzXckANQPwVzN-Tc6WMF$?f@regg;+DA9{mD7;_To zErGxyk6lO*(MOhi_v_Bb{wja>XE|V_rE;|evxjmT=U=(#$aG)Od`BW3e}|zxrhUGe zx~?gY*gmK21uIgFuMWnXAMy3g2lBewYUMvsQTf6BP#0mezLF&62;h+_(3r9I>Lx`%z|MLyFZO_p zAP|Y<0;s`otjD@gOoR&M6#CgP zoo@e*q8CaWV0P8BV@m@dMxKclFvVu4Tz0TEpAjr@ndur1UJ-atVDf33%uz(W*RdjWsl5ARDRqlXs(U@UlU3wb6j^z;ZKN@w~E^D28qV-klaGQn@e}=Qb zU05~W3bA9XVmS({4_o4gpZ0GCD@|V_svd*BaI3$g`Fq8@-K2aBeD~D`sIvmlES(Em z_cHxN7u@;VSBe;TFv%0VM>DrV$V`G9zy18n_ZzOFV-RLV`U!=N1X{YR$0qBt=Zi5w zZ#%FU?DqbVzdR3Hq&Y=? z=?w%Ad#g3+P=9yPEvN_czj1OX&L|rTc)B|YEL&IW!U^49RY+Zr7&@<-&A*q5&Ne`R znkuci)R9VvnvNy>@2l6I2D95<%u|F$zBF=JH!o3GyJ@pSXF{?59#z(cMB-cF+3{C8gq}h=qys7R3AYW z&C-X#S^>vg0h~F-65ls9y&}csnS21SX|LtsWR{W$`ukf{3Mu5MDbG7Wz zr(qfJ4Eljw@ktXU>~c26&o}4>VK1cmR_huI=cTWvE`lU{{%pk~cF{1GR&@M5xnIzF z3KBV^C{H%>IH^kzd{0%ch+Syx4h-kdriOgAY!3NMj&T{5Xh1uNfmunYAGeMSB2YMN zY7V*7R-e~9@6RvawoEOfU2VTcale3p7w%JM>fPNa)h4ORFd!oXVNT0ZPq)5(xEH%f z$7jL5^bgBdm)Hn!&!Nu*-&E;lDRr1ojHCxwuJ1dB_wBm` zBfE|{ZdN_|ZxU6z>UOi4EreX#UXXm`h?UF(ZyRyO255=qxEEkaEl^u!fbB*rrX zv*yr9IgX~9AJi6K14*qLPCml7d<+zhj1R;FV+ew*k)yh>cTSJ{lJ$Sw0(p zc!F~Hd7J*)(WI2|YXmy9)_(M1Dndc=lTIQzZkA(?txoeuTibJIr)@cQB{FEcu<(MP z5qLsACDRBuou~tDtTw3m+^&xG>AF@w36Z^0J|3yiRj8T-#J~Ea;QMY0yN?6g^xz&M zS*dld_wAYO4S(iFjkKiwuDE5$vEbU1NG09th(&FjUq`dh^Sv8N-t^r#WZc9S#ou#f za8L~m&`FDKCiPv^60^<$bS!8wKgx=efL{_ExzsExZW<#9hE4dUTS7n_p|rk%Yjo9@ zKGWR8jRa`E#Yuvsp1c;-Pf41(3E#S7W;fBBQuuy&6Ij&$HY`Zd272R*6 z-RP6m2sJU8p=t%=>9MKo2fneE2)NW!F)jc}CO4?Ej{c+VFE#xYCCjsUvnAA6CCZtQ z(+D*_r(Dw{+O#H_M8jjh>qh}R>C>`rh0uLSs6j^40*+YZfQ1KLq9R74HlSi&$n33( z*A9;9x20;y1fPnSL)m6H@yPa@3pZ8zmB=iuH z1s0R|^?MOo+jLmv?-vv-SEy2JeY%6_Cm8F@)joO=5hECqx{H1i5h52p1rf}L!(9J9 zuUrXSyti{AJ_PL47W)9Gh^ZoB=*DdHiL5KV9$6$hs;Kx}_D8y67mSEd#^OEFS?br^ zF3KK$HFvlbF2T|*w!md7)ksZy4cALS)H8kjJH7ya?K$3i==8ceyw845um`JO3DL$W z-NNuc-7Dj~YHeBz1A&Yq3b(X7GIn-054IBs4mqF2sIMQ$Li5P0dW)vkoNA7kh}txL zsJfovswzLg-s{SkP{9BX2y^P}=M})J>Zu;tM-RswRZKoRFXXnko zCnBUPUF=N3O!VK-NUATM0GVkNurKqB@p)y`!knb`4!#w?lG!eVRW$DjKd14?SN|Q? 
z%~vuKYlRKK%5Vf5%oh2~DjJFuy`GIL^z`2`f*Piuf9ZMCCd$Oe`uW&Gdqh29cH6FWbD&3W;&t+TqP4x%i{T<={^{LfSW z*2|~3Lj`F`jo7(_KbkD&_35&BM;ZP(2^N5}Z9g2Gi-Lhmh8RF6&j*1jbw(g5CkMwZ zIY`#mLLU(BrR*Qk2FDlxIS3@Sa_{xNV)$SMf1LtsN^}q%0HP6%3Tw1 zGFsfND;ZPM4B)U>^h}wWblg0CPG3^s;qmeDF$8`2v0LG0FM0*_{&W3ekET zG1>}Ak90K6jefwdhy#oYo=XuT#^)lnly=DJ#3chk0be1I2_YNno%TEFFBhTHd2P%1 z?~jbtS}kB6qLyeQ%s$A1tnC5RTSV|^(gJ>C@T}l#*m6{2ITse_%Ctzj97^YAe*)w)b`d{2(6)n%UDP19Uxxm5@vB>hXeiD9+FASzGn+1lF=~lJ{Z4b-4)S(? zkJ-ImrnqA6U@}_{-Dp68rbckXkIR`A7VUr##%|81swquU>#v$=PuJs`SQGY14(wq2 zcCMng-KV+X8|X5!7_w(}Yd<-(a!^%q^>tOf+X#HdJ)?2%OyznWkS}D1vIa?XhkwPE zqpeI#VVN!CdDQ<2OBhClX1l-hZ%%4^%+`vWG7&b80I`;aBnpM~SF%iK2(@)oEOmtQ-&!$k?C2rM zv{P6zRa`2RMlwvkIK(;kRGKWHVoaPzE%izD^Nc#f?r=?fr|I$f>3&Ce?iO3iA?-Iu zf(m+w3L~yLjh^(>|C3m8qB0QA<&}X*o2uCP3<8Zl@{<_uCz@)`#?)Hq)LrhZdK`AI z2qPV~unefr7#<~x^WuG57M9|h=KaA3vAz0m@Hqkf}S=YZg@PR@64b<{C2y*=fy? z64;l?lZY@Gb@+SIYD`-DG9E2k<9?QB(el7JvtyFb}vh1d-y_BQjN*YDUpiN z0eqkcFywOZ9*xqH8bJRvpX+hZdazVj-W^E_N1uKw4!++ZUBYx6HLIHYoT)BOR#(0M zoHZPVVwl>2o{?TgOoUK-t8y_rNLsqZzfnYN_@&bWhsc*E=`GXg+N{=VTfuludzBu} zYOr?UYm|;HpJ_a%o{7@P(8CxWbaRG?Cq^5aGDTaQ`KW&5IZGUqzXz~`z!HrUhQF+8 zi3PCtutL-*5h>!ph=vUJ2_K|8z#Pb>=xUm(j-6(m!USI6DBP=Emp-fx%kNCFC*PB^Ux444t! ztsy<-`YmF4s1^krdcD`iycTF&nvRN-|;PFhxtopWJ1&+6-bN_rrG4N@2JLk zL#A$|8agcu;(pO)ptTN}SF>&ET+MGYZLXi_jwH%O&VL*ex$wR8u3q;$wRL+1Q?CYlD2%SscXh5@=X}Q} z=`{c}w-8`!5R!3{_$jpgbf!OBqCf*bgU@$CHi^8bP5b5iQ-eupD}h+r>S>npZywSi z9%guCyEU_VZxe9yF;3Nk_Xm-g(5lIfYKX6Ba5ug9D zr~UTtSgXZtV-E7!6!UPt*}^+f+aTQ}U!d@mL$ow_IQ~qai27G)Ly(OzOxUU;qOEmzailFVjxTqjR}2l}tjK zL?_sZ*yf*?mYK>CIieX%D~ht*v(LG+PNdnA{JRFz64K=f3xN#>U<33uUA{y9=D?vc zXh@jf-t4}Q@i8C5(t97udO@pR!cM!lp@O%IQ2u(X7X{?Qb0*-Ua9es63uqh&{4wl~ z7o&PNL*6kH!5n?z9VsMs{ld4%Y!@NFb|(1#M%R8sak$#JxW2csC$sfUFfpKw9?42d z-nj5OGLnOudBOtshYt4;tC_LNH!|BqFT!Hq8}1WDvNVhFhFL%xO~>42_bzJ0<&xd> z`?n+Y9ov<>T?Y5T?E-SLA8gddAw7KWUC#Rtn)&Po{pd+Qh2{aE(;@V|8>~9lOUuD3 zC^KAb3Bg<7lBW5VBWd($-R$y+3IIg|Z4MarI)x@AD4NxuPaQV_0mwFxZB!jXftGCo;l&9Yk0y$gP@zNQdxgxDaMOl%qqgOY1o zQugoEcUyax$5QtEW%S!CCr4Pbal*Ly59C`kxY$)yi0`%v=Cd>(XlX~-;@*U`^@GGp1jHG?IF~*jf*?(TSJZYoU-~Ry&egA98JbV#9GyV@{ z`=_$|#j4!$vT4ZAxA@M*W%~(|EHljnDTW8$fYAOt17EElvaz-)+;(Msiv3obezp1q z_E{q)RDm-Au*3>5e3TKVH8-TMl5Wrl03ON0*SYqp`g`b#b(#zy-~EhcK825+vj#S~YtY+6 zivGsaZ+rTbW-Y8(@s0dzAGa7ecH1`l(-w@ltgF=$4`PEHI?N50g*s%zlvxZpmuXTpwd8x zrx%YGw2F4jNgw3;sLdjfqK@Bx|5H3MP?7~~$5CE4A#+-BpQ z_0&IgM|u&A5CRmV7+8{MH#t*gS^12q?ll>XSKA9&d2uFyQ9#)vkipvleaitUwMtj4 z0l?MrDboq(y*c@uC8yWosJinK#-PC4H;(`CU~?!!+Z9L=fLGe-Fbv&= zz)pd8egFb>4z~f4_E=fjEZPw~vExaW4WD!hAazjF0&)XDf;ak<_}qsa;8NevMGiIq z42v&0#JDlgUT4WYo497krmTAPyHWP1j5gf!UAo%D$c2m?1W|$b6Blm>R2}Mn@@x#s zr#3`?)R-Z>Nc~>m8u!jDVtg8E@wtaiNWc7V+v2%L?N73i!Jw|b!L~j35JuX0mK`3n z9sOaVuI^yk+Ib^A?%5MDaYI}O^ZtENvvz)9f7oz}cfpG#`MVXS3kpZyU zW_fTg=VWKvKY!}|aDfx7mGR|(RquO6(Ra5rxW~Wd*+J%H?UqWx5Eak(5#?MThUPpx zutkFqW*$>q1_hukga&2bJ`NLhvrqv4Ab(5Q$W54^|O|%KaXjsp#ofPZFs998) zXSGnv9DU4e+x%>Uy?4PZTl}th48JLiX@LPk<5^A)aW42i!aAmpe2%BWtDZu8AwL5H z5&Bxcx3@5TMRTIhWt4G*v217pAXdBpp9?8BrKdn~mOHx=0DOn--dSf)Z)&$SyIMZE zJ#zp6KmbWZK~xBGNxBHxcU18qon0717`(+7{k^L$bvZLbx)-?SNXe4c?bQ+ly>!UZ z^XB6^c~l#7VAv+lhozVTcIa%xmYO@)$ z7TEQ_|Lwljzu!OiRzGa59esA~@1Af2jRc|cWfrjxf@Z6ymNAO9p&bqaMz6a1KlXbb z-iQQSYiqoYv{v-C;hC8DBk&MWh}m`qFU zNr%4sx=o#H;=9gGdSJhFj#lc23`4Z5z>1+JMjxN3XMNV6IOvsz4)uF3_Ro6J@@Vq# zboioE6?5lKo{qi0i6l_@U$mo%W7oyB*Vqx0KuiKL3B17)hymMga0SF3ACthFO#;_k z@nII$V>W;GLAK_$U%4j|`n!by-HyBPs{oEWtT2_uF&={hP?79tM%hF`Zg1FQbsL_x zIOm*1;lgt^AyFQ4{Bdmpi&)_^^4j{&>K9(uVjsTh43^#_PUkTa?FM{Hw1r2VWyA?% zH7##pc`@R-JPLRXNC$T3TgTrubJvG0yg5hgY>= 
z=SBbnDVx4_2XXoHt=|B+j0d=4>pI@5Bk-~&0w$&cBqsshlL1HoJrK8p7E+c@JN@Lp zD(7F`@dygLlzXXL0-6Co00iXmCQm_uGl?jdqogM=G7$iD7@#;2WlJ7B%PPnlN{YMz zxmZHvt7t?}!(G|tJns%b0rE<_(S06NP1X%0ru%fiuGb$L7uN|C8i5nH#v1qTxj4XtQ;xG(%sR3LFq;6MC}f_$rwZXOQ9Uu835ma&$~NwEwgYM zvPkxA_VS1nC$K#05-8iMMd|jL%l^{_@Gwk=(nQ|FZE(M~cXZl}>3~3+H_~tMP^TZa zHaA#0Ku-iO?I8t0Q~#wMq~V#k?1<%n@ky47VFm$Ew+c>4d69z@)^w;&$_ldp&eLd1 z0H#xm0HWaO=Xd$Aw{`T|k!OECknKCyzEX&BuQ$uyIS73BnMh^WylP(wR=5RoQdJ4GRMROE*fzzS5 zCIBWxbD};YY(c#&t^G-!O00sz=~HjOQT-THB;*6w6F_zd?`Pz}_n_=?7^GTgzZkAK z0yzbcpL(ZaKLi0K{hL+fr&+?NfN%;YfJozw#t38<91}n!ZJ!_f|E>)it+!@_g)_O1 zaj#%jmQA1iZd-ll%>)uY&hoRN?7)~LZ|!hqsr8K{TN=iv@My19~jcny*8}LbM zJ`gZ8W{+)1vE6ke_Nh-Sx54(U_VAw{wBN4YXag8qhnZ{?OtQG7$SIF{33@9UBymT# zws(-fFwh9;{ z-20tf0ix3=pFaI%DSq{fAIFH@X+QqSwa~$IF_y;JomZWT zyfn{CZqyCqv34UwEyMEWpCgelhK@6ybOqEp4z5wl0>O})(?%e(M8=7fR47LXl&7&r z^a$e0jxctoW)VCRP(1*`;QjuOZ$LehW%u6ykOy%TH+?+gopw2CJ{utLW?EX`D{8h=zL2`I^7A`z47wOHJ3#rjSiJWFfRmRrbdR;e=mX%Y4r|g0`S} zQhz>IG1qCnk+<%*uJ{z?hhhR5P8wJB0F9S=j@p5qr8cF2!2$ub14H9P@Tb15K)|}6 zu|B!bZoKJcuiI1%vEuy}usHYZ${vBGgmkI2?pz#yYVvHa3 zf5r-V=4)Q|U1DKRU79A{pa2P>3!m&i8m$V*GL2|l8LW=-hT@cuQ>3)mrlh#yg4Nh1G(Rv z^%BcfOakvT3B-WycUq0d${Ulw-;luSUwnymDJxCDy7+J^3+r}E8tJz#f=*4XT!6<9 z+neRh%TiQ}UU7*&xuqYj-zICQ+hqxW#6{&(?cn97*cjen!vMP7-QDi-D&?mSj9o`Mhd=*rsF! zis(x@Vyzv{osYX~0qb!o2m({I0M~zg*Pj$mft};_;BDW;0}QVuo+;0@fF47hufvwf z7IbmH3Sez)*bVQoKr8rF;ag2gCMrBii@cBrx?8v(phrpu=O*^`IzUm9myKebXgvfR z>KvcqilGAl0G_9udfZFd-uJ^J1hkY41i%RJ5Jcph*(wQMaROqEU}|ykDhEI=*GVrM zLzzm(i#Va+Fytr?Z4SzbNCO3i4+7!?bsTRYDgA0H$affpN}hpIo+t><3(o7S6fa&$ zLsS*r*4(fcP_>#eQ&!i4Lbm+y6D&2ekbs(I^Ogm`_V2FwxZfj9PoS!RW8H(Fq_jKQ z9v*ji0Y>qDA4Z{;B9>E936Px6)9?`N@1PB!D8gl#2)LYp1xNHNP9K7NLIBNicsi+i zl@GNYdESF#5J(KqKN$j44CFEZSMdlQiD3X1d2b2;_TP+ia44((g)Eg$Jw3Dn-2iIuX$!O@ACA6Cm1l_`GdK(sJPJP%$d#oDj^4Bh3Z`Zooe4Z8gi6}V!p5VV5x1aBL2Z1JN&S7_@3qMN`@hMcly2R3j!xd zXOh}Lv_R^As-qF;azG3j3mCb&w^`!QUYmRBzr60X(YWnd#zpd_K1Q2z3Lx51N^0sv zUdF8IncHZC8e7%&0cQb{$+m;FURDj16aKr;g8iPwXSq}uiH_3z!qjvGKJ=ta^2 zuKMu~*4&XnU_;f1=7%wU8wT7;B}n0)9(j!P{`S$uJS=0Dc9>A}K4!(y(bW!oM+-?z z?u-!tE2Faj*Mjo0mnHtSuYQX@OaGPtX)jxzyWjeviMDQYHM@HNL={YTIL_|4`WVI# zc4hF4pq&9MGtcGEI|n(j0|`%Y8E2vZZvyEA9FH)Djf$!SnTuDNoB;@DQs3#m8^*nM zWh_JvW6TYKah!DrITSednG4=;L$qUg+9xrlh~i3|!O0lkRmYNPW@b78rQ_|Gf_NJa z;a$(pv#i2G+gjU<0lSso!qzrcZnFXSQ_151`4iW92Wu7?j!6)Jy9aO9Ods!R>f0;= zs2|1XqCO<=e2spZ7q#<6fhM(k0Ev3*T)b+f-Tl}GyZ-j4X(LJW&t%^{RG6PC1%Im_Sk+GXZ-Ou23+hG^zOA87PkJ_iPx z;7kDast4#hb11ZL4hGZjXcjIr$x7W&{n3M9dPksLQ0Hn3au^V|xC~U{VAqZW_tfUI z`i8F?nLh%*Vn1dJ}_|el*JJS6==K5#9 z)$dgfJ#A&2&srnuyV|f$Y248`JbAnv=q67}`mY=@VEeBKbQ&#R9wopz^a^u;m;h!gHBuXgY|XDP6gP`%aCUb7jdEFApkb9a48@! 
zo~pQ4`2t@`C_f3z*0lnSr8wyH1Aq9Y?gj`TP@5}r{nkm~sbK)pDBhL|aHNHQq^sSs z(v!$19x-_ zY5?FH;C~!h39xi^Uck3XP1#8lxyVp}QlobP4^om)#K+)0MePazZ?Y!&jR1B^SmJ5z z>}+;#?cSa?fKxmg;m}OPGx(U}KkRZ;eikfW?1lQxH6KOUf=&QsJv5O=$z8hwcrrDm zy5?2j2m#2(0PBGtTwxmoNHV-FLwL<=cYy<6+@)uxWE3lB{84`HmokcgcvtFi76Am2 z_qwQG1iFZ(K&oPQ!#2c1`+%B6+@L~^<&l^oARVPMIyzuWjtq=CPF=t~!GFE0l?2}f zR3yrqU(JaUcz@)R^hjNWBzXhj5Ou#U@Za^bzN3&TXPh3zi;K!3HC@IDzIQh7 zw9jAtTl!}_p4YU!WW3lZmjLu*l9JK{=r$PHt3V&@L+_cae9s4nI^UgUo-TWB` z`wA9aK;NS0(APCqILL>rynZ!~IJm3&Q*Bevyy~Ms3gH3{5J+s)#}M_H0F9172!R;_ zK?LqOz(s<=_yoA&00-ys7!Ud!plMIL#izlS&lo<~U2h4EPurm%`cI|&x*j!~pN7_o zK1!PvPqxoJ6gVjbFGV`Rzx?Y}cJXO5DSH5vc>5)X0{PJLPjYP^Ks4%3CL4@F0Anpm z#C6&QlZY`R6@Xi9KxGiWd}3;r1Ia$Wkl%sle!y5xz9@rcAbG4hFFA5&G;#y1b{O@u zJ1W#YfHAMh9^bIpZr#>Mz}x9|!OB@y3@7~%+}r)o{dD0aziDfgL{IcP*RBTANrUP@~ZwPV76g2$9}W= z5$5z%%f(PFKv@(ZGBQY3Nusr3P2gPm7(UK8X@$)`aGq`4xZR$3`Z*|RB34pV!t;mW z=#Dsl_b~JSl=666LI7YH57w`Xr>zkG1fFlzWpQDF<>n>A2R@H=TABU!w>NuxOJ@G; zmuI?UN;^+u-0tb_WgO1&v`Yz!dEFfkzO4TD_m{f$t9dr_G8L{Mxjqje&!FE1hEw$+ z%~>*l&t7)!{?5W1l3wi&0o%c&eFus6!O1yWQlQ9OpldW{fLzmtFj6pQ>b4#X@_Sp^ z-NDX^ltf|xq!hCY3PTNJns!3SDPV-%1=>|0a9h0fYNwhnG%ji^2-XOC5@#5H0?zU= zJMxSRZT&-cS=qFCEWd)aLcqrz%n{ldp!ObkZcofBx{SleIiBJ3iZZ&-;o~m#tl?K< zw%Vr7`EEGMufx|Y6Yu)n$Hj?sBnQbOa2kP%B<8X0GJ7k&%!IB)b-C*kn!961Oad_p z#3bR-29xEM{18lo#a!> z4r-<=A<{EcX;O&fO*PU0zbqcaV<;(bpV&44`Ehu!$8k540_7a2y2svxsd&Uic@|>hoIo2Mg<>XeDFLtYCYA@0Km}Enw#FlP@*L2IND0(7 zT>;Xm>4ivQ0~VibLjlvK4XDcj^XD>EAzrLubiK#KmYC-wo_M7rv&yeQXBJ!z#)otkgiaL6VD zRE6nas$cc}=hto`pxbooZK|~sF8-dE=$-Pi`PR=_Y6V>S<%<{_0$@8zJ$TzwqfiT{ z59yr`h*kCOZ{C-5iF>cocGNGtcJ+aag1$z*ayaOqy64Xi-h8Ko01x#^{^*!Cs`kpg z0zZb>IncTJ7CY|a-wp109bb2Ec*1%JuqS|3<<${^4k8KqiLG0A+M`cBZy!CeA}B9? zT4hn-!vGZv3gU0YyCUN#1o;a0|O`a5%rj0p0w?Pr%n0!FL!S*OvTDuRRwB0pzHVBC9 z!FrkN<=H$tz1_>Ntu#Z78w~H3d(a0Rj1J)m9Xvaj2V}&d;W17EZc{E9`?RYgfmjFf zGF66>0AW)wwkUY9Kv?a75l6q;=p@k9x8{3Z*W&9P%dg&0u;)mnwU>76P*162Z3J!Z zp5O;2W%3+&;p#KI_mMbzd`lL_!2vt{q`9_x<5Tv9Z{KFsaD3E1F7R;v73r{9V@7*~UD@Jcg_WkrEkIU=&;;_Ox(#c=z?`ZbEiz=! zTza-w;amM>@Aen}XA#scIXemNs!!<9cdbwQ2pFB3sNItC!d83)erJv0bbhqG2%hc< z@x(;ow2a^~ez;dR_xoCmG-T*;1E>Z?9XwF0y~&+B@|<6K{8?7B_AyJ&DkUwASz2$& zKo|PgtV}hrekw6p$SlaJ1(3a`AoAXsPe+&8_nCPtWk= zd^}eA{Npn}`R~&XS{S4sdwt7Fpz^1|+C&A2f!uG|YK&z(CV@A-1Y*GUo4$%-8H`Eb z^_9StUp&>hOhto`otuwRmX6{`P^;!mRx)$B11s`O(&9EAtxp~s@)*_M^V9>4u#PJd}$?&GA&NW?8GrVL3`aZwh zdK;QNRg3|9&l6sn7q&{=;!pp?_3C}qoD|r`-A64MJJnGb5L3IIb zODGF@!sBp^`{hODU=`;CXb6BC=#N+$Ue4VuyJ-eV@Kgc}xpGb)(6)O#7e_JPNKk26 zWdLHNO^J)AAFZr&pwb0GbUFG*&eH4wh=!mfz+<2ErO^7p#kywG=56-z&wb6txsFb=D5Iyo`0jc&{q2@faT;c<@Q_1Ndmo=2M$^#W~~ zataL5?h1{+ZdCOVn%@{}a&rj8I@aOxoTSLmz2a_8nQ^=&K+`Zd2(Uc_hcciLsYxs@ zcQ@1^XHmq|Z+{35{2{Acasl4e7(eL)x?YB|cszc!!z2iT%iYLWsW?c(XH}=lp`8(N z)b$X%LLxg>GwxCbfIT-*WKXq$=B>G{BH-R~iliLeb3-uot9owginqtBrZLv_+c{@eT3pjp zcJUW~%{;|iS+m=U@dQ`D-LP&Qb3Hp{pfnMnt$>aNxtZ3{(~7tF6pK{V*kvC$&5Fwl za2yzBPKTC*ewog-Y70Xczh#UWl`)aARfbEA^8>7<2JwQH$Gg`BZAN2DFjkD)Kx823 z9o#GaWf`?JW=MkCMV4JqVyArRXF3r(_Ok?Pwr%wGuW`x;8Pb>NC4U)YCi{l^xz-@s z!54*&WaG9cERUUD+6|(0X?A`ghK+tG&$`*|29*pu4YcmjK`VG^_n7ZqBQ1@4YWF^} z(OL8yA1Bp*sB`V=nZI-Q)-e6ncY?@M+!t=l zU6etwL}c*u?o8X%{c;cp)=EMDlOCWQfa<3%0py`Ig8Grm!rQV-#`EUQlT7K~v;_yL zn6cw+A^``vg9^4?AD3f9?1)JqCV`j)_KyT&!1n%;V=R@J1l~3h_|B!L17fj|&Sp^! 
z=$n;WVVMQx)>FR@MQ||z^cwK?>_7=bIZMj6rnY(j;!FY%rCAU#B$Nxnumc3?j zt-9|@yvoFDjK?$H$mr>4t@34Jferwv#rHT$x4dytIehW0%!Da(IOkv#Yh8olATSff z-o5|GbR65Wb&tG;rF=j5+pm+X$Ty&+0Nj+PlD3tKU%3w;cMO2KtFzsjYFezWy4#Y; z15m1k0kv12upGW}0%zg1CScvgf+8y=Ns5Pa!=e}r64*~btoTMt*@iEg7;RIvlH+uj zb7{6V)>uRRPQFVQkzy0F!&jXSc$$Vn-f21c=r#m69KpN%=TC39p@K161)WT5b2}ge zUV~i`d-hNF+jNlB6VJWKbrT6i3Js6oUWtLvC{FE9t%lm54S*T_8>NSW6R;6Za%7gx zHuLZ(AU0llz2s(qy!YW9oPvZ}$5N~TpuW3(4*(Y4a+EDOE!~bj;RER607`gWN8sRI z_3kq$h1u_}{un7piNg>ep^27&vI*RhC#C>EDR**IZfSyNnmi`qIj#JJ@zj>CnUYz; zeKMp_&k1RO1Rd;3$QIs0+EgIvJMbmJ@fUIm7Ni;;w=*ujhLeBa@#G!9<+)P!WE3GC z%E-4shI%IQ8z4woI@{M1@j~}yzyC~IBtP%_;HRw(Nd%JjvCTsUI>Y1vZ{GfqF}DNdjb2E(K8&<-oW+^!Ywced|c3 zuN4);yX&1T&I%U1{vfJnEY+X}N9M+hKcJT5V2BzEur(Q!w=%Mre#L zmwoeFe>I0Y?*ERZ#C3YxRG(2W#({xu8&4{;+_`4}AQMCn0DU+DXY#~0{-6S6cqMS7 zf^*WYGt#mN+S+3IQ>J0mkq4x}RSZ9XwFyvkfqeeA=S%*Qv-%QsL}m7LHd$s)v4$7x zt9#PI{dEBNeBrmg{KLHO4|eXgfvU&Yk;8mIel+H^boSZ5|NL>QZ|)|@A$^jaK5yjl z(C@!R|CaGE7`rr=Nw%d4tr!6Dv<3JI5l}gQ-f6T+#)PDF0Bk^Q#uW`g8UZvG3sjTw zRi3#Hyiu1!aJ{E7&MUrz0NL*LP9K}&G3=$Wdu9@3n*`?~&t4g{0}Vh*S%bAc(f;*O zqme*e4RlVY#rHuBY}?m7ge@3a6sDv{o*l6AgATJP#k1}D>sQ-VSDfNTz4i_az+Gcj zG-Zxe*EZOc(tKM`SwITJiNFA@P8DKM_4N5H?`R#O+LCxe#5jmU7XNbPz4WDn2uPW4 zk3RY+!FfAqbL{$IuIldY@-euNT?*My-PE-Y+i8o-?6`yGV9-pnJyq2k%bEL-Z+}05s@Vn5M}CK)V{s!r zf>C`XGpA?=LmE426lYL;@m0m%$oZqKm=~RHl(&PD_6$%-G+9Q}v>(>xyCF6Cz?&|IWBvgg|epraL^JS z)U}Pk-yrNxcBLEP{#Y)fC1y|j>4*N1uj@FlO<|b#Zg333W!&3bOQdp_C#6cpPyscx zl5&!ovD7q0Ml$u#KL=F=)N(!5N1p_Y^0OP-QMhxH6p@Q@RF<0uuP1dXJ%%d63Ci;J z=7w58>P>DmjR0V$0IVH#?77qwa~JB2@#Df)-`s4!zvXE=VBQ?-sw%Uk+h4Ug`U^jx7DV4lDC$ueRRU=#tFd~cZTQQrR#`RCvc^@>W{%b|RwFExV$^6MSW4U_b%#_eFbf3IGn!o_=(v6Y!tn++dP%CgG-~Ww_jqy69Kz z0NB3z^Rx#Lo4+)Sqaks<(q3Y=kp)ZfxpWW_MEwAVy4q{_O%c~Yp9&4n%q?>x%Z)Vv z8Aa$x4?IGUhg=L|T#}{cQ61F}1D&gv?sZOZ&`$dL z+M?CSi_uD*6-6(;-%;i{Mw;tGXIq2SZ(VB#9{Ew^0B`@DS6XLtzs)7F=J=U=+at>t z+qW*hbVuu&Yd6z=#xlIjo={M8r*w_UL%Ql67e9IjND<$mcMj5=qyFGe@4mb{m=%J3 z9Hse!p7FU>$7!yLzxz^kj*0K<>1ZP2LeckT8=~p&ke7++6kD6V^IkW_pIPngGwo~S@%7) zNlwyaIV8COrsRxgm*#kVm$OzJ;}ISUqV~YU3+#xg_3oI|YMksD=UPT#C0)jx&OLQx z6v`$})eNdl(V7mjvC{!G&e43J)ms_asaA)Rd?=}h2>nL#Q1sg2`<)K$QHA$Sz!$D) zcgErpIJN=I1<-ol44?0q>oxboH=jBA1n#5q)XKRgS@RL9lu?{@A3ots%PbpbwQDxo z?ggx(qCIUm4SspUuPu~@la%x2s8hXSv_UIv^72ao>Lb(x3NDrB{iivVg1KR(z{ft= zv6g#G5A`E@oc8_nx~Hv~HWaX$YW>C<*WV~BV5BRVM1`xWo#z?&DrrF|L%= zXJK-0)cGnIMb>fy5T>YZqt8f?t<`AInE2d5Jp?XlLeDQNwa@&F1lw=e)<1JE-y~l} z+Ul8w{%1}KR0RF-zNG&MOsMI~u_KQ^=nZElKE@@mdG#`67C2xuj^g^S=MaCkL#EPD z^@%&^1pq5RKw}P1yzZ32+^rb}Kv?%ylt@l{y~nqpUxM%2tsn)JuT87XbhaPwKl*N) zve(?;L*g|dft^4CQ-2@aDK=i{;T$_bjS^KzNFX7BT}=WBV0%~Vu|$0n5_pd#aKja! 
zATnvlw!shC+1h36YwN5M14>DfQZUGJ;ewod;CujWKu995oC{A6(hjZ=O)@~l8Hp7q zmSzx@+~-`HgV0Z`uI{m`Zh4H_C)w?{-DKOUS6g#)H4ooJeqjh7anxD-gwf>&wt&SX zJMggg&RA+#bR%__fxsib>{twiVXOm83I`s*H;90S*HC+~>11TYhA{6k3kVD@vcXU> zpskh&%VY>5x&wnku#(%mdbMZox&+%%0wP(IKpKyXCBlu!JSAl8@982+xRqp*7{!zo z$;h$1@~MJfi82K2#X!N*Rt1R`#~|ifY0z>wXvj5|#B2q065NG+LPgq)Bs#Q#yNOH~`kKU`@ycK6i)FavDR1sho1`N+6fn07LOUB_n zj}Y-IPS$g-yvwg8U|W5cj$EWSO8hAYLwC~_^sdkHDyH)@l0^P8>iS7YIT&rCk~$4O z*Q^SnqVwq=+JFNC*af+w$@*O&4(}99+lVC6A^Za$apz77pvTEyyvpnGc3!T#{5w`P zzMKg8O4?0CBPRo&_`Av%SfVI8)ma?F`e9>rt>uzfuBb52KKrGMtRGowdAhhYl?<=A zFxP(dv&+y&!I=Q#y#qvY5ygwHg#y&dfgph*GL)fkw~pEu!9CEO;)y|2J@OX4fFmKU z(|i;nq7tb~z*|mb_#BK90AIipIi=E|EsAjMVSz0=L*q)A75~5~t+cX?NG`?**Yi~n zl&>h-lD*GSzYoUT^b{`H1zA5ccvP zebT zuUZl-emd$m1GrKT9Go7}hX5iO+19Z73ER@pY3sN2I`?+q1sJwVe{%KTt>F55FNMAd z5R$n<<5-;d=@kc9!Q}Z&htT7opF>ayDQdW%F{ng_a?onL456c%cXILyot6rpnS*-* zcEbRJl2K}^qO~PT$rD}XUH1~-^qD{JCxKVY9erJ_ZXz0a1OR=o<{{)Q1D+KQrH6X` zhH&bIiFA~1q>18$G1ju-aT{$XL1TZ5pe@_$BY(fGe^&tiVCOG?4S>t_ohkrF-beOE zf0)#04s{WrfPQN(WP(p7cR_O5J`PT+!D_DR4W!FEy2tD01;22<|J2g`EPv9xKo^;h z9R%i{8qBFKtl%7-#F>8oqCCZOYaSY5?hr4q63i*@f^fZbl1A#9L(F#peL^tbM3?-W zTO!X5y65;DrASYnMWA~L;fPHunn4+iGx}3yJIxPQ6_59pni^a1)MNI-Q_orix)Y#b ztIA8Z3x9Se=KvNm_j)@iTT!MV9L=JL(EY;eKzRWT84=v*Pncy%$zkSV9AACgU*>ip zz%j->s2)dQrqAV?3*{si)lMqTf9VRfVlL!vPR@?>tQ2eE-rHE+XxBc!(z447d|v@BMXxo~*?4F12c1O1W(Ds%- z+p>YxSRC7r17#2B#8P;k7=mkinCG-n0uRK+UQ$+W|8=vT&0gO&JpTYXKrb@@q}r{3 z^rEOr&YH~g!WNPYD`!Ozg&RxCwRuM$@%qV$Pdg+~yJjWhN?$d8UY{q>5>z*M#_-== zm-P&z=xel%`?212vQF2zaYwbB#oB`Thj%_xy1)Cf0sl?=q>Jo#h4wQzm`)wrx7*$c zU|Uj2Y&(|(;sCd5l>oVSuD(gsCLw{HT>=SUduQ*VMEw#Hc%LP3J7A$Auwxj3Esa~O zwZ6?CnU!QHGbh7J$B)Or&%zK5_!)WJ=0RBx;4*dv3d4^{lp0o`vBzu}o{xr(Bi zf@S>ufDVP(NhE-hVF92BErSAQu(tIdBSfGd%h9nS+}M*D0oQcA6^SC_4{Tkg0|)ng+{8<4nw#HKAc9mGjIzP!|ac{N}j5#AmJw!@eePy=8D zfFa;cON?azN5q*A70C!*fZQp&Qznw5$ufGKzuDW5OpvuU>7sOA0KaTx)D}R5h%g37 z8afLr3#9@GNw?5znb>l4X^X5!yR#3v*>t*MQC0bGqTmv03vDvBN%o z!H)JdZmP!N0O<}+Ek&RPpo4mw@$DU;_uz0%snT2h#W#W1Zv?kB)~KM5iQxSBk*~jN ze5+4A>X^?eEzl$XfiFqcTDQpxDyryboDJhD_~HHC*1htAr4dEF=Cx;hyv>0k2||6EzJ2kJ{%%Fr-F+!@X*aThuU3((sRdRv`)udw zPa!GW5Y!g}DM32@T|C2b$SQh!5YP{Gg=1OE^Vv^Mg0Lcxab^_eS2!(HPBziN>KkO| z(bo(L=c8q>zH7xoyg&3lNUnhh^RXNdeOq6!q~2|u&N#0+!TZzaZR)B!QA@*iD=r(S zn~dKZQ=tp0ed!jPdD;*EZd>2(&fi@3YnH??E0X6T4H=)Y54j|m!xfpUIZ)BLu6fiu zDJ9#I%V!}c0Da97s-ecZk9Vqw?yZ03YMc_qRy-32jsRfh2~P&iGlV<6nIm-k4tdZn z-#@14-VYq3`NC;KBp1n0H&E3C8OI!K);@g)P+t0L{0xC3PTyk!wTZm$A5OP|nP=i0 z1+-;u8;-#(E$6$Jq{3*T`khUWE7i;HwSndZmR~W$#?L;V+d?wA63S=;(f9)P`EzCP zA*lXF7gdJt6)KGHeb7P4X(5klOZAI38f~WRn3Wd7FW%k4aiU?MO&qRh^JE;3D$}VO zC?hVHuGSW-WgPr*Q3r`5_qNICpH>IumFHSR(<+OO~*90n5jmxyPE7sc2Z+w&_rg?V# zJ&#kb*SD{J@oQ95p!SG6E!FT7k2%c#{KUhy*Yqhid*TEm_?vCSN}#72zwdFcpZqRA zJ@mwL62CeNl|ol}{!pS)(OgU|B(X9M5B0$y5%{9389)1ocX`G4?({8d*HC!;zWHXL z*}-k}UV5cXx}c|(U=>bq@6|BzUv)#E5A+|yd)&Bvr1s<^}&3t`fa=*7!x~dT@&?6NFX7Bf4u|}!1lks4-z#< zNMNUuz&*eIG9YxXMFB?#1-Qc97$Q0}BZsA~L}n^7DoVn~BtnpqNbogElEQEX(QS%c za?@N-*Lu`egtIJBs2ecCDhdIGFg}L>2)2$)wax3{9#n)e58ub02qgye4#876Kn>?& z#@b;=9{!%m=E+;XLtA7di1QQU3B$CHC{jfzw|4ZyZQ5-Otv!|vpYSzTo`<0&*a7eX zgP^Z%$m%vU0KgJe1`lflLovIkhSSesU00dW*6dlb-}`Y7R$3>r7QWeM?L z{WfvGv)!psS6glMjT-=GXN3 zEnc{U=&*L{>u4wPP0(|x?R)Ti8&?2WSX5xOtpMO1M4$7owXx0`w^s*TxHMhdkyTUo zwms*}w-9ZUp*{>1#2V-wzV`j^xnBJFCji?392xL-3-C`$k@Eyj(G*2+D|#D)UJ*(> z4~x4|kg><$6QM1b2BA?7{ozr_G8zHueV-y4DNptwAjjwk(e+$EhOD$-zybUbcJhC` zeU9z>fAM|W^y0I&*W}3n)TwakwpnorywmWgr{)#_$c2$z3hhX?`mB5hb3|!#)h}#pbYSHbL$`tdvK%Ywh$fLue?(9mWxr&n2He}*-8 z(N6eJ)BA{|1tb%QRtq)2;#DYcpr}K`(JNkhtMt1 z1JzqLx+h0fVMW>hI`qBXv3bp6=24Q&0wQ+-6qZ)tY>@!1KvKV+P&qY4jvTEf5M@XN 
zI-Y6Q)(IwerVxDE%PTCh*!M^0IHhH|WArgDQV&~beZoU!UA zCSC?hY}IdER|SNtA4DS)Wn#*m$_k~leDsKWbWaEO3s#KrMRR(5MUinYPQfvs%=3mP zXoh<|gL`Ry2@V|r==_T@ro$NpfalafgHi4FO6kdDY~zDuHOQ(e<>-*3bfB}@2DUv< zeYwk|^x8mIt6x`b(l`|ut2!%!T)?(Fni+R}65pk5kP#I$Zlzp1$)ITz}N2&E1b*6I((8yM_c3Aos4(Pl;M4B=E14 zKmyqQS4ux|?Suq&LJ8b?^%r>ZCHp!5w6u3EuXY;8mB2s^1mC6~$1G!9$r#QoeCT*|xML-?naAM(a}mK@05QW6$&lbx|1j z@^Osn5rD5Fjyd!_lh6#JjvUB8WI-qZ06+jqL_t)fJz>CRpc7FuTUdPA5tf@{Ri!`{ zF0fuSESG%kWScR)0&s7{Ui#Zo0O7DrEvp2SEW}`-z5u7m;`GGwNF#}#0I?AO#(Q75 z570!QzaqA=Dggb$lq~~V34?UJz<*3P+KxfqQRj3dTKYa1?WT?xMgF_wNyIC5UI@y+Qj`S@KFFn0HT}aO*_$gySl8dcD;3y6pMph&b!T+XA^eY6TmnyvWNTI zESG5P+}uKy#Pl;I+1LO1eW~7D~W88g9HIQ?ef$2 zB#iwO0AY%z3>o_}0%^bWmt791PTJd$X2qVfT2dPTF6 z1aJfAus}V5X*HX+z+sLq5?vbWB}pFjQ+tOTSf;KBP)p25cozsh=Po9Wc^0Wob+9I#+#^pPOL? z@a&H=rbgYdMLnS5p+@k`lBCe*2+qSeU(q(nfh{G=YOLR69gUl9zr)USHk7~pR^Q|9Hs{ZI9im;Id7De||gTK~y+X$NLtBT{Y_GGz zaba!nw#tg-~T2KnqE5Wx zqZPpz(%ABZV93~kY&ymFIVcvKBQRQhrE;Xh&SN}Qnj-qN1$0Rqqw_V#(r=nK;^S06 zs%SRU*ZCEte$M;(vb}nZ<+0jj5DsJMlj@;-11=j{r3U)AmOS_u6Tl6amq9Lpzqc*9 z{USg)oPxc57Al%W>_ zx=C`%v5KSz;Bm^-x$ZaYqg@uB zc3N=#u{qrD5A2Eei}azrp^l`AxmGl8q96NiUw-~;oN)kzavW+J``Er2h(;hH0??eO z`Hua%cc+B2EnK`ehijf>b48U7s%F8<`lZiW>0}~5wW0$Vd(vd)1BB>h`LW130Mm3* zu(^DYmA9E|`F!j@1;+;a16O4H9L*Un>$Sh5{?tj+2xasV5g*Q-W_eSObzKpaRbOAH zJ3{0{^0|=XbZ#1Ljbs&)AUMNP0pXQA*~bENo!1o^42vcVz%)P+(KzJcI^%Plw>+pR zKO}Q}KP#)0aCmr#i2p55GavK(M55CHoLdZFWW%4Un#$rFjXjmXxhh;G>N!aOfAjY3 zR=0AY)%NsSPGPoX7p7U|?&sToe(6d(`>ccQ6QpS|#lHmx!n zy0G2t*%wK?EWMy0-CLwPYKP{VkDmlx3iY0U;>UNK`))rhdgx}#7ZsTtT6&(OuhAiW zPKyAl6S?jBgqJpyGn18NI3;${ZS(3?K_93eP}um&4pmoeh>wfF5r%*@Ry0TZOy^qS z`#4b33(t(`lAdw+Joc-X;lo#ZIXC$A4M+Guk*e_v<@mYu1@LIMd1yf+d^0Nd}4Toa`xB(Td$;Et;p9`IzVjxtVFS&UA;D>^zH3X<{($V-8LGy+ku z4Bc;j?pVw1iP|ksJ#JM8pJKfj!0T5mhFi1KzV-PNYzn-(fB5~qwm;m3g}FJFMFPGB zYZqcn0G`TVp{+`O=A4}}k{wjVKvcWqh7X`fTm3p@Kt0kUE(?L=q11emTfyxrY6*Bc zhoE){ag2=1FclZys8bGL{m9T%63*-?$HT8(XWO7l81yJQ^_JNoM}Nd4G=+N!aEbM{ zSktzJ0G=4cbXr4mBj-nK*~abGF_h)GElitDlH%l7;mR#B5?^Ims>~wsb?2y}V*0U{YYfO`Sg5 zX77I-v>9Wd5$%-hxR9Ojp<`6?F1h{T8>d@pQ`D;K+W4!ty=F~kY?$pfb1E{0`xkv1 z=n?lX;0WL|urSmFL~A0y0iv(N0O36(J8No&FC3sfLS($6wKL!%R|L4|Mg{?t2LND2 zmodzk6+oS-WRQTM-9&Xa!=2yW(rw*+1NMKe_%FsCeI~sENKhnc3Voz;Pgjy84z6N) zvY-%ddHQjfNO0}fz7YYD^t&SCQ{jrOD$BPNBKt=N;D)EKL~jrUJyZLfzZ;-My!-;F zw9J2T>lB(CjQMsF$2Hb%w8H^N2BI;0;imubRRQz%+QVkl}>_0Tn(@G&lHU8GP5Pz6*$OAWR^nK&m%xlo9{kfe?-t=XwfGut-+!*pa%B zdde^8`}~ux#eQ8|oBJB5jrtWz!Gc@#`BuEdSHR0AStdW||f6bsl*Upwyrb9aPeNhU>WVi+9OTR}_?_)hoVw zf%!|{LyRYlyB_$s({U;>3H#hChmq(nl)z9ji{aGt`J3-Pm(#KS&be~>sX+sMP@`~f zVTZ-qw_3P+jTOxMv|ry7*Xkg@X{ujzitbCnC(&u-mA*NW{`&8pYK@JZ*51rGX1>t< zC6!fE5t2isFpe|xi7FpuJTqU&$+ULOW(o`2hA)uIfmgI6!sm?{2p=9RI3)f8A@A)V#V{cc;lyxk^cWmqQ_=p~7_=IC4R_=QE4n?cio zz;ODit*h6{%gS(oXmtVeidM;RAN4-uT=j_@({ip4chur=gsO!C zDX&-jW%Da=!MoU#thTP%HgmsvY4JwNlmkFc_K8@0T=(!h z7U~awdbJG#CiDY&wq6ghzq*`Awxn5`@$HDUVWM_kOOR) z`tgEMBqO%ufJ`f&HrZGVw2Z6bUmQWA&5-AUNul8xJcydyvW#tx+$sBoN zt;$@ZK>l-#qPjta+_9i?>4MViQX))AWJT0=l<4s!!0Hh7Dl9CeesEO6T^XidhlYFY zPfsnds;W}>ut^d%m|{(xJ+@*i)Br;PFdd^z^`|0zzx$avfd66Z?CJ(QKrgtKfC*hw zdLsi;?US)4!@IP60-U&+4#I{=R3|{Z11Co}QQ?Zr4`Ga_A|pkEEw65~=AL1jxz8cC z=dArO(g9r23l*+Jh%(Af_z;nKyXf|XgLY%Z!=P9S9M_-H`vZfitedf;g9M_;=v z)AH&6iW!qF1V4QgfI9}gg_iB7Gv+$Hf8@+yL?P=TK((-BjXVUh4FJRn6dMH87H_#C zDCKBK0jTMY4qN8{zcV0GiQZ0P-+zDmG|me^BnOfxN%9`xTt)sB0%!-xAOS^$U?6|# zENX!M;dG#nvlt|0B_zEA{2F2WIJFK3cyu4*OXmnU3#ehV3IhG)_m}G@sRW-BI$IiT z+qU&~*fHlY&y3i&zxG8tc6ttp)e3Cf#7aB;YZt%oJ$}#uM_MT>Og?w+;Xa zS?ap|kizPAf0odMe}PE_^bT5O!C4OA zY5wtfhPlFlR}K{rmc~4((XP*o1n=Lt6UoOu{eg(-izI;GHjnIgnd?}e$AbQ45(&UO 
z?Z@$;GLq35U(w(|5x~#xd+_V~yFK&R6Sg1Di=c5q3*&hMR1?jmJy4XSCigf^kLa-E z*vlV3@9$Rhh2MVJ3d%?TIr&)2uGrI$2}g1OI2lJi-v-yv59%+?DGn_0u6b2X1kGhZ zw5T8DV@q^K0?>k}OMofHKoDukyL3SFE`Rz#-!*25=XPH9UijG4va$db1)g@;$kxYf z($Qa~SD7ollf0Tc`d4)dp$}0gP!yqFJTYiTaFp5azJ0Q7Shvmk`iF?%B`GZR75$Px zysTX2aW31=%A2k}(JH|moyq;0(nTfIje~4+Z6owAY1Y}_do}6 zG$Rawz@E^KABqYx>_0BN(A%mhK|q?Wk-~jQcQihFy8^i;+5ShoK3_{da$fl6gGK{F z{SfT*cRmY@_r6hG#(s~>w}0`^n9sNy(bgyq_ROg#SXRk&3gJGeXkMIpaysZw)ET0u ziUEKR`brh~C%EUZ5=Q+SCL!@~$`DB%vuI-~V;Zh%=w}4{i8@T&>h8esIgh#^V^vkx z=9PcC&*8ve{3PQQ9;BZ~n-*A4M-wZ2+WlF{?}Z$#@$0J+5J042e+%bNr@v^c>xvvD z{uJWQgvai?(-yt3klYyCElebMIh@@iaHD_q`dguwlGBrpl>GRULsK+ysetp^<@!t8 zRj&F*E9OG#Kb$+F^BHPb!g*+>^cBE+U^rsOU+^Q&e0{5Z?O}e`BpR}4fuD2V7V0GiZ{G9uy>jV1#_4-_)6IoF@mgXB&^SVX5VMC9;mTUnaT zGsq~k9<%m2)cV>R?6rl@!EdcZm5Mlv5NXwKPp_%@$CdpDzg~X%kF15|yaGYQ_bj7P z0QKNt56h9W?e0JR*||&Md!U&DnF3=V3}3Mu*MJxT;{pJPS^=t#5CJ!}DwpWHL7OxU z04Rkdd<1HpJ9C1CIUd0YlHxi<31OTJkz8`?#%0)4lquO@NJ~hHoK>9^(W=B{gug?> zu*tHDNX$v(q8lO9cd)+&kQ>?vWEUbzJ516f8Sh&DtbPzE#)YUamZo55fx~*XqaF@e zHHi~@2%x=-sJ0=?1Q^W*Bugd9pbTAs_)!eOFqA77{^lhB|2_v@hJ=35_Oc0+30MU5 zfSx4+sF#uibxJ5^-#U8=#x%UMM3MFIr$nRR*ElXP9%+;08HuD>4pDux4m=L7+HnBk zfcI2xWVjo!0p9g&qU)$zHOsEIZCVaU9<$=SkQL=-+N*1}S@G^C;ZzXNAj%t+2_Pz? zmU?|?7Y4RZI%r=j8*Q~bi`w?OsBLZ~(IpYra6@%9=oU!8`U3K3 zfp)i$1g>p>;95SP!%}+0Lh`b1D+4fVhc+f1ZgW8+AQ%ozw?d~-lwSbw)oU38FDHV%I0}%?iu_+HxaXfS*8V0XlquCL|Nb z7N8llG_1k*Ym2(Y$D4zU}=sh|wzHfm-gG3cBsvqZ3b^?34S{tn%V7nrfBy3x& zi4ttLO)dSl&$KBd8;#hfFZ&JUyuaI3KlqmIH+Ks9hu+baA)I8pPn|?_6bT=)Q_lN7 z?E-}0Ul#K0!hs~POrUILMz$S)@KpQ$RX6OY^h-B>g)xLIg!~~$m!*JD0Y%IS4wgy2 z$V(uiR8JVP`x zGhBM1M?Md5Z&3b*L(cx@->v2Ymz`-diAF4#aXR-UoOw#>d>z2NjE)P8^oU}dORY68 z+RQ_s;9_2serXd(rB4z1G1}9>y%NwFU{=oH$~`~i$HZT5{lS^GdSx|ZSt~RI;Kr<} zO-U-D*dTg1#QeLRRT+vHPvWz#k)0U<2nOh8tWTLb)eb)RAfm2|U9Y7xTH&G1K}+f- zu&utn-d=k76?^&RSFMw|Ln~^OGvm_lecdj-{7U4YIAHFN0MYKGp}8J4O#evhw94ke zhyUj7^1^uuKs;Kic5}S?A^zvb@I6Q*sbc~_Q~SM-c<*2OOMP?DIw@nk70vuGf!AU5 zCC4MQ1wL!s=ono~xd9}HegJ*xyPOk!gT36ah=k9mppML!I8b9yh@__HSRVOAbcfXd zRX_W6DNoNq!^~gtvbg}VV!YEoqeO$JAQw%6gPR|6V?KbBu8#iOVb&-@ZJG+3g zMN=gaalhv;KF&2KQJDU_+!d+b)6r&G1np8G7f+knaJ5W@2InoJq}m?`WcIBUV6d~Iq73N&V9EZsuw+m6I64gWUkVb z#FghqC}w)1DOML6&r+gpJ7i)Xk@5AeJL-oN?r~b}r~VD|4CZ-YkWT<2BMTZoX`f@> zZS~&P`86w+dw)vsuj>+@(MQcGsvB?K-}zB_mPq2;2SoxqKyC)W#`6;(_Xnk46YWSy;Qu9o1hD=8 zl2GFJgarQ0B=84-?O^F<1OpZd3_vpR3-`2bCShS2d5SW5=EmSdE_&Doe&9h`oTB0+ z22^q5OwqNeL<aMBVsSy&iesF@Tg2%L9<=go{=7lN$~Gj*jg0>d|CPkqbNww(%TP%CegDj)4(3V0)@{yC~0}=!9lTc6CwG2f? z4TXtB7s$JDcn+mvh$A$C(&rGVtL?69ukgAl5;iNV6q!YAT4^W8V59*Q$iPX47Ddr! 
zm{+RC@0^aM04c)|+wNWiDey2v{(IHVQEL%g5a&?=3qEZ3@U{^N*N>z$LH! zPVj0$r_4nR=MO^Ib;ZHfh#s-$iXVoNv%r z5qH$9ht& zbWa^D-ZG{U@%r84jGCu;U#n$Uv%+Ov^7D(-D$ag=Lc2ShD~KXzRIjXEHmu?~6HGk> z1H2jyXVajc*U)R_C2geWoZGe*L)>vrq7i8xACHwKRJ$7M%4-cMCYay9{Mpic0bJ4x zk4g7bMt1?)-l~oGj$N+~;uz2B+ z{0IRr_xA2|=Q-Q`H{;Q7BPv3?V;h`yo6n{ab7fj8!3Trrl~f)y-UrY3A#@^Izth!y zLyaSr>i_H4uWI#kzPQp-llcijrj=7XNKiMO1Lt9lQM7B%6lJUFTzoP#DKoWxi@-Bg^ zjS8tmub-n^1gcfjL_qkR)V;jsTEBY9Y0HVr{mBx;21BA0Vn=VrqThyB(0a!fsFXD6 z_FG#w+y;QjdB@Tmhp(q5_qZ*I$f2lnp%noscekBV4bJ26tvvf4;Gl~_qLiO(sNtc^ z{C6Jp4)M=FmnU7B{4&n$3PtqEmus89?s@`&(F;I>LCS2GH=YPMN^T~N@8NwI?{8!J zI?uPq*FXDn7@c%`-)_%mP(KlZZ3qJLnZRKA&0QM#uibFVb5{7)R^Qw8876aQsa;^E z_?FO#FgUc&6E)TVfadlT?FhoO<$y^gm^~&np@Wh!Y6h}30*L&(+d;R0L!jC+SqA7rk&vt=Iwi~|N zqr_vm(w#0~HK%vd1E^xg0W?kEdJ%C6O(2PJ+$~+prP1wMO#aI_}E(C`-PHbv^&H?#Fq>6<$&?wrN?h{1vQ zh3P`y_yT3*OyG~Hopm^prt71nWWd}k>F$>o7-xec2z|-MZ&4h*YxiC{BvD-y&-*?1 zm;T*^>_7O}_wO60FJpMXO zG+m$m$NI`jBjBrf@ALzZw4Ay%<>v$PCU4%&tgi=&g0S}se0edh?Y>*y%b>4@*Mb1s z=+D=#N3>)sVH|T8*Yo$`IuQ6Ff9S&(NWE_kwVT`~EvoJNxr~AC=xhaW3yxcVl9G}# zGBN-=iI$Sm%Md{3Gz(_VxwyChMbXiv$7Xz?T16)?ZcbCCmPXBv+hc%SW_-~mIy##7 zY(jr?(ffLO9$+RAV{`$AttzWoZvBOOaIPP}d75Q_ryvuKRG;1V#h1cHGlE%)6jz*U9rl?>MNOFDM$1}Yh-xvT*5r=X>U z4k!&y8Am83w~(j6&0kO zC%|-Ghh*gCn`vpy*4i!un8oQDYn|}r9Vec@FDRQEvCy_?14wcX7TdxE0J1hl8r%oK zEfj)B5Aadl00Rx6%o^a?=Y?v)A`uZ1K6pgHWIYEsi1_t8AL;*uG21M(`Zl?rTe04w z6R{_OZB;H9y|7>Djs&I|U_=1I7SPB<%gERQsG5|EHMxxYp8-}J2W~*~(=P^uBA#V6 zJ5IZPBO|1gmLCY00p$;(?;I8L4LPRebO^0kVqXFEj(y+jaSuS728PW@Jr3X?MMXu6 z>FV9GE)oC`MhK|K0GV0XcWpqCwmFhp!*kEhO68v=Pm{8)QmA?e)@E&eeE@*M(nX%9qFNJx~8@OHfA&hZyT+>XG# zF9Na`5YKl<4}8EDod6&kDyntB?BsgbfggxLT>s&RA57kWUDvL$nuRSX0HX&H>73KF zC7723DF`6Jl>d?_`c$T`DRn0!ca&-|q zTTGU(rU#PLgKvQoj9n$NZX8;*OLkq02SOFup|?3+k~&1@ld}ofw2VN~1Yoem#Kiow zg!J8H-TnPufZ_;+tiD*_pcMfilCdZ5&e#vkR$C?>27sU=lvH32P#f^3(3Uio*VV;i zMM+W`52Pd~CIYfLz!g?uIzlPb4P+9w*1zKgPX8z%AY-s;TTfM*0){leCkLLE;XM>y zbH6&-)UQ&99WhM)i&lbK@a78ATpcdQen@C2Fr|BzfkLKL&6-Z#0}eB<^PXn-vd#EI zpoO{J>e>E=_Ic(DBP;)V^!{%O`Tx(D$*z}D{mHeouwWzkV4X(&&>L>U7(ih)45$dB z0AsN4f1>Y1NJKO;P`yvrGEW-zu1~7Lus8OEb)QEB%O7NX;x$a3W0`=@bL@^4X()V; z4gv`OK&Er+K0WyO17I$5FZ`aZEk@-8bCYe03>XAt{s$<{pSqSOYs}w{0FedwF_deW z`LAHvXjjS{z^DW6;DuyYV1qGesgd7EfHJ7d7Qn+%=7b5qfISVSW>NeysMp}fK!sq^ z&kueC4$xO%!{y4;jKHxzT85=Y%-!bEVBhrrVzmCl8VwGtw&xqv02u~Zv_=4#xPK0I z@K?Ks-;d{Awg9w8^HD-hnMJOKc2pK%6)HBfpvKWtV+SlwXsU8J*xBcxRPo^B+SXA( zn{I$EC~MUDz?Np%J&GI&9!^QA4@q*Qvd*k3T^3B{<8Zu#NRUohaO-9 z5ccE1$m~Pvi^rbkvmSVHfgX@7({4orU~?_YkwKNlO_ zAIeZAM{u=NQ2XR7k&ySg}HjA>-jW;vB{Ci~7X>!#a`u4U;6U@<@<=irkJ-#4ybo#=_$uBVed zJ)Y;Efc! 
zuI^bS;`k)A2wYV@*P|G~I>HPA2y+lY2{5XA{J2!GZrR=Yz=R0V#+DEYEUK_4hOJ+2 zZK0-m+;DmQ01I}2a<`6ifQ4qKs&PSu&1@;hk%5(Krz!u(^DPKW%AMisPNfi@aUWQ1us1 z?Tx|-_>0S!^S84;#zTT^gt&-5T!=$Do&@>Vxg7<_QfGCbX38cF2N_vdSb(}7rP)wO z2H*Ko075Pp$^U_{WVi9QY*)ND9^|GmW!*E4fIbxU0q2alm|i}9q{|v|0;1IK=sA;NvmC?H6XI;3@jfl|MSP7xboUtmjgQ4ogNpa;t`7ymyqZq1}Kx( z3?Bf{L_J;;T`yoLTMJ(R#1~q;^A7ZGFF)8pC@7x1S^5!nN*Wibp+ybUaV{60I8%pdTMLMzVF1+~){H%aX3_4})E{msY0>{5I0q;e;JfESs zW1=tM#z|zhC-f=-uOw}w>WBXS5CJ4}N*AvGxW2vnEngKD76x+TIqS+QD!FXtc)n%r z@o!Um){oVIwvO2mmV?O$B_hbcwc2U|)D33Kw{H=;Y=M%yy7j`QIDlI7))Mc^xjVoo z=~u` zF55tCrZ%QEn6;^;fGIKoy7+;|k%p62HKay2ba)Wrz+Xmz8d-p3PUWJH-=pa#kmMspOZK`yhJJ& zv8cU-^Hz~j6-5=DvaQ~dDbEPHT!t2lv4(vwlN;Cj+lXYxHLkY6Fd}}784%u>KDyZs zDN#|;u?5-)pV14qMGL0S3is?1XO0J&c5Ej2FAl=$>jUChtvt4O@%c!c6VCx^B`Jch z?A1UR5qs0oRz*yV;U*byuyd8$YmXx_O+_&COge%G)>aAl=DJ4u#A?*db2g z#D62+TOKFID_N9$ui-p0khCsv{d|;id%R>oC&lxpi|1`p{L6p{bBoX|ZmvPeN1!%l zVP(ax9g?wed)|*O5Ak1Vd3Btp8@UfgO(Ofqn2f^1T z$drhPsJExb2ACager%7w3BC1ISLitb3OXSh6Q9{^{V6(nv$^r?BPd81a2mImSy&Kq z*_pikXuUI$e-g>nrS>r7p-_5k`JC^E?rnOLis<3tVVHhn6dML`8B)RxxOYfUHpME` zdQ@4EyF8d{NRR`ll7*WkDZiqlqrZ;3L8u1s#|EUqZ ze=n4)L9$5biSXc$lo!j?Kh{ejtP=cEn!j!l%!iW2*OkeWnVy*`qz*ph2IL}Ne*wTg zJ^u0}cpqW5$v=PERcR*2f{w~ShYY~A7pCBOg9TVV>wNz5?@YEh?WZGKWJD!X2mjSo zY*L9B;Gu@LN8fTx19T-NX;EBW{rxou-Q`Kes|;fDuVPqKrjlvMq30LmYO$v9(Gx*| zp9s*Dl&UJwV#sM&@A{<+OcUx%ftSi$7vy1D|4BryQk8Bk2k_rTk!y`NaDIq2?2}5I z+XQieAO82kD4EpT5!I77LH-4p)l4R~C;q^6>315|t6&vs0U@%RAA34D&}lmq>h zDxI>tydJm(eM9@EOE6Nk5#|G204o1)7!%;nF1+CyFF>X%aC|{3{opbMygcaha<-Fp zg7OWfnle^CzOWMLGU4 zG9iUIcQq!g*skKd3386Zx5uRO0N5G+V0zk`!1b*3cNvh?yPK|zFgc-xtpdhGn>#=N z4b&_BtwFr)`XNm&hI-`&Gjsv8t9R{+g4Ad}V}i|Yl?usjX_-3xUni8<_5bFCdS0aG z_Vc?>tj5WpA_{$FNMU!FcxvU`|KI*-`N=d$kTh6K_5cQ4-;6v!JW`NHU6fDg6%F6N zWgfBrWcgI+@ygNwOkkWxw4{d^fjL+b!-h-C%DMpaRJVGQJ-^G)Qr$L&B^W`4hWfW{ zQpZm~fcF35n_do?y{7G`#vjAQ;Gv~QUwc+AJMfj{SM=pVYctQ zzr7j`NtQNdA|PQ|iF2ND9f5h>zzVDrpTh$pj1xBvqa`qsQj_v4vSXDAN<&Ge7LAjw z@(<2E0jl2d)1F;$$`@tgk*T7D=8b`f_nA_F5k+Udxv49R=o+NBnF4eKjMqLdE)x=E z)=P7zhyh*mB9cj)=+j1x#iZT~b#F_E91m*$RteN?!C*d}mh6yU@ZJaN(rU9gQbZ)k zqL~jz*zxYO|LO%$cfv$jX=`L?)TTcJhOH%FI=%9n^k6c`nyw;xkw-KJrtsor13u7-*KRr!uX;71k z4kXd!1iuwj7XQs-1-P=qptkcg;1ZN#;%=@o*d-P(JI4y~n(f8z&x9 zzuVt2^JY(|*7w6 zk=mIJJbUBkYyD*)j6%NQv&elpkO-k&Of5A_!lR(oFeZ;q5GC|Umg z7P;IR##e9IHIkntAw?5lymQ%D7jF?10x`Akw$Hx+>SLMd$MkMBGTu#dU+XGuECD0f zq!ib+A$I*-$?qvF+4X&`poZA@jjPC9?@ZAMnM3%T4pUKbE2xDWoU7S~%H^!1W{=RJ zeiA2(hks96M5gjGHB{rUO1mWF8A3y`Lt z{#A3ue4mZKw|Qqt<~Joy8A@4WDL_83UZ5|Ryq}~FSbwhf!%Wf33v6jkIW0Ue>x`uK z$1x(@9pSGktpQnBQ+0{gE^4v3B7kW9cVrFSSDcPI3)mZ|#nM0d%FIsD8!VDZfi`RO z3QCiw5sM0CA{Tl6Kzz8FQEy%lkB$%xl6xbWS06KMpvF$hM79RYNkoyyQ%BN@4KE@V znocbuT4V_C={^A7d7Y_}Qpdks^TSII;3T^MR}7%6!eCc_QxQCp?+g7p00x7sCI2vpE_AdM$N57f)icYJ&V0L9nUtAvYFZ zOsKyUP8l-LdhO*F^%&m9L}7p)rD{V;62;woD7U4s|A9sH+H3sMx_Y~J^lq6Gmd5e4 zb>K97TR7hY>fowsem|mX8Dy_Hm`QU-Ow&1~OOf2jmrjM6J2Ymzbsitl$vKGzf6b=V zy0(gz*>oGu6S4N~=@Eaq+kuPwFH2=tjn~0wYYIHy175Oq7>_CyV75oiWKYo zs7K@-OngO7(}X>#E?Y%!EBEJbgz*z`{u3R0;{X;+kp5N362J>zh`&WP`<^+R%HTVD z0<0==`JP>XT2$8cx_8dZ7wtOFBi_ZVlLTOG|8Qy$IgOV7 zk07O{=szZLZZ_Cc!;pYLRqY>Wjh#kg+%y}I&f=}}Ju_gYTq{{39tMjjxyg{b5%+%v zF>4b+*aL}drY|6X00tVBk^RzzU&v`_+^>s-Fmift?bgf^KMH_8bQ~!yQ{D6!RUGo_ zXz?w6jSvu+(rpmmfxhHnCpfihu+-;Z^X~i;!3X#VsLBq?8yB@E?jIwwbcGXZ7&cyj zrjhxgd7Ca`4y6AMZ$N2)A2eI+H!jb`>0C-Y$KqZ7tq|tyhIVvYWDE}hI(aSn#_!)j z0s1u?+qWQgtOeJ(VG9Ik;DhmUKzqLXJ|)Z>yH24RyW{mh9{6qSeb z0eFXOYy>eoPx6f(Szw*dn#7Tq1eFnzr7KDtdWgk*&v^%^_ZU8eFN3x^n9+0Md`9_d z67T^&`~(O&>wExrmrB+x6M|EnCM6!>qX(u5(gK@l3V><}=K6P8`cY+#)|munoOus` zULF$n9T^!0kHPIe?ri!em1=(4`mSWULme 
zOR#+8o%-w3FTRG=(x@2*<8x<7f;qIGi9W;#h3Szlpei&CAcN9I*m}!5?bOmjedBer zkm03Rq2F@~*LXcb1Z%)ra1=2w`sU2i&_II`HP0qLmAl8IP0ZL~R>WW!tN&k;NEXKd z9U(fmH>tg;_uew6TL8#N;I6hR{YTAtcM6seXTQ9vBGy9@bqu(VWF96 zV5?%b{#_A}76zDpchWxSb(!t)DNn>n(n>?wYFNDL3)VEK(3agOLs%u_-e3GfN15;i z5|m=Y#la!Ce##DXDqd~!b|xxBu;i43ymp8O$!5j2?OAA%r6t0$$3Tg_O_zQ1=7%rF zXY)+dmv^`CQT?9xme=_xDZSPTnM0Y-q;~@mC-PrC+qTH`uNbWE!T&7=V}Wj3?$3Mb z9p{^GdUQf}tPBc)Z3jCHw-6IIPsqLVD0hS_->f}XA`aD$y7xO6pkjCX-Cc~zUl51@ z3C(jn_4fLMusYpdTtd2#c@$^}7)NQ306?ll91Y^#I-!F~QBwYq^NR@IcGN!xwcH2zItLIYU9 zLEHF+-Tx7^h2n2FUyk2>mHhWKmil=6&WkYW6@^gwquo7^igf4_ipQAmwZ#g?n;YR>weDb-m{A#+!Rko3XSH z_cuxj3=UjHkUivnWB9>M(5pp>*)P(J_~cjPEe_hM)Kj(;CAl^OZC4>y#ZfF4G;QLjB+tnJu^1* zJjdwXd3s5!S-S5%55vRRPTbR$zMl+FOB6tKOnIRXCS{ygSG1wh;}JLLLTdg6bWzQG zcO$JMG^_GM5p`Qnc<%$h-*>y*jrAzms^9zgWd8N2XZ}}}ApIdZe)~_EW2Sbf-Q({d z253GKLsH_4Va`CLn+zq0TvyZjSlND!$DOqS9Sp5m=4Pip(@_~O zc9lOLzWDyN^Z|rr2thyH?cc}7`k4shh+xA3JripP*ph=xop+}wK=QiXnrNE54`upC z<&>JF*;RNJ-Mg1WLBHIN9<>4XWP7-f43iAJCUUVKA7o>Brf%g?6rAvJf2Q6p2D;crUX+Eyy)x#hD4OYN?!Gd~_-xI~$AQL@ViE;{ zk&{ew0`M$TD299L8@*y^$9+9(;@jpbE44NNOTer4v{7=FC-LpT(iouvAufBr zei%Zf^h0)Lo)uXg7#_daqvWxXYZat{vqjS#gfsw&stAuMTO!V(5l4@1J> zd|Z;hkg&AxI?CRCY{xhKT3nClOw-MTW- zlhAC<5d^uy_sI)eo28uU$uWJTJG!pi(Y74CkR)9{f=`4;WJiO?_UqFs;Sm|svOea# zpg*f$<>NoumhzmxDT<&B?K%P#a!MBmtk5bYq9c<2(GfUk$g<26wYgirapDPn3+^c6 zOXk$*$xD(g;mb5M@w_WcKRp9mxe;@bx;K&{3fKJ-)!Ffuo%ps93v`d1Rji-2V(DXyM3Fz=*`A zrH<;NtmRPF$jac5&+pG#)Qw!eEp48M+67S5XPV=urmbO|$tZ}sz3}d86NRCS5 zVL5{+TD@nky8ELAHcpdnM`b0>zJfA?<#m}i&ok5QmJ8o zvf6U}!Zt4JzVZI9tDpNgX|b&nos@mAlW3pq!QE-cctJr)Eocx8!Ed>;_lzwh!GQ9# z7CA7M)QF*lpf|Vga922=s+Y?`SSnS$EtiLVd~fulMk6mg6V0mkoWEZTUgtT!u`ani zC*!Uy|MDUbr#^NVPEf(&rDt&j<&see%QG)3VYzYae3Fij?m$ncH_T9;-`F$qJgdg! z_sA?R*8Tq)`^u;+zNp_Jq#Km(F6ojM5Rev-4(aah5~KyB8|mhu8>G9WySuyJ@&B%S z*Zp|;bk@USo|%0PbI#uT7dsJzs~(RM(S5BfsAdZw>hEp?xgzh&tH&|Km~qwT@l9Bc$yi%iP7a8fber`}0Jn+7FC}wy6}}Gz)hs86f`TkRC7?RG zb6SQx-&xHo-1 zq4{o!Y!8kOby|tzs)M#tJq;DDj9Sx?^(}dl*fOO+x9*!Q;PgZH$*sLaWNszFd$6pr zNqM);_*F*$JUD_5kC{FXMS4tHpzC&Dlujzo>s5Cg`q7YGs5prJ&ctPI10!9(>f*|Z z2krNENG&q+j1kOzFTI?vgA{~bT97H~O*D?~%Pfj3xp#)$Y4On$7hf($U!Tt<4ev1O ztN$32c5wLOYg;%qFB0Is*Sp=^Uae_aU+ldbv~%mM@q&R#KcYVp8y%F{kj*gUzEPJ1Hy|1`vgY zh;ASPg~AmUV=N#fe*FD894EC-|5p((_Zpu=90|Mjb*tSrZiqiuU}F(ae5@G0rT zv&ez3(Bv0w;jgNypt~kPEusMEZ)3d697Ojg`PyUvlP{XyB0}sW;s{d$^yN%YWop*r zfQ>C(?+eZ#R=gHnMuIpQsIylGQ4{1d4aHKip~Tg4de&r$aLP)v0s3A_^3&Ax6?B7t z1(~GTfo^sNddwX2ZD!6~rPy!Vm+mnTzNke;OMrEV%U-=HtyqS-3wL(~B&BetI#bL? 
zs!M4CWT>RN{rkVPO#5Ez8NyTHe_fj}(_R@XWEELq)7S$ziK=~PU&X9xPTaoF2`tPP6oGC4@TRtcXt&V7B zDU(UZ(uK+4O!spCK-r22IMV)9P&Bz06f$-NO3{PCZ4d5HQwzZg zH8KVdKpTOfb#;x(dhMRBrko$_c9La~yI_LU6N=E(sIkP}B#H=NB1J|Tk)VZ0n$%aR zGsJ_9p0wavlrG+?J!B5I`Cn$B@_GiRs#Y0O;m5VX&zlMpE6jl#%?oLcDef>uDA{zM zd5QAPAFSff*sqM8JL9mDC2!G7`HIXxNolUy>x)cpc2(KD@Mzub>#g1DE~!7lPpH4Y zdv_;z$d6W<$pHfhaPGtZlue~Y+%?lMlorwWLw(M42FJi7AkXA6!OA6lJ#MtQckK+2 zT`>GkB}rj=f5!75nDY&;`W|f9v&sgB+(xB#TE-#)o8i0?)$A0~v0&HN@Z&&2EVl$; z=TUl9f*)MO;;N>qnm36B{Ef0)Rbc|itODunC-`sQ4_!pybEM&WsZ3Z+)4j_JM(+%9 zr=f_rMw%;STRd0unV2-It` znjrOgWdcx8x(*H29m>?&2*imNanJGnOf|`J43IO<*YDz0bEP z6OFH&a;tYo;X`Und)Ozpbw)$t}lAHCu<-e`aajdmotoJ>yj*;R5w0-6aKom*|sgBdb-QnWGj?d1{p-t01 za-A^`=U?y3R6RI9u!XU=(z>jsQ#;C8_LecBRNH;0>PmVG!xmswIb6UOB)oOCJz%-5 zT6yEY@g}c%J3NPEXZ~>^Wu|qiHfw2$p8jxm*zCHA2aSqfV!IC>Ned~1Pzp+6trToW9a9%yTy$V1PIchnY z5bEP_{w9_ZWIiFkFDh_mgryVEdg9#Ce}2vK*_ZF4XiSRoLVtIG`~mz7@a6fT{p6!9 zZe9ay7OkXBxEqb4Tg{SmO^OS2IERx=NDg|BCq3;yp{K_}1esla*&{#S-NFWT5=pK} zxky6gP#H4y#FmBvevGE!9M|7nYuzoi!5lH_7@m~xs;=#gj{@5=FOc@`49YKrqFqPL zM^8&RTn+Fm%n@GZ=cUODVqgSK0Y7UBpfXhHa&+Uc!9GD3taPB7yOb4=0l^24m7qa{K5UW5R0_5j7xE2GJ2&%c>xo9cc`Ip z*#nm}{1Mpu8aY|oy#Ib;;*oqgV&|{46LI{`6k{Ze8F>-~U8Gr3kG*t}k}q1boa2A`TQbwc18r=J zKzF)2|G!rH2I-@@UC;rPrP-OS;iIYd6vxm>UW^UjML3zr005H9 zY;0_Dw?I#{W4_FrCTLj~YSkt5bYI4HKXIPJW~j+Q!v;~C0sKhYoK?Qa)s@m;bYC%O zp7(neb^ZREtkfxg*5{ySTJb~q%77J5An+BSRI&Rg@h$L!sXsEN)4(4uWh^dgOhZw( zZ^qY}s~e}Iu1nW2wlwv^m7cS4%it)TaVR*w(~#!&O()|y9}99qy_*iQ0;6t8JfD)QMbvNd2yFPo~mts5pEXZA*Pw*ODo$5=+1M>uS@EBa2mw|D9#!>XOO0~ zZQsRZdIrK99GYKcx}uzAdGXtaDSyCi^Y72Sl$5Yzb8}H?yfpf`l>VnCmi0RrZqy&7 z66cJB#p#aTEVLY)9Wed`rnirN#r%&@nc;aP$5GHCt@1)QF<(u1^CNZGgMZaj5nnjj zZnQ6WLIj`yLik{CTQ1X`B~fYvr3RaeLZ${}mPLVnhTuJzbQ-FuFouO0@te2O(9+O; z^~wg~`spM0nBTH`C|lA8J@9w(cNRA4TjF9&5Uli<*tiF3YLqV6%<>O|yj`bG(+)gFE+(l$+{1=AL@Vk#XnJ5i6mOHKw z>`@9i#Y%JWD&kRhPW`S;A>OPBjuurk9}0%2QFr>5tWw`ktAtoXy9|D+x6sx)Az{!6 za$`zZOv-SA;+Kj;oD!;*P1-ftMT{`K)MFnEg!xV;Y?{#Aq-vu@m# zVthzYXp`!1b6&_PAs<)w7oFlN#xiT>&_CiD{W*xhosXEl_4mggBn4)i9t-9LeA&0G z;HpJ_a{5U%z={;L4PFUEu-l$*^h}x25=34IkRrgMx#wBWUHI+Xg&{+)*0wV%5?1KD z!AL-Ux^#9XK~9CyN1;E-9Re84j8=kZ$o`uG7R-B=2mN>3=JL*y~ z_j0|6=st?Oj`Nq8JlYbQYI%8)TLY~qc_jki=NXvROwph=LvkVwDxT%oBxA2IT2>fe z0B6yqM}M30$kj*K*_Q93PecLrt=lK(tBg*IsQYzku2&AKmzD}U;G3NDz3Cw`8=xz@ zBY;yoclxsO(&_5>*^1X_WiacsryJJB5nW>R;tZ%%`$9dWOLWG>;L38-j6d}s;_Q2o zZ=jqx^tr*urzcPE*}q@4i4mVD3%ptx88on#XVuj(*l#bY{?3{hvQ1JT|bV`=g=&24RMlj>K>hW+_}D*(U#z{AiTm<|HbkGM#xMXQdF%to!ZxDEM84rFWnIvH8G| zV)}1|#P9B3<;*1#1G^$4*9lYq{BB_?1`l@W6Hsq4PtD9WJI0f$Ur55P*sppeE@@mQ z3m+6*oTO212NlcjS`SS=@_NfA;*MXHNsyunLbDbex}>_ za5+sJ_4-=lcYCaoZNNN6hI&nj4fv6%V-Mo+uTum1=En0gd6&_9_6K`=Nqmkp@7zF~ z=bINj*)`^o(d(`BSx$wM`RVDu?17bqY|x|w0oBtsLpv~BxHI2z;cl%D4wuI&AJ^%NA?n>guYaqci!@Q@jx}W|o3$$EKpduxgD@{q0hey=-!=8Kj~g z5_`U_SXEe>X#9@@-2lD4Q1JG){2StcCt03D7x+4I;v>UTVq_*6g z)mY{rF7WdCqu*s60$m2qCYt#bF&NgYzoZ*h7>=PY(zE8kHJ#@-!Hs@_$GKfxmkEv5T#>!Y$8I4Y1 zv6$%sx1(ANa6FwnOl%xv(?hvtzvS}gZDh~kAKT(x)8s+rn{3m?!A?8>osJ$%5GjZuEe!d6) z=?31Vpjn)}qjq`jsLXkSJBbPamXk>puZ+fi-rah=FF%LFdfYljKYGW|pr5LTKe;p0 zXzsrex#R<;;EgLSjw{jj6Twz!Ey=7nj1P-0wq?k`FY@Db_*LwkU-*!DNWRZ7D*JDE zX4#K=*RhQa{&~9Lw#Q4zz+=WKV*;F}*Wk~56!R8;XrWOv7q!}ZL6jnw&`@vus4Gfh zk9w41F^?R}pAZdca)Mk(duYGg^Ei8jS$w^o~9H z=LIsHt>wII#^%Dt5pLM?uBd;BMdn(ZXF{$UbO(+3A7yGYKCv&%t}M5ymkXjHb$d{z666B?D~lI%VF&J&K^-Nrhg24W5}nArfSngXZ} z7D}wnpPm~C>~GzvNPamZxxtw8J5D@P0J+T$E&g=qV&^!*T{=QTGL#Nvc}Zc(hl#bn zS9u6(~G4jr>iMG{?_S~ z=z2kNFBWmKgMz@klf$SgI+O?U`03sy7 z0T6_A(xRCeC$2A$Z@_h`q7X{RWo`uZi@uOA6d0aeTvU5Dz})Qg)%b6>4;>$R>W2Cr zL}ogbWzjS{@)Lz##STZzLZj8mVN4^7om#GuuCgrPEOA&yX4?A;2do9Sfv5&cxo+p! 
-* Disclaimer: This data is hosted personally by Arkanath Pathak for non-commercial research purposes.
-Please cite the [ShapeNet paper](https://arxiv.org/pdf/1512.03012.pdf) in your works when using ShapeNet for non-commercial research purposes.
-
-### Pretraining: pretrain_rotator.py for each RNN step
-$ bazel run -c opt :pretrain_rotator -- --step_size={} --init_model={}
-
-Pass init_model as the checkpoint path of the model trained at the previous step.
-You'll also need to set the inp_dir flag to where your data resides.
-
-### Training: train_ptn.py with the last pretrained model
-$ bazel run -c opt :train_ptn -- --init_model={}
-
-### Example TensorBoard Visualizations
-
-To compare the visualizations, make sure to set the model_name flag differently for each parametric setting:
-
-This code adds summaries for each loss. For instance, these are the losses we encountered in the distributed pretraining for the ShapeNet Chair dataset with 10 workers and 16 parameter servers:
-![ShapeNet Chair Pretraining](https://drive.google.com/uc?export=view&id=0B12XukcbU7T7bWdlTjhzbGJVaWs "ShapeNet Chair Experiment Pretraining Losses")
-
-After fine-tuning the training, you can expect images such as the following as "grid_vis" under **Image** summaries in TensorBoard:
-![ShapeNet Chair experiments with projection weight of 1](https://drive.google.com/uc?export=view&id=0B12XukcbU7T7ZFV6aEVBSDdCMjQ "ShapeNet Chair Dataset Predictions")
-Here the third and fifth columns are the predicted masks and voxels respectively, alongside their ground-truth values.
-
-A similar image for a model trained on all ShapeNet categories (voxel visualizations might be skewed):
-![ShapeNet All Categories experiments](https://drive.google.com/uc?export=view&id=0B12XukcbU7T7bDZKNFlkTVAzZmM "ShapeNet All Categories Dataset Predictions")
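The pretraining section above chains one run per RNN step, seeding each run with the previous step's checkpoint. A minimal driver sketch of that loop; the step schedule, the data path, and the checkpoint directory layout are assumptions for illustration, not part of the original instructions:

import subprocess

init_model = ''  # the first step trains from scratch
for step_size in (1, 2, 4):  # hypothetical curriculum; pick your own schedule
  subprocess.check_call([
      'bazel', 'run', '-c', 'opt', ':pretrain_rotator', '--',
      '--step_size=%d' % step_size,
      '--init_model=%s' % init_model,
      '--inp_dir=/path/to/tfrecords',  # set to where your data resides
  ])
  # Assumed layout: pretrain_rotator writes checkpoints under
  # <checkpoint_dir>/<model_name>/train; adjust to match your flags.
  init_model = '/tmp/ptn_train/ptn_proj/train'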
diff --git a/research/ptn/WORKSPACE b/research/ptn/WORKSPACE
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/ptn/eval_ptn.py b/research/ptn/eval_ptn.py
deleted file mode 100644
index 2f8dd96b1..000000000
--- a/research/ptn/eval_ptn.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Contains evaluation plan for the Im2vox model."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-
-import tensorflow as tf
-from tensorflow import app
-
-import model_ptn
-
-flags = tf.app.flags
-slim = tf.contrib.slim
-
-flags.DEFINE_string('inp_dir', '',
-                    'Directory path containing the input data (tfrecords).')
-flags.DEFINE_string(
-    'dataset_name', 'shapenet_chair',
-    'Dataset name that is to be used for training and evaluation.')
-flags.DEFINE_integer('z_dim', 512, '')
-flags.DEFINE_integer('f_dim', 64, '')
-flags.DEFINE_integer('fc_dim', 1024, '')
-flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.')
-flags.DEFINE_integer('image_size', 64,
-                     'Input images dimension (pixels) - width & height.')
-flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
-flags.DEFINE_integer('step_size', 24, '')
-flags.DEFINE_integer('batch_size', 1, 'Batch size while training.')
-flags.DEFINE_float('focal_length', 0.866, '')
-flags.DEFINE_float('focal_range', 1.732, '')
-flags.DEFINE_string('encoder_name', 'ptn_encoder',
-                    'Name of the encoder network being used.')
-flags.DEFINE_string('decoder_name', 'ptn_vox_decoder',
-                    'Name of the decoder network being used.')
-flags.DEFINE_string('projector_name', 'ptn_projector',
-                    'Name of the projector network being used.')
-# Save options
-flags.DEFINE_string('checkpoint_dir', '/tmp/ptn/eval/',
-                    'Directory path for saving trained models and other data.')
-flags.DEFINE_string('model_name', 'ptn_proj',
-                    'Name of the model used in naming the TF job. Must be different for each run.')
-flags.DEFINE_string('eval_set', 'val', 'Data partition to perform evaluation on.')
-# Optimization
-flags.DEFINE_float('proj_weight', 10, 'Weighting factor for projection loss.')
-flags.DEFINE_float('volume_weight', 0, 'Weighting factor for volume loss.')
-flags.DEFINE_float('viewpoint_weight', 1,
-                   'Weighting factor for viewpoint loss.')
-flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
-flags.DEFINE_float('weight_decay', 0.001, '')
-flags.DEFINE_float('clip_gradient_norm', 0, '')
-# Summary
-flags.DEFINE_integer('save_summaries_secs', 15, '')
-flags.DEFINE_integer('eval_interval_secs', 60 * 5, '')
-# Distribution
-flags.DEFINE_string('master', '', '')
-
-FLAGS = flags.FLAGS
-
-
-def main(argv=()):
-  del argv  # Unused.
- eval_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train') - log_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, - 'eval_%s' % FLAGS.eval_set) - if not os.path.exists(eval_dir): - os.makedirs(eval_dir) - if not os.path.exists(log_dir): - os.makedirs(log_dir) - g = tf.Graph() - - with g.as_default(): - eval_params = FLAGS - eval_params.batch_size = 1 - eval_params.step_size = FLAGS.num_views - ########### - ## model ## - ########### - model = model_ptn.model_PTN(eval_params) - ########## - ## data ## - ########## - eval_data = model.get_inputs( - FLAGS.inp_dir, - FLAGS.dataset_name, - eval_params.eval_set, - eval_params.batch_size, - eval_params.image_size, - eval_params.vox_size, - is_training=False) - inputs = model.preprocess_with_all_views(eval_data) - ############## - ## model_fn ## - ############## - model_fn = model.get_model_fn(is_training=False, run_projection=False) - outputs = model_fn(inputs) - ############# - ## metrics ## - ############# - names_to_values, names_to_updates = model.get_metrics(inputs, outputs) - del names_to_values - ################ - ## evaluation ## - ################ - num_batches = eval_data['num_samples'] - slim.evaluation.evaluation_loop( - master=FLAGS.master, - checkpoint_dir=eval_dir, - logdir=log_dir, - num_evals=num_batches, - eval_op=names_to_updates.values(), - eval_interval_secs=FLAGS.eval_interval_secs) - - -if __name__ == '__main__': - app.run() diff --git a/research/ptn/eval_rotator.py b/research/ptn/eval_rotator.py deleted file mode 100644 index b7fcf0fe4..000000000 --- a/research/ptn/eval_rotator.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
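One subtlety in eval_ptn.py's main() above: `eval_params = FLAGS` binds a second name to the same flags object rather than copying it, so the batch_size and step_size overrides that follow also mutate the global FLAGS. A sketch of a detached copy, assuming absl-backed flags (flag_values_dict is the absl API; the detach_params helper is ours, not the module's):

import argparse

def detach_params(flags_obj, **overrides):
  # Snapshot flag values into a plain namespace so overrides stay local.
  values = dict(flags_obj.flag_values_dict())
  values.update(overrides)
  return argparse.Namespace(**values)

eval_params = detach_params(FLAGS, batch_size=1, step_size=FLAGS.num_views)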
-# ============================================================================== - -"""Contains evaluation plan for the Rotator model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import tensorflow as tf -from tensorflow import app - -import model_rotator as model - -flags = tf.app.flags -slim = tf.contrib.slim - -flags.DEFINE_string('inp_dir', - '', - 'Directory path containing the input data (tfrecords).') -flags.DEFINE_string( - 'dataset_name', 'shapenet_chair', - 'Dataset name that is to be used for training and evaluation.') -flags.DEFINE_integer('z_dim', 512, '') -flags.DEFINE_integer('a_dim', 3, '') -flags.DEFINE_integer('f_dim', 64, '') -flags.DEFINE_integer('fc_dim', 1024, '') -flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.') -flags.DEFINE_integer('image_size', 64, - 'Input images dimension (pixels) - width & height.') -flags.DEFINE_integer('step_size', 24, '') -flags.DEFINE_integer('batch_size', 2, '') -flags.DEFINE_string('encoder_name', 'ptn_encoder', - 'Name of the encoder network being used.') -flags.DEFINE_string('decoder_name', 'ptn_im_decoder', - 'Name of the decoder network being used.') -flags.DEFINE_string('rotator_name', 'ptn_rotator', - 'Name of the rotator network being used.') -# Save options -flags.DEFINE_string('checkpoint_dir', '/tmp/ptn_train/', - 'Directory path for saving trained models and other data.') -flags.DEFINE_string('model_name', 'ptn_proj', - 'Name of the model used in naming the TF job. Must be different for each run.') -# Optimization -flags.DEFINE_float('image_weight', 10, '') -flags.DEFINE_float('mask_weight', 1, '') -flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.') -flags.DEFINE_float('weight_decay', 0.001, '') -flags.DEFINE_float('clip_gradient_norm', 0, '') -# Summary -flags.DEFINE_integer('save_summaries_secs', 15, '') -flags.DEFINE_integer('eval_interval_secs', 60 * 5, '') -# Scheduling -flags.DEFINE_string('master', '', '') - -FLAGS = flags.FLAGS - - -def main(argv=()): - del argv # Unused. 
- eval_dir = os.path.join(FLAGS.checkpoint_dir, - FLAGS.model_name, 'train') - log_dir = os.path.join(FLAGS.checkpoint_dir, - FLAGS.model_name, 'eval') - - if not os.path.exists(eval_dir): - os.makedirs(eval_dir) - if not os.path.exists(log_dir): - os.makedirs(log_dir) - g = tf.Graph() - - if FLAGS.step_size < FLAGS.num_views: - raise ValueError('Impossible step_size, must not be less than num_views.') - - g = tf.Graph() - with g.as_default(): - ########## - ## data ## - ########## - val_data = model.get_inputs( - FLAGS.inp_dir, - FLAGS.dataset_name, - 'val', - FLAGS.batch_size, - FLAGS.image_size, - is_training=False) - inputs = model.preprocess(val_data, FLAGS.step_size) - ########### - ## model ## - ########### - model_fn = model.get_model_fn(FLAGS, is_training=False) - outputs = model_fn(inputs) - ############# - ## metrics ## - ############# - names_to_values, names_to_updates = model.get_metrics( - inputs, outputs, FLAGS) - del names_to_values - ################ - ## evaluation ## - ################ - num_batches = int(val_data['num_samples'] / FLAGS.batch_size) - slim.evaluation.evaluation_loop( - master=FLAGS.master, - checkpoint_dir=eval_dir, - logdir=log_dir, - num_evals=num_batches, - eval_op=names_to_updates.values(), - eval_interval_secs=FLAGS.eval_interval_secs) - - -if __name__ == '__main__': - app.run() diff --git a/research/ptn/input_generator.py b/research/ptn/input_generator.py deleted file mode 100644 index 7047d6483..000000000 --- a/research/ptn/input_generator.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Provides dataset dictionaries as used in our network models.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import tensorflow as tf -import tensorflow.contrib.slim as slim - -from tensorflow.contrib.slim.python.slim.data import dataset -from tensorflow.contrib.slim.python.slim.data import dataset_data_provider -from tensorflow.contrib.slim.python.slim.data import tfexample_decoder - -_ITEMS_TO_DESCRIPTIONS = { - 'image': 'Images', - 'mask': 'Masks', - 'vox': 'Voxels' -} - - -def _get_split(file_pattern, num_samples, num_views, image_size, vox_size): - """Get dataset.Dataset for the given dataset file pattern and properties.""" - - # A dictionary from TF-Example keys to tf.FixedLenFeature instance. 
-  keys_to_features = {
-      'image': tf.FixedLenFeature(
-          shape=[num_views, image_size, image_size, 3],
-          dtype=tf.float32, default_value=None),
-      'mask': tf.FixedLenFeature(
-          shape=[num_views, image_size, image_size, 1],
-          dtype=tf.float32, default_value=None),
-      'vox': tf.FixedLenFeature(
-          shape=[vox_size, vox_size, vox_size, 1],
-          dtype=tf.float32, default_value=None),
-  }
-
-  items_to_handler = {
-      'image': tfexample_decoder.Tensor(
-          'image', shape=[num_views, image_size, image_size, 3]),
-      'mask': tfexample_decoder.Tensor(
-          'mask', shape=[num_views, image_size, image_size, 1]),
-      'vox': tfexample_decoder.Tensor(
-          'vox', shape=[vox_size, vox_size, vox_size, 1])
-  }
-
-  decoder = tfexample_decoder.TFExampleDecoder(
-      keys_to_features, items_to_handler)
-
-  return dataset.Dataset(
-      data_sources=file_pattern,
-      reader=tf.TFRecordReader,
-      decoder=decoder,
-      num_samples=num_samples,
-      items_to_descriptions=_ITEMS_TO_DESCRIPTIONS)
-
-
-def get(dataset_dir,
-        dataset_name,
-        split_name,
-        shuffle=True,
-        num_readers=1,
-        common_queue_capacity=64,
-        common_queue_min=50):
-  """Provides input data for a specified dataset and split."""
-
-  dataset_to_kwargs = {
-      'shapenet_chair': {
-          'file_pattern': '03001627_%s.tfrecords' % split_name,
-          'num_views': 24,
-          'image_size': 64,
-          'vox_size': 32,
-      },
-      'shapenet_all': {
-          'file_pattern': '*_%s.tfrecords' % split_name,
-          'num_views': 24,
-          'image_size': 64,
-          'vox_size': 32,
-      },
-  }
-
-  split_sizes = {
-      'shapenet_chair': {
-          'train': 4744,
-          'val': 678,
-          'test': 1356,
-      },
-      'shapenet_all': {
-          'train': 30643,
-          'val': 4378,
-          'test': 8762,
-      }
-  }
-
-  kwargs = dataset_to_kwargs[dataset_name]
-  kwargs['file_pattern'] = os.path.join(dataset_dir, kwargs['file_pattern'])
-  kwargs['num_samples'] = split_sizes[dataset_name][split_name]
-
-  dataset_split = _get_split(**kwargs)
-  data_provider = dataset_data_provider.DatasetDataProvider(
-      dataset_split,
-      num_readers=num_readers,
-      common_queue_capacity=common_queue_capacity,
-      common_queue_min=common_queue_min,
-      shuffle=shuffle)
-
-  inputs = {
-      'num_samples': dataset_split.num_samples,
-  }
-
-  [image, mask, vox] = data_provider.get(['image', 'mask', 'vox'])
-  inputs['image'] = image
-  inputs['mask'] = mask
-  inputs['voxel'] = vox
-
-  return inputs
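A record consumed by input_generator.py above has to match its fixed-length float schema exactly. A minimal writer sketch; the output filename and the random data are placeholders for illustration, real records come from the rendered ShapeNet views:

import numpy as np
import tensorflow as tf

num_views, image_size, vox_size = 24, 64, 32

def float_feature(array):
  # Flatten to a flat float list, as tf.FixedLenFeature expects.
  return tf.train.Feature(
      float_list=tf.train.FloatList(value=array.reshape(-1).tolist()))

example = tf.train.Example(features=tf.train.Features(feature={
    'image': float_feature(
        np.random.rand(num_views, image_size, image_size, 3).astype(np.float32)),
    'mask': float_feature(
        np.random.rand(num_views, image_size, image_size, 1).astype(np.float32)),
    'vox': float_feature(
        np.random.rand(vox_size, vox_size, vox_size, 1).astype(np.float32)),
}))
with tf.python_io.TFRecordWriter('03001627_train.tfrecords') as writer:
  writer.write(example.SerializeToString())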
diff --git a/research/ptn/losses.py b/research/ptn/losses.py
deleted file mode 100644
index 53cc28847..000000000
--- a/research/ptn/losses.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Defines the various loss functions in use by the PTN model."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-
-def add_rotator_image_loss(inputs, outputs, step_size, weight_scale):
-  """Computes the image loss of the deep rotator model.
-
-  Args:
-    inputs: Input dictionary to the model containing keys
-      such as `images_k'.
-    outputs: Output dictionary returned by the model containing keys
-      such as `images_k'.
-    step_size: A scalar representing the number of recurrent
-      steps (number of repeated out-of-plane rotations)
-      in the deep rotator network (int).
-    weight_scale: A reweighting factor applied over the image loss (float).
-
-  Returns:
-    A `Tensor' scalar holding the averaged L2 loss
-      (divided by batch_size and step_size) between the
-      ground-truth images (RGB) and predicted images (tf.float32).
-
-  """
-  batch_size = tf.shape(inputs['images_0'])[0]
-  image_loss = 0
-  for k in range(1, step_size + 1):
-    image_loss += tf.nn.l2_loss(
-        inputs['images_%d' % k] - outputs['images_%d' % k])
-
-  image_loss /= tf.to_float(step_size * batch_size)
-  slim.summaries.add_scalar_summary(
-      image_loss, 'image_loss', prefix='losses')
-  image_loss *= weight_scale
-  return image_loss
-
-
-def add_rotator_mask_loss(inputs, outputs, step_size, weight_scale):
-  """Computes the mask loss of the deep rotator model.
-
-  Args:
-    inputs: Input dictionary to the model containing keys
-      such as `masks_k'.
-    outputs: Output dictionary returned by the model containing
-      keys such as `masks_k'.
-    step_size: A scalar representing the number of recurrent
-      steps (number of repeated out-of-plane rotations)
-      in the deep rotator network (int).
-    weight_scale: A reweighting factor applied over the mask loss (float).
-
-  Returns:
-    A `Tensor' holding the averaged L2 loss
-      (divided by batch_size and step_size) between the ground-truth masks
-      (object silhouettes) and predicted masks (tf.float32).
-
-  """
-  batch_size = tf.shape(inputs['images_0'])[0]
-  mask_loss = 0
-  for k in range(1, step_size + 1):
-    mask_loss += tf.nn.l2_loss(
-        inputs['masks_%d' % k] - outputs['masks_%d' % k])
-
-  mask_loss /= tf.to_float(step_size * batch_size)
-  slim.summaries.add_scalar_summary(
-      mask_loss, 'mask_loss', prefix='losses')
-  mask_loss *= weight_scale
-  return mask_loss
-
-
-def add_volume_proj_loss(inputs, outputs, num_views, weight_scale):
-  """Computes the projection loss of the voxel generation model.
-
-  Args:
-    inputs: Input dictionary to the model containing keys such as
-      `images_1'.
-    outputs: Output dictionary returned by the model containing keys
-      such as `masks_k' and `projs_k'.
-    num_views: An integer scalar representing the total number of
-      viewpoints for each object (int).
-    weight_scale: A reweighting factor applied over the projection loss (float).
-
-  Returns:
-    A `Tensor' holding the averaged L2 loss
-      (divided by batch_size and num_views) between the ground-truth
-      masks (object silhouettes) and predicted masks (tf.float32).
-
-  """
-  batch_size = tf.shape(inputs['images_1'])[0]
-  proj_loss = 0
-  for k in range(num_views):
-    proj_loss += tf.nn.l2_loss(
-        outputs['masks_%d' % (k + 1)] - outputs['projs_%d' % (k + 1)])
-  proj_loss /= tf.to_float(num_views * batch_size)
-  slim.summaries.add_scalar_summary(
-      proj_loss, 'proj_loss', prefix='losses')
-  proj_loss *= weight_scale
-  return proj_loss
-
-
-def add_volume_loss(inputs, outputs, num_views, weight_scale):
-  """Computes the volume loss of the voxel generation model.
-
-  Args:
-    inputs: Input dictionary to the model containing keys such as
-      `images_1' and `voxels'.
-    outputs: Output dictionary returned by the model containing keys
-      such as `voxels_k'.
-    num_views: A scalar representing the total number of
-      viewpoints for each object (int).
- weight_scale: A reweighting factor applied over the volume - loss (tf.float32). - - Returns: - A `Tensor' that returns the averaged L2 loss - (divided by batch_size and num_views) between the ground-truth - volumes and predicted volumes (tf.float32). - - """ - batch_size = tf.shape(inputs['images_1'])[0] - vol_loss = 0 - for k in range(num_views): - vol_loss += tf.nn.l2_loss( - inputs['voxels'] - outputs['voxels_%d' % (k + 1)]) - vol_loss /= tf.to_float(num_views * batch_size) - slim.summaries.add_scalar_summary( - vol_loss, 'vol_loss', prefix='losses') - vol_loss *= weight_scale - return vol_loss - - -def regularization_loss(scopes, params): - """Computes the weight decay as regularization during training. - - Args: - scopes: A list of different components of the model such as - ``encoder'', ``decoder'' and ``projector''. - params: Parameters of the model. - - Returns: - Regularization loss (tf.float32). - """ - - reg_loss = tf.zeros(dtype=tf.float32, shape=[]) - if params.weight_decay > 0: - is_trainable = lambda x: x in tf.trainable_variables() - is_weights = lambda x: 'weights' in x.name - for scope in scopes: - scope_vars = filter(is_trainable, - tf.contrib.framework.get_model_variables(scope)) - scope_vars = filter(is_weights, scope_vars) - if scope_vars: - reg_loss += tf.add_n([tf.nn.l2_loss(var) for var in scope_vars]) - - slim.summaries.add_scalar_summary( - reg_loss, 'reg_loss', prefix='losses') - reg_loss *= params.weight_decay - return reg_loss diff --git a/research/ptn/metrics.py b/research/ptn/metrics.py deleted file mode 100644 index 5f31dd5fd..000000000 --- a/research/ptn/metrics.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Provides metrics used by PTN.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import xrange -import tensorflow as tf - -slim = tf.contrib.slim - - -def add_image_pred_metrics( - inputs, outputs, num_views, upscale_factor): - """Computes the image prediction metrics. - - Args: - inputs: Input dictionary of the deep rotator model (model_rotator.py). - outputs: Output dictionary of the deep rotator model (model_rotator.py). - num_views: An integer scalar representing the total number - of different viewpoints for each object in the dataset. - upscale_factor: A float scalar representing the number of pixels - per image (num_channels x image_height x image_width). - - Returns: - names_to_values: A dictionary representing the current value - of the metric. - names_to_updates: A dictionary representing the operation - that accumulates the error from a batch of data. 
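A portability note on losses.regularization_loss above: the module is Python 2 era code, and under Python 3 filter() returns a lazy iterator, which is always truthy and is consumed on first use, so the `if scope_vars:` guard silently stops detecting empty scopes. A sketch of equivalent list-based filtering for the body of that function, behaving the same on Python 2:

trainable = set(tf.trainable_variables())
for scope in scopes:
  scope_vars = [v for v in tf.contrib.framework.get_model_variables(scope)
                if v in trainable and 'weights' in v.name]
  if scope_vars:  # an empty list is falsy, unlike an empty filter object
    reg_loss += tf.add_n([tf.nn.l2_loss(v) for v in scope_vars])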
- """ - names_to_values = dict() - names_to_updates = dict() - for k in xrange(num_views): - tmp_value, tmp_update = tf.contrib.metrics.streaming_mean_squared_error( - outputs['images_%d' % (k + 1)], inputs['images_%d' % (k + 1)]) - name = 'image_pred/rnn_%d' % (k + 1) - names_to_values.update({name: tmp_value * upscale_factor}) - names_to_updates.update({name: tmp_update}) - return names_to_values, names_to_updates - - -def add_mask_pred_metrics( - inputs, outputs, num_views, upscale_factor): - """Computes the mask prediction metrics. - - Args: - inputs: Input dictionary of the deep rotator model (model_rotator.py). - outputs: Output dictionary of the deep rotator model (model_rotator.py). - num_views: An integer scalar representing the total number - of different viewpoints for each object in the dataset. - upscale_factor: A float scalar representing the number of pixels - per image (num_channels x image_height x image_width). - - Returns: - names_to_values: A dictionary representing the current value - of the metric. - names_to_updates: A dictionary representing the operation - that accumulates the error from a batch of data. - - """ - names_to_values = dict() - names_to_updates = dict() - for k in xrange(num_views): - tmp_value, tmp_update = tf.contrib.metrics.streaming_mean_squared_error( - outputs['masks_%d' % (k + 1)], inputs['masks_%d' % (k + 1)]) - name = 'mask_pred/rnn_%d' % (k + 1) - names_to_values.update({name: tmp_value * upscale_factor}) - names_to_updates.update({name: tmp_update}) - return names_to_values, names_to_updates - - -def add_volume_iou_metrics(inputs, outputs): - """Computes the per-instance volume IOU. - - Args: - inputs: Input dictionary of the voxel generation model. - outputs: Output dictionary returned by the voxel generation model. - - Returns: - names_to_values: metrics->values (dict). - names_to_updates: metrics->ops (dict). - - """ - names_to_values = dict() - names_to_updates = dict() - labels = tf.greater_equal(inputs['voxels'], 0.5) - predictions = tf.greater_equal(outputs['voxels_1'], 0.5) - labels = (2 - tf.to_int32(labels)) - 1 - predictions = (3 - tf.to_int32(predictions) * 2) - 1 - tmp_values, tmp_updates = tf.metrics.mean_iou( - labels=labels, - predictions=predictions, - num_classes=3) - names_to_values['volume_iou'] = tmp_values * 3.0 - names_to_updates['volume_iou'] = tmp_updates - return names_to_values, names_to_updates diff --git a/research/ptn/model_ptn.py b/research/ptn/model_ptn.py deleted file mode 100644 index cc0fc4fa3..000000000 --- a/research/ptn/model_ptn.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Implementations for Im2Vox PTN (NIPS16) model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import losses -import metrics -import model_voxel_generation -import utils -from nets import im2vox_factory - -slim = tf.contrib.slim - - -class model_PTN(model_voxel_generation.Im2Vox): # pylint:disable=invalid-name - """Inherits the generic Im2Vox model class and implements the functions.""" - - def __init__(self, params): - super(model_PTN, self).__init__(params) - - # For testing, this selects all views in input - def preprocess_with_all_views(self, raw_inputs): - (quantity, num_views) = raw_inputs['images'].get_shape().as_list()[:2] - - inputs = dict() - inputs['voxels'] = [] - inputs['images_1'] = [] - for k in xrange(num_views): - inputs['matrix_%d' % (k + 1)] = [] - inputs['matrix_1'] = [] - for n in xrange(quantity): - for k in xrange(num_views): - inputs['images_1'].append(raw_inputs['images'][n, k, :, :, :]) - inputs['voxels'].append(raw_inputs['voxels'][n, :, :, :, :]) - tf_matrix = self.get_transform_matrix(k) - inputs['matrix_%d' % (k + 1)].append(tf_matrix) - - inputs['images_1'] = tf.stack(inputs['images_1']) - inputs['voxels'] = tf.stack(inputs['voxels']) - for k in xrange(num_views): - inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)]) - - return inputs - - def get_model_fn(self, is_training=True, reuse=False, run_projection=True): - return im2vox_factory.get(self._params, is_training, reuse, run_projection) - - def get_regularization_loss(self, scopes): - return losses.regularization_loss(scopes, self._params) - - def get_loss(self, inputs, outputs): - """Computes the loss used for PTN paper (projection + volume loss).""" - g_loss = tf.zeros(dtype=tf.float32, shape=[]) - - if self._params.proj_weight: - g_loss += losses.add_volume_proj_loss( - inputs, outputs, self._params.step_size, self._params.proj_weight) - - if self._params.volume_weight: - g_loss += losses.add_volume_loss(inputs, outputs, 1, - self._params.volume_weight) - - slim.summaries.add_scalar_summary(g_loss, 'im2vox_loss', prefix='losses') - - return g_loss - - def get_metrics(self, inputs, outputs): - """Aggregate the metrics for voxel generation model. - - Args: - inputs: Input dictionary of the voxel generation model. - outputs: Output dictionary returned by the voxel generation model. - - Returns: - names_to_values: metrics->values (dict). - names_to_updates: metrics->ops (dict). 
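model_PTN.preprocess_with_all_views above turns a (quantity, num_views, ...) batch into quantity * num_views single-view examples, repeating each ground-truth voxel grid once per view. The same reshaping in NumPy terms; the shapes are the dataset defaults and are used here only for illustration:

import numpy as np

images = np.zeros((2, 24, 64, 64, 3), np.float32)  # (quantity, num_views, ...)
voxels = np.zeros((2, 32, 32, 32, 1), np.float32)

images_1 = images.reshape(-1, 64, 64, 3)           # (48, 64, 64, 3)
voxels_rep = np.repeat(voxels, 24, axis=0)         # each grid repeated 24x
assert images_1.shape[0] == voxels_rep.shape[0] == 2 * 24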
- """ - names_to_values = dict() - names_to_updates = dict() - - tmp_values, tmp_updates = metrics.add_volume_iou_metrics(inputs, outputs) - - names_to_values.update(tmp_values) - names_to_updates.update(tmp_updates) - - for name, value in names_to_values.iteritems(): - slim.summaries.add_scalar_summary( - value, name, prefix='eval', print_summary=True) - - return names_to_values, names_to_updates - - def write_disk_grid(self, - global_step, - log_dir, - input_images, - gt_projs, - pred_projs, - input_voxels=None, - output_voxels=None): - """Function called by TF to save the prediction periodically.""" - summary_freq = self._params.save_every - - def write_grid(input_images, gt_projs, pred_projs, global_step, - input_voxels, output_voxels): - """Native python function to call for writing images to files.""" - grid = _build_image_grid( - input_images, - gt_projs, - pred_projs, - input_voxels=input_voxels, - output_voxels=output_voxels) - - if global_step % summary_freq == 0: - img_path = os.path.join(log_dir, '%s.jpg' % str(global_step)) - utils.save_image(grid, img_path) - return grid - - save_op = tf.py_func(write_grid, [ - input_images, gt_projs, pred_projs, global_step, input_voxels, - output_voxels - ], [tf.uint8], 'write_grid')[0] - slim.summaries.add_image_summary( - tf.expand_dims(save_op, axis=0), name='grid_vis') - return save_op - - def get_transform_matrix(self, view_out): - """Get the 4x4 Perspective Transfromation matrix used for PTN.""" - num_views = self._params.num_views - focal_length = self._params.focal_length - focal_range = self._params.focal_range - phi = 30 - theta_interval = 360.0 / num_views - theta = theta_interval * view_out - - # pylint: disable=invalid-name - camera_matrix = np.zeros((4, 4), dtype=np.float32) - intrinsic_matrix = np.eye(4, dtype=np.float32) - extrinsic_matrix = np.eye(4, dtype=np.float32) - - sin_phi = np.sin(float(phi) / 180.0 * np.pi) - cos_phi = np.cos(float(phi) / 180.0 * np.pi) - sin_theta = np.sin(float(-theta) / 180.0 * np.pi) - cos_theta = np.cos(float(-theta) / 180.0 * np.pi) - - rotation_azimuth = np.zeros((3, 3), dtype=np.float32) - rotation_azimuth[0, 0] = cos_theta - rotation_azimuth[2, 2] = cos_theta - rotation_azimuth[0, 2] = -sin_theta - rotation_azimuth[2, 0] = sin_theta - rotation_azimuth[1, 1] = 1.0 - - ## rotation axis -- x - rotation_elevation = np.zeros((3, 3), dtype=np.float32) - rotation_elevation[0, 0] = cos_phi - rotation_elevation[0, 1] = sin_phi - rotation_elevation[1, 0] = -sin_phi - rotation_elevation[1, 1] = cos_phi - rotation_elevation[2, 2] = 1.0 - - rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation) - displacement = np.zeros((3, 1), dtype=np.float32) - displacement[0, 0] = float(focal_length) + float(focal_range) / 2.0 - displacement = np.matmul(rotation_matrix, displacement) - - extrinsic_matrix[0:3, 0:3] = rotation_matrix - extrinsic_matrix[0:3, 3:4] = -displacement - - intrinsic_matrix[2, 2] = 1.0 / float(focal_length) - intrinsic_matrix[1, 1] = 1.0 / float(focal_length) - - camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix) - return camera_matrix - - -def _build_image_grid(input_images, - gt_projs, - pred_projs, - input_voxels, - output_voxels, - vis_size=128): - """Builds a grid image by concatenating the input images.""" - quantity = input_images.shape[0] - - for row in xrange(int(quantity / 3)): - for col in xrange(3): - index = row * 3 + col - input_img_ = utils.resize_image(input_images[index, :, :, :], vis_size, - vis_size) - gt_proj_ = utils.resize_image(gt_projs[index, :, 
:, :], vis_size, - vis_size) - pred_proj_ = utils.resize_image(pred_projs[index, :, :, :], vis_size, - vis_size) - gt_voxel_vis = utils.resize_image( - utils.display_voxel(input_voxels[index, :, :, :, 0]), vis_size, - vis_size) - pred_voxel_vis = utils.resize_image( - utils.display_voxel(output_voxels[index, :, :, :, 0]), vis_size, - vis_size) - if col == 0: - tmp_ = np.concatenate( - [input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis], 1) - else: - tmp_ = np.concatenate([ - tmp_, input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis - ], 1) - if row == 0: - out_grid = tmp_ - else: - out_grid = np.concatenate([out_grid, tmp_], 0) - - return out_grid diff --git a/research/ptn/model_rotator.py b/research/ptn/model_rotator.py deleted file mode 100644 index 28860bc10..000000000 --- a/research/ptn/model_rotator.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Helper functions for pretraining (rotator) as described in PTN paper.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import input_generator -import losses -import metrics -import utils -from nets import deeprotator_factory - -slim = tf.contrib.slim - - -def _get_data_from_provider(inputs, batch_size, split_name): - """Returns dictionary of batch input data processed by tf.train.batch.""" - images, masks = tf.train.batch( - [inputs['image'], inputs['mask']], - batch_size=batch_size, - num_threads=8, - capacity=8 * batch_size, - name='batching_queues/%s' % (split_name)) - - outputs = dict() - outputs['images'] = images - outputs['masks'] = masks - outputs['num_samples'] = inputs['num_samples'] - - return outputs - - -def get_inputs(dataset_dir, dataset_name, split_name, batch_size, image_size, - is_training): - """Loads the given dataset and split.""" - del image_size # Unused - with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)): - common_queue_min = 50 - common_queue_capacity = 256 - num_readers = 4 - - inputs = input_generator.get( - dataset_dir, - dataset_name, - split_name, - shuffle=is_training, - num_readers=num_readers, - common_queue_min=common_queue_min, - common_queue_capacity=common_queue_capacity) - - return _get_data_from_provider(inputs, batch_size, split_name) - - -def preprocess(raw_inputs, step_size): - """Selects the subset of viewpoints to train on.""" - shp = raw_inputs['images'].get_shape().as_list() - quantity = shp[0] - num_views = shp[1] - image_size = shp[2] - del image_size # Unused - - batch_rot = np.zeros((quantity, 3), dtype=np.float32) - inputs = dict() - for n in xrange(step_size + 1): - inputs['images_%d' % n] = [] - inputs['masks_%d' % n] = [] - - for n in xrange(quantity): - view_in = np.random.randint(0, num_views) - rng_rot = 
np.random.randint(0, 2) - if step_size == 1: - rng_rot = np.random.randint(0, 3) - - delta = 0 - if rng_rot == 0: - delta = -1 - batch_rot[n, 2] = 1 - elif rng_rot == 1: - delta = 1 - batch_rot[n, 0] = 1 - else: - delta = 0 - batch_rot[n, 1] = 1 - - inputs['images_0'].append(raw_inputs['images'][n, view_in, :, :, :]) - inputs['masks_0'].append(raw_inputs['masks'][n, view_in, :, :, :]) - - view_out = view_in - for k in xrange(1, step_size + 1): - view_out += delta - if view_out >= num_views: - view_out = 0 - if view_out < 0: - view_out = num_views - 1 - - inputs['images_%d' % k].append(raw_inputs['images'][n, view_out, :, :, :]) - inputs['masks_%d' % k].append(raw_inputs['masks'][n, view_out, :, :, :]) - - for n in xrange(step_size + 1): - inputs['images_%d' % n] = tf.stack(inputs['images_%d' % n]) - inputs['masks_%d' % n] = tf.stack(inputs['masks_%d' % n]) - - inputs['actions'] = tf.constant(batch_rot, dtype=tf.float32) - return inputs - - -def get_init_fn(scopes, params): - """Initialization assignment operator function used while training.""" - if not params.init_model: - return None - - is_trainable = lambda x: x in tf.trainable_variables() - var_list = [] - for scope in scopes: - var_list.extend( - filter(is_trainable, tf.contrib.framework.get_model_variables(scope))) - - init_assign_op, init_feed_dict = slim.assign_from_checkpoint( - params.init_model, var_list) - - def init_assign_function(sess): - sess.run(init_assign_op, init_feed_dict) - - return init_assign_function - - -def get_model_fn(params, is_training, reuse=False): - return deeprotator_factory.get(params, is_training, reuse) - - -def get_regularization_loss(scopes, params): - return losses.regularization_loss(scopes, params) - - -def get_loss(inputs, outputs, params): - """Computes the rotator loss.""" - g_loss = tf.zeros(dtype=tf.float32, shape=[]) - - if hasattr(params, 'image_weight'): - g_loss += losses.add_rotator_image_loss(inputs, outputs, params.step_size, - params.image_weight) - - if hasattr(params, 'mask_weight'): - g_loss += losses.add_rotator_mask_loss(inputs, outputs, params.step_size, - params.mask_weight) - - slim.summaries.add_scalar_summary( - g_loss, 'rotator_loss', prefix='losses') - - return g_loss - - -def get_train_op_for_scope(loss, optimizer, scopes, params): - """Train operation function for the given scope used file training.""" - is_trainable = lambda x: x in tf.trainable_variables() - - var_list = [] - update_ops = [] - - for scope in scopes: - var_list.extend( - filter(is_trainable, tf.contrib.framework.get_model_variables(scope))) - update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)) - - return slim.learning.create_train_op( - loss, - optimizer, - update_ops=update_ops, - variables_to_train=var_list, - clip_gradient_norm=params.clip_gradient_norm) - - -def get_metrics(inputs, outputs, params): - """Aggregate the metrics for rotator model. - - Args: - inputs: Input dictionary of the rotator model. - outputs: Output dictionary returned by the rotator model. - params: Hyperparameters of the rotator model. - - Returns: - names_to_values: metrics->values (dict). - names_to_updates: metrics->ops (dict). 
- """ - names_to_values = dict() - names_to_updates = dict() - - tmp_values, tmp_updates = metrics.add_image_pred_metrics( - inputs, outputs, params.num_views, 3*params.image_size**2) - names_to_values.update(tmp_values) - names_to_updates.update(tmp_updates) - - tmp_values, tmp_updates = metrics.add_mask_pred_metrics( - inputs, outputs, params.num_views, params.image_size**2) - names_to_values.update(tmp_values) - names_to_updates.update(tmp_updates) - - for name, value in names_to_values.iteritems(): - slim.summaries.add_scalar_summary( - value, name, prefix='eval', print_summary=True) - - return names_to_values, names_to_updates - - -def write_disk_grid(global_step, summary_freq, log_dir, input_images, - output_images, pred_images, pred_masks): - """Function called by TF to save the prediction periodically.""" - - def write_grid(grid, global_step): - """Native python function to call for writing images to files.""" - if global_step % summary_freq == 0: - img_path = os.path.join(log_dir, '%s.jpg' % str(global_step)) - utils.save_image(grid, img_path) - return 0 - - grid = _build_image_grid(input_images, output_images, pred_images, pred_masks) - slim.summaries.add_image_summary( - tf.expand_dims(grid, axis=0), name='grid_vis') - save_op = tf.py_func(write_grid, [grid, global_step], [tf.int64], - 'write_grid')[0] - return save_op - - -def _build_image_grid(input_images, output_images, pred_images, pred_masks): - """Builds a grid image by concatenating the input images.""" - quantity = input_images.get_shape().as_list()[0] - - for row in xrange(int(quantity / 4)): - for col in xrange(4): - index = row * 4 + col - input_img_ = input_images[index, :, :, :] - output_img_ = output_images[index, :, :, :] - pred_img_ = pred_images[index, :, :, :] - pred_mask_ = tf.tile(pred_masks[index, :, :, :], [1, 1, 3]) - if col == 0: - tmp_ = tf.concat([input_img_, output_img_, pred_img_, pred_mask_], - 1) ## to the right - else: - tmp_ = tf.concat([tmp_, input_img_, output_img_, pred_img_, pred_mask_], - 1) - if row == 0: - out_grid = tmp_ - else: - out_grid = tf.concat([out_grid, tmp_], 0) - - return out_grid diff --git a/research/ptn/model_voxel_generation.py b/research/ptn/model_voxel_generation.py deleted file mode 100644 index 0c8fc8466..000000000 --- a/research/ptn/model_voxel_generation.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Base class for voxel generation model.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import abc -import os - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -import input_generator -import utils - -slim = tf.contrib.slim - - -class Im2Vox(object): - """Defines the voxel generation model.""" - - __metaclass__ = abc.ABCMeta - - def __init__(self, params): - self._params = params - - @abc.abstractmethod - def get_metrics(self, inputs, outputs): - """Gets dictionaries from metrics to value `Tensors` & update `Tensors`.""" - pass - - @abc.abstractmethod - def get_loss(self, inputs, outputs): - pass - - @abc.abstractmethod - def get_regularization_loss(self, scopes): - pass - - def set_params(self, params): - self._params = params - - def get_inputs(self, - dataset_dir, - dataset_name, - split_name, - batch_size, - image_size, - vox_size, - is_training=True): - """Loads data for a specified dataset and split.""" - del image_size, vox_size - with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)): - common_queue_min = 64 - common_queue_capacity = 256 - num_readers = 4 - - inputs = input_generator.get( - dataset_dir, - dataset_name, - split_name, - shuffle=is_training, - num_readers=num_readers, - common_queue_min=common_queue_min, - common_queue_capacity=common_queue_capacity) - - images, voxels = tf.train.batch( - [inputs['image'], inputs['voxel']], - batch_size=batch_size, - num_threads=8, - capacity=8 * batch_size, - name='batching_queues/%s/%s' % (dataset_name, split_name)) - - outputs = dict() - outputs['images'] = images - outputs['voxels'] = voxels - outputs['num_samples'] = inputs['num_samples'] - - return outputs - - def preprocess(self, raw_inputs, step_size): - """Selects the subset of viewpoints to train on.""" - (quantity, num_views) = raw_inputs['images'].get_shape().as_list()[:2] - - inputs = dict() - inputs['voxels'] = raw_inputs['voxels'] - - for k in xrange(step_size): - inputs['images_%d' % (k + 1)] = [] - inputs['matrix_%d' % (k + 1)] = [] - - for n in xrange(quantity): - selected_views = np.random.choice(num_views, step_size, replace=False) - for k in xrange(step_size): - view_selected = selected_views[k] - inputs['images_%d' % - (k + 1)].append(raw_inputs['images'][n, view_selected, :, :, :]) - tf_matrix = self.get_transform_matrix(view_selected) - inputs['matrix_%d' % (k + 1)].append(tf_matrix) - - for k in xrange(step_size): - inputs['images_%d' % (k + 1)] = tf.stack(inputs['images_%d' % (k + 1)]) - inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)]) - - return inputs - - def get_init_fn(self, scopes): - """Initialization assignment operator function used while training.""" - if not self._params.init_model: - return None - - is_trainable = lambda x: x in tf.trainable_variables() - var_list = [] - for scope in scopes: - var_list.extend( - filter(is_trainable, tf.contrib.framework.get_model_variables(scope))) - - init_assign_op, init_feed_dict = slim.assign_from_checkpoint( - self._params.init_model, var_list) - - def init_assign_function(sess): - sess.run(init_assign_op, init_feed_dict) - - return init_assign_function - - def get_train_op_for_scope(self, loss, optimizer, scopes): - """Train operation function for the given scope used file training.""" - is_trainable = lambda x: x in tf.trainable_variables() - - var_list = [] - update_ops = [] - - for 
scope in scopes: - var_list.extend( - filter(is_trainable, tf.contrib.framework.get_model_variables(scope))) - update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)) - - return slim.learning.create_train_op( - loss, - optimizer, - update_ops=update_ops, - variables_to_train=var_list, - clip_gradient_norm=self._params.clip_gradient_norm) - - def write_disk_grid(self, - global_step, - log_dir, - input_images, - gt_projs, - pred_projs, - pred_voxels=None): - """Function called by TF to save the prediction periodically.""" - summary_freq = self._params.save_every - - def write_grid(input_images, gt_projs, pred_projs, pred_voxels, - global_step): - """Native python function to call for writing images to files.""" - grid = _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels) - - if global_step % summary_freq == 0: - img_path = os.path.join(log_dir, '%s.jpg' % str(global_step)) - utils.save_image(grid, img_path) - with open( - os.path.join(log_dir, 'pred_voxels_%s' % str(global_step)), - 'w') as fout: - np.save(fout, pred_voxels) - with open( - os.path.join(log_dir, 'input_images_%s' % str(global_step)), - 'w') as fout: - np.save(fout, input_images) - - return grid - - py_func_args = [ - input_images, gt_projs, pred_projs, pred_voxels, global_step - ] - save_grid_op = tf.py_func(write_grid, py_func_args, [tf.uint8], - 'wrtie_grid')[0] - slim.summaries.add_image_summary( - tf.expand_dims(save_grid_op, axis=0), name='grid_vis') - return save_grid_op - - -def _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels): - """Build the visualization grid with py_func.""" - quantity, img_height, img_width = input_images.shape[:3] - for row in xrange(int(quantity / 3)): - for col in xrange(3): - index = row * 3 + col - input_img_ = input_images[index, :, :, :] - gt_proj_ = gt_projs[index, :, :, :] - pred_proj_ = pred_projs[index, :, :, :] - pred_voxel_ = utils.display_voxel(pred_voxels[index, :, :, :, 0]) - pred_voxel_ = utils.resize_image(pred_voxel_, img_height, img_width) - if col == 0: - tmp_ = np.concatenate([input_img_, gt_proj_, pred_proj_, pred_voxel_], - 1) - else: - tmp_ = np.concatenate( - [tmp_, input_img_, gt_proj_, pred_proj_, pred_voxel_], 1) - if row == 0: - out_grid = tmp_ - else: - out_grid = np.concatenate([out_grid, tmp_], 0) - - out_grid = out_grid.astype(np.uint8) - return out_grid diff --git a/research/ptn/nets/BUILD b/research/ptn/nets/BUILD deleted file mode 100644 index 987499341..000000000 --- a/research/ptn/nets/BUILD +++ /dev/null @@ -1,64 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -py_library( - name = "deeprotator_factory", - srcs = ["deeprotator_factory.py"], - deps = [ - ":ptn_encoder", - ":ptn_im_decoder", - ":ptn_rotator", - ], -) - -py_library( - name = "im2vox_factory", - srcs = ["im2vox_factory.py"], - deps = [ - ":perspective_projector", - ":ptn_encoder", - ":ptn_vox_decoder", - ], -) - -py_library( - name = "perspective_projector", - srcs = ["perspective_projector.py"], - deps = [ - ":perspective_transform", - ], -) - -py_library( - name = "perspective_transform", - srcs = ["perspective_transform.py"], - deps = [ - ], -) - -py_library( - name = "ptn_encoder", - srcs = ["ptn_encoder.py"], - deps = [ - ], -) - -py_library( - name = "ptn_im_decoder", - srcs = ["ptn_im_decoder.py"], - deps = [ - ], -) - -py_library( - name = "ptn_rotator", - srcs = ["ptn_rotator.py"], - deps = [ - ], -) - -py_library( - name = "ptn_vox_decoder", - srcs = ["ptn_vox_decoder.py"], - deps = [ - ], -) diff --git 
a/research/ptn/nets/deeprotator_factory.py b/research/ptn/nets/deeprotator_factory.py
deleted file mode 100644
index e16170c41..000000000
--- a/research/ptn/nets/deeprotator_factory.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Factory module for different encoder/decoder network models."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from nets import ptn_encoder
-from nets import ptn_im_decoder
-from nets import ptn_rotator
-
-_NAME_TO_NETS = {
-    'ptn_encoder': ptn_encoder,
-    'ptn_rotator': ptn_rotator,
-    'ptn_im_decoder': ptn_im_decoder,
-}
-
-
-def _get_network(name):
-  """Gets a single network component."""
-
-  if name not in _NAME_TO_NETS:
-    raise ValueError('Network name [%s] not recognized.' % name)
-  return _NAME_TO_NETS[name].model
-
-
-def get(params, is_training=False, reuse=False):
-  """Factory function to retrieve a network model.
-
-  Args:
-    params: Different parameters used throughout ptn, typically FLAGS (dict)
-    is_training: Set to True while training (boolean)
-    reuse: Set to True if either using a pre-trained model or
-      in the training loop while the graph has already been built (boolean)
-  Returns:
-    Model function for network (inputs to outputs)
-  """
-
-  def model(inputs):
-    """Model function corresponding to a specific network architecture."""
-    outputs = {}
-
-    # First, build the encoder.
-    encoder_fn = _get_network(params.encoder_name)
-    with tf.variable_scope('encoder', reuse=reuse):
-      # Produces id/pose units
-      features = encoder_fn(inputs['images_0'], params, is_training)
-      outputs['ids'] = features['ids']
-      outputs['poses_0'] = features['poses']
-
-    # Second, build the rotator and decoder.
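-    # The rotator is a bilinear layer (see nets/ptn_rotator.py): it takes the
-    # current pose embedding plus a one-hot action (rotate one step one way,
-    # no-op, or one step the other way) and predicts the pose embedding at
-    # the next viewpoint; the decoder then renders an image and mask from
-    # the (identity, rotated pose) pair.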
- rotator_fn = _get_network(params.rotator_name) - with tf.variable_scope('rotator', reuse=reuse): - outputs['poses_1'] = rotator_fn(outputs['poses_0'], inputs['actions'], - params, is_training) - decoder_fn = _get_network(params.decoder_name) - with tf.variable_scope('decoder', reuse=reuse): - dec_output = decoder_fn(outputs['ids'], outputs['poses_1'], params, - is_training) - outputs['images_1'] = dec_output['images'] - outputs['masks_1'] = dec_output['masks'] - - # Third, build the recurrent connection - for k in range(1, params.step_size): - with tf.variable_scope('rotator', reuse=True): - outputs['poses_%d' % (k + 1)] = rotator_fn( - outputs['poses_%d' % k], inputs['actions'], params, is_training) - with tf.variable_scope('decoder', reuse=True): - dec_output = decoder_fn(outputs['ids'], outputs['poses_%d' % (k + 1)], - params, is_training) - outputs['images_%d' % (k + 1)] = dec_output['images'] - outputs['masks_%d' % (k + 1)] = dec_output['masks'] - - return outputs - - return model diff --git a/research/ptn/nets/im2vox_factory.py b/research/ptn/nets/im2vox_factory.py deleted file mode 100644 index c54b96c24..000000000 --- a/research/ptn/nets/im2vox_factory.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Factory module for getting the complete image to voxel generation network.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from nets import perspective_projector -from nets import ptn_encoder -from nets import ptn_vox_decoder - -_NAME_TO_NETS = { - 'ptn_encoder': ptn_encoder, - 'ptn_vox_decoder': ptn_vox_decoder, - 'perspective_projector': perspective_projector, -} - - -def _get_network(name): - """Gets a single encoder/decoder network model.""" - - if name not in _NAME_TO_NETS: - raise ValueError('Network name [%s] not recognized.' % name) - return _NAME_TO_NETS[name].model - - -def get(params, is_training=False, reuse=False, run_projection=True): - """Factory function to get the training/pretraining im->vox model (NIPS16). - - Args: - params: Different parameters used througout ptn, typically FLAGS (dict). - is_training: Set to True if while training (boolean). - reuse: Set as True if sharing variables with a model that has already - been built (boolean). - run_projection: Set as False if not interested in mask and projection - images. Useful in evaluation routine (boolean). - Returns: - Model function for network (inputs to outputs). 
- """ - def model(inputs): - """Model function corresponding to a specific network architecture.""" - outputs = {} - - # First, build the encoder - encoder_fn = _get_network(params.encoder_name) - with tf.variable_scope('encoder', reuse=reuse): - # Produces id/pose units - enc_outputs = encoder_fn(inputs['images_1'], params, is_training) - outputs['ids_1'] = enc_outputs['ids'] - - # Second, build the decoder and projector - decoder_fn = _get_network(params.decoder_name) - with tf.variable_scope('decoder', reuse=reuse): - outputs['voxels_1'] = decoder_fn(outputs['ids_1'], params, is_training) - if run_projection: - projector_fn = _get_network(params.projector_name) - with tf.variable_scope('projector', reuse=reuse): - outputs['projs_1'] = projector_fn( - outputs['voxels_1'], inputs['matrix_1'], params, is_training) - # Infer the ground-truth mask - with tf.variable_scope('oracle', reuse=reuse): - outputs['masks_1'] = projector_fn(inputs['voxels'], inputs['matrix_1'], - params, False) - - # Third, build the entire graph (bundled strategy described in PTN paper) - for k in range(1, params.step_size): - with tf.variable_scope('projector', reuse=True): - outputs['projs_%d' % (k + 1)] = projector_fn( - outputs['voxels_1'], inputs['matrix_%d' % - (k + 1)], params, is_training) - with tf.variable_scope('oracle', reuse=True): - outputs['masks_%d' % (k + 1)] = projector_fn( - inputs['voxels'], inputs['matrix_%d' % (k + 1)], params, False) - - return outputs - - return model diff --git a/research/ptn/nets/perspective_projector.py b/research/ptn/nets/perspective_projector.py deleted file mode 100644 index 38c7df86b..000000000 --- a/research/ptn/nets/perspective_projector.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""3D->2D projector model as used in PTN (NIPS16).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from nets import perspective_transform - - -def model(voxels, transform_matrix, params, is_training): - """Model transforming the 3D voxels into 2D projections. - - Args: - voxels: A tensor of size [batch, depth, height, width, channel] - representing the input of projection layer (tf.float32). - transform_matrix: A tensor of size [batch, 16] representing - the flattened 4-by-4 matrix for transformation (tf.float32). - params: Model parameters (dict). - is_training: Set to True if while training (boolean). - - Returns: - A transformed tensor (tf.float32) - - """ - del is_training # Doesn't make a difference for projector - # Rearrangement (batch, z, y, x, channel) --> (batch, y, z, x, channel). - # By the standard, projection happens along z-axis but the voxels - # are stored in a different way. So we need to switch the y and z - # axis for transformation operation. 
- voxels = tf.transpose(voxels, [0, 2, 1, 3, 4]) - z_near = params.focal_length - z_far = params.focal_length + params.focal_range - transformed_voxels = perspective_transform.transformer( - voxels, transform_matrix, [params.vox_size] * 3, z_near, z_far) - views = tf.reduce_max(transformed_voxels, [1]) - views = tf.reverse(views, [1]) - return views diff --git a/research/ptn/nets/perspective_transform.py b/research/ptn/nets/perspective_transform.py deleted file mode 100644 index 1c01f15f2..000000000 --- a/research/ptn/nets/perspective_transform.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Perspective Transformer Layer Implementation. - -Transform the volume based on 4 x 4 perspective projection matrix. - -Reference: -(1) "Perspective Transformer Nets: Perspective Transformer Nets: -Learning Single-View 3D Object Reconstruction without 3D Supervision." -Xinchen Yan, Jimei Yang, Ersin Yumer, Yijie Guo, Honglak Lee. In NIPS 2016 -https://papers.nips.cc/paper/6206-perspective-transformer-nets-learning-single-view-3d-object-reconstruction-without-3d-supervision.pdf - -(2) Official implementation in Torch: https://github.com/xcyan/ptnbhwd - -(3) 2D Transformer implementation in TF: -github.com/tensorflow/models/tree/master/research/transformer - -""" - -import tensorflow as tf - - -def transformer(voxels, - theta, - out_size, - z_near, - z_far, - name='PerspectiveTransformer'): - """Perspective Transformer Layer. - - Args: - voxels: A tensor of size [num_batch, depth, height, width, num_channels]. - It is the output of a deconv/upsampling conv network (tf.float32). - theta: A tensor of size [num_batch, 16]. - It is the inverse camera transformation matrix (tf.float32). - out_size: A tuple representing the size of output of - transformer layer (float). - z_near: A number representing the near clipping plane (float). - z_far: A number representing the far clipping plane (float). - - Returns: - A transformed tensor (tf.float32). - - """ - def _repeat(x, n_repeats): - with tf.variable_scope('_repeat'): - rep = tf.transpose( - tf.expand_dims(tf.ones(shape=tf.stack([ - n_repeats, - ])), 1), [1, 0]) - rep = tf.to_int32(rep) - x = tf.matmul(tf.reshape(x, (-1, 1)), rep) - return tf.reshape(x, [-1]) - - def _interpolate(im, x, y, z, out_size): - """Bilinear interploation layer. - - Args: - im: A 5D tensor of size [num_batch, depth, height, width, num_channels]. - It is the input volume for the transformation layer (tf.float32). - x: A tensor of size [num_batch, out_depth, out_height, out_width] - representing the inverse coordinate mapping for x (tf.float32). - y: A tensor of size [num_batch, out_depth, out_height, out_width] - representing the inverse coordinate mapping for y (tf.float32). - z: A tensor of size [num_batch, out_depth, out_height, out_width] - representing the inverse coordinate mapping for z (tf.float32). 
- out_size: A tuple representing the output size of transformation layer - (float). - - Returns: - A transformed tensor (tf.float32). - - """ - with tf.variable_scope('_interpolate'): - num_batch = im.get_shape().as_list()[0] - depth = im.get_shape().as_list()[1] - height = im.get_shape().as_list()[2] - width = im.get_shape().as_list()[3] - channels = im.get_shape().as_list()[4] - - x = tf.to_float(x) - y = tf.to_float(y) - z = tf.to_float(z) - depth_f = tf.to_float(depth) - height_f = tf.to_float(height) - width_f = tf.to_float(width) - # Number of disparity interpolated. - out_depth = out_size[0] - out_height = out_size[1] - out_width = out_size[2] - zero = tf.zeros([], dtype='int32') - # 0 <= z < depth, 0 <= y < height & 0 <= x < width. - max_z = tf.to_int32(tf.shape(im)[1] - 1) - max_y = tf.to_int32(tf.shape(im)[2] - 1) - max_x = tf.to_int32(tf.shape(im)[3] - 1) - - # Converts scale indices from [-1, 1] to [0, width/height/depth]. - x = (x + 1.0) * (width_f) / 2.0 - y = (y + 1.0) * (height_f) / 2.0 - z = (z + 1.0) * (depth_f) / 2.0 - - x0 = tf.to_int32(tf.floor(x)) - x1 = x0 + 1 - y0 = tf.to_int32(tf.floor(y)) - y1 = y0 + 1 - z0 = tf.to_int32(tf.floor(z)) - z1 = z0 + 1 - - x0_clip = tf.clip_by_value(x0, zero, max_x) - x1_clip = tf.clip_by_value(x1, zero, max_x) - y0_clip = tf.clip_by_value(y0, zero, max_y) - y1_clip = tf.clip_by_value(y1, zero, max_y) - z0_clip = tf.clip_by_value(z0, zero, max_z) - z1_clip = tf.clip_by_value(z1, zero, max_z) - dim3 = width - dim2 = width * height - dim1 = width * height * depth - base = _repeat( - tf.range(num_batch) * dim1, out_depth * out_height * out_width) - base_z0_y0 = base + z0_clip * dim2 + y0_clip * dim3 - base_z0_y1 = base + z0_clip * dim2 + y1_clip * dim3 - base_z1_y0 = base + z1_clip * dim2 + y0_clip * dim3 - base_z1_y1 = base + z1_clip * dim2 + y1_clip * dim3 - - idx_z0_y0_x0 = base_z0_y0 + x0_clip - idx_z0_y0_x1 = base_z0_y0 + x1_clip - idx_z0_y1_x0 = base_z0_y1 + x0_clip - idx_z0_y1_x1 = base_z0_y1 + x1_clip - idx_z1_y0_x0 = base_z1_y0 + x0_clip - idx_z1_y0_x1 = base_z1_y0 + x1_clip - idx_z1_y1_x0 = base_z1_y1 + x0_clip - idx_z1_y1_x1 = base_z1_y1 + x1_clip - - # Use indices to lookup pixels in the flat image and restore - # channels dim - im_flat = tf.reshape(im, tf.stack([-1, channels])) - im_flat = tf.to_float(im_flat) - i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0) - i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1) - i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0) - i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1) - i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0) - i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1) - i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0) - i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1) - - # Finally calculate interpolated values. - x0_f = tf.to_float(x0) - x1_f = tf.to_float(x1) - y0_f = tf.to_float(y0) - y1_f = tf.to_float(y1) - z0_f = tf.to_float(z0) - z1_f = tf.to_float(z1) - # Check the out-of-boundary case. 
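-      # Trilinear interpolation: each output sample blends the 8 voxel
-      # corners surrounding (x, y, z). A corner's weight is the product of
-      # the distances to the opposite corner, and the *_valid masks below
-      # zero out weights whose coordinates fall outside the volume, so
-      # out-of-range samples contribute empty space rather than clamped
-      # border values.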
- x0_valid = tf.to_float( - tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0)) - x1_valid = tf.to_float( - tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0)) - y0_valid = tf.to_float( - tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0)) - y1_valid = tf.to_float( - tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0)) - z0_valid = tf.to_float( - tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0)) - z1_valid = tf.to_float( - tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0)) - - w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * - (z1_f - z) * x1_valid * y1_valid * z1_valid), - 1) - w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * - (z1_f - z) * x0_valid * y1_valid * z1_valid), - 1) - w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * - (z1_f - z) * x1_valid * y0_valid * z1_valid), - 1) - w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) * - (z1_f - z) * x0_valid * y0_valid * z1_valid), - 1) - w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * - (z - z0_f) * x1_valid * y1_valid * z0_valid), - 1) - w_z1_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * - (z - z0_f) * x0_valid * y1_valid * z0_valid), - 1) - w_z1_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * - (z - z0_f) * x1_valid * y0_valid * z0_valid), - 1) - w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) * - (z - z0_f) * x0_valid * y0_valid * z0_valid), - 1) - - output = tf.add_n([ - w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1, - w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1, - w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1, - w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1 - ]) - return output - - def _meshgrid(depth, height, width, z_near, z_far): - with tf.variable_scope('_meshgrid'): - x_t = tf.reshape( - tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]), - [depth, height, width]) - y_t = tf.reshape( - tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]), - [depth, width, height]) - y_t = tf.transpose(y_t, [0, 2, 1]) - sample_grid = tf.tile( - tf.linspace(float(z_near), float(z_far), depth), [width * height]) - z_t = tf.reshape(sample_grid, [height, width, depth]) - z_t = tf.transpose(z_t, [2, 0, 1]) - - z_t = 1 / z_t - d_t = 1 / z_t - x_t /= z_t - y_t /= z_t - - x_t_flat = tf.reshape(x_t, (1, -1)) - y_t_flat = tf.reshape(y_t, (1, -1)) - d_t_flat = tf.reshape(d_t, (1, -1)) - - ones = tf.ones_like(x_t_flat) - grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0) - return grid - - def _transform(theta, input_dim, out_size, z_near, z_far): - with tf.variable_scope('_transform'): - num_batch = input_dim.get_shape().as_list()[0] - num_channels = input_dim.get_shape().as_list()[4] - theta = tf.reshape(theta, (-1, 4, 4)) - theta = tf.cast(theta, 'float32') - - out_depth = out_size[0] - out_height = out_size[1] - out_width = out_size[2] - grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far) - grid = tf.expand_dims(grid, 0) - grid = tf.reshape(grid, [-1]) - grid = tf.tile(grid, tf.stack([num_batch])) - grid = tf.reshape(grid, tf.stack([num_batch, 4, -1])) - - # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1). 
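-      # _meshgrid stacked the homogeneous target coordinates in row order
-      # (d, y, x, 1), so after multiplying by the 4x4 theta, row 0 of t_g
-      # holds the source z coordinates, row 1 holds y and row 2 holds x.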
- t_g = tf.matmul(theta, grid) - z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1]) - y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1]) - x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1]) - - z_s_flat = tf.reshape(z_s, [-1]) - y_s_flat = tf.reshape(y_s, [-1]) - x_s_flat = tf.reshape(x_s, [-1]) - - input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat, - out_size) - - output = tf.reshape( - input_transformed, - tf.stack([num_batch, out_depth, out_height, out_width, num_channels])) - - return output - - with tf.variable_scope(name): - output = _transform(theta, voxels, out_size, z_near, z_far) - return output diff --git a/research/ptn/nets/ptn_encoder.py b/research/ptn/nets/ptn_encoder.py deleted file mode 100644 index ede556834..000000000 --- a/research/ptn/nets/ptn_encoder.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Training/Pretraining encoder as used in PTN (NIPS16).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -slim = tf.contrib.slim - - -def _preprocess(images): - return images * 2 - 1 - - -def model(images, params, is_training): - """Model encoding the images into view-invariant embedding.""" - del is_training # Unused - image_size = images.get_shape().as_list()[1] - f_dim = params.f_dim - fc_dim = params.fc_dim - z_dim = params.z_dim - outputs = dict() - - images = _preprocess(images) - with slim.arg_scope( - [slim.conv2d, slim.fully_connected], - weights_initializer=tf.truncated_normal_initializer(stddev=0.02, seed=1)): - h0 = slim.conv2d(images, f_dim, [5, 5], stride=2, activation_fn=tf.nn.relu) - h1 = slim.conv2d(h0, f_dim * 2, [5, 5], stride=2, activation_fn=tf.nn.relu) - h2 = slim.conv2d(h1, f_dim * 4, [5, 5], stride=2, activation_fn=tf.nn.relu) - # Reshape layer - s8 = image_size // 8 - h2 = tf.reshape(h2, [-1, s8 * s8 * f_dim * 4]) - h3 = slim.fully_connected(h2, fc_dim, activation_fn=tf.nn.relu) - h4 = slim.fully_connected(h3, fc_dim, activation_fn=tf.nn.relu) - - outputs['ids'] = slim.fully_connected(h4, z_dim, activation_fn=tf.nn.relu) - outputs['poses'] = slim.fully_connected(h4, z_dim, activation_fn=tf.nn.relu) - return outputs diff --git a/research/ptn/nets/ptn_im_decoder.py b/research/ptn/nets/ptn_im_decoder.py deleted file mode 100644 index 8ee512e87..000000000 --- a/research/ptn/nets/ptn_im_decoder.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Image/Mask decoder used while pretraining the network.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -slim = tf.contrib.slim - -_FEATURE_MAP_SIZE = 8 - - -def _postprocess_im(images): - """Performs post-processing for the images returned from conv net. - - Transforms the value from [-1, 1] to [0, 1]. - """ - return (images + 1) * 0.5 - - -def model(identities, poses, params, is_training): - """Decoder model to get image and mask from latent embedding.""" - del is_training - f_dim = params.f_dim - fc_dim = params.fc_dim - - outputs = dict() - - with slim.arg_scope( - [slim.fully_connected, slim.conv2d_transpose], - weights_initializer=tf.truncated_normal_initializer(stddev=0.02, seed=1)): - # Concatenate the identity and pose units - h0 = tf.concat([identities, poses], 1) - h0 = slim.fully_connected(h0, fc_dim, activation_fn=tf.nn.relu) - h1 = slim.fully_connected(h0, fc_dim, activation_fn=tf.nn.relu) - - # Mask decoder - dec_m0 = slim.fully_connected( - h1, (_FEATURE_MAP_SIZE**2) * f_dim * 2, activation_fn=tf.nn.relu) - dec_m0 = tf.reshape( - dec_m0, [-1, _FEATURE_MAP_SIZE, _FEATURE_MAP_SIZE, f_dim * 2]) - - dec_m1 = slim.conv2d_transpose( - dec_m0, f_dim, [5, 5], stride=2, activation_fn=tf.nn.relu) - dec_m2 = slim.conv2d_transpose( - dec_m1, int(f_dim / 2), [5, 5], stride=2, activation_fn=tf.nn.relu) - dec_m3 = slim.conv2d_transpose( - dec_m2, 1, [5, 5], stride=2, activation_fn=tf.nn.sigmoid) - - # Image decoder - dec_i0 = slim.fully_connected( - h1, (_FEATURE_MAP_SIZE**2) * f_dim * 4, activation_fn=tf.nn.relu) - dec_i0 = tf.reshape( - dec_i0, [-1, _FEATURE_MAP_SIZE, _FEATURE_MAP_SIZE, f_dim * 4]) - - dec_i1 = slim.conv2d_transpose( - dec_i0, f_dim * 2, [5, 5], stride=2, activation_fn=tf.nn.relu) - dec_i2 = slim.conv2d_transpose( - dec_i1, f_dim * 2, [5, 5], stride=2, activation_fn=tf.nn.relu) - dec_i3 = slim.conv2d_transpose( - dec_i2, 3, [5, 5], stride=2, activation_fn=tf.nn.tanh) - - outputs = dict() - outputs['images'] = _postprocess_im(dec_i3) - outputs['masks'] = dec_m3 - return outputs diff --git a/research/ptn/nets/ptn_rotator.py b/research/ptn/nets/ptn_rotator.py deleted file mode 100644 index 2cc73bb8d..000000000 --- a/research/ptn/nets/ptn_rotator.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""Creates rotator network model.
-
-This model performs the out-of-plane rotations given input image and action.
-The action is either no-op, rotate clockwise or rotate counter-clockwise.
-
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-def bilinear(input_x, input_y, output_size):
-  """Defines the bilinear transformation layer."""
-  shape_x = input_x.get_shape().as_list()
-  shape_y = input_y.get_shape().as_list()
-
-  weights_initializer = tf.truncated_normal_initializer(stddev=0.02,
-                                                        seed=1)
-  biases_initializer = tf.constant_initializer(0.0)
-
-  matrix = tf.get_variable("Matrix", [shape_x[1], shape_y[1], output_size],
-                           tf.float32, initializer=weights_initializer)
-  bias = tf.get_variable("Bias", [output_size],
-                         initializer=biases_initializer)
-  # Add to GraphKeys.MODEL_VARIABLES
-  tf.contrib.framework.add_model_variable(matrix)
-  tf.contrib.framework.add_model_variable(bias)
-  # Define the transformation
-  h0 = tf.matmul(input_x, tf.reshape(matrix,
-                                     [shape_x[1], shape_y[1]*output_size]))
-  h0 = tf.reshape(h0, [-1, shape_y[1], output_size])
-  h1 = tf.tile(tf.reshape(input_y, [-1, shape_y[1], 1]),
-               [1, 1, output_size])
-  h1 = tf.multiply(h0, h1)
-  return tf.reduce_sum(h1, 1) + bias
-
-
-def model(poses, actions, params, is_training):
-  """Model for performing rotation."""
-  del is_training  # Unused
-  return bilinear(poses, actions, params.z_dim)
diff --git a/research/ptn/nets/ptn_vox_decoder.py b/research/ptn/nets/ptn_vox_decoder.py
deleted file mode 100644
index 87ea27fa2..000000000
--- a/research/ptn/nets/ptn_vox_decoder.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Training decoder as used in PTN (NIPS16)."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-slim = tf.contrib.slim
-
-
-@tf.contrib.framework.add_arg_scope
-def conv3d_transpose(inputs,
-                     num_outputs,
-                     kernel_size,
-                     stride=1,
-                     padding='SAME',
-                     activation_fn=tf.nn.relu,
-                     weights_initializer=tf.contrib.layers.xavier_initializer(),
-                     biases_initializer=tf.zeros_initializer(),
-                     reuse=None,
-                     trainable=True,
-                     scope=None):
-  """Wrapper for conv3d_transpose layer.
-
-  This function wraps tf.nn.conv3d_transpose with a basic non-linearity.
-  It creates a variable called `weights`, representing the kernel,
-  that is convolved with the input. A second variable called `biases`
-  is added to the result of the operation.
- """ - with tf.variable_scope( - scope, 'Conv3d_transpose', [inputs], reuse=reuse): - dtype = inputs.dtype.base_dtype - kernel_d, kernel_h, kernel_w = kernel_size[0:3] - num_filters_in = inputs.get_shape()[4] - - weights_shape = [kernel_d, kernel_h, kernel_w, num_outputs, num_filters_in] - weights = tf.get_variable('weights', - shape=weights_shape, - dtype=dtype, - initializer=weights_initializer, - trainable=trainable) - tf.contrib.framework.add_model_variable(weights) - - input_shape = inputs.get_shape().as_list() - batch_size = input_shape[0] - depth = input_shape[1] - height = input_shape[2] - width = input_shape[3] - - def get_deconv_dim(dim_size, stride_size): - # Only support padding='SAME'. - if isinstance(dim_size, tf.Tensor): - dim_size = tf.multiply(dim_size, stride_size) - elif dim_size is not None: - dim_size *= stride_size - return dim_size - - out_depth = get_deconv_dim(depth, stride) - out_height = get_deconv_dim(height, stride) - out_width = get_deconv_dim(width, stride) - - out_shape = [batch_size, out_depth, out_height, out_width, num_outputs] - outputs = tf.nn.conv3d_transpose(inputs, weights, out_shape, - [1, stride, stride, stride, 1], - padding=padding) - - outputs.set_shape(out_shape) - - if biases_initializer is not None: - biases = tf.get_variable('biases', - shape=[num_outputs,], - dtype=dtype, - initializer=biases_initializer, - trainable=trainable) - tf.contrib.framework.add_model_variable(biases) - outputs = tf.nn.bias_add(outputs, biases) - - if activation_fn: - outputs = activation_fn(outputs) - return outputs - - -def model(identities, params, is_training): - """Model transforming embedding to voxels.""" - del is_training # Unused - f_dim = params.f_dim - - # Please refer to the original implementation: github.com/xcyan/nips16_PTN - # In TF replication, we use a slightly different architecture. - with slim.arg_scope( - [slim.fully_connected, conv3d_transpose], - weights_initializer=tf.truncated_normal_initializer(stddev=0.02, seed=1)): - h0 = slim.fully_connected( - identities, 4 * 4 * 4 * f_dim * 8, activation_fn=tf.nn.relu) - h1 = tf.reshape(h0, [-1, 4, 4, 4, f_dim * 8]) - h1 = conv3d_transpose( - h1, f_dim * 4, [4, 4, 4], stride=2, activation_fn=tf.nn.relu) - h2 = conv3d_transpose( - h1, int(f_dim * 3 / 2), [5, 5, 5], stride=2, activation_fn=tf.nn.relu) - h3 = conv3d_transpose( - h2, 1, [6, 6, 6], stride=2, activation_fn=tf.nn.sigmoid) - return h3 diff --git a/research/ptn/pretrain_rotator.py b/research/ptn/pretrain_rotator.py deleted file mode 100644 index 6307f2d4f..000000000 --- a/research/ptn/pretrain_rotator.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""Contains training plan for the Rotator model (Pretraining in NIPS16).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from tensorflow import app - -import model_rotator as model - -flags = tf.app.flags -slim = tf.contrib.slim - -flags.DEFINE_string('inp_dir', '', - 'Directory path containing the input data (tfrecords).') -flags.DEFINE_string( - 'dataset_name', 'shapenet_chair', - 'Dataset name that is to be used for training and evaluation.') -flags.DEFINE_integer('z_dim', 512, '') -flags.DEFINE_integer('a_dim', 3, '') -flags.DEFINE_integer('f_dim', 64, '') -flags.DEFINE_integer('fc_dim', 1024, '') -flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.') -flags.DEFINE_integer('image_size', 64, - 'Input images dimension (pixels) - width & height.') -flags.DEFINE_integer('step_size', 1, 'Steps to take for rotation in pretraining.') -flags.DEFINE_integer('batch_size', 32, 'Batch size for training.') -flags.DEFINE_string('encoder_name', 'ptn_encoder', - 'Name of the encoder network being used.') -flags.DEFINE_string('decoder_name', 'ptn_im_decoder', - 'Name of the decoder network being used.') -flags.DEFINE_string('rotator_name', 'ptn_rotator', - 'Name of the rotator network being used.') -# Save options -flags.DEFINE_string('checkpoint_dir', '/tmp/ptn_train/', - 'Directory path for saving trained models and other data.') -flags.DEFINE_string('model_name', 'deeprotator_pretrain', - 'Name of the model used in naming the TF job. Must be different for each run.') -flags.DEFINE_string('init_model', None, - 'Checkpoint path of the model to initialize with.') -flags.DEFINE_integer('save_every', 1000, - 'Average period of steps after which we save a model.') -# Optimization -flags.DEFINE_float('image_weight', 10, 'Weighting factor for image loss.') -flags.DEFINE_float('mask_weight', 1, 'Weighting factor for mask loss.') -flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.') -flags.DEFINE_float('weight_decay', 0.001, 'Weight decay parameter while training.') -flags.DEFINE_float('clip_gradient_norm', 0, 'Gradient clim norm, leave 0 if no gradient clipping.') -flags.DEFINE_integer('max_number_of_steps', 320000, 'Maximum number of steps for training.') -# Summary -flags.DEFINE_integer('save_summaries_secs', 15, 'Seconds interval for dumping TF summaries.') -flags.DEFINE_integer('save_interval_secs', 60 * 5, 'Seconds interval to save models.') -# Distribution -flags.DEFINE_string('master', '', 'The address of the tensorflow master if running distributed.') -flags.DEFINE_bool('sync_replicas', False, 'Whether to sync gradients between replicas for optimizer.') -flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas (train tasks).') -flags.DEFINE_integer('backup_workers', 0, 'Number of backup workers.') -flags.DEFINE_integer('ps_tasks', 0, 'Number of ps tasks.') -flags.DEFINE_integer('task', 0, - 'Task identifier flag to be set for each task running in distributed manner. 
Task number 0 ' - 'will be chosen as the chief.') - -FLAGS = flags.FLAGS - - -def main(_): - train_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train') - save_image_dir = os.path.join(train_dir, 'images') - if not os.path.exists(train_dir): - os.makedirs(train_dir) - if not os.path.exists(save_image_dir): - os.makedirs(save_image_dir) - - g = tf.Graph() - with g.as_default(): - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): - global_step = slim.get_or_create_global_step() - ########## - ## data ## - ########## - train_data = model.get_inputs( - FLAGS.inp_dir, - FLAGS.dataset_name, - 'train', - FLAGS.batch_size, - FLAGS.image_size, - is_training=True) - inputs = model.preprocess(train_data, FLAGS.step_size) - ########### - ## model ## - ########### - model_fn = model.get_model_fn(FLAGS, is_training=True) - outputs = model_fn(inputs) - ########## - ## loss ## - ########## - task_loss = model.get_loss(inputs, outputs, FLAGS) - regularization_loss = model.get_regularization_loss( - ['encoder', 'rotator', 'decoder'], FLAGS) - loss = task_loss + regularization_loss - ############### - ## optimizer ## - ############### - optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate) - if FLAGS.sync_replicas: - optimizer = tf.train.SyncReplicasOptimizer( - optimizer, - replicas_to_aggregate=FLAGS.workers_replicas - FLAGS.backup_workers, - total_num_replicas=FLAGS.worker_replicas) - - ############## - ## train_op ## - ############## - train_op = model.get_train_op_for_scope( - loss, optimizer, ['encoder', 'rotator', 'decoder'], FLAGS) - ########### - ## saver ## - ########### - saver = tf.train.Saver(max_to_keep=np.minimum(5, - FLAGS.worker_replicas + 1)) - - if FLAGS.task == 0: - val_data = model.get_inputs( - FLAGS.inp_dir, - FLAGS.dataset_name, - 'val', - FLAGS.batch_size, - FLAGS.image_size, - is_training=False) - val_inputs = model.preprocess(val_data, FLAGS.step_size) - # Note: don't compute loss here - reused_model_fn = model.get_model_fn( - FLAGS, is_training=False, reuse=True) - val_outputs = reused_model_fn(val_inputs) - with tf.device(tf.DeviceSpec(device_type='CPU')): - if FLAGS.step_size == 1: - vis_input_images = val_inputs['images_0'] * 255.0 - vis_output_images = val_inputs['images_1'] * 255.0 - vis_pred_images = val_outputs['images_1'] * 255.0 - vis_pred_masks = (val_outputs['masks_1'] * (-1) + 1) * 255.0 - else: - rep_times = int(np.ceil(32.0 / float(FLAGS.step_size))) - vis_list_1 = [] - vis_list_2 = [] - vis_list_3 = [] - vis_list_4 = [] - for j in xrange(rep_times): - for k in xrange(FLAGS.step_size): - vis_input_image = val_inputs['images_0'][j], - vis_output_image = val_inputs['images_%d' % (k + 1)][j] - vis_pred_image = val_outputs['images_%d' % (k + 1)][j] - vis_pred_mask = val_outputs['masks_%d' % (k + 1)][j] - vis_list_1.append(tf.expand_dims(vis_input_image, 0)) - vis_list_2.append(tf.expand_dims(vis_output_image, 0)) - vis_list_3.append(tf.expand_dims(vis_pred_image, 0)) - vis_list_4.append(tf.expand_dims(vis_pred_mask, 0)) - - vis_list_1 = tf.reshape( - tf.stack(vis_list_1), [ - rep_times * FLAGS.step_size, FLAGS.image_size, - FLAGS.image_size, 3 - ]) - vis_list_2 = tf.reshape( - tf.stack(vis_list_2), [ - rep_times * FLAGS.step_size, FLAGS.image_size, - FLAGS.image_size, 3 - ]) - vis_list_3 = tf.reshape( - tf.stack(vis_list_3), [ - rep_times * FLAGS.step_size, FLAGS.image_size, - FLAGS.image_size, 3 - ]) - vis_list_4 = tf.reshape( - tf.stack(vis_list_4), [ - rep_times * FLAGS.step_size, FLAGS.image_size, - FLAGS.image_size, 1 - ]) - - 
vis_input_images = vis_list_1 * 255.0
-            vis_output_images = vis_list_2 * 255.0
-            vis_pred_images = vis_list_3 * 255.0
-            vis_pred_masks = (vis_list_4 * (-1) + 1) * 255.0
-
-          write_disk_op = model.write_disk_grid(
-              global_step=global_step,
-              summary_freq=FLAGS.save_every,
-              log_dir=save_image_dir,
-              input_images=vis_input_images,
-              output_images=vis_output_images,
-              pred_images=vis_pred_images,
-              pred_masks=vis_pred_masks)
-          with tf.control_dependencies([write_disk_op]):
-            train_op = tf.identity(train_op)
-
-      #############
-      ## init_fn ##
-      #############
-      init_fn = model.get_init_fn(['encoder', 'rotator', 'decoder'], FLAGS)
-
-      ##############
-      ## training ##
-      ##############
-      slim.learning.train(
-          train_op=train_op,
-          logdir=train_dir,
-          init_fn=init_fn,
-          master=FLAGS.master,
-          is_chief=(FLAGS.task == 0),
-          number_of_steps=FLAGS.max_number_of_steps,
-          saver=saver,
-          save_summaries_secs=FLAGS.save_summaries_secs,
-          save_interval_secs=FLAGS.save_interval_secs)
-
-
-if __name__ == '__main__':
-  app.run()
diff --git a/research/ptn/train_ptn.py b/research/ptn/train_ptn.py
deleted file mode 100644
index 1b42245d4..000000000
--- a/research/ptn/train_ptn.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Contains training plan for the Im2vox model."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-
-import numpy as np
-import tensorflow as tf
-
-from tensorflow import app
-
-import model_ptn
-
-flags = tf.app.flags
-slim = tf.contrib.slim
-
-flags.DEFINE_string('inp_dir',
-                    '',
-                    'Directory path containing the input data (tfrecords).')
-flags.DEFINE_string(
-    'dataset_name', 'shapenet_chair',
-    'Dataset name that is to be used for training and evaluation.')
-flags.DEFINE_integer('z_dim', 512, '')
-flags.DEFINE_integer('f_dim', 64, '')
-flags.DEFINE_integer('fc_dim', 1024, '')
-flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.')
-flags.DEFINE_integer('image_size', 64,
-                     'Input images dimension (pixels) - width & height.')
-flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
-flags.DEFINE_integer('step_size', 24,
-                     'Steps to take in rotation to fetch viewpoints.')
-flags.DEFINE_integer('batch_size', 6, 'Batch size while training.')
-flags.DEFINE_float('focal_length', 0.866,
-                   'Focal length parameter used in perspective projection.')
-flags.DEFINE_float('focal_range', 1.732,
-                   'Focal length parameter used in perspective projection.')
-flags.DEFINE_string('encoder_name', 'ptn_encoder',
-                    'Name of the encoder network being used.')
-flags.DEFINE_string('decoder_name', 'ptn_vox_decoder',
-                    'Name of the decoder network being used.')
-flags.DEFINE_string('projector_name', 'perspective_projector',
-                    'Name of the projector network being used.')
-# Save options
-flags.DEFINE_string('checkpoint_dir', '/tmp/ptn_train/',
-                    'Directory path for saving trained models and other data.')
-flags.DEFINE_string('model_name', 'ptn_finetune',
-                    'Name of the model used in naming the TF job. Must be different for each run.')
-flags.DEFINE_string('init_model', None,
-                    'Checkpoint path of the model to initialize with.')
-flags.DEFINE_integer('save_every', 1000,
-                     'Average period of steps after which we save a model.')
-# Optimization
-flags.DEFINE_float('proj_weight', 10, 'Weighting factor for projection loss.')
-flags.DEFINE_float('volume_weight', 0, 'Weighting factor for volume loss.')
-flags.DEFINE_float('viewpoint_weight', 1,
-                   'Weighting factor for viewpoint loss.')
-flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
-flags.DEFINE_float('weight_decay', 0.001,
-                   'Weight decay parameter while training.')
-flags.DEFINE_float('clip_gradient_norm', 0,
-                   'Gradient clip norm, leave 0 if no gradient clipping.')
-flags.DEFINE_integer('max_number_of_steps', 10000,
-                     'Maximum number of steps for training.')
-# Summary
-flags.DEFINE_integer('save_summaries_secs', 15,
-                     'Seconds interval for dumping TF summaries.')
-flags.DEFINE_integer('save_interval_secs', 60 * 5,
-                     'Seconds interval to save models.')
-
-# Scheduling
-flags.DEFINE_string('master', '', 'The address of the tensorflow master')
-flags.DEFINE_bool('sync_replicas', False,
-                  'Whether to sync gradients between replicas for optimizer.')
-flags.DEFINE_integer('worker_replicas', 1,
-                     'Number of worker replicas (train tasks).')
-flags.DEFINE_integer('backup_workers', 0, 'Number of backup workers.')
-flags.DEFINE_integer('ps_tasks', 0, 'Number of ps tasks.')
-flags.DEFINE_integer('task', 0,
-                     'Task identifier flag to be set for each task running in distributed manner. Task number 0 '
-                     'will be chosen as the chief.')
-
-FLAGS = flags.FLAGS
-
-
-def main(_):
-  train_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train')
-  save_image_dir = os.path.join(train_dir, 'images')
-  if not os.path.exists(train_dir):
-    os.makedirs(train_dir)
-  if not os.path.exists(save_image_dir):
-    os.makedirs(save_image_dir)
-
-  g = tf.Graph()
-  with g.as_default():
-    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
-      global_step = slim.get_or_create_global_step()
-      ###########
-      ## model ##
-      ###########
-      model = model_ptn.model_PTN(FLAGS)
-      ##########
-      ## data ##
-      ##########
-      train_data = model.get_inputs(
-          FLAGS.inp_dir,
-          FLAGS.dataset_name,
-          'train',
-          FLAGS.batch_size,
-          FLAGS.image_size,
-          FLAGS.vox_size,
-          is_training=True)
-      inputs = model.preprocess(train_data, FLAGS.step_size)
-      ##############
-      ## model_fn ##
-      ##############
-      model_fn = model.get_model_fn(
-          is_training=True, reuse=False, run_projection=True)
-      outputs = model_fn(inputs)
-      ##################
-      ## train_scopes ##
-      ##################
-      if FLAGS.init_model:
-        train_scopes = ['decoder']
-        init_scopes = ['encoder']
-      else:
-        train_scopes = ['encoder', 'decoder']
-
-      ##########
-      ## loss ##
-      ##########
-      task_loss = model.get_loss(inputs, outputs)
-
-      regularization_loss = model.get_regularization_loss(train_scopes)
-      loss = task_loss + regularization_loss
-      ###############
-      ## optimizer ##
-      ###############
-      optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
-      if FLAGS.sync_replicas:
-        optimizer = tf.train.SyncReplicasOptimizer(
-            optimizer,
-            replicas_to_aggregate=FLAGS.worker_replicas - FLAGS.backup_workers,
-            total_num_replicas=FLAGS.worker_replicas)
-
-      ##############
-      ## train_op ##
-      ##############
-      train_op = model.get_train_op_for_scope(loss, optimizer, train_scopes)
-      ###########
-      ## saver ##
-      ###########
-      saver = tf.train.Saver(max_to_keep=np.minimum(5,
-                                                    FLAGS.worker_replicas + 1))
-
-      if FLAGS.task == 0:
-        params = FLAGS
-        params.batch_size = params.num_views
-        params.step_size = 1
-        model.set_params(params)
-        val_data = model.get_inputs(
-            params.inp_dir,
-            params.dataset_name,
-            'val',
-            params.batch_size,
-            params.image_size,
-            params.vox_size,
-            is_training=False)
-        val_inputs = model.preprocess(val_data, params.step_size)
-        # Note: don't compute loss here
-        reused_model_fn = model.get_model_fn(is_training=False, reuse=True)
-        val_outputs = reused_model_fn(val_inputs)
-
-        with tf.device(tf.DeviceSpec(device_type='CPU')):
-          vis_input_images = val_inputs['images_1'] * 255.0
-          vis_gt_projs = (val_outputs['masks_1'] * (-1) + 1) * 255.0
-          vis_pred_projs = (val_outputs['projs_1'] * (-1) + 1) * 255.0
-
-          vis_gt_projs = tf.concat([vis_gt_projs] * 3, axis=3)
-          vis_pred_projs = tf.concat([vis_pred_projs] * 3, axis=3)
-          # rescale
-          new_size = [FLAGS.image_size] * 2
-          vis_gt_projs = tf.image.resize_nearest_neighbor(
-              vis_gt_projs, new_size)
-          vis_pred_projs = tf.image.resize_nearest_neighbor(
-              vis_pred_projs, new_size)
-          # flip
-          # vis_gt_projs = utils.image_flipud(vis_gt_projs)
-          # vis_pred_projs = utils.image_flipud(vis_pred_projs)
-          # vis_gt_projs is of shape [batch, height, width, channels]
-          write_disk_op = model.write_disk_grid(
-              global_step=global_step,
-              log_dir=save_image_dir,
-              input_images=vis_input_images,
-              gt_projs=vis_gt_projs,
-              pred_projs=vis_pred_projs,
-              input_voxels=val_inputs['voxels'],
-              output_voxels=val_outputs['voxels_1'])
-        with tf.control_dependencies([write_disk_op]):
-          train_op = tf.identity(train_op)
-
-    #############
-    ## init_fn ##
-    #############
-    if FLAGS.init_model:
-      init_fn = model.get_init_fn(init_scopes)
-    else:
-      init_fn = None
-
-    ##############
-    ## training ##
-    ##############
-    slim.learning.train(
-        train_op=train_op,
-        logdir=train_dir,
-        init_fn=init_fn,
-        master=FLAGS.master,
-        is_chief=(FLAGS.task == 0),
-        number_of_steps=FLAGS.max_number_of_steps,
-        saver=saver,
-        save_summaries_secs=FLAGS.save_summaries_secs,
-        save_interval_secs=FLAGS.save_interval_secs)
-
-
-if __name__ == '__main__':
-  app.run()
diff --git a/research/ptn/utils.py b/research/ptn/utils.py
deleted file mode 100644
index adf71731e..000000000
--- a/research/ptn/utils.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Utility functions."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import StringIO
-import matplotlib
-matplotlib.use('Agg')
-from matplotlib import pylab as p
-# axes3d is being used implicitly for visualization.
-from mpl_toolkits.mplot3d import axes3d as p3  # pylint:disable=unused-import
-import numpy as np
-from PIL import Image
-from skimage import measure
-from six.moves import xrange
-
-import tensorflow as tf
-
-
-def save_image(inp_array, image_file):
-  """Function that dumps the image to disk."""
-  inp_array = np.clip(inp_array, 0, 255).astype(np.uint8)
-  image = Image.fromarray(inp_array)
-  buf = StringIO.StringIO()
-  image.save(buf, format='JPEG')
-  with open(image_file, 'w') as f:
-    f.write(buf.getvalue())
-  return None
-
-
-def image_flipud(images):
-  """Function that flips (up-down) the np image."""
-  quantity = images.get_shape().as_list()[0]
-  image_list = []
-  for k in xrange(quantity):
-    image_list.append(tf.image.flip_up_down(images[k, :, :, :]))
-  outputs = tf.stack(image_list)
-  return outputs
-
-
-def resize_image(inp_array, new_height, new_width):
-  """Function that resizes the np image."""
-  inp_array = np.clip(inp_array, 0, 255).astype(np.uint8)
-  image = Image.fromarray(inp_array)
-  # Reverse order
-  image = image.resize((new_width, new_height))
-  return np.array(image)
-
-
-def display_voxel(points, vis_size=128):
-  """Function to display 3D voxel."""
-  try:
-    data = visualize_voxel_spectral(points, vis_size)
-  except ValueError:
-    data = visualize_voxel_scatter(points, vis_size)
-  return data
-
-
-def visualize_voxel_spectral(points, vis_size=128):
-  """Function to visualize voxel (spectral)."""
-  points = np.rint(points)
-  points = np.swapaxes(points, 0, 2)
-  fig = p.figure(figsize=(1, 1), dpi=vis_size)
-  verts, faces = measure.marching_cubes_classic(points, 0, spacing=(0.1, 0.1, 0.1))
-  ax = fig.add_subplot(111, projection='3d')
-  ax.plot_trisurf(
-      verts[:, 0], verts[:, 1], faces, verts[:, 2], cmap='Spectral_r', lw=0.1)
-  ax.set_axis_off()
-  fig.tight_layout(pad=0)
-  fig.canvas.draw()
-  data = np.fromstring(
fig.canvas.tostring_rgb(), dtype=np.uint8, sep='').reshape(
-          vis_size, vis_size, 3)
-  p.close('all')
-  return data
-
-
-def visualize_voxel_scatter(points, vis_size=128):
-  """Function to visualize voxel (scatter)."""
-  points = np.rint(points)
-  points = np.swapaxes(points, 0, 2)
-  fig = p.figure(figsize=(1, 1), dpi=vis_size)
-  ax = fig.add_subplot(111, projection='3d')
-  x = []
-  y = []
-  z = []
-  (x_dimension, y_dimension, z_dimension) = points.shape
-  for i in range(x_dimension):
-    for j in range(y_dimension):
-      for k in range(z_dimension):
-        if points[i, j, k]:
-          x.append(i)
-          y.append(j)
-          z.append(k)
-  ax.scatter3D(x, y, z)
-  ax.set_axis_off()
-  fig.tight_layout(pad=0)
-  fig.canvas.draw()
-  data = np.fromstring(
-      fig.canvas.tostring_rgb(), dtype=np.uint8, sep='').reshape(
-          vis_size, vis_size, 3)
-  p.close('all')
-  return data
diff --git a/research/qa_kg/README.md b/research/qa_kg/README.md
deleted file mode 100644
index 7224ac8f8..000000000
--- a/research/qa_kg/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Module networks for question answering on knowledge graph
-
-This code repository contains a TensorFlow model for question answering on
-knowledge graph with end-to-end module networks. The original paper describing
-end-to-end module networks is as follows.
-
-R. Hu, J. Andreas, M. Rohrbach, T. Darrell, K. Saenko, *Learning to Reason:
-End-to-End Module Networks for Visual Question Answering*. in arXiv preprint
-arXiv:1704.05526, 2017. ([PDF](https://arxiv.org/pdf/1704.05526.pdf))
-
-```
-@article{hu2017learning,
-  title={Learning to Reason: End-to-End Module Networks for Visual Question Answering},
-  author={Hu, Ronghang and Andreas, Jacob and Rohrbach, Marcus and Darrell, Trevor and Saenko, Kate},
-  journal={arXiv preprint arXiv:1704.05526},
-  year={2017}
-}
-```
-
-The code in this repository is based on the original
-[implementation](https://github.com/ronghanghu/n2nmn) for this paper.
-
-## Requirements
-
-1. Install TensorFlow 1.0.0. Follow the [official
-   guide](https://www.tensorflow.org/install/). Please note that newer or older
-   versions of TensorFlow may fail to work due to incompatibility with
-   TensorFlow Fold.
-2. Install TensorFlow Fold. Follow the
-   [setup instructions](https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/setup.md).
-   TensorFlow Fold only supports the Linux platform. We have not tested
-   the code on other platforms.
-
-## Data
-
-1. Download the [MetaQA dataset](https://goo.gl/f3AmcY). Click the button
-   `MetaQA` and then click `Download` in the drop-down list. Extract the zip
-   file after the download completes. Read the documents there for dataset
-   details.
-2. Move the `MetaQA` folder to the root directory of this repository.
-
-## How to use this code
-
-We provide an experiment folder `exp_1_hop`, which applies the implemented model
-to the 1-hop vanilla dataset in MetaQA. More experiment folders are coming soon.
-
-Currently, we provide code for training with ground truth layout, and testing
-the saved model. Configurations can be modified in `config.py`. They can also be
-set via command line parameters.
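-For example, a hypothetical run that overrides two of the defaults defined in
-`config.py` could look like the sketch below (both flag names come from
-`config.py`; the specific values are illustrative only):
-
-```
-python exp_1_hop/train_gt_layout.py --batch_size 64 --log_interval 500
-```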
- -To train the model: - -``` -python exp_1_hop/train_gt_layout.py -``` - -To test the saved model (need to provide the snapshot name): - -``` -python exp_1_hop/test.py --snapshot_name 00010000 -``` - -## Model introduction - -1. In this model, we store the knowledge graph in a key-value based memory. For - each knowledge graph edge (subject, relation, object), we use the (subject, - relation) as the key and the object as the value. -2. All entities and relations are embedded as fixed-dimension vectors. These - embeddings are also end-to-end learned. -3. Neural modules can separately operate on either the key side or the value - side. -4. The attention is shared between keys and corresponding values. -5. The answer output is based on the attention-weighted sum over keys or - values, depending on the output module. - -## Contact -Authors: Yuyu Zhang, Xin Pan - -Pull requests and issues: @yuyuz diff --git a/research/qa_kg/exp_1_hop/config.py b/research/qa_kg/exp_1_hop/config.py deleted file mode 100644 index 95d8cf5f5..000000000 --- a/research/qa_kg/exp_1_hop/config.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import argparse -import os - - -def str2bool(v): - return v.lower() in ('true', '1') - - -def add_argument_group(name): - arg = parser.add_argument_group(name) - arg_lists.append(arg) - return arg - - -def get_config(): - config, unparsed = parser.parse_known_args() - return config, unparsed - - -arg_lists = [] -parser = argparse.ArgumentParser() -work_dir = os.path.abspath(os.path.join(__file__, '../../')) - -net_arg = add_argument_group('Network') -net_arg.add_argument('--lstm_dim', type=int, default=128) -net_arg.add_argument('--num_layers', type=int, default=1) -net_arg.add_argument('--embed_dim_txt', type=int, default=128) -net_arg.add_argument('--embed_dim_nmn', type=int, default=128) -net_arg.add_argument( - '--T_encoder', type=int, default=0) # will be updated when reading data -net_arg.add_argument('--T_decoder', type=int, default=5) - -train_arg = add_argument_group('Training') -train_arg.add_argument('--train_tag', type=str, default='n2nmn') -train_arg.add_argument('--batch_size', type=int, default=128) -train_arg.add_argument('--max_iter', type=int, default=1000000) -train_arg.add_argument('--weight_decay', type=float, default=1e-5) -train_arg.add_argument('--baseline_decay', type=float, default=0.99) -train_arg.add_argument('--max_grad_norm', type=float, default=10) -train_arg.add_argument('--random_seed', type=int, default=123) - -data_arg = add_argument_group('Data') -data_path = work_dir + '/MetaQA/' -data_arg.add_argument('--KB_file', type=str, default=data_path + 'kb.txt') -data_arg.add_argument( - '--data_dir', type=str, default=data_path + '1-hop/vanilla/') -data_arg.add_argument('--train_data_file', type=str, default='qa_train.txt') -data_arg.add_argument('--dev_data_file', type=str, 
default='qa_dev.txt')
-data_arg.add_argument('--test_data_file', type=str, default='qa_test.txt')
-
-exp_arg = add_argument_group('Experiment')
-exp_path = work_dir + '/exp_1_hop/'
-exp_arg.add_argument('--exp_dir', type=str, default=exp_path)
-
-log_arg = add_argument_group('Log')
-log_arg.add_argument('--log_dir', type=str, default='logs')
-log_arg.add_argument('--log_interval', type=int, default=1000)
-log_arg.add_argument('--num_log_samples', type=int, default=3)
-log_arg.add_argument(
-    '--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'])
-
-io_arg = add_argument_group('IO')
-io_arg.add_argument('--model_dir', type=str, default='model')
-io_arg.add_argument('--snapshot_interval', type=int, default=1000)
-io_arg.add_argument('--output_dir', type=str, default='output')
diff --git a/research/qa_kg/exp_1_hop/test.py b/research/qa_kg/exp_1_hop/test.py
deleted file mode 100644
index 2937c0d58..000000000
--- a/research/qa_kg/exp_1_hop/test.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(__file__, '../../')))
-import numpy as np
-import tensorflow as tf
-from config import get_config
-from model_n2nmn.assembler import Assembler
-from model_n2nmn.model import Model
-from util.data_reader import DataReader
-from util.data_reader import SampleBuilder
-from util.misc import prepare_dirs_and_logger
-
-FLAGS = tf.flags.FLAGS
-tf.flags.DEFINE_string('snapshot_name', '00001000', 'snapshot file name')
-
-
-def main(_):
-  config = prepare_dirs_and_logger(config_raw)
-
-  rng = np.random.RandomState(config.random_seed)
-  tf.set_random_seed(config.random_seed)
-  config.rng = rng
-
-  config.module_names = ['_key_find', '_key_filter', '_val_desc', '<eos>']
-  config.gt_layout_tokens = ['_key_find', '_key_filter', '_val_desc', '<eos>']
-  assembler = Assembler(config)
-
-  sample_builder = SampleBuilder(config)
-  config = sample_builder.config  # update T_encoder according to data
-  data_test = sample_builder.data_all['test']
-  data_reader_test = DataReader(
-      config, data_test, assembler, shuffle=False, one_pass=True)
-
-  num_vocab_txt = len(sample_builder.dict_all)
-  num_vocab_nmn = len(assembler.module_names)
-  num_choices = len(sample_builder.dict_all)
-
-  # Network inputs
-  text_seq_batch = tf.placeholder(tf.int32, [None, None])
-  seq_len_batch = tf.placeholder(tf.int32, [None])
-
-  # The model
-  model = Model(
-      config,
-      sample_builder.kb,
-      text_seq_batch,
-      seq_len_batch,
-      num_vocab_txt=num_vocab_txt,
-      num_vocab_nmn=num_vocab_nmn,
-      EOS_idx=assembler.EOS_idx,
-      num_choices=num_choices,
-      decoder_sampling=False)
-  compiler = model.compiler
-  scores = model.scores
-
-  sess = tf.Session()
-  sess.run(tf.global_variables_initializer())
-  snapshot_file = os.path.join(config.model_dir, FLAGS.snapshot_name)
-  tf.logging.info('Snapshot file: %s' %
snapshot_file) - - snapshot_saver = tf.train.Saver() - snapshot_saver.restore(sess, snapshot_file) - - # Evaluation metrics - num_questions = len(data_test.Y) - tf.logging.info('# of test questions: %d' % num_questions) - - answer_correct = 0 - layout_correct = 0 - layout_valid = 0 - for batch in data_reader_test.batches(): - # set up input and output tensors - h = sess.partial_run_setup( - fetches=[model.predicted_tokens, scores], - feeds=[text_seq_batch, seq_len_batch, compiler.loom_input_tensor]) - - # Part 1: Generate module layout - tokens = sess.partial_run( - h, - fetches=model.predicted_tokens, - feed_dict={ - text_seq_batch: batch['input_seq_batch'], - seq_len_batch: batch['seq_len_batch'] - }) - - # Compute accuracy of the predicted layout - gt_tokens = batch['gt_layout_batch'] - layout_correct += np.sum( - np.all( - np.logical_or(tokens == gt_tokens, gt_tokens == assembler.EOS_idx), - axis=0)) - - # Assemble the layout tokens into network structure - expr_list, expr_validity_array = assembler.assemble(tokens) - layout_valid += np.sum(expr_validity_array) - labels = batch['ans_label_batch'] - # Build TensorFlow Fold input for NMN - expr_feed = compiler.build_feed_dict(expr_list) - - # Part 2: Run NMN and learning steps - scores_val = sess.partial_run(h, scores, feed_dict=expr_feed) - - # Compute accuracy - predictions = np.argmax(scores_val, axis=1) - answer_correct += np.sum( - np.logical_and(expr_validity_array, predictions == labels)) - - answer_accuracy = answer_correct * 1.0 / num_questions - layout_accuracy = layout_correct * 1.0 / num_questions - layout_validity = layout_valid * 1.0 / num_questions - - tf.logging.info('test answer accuracy = %f, ' - 'test layout accuracy = %f, ' - 'test layout validity = %f' % - (answer_accuracy, layout_accuracy, layout_validity)) - - -if __name__ == '__main__': - config_raw, unparsed = get_config() - tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/research/qa_kg/exp_1_hop/train_gt_layout.py b/research/qa_kg/exp_1_hop/train_gt_layout.py deleted file mode 100644 index 02bafc428..000000000 --- a/research/qa_kg/exp_1_hop/train_gt_layout.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.join(__file__, '../../')))
-import numpy as np
-import tensorflow as tf
-from config import get_config
-from model_n2nmn.assembler import Assembler
-from model_n2nmn.model import Model
-from util.data_reader import DataReader
-from util.data_reader import SampleBuilder
-from util.misc import prepare_dirs_and_logger
-from util.misc import save_config
-from util.misc import show_all_variables
-
-
-def main(_):
-  config = prepare_dirs_and_logger(config_raw)
-  save_config(config)
-
-  rng = np.random.RandomState(config.random_seed)
-  tf.set_random_seed(config.random_seed)
-  config.rng = rng
-
-  config.module_names = ['_key_find', '_key_filter', '_val_desc', '<eos>']
-  config.gt_layout_tokens = ['_key_find', '_key_filter', '_val_desc', '<eos>']
-  assembler = Assembler(config)
-
-  sample_builder = SampleBuilder(config)
-  config = sample_builder.config  # update T_encoder according to data
-  data_train = sample_builder.data_all['train']
-  data_reader_train = DataReader(
-      config, data_train, assembler, shuffle=True, one_pass=False)
-
-  num_vocab_txt = len(sample_builder.dict_all)
-  num_vocab_nmn = len(assembler.module_names)
-  num_choices = len(sample_builder.dict_all)
-
-  # Network inputs
-  text_seq_batch = tf.placeholder(tf.int32, [None, None])
-  seq_len_batch = tf.placeholder(tf.int32, [None])
-  ans_label_batch = tf.placeholder(tf.int32, [None])
-  use_gt_layout = tf.constant(True, dtype=tf.bool)
-  gt_layout_batch = tf.placeholder(tf.int32, [None, None])
-
-  # The model for training
-  model = Model(
-      config,
-      sample_builder.kb,
-      text_seq_batch,
-      seq_len_batch,
-      num_vocab_txt=num_vocab_txt,
-      num_vocab_nmn=num_vocab_nmn,
-      EOS_idx=assembler.EOS_idx,
-      num_choices=num_choices,
-      decoder_sampling=True,
-      use_gt_layout=use_gt_layout,
-      gt_layout_batch=gt_layout_batch)
-  compiler = model.compiler
-  scores = model.scores
-  log_seq_prob = model.log_seq_prob
-
-  # Loss function
-  softmax_loss_per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits=scores, labels=ans_label_batch)
-  # The final per-sample loss, which is loss for valid expr
-  # and invalid_expr_loss for invalid expr
-  final_loss_per_sample = softmax_loss_per_sample  # All exprs are valid
-
-  avg_sample_loss = tf.reduce_mean(final_loss_per_sample)
-  seq_likelihood_loss = tf.reduce_mean(-log_seq_prob)
-
-  total_training_loss = seq_likelihood_loss + avg_sample_loss
-  total_loss = total_training_loss + config.weight_decay * model.l2_reg
-
-  # Train with Adam optimizer
-  solver = tf.train.AdamOptimizer()
-  gradients = solver.compute_gradients(total_loss)
-
-  # Clip gradient by L2 norm
-  gradients = [(tf.clip_by_norm(g, config.max_grad_norm), v)
-               for g, v in gradients]
-  solver_op = solver.apply_gradients(gradients)
-
-  # Training operation
-  with tf.control_dependencies([solver_op]):
-    train_step = tf.constant(0)
-
-  # Write summary to TensorBoard
-  log_writer = tf.summary.FileWriter(config.log_dir, tf.get_default_graph())
-
-  loss_ph = tf.placeholder(tf.float32, [])
-  entropy_ph = tf.placeholder(tf.float32, [])
-  accuracy_ph = tf.placeholder(tf.float32, [])
-  summary_train = [
-      tf.summary.scalar('avg_sample_loss', loss_ph),
-      tf.summary.scalar('entropy', entropy_ph),
-      tf.summary.scalar('avg_accuracy', accuracy_ph)
-  ]
-  log_step_train = tf.summary.merge(summary_train)
-
-  # Training
-  sess = tf.Session()
-  sess.run(tf.global_variables_initializer())
-  snapshot_saver =
tf.train.Saver(max_to_keep=None) # keep all snapshots - show_all_variables() - - avg_accuracy = 0 - accuracy_decay = 0.99 - for n_iter, batch in enumerate(data_reader_train.batches()): - if n_iter >= config.max_iter: - break - - # set up input and output tensors - h = sess.partial_run_setup( - fetches=[ - model.predicted_tokens, model.entropy_reg, scores, avg_sample_loss, - train_step - ], - feeds=[ - text_seq_batch, seq_len_batch, gt_layout_batch, - compiler.loom_input_tensor, ans_label_batch - ]) - - # Part 1: Generate module layout - tokens, entropy_reg_val = sess.partial_run( - h, - fetches=(model.predicted_tokens, model.entropy_reg), - feed_dict={ - text_seq_batch: batch['input_seq_batch'], - seq_len_batch: batch['seq_len_batch'], - gt_layout_batch: batch['gt_layout_batch'] - }) - # Assemble the layout tokens into network structure - expr_list, expr_validity_array = assembler.assemble(tokens) - # all exprs should be valid (since they are ground-truth) - assert np.all(expr_validity_array) - labels = batch['ans_label_batch'] - # Build TensorFlow Fold input for NMN - expr_feed = compiler.build_feed_dict(expr_list) - expr_feed[ans_label_batch] = labels - - # Part 2: Run NMN and learning steps - scores_val, avg_sample_loss_val, _ = sess.partial_run( - h, fetches=(scores, avg_sample_loss, train_step), feed_dict=expr_feed) - - # Compute accuracy - predictions = np.argmax(scores_val, axis=1) - accuracy = np.mean( - np.logical_and(expr_validity_array, predictions == labels)) - avg_accuracy += (1 - accuracy_decay) * (accuracy - avg_accuracy) - - # Add to TensorBoard summary - if (n_iter + 1) % config.log_interval == 0: - tf.logging.info('iter = %d\n\t' - 'loss = %f, accuracy (cur) = %f, ' - 'accuracy (avg) = %f, entropy = %f' % - (n_iter + 1, avg_sample_loss_val, accuracy, avg_accuracy, - -entropy_reg_val)) - summary = sess.run( - fetches=log_step_train, - feed_dict={ - loss_ph: avg_sample_loss_val, - entropy_ph: -entropy_reg_val, - accuracy_ph: avg_accuracy - }) - log_writer.add_summary(summary, n_iter + 1) - - # Save snapshot - if (n_iter + 1) % config.snapshot_interval == 0: - snapshot_file = os.path.join(config.model_dir, '%08d' % (n_iter + 1)) - snapshot_saver.save(sess, snapshot_file, write_meta_graph=False) - tf.logging.info('Snapshot saved to %s' % snapshot_file) - - tf.logging.info('Run finished.') - - -if __name__ == '__main__': - config_raw, unparsed = get_config() - tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) diff --git a/research/qa_kg/model_n2nmn/__init__.py b/research/qa_kg/model_n2nmn/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/qa_kg/model_n2nmn/assembler.py b/research/qa_kg/model_n2nmn/assembler.py deleted file mode 100644 index f5839f6f4..000000000 --- a/research/qa_kg/model_n2nmn/assembler.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-import numpy as np
-
-# the number of attention inputs to each module
-_module_input_num = {
-    '_key_find': 0,
-    '_key_filter': 1,
-    '_val_desc': 1}
-_module_output_type = {
-    '_key_find': 'att',
-    '_key_filter': 'att',
-    '_val_desc': 'ans'
-}
-
-INVALID_EXPR = 'INVALID_EXPR'
-
-
-class Assembler:
-
-  def __init__(self, config):
-    # read the module list, and record the index of each module and <eos>
-    self.module_names = config.module_names
-    # find the index of <eos>
-    for n_s in range(len(self.module_names)):
-      if self.module_names[n_s] == '<eos>':
-        self.EOS_idx = n_s
-        break
-    # build a dictionary from module name to token index
-    self.name2idx_dict = {
-        name: n_s
-        for n_s, name in enumerate(self.module_names)
-    }
-
-  def module_list2tokens(self, module_list, max_len=None):
-    layout_tokens = [self.name2idx_dict[name] for name in module_list]
-    if max_len is not None:
-      if len(module_list) >= max_len:
-        raise ValueError('Not enough time steps to add <eos>')
-      layout_tokens += [self.EOS_idx] * (max_len - len(module_list))
-    return layout_tokens
-
-  def _layout_tokens2str(self, layout_tokens):
-    return ' '.join([self.module_names[idx] for idx in layout_tokens])
-
-  def _invalid_expr(self, layout_tokens, error_str):
-    return {
-        'module': INVALID_EXPR,
-        'expr_str': self._layout_tokens2str(layout_tokens),
-        'error': error_str
-    }
-
-  def _assemble_layout_tokens(self, layout_tokens, batch_idx):
-    # Every module takes a time_idx as the index from LSTM hidden states
-    # (even if it doesn't need it, like _and), and different arity of
-    # attention inputs. The output type can be either attention or answer
-    #
-    # The final assembled expression for each instance is as follows:
-    # expr_type :=
-    #   {'module': '_find', 'output_type': 'att', 'time_idx': idx}
-    #   | {'module': '_relocate', 'output_type': 'att', 'time_idx': idx,
-    #      'inputs_0': <expr_type>}
-    #   | {'module': '_and', 'output_type': 'att', 'time_idx': idx,
-    #      'inputs_0': <expr_type>, 'inputs_1': <expr_type>)}
-    #   | {'module': '_describe', 'output_type': 'ans', 'time_idx': idx,
-    #      'inputs_0': <expr_type>}
-    #   | {'module': INVALID_EXPR, 'expr_str': '...', 'error': '...',
-    #      'assembly_loss': <assembly_loss>} (for invalid expressions)
-    #
-
-    # A valid layout must contain <eos>. Assembly fails if it doesn't.
-    if not np.any(layout_tokens == self.EOS_idx):
-      return self._invalid_expr(layout_tokens, 'cannot find <eos>')
-
-    # Decoding Reverse Polish Notation with a stack
-    decoding_stack = []
-    for t in range(len(layout_tokens)):
-      # decode a module/operation
-      module_idx = layout_tokens[t]
-      if module_idx == self.EOS_idx:
-        break
-      module_name = self.module_names[module_idx]
-      expr = {
-          'module': module_name,
-          'output_type': _module_output_type[module_name],
-          'time_idx': t,
-          'batch_idx': batch_idx
-      }
-
-      input_num = _module_input_num[module_name]
-      # Check if there are enough inputs in the stack
-      if len(decoding_stack) < input_num:
-        # Invalid expression. Not enough inputs.
-        return self._invalid_expr(layout_tokens,
-                                  'not enough input for ' + module_name)
-
-      # Get the input from stack
-      for n_input in range(input_num - 1, -1, -1):
-        stack_top = decoding_stack.pop()
-        if stack_top['output_type'] != 'att':
-          # Invalid expression. Input must be attention
-          return self._invalid_expr(layout_tokens,
-                                    'input incompatible for ' + module_name)
-        expr['input_%d' % n_input] = stack_top
-
-      decoding_stack.append(expr)
-
-    # After decoding the reverse polish expression, there should be exactly
-    # one expression in the stack
-    if len(decoding_stack) != 1:
-      return self._invalid_expr(
-          layout_tokens,
-          'final stack size not equal to 1 (%d remains)' % len(decoding_stack))
-
-    result = decoding_stack[0]
-    # The result type should be answer, not attention
-    if result['output_type'] != 'ans':
-      return self._invalid_expr(layout_tokens,
-                                'result type must be ans, not att')
-    return result
-
-  def assemble(self, layout_tokens_batch):
-    # layout_tokens_batch is a numpy array with shape [max_dec_len, batch_size],
-    # containing module tokens and <eos>, in Reverse Polish Notation.
-    _, batch_size = layout_tokens_batch.shape
-    expr_list = [
-        self._assemble_layout_tokens(layout_tokens_batch[:, batch_i], batch_i)
-        for batch_i in range(batch_size)
-    ]
-    expr_validity = np.array(
-        [expr['module'] != INVALID_EXPR for expr in expr_list], np.bool)
-    return expr_list, expr_validity
diff --git a/research/qa_kg/model_n2nmn/model.py b/research/qa_kg/model_n2nmn/model.py
deleted file mode 100644
index 56896f438..000000000
--- a/research/qa_kg/model_n2nmn/model.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -import numpy as np -import tensorflow as tf -import tensorflow_fold as td -from model_n2nmn import netgen_att -from model_n2nmn import assembler -from model_n2nmn.modules import Modules - - -class Model: - - def __init__(self, - config, - kb, - text_seq_batch, - seq_length_batch, - num_vocab_txt, - num_vocab_nmn, - EOS_idx, - num_choices, - decoder_sampling, - use_gt_layout=None, - gt_layout_batch=None, - scope='neural_module_network', - reuse=None): - with tf.variable_scope(scope, reuse=reuse): - # Part 1: Seq2seq RNN to generate module layout tokens - - embedding_mat = tf.get_variable( - 'embedding_mat', [num_vocab_txt, config.embed_dim_txt], - initializer=tf.contrib.layers.xavier_initializer()) - - with tf.variable_scope('layout_generation'): - att_seq2seq = netgen_att.AttentionSeq2Seq( - config, text_seq_batch, seq_length_batch, num_vocab_txt, - num_vocab_nmn, EOS_idx, decoder_sampling, embedding_mat, - use_gt_layout, gt_layout_batch) - self.att_seq2seq = att_seq2seq - predicted_tokens = att_seq2seq.predicted_tokens - token_probs = att_seq2seq.token_probs - word_vecs = att_seq2seq.word_vecs - neg_entropy = att_seq2seq.neg_entropy - self.atts = att_seq2seq.atts - - self.predicted_tokens = predicted_tokens - self.token_probs = token_probs - self.word_vecs = word_vecs - self.neg_entropy = neg_entropy - - # log probability of each generated sequence - self.log_seq_prob = tf.reduce_sum(tf.log(token_probs), axis=0) - - # Part 2: Neural Module Network - with tf.variable_scope('layout_execution'): - modules = Modules(config, kb, word_vecs, num_choices, embedding_mat) - self.modules = modules - # Recursion of modules - att_shape = [len(kb)] - # Forward declaration of module recursion - att_expr_decl = td.ForwardDeclaration(td.PyObjectType(), - td.TensorType(att_shape)) - # _key_find - case_key_find = td.Record([('time_idx', td.Scalar(dtype='int32')), - ('batch_idx', td.Scalar(dtype='int32'))]) - case_key_find = case_key_find >> td.ScopedLayer( - modules.KeyFindModule, name_or_scope='KeyFindModule') - # _key_filter - case_key_filter = td.Record([('input_0', att_expr_decl()), - ('time_idx', td.Scalar('int32')), - ('batch_idx', td.Scalar('int32'))]) - case_key_filter = case_key_filter >> td.ScopedLayer( - modules.KeyFilterModule, name_or_scope='KeyFilterModule') - recursion_cases = td.OneOf( - td.GetItem('module'), - {'_key_find': case_key_find, - '_key_filter': case_key_filter}) - att_expr_decl.resolve_to(recursion_cases) - # _val_desc: output scores for choice (for valid expressions) - predicted_scores = td.Record([('input_0', recursion_cases), - ('time_idx', td.Scalar('int32')), - ('batch_idx', td.Scalar('int32'))]) - predicted_scores = predicted_scores >> td.ScopedLayer( - modules.ValDescribeModule, name_or_scope='ValDescribeModule') - - # For invalid expressions, define a dummy answer - # so that all answers have the same form - INVALID = assembler.INVALID_EXPR - dummy_scores = td.Void() >> td.FromTensor( - np.zeros(num_choices, np.float32)) - output_scores = td.OneOf( - td.GetItem('module'), - {'_val_desc': predicted_scores, - INVALID: dummy_scores}) - - # compile and get the output scores - self.compiler = td.Compiler.create(output_scores) - self.scores = self.compiler.output_tensors[0] - - # Regularization: Entropy + L2 - self.entropy_reg = tf.reduce_mean(neg_entropy) - module_weights = [ - v for v in tf.trainable_variables() - if (scope in v.op.name and v.op.name.endswith('weights')) - ] - self.l2_reg = 
tf.add_n([tf.nn.l2_loss(v) for v in module_weights]) diff --git a/research/qa_kg/model_n2nmn/modules.py b/research/qa_kg/model_n2nmn/modules.py deleted file mode 100644 index 8c7a7370f..000000000 --- a/research/qa_kg/model_n2nmn/modules.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import tensorflow as tf - - -class Modules: - - def __init__(self, config, kb, word_vecs, num_choices, embedding_mat): - self.config = config - - self.embedding_mat = embedding_mat - - # kb has shape [N_kb, 3] - self.kb = kb - self.embed_keys_e, self.embed_keys_r, self.embed_vals_e = self.embed_kb() - - # word_vecs has shape [T_decoder, N, D_txt] - self.word_vecs = word_vecs - self.num_choices = num_choices - - def embed_kb(self): - keys_e, keys_r, vals_e = [], [], [] - for idx_sub, idx_rel, idx_obj in self.kb: - keys_e.append(idx_sub) - keys_r.append(idx_rel) - vals_e.append(idx_obj) - embed_keys_e = tf.nn.embedding_lookup(self.embedding_mat, keys_e) - embed_keys_r = tf.nn.embedding_lookup(self.embedding_mat, keys_r) - embed_vals_e = tf.nn.embedding_lookup(self.embedding_mat, vals_e) - return embed_keys_e, embed_keys_r, embed_vals_e - - def _slice_word_vecs(self, time_idx, batch_idx): - # this callable will be wrapped into a td.Function - # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors - # time is highest dim in word_vecs - joint_index = tf.stack([time_idx, batch_idx], axis=1) - return tf.gather_nd(self.word_vecs, joint_index) - - # All the layers are wrapped with td.ScopedLayer - def KeyFindModule(self, - time_idx, - batch_idx, - scope='KeyFindModule', - reuse=None): - # In TF Fold, batch_idx and time_idx are both [N_batch, 1] tensors - text_param = self._slice_word_vecs(time_idx, batch_idx) - - # Mapping: embed_keys_e x text_param -> att - # Input: - # embed_keys_e: [N_kb, D_txt] - # text_param: [N, D_txt] - # Output: - # att: [N, N_kb] - # - # Implementation: - # 1. Elementwise multiplication between embed_key_e and text_param - # 2. L2-normalization - with tf.variable_scope(scope, reuse=reuse): - m = tf.matmul(text_param, self.embed_keys_e, transpose_b=True) - att = tf.nn.l2_normalize(m, dim=1) - return att - - def KeyFilterModule(self, - input_0, - time_idx, - batch_idx, - scope='KeyFilterModule', - reuse=None): - att_0 = input_0 - text_param = self._slice_word_vecs(time_idx, batch_idx) - - # Mapping: and(embed_keys_r x text_param, att) -> att - # Input: - # embed_keys_r: [N_kb, D_txt] - # text_param: [N, D_txt] - # att_0: [N, N_kb] - # Output: - # att: [N, N_kb] - # - # Implementation: - # 1. Elementwise multiplication between embed_key_r and text_param - # 2. L2-normalization - # 3. 
Take the elementwise-min - with tf.variable_scope(scope, reuse=reuse): - m = tf.matmul(text_param, self.embed_keys_r, transpose_b=True) - att_1 = tf.nn.l2_normalize(m, dim=1) - att = tf.minimum(att_0, att_1) - return att - - def ValDescribeModule(self, - input_0, - time_idx, - batch_idx, - scope='ValDescribeModule', - reuse=None): - att = input_0 - - # Mapping: att -> answer probs - # Input: - # embed_vals_e: [N_kb, D_txt] - # att: [N, N_kb] - # embedding_mat: [self.num_choices, D_txt] - # Output: - # answer_scores: [N, self.num_choices] - # - # Implementation: - # 1. Attention-weighted sum over values - # 2. Compute cosine similarity scores between the weighted sum and - # each candidate answer - with tf.variable_scope(scope, reuse=reuse): - # weighted_sum has shape [N, D_txt] - weighted_sum = tf.matmul(att, self.embed_vals_e) - # scores has shape [N, self.num_choices] - scores = tf.matmul( - weighted_sum, - tf.nn.l2_normalize(self.embedding_mat, dim=1), - transpose_b=True) - return scores diff --git a/research/qa_kg/model_n2nmn/netgen_att.py b/research/qa_kg/model_n2nmn/netgen_att.py deleted file mode 100644 index df6509946..000000000 --- a/research/qa_kg/model_n2nmn/netgen_att.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import tensorflow as tf -from util.nn import fc_layer as fc - - -def _get_lstm_cell(num_layers, lstm_dim): - cell_list = [ - tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True) - for _ in range(num_layers) - ] - cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True) - return cell - - -class AttentionSeq2Seq: - - def __init__(self, - config, - text_seq_batch, - seq_length_batch, - num_vocab_txt, - num_vocab_nmn, - EOS_token, - decoder_sampling, - embedding_mat, - use_gt_layout=None, - gt_layout_batch=None, - scope='encoder_decoder', - reuse=None): - self.T_decoder = config.T_decoder - self.encoder_num_vocab = num_vocab_txt - self.encoder_embed_dim = config.embed_dim_txt - self.decoder_num_vocab = num_vocab_nmn - self.decoder_embed_dim = config.embed_dim_nmn - self.lstm_dim = config.lstm_dim - self.num_layers = config.num_layers - self.EOS_token = EOS_token - self.decoder_sampling = decoder_sampling - self.embedding_mat = embedding_mat - - with tf.variable_scope(scope, reuse=reuse): - self._build_encoder(text_seq_batch, seq_length_batch) - self._build_decoder(use_gt_layout, gt_layout_batch) - - def _build_encoder(self, - text_seq_batch, - seq_length_batch, - scope='encoder', - reuse=None): - lstm_dim = self.lstm_dim - num_layers = self.num_layers - - with tf.variable_scope(scope, reuse=reuse): - T = tf.shape(text_seq_batch)[0] - N = tf.shape(text_seq_batch)[1] - self.T_encoder = T - self.N = N - - # text_seq has shape [T, N] and embedded_seq has shape [T, N, D] - embedded_seq = tf.nn.embedding_lookup(self.embedding_mat, text_seq_batch) - self.embedded_input_seq = embedded_seq - - # The RNN - cell = _get_lstm_cell(num_layers, lstm_dim) - - # encoder_outputs has shape [T, N, lstm_dim] - encoder_outputs, encoder_states = tf.nn.dynamic_rnn( - cell, - embedded_seq, - seq_length_batch, - dtype=tf.float32, - time_major=True, - scope='lstm') - self.encoder_outputs = encoder_outputs - self.encoder_states = encoder_states - - # transform the encoder outputs for further attention alignments - # encoder_outputs_flat has shape [T, N, lstm_dim] - encoder_h_transformed = fc( - 'encoder_h_transform', - tf.reshape(encoder_outputs, [-1, lstm_dim]), - output_dim=lstm_dim) - encoder_h_transformed = tf.reshape(encoder_h_transformed, - [T, N, lstm_dim]) - self.encoder_h_transformed = encoder_h_transformed - - # seq_not_finished is a shape [T, N, 1] tensor, - # where seq_not_finished[t, n] - # is 1 iff sequence n is not finished at time t, and 0 otherwise - seq_not_finished = tf.less( - tf.range(T)[:, tf.newaxis, tf.newaxis], - seq_length_batch[:, tf.newaxis]) - seq_not_finished = tf.cast(seq_not_finished, tf.float32) - self.seq_not_finished = seq_not_finished - - def _build_decoder(self, - use_gt_layout, - gt_layout_batch, - scope='decoder', - reuse=None): - # The main difference from before is that the decoders now takes another - # input (the attention) when computing the next step - # T_max is the maximum length of decoded sequence (including ) - # - # This function is for decoding only. It performs greedy search or sampling. - # the first input is (its embedding vector) and the subsequent inputs - # are the outputs from previous time step - # num_vocab does not include - # - # use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor - # with shape [T_max, N]. 
- # If use_gt_layout is not None, then when use_gt_layout is true, predict - # exactly the tokens in gt_layout_batch, regardless of actual probability. - # Otherwise, if sampling is True, sample from the token probability - # If sampling is False, do greedy decoding (beam size 1) - N = self.N - encoder_states = self.encoder_states - T_max = self.T_decoder - lstm_dim = self.lstm_dim - num_layers = self.num_layers - EOS_token = self.EOS_token - sampling = self.decoder_sampling - - with tf.variable_scope(scope, reuse=reuse): - embedding_mat = tf.get_variable( - 'embedding_mat', [self.decoder_num_vocab, self.decoder_embed_dim]) - # we use a separate embedding for , as it is only used in the - # beginning of the sequence - go_embedding = tf.get_variable('go_embedding', - [1, self.decoder_embed_dim]) - - with tf.variable_scope('att_prediction'): - v = tf.get_variable('v', [lstm_dim]) - W_a = tf.get_variable( - 'weights', [lstm_dim, lstm_dim], - initializer=tf.contrib.layers.xavier_initializer()) - b_a = tf.get_variable( - 'biases', lstm_dim, initializer=tf.constant_initializer(0.)) - - # The parameters to predict the next token - with tf.variable_scope('token_prediction'): - W_y = tf.get_variable( - 'weights', [lstm_dim * 2, self.decoder_num_vocab], - initializer=tf.contrib.layers.xavier_initializer()) - b_y = tf.get_variable( - 'biases', - self.decoder_num_vocab, - initializer=tf.constant_initializer(0.)) - - # Attentional decoding - # Loop function is called at time t BEFORE the cell execution at time t, - # and its next_input is used as the input at time t (not t+1) - # c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn - mask_range = tf.reshape( - tf.range(self.decoder_num_vocab, dtype=tf.int32), [1, -1]) - all_eos_pred = EOS_token * tf.ones([N], tf.int32) - all_one_prob = tf.ones([N], tf.float32) - all_zero_entropy = tf.zeros([N], tf.float32) - if use_gt_layout is not None: - gt_layout_mult = tf.cast(use_gt_layout, tf.int32) - pred_layout_mult = 1 - gt_layout_mult - - def loop_fn(time, cell_output, cell_state, loop_state): - if cell_output is None: # time == 0 - next_cell_state = encoder_states - next_input = tf.tile(go_embedding, [N, 1]) - else: # time > 0 - next_cell_state = cell_state - - # compute the attention map over the input sequence - # a_raw has shape [T, N, 1] - att_raw = tf.reduce_sum( - tf.tanh( - tf.nn.xw_plus_b(cell_output, W_a, b_a) + - self.encoder_h_transformed) * v, - axis=2, - keep_dims=True) - # softmax along the first dimension (T) over not finished examples - # att has shape [T, N, 1] - att = tf.nn.softmax(att_raw, dim=0) * self.seq_not_finished - att = att / tf.reduce_sum(att, axis=0, keep_dims=True) - # d has shape [N, lstm_dim] - d2 = tf.reduce_sum(att * self.encoder_outputs, axis=0) - - # token_scores has shape [N, num_vocab] - token_scores = tf.nn.xw_plus_b( - tf.concat([cell_output, d2], axis=1), W_y, b_y) - # predict the next token (behavior depending on parameters) - if sampling: - # predicted_token has shape [N] - logits = token_scores - predicted_token = tf.cast( - tf.reshape(tf.multinomial(token_scores, 1), [-1]), tf.int32) - else: - # predicted_token has shape [N] - predicted_token = tf.cast(tf.argmax(token_scores, 1), tf.int32) - if use_gt_layout is not None: - predicted_token = (gt_layout_batch[time - 1] * gt_layout_mult + - predicted_token * pred_layout_mult) - - # token_prob has shape [N], the probability of the predicted token - # although token_prob is not needed for predicting the next token - # it is needed in output (for policy 
gradient training) - # [N, num_vocab] - # mask has shape [N, num_vocab] - mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1])) - all_token_probs = tf.nn.softmax(token_scores) - token_prob = tf.reduce_sum( - all_token_probs * tf.cast(mask, tf.float32), axis=1) - neg_entropy = tf.reduce_sum( - all_token_probs * tf.log(all_token_probs), axis=1) - - # is_eos_predicted is a [N] bool tensor, indicating whether - # has already been predicted previously in each sequence - is_eos_predicted = loop_state[2] - predicted_token_old = predicted_token - # if has already been predicted, now predict with - # prob 1 - predicted_token = tf.where(is_eos_predicted, all_eos_pred, - predicted_token) - token_prob = tf.where(is_eos_predicted, all_one_prob, token_prob) - neg_entropy = tf.where(is_eos_predicted, all_zero_entropy, - neg_entropy) - is_eos_predicted = tf.logical_or(is_eos_predicted, - tf.equal(predicted_token_old, - EOS_token)) - - # the prediction is from the cell output of the last step - # timestep (t-1), feed it as input into timestep t - next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token) - - elements_finished = tf.greater_equal(time, T_max) - - # loop_state is a 5-tuple, representing - # 1) the predicted_tokens - # 2) the prob of predicted_tokens - # 3) whether has already been predicted - # 4) the negative entropy of policy (accumulated across timesteps) - # 5) the attention - if loop_state is None: # time == 0 - # Write the predicted token into the output - predicted_token_array = tf.TensorArray( - dtype=tf.int32, size=T_max, infer_shape=False) - token_prob_array = tf.TensorArray( - dtype=tf.float32, size=T_max, infer_shape=False) - att_array = tf.TensorArray( - dtype=tf.float32, size=T_max, infer_shape=False) - next_loop_state = (predicted_token_array, token_prob_array, tf.zeros( - [N], dtype=tf.bool), tf.zeros([N], dtype=tf.float32), att_array) - else: # time > 0 - t_write = time - 1 - next_loop_state = ( - loop_state[0].write(t_write, predicted_token), - loop_state[1].write(t_write, token_prob), - is_eos_predicted, - loop_state[3] + neg_entropy, - loop_state[4].write(t_write, att)) - return (elements_finished, next_input, next_cell_state, cell_output, - next_loop_state) - - # The RNN - cell = _get_lstm_cell(num_layers, lstm_dim) - _, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm') - predicted_tokens = decodes_ta[0].stack() - token_probs = decodes_ta[1].stack() - neg_entropy = decodes_ta[3] - # atts has shape [T_decoder, T_encoder, N, 1] - atts = decodes_ta[4].stack() - self.atts = atts - # word_vec has shape [T_decoder, N, D] - word_vecs = tf.reduce_sum(atts * self.embedded_input_seq, axis=1) - - predicted_tokens.set_shape([None, None]) - token_probs.set_shape([None, None]) - neg_entropy.set_shape([None]) - word_vecs.set_shape([None, None, self.encoder_embed_dim]) - - self.predicted_tokens = predicted_tokens - self.token_probs = token_probs - self.neg_entropy = neg_entropy - self.word_vecs = word_vecs diff --git a/research/qa_kg/util/__init__.py b/research/qa_kg/util/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/qa_kg/util/data_reader.py b/research/qa_kg/util/data_reader.py deleted file mode 100644 index 397390af6..000000000 --- a/research/qa_kg/util/data_reader.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from collections import namedtuple -try: - from queue import Queue # Python 3 -except ImportError: - from Queue import Queue # Python 2 -import re -import threading -import numpy as np -import tensorflow as tf - -Data = namedtuple('Data', ['X', 'Y', 'MultiYs', 'qid']) - - -class SampleBuilder: - - def __init__(self, config): - self.config = config - - self.kb_raw = self.read_kb() - self.data_raw = self.read_raw_data() - - # dictionary of entities, normal words, and relations - self.dict_all = self.gen_dict() - self.reverse_dict_all = dict( - zip(self.dict_all.values(), self.dict_all.keys())) - - tf.logging.info('size of dict: %d' % len(self.dict_all)) - - self.kb = self.build_kb() - self.data_all = self.build_samples() - - def read_kb(self): - kb_raw = [] - for line in open(self.config.KB_file): - sub, rel, obj = line.strip().split('|') - kb_raw.append((sub, rel, obj)) - tf.logging.info('# of KB records: %d' % len(kb_raw)) - return kb_raw - - def read_raw_data(self): - data = dict() - for name in self.config.data_files: - raw = [] - tf.logging.info( - 'Reading data file {}'.format(self.config.data_files[name])) - for line in open(self.config.data_files[name]): - question, answers = line.strip().split('\t') - question = question.replace('],', ']') # ignore ',' in the template - raw.append((question, answers)) - data[name] = raw - return data - - def build_kb(self): - tf.logging.info('Indexing KB...') - kb = [] - for sub, rel, obj in self.kb_raw: - kb.append([self.dict_all[sub], self.dict_all[rel], self.dict_all[obj]]) - return kb - - def gen_dict(self): - s = set() - for sub, rel, obj in self.kb_raw: - s.add(sub) - s.add(rel) - s.add(obj) - for name in self.data_raw: - for question, answers in self.data_raw[name]: - normal = re.split('\[[^\]]+\]', question) - for phrase in normal: - for word in phrase.split(): - s.add(word) - s = list(s) - d = {s[idx]: idx for idx in range(len(s))} - return d - - def build_samples(self): - - def map_entity_idx(text): - entities = re.findall('\[[^\]]+\]', text) - for entity in entities: - entity = entity[1:-1] - index = self.dict_all[entity] - text = text.replace('[%s]' % entity, '@%d' % index) - return text - - data_all = dict() - - for name in self.data_raw: - X, Y, MultiYs, qid = [], [], [], [] - for i, (question, answers) in enumerate(self.data_raw[name]): - qdata, labels = [], [] - question = map_entity_idx(question) - for word in question.split(): - if word[0] == '@': - qdata.append(int(word[1:])) - else: - qdata.append(self.dict_all[word]) - for answer in answers.split('|'): - labels.append(self.dict_all[answer]) - if len(qdata) > self.config.T_encoder: - self.config.T_encoder = len(qdata) - for label in labels: - X.append(qdata) - Y.append(label) - MultiYs.append(set(labels)) - qid.append(i) - data_all[name] = Data(X=X, Y=Y, MultiYs=MultiYs, qid=qid) - - return data_all - - -def _run_prefetch(prefetch_queue, batch_loader, data, shuffle, one_pass, - config): - assert len(data.X) == len(data.Y) == len(data.MultiYs) == len(data.qid) - num_samples = len(data.X) - 
batch_size = config.batch_size - - n_sample = 0 - fetch_order = config.rng.permutation(num_samples) - while True: - sample_ids = fetch_order[n_sample:n_sample + batch_size] - batch = batch_loader.load_one_batch(sample_ids) - prefetch_queue.put(batch, block=True) - - n_sample += len(sample_ids) - if n_sample >= num_samples: - if one_pass: - prefetch_queue.put(None, block=True) - n_sample = 0 - if shuffle: - fetch_order = config.rng.permutation(num_samples) - - -class DataReader: - def __init__(self, - config, - data, - assembler, - shuffle=True, - one_pass=False, - prefetch_num=10): - self.config = config - - self.data = data - self.assembler = assembler - self.batch_loader = BatchLoader(self.config, - self.data, self.assembler) - - self.shuffle = shuffle - self.one_pass = one_pass - self.prefetch_queue = Queue(maxsize=prefetch_num) - self.prefetch_thread = threading.Thread(target=_run_prefetch, - args=(self.prefetch_queue, - self.batch_loader, self.data, - self.shuffle, self.one_pass, - self.config)) - self.prefetch_thread.daemon = True - self.prefetch_thread.start() - - def batches(self): - while True: - if self.prefetch_queue.empty(): - tf.logging.warning('Waiting for data loading (IO is slow)...') - batch = self.prefetch_queue.get(block=True) - if batch is None: - assert self.one_pass - tf.logging.info('One pass finished!') - raise StopIteration() - yield batch - - -class BatchLoader: - def __init__(self, config, - data, assembler): - self.config = config - - self.data = data - self.assembler = assembler - - self.T_encoder = config.T_encoder - self.T_decoder = config.T_decoder - - tf.logging.info('T_encoder: %d' % self.T_encoder) - tf.logging.info('T_decoder: %d' % self.T_decoder) - tf.logging.info('batch size: %d' % self.config.batch_size) - - self.gt_layout_tokens = config.gt_layout_tokens - - def load_one_batch(self, sample_ids): - actual_batch_size = len(sample_ids) - input_seq_batch = np.zeros((self.T_encoder, actual_batch_size), np.int32) - seq_len_batch = np.zeros(actual_batch_size, np.int32) - ans_label_batch = np.zeros(actual_batch_size, np.int32) - ans_set_labels_list = [None] * actual_batch_size - question_id_list = [None] * actual_batch_size - gt_layout_batch = np.zeros((self.T_decoder, actual_batch_size), np.int32) - - for batch_i in range(actual_batch_size): - idx = sample_ids[batch_i] - seq_len = len(self.data.X[idx]) - seq_len_batch[batch_i] = seq_len - input_seq_batch[:seq_len, batch_i] = self.data.X[idx] - ans_label_batch[batch_i] = self.data.Y[idx] - ans_set_labels_list[batch_i] = self.data.MultiYs[idx] - question_id_list[batch_i] = self.data.qid[idx] - - gt_layout_batch[:, batch_i] = self.assembler.module_list2tokens( - self.gt_layout_tokens, self.T_decoder) - - batch = dict(input_seq_batch=input_seq_batch, - seq_len_batch=seq_len_batch, - ans_label_batch=ans_label_batch, - gt_layout_batch=gt_layout_batch, - ans_set_labels_list=ans_set_labels_list, - question_id_list=question_id_list) - return batch diff --git a/research/qa_kg/util/misc.py b/research/qa_kg/util/misc.py deleted file mode 100644 index 9a0199bb4..000000000 --- a/research/qa_kg/util/misc.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from datetime import datetime -import json -import logging -import os -import tensorflow as tf -import tensorflow.contrib.slim as slim - - -def prepare_dirs_and_logger(config): - formatter = logging.Formatter('%(asctime)s:%(levelname)s::%(message)s') - logger = logging.getLogger('tensorflow') - - for hdlr in logger.handlers: - logger.removeHandler(hdlr) - - handler = logging.StreamHandler() - handler.setFormatter(formatter) - - logger.addHandler(handler) - logger.setLevel(tf.logging.INFO) - - config.log_dir = os.path.join(config.exp_dir, config.log_dir, - config.train_tag) - config.model_dir = os.path.join(config.exp_dir, config.model_dir, - config.train_tag) - config.output_dir = os.path.join(config.exp_dir, config.output_dir, - config.train_tag) - - for path in [ - config.log_dir, config.model_dir, config.output_dir - ]: - if not os.path.exists(path): - os.makedirs(path) - - config.data_files = { - 'train': os.path.join(config.data_dir, config.train_data_file), - 'dev': os.path.join(config.data_dir, config.dev_data_file), - 'test': os.path.join(config.data_dir, config.test_data_file) - } - - return config - - -def get_time(): - return datetime.now().strftime('%Y-%m-%d_%H-%M-%S') - - -def show_all_variables(): - model_vars = tf.trainable_variables() - slim.model_analyzer.analyze_vars(model_vars, print_info=True) - - -def save_config(config): - param_path = os.path.join(config.model_dir, 'params.json') - - tf.logging.info('log dir: %s' % config.log_dir) - tf.logging.info('model dir: %s' % config.model_dir) - tf.logging.info('param path: %s' % param_path) - tf.logging.info('output dir: %s' % config.output_dir) - - with open(param_path, 'w') as f: - f.write(json.dumps(config.__dict__, indent=4, sort_keys=True)) diff --git a/research/qa_kg/util/nn.py b/research/qa_kg/util/nn.py deleted file mode 100644 index 38ba02b2e..000000000 --- a/research/qa_kg/util/nn.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-import tensorflow as tf
-
-
-def fc_layer(name,
-             bottom,
-             output_dim,
-             bias_term=True,
-             weights_initializer=None,
-             biases_initializer=None,
-             reuse=None):
-  # flatten bottom input
-  shape = bottom.get_shape().as_list()
-  input_dim = 1
-  for d in shape[1:]:
-    input_dim *= d
-  flat_bottom = tf.reshape(bottom, [-1, input_dim])
-
-  # weights and biases variables
-  with tf.variable_scope(name, reuse=reuse):
-    # initialize the variables
-    if weights_initializer is None:
-      weights_initializer = tf.contrib.layers.xavier_initializer()
-    if bias_term and biases_initializer is None:
-      biases_initializer = tf.constant_initializer(0.)
-
-    # weights has shape [input_dim, output_dim]
-    weights = tf.get_variable(
-        'weights', [input_dim, output_dim], initializer=weights_initializer)
-    if bias_term:
-      biases = tf.get_variable(
-          'biases', output_dim, initializer=biases_initializer)
-    if not reuse:
-      tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
-                           tf.nn.l2_loss(weights))
-
-    if bias_term:
-      fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
-    else:
-      fc = tf.matmul(flat_bottom, weights)
-  return fc
diff --git a/research/real_nvp/README.md b/research/real_nvp/README.md
deleted file mode 100644
index c20ef111e..000000000
--- a/research/real_nvp/README.md
+++ /dev/null
@@ -1,282 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Real NVP in TensorFlow
-
-*A Tensorflow implementation of the training procedure of*
-[*Density estimation using Real NVP*](https://arxiv.org/abs/1605.08803)*, by
-Laurent Dinh, Jascha Sohl-Dickstein and Samy Bengio, for Imagenet
-(32x32 and 64x64), CelebA and LSUN, including the scripts to
-put the datasets in `.tfrecords` format.*
-
-We are happy to open source the code for *Real NVP*, a novel approach to
-density estimation using deep neural networks that enables tractable density
-estimation and efficient one-pass inference and sampling. This model
-successfully decomposes images into hierarchical features ranging from
-high-level concepts to low-resolution details. Visualizations are available
-[here](http://goo.gl/yco14s).
-
-## Installation
-* python 2.7:
-    * python 3 support is not available yet
-* pip (python package manager)
-    * `apt-get install python-pip` on Ubuntu
-    * `brew` installs pip along with python on OSX
-* Install the dependencies for [LSUN](https://github.com/fyu/lsun.git)
-    * Install [OpenCV](http://opencv.org/)
-    * `pip install numpy lmdb`
-* Install the python dependencies
-    * `pip install scipy scikit-image Pillow`
-* Install the
-[latest Tensorflow Pip package](https://www.tensorflow.org/get_started/os_setup.html#using-pip)
-for Python 2.7
-
-## Getting Started
-Once you have successfully installed the dependencies, you can start by
-downloading the repository:
-```shell
-git clone --recursive https://github.com/tensorflow/models.git
-```
-Afterward, you can use the utilities in this folder to prepare the datasets.
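-
-After running any of the preparation steps below, you can sanity-check the
-generated files by iterating over them (a minimal sketch, assuming
-TensorFlow 1.x; the shard name is only an example):
-```python
-import tensorflow as tf
-
-# Hypothetical shard produced by imnet_formatting.py.
-path = "small_imnet/train_32x32_00000.tfrecords"
-count = sum(1 for _ in tf.python_io.tf_record_iterator(path))
-print(count, "records in", path)
-```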
-
-## Preparing datasets
-### CelebA
-For [*CelebA*](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html), download
-`img_align_celeba.zip` from the Dropbox link on this
-[page](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) under the
-link *Align&Cropped Images* in the *Img* directory and `list_eval_partition.txt`
-under the link *Train/Val/Test Partitions* in the *Eval* directory. Then do:
-
-```shell
-mkdir celeba
-cd celeba
-unzip img_align_celeba.zip
-```
-
-We'll format the training subset:
-```shell
-python2.7 ../models/real_nvp/celeba_formatting.py \
-    --partition_fn list_eval_partition.txt \
-    --file_out celeba_train \
-    --fn_root img_align_celeba \
-    --set 0
-```
-
-Then the validation subset:
-```shell
-python2.7 ../models/real_nvp/celeba_formatting.py \
-    --partition_fn list_eval_partition.txt \
-    --file_out celeba_valid \
-    --fn_root img_align_celeba \
-    --set 1
-```
-
-And finally the test subset:
-```shell
-python2.7 ../models/real_nvp/celeba_formatting.py \
-    --partition_fn list_eval_partition.txt \
-    --file_out celeba_test \
-    --fn_root img_align_celeba \
-    --set 2
-```
-
-Afterward:
-```shell
-cd ..
-```
-
-### Small Imagenet
-Downloading the [*small Imagenet*](http://image-net.org/small/download.php)
-dataset is more straightforward and can be done
-entirely in Shell:
-```shell
-mkdir small_imnet
-cd small_imnet
-for FILENAME in train_32x32.tar valid_32x32.tar train_64x64.tar valid_64x64.tar
-do
-    curl -O http://image-net.org/small/$FILENAME
-    tar -xvf $FILENAME
-done
-```
-
-Then, you can format the datasets as follows:
-```shell
-for DIRNAME in train_32x32 valid_32x32 train_64x64 valid_64x64
-do
-    python2.7 ../models/real_nvp/imnet_formatting.py \
-        --file_out $DIRNAME \
-        --fn_root $DIRNAME
-done
-cd ..
-```
-
-### LSUN
-To prepare the [*LSUN*](http://lsun.cs.princeton.edu/2016/) dataset, we will
-need to use the associated code:
-```shell
-git clone https://github.com/fyu/lsun.git
-cd lsun
-```
-Then we'll download the db files:
-```shell
-for CATEGORY in bedroom church_outdoor tower
-do
-    python2.7 download.py -c $CATEGORY
-    unzip "$CATEGORY"_train_lmdb.zip
-    unzip "$CATEGORY"_val_lmdb.zip
-    python2.7 data.py export "$CATEGORY"_train_lmdb \
-        --out_dir "$CATEGORY"_train --flat
-    python2.7 data.py export "$CATEGORY"_val_lmdb \
-        --out_dir "$CATEGORY"_val --flat
-done
-```
-
-Finally, we format the dataset into `.tfrecords`:
-```shell
-for CATEGORY in bedroom church_outdoor tower
-do
-    python2.7 ../models/real_nvp/lsun_formatting.py \
-        --file_out "$CATEGORY"_train \
-        --fn_root "$CATEGORY"_train
-    python2.7 ../models/real_nvp/lsun_formatting.py \
-        --file_out "$CATEGORY"_val \
-        --fn_root "$CATEGORY"_val
-done
-cd ..
-```
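-
-All three pipelines write records with the same schema: `height`, `width`,
-`depth` and the raw `image_raw` bytes. As an illustration (ours, not part of
-the original scripts), one serialized record can be decoded back into an
-image array as follows, assuming uint8 data as written by the formatters:
-```python
-import numpy as np
-import tensorflow as tf
-
-
-def decode_record(serialized):
-    # Rebuild the HxWxC uint8 array from one serialized tf.train.Example.
-    example = tf.train.Example()
-    example.ParseFromString(serialized)
-    feature = example.features.feature
-    shape = (feature["height"].int64_list.value[0],
-             feature["width"].int64_list.value[0],
-             feature["depth"].int64_list.value[0])
-    flat = np.frombuffer(feature["image_raw"].bytes_list.value[0], np.uint8)
-    return flat.reshape(shape)
-```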
-
-
-## Training
-We'll give an example on how to train a model on the small Imagenet
-dataset (32x32):
-```shell
-cd models/real_nvp/
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 32 \
---hpconfig=n_scale=4,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset imnet \
---traindir /tmp/real_nvp_imnet32/train \
---logdir /tmp/real_nvp_imnet32/train \
---data_path ../../small_imnet/train_32x32_?????.tfrecords
-```
-In parallel, you can run the script to generate visualizations from the model:
-```shell
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 32 \
---hpconfig=n_scale=4,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset imnet \
---traindir /tmp/real_nvp_imnet32/train \
---logdir /tmp/real_nvp_imnet32/sample \
---data_path ../../small_imnet/valid_32x32_?????.tfrecords \
---mode sample
-```
-Additionally, you can also run the script to evaluate the model on the
-validation set:
-```shell
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 32 \
---hpconfig=n_scale=4,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset imnet \
---traindir /tmp/real_nvp_imnet32/train \
---logdir /tmp/real_nvp_imnet32/eval \
---data_path ../../small_imnet/valid_32x32_?????.tfrecords \
---eval_set_size 50000 \
---mode eval
-```
-The visualizations and validation set evaluation can be seen through
-[Tensorboard](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tensorboard/README.md).
-
-Another example would be how to run the model on LSUN (church_outdoor category):
-```shell
-# train the model
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset lsun \
---traindir /tmp/real_nvp_church_outdoor/train \
---logdir /tmp/real_nvp_church_outdoor/train \
---data_path ../../lsun/church_outdoor_train_?????.tfrecords
-```
-
-```shell
-# sample from the model
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset lsun \
---traindir /tmp/real_nvp_church_outdoor/train \
---logdir /tmp/real_nvp_church_outdoor/sample \
---data_path ../../lsun/church_outdoor_val_?????.tfrecords \
---mode sample
-```
-
-```shell
-# evaluate the model
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset lsun \
---traindir /tmp/real_nvp_church_outdoor/train \
---logdir /tmp/real_nvp_church_outdoor/eval \
---data_path ../../lsun/church_outdoor_val_?????.tfrecords \
---eval_set_size 300 \
---mode eval
-```
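-
-The eval runs report `bit_per_dim` in TensorBoard. This is just the mean
-negative log-likelihood (in nats) converted with the formula used in
-`real_nvp_multiscale_dataset.py`; the following sketch (ours, not part of
-the original script) shows that conversion:
-```python
-import math
-
-
-def bits_per_dim(nll, image_size, channels=3):
-    # Mirrors real_nvp_multiscale_dataset.py:
-    # bits/dim = (NLL + D * log 256) / (D * log 2), with D = H * W * C.
-    dims = image_size * image_size * channels
-    return (nll + dims * math.log(256.)) / (dims * math.log(2.))
-
-
-# For example, an NLL of 9000 nats on 32x32x3 images is roughly
-# (9000 + 3072 * 5.55) / (3072 * 0.693), i.e. about 12.2 bits/dim.
-print(bits_per_dim(9000., 32))
-```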
-
-Finally, we'll give the commands to run the model on the CelebA dataset:
-```shell
-# train the model
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset celeba \
---traindir /tmp/real_nvp_celeba/train \
---logdir /tmp/real_nvp_celeba/train \
---data_path ../../celeba/celeba_train.tfrecords
-```
-
-```shell
-# sample from the model
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset celeba \
---traindir /tmp/real_nvp_celeba/train \
---logdir /tmp/real_nvp_celeba/sample \
---data_path ../../celeba/celeba_valid.tfrecords \
---mode sample
-```
-
-```shell
-# evaluate the model on validation set
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset celeba \
---traindir /tmp/real_nvp_celeba/train \
---logdir /tmp/real_nvp_celeba/eval_valid \
---data_path ../../celeba/celeba_valid.tfrecords \
---eval_set_size 19867 \
---mode eval
-
-# evaluate the model on test set
-python2.7 real_nvp_multiscale_dataset.py \
---image_size 64 \
---hpconfig=n_scale=5,base_dim=32,clip_gradient=100,residual_blocks=4 \
---dataset celeba \
---traindir /tmp/real_nvp_celeba/train \
---logdir /tmp/real_nvp_celeba/eval_test \
---data_path ../../celeba/celeba_test.tfrecords \
---eval_set_size 19962 \
---mode eval
-```
-
-## Credits
-This code was written by Laurent Dinh
-([@laurent-dinh](https://github.com/laurent-dinh)) with
-the help of
-Jascha Sohl-Dickstein ([@Sohl-Dickstein](https://github.com/Sohl-Dickstein)
-and [jaschasd@google.com](mailto:jaschasd@google.com)),
-Samy Bengio, Jon Shlens, Sherry Moore and
-David Andersen.
diff --git a/research/real_nvp/__init__.py b/research/real_nvp/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/real_nvp/celeba_formatting.py b/research/real_nvp/celeba_formatting.py
deleted file mode 100644
index e03571086..000000000
--- a/research/real_nvp/celeba_formatting.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""CelebA dataset formatting.
-
-Download img_align_celeba.zip from
-http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html under the
-link "Align&Cropped Images" in the "Img" directory and list_eval_partition.txt
-under the link "Train/Val/Test Partitions" in the "Eval" directory. Then do:
-unzip img_align_celeba.zip
-
-Use the script as follows:
-python celeba_formatting.py \
-    --partition_fn [PARTITION_FILE_PATH] \
-    --file_out [OUTPUT_FILE_PATH_PREFIX] \
-    --fn_root [CELEBA_FOLDER] \
-    --set [SUBSET_INDEX]
-
-"""
-
-from __future__ import print_function
-
-import os
-import os.path
-
-import scipy.io
-import scipy.io.wavfile
-import scipy.ndimage
-import tensorflow as tf
-
-
-tf.flags.DEFINE_string("file_out", "",
-                       "Filename of the output .tfrecords file.")
-tf.flags.DEFINE_string("fn_root", "", "Name of root file path.")
-tf.flags.DEFINE_string("partition_fn", "", "Partition file path.")
-tf.flags.DEFINE_string("set", "", "Name of subset.")
-
-FLAGS = tf.flags.FLAGS
-
-
-def _int64_feature(value):
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
-
-
-def _bytes_feature(value):
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def main():
-  """Main converter function."""
-  # CelebA
-  with open(FLAGS.partition_fn, "r") as infile:
-    img_fn_list = infile.readlines()
-  img_fn_list = [elem.strip().split() for elem in img_fn_list]
-  img_fn_list = [elem[0] for elem in img_fn_list if elem[1] == FLAGS.set]
-  fn_root = FLAGS.fn_root
-  num_examples = len(img_fn_list)
-
-  file_out = "%s.tfrecords" % FLAGS.file_out
-  writer = tf.python_io.TFRecordWriter(file_out)
-  for example_idx, img_fn in enumerate(img_fn_list):
-    if example_idx % 1000 == 0:
-      print(example_idx, "/", num_examples)
-    image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
-    rows = image_raw.shape[0]
-    cols = image_raw.shape[1]
-    depth = image_raw.shape[2]
-    image_raw = image_raw.tostring()
-    example = tf.train.Example(
-        features=tf.train.Features(
-            feature={
-                "height": _int64_feature(rows),
-                "width": _int64_feature(cols),
-                "depth": _int64_feature(depth),
-                "image_raw": _bytes_feature(image_raw)
-            }
-        )
-    )
-    writer.write(example.SerializeToString())
-  writer.close()
-
-
-if __name__ == "__main__":
-  main()
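Note: celeba_formatting.py above (and imnet_formatting.py below) reads images
with scipy.ndimage.imread, which was deprecated in SciPy 1.0 and removed in
SciPy 1.2. A drop-in replacement, assuming the imageio package is available
(not part of the original scripts):

```python
import os

import imageio  # pip install imageio


def read_image(fn_root, img_fn):
    # Equivalent of scipy.ndimage.imread: returns an HxWxC uint8 array.
    return imageio.imread(os.path.join(fn_root, img_fn))
```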
diff --git a/research/real_nvp/imnet_formatting.py b/research/real_nvp/imnet_formatting.py
deleted file mode 100644
index 1775dd54d..000000000
--- a/research/real_nvp/imnet_formatting.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""Small Imagenet dataset formatting.
-
-Download and format the Imagenet dataset as follows:
-mkdir [IMAGENET_PATH]
-cd [IMAGENET_PATH]
-for FILENAME in train_32x32.tar valid_32x32.tar train_64x64.tar valid_64x64.tar
-do
-    curl -O http://image-net.org/small/$FILENAME
-    tar -xvf $FILENAME
-done
-
-Then use the script as follows:
-for DIRNAME in train_32x32 valid_32x32 train_64x64 valid_64x64
-do
-    python imnet_formatting.py \
-        --file_out $DIRNAME \
-        --fn_root $DIRNAME
-done
-
-"""
-
-from __future__ import print_function
-
-import os
-import os.path
-
-import scipy.io
-import scipy.io.wavfile
-import scipy.ndimage
-import tensorflow as tf
-
-
-tf.flags.DEFINE_string("file_out", "",
-                       "Filename of the output .tfrecords file.")
-tf.flags.DEFINE_string("fn_root", "", "Name of root file path.")
-
-FLAGS = tf.flags.FLAGS
-
-
-def _int64_feature(value):
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
-
-
-def _bytes_feature(value):
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def main():
-  """Main converter function."""
-  # Small Imagenet
-  fn_root = FLAGS.fn_root
-  img_fn_list = os.listdir(fn_root)
-  img_fn_list = [img_fn for img_fn in img_fn_list
-                 if img_fn.endswith('.png')]
-  num_examples = len(img_fn_list)
-
-  n_examples_per_file = 10000
-  for example_idx, img_fn in enumerate(img_fn_list):
-    if example_idx % n_examples_per_file == 0:
-      file_out = "%s_%05d.tfrecords"
-      file_out = file_out % (FLAGS.file_out,
-                             example_idx // n_examples_per_file)
-      print("Writing on:", file_out)
-      writer = tf.python_io.TFRecordWriter(file_out)
-    if example_idx % 1000 == 0:
-      print(example_idx, "/", num_examples)
-    image_raw = scipy.ndimage.imread(os.path.join(fn_root, img_fn))
-    rows = image_raw.shape[0]
-    cols = image_raw.shape[1]
-    depth = image_raw.shape[2]
-    image_raw = image_raw.astype("uint8")
-    image_raw = image_raw.tostring()
-    example = tf.train.Example(
-        features=tf.train.Features(
-            feature={
-                "height": _int64_feature(rows),
-                "width": _int64_feature(cols),
-                "depth": _int64_feature(depth),
-                "image_raw": _bytes_feature(image_raw)
-            }
-        )
-    )
-    writer.write(example.SerializeToString())
-    if example_idx % n_examples_per_file == (n_examples_per_file - 1):
-      writer.close()
-  writer.close()
-
-
-if __name__ == "__main__":
-  main()
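Note: imnet_formatting.py above and lsun_formatting.py below both shard their
output into one `.tfrecords` file per 10,000 images; the `%05d` suffix is what
the `?????` globs in the README's training commands match. A sketch of the
naming scheme:

```python
def shard_name(prefix, example_idx, n_per_file=10000):
    # e.g. shard_name("train_32x32", 123456) -> "train_32x32_00012.tfrecords"
    return "%s_%05d.tfrecords" % (prefix, example_idx // n_per_file)
```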
diff --git a/research/real_nvp/lsun_formatting.py b/research/real_nvp/lsun_formatting.py
deleted file mode 100644
index 13a21c5e9..000000000
--- a/research/real_nvp/lsun_formatting.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""LSUN dataset formatting.
-
-Download and format the LSUN dataset as follows:
-git clone https://github.com/fyu/lsun.git
-cd lsun
-python2.7 download.py -c [CATEGORY]
-
-Then unzip the downloaded .zip files before executing:
-python2.7 data.py export [IMAGE_DB_PATH] --out_dir [LSUN_FOLDER] --flat
-
-Then use the script as follows:
-python lsun_formatting.py \
-    --file_out [OUTPUT_FILE_PATH_PREFIX] \
-    --fn_root [LSUN_FOLDER]
-
-"""
-from __future__ import print_function
-
-import os
-import os.path
-
-import numpy
-import skimage.transform
-from PIL import Image
-import tensorflow as tf
-
-
-tf.flags.DEFINE_string("file_out", "",
-                       "Filename of the output .tfrecords file.")
-tf.flags.DEFINE_string("fn_root", "", "Name of root file path.")
-
-FLAGS = tf.flags.FLAGS
-
-
-def _int64_feature(value):
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
-
-
-def _bytes_feature(value):
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def main():
-  """Main converter function."""
-  fn_root = FLAGS.fn_root
-  img_fn_list = os.listdir(fn_root)
-  img_fn_list = [img_fn for img_fn in img_fn_list
-                 if img_fn.endswith('.webp')]
-  num_examples = len(img_fn_list)
-
-  n_examples_per_file = 10000
-  for example_idx, img_fn in enumerate(img_fn_list):
-    if example_idx % n_examples_per_file == 0:
-      file_out = "%s_%05d.tfrecords"
-      file_out = file_out % (FLAGS.file_out,
-                             example_idx // n_examples_per_file)
-      print("Writing on:", file_out)
-      writer = tf.python_io.TFRecordWriter(file_out)
-    if example_idx % 1000 == 0:
-      print(example_idx, "/", num_examples)
-    image_raw = numpy.array(Image.open(os.path.join(fn_root, img_fn)))
-    rows = image_raw.shape[0]
-    cols = image_raw.shape[1]
-    depth = image_raw.shape[2]
-    downscale = min(rows / 96., cols / 96.)
-    image_raw = skimage.transform.pyramid_reduce(image_raw, downscale)
-    image_raw *= 255.
-    image_raw = image_raw.astype("uint8")
-    rows = image_raw.shape[0]
-    cols = image_raw.shape[1]
-    depth = image_raw.shape[2]
-    image_raw = image_raw.tostring()
-    example = tf.train.Example(
-        features=tf.train.Features(
-            feature={
-                "height": _int64_feature(rows),
-                "width": _int64_feature(cols),
-                "depth": _int64_feature(depth),
-                "image_raw": _bytes_feature(image_raw)
-            }
-        )
-    )
-    writer.write(example.SerializeToString())
-    if example_idx % n_examples_per_file == (n_examples_per_file - 1):
-      writer.close()
-  writer.close()
-
-
-if __name__ == "__main__":
-  main()
diff --git a/research/real_nvp/real_nvp_multiscale_dataset.py b/research/real_nvp/real_nvp_multiscale_dataset.py
deleted file mode 100644
index c0e1864f1..000000000
--- a/research/real_nvp/real_nvp_multiscale_dataset.py
+++ /dev/null
@@ -1,1639 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-r"""Script for training, evaluation and sampling for Real NVP.
- -$ python real_nvp_multiscale_dataset.py \ ---alsologtostderr \ ---image_size 64 \ ---hpconfig=n_scale=5,base_dim=8 \ ---dataset imnet \ ---data_path [DATA_PATH] -""" - -from __future__ import print_function - -import time -from datetime import datetime -import os - -import numpy -from six.moves import xrange -import tensorflow as tf - -from tensorflow import gfile - -from real_nvp_utils import ( - batch_norm, batch_norm_log_diff, conv_layer, - squeeze_2x2, squeeze_2x2_ordered, standard_normal_ll, - standard_normal_sample, unsqueeze_2x2, variable_on_cpu) - - -tf.flags.DEFINE_string("master", "local", - "BNS name of the TensorFlow master, or local.") - -tf.flags.DEFINE_string("logdir", "/tmp/real_nvp_multiscale", - "Directory to which writes logs.") - -tf.flags.DEFINE_string("traindir", "/tmp/real_nvp_multiscale", - "Directory to which writes logs.") - -tf.flags.DEFINE_integer("train_steps", 1000000000000000000, - "Number of steps to train for.") - -tf.flags.DEFINE_string("data_path", "", "Path to the data.") - -tf.flags.DEFINE_string("mode", "train", - "Mode of execution. Must be 'train', " - "'sample' or 'eval'.") - -tf.flags.DEFINE_string("dataset", "imnet", - "Dataset used. Must be 'imnet', " - "'celeba' or 'lsun'.") - -tf.flags.DEFINE_integer("recursion_type", 2, - "Type of the recursion.") - -tf.flags.DEFINE_integer("image_size", 64, - "Size of the input image.") - -tf.flags.DEFINE_integer("eval_set_size", 0, - "Size of evaluation dataset.") - -tf.flags.DEFINE_string( - "hpconfig", "", - "A comma separated list of hyperparameters for the model. Format is " - "hp1=value1,hp2=value2,etc. If this FLAG is set, the model will be trained " - "with the specified hyperparameters, filling in missing hyperparameters " - "from the default_values in |hyper_params|.") - -FLAGS = tf.flags.FLAGS - -class HParams(object): - """Dictionary of hyperparameters.""" - def __init__(self, **kwargs): - self.dict_ = kwargs - self.__dict__.update(self.dict_) - - def update_config(self, in_string): - """Update the dictionary with a comma separated list.""" - pairs = in_string.split(",") - pairs = [pair.split("=") for pair in pairs] - for key, val in pairs: - self.dict_[key] = type(self.dict_[key])(val) - self.__dict__.update(self.dict_) - return self - - def __getitem__(self, key): - return self.dict_[key] - - def __setitem__(self, key, val): - self.dict_[key] = val - self.__dict__.update(self.dict_) - - -def get_default_hparams(): - """Get the default hyperparameters.""" - return HParams( - batch_size=64, - residual_blocks=2, - n_couplings=2, - n_scale=4, - learning_rate=0.001, - momentum=1e-1, - decay=1e-3, - l2_coeff=0.00005, - clip_gradient=100., - optimizer="adam", - dropout_mask=0, - base_dim=32, - bottleneck=0, - use_batch_norm=1, - alternate=1, - use_aff=1, - skip=1, - data_constraint=.9, - n_opt=0) - - -# RESNET UTILS -def residual_block(input_, dim, name, use_batch_norm=True, - train=True, weight_norm=True, bottleneck=False): - """Residual convolutional block.""" - with tf.variable_scope(name): - res = input_ - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_in", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - if bottleneck: - res = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, dim_out=dim, - name="h_0", stddev=numpy.sqrt(2. 
/ (dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, - name="bn_0", scale=False, train=train, - epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim, - dim_out=dim, name="h_1", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", nonlinearity=None, - bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_1", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, dim_out=dim, - name="out", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", nonlinearity=None, - bias=True, weight_norm=weight_norm, scale=True) - else: - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim, dim_out=dim, - name="h_0", stddev=numpy.sqrt(2. / (dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_0", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim, dim_out=dim, - name="out", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", nonlinearity=None, - bias=True, weight_norm=weight_norm, scale=True) - res += input_ - - return res - - -def resnet(input_, dim_in, dim, dim_out, name, use_batch_norm=True, - train=True, weight_norm=True, residual_blocks=5, - bottleneck=False, skip=True): - """Residual convolutional network.""" - with tf.variable_scope(name): - res = input_ - if residual_blocks != 0: - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim_in, dim_out=dim, - name="h_in", stddev=numpy.sqrt(2. / (dim_in)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=False) - if skip: - out = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, dim_out=dim, - name="skip_in", stddev=numpy.sqrt(2. / (dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=True) - - # residual blocks - for idx_block in xrange(residual_blocks): - res = residual_block(res, dim, "block_%d" % idx_block, - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - bottleneck=bottleneck) - if skip: - out += conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, dim_out=dim, - name="skip_%d" % idx_block, stddev=numpy.sqrt(2. / (dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=True) - # outputs - if skip: - res = out - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_pre_out", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, - dim_out=dim_out, - name="out", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=True) - else: - if bottleneck: - res = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim_in, dim_out=dim, - name="h_0", stddev=numpy.sqrt(2. 
/ (dim_in)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_0", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim, - dim_out=dim, name="h_1", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, - bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_1", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[1, 1], dim_in=dim, dim_out=dim_out, - name="out", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=True) - else: - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim_in, dim_out=dim, - name="h_0", stddev=numpy.sqrt(2. / (dim_in)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=(not use_batch_norm), - weight_norm=weight_norm, scale=False) - if use_batch_norm: - res = batch_norm( - input_=res, dim=dim, name="bn_0", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.nn.relu(res) - res = conv_layer( - input_=res, filter_size=[3, 3], dim_in=dim, dim_out=dim_out, - name="out", stddev=numpy.sqrt(2. / (1. * dim)), - strides=[1, 1, 1, 1], padding="SAME", - nonlinearity=None, bias=True, - weight_norm=weight_norm, scale=True) - return res - - -# COUPLING LAYERS -# masked convolution implementations -def masked_conv_aff_coupling(input_, mask_in, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, use_width=1., use_height=1., - mask_channel=0., skip=True): - """Affine coupling with masked convolution.""" - with tf.variable_scope(name) as scope: - if reverse or (not train): - scope.reuse_variables() - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - - # build mask - mask = use_width * numpy.arange(width) - mask = use_height * numpy.arange(height).reshape((-1, 1)) + mask - mask = mask.astype("float32") - mask = tf.mod(mask_in + mask, 2) - mask = tf.reshape(mask, [-1, height, width, 1]) - if mask.get_shape().as_list()[0] == 1: - mask = tf.tile(mask, [batch_size, 1, 1, 1]) - res = input_ * tf.mod(mask_channel + mask, 2) - - # initial input - if use_batch_norm: - res = batch_norm( - input_=res, dim=channels, name="bn_in", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res *= 2. - res = tf.concat([res, -res], 3) - res = tf.concat([res, mask], 3) - dim_in = 2. 
* channels + 1 - res = tf.nn.relu(res) - res = resnet(input_=res, dim_in=dim_in, dim=dim, - dim_out=2 * channels, - name="resnet", use_batch_norm=use_batch_norm, - train=train, weight_norm=weight_norm, - residual_blocks=residual_blocks, - bottleneck=bottleneck, skip=skip) - mask = tf.mod(mask_channel + mask, 2) - res = tf.split(axis=3, num_or_size_splits=2, value=res) - shift, log_rescaling = res[-2], res[-1] - scale = variable_on_cpu( - "rescaling_scale", [], - tf.constant_initializer(0.)) - shift = tf.reshape( - shift, [batch_size, height, width, channels]) - log_rescaling = tf.reshape( - log_rescaling, [batch_size, height, width, channels]) - log_rescaling = scale * tf.tanh(log_rescaling) - if not use_batch_norm: - scale_shift = variable_on_cpu( - "scale_shift", [], - tf.constant_initializer(0.)) - log_rescaling += scale_shift - shift *= (1. - mask) - log_rescaling *= (1. - mask) - if reverse: - res = input_ - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res * (1. - mask), dim=channels, name="bn_out", - train=False, epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res *= tf.exp(.5 * log_var * (1. - mask)) - res += mean * (1. - mask) - res *= tf.exp(-log_rescaling) - res -= shift - log_diff = -log_rescaling - if use_batch_norm: - log_diff += .5 * log_var * (1. - mask) - else: - res = input_ - res += shift - res *= tf.exp(log_rescaling) - log_diff = log_rescaling - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res * (1. - mask), dim=channels, name="bn_out", - train=train, epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res -= mean * (1. - mask) - res *= tf.exp(-.5 * log_var * (1. - mask)) - log_diff -= .5 * log_var * (1. - mask) - - return res, log_diff - - -def masked_conv_add_coupling(input_, mask_in, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, use_width=1., use_height=1., - mask_channel=0., skip=True): - """Additive coupling with masked convolution.""" - with tf.variable_scope(name) as scope: - if reverse or (not train): - scope.reuse_variables() - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - - # build mask - mask = use_width * numpy.arange(width) - mask = use_height * numpy.arange(height).reshape((-1, 1)) + mask - mask = mask.astype("float32") - mask = tf.mod(mask_in + mask, 2) - mask = tf.reshape(mask, [-1, height, width, 1]) - if mask.get_shape().as_list()[0] == 1: - mask = tf.tile(mask, [batch_size, 1, 1, 1]) - res = input_ * tf.mod(mask_channel + mask, 2) - - # initial input - if use_batch_norm: - res = batch_norm( - input_=res, dim=channels, name="bn_in", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res *= 2. - res = tf.concat([res, -res], 3) - res = tf.concat([res, mask], 3) - dim_in = 2. * channels + 1 - res = tf.nn.relu(res) - shift = resnet(input_=res, dim_in=dim_in, dim=dim, dim_out=channels, - name="resnet", use_batch_norm=use_batch_norm, - train=train, weight_norm=weight_norm, - residual_blocks=residual_blocks, - bottleneck=bottleneck, skip=skip) - mask = tf.mod(mask_channel + mask, 2) - shift *= (1. - mask) - # use_batch_norm = False - if reverse: - res = input_ - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res * (1. - mask), - dim=channels, name="bn_out", train=False, epsilon=1e-4) - log_var = tf.log(var) - res *= tf.exp(.5 * log_var * (1. - mask)) - res += mean * (1. 
- mask) - res -= shift - log_diff = tf.zeros_like(res) - if use_batch_norm: - log_diff += .5 * log_var * (1. - mask) - else: - res = input_ - res += shift - log_diff = tf.zeros_like(res) - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res * (1. - mask), dim=channels, - name="bn_out", train=train, epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res -= mean * (1. - mask) - res *= tf.exp(-.5 * log_var * (1. - mask)) - log_diff -= .5 * log_var * (1. - mask) - - return res, log_diff - - -def masked_conv_coupling(input_, mask_in, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, use_aff=True, - use_width=1., use_height=1., - mask_channel=0., skip=True): - """Coupling with masked convolution.""" - if use_aff: - return masked_conv_aff_coupling( - input_=input_, mask_in=mask_in, dim=dim, name=name, - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=reverse, residual_blocks=residual_blocks, - bottleneck=bottleneck, use_width=use_width, use_height=use_height, - mask_channel=mask_channel, skip=skip) - else: - return masked_conv_add_coupling( - input_=input_, mask_in=mask_in, dim=dim, name=name, - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=reverse, residual_blocks=residual_blocks, - bottleneck=bottleneck, use_width=use_width, use_height=use_height, - mask_channel=mask_channel, skip=skip) - - -# channel-axis splitting implementations -def conv_ch_aff_coupling(input_, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, change_bottom=True, skip=True): - """Affine coupling with channel-wise splitting.""" - with tf.variable_scope(name) as scope: - if reverse or (not train): - scope.reuse_variables() - - if change_bottom: - input_, canvas = tf.split(axis=3, num_or_size_splits=2, value=input_) - else: - canvas, input_ = tf.split(axis=3, num_or_size_splits=2, value=input_) - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - res = input_ - - # initial input - if use_batch_norm: - res = batch_norm( - input_=res, dim=channels, name="bn_in", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.concat([res, -res], 3) - dim_in = 2. 
* channels - res = tf.nn.relu(res) - res = resnet(input_=res, dim_in=dim_in, dim=dim, dim_out=2 * channels, - name="resnet", use_batch_norm=use_batch_norm, - train=train, weight_norm=weight_norm, - residual_blocks=residual_blocks, - bottleneck=bottleneck, skip=skip) - shift, log_rescaling = tf.split(axis=3, num_or_size_splits=2, value=res) - scale = variable_on_cpu( - "scale", [], - tf.constant_initializer(1.)) - shift = tf.reshape( - shift, [batch_size, height, width, channels]) - log_rescaling = tf.reshape( - log_rescaling, [batch_size, height, width, channels]) - log_rescaling = scale * tf.tanh(log_rescaling) - if not use_batch_norm: - scale_shift = variable_on_cpu( - "scale_shift", [], - tf.constant_initializer(0.)) - log_rescaling += scale_shift - if reverse: - res = canvas - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res, dim=channels, name="bn_out", train=False, - epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res *= tf.exp(.5 * log_var) - res += mean - res *= tf.exp(-log_rescaling) - res -= shift - log_diff = -log_rescaling - if use_batch_norm: - log_diff += .5 * log_var - else: - res = canvas - res += shift - res *= tf.exp(log_rescaling) - log_diff = log_rescaling - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res, dim=channels, name="bn_out", train=train, - epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res -= mean - res *= tf.exp(-.5 * log_var) - log_diff -= .5 * log_var - if change_bottom: - res = tf.concat([input_, res], 3) - log_diff = tf.concat([tf.zeros_like(log_diff), log_diff], 3) - else: - res = tf.concat([res, input_], 3) - log_diff = tf.concat([log_diff, tf.zeros_like(log_diff)], 3) - - return res, log_diff - - -def conv_ch_add_coupling(input_, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, change_bottom=True, skip=True): - """Additive coupling with channel-wise splitting.""" - with tf.variable_scope(name) as scope: - if reverse or (not train): - scope.reuse_variables() - - if change_bottom: - input_, canvas = tf.split(axis=3, num_or_size_splits=2, value=input_) - else: - canvas, input_ = tf.split(axis=3, num_or_size_splits=2, value=input_) - shape = input_.get_shape().as_list() - channels = shape[3] - res = input_ - - # initial input - if use_batch_norm: - res = batch_norm( - input_=res, dim=channels, name="bn_in", scale=False, - train=train, epsilon=1e-4, axes=[0, 1, 2]) - res = tf.concat([res, -res], 3) - dim_in = 2. 
* channels - res = tf.nn.relu(res) - shift = resnet(input_=res, dim_in=dim_in, dim=dim, dim_out=channels, - name="resnet", use_batch_norm=use_batch_norm, - train=train, weight_norm=weight_norm, - residual_blocks=residual_blocks, - bottleneck=bottleneck, skip=skip) - if reverse: - res = canvas - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res, dim=channels, name="bn_out", train=False, - epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res *= tf.exp(.5 * log_var) - res += mean - res -= shift - log_diff = tf.zeros_like(res) - if use_batch_norm: - log_diff += .5 * log_var - else: - res = canvas - res += shift - log_diff = tf.zeros_like(res) - if use_batch_norm: - mean, var = batch_norm_log_diff( - input_=res, dim=channels, name="bn_out", train=train, - epsilon=1e-4, axes=[0, 1, 2]) - log_var = tf.log(var) - res -= mean - res *= tf.exp(-.5 * log_var) - log_diff -= .5 * log_var - if change_bottom: - res = tf.concat([input_, res], 3) - log_diff = tf.concat([tf.zeros_like(log_diff), log_diff], 3) - else: - res = tf.concat([res, input_], 3) - log_diff = tf.concat([log_diff, tf.zeros_like(log_diff)], 3) - - return res, log_diff - - -def conv_ch_coupling(input_, dim, name, - use_batch_norm=True, train=True, weight_norm=True, - reverse=False, residual_blocks=5, - bottleneck=False, use_aff=True, change_bottom=True, - skip=True): - """Coupling with channel-wise splitting.""" - if use_aff: - return conv_ch_aff_coupling( - input_=input_, dim=dim, name=name, - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=reverse, residual_blocks=residual_blocks, - bottleneck=bottleneck, change_bottom=change_bottom, skip=skip) - else: - return conv_ch_add_coupling( - input_=input_, dim=dim, name=name, - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=reverse, residual_blocks=residual_blocks, - bottleneck=bottleneck, change_bottom=change_bottom, skip=skip) - - -# RECURSIVE USE OF COUPLING LAYERS -def rec_masked_conv_coupling(input_, hps, scale_idx, n_scale, - use_batch_norm=True, weight_norm=True, - train=True): - """Recursion on coupling layers.""" - shape = input_.get_shape().as_list() - channels = shape[3] - residual_blocks = hps.residual_blocks - base_dim = hps.base_dim - mask = 1. - use_aff = hps.use_aff - res = input_ - skip = hps.skip - log_diff = tf.zeros_like(input_) - dim = base_dim - if FLAGS.recursion_type < 4: - dim *= 2 ** scale_idx - with tf.variable_scope("scale_%d" % scale_idx): - # initial coupling layers - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=mask, dim=dim, - name="coupling_0", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=1. 
- mask, dim=dim, - name="coupling_1", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=mask, dim=dim, - name="coupling_2", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - if scale_idx < (n_scale - 1): - with tf.variable_scope("scale_%d" % scale_idx): - res = squeeze_2x2(res) - log_diff = squeeze_2x2(log_diff) - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=True, dim=2 * dim, - name="coupling_4", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=False, dim=2 * dim, - name="coupling_5", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=True, dim=2 * dim, - name="coupling_6", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, skip=skip) - log_diff += inc_log_diff - res = unsqueeze_2x2(res) - log_diff = unsqueeze_2x2(log_diff) - if FLAGS.recursion_type > 1: - res = squeeze_2x2_ordered(res) - log_diff = squeeze_2x2_ordered(log_diff) - if FLAGS.recursion_type > 2: - res_1 = res[:, :, :, :channels] - res_2 = res[:, :, :, channels:] - log_diff_1 = log_diff[:, :, :, :channels] - log_diff_2 = log_diff[:, :, :, channels:] - else: - res_1, res_2 = tf.split(axis=3, num_or_size_splits=2, value=res) - log_diff_1, log_diff_2 = tf.split(axis=3, num_or_size_splits=2, value=log_diff) - res_1, inc_log_diff = rec_masked_conv_coupling( - input_=res_1, hps=hps, scale_idx=scale_idx + 1, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - res = tf.concat([res_1, res_2], 3) - log_diff_1 += inc_log_diff - log_diff = tf.concat([log_diff_1, log_diff_2], 3) - res = squeeze_2x2_ordered(res, reverse=True) - log_diff = squeeze_2x2_ordered(log_diff, reverse=True) - else: - res = squeeze_2x2_ordered(res) - log_diff = squeeze_2x2_ordered(log_diff) - res, inc_log_diff = rec_masked_conv_coupling( - input_=res, hps=hps, scale_idx=scale_idx + 1, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - log_diff += inc_log_diff - res = squeeze_2x2_ordered(res, reverse=True) - log_diff = squeeze_2x2_ordered(log_diff, reverse=True) - else: - with tf.variable_scope("scale_%d" % scale_idx): - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=1. 
- mask, dim=dim, - name="coupling_3", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=False, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - return res, log_diff - - -def rec_masked_deconv_coupling(input_, hps, scale_idx, n_scale, - use_batch_norm=True, weight_norm=True, - train=True): - """Recursion on inverting coupling layers.""" - shape = input_.get_shape().as_list() - channels = shape[3] - residual_blocks = hps.residual_blocks - base_dim = hps.base_dim - mask = 1. - use_aff = hps.use_aff - res = input_ - log_diff = tf.zeros_like(input_) - skip = hps.skip - dim = base_dim - if FLAGS.recursion_type < 4: - dim *= 2 ** scale_idx - if scale_idx < (n_scale - 1): - if FLAGS.recursion_type > 1: - res = squeeze_2x2_ordered(res) - log_diff = squeeze_2x2_ordered(log_diff) - if FLAGS.recursion_type > 2: - res_1 = res[:, :, :, :channels] - res_2 = res[:, :, :, channels:] - log_diff_1 = log_diff[:, :, :, :channels] - log_diff_2 = log_diff[:, :, :, channels:] - else: - res_1, res_2 = tf.split(axis=3, num_or_size_splits=2, value=res) - log_diff_1, log_diff_2 = tf.split(axis=3, num_or_size_splits=2, value=log_diff) - res_1, log_diff_1 = rec_masked_deconv_coupling( - input_=res_1, hps=hps, - scale_idx=scale_idx + 1, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - res = tf.concat([res_1, res_2], 3) - log_diff = tf.concat([log_diff_1, log_diff_2], 3) - res = squeeze_2x2_ordered(res, reverse=True) - log_diff = squeeze_2x2_ordered(log_diff, reverse=True) - else: - res = squeeze_2x2_ordered(res) - log_diff = squeeze_2x2_ordered(log_diff) - res, log_diff = rec_masked_deconv_coupling( - input_=res, hps=hps, - scale_idx=scale_idx + 1, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - res = squeeze_2x2_ordered(res, reverse=True) - log_diff = squeeze_2x2_ordered(log_diff, reverse=True) - with tf.variable_scope("scale_%d" % scale_idx): - res = squeeze_2x2(res) - log_diff = squeeze_2x2(log_diff) - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=True, dim=2 * dim, - name="coupling_6", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=False, dim=2 * dim, - name="coupling_5", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = conv_ch_coupling( - input_=res, - change_bottom=True, dim=2 * dim, - name="coupling_4", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, skip=skip) - log_diff += inc_log_diff - res = unsqueeze_2x2(res) - log_diff = unsqueeze_2x2(log_diff) - else: - with tf.variable_scope("scale_%d" % scale_idx): - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=1. 
- mask, dim=dim, - name="coupling_3", - use_batch_norm=use_batch_norm, train=train, - weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - - with tf.variable_scope("scale_%d" % scale_idx): - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=mask, dim=dim, - name="coupling_2", - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=True, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=1. - mask, dim=dim, - name="coupling_1", - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - res, inc_log_diff = masked_conv_coupling( - input_=res, - mask_in=mask, dim=dim, - name="coupling_0", - use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, - reverse=True, residual_blocks=residual_blocks, - bottleneck=hps.bottleneck, use_aff=use_aff, - use_width=1., use_height=1., skip=skip) - log_diff += inc_log_diff - - return res, log_diff - - -# ENCODER AND DECODER IMPLEMENTATIONS -# start the recursions -def encoder(input_, hps, n_scale, use_batch_norm=True, - weight_norm=True, train=True): - """Encoding/gaussianization function.""" - res = input_ - log_diff = tf.zeros_like(input_) - res, inc_log_diff = rec_masked_conv_coupling( - input_=res, hps=hps, scale_idx=0, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - log_diff += inc_log_diff - - return res, log_diff - - -def decoder(input_, hps, n_scale, use_batch_norm=True, - weight_norm=True, train=True): - """Decoding/generator function.""" - res, log_diff = rec_masked_deconv_coupling( - input_=input_, hps=hps, scale_idx=0, n_scale=n_scale, - use_batch_norm=use_batch_norm, weight_norm=weight_norm, - train=train) - - return res, log_diff - - -class RealNVP(object): - """Real NVP model.""" - - def __init__(self, hps, sampling=False): - # DATA TENSOR INSTANTIATION - device = "/cpu:0" - if FLAGS.dataset == "imnet": - with tf.device( - tf.train.replica_device_setter(0, worker_device=device)): - filename_queue = tf.train.string_input_producer( - gfile.Glob(FLAGS.data_path), num_epochs=None) - reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) - features = tf.parse_single_example( - serialized_example, - features={ - "image_raw": tf.FixedLenFeature([], tf.string), - }) - image = tf.decode_raw(features["image_raw"], tf.uint8) - image.set_shape([FLAGS.image_size * FLAGS.image_size * 3]) - image = tf.cast(image, tf.float32) - if FLAGS.mode == "train": - images = tf.train.shuffle_batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size, - # Ensures a minimum amount of shuffling of examples. - min_after_dequeue=1000) - else: - images = tf.train.batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size) - self.x_orig = x_orig = images - image_size = FLAGS.image_size - x_in = tf.reshape( - x_orig, - [hps.batch_size, FLAGS.image_size, FLAGS.image_size, 3]) - x_in = tf.clip_by_value(x_in, 0, 255) - x_in = (tf.cast(x_in, tf.float32) - + tf.random_uniform(tf.shape(x_in))) / 256. 
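-    # CelebA records store raw 218x178 RGB frames; below they are
-    # center-cropped to 148x148 and later resized to 64x64.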
- elif FLAGS.dataset == "celeba": - with tf.device( - tf.train.replica_device_setter(0, worker_device=device)): - filename_queue = tf.train.string_input_producer( - gfile.Glob(FLAGS.data_path), num_epochs=None) - reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) - features = tf.parse_single_example( - serialized_example, - features={ - "image_raw": tf.FixedLenFeature([], tf.string), - }) - image = tf.decode_raw(features["image_raw"], tf.uint8) - image.set_shape([218 * 178 * 3]) # 218, 178 - image = tf.cast(image, tf.float32) - image = tf.reshape(image, [218, 178, 3]) - image = image[40:188, 15:163, :] - if FLAGS.mode == "train": - image = tf.image.random_flip_left_right(image) - images = tf.train.shuffle_batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size, - min_after_dequeue=1000) - else: - images = tf.train.batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size) - self.x_orig = x_orig = images - image_size = 64 - x_in = tf.reshape(x_orig, [hps.batch_size, 148, 148, 3]) - x_in = tf.image.resize_images( - x_in, [64, 64], method=0, align_corners=False) - x_in = (tf.cast(x_in, tf.float32) - + tf.random_uniform(tf.shape(x_in))) / 256. - elif FLAGS.dataset == "lsun": - with tf.device( - tf.train.replica_device_setter(0, worker_device=device)): - filename_queue = tf.train.string_input_producer( - gfile.Glob(FLAGS.data_path), num_epochs=None) - reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) - features = tf.parse_single_example( - serialized_example, - features={ - "image_raw": tf.FixedLenFeature([], tf.string), - "height": tf.FixedLenFeature([], tf.int64), - "width": tf.FixedLenFeature([], tf.int64), - "depth": tf.FixedLenFeature([], tf.int64) - }) - image = tf.decode_raw(features["image_raw"], tf.uint8) - height = tf.reshape((features["height"], tf.int64)[0], [1]) - height = tf.cast(height, tf.int32) - width = tf.reshape((features["width"], tf.int64)[0], [1]) - width = tf.cast(width, tf.int32) - depth = tf.reshape((features["depth"], tf.int64)[0], [1]) - depth = tf.cast(depth, tf.int32) - image = tf.reshape(image, tf.concat([height, width, depth], 0)) - image = tf.random_crop(image, [64, 64, 3]) - if FLAGS.mode == "train": - image = tf.image.random_flip_left_right(image) - images = tf.train.shuffle_batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size, - # Ensures a minimum amount of shuffling of examples. - min_after_dequeue=1000) - else: - images = tf.train.batch( - [image], batch_size=hps.batch_size, num_threads=1, - capacity=1000 + 3 * hps.batch_size) - self.x_orig = x_orig = images - image_size = 64 - x_in = tf.reshape(x_orig, [hps.batch_size, 64, 64, 3]) - x_in = (tf.cast(x_in, tf.float32) - + tf.random_uniform(tf.shape(x_in))) / 256. - else: - raise ValueError("Unknown dataset.") - x_in = tf.reshape(x_in, [hps.batch_size, image_size, image_size, 3]) - side_shown = int(numpy.sqrt(hps.batch_size)) - shown_x = tf.transpose( - tf.reshape( - x_in[:(side_shown * side_shown), :, :, :], - [side_shown, image_size * side_shown, image_size, 3]), - [0, 2, 1, 3]) - shown_x = tf.transpose( - tf.reshape( - shown_x, - [1, image_size * side_shown, image_size * side_shown, 3]), - [0, 2, 1, 3]) * 255. 
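-    # Log the assembled grid of input images (rescaled back to [0, 255])
-    # to TensorBoard for visual inspection.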
- tf.summary.image( - "inputs", - tf.cast(shown_x, tf.uint8), - max_outputs=1) - - # restrict the data - FLAGS.image_size = image_size - data_constraint = hps.data_constraint - pre_logit_scale = numpy.log(data_constraint) - pre_logit_scale -= numpy.log(1. - data_constraint) - pre_logit_scale = tf.cast(pre_logit_scale, tf.float32) - logit_x_in = 2. * x_in # [0, 2] - logit_x_in -= 1. # [-1, 1] - logit_x_in *= data_constraint # [-.9, .9] - logit_x_in += 1. # [.1, 1.9] - logit_x_in /= 2. # [.05, .95] - # logit the data - logit_x_in = tf.log(logit_x_in) - tf.log(1. - logit_x_in) - transform_cost = tf.reduce_sum( - tf.nn.softplus(logit_x_in) + tf.nn.softplus(-logit_x_in) - - tf.nn.softplus(-pre_logit_scale), - [1, 2, 3]) - - # INFERENCE AND COSTS - z_out, log_diff = encoder( - input_=logit_x_in, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=True) - if FLAGS.mode != "train": - z_out, log_diff = encoder( - input_=logit_x_in, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - final_shape = [image_size, image_size, 3] - prior_ll = standard_normal_ll(z_out) - prior_ll = tf.reduce_sum(prior_ll, [1, 2, 3]) - log_diff = tf.reduce_sum(log_diff, [1, 2, 3]) - log_diff += transform_cost - cost = -(prior_ll + log_diff) - - self.x_in = x_in - self.z_out = z_out - self.cost = cost = tf.reduce_mean(cost) - - l2_reg = sum( - [tf.reduce_sum(tf.square(v)) for v in tf.trainable_variables() - if ("magnitude" in v.name) or ("rescaling_scale" in v.name)]) - - bit_per_dim = ((cost + numpy.log(256.) * image_size * image_size * 3.) - / (image_size * image_size * 3. * numpy.log(2.))) - self.bit_per_dim = bit_per_dim - - # OPTIMIZATION - momentum = 1. - hps.momentum - decay = 1. - hps.decay - if hps.optimizer == "adam": - optimizer = tf.train.AdamOptimizer( - learning_rate=hps.learning_rate, - beta1=momentum, beta2=decay, epsilon=1e-08, - use_locking=False, name="Adam") - elif hps.optimizer == "rmsprop": - optimizer = tf.train.RMSPropOptimizer( - learning_rate=hps.learning_rate, decay=decay, - momentum=momentum, epsilon=1e-04, - use_locking=False, name="RMSProp") - else: - optimizer = tf.train.MomentumOptimizer(hps.learning_rate, - momentum=momentum) - - step = tf.get_variable( - "global_step", [], tf.int64, - tf.zeros_initializer(), - trainable=False) - self.step = step - grads_and_vars = optimizer.compute_gradients( - cost + hps.l2_coeff * l2_reg, - tf.trainable_variables()) - grads, vars_ = zip(*grads_and_vars) - capped_grads, gradient_norm = tf.clip_by_global_norm( - grads, clip_norm=hps.clip_gradient) - gradient_norm = tf.check_numerics(gradient_norm, - "Gradient norm is NaN or Inf.") - - l2_z = tf.reduce_sum(tf.square(z_out), [1, 2, 3]) - if not sampling: - tf.summary.scalar("negative_log_likelihood", tf.reshape(cost, [])) - tf.summary.scalar("gradient_norm", tf.reshape(gradient_norm, [])) - tf.summary.scalar("bit_per_dim", tf.reshape(bit_per_dim, [])) - tf.summary.scalar("log_diff", tf.reshape(tf.reduce_mean(log_diff), [])) - tf.summary.scalar("prior_ll", tf.reshape(tf.reduce_mean(prior_ll), [])) - tf.summary.scalar( - "log_diff_var", - tf.reshape(tf.reduce_mean(tf.square(log_diff)) - - tf.square(tf.reduce_mean(log_diff)), [])) - tf.summary.scalar( - "prior_ll_var", - tf.reshape(tf.reduce_mean(tf.square(prior_ll)) - - tf.square(tf.reduce_mean(prior_ll)), [])) - tf.summary.scalar("l2_z_mean", tf.reshape(tf.reduce_mean(l2_z), [])) - tf.summary.scalar( - "l2_z_var", - tf.reshape(tf.reduce_mean(tf.square(l2_z)) - - 
tf.square(tf.reduce_mean(l2_z)), [])) - - - capped_grads_and_vars = zip(capped_grads, vars_) - self.train_step = optimizer.apply_gradients( - capped_grads_and_vars, global_step=step) - - # SAMPLING AND VISUALIZATION - if sampling: - # SAMPLES - sample = standard_normal_sample([100] + final_shape) - sample, _ = decoder( - input_=sample, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=True) - sample = tf.nn.sigmoid(sample) - - sample = tf.clip_by_value(sample, 0, 1) * 255. - sample = tf.reshape(sample, [100, image_size, image_size, 3]) - sample = tf.transpose( - tf.reshape(sample, [10, image_size * 10, image_size, 3]), - [0, 2, 1, 3]) - sample = tf.transpose( - tf.reshape(sample, [1, image_size * 10, image_size * 10, 3]), - [0, 2, 1, 3]) - tf.summary.image( - "samples", - tf.cast(sample, tf.uint8), - max_outputs=1) - - # CONCATENATION - concatenation, _ = encoder( - input_=logit_x_in, hps=hps, - n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - concatenation = tf.reshape( - concatenation, - [(side_shown * side_shown), image_size, image_size, 3]) - concatenation = tf.transpose( - tf.reshape( - concatenation, - [side_shown, image_size * side_shown, image_size, 3]), - [0, 2, 1, 3]) - concatenation = tf.transpose( - tf.reshape( - concatenation, - [1, image_size * side_shown, image_size * side_shown, 3]), - [0, 2, 1, 3]) - concatenation, _ = decoder( - input_=concatenation, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - concatenation = tf.nn.sigmoid(concatenation) * 255. - tf.summary.image( - "concatenation", - tf.cast(concatenation, tf.uint8), - max_outputs=1) - - # MANIFOLD - - # Data basis - z_u, _ = encoder( - input_=logit_x_in[:8, :, :, :], hps=hps, - n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - u_1 = tf.reshape(z_u[0, :, :, :], [-1]) - u_2 = tf.reshape(z_u[1, :, :, :], [-1]) - u_3 = tf.reshape(z_u[2, :, :, :], [-1]) - u_4 = tf.reshape(z_u[3, :, :, :], [-1]) - u_5 = tf.reshape(z_u[4, :, :, :], [-1]) - u_6 = tf.reshape(z_u[5, :, :, :], [-1]) - u_7 = tf.reshape(z_u[6, :, :, :], [-1]) - u_8 = tf.reshape(z_u[7, :, :, :], [-1]) - - # 3D dome - manifold_side = 8 - angle_1 = numpy.arange(manifold_side) * 1. / manifold_side - angle_2 = numpy.arange(manifold_side) * 1. / manifold_side - angle_1 *= 2. * numpy.pi - angle_2 *= 2. * numpy.pi - angle_1 = angle_1.astype("float32") - angle_2 = angle_2.astype("float32") - angle_1 = tf.reshape(angle_1, [1, -1, 1]) - angle_1 += tf.zeros([manifold_side, manifold_side, 1]) - angle_2 = tf.reshape(angle_2, [-1, 1, 1]) - angle_2 += tf.zeros([manifold_side, manifold_side, 1]) - n_angle_3 = 40 - angle_3 = numpy.arange(n_angle_3) * 1. 
/ n_angle_3 - angle_3 *= 2 * numpy.pi - angle_3 = angle_3.astype("float32") - angle_3 = tf.reshape(angle_3, [-1, 1, 1, 1]) - angle_3 += tf.zeros([n_angle_3, manifold_side, manifold_side, 1]) - manifold = tf.cos(angle_1) * ( - tf.cos(angle_2) * ( - tf.cos(angle_3) * u_1 + tf.sin(angle_3) * u_2) - + tf.sin(angle_2) * ( - tf.cos(angle_3) * u_3 + tf.sin(angle_3) * u_4)) - manifold += tf.sin(angle_1) * ( - tf.cos(angle_2) * ( - tf.cos(angle_3) * u_5 + tf.sin(angle_3) * u_6) - + tf.sin(angle_2) * ( - tf.cos(angle_3) * u_7 + tf.sin(angle_3) * u_8)) - manifold = tf.reshape( - manifold, - [n_angle_3 * manifold_side * manifold_side] + final_shape) - manifold, _ = decoder( - input_=manifold, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - manifold = tf.nn.sigmoid(manifold) - - manifold = tf.clip_by_value(manifold, 0, 1) * 255. - manifold = tf.reshape( - manifold, - [n_angle_3, - manifold_side * manifold_side, - image_size, - image_size, - 3]) - manifold = tf.transpose( - tf.reshape( - manifold, - [n_angle_3, manifold_side, - image_size * manifold_side, image_size, 3]), [0, 1, 3, 2, 4]) - manifold = tf.transpose( - tf.reshape( - manifold, - [n_angle_3, image_size * manifold_side, - image_size * manifold_side, 3]), - [0, 2, 1, 3]) - manifold = tf.transpose(manifold, [1, 2, 0, 3]) - manifold = tf.reshape( - manifold, - [1, image_size * manifold_side, - image_size * manifold_side, 3 * n_angle_3]) - tf.summary.image( - "manifold", - tf.cast(manifold[:, :, :, :3], tf.uint8), - max_outputs=1) - - # COMPRESSION - z_complete, _ = encoder( - input_=logit_x_in[:hps.n_scale, :, :, :], hps=hps, - n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - z_compressed_list = [z_complete] - z_noisy_list = [z_complete] - z_lost = z_complete - for scale_idx in xrange(hps.n_scale - 1): - z_lost = squeeze_2x2_ordered(z_lost) - z_lost, _ = tf.split(axis=3, num_or_size_splits=2, value=z_lost) - z_compressed = z_lost - z_noisy = z_lost - for _ in xrange(scale_idx + 1): - z_compressed = tf.concat( - [z_compressed, tf.zeros_like(z_compressed)], 3) - z_compressed = squeeze_2x2_ordered( - z_compressed, reverse=True) - z_noisy = tf.concat( - [z_noisy, tf.random_normal( - z_noisy.get_shape().as_list())], 3) - z_noisy = squeeze_2x2_ordered(z_noisy, reverse=True) - z_compressed_list.append(z_compressed) - z_noisy_list.append(z_noisy) - self.z_reduced = z_lost - z_compressed = tf.concat(z_compressed_list, 0) - z_noisy = tf.concat(z_noisy_list, 0) - noisy_images, _ = decoder( - input_=z_noisy, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - compressed_images, _ = decoder( - input_=z_compressed, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=False) - noisy_images = tf.nn.sigmoid(noisy_images) - compressed_images = tf.nn.sigmoid(compressed_images) - - noisy_images = tf.clip_by_value(noisy_images, 0, 1) * 255. - noisy_images = tf.reshape( - noisy_images, - [(hps.n_scale * hps.n_scale), image_size, image_size, 3]) - noisy_images = tf.transpose( - tf.reshape( - noisy_images, - [hps.n_scale, image_size * hps.n_scale, image_size, 3]), - [0, 2, 1, 3]) - noisy_images = tf.transpose( - tf.reshape( - noisy_images, - [1, image_size * hps.n_scale, image_size * hps.n_scale, 3]), - [0, 2, 1, 3]) - tf.summary.image( - "noise", - tf.cast(noisy_images, tf.uint8), - max_outputs=1) - compressed_images = tf.clip_by_value(compressed_images, 0, 1) * 255. 
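-      # Editor's note: as with the noisy samples above, the reshapes and
-      # transposes below tile the n_scale**2 compressed reconstructions into
-      # an n_scale-by-n_scale grid for the image summary.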
- compressed_images = tf.reshape( - compressed_images, - [(hps.n_scale * hps.n_scale), image_size, image_size, 3]) - compressed_images = tf.transpose( - tf.reshape( - compressed_images, - [hps.n_scale, image_size * hps.n_scale, image_size, 3]), - [0, 2, 1, 3]) - compressed_images = tf.transpose( - tf.reshape( - compressed_images, - [1, image_size * hps.n_scale, image_size * hps.n_scale, 3]), - [0, 2, 1, 3]) - tf.summary.image( - "compression", - tf.cast(compressed_images, tf.uint8), - max_outputs=1) - - # SAMPLES x2 - final_shape[0] *= 2 - final_shape[1] *= 2 - big_sample = standard_normal_sample([25] + final_shape) - big_sample, _ = decoder( - input_=big_sample, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=True) - big_sample = tf.nn.sigmoid(big_sample) - - big_sample = tf.clip_by_value(big_sample, 0, 1) * 255. - big_sample = tf.reshape( - big_sample, - [25, image_size * 2, image_size * 2, 3]) - big_sample = tf.transpose( - tf.reshape( - big_sample, - [5, image_size * 10, image_size * 2, 3]), [0, 2, 1, 3]) - big_sample = tf.transpose( - tf.reshape( - big_sample, - [1, image_size * 10, image_size * 10, 3]), - [0, 2, 1, 3]) - tf.summary.image( - "big_sample", - tf.cast(big_sample, tf.uint8), - max_outputs=1) - - # SAMPLES x10 - final_shape[0] *= 5 - final_shape[1] *= 5 - extra_large = standard_normal_sample([1] + final_shape) - extra_large, _ = decoder( - input_=extra_large, hps=hps, n_scale=hps.n_scale, - use_batch_norm=hps.use_batch_norm, weight_norm=True, - train=True) - extra_large = tf.nn.sigmoid(extra_large) - - extra_large = tf.clip_by_value(extra_large, 0, 1) * 255. - tf.summary.image( - "extra_large", - tf.cast(extra_large, tf.uint8), - max_outputs=1) - - def eval_epoch(self, hps): - """Evaluate bits/dim.""" - n_eval_dict = { - "imnet": 50000, - "lsun": 300, - "celeba": 19962, - "svhn": 26032, - } - if FLAGS.eval_set_size == 0: - num_examples_eval = n_eval_dict[FLAGS.dataset] - else: - num_examples_eval = FLAGS.eval_set_size - n_epoch = num_examples_eval / hps.batch_size - eval_costs = [] - bar_len = 70 - for epoch_idx in xrange(n_epoch): - n_equal = epoch_idx * bar_len * 1. / n_epoch - n_equal = numpy.ceil(n_equal) - n_equal = int(n_equal) - n_dash = bar_len - n_equal - progress_bar = "[" + "=" * n_equal + "-" * n_dash + "]\r" - print(progress_bar, end=' ') - cost = self.bit_per_dim.eval() - eval_costs.append(cost) - print("") - return float(numpy.mean(eval_costs)) - - -def train_model(hps, logdir): - """Training.""" - with tf.Graph().as_default(): - with tf.device(tf.train.replica_device_setter(0)): - with tf.variable_scope("model"): - model = RealNVP(hps) - - saver = tf.train.Saver(tf.global_variables()) - - # Build the summary operation from the last tower summaries. - summary_op = tf.summary.merge_all() - - # Build an initialization operation to run below. - init = tf.global_variables_initializer() - - # Start running operations on the Graph. allow_soft_placement must be set to - # True to build towers on GPU, as some of the ops do not have GPU - # implementations. - sess = tf.Session(config=tf.ConfigProto( - allow_soft_placement=True, - log_device_placement=True)) - sess.run(init) - - ckpt_state = tf.train.get_checkpoint_state(logdir) - if ckpt_state and ckpt_state.model_checkpoint_path: - print("Loading file %s" % ckpt_state.model_checkpoint_path) - saver.restore(sess, ckpt_state.model_checkpoint_path) - - # Start the queue runners. 
-    tf.train.start_queue_runners(sess=sess)
-
-    summary_writer = tf.summary.FileWriter(
-        logdir,
-        graph=sess.graph)
-
-    local_step = 0
-    while True:
-      fetches = [model.step, model.bit_per_dim, model.train_step]
-      # The chief worker evaluates the summaries every 100 steps.
-      should_eval_summaries = local_step % 100 == 0
-      if should_eval_summaries:
-        fetches += [summary_op]
-
-      start_time = time.time()
-      outputs = sess.run(fetches)
-      global_step_val = outputs[0]
-      loss = outputs[1]
-      duration = time.time() - start_time
-      assert not numpy.isnan(
-          loss), 'Model diverged with loss = NaN'
-
-      if local_step % 10 == 0:
-        examples_per_sec = hps.batch_size / float(duration)
-        format_str = ('%s: step %d, loss = %.2f '
-                      '(%.1f examples/sec; %.3f '
-                      'sec/batch)')
-        print(format_str % (datetime.now(), global_step_val, loss,
-                            examples_per_sec, duration))
-
-      if should_eval_summaries:
-        summary_str = outputs[-1]
-        summary_writer.add_summary(summary_str, global_step_val)
-
-      # Save the model checkpoint periodically.
-      if local_step % 1000 == 0 or (local_step + 1) == FLAGS.train_steps:
-        checkpoint_path = os.path.join(logdir, 'model.ckpt')
-        saver.save(
-            sess,
-            checkpoint_path,
-            global_step=global_step_val)
-
-      if outputs[0] >= FLAGS.train_steps:
-        break
-
-      local_step += 1
-
-
-def evaluate(hps, logdir, traindir, subset="valid", return_val=False):
-  """Evaluation."""
-  hps.batch_size = 100
-  with tf.Graph().as_default():
-    with tf.device("/cpu:0"):
-      with tf.variable_scope("model") as var_scope:
-        eval_model = RealNVP(hps)
-        summary_writer = tf.summary.FileWriter(logdir)
-        var_scope.reuse_variables()
-
-      saver = tf.train.Saver()
-      sess = tf.Session(config=tf.ConfigProto(
-          allow_soft_placement=True,
-          log_device_placement=True))
-      tf.train.start_queue_runners(sess)
-
-      previous_global_step = 0  # don't run eval for step = 0
-
-      with sess.as_default():
-        while True:
-          ckpt_state = tf.train.get_checkpoint_state(traindir)
-          if not (ckpt_state and ckpt_state.model_checkpoint_path):
-            print("No model to eval yet at %s" % traindir)
-            time.sleep(30)
-            continue
-          print("Loading file %s" % ckpt_state.model_checkpoint_path)
-          saver.restore(sess, ckpt_state.model_checkpoint_path)
-
-          current_step = tf.train.global_step(sess, eval_model.step)
-          if current_step == previous_global_step:
-            print("Waiting for the checkpoint to be updated.")
-            time.sleep(30)
-            continue
-          previous_global_step = current_step
-
-          print("Evaluating...")
-          bit_per_dim = eval_model.eval_epoch(hps)
-          print("Epoch: %d, %s -> %.3f bits/dim"
-                % (current_step, subset, bit_per_dim))
-          print("Writing summary...")
-          summary = tf.Summary()
-          summary.value.extend(
-              [tf.Summary.Value(
-                  tag="bit_per_dim",
-                  simple_value=bit_per_dim)])
-          summary_writer.add_summary(summary, current_step)
-
-          if return_val:
-            return current_step, bit_per_dim
-
-
-def sample_from_model(hps, logdir, traindir):
-  """Sampling."""
-  hps.batch_size = 100
-  with tf.Graph().as_default():
-    with tf.device("/cpu:0"):
-      with tf.variable_scope("model") as var_scope:
-        eval_model = RealNVP(hps, sampling=True)
-        summary_writer = tf.summary.FileWriter(logdir)
-        var_scope.reuse_variables()
-
-      summary_op = tf.summary.merge_all()
-      saver = tf.train.Saver()
-      sess = tf.Session(config=tf.ConfigProto(
-          allow_soft_placement=True,
-          log_device_placement=True))
-      coord = tf.train.Coordinator()
-      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
-
-      previous_global_step = 0  # don't run eval for step = 0
-
-      initialized = False
-      with sess.as_default():
-        while True:
-
ckpt_state = tf.train.get_checkpoint_state(traindir) - if not (ckpt_state and ckpt_state.model_checkpoint_path): - if not initialized: - print("No model to eval yet at %s" % traindir) - time.sleep(30) - continue - else: - print ("Loading file %s" - % ckpt_state.model_checkpoint_path) - saver.restore(sess, ckpt_state.model_checkpoint_path) - - current_step = tf.train.global_step(sess, eval_model.step) - if current_step == previous_global_step: - print("Waiting for the checkpoint to be updated.") - time.sleep(30) - continue - previous_global_step = current_step - - fetches = [summary_op] - - outputs = sess.run(fetches) - summary_writer.add_summary(outputs[0], current_step) - coord.request_stop() - coord.join(threads) - - -def main(unused_argv): - hps = get_default_hparams().update_config(FLAGS.hpconfig) - if FLAGS.mode == "train": - train_model(hps=hps, logdir=FLAGS.logdir) - elif FLAGS.mode == "sample": - sample_from_model(hps=hps, logdir=FLAGS.logdir, - traindir=FLAGS.traindir) - else: - hps.batch_size = 100 - evaluate(hps=hps, logdir=FLAGS.logdir, - traindir=FLAGS.traindir, subset=FLAGS.mode) - -if __name__ == "__main__": - tf.app.run() diff --git a/research/real_nvp/real_nvp_utils.py b/research/real_nvp/real_nvp_utils.py deleted file mode 100644 index d8240f0e9..000000000 --- a/research/real_nvp/real_nvp_utils.py +++ /dev/null @@ -1,475 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Utility functions for Real NVP. -""" - -# pylint: disable=dangerous-default-value - -import numpy -from six.moves import xrange -import tensorflow as tf -from tensorflow.python.framework import ops - -DEFAULT_BN_LAG = .0 - - -def stable_var(input_, mean=None, axes=[0]): - """Numerically more stable variance computation.""" - if mean is None: - mean = tf.reduce_mean(input_, axes) - res = tf.square(input_ - mean) - max_sqr = tf.reduce_max(res, axes) - res /= max_sqr - res = tf.reduce_mean(res, axes) - res *= max_sqr - - return res - - -def variable_on_cpu(name, shape, initializer, trainable=True): - """Helper to create a Variable stored on CPU memory. 
-
-  Args:
-    name: name of the variable
-    shape: list of ints
-    initializer: initializer for Variable
-    trainable: boolean defining if the variable is for training
-  Returns:
-    Variable Tensor
-  """
-  var = tf.get_variable(
-      name, shape, initializer=initializer, trainable=trainable)
-  return var
-
-
-# layers
-def conv_layer(input_,
-               filter_size,
-               dim_in,
-               dim_out,
-               name,
-               stddev=1e-2,
-               strides=[1, 1, 1, 1],
-               padding="SAME",
-               nonlinearity=None,
-               bias=False,
-               weight_norm=False,
-               scale=False):
-  """Convolutional layer."""
-  with tf.variable_scope(name) as scope:
-    weights = variable_on_cpu(
-        "weights",
-        filter_size + [dim_in, dim_out],
-        tf.random_uniform_initializer(
-            minval=-stddev, maxval=stddev))
-    # weight normalization
-    if weight_norm:
-      weights /= tf.sqrt(tf.reduce_sum(tf.square(weights), [0, 1, 2]))
-      if scale:
-        magnitude = variable_on_cpu(
-            "magnitude", [dim_out],
-            tf.constant_initializer(
-                stddev * numpy.sqrt(dim_in * numpy.prod(filter_size) / 12.)))
-        weights *= magnitude
-    res = input_
-    # handling filter size bigger than image size
-    if hasattr(input_, "shape"):
-      if input_.get_shape().as_list()[1] < filter_size[0]:
-        pad_1 = tf.zeros([
-            input_.get_shape().as_list()[0],
-            filter_size[0] - input_.get_shape().as_list()[1],
-            input_.get_shape().as_list()[2],
-            input_.get_shape().as_list()[3]
-        ])
-        pad_2 = tf.zeros([
-            input_.get_shape().as_list()[0],
-            filter_size[0],
-            filter_size[1] - input_.get_shape().as_list()[2],
-            input_.get_shape().as_list()[3]
-        ])
-        res = tf.concat(axis=1, values=[pad_1, res])
-        res = tf.concat(axis=2, values=[pad_2, res])
-    res = tf.nn.conv2d(
-        input=res,
-        filter=weights,
-        strides=strides,
-        padding=padding,
-        name=scope.name)
-
-    if hasattr(input_, "shape"):
-      if input_.get_shape().as_list()[1] < filter_size[0]:
-        res = tf.slice(res, [
-            0, filter_size[0] - input_.get_shape().as_list()[1],
-            filter_size[1] - input_.get_shape().as_list()[2], 0
-        ], [-1, -1, -1, -1])
-
-    if bias:
-      biases = variable_on_cpu("biases", [dim_out], tf.constant_initializer(0.))
-      res = tf.nn.bias_add(res, biases)
-    if nonlinearity is not None:
-      res = nonlinearity(res)
-
-  return res
-
-
-def max_pool_2x2(input_):
-  """Max pooling."""
-  return tf.nn.max_pool(
-      input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
-
-
-def depool_2x2(input_, stride=2):
-  """Depooling."""
-  shape = input_.get_shape().as_list()
-  batch_size = shape[0]
-  height = shape[1]
-  width = shape[2]
-  channels = shape[3]
-  res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
-  res = tf.concat(
-      axis=2,
-      values=[res,
-              tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
-  res = tf.concat(
-      axis=4,
-      values=[res,
-              tf.zeros([batch_size, height, stride, width, stride - 1,
-                        channels])])
-  res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])
-
-  return res
-
-
-# random flip on a batch of images
-def batch_random_flip(input_):
-  """Simultaneous horizontal random flip."""
-  if isinstance(input_, (float, int)):
-    return input_
-  shape = input_.get_shape().as_list()
-  batch_size = shape[0]
-  height = shape[1]
-  width = shape[2]
-  channels = shape[3]
-  res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)
-  res = [elem[0, :, :, :] for elem in res]
-  res = [tf.image.random_flip_left_right(elem) for elem in res]
-  res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]
-  res = tf.concat(axis=0, values=res)
-
-  return res
-
-
-# build a one hot representation corresponding to the integer tensor
-# the
one-hot dimension is appended to the integer tensor shape -def as_one_hot(input_, n_indices): - """Convert indices to one-hot.""" - shape = input_.get_shape().as_list() - n_elem = numpy.prod(shape) - indices = tf.range(n_elem) - indices = tf.cast(indices, tf.int64) - indices_input = tf.concat(axis=0, values=[indices, tf.reshape(input_, [-1])]) - indices_input = tf.reshape(indices_input, [2, -1]) - indices_input = tf.transpose(indices_input) - res = tf.sparse_to_dense( - indices_input, [n_elem, n_indices], 1., 0., name="flat_one_hot") - res = tf.reshape(res, [elem for elem in shape] + [n_indices]) - - return res - - -def squeeze_2x2(input_): - """Squeezing operation: reshape to convert space to channels.""" - return squeeze_nxn(input_, n_factor=2) - - -def squeeze_nxn(input_, n_factor=2): - """Squeezing operation: reshape to convert space to channels.""" - if isinstance(input_, (float, int)): - return input_ - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - if height % n_factor != 0: - raise ValueError("Height not divisible by %d." % n_factor) - if width % n_factor != 0: - raise ValueError("Width not divisible by %d." % n_factor) - res = tf.reshape( - input_, - [batch_size, - height // n_factor, - n_factor, width // n_factor, - n_factor, channels]) - res = tf.transpose(res, [0, 1, 3, 5, 2, 4]) - res = tf.reshape( - res, - [batch_size, - height // n_factor, - width // n_factor, - channels * n_factor * n_factor]) - - return res - - -def unsqueeze_2x2(input_): - """Unsqueezing operation: reshape to convert channels into space.""" - if isinstance(input_, (float, int)): - return input_ - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - if channels % 4 != 0: - raise ValueError("Number of channels not divisible by 4.") - res = tf.reshape(input_, [batch_size, height, width, channels // 4, 2, 2]) - res = tf.transpose(res, [0, 1, 4, 2, 5, 3]) - res = tf.reshape(res, [batch_size, 2 * height, 2 * width, channels // 4]) - - return res - - -# batch norm -def batch_norm(input_, - dim, - name, - scale=True, - train=True, - epsilon=1e-8, - decay=.1, - axes=[0], - bn_lag=DEFAULT_BN_LAG): - """Batch normalization.""" - # create variables - with tf.variable_scope(name): - var = variable_on_cpu( - "var", [dim], tf.constant_initializer(1.), trainable=False) - mean = variable_on_cpu( - "mean", [dim], tf.constant_initializer(0.), trainable=False) - step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) - if scale: - gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.)) - beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.)) - # choose the appropriate moments - if train: - used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") - cur_mean, cur_var = used_mean, used_var - if bn_lag > 0.: - used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean)) - used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) - used_mean /= (1. - bn_lag**(step + 1)) - used_var /= (1. 
- bn_lag**(step + 1)) - else: - used_mean, used_var = mean, var - cur_mean, cur_var = used_mean, used_var - - # normalize - res = (input_ - used_mean) / tf.sqrt(used_var + epsilon) - # de-normalize - if scale: - res *= gamma - res += beta - - # update variables - if train: - with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): - with ops.colocate_with(mean): - new_mean = tf.assign_sub( - mean, - tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean.")) - with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): - with ops.colocate_with(var): - new_var = tf.assign_sub( - var, - tf.check_numerics(decay * (var - cur_var), - "NaN in moving variance.")) - with tf.name_scope(name, "IncrementTime", [step]): - with ops.colocate_with(step): - new_step = tf.assign_add(step, 1.) - res += 0. * new_mean * new_var * new_step - - return res - - -# batch normalization taking into account the volume transformation -def batch_norm_log_diff(input_, - dim, - name, - train=True, - epsilon=1e-8, - decay=.1, - axes=[0], - reuse=None, - bn_lag=DEFAULT_BN_LAG): - """Batch normalization with corresponding log determinant Jacobian.""" - if reuse is None: - reuse = not train - # create variables - with tf.variable_scope(name) as scope: - if reuse: - scope.reuse_variables() - var = variable_on_cpu( - "var", [dim], tf.constant_initializer(1.), trainable=False) - mean = variable_on_cpu( - "mean", [dim], tf.constant_initializer(0.), trainable=False) - step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False) - # choose the appropriate moments - if train: - used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm") - cur_mean, cur_var = used_mean, used_var - if bn_lag > 0.: - used_var = stable_var(input_=input_, mean=used_mean, axes=axes) - cur_var = used_var - used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean)) - used_mean /= (1. - bn_lag**(step + 1)) - used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var)) - used_var /= (1. - bn_lag**(step + 1)) - else: - used_mean, used_var = mean, var - cur_mean, cur_var = used_mean, used_var - - # update variables - if train: - with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): - with ops.colocate_with(mean): - new_mean = tf.assign_sub( - mean, - tf.check_numerics( - decay * (mean - cur_mean), "NaN in moving mean.")) - with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): - with ops.colocate_with(var): - new_var = tf.assign_sub( - var, - tf.check_numerics(decay * (var - cur_var), - "NaN in moving variance.")) - with tf.name_scope(name, "IncrementTime", [step]): - with ops.colocate_with(step): - new_step = tf.assign_add(step, 1.) - used_var += 0. 
* new_mean * new_var * new_step - used_var += epsilon - - return used_mean, used_var - - -def convnet(input_, - dim_in, - dim_hid, - filter_sizes, - dim_out, - name, - use_batch_norm=True, - train=True, - nonlinearity=tf.nn.relu): - """Chaining of convolutional layers.""" - dims_in = [dim_in] + dim_hid[:-1] - dims_out = dim_hid - res = input_ - - bias = (not use_batch_norm) - with tf.variable_scope(name): - for layer_idx in xrange(len(dim_hid)): - res = conv_layer( - input_=res, - filter_size=filter_sizes[layer_idx], - dim_in=dims_in[layer_idx], - dim_out=dims_out[layer_idx], - name="h_%d" % layer_idx, - stddev=1e-2, - nonlinearity=None, - bias=bias) - if use_batch_norm: - res = batch_norm( - input_=res, - dim=dims_out[layer_idx], - name="bn_%d" % layer_idx, - scale=(nonlinearity == tf.nn.relu), - train=train, - epsilon=1e-8, - axes=[0, 1, 2]) - if nonlinearity is not None: - res = nonlinearity(res) - - res = conv_layer( - input_=res, - filter_size=filter_sizes[-1], - dim_in=dims_out[-1], - dim_out=dim_out, - name="out", - stddev=1e-2, - nonlinearity=None) - - return res - - -# distributions -# log-likelihood estimation -def standard_normal_ll(input_): - """Log-likelihood of standard Gaussian distribution.""" - res = -.5 * (tf.square(input_) + numpy.log(2. * numpy.pi)) - - return res - - -def standard_normal_sample(shape): - """Samples from standard Gaussian distribution.""" - return tf.random_normal(shape) - - -SQUEEZE_MATRIX = numpy.array([[[[1., 0., 0., 0.]], [[0., 0., 1., 0.]]], - [[[0., 0., 0., 1.]], [[0., 1., 0., 0.]]]]) - - -def squeeze_2x2_ordered(input_, reverse=False): - """Squeezing operation with a controlled ordering.""" - shape = input_.get_shape().as_list() - batch_size = shape[0] - height = shape[1] - width = shape[2] - channels = shape[3] - if reverse: - if channels % 4 != 0: - raise ValueError("Number of channels not divisible by 4.") - channels /= 4 - else: - if height % 2 != 0: - raise ValueError("Height not divisible by 2.") - if width % 2 != 0: - raise ValueError("Width not divisible by 2.") - weights = numpy.zeros((2, 2, channels, 4 * channels)) - for idx_ch in xrange(channels): - slice_2 = slice(idx_ch, (idx_ch + 1)) - slice_3 = slice((idx_ch * 4), ((idx_ch + 1) * 4)) - weights[:, :, slice_2, slice_3] = SQUEEZE_MATRIX - shuffle_channels = [idx_ch * 4 for idx_ch in xrange(channels)] - shuffle_channels += [idx_ch * 4 + 1 for idx_ch in xrange(channels)] - shuffle_channels += [idx_ch * 4 + 2 for idx_ch in xrange(channels)] - shuffle_channels += [idx_ch * 4 + 3 for idx_ch in xrange(channels)] - shuffle_channels = numpy.array(shuffle_channels) - weights = weights[:, :, :, shuffle_channels].astype("float32") - if reverse: - res = tf.nn.conv2d_transpose( - value=input_, - filter=weights, - output_shape=[batch_size, height * 2, width * 2, channels], - strides=[1, 2, 2, 1], - padding="SAME", - name="unsqueeze_2x2") - else: - res = tf.nn.conv2d( - input=input_, - filter=weights, - strides=[1, 2, 2, 1], - padding="SAME", - name="squeeze_2x2") - - return res diff --git a/research/sentiment_analysis/README.md b/research/sentiment_analysis/README.md deleted file mode 100644 index f98c42751..000000000 --- a/research/sentiment_analysis/README.md +++ /dev/null @@ -1,26 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not 
Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Sentiment Analysis
-## Overview
-This is an implementation of the Sentiment Analysis model described in
-[this paper](https://arxiv.org/abs/1412.1058). The implementation follows the
-[paddle version](https://github.com/mlperf/reference/tree/master/sentiment_analysis/paddle).
-
-The model concatenates the outputs of two CNN layers with different kernel
-sizes. Batch normalization and dropout layers are used to prevent over-fitting.
-
-## Dataset
-The [Keras](https://keras.io) [IMDB Movie reviews sentiment classification](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) dataset is used. The download is handled by the Keras module, and the downloaded files are stored in the `~/.keras/datasets` directory. As of June 15, 2018, the compressed file is 17 MB.
-
-## Running Code
-### Train and evaluate model
-To train and evaluate the model, issue the following command:
-```
-python sentiment_main.py
-```
-Arguments:
- * `--dataset`: The dataset name to be downloaded and preprocessed. By default, it is `imdb`.
-
-There are other arguments controlling the model and the training process. Use the `--help` or `-h` flag to get a full list of possible arguments with detailed descriptions.
-
-## Benchmarks
-The model achieves an accuracy of 90.1% on the IMDB dataset.
diff --git a/research/sentiment_analysis/__init__.py b/research/sentiment_analysis/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/sentiment_analysis/data/__init__.py b/research/sentiment_analysis/data/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/sentiment_analysis/data/dataset.py b/research/sentiment_analysis/data/dataset.py
deleted file mode 100644
index 9ba4b9ac6..000000000
--- a/research/sentiment_analysis/data/dataset.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Dataset module for sentiment analysis.
-
-Currently imdb dataset is available.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import data.imdb as imdb
-
-DATASET_IMDB = "imdb"
-
-
-def load(dataset, vocabulary_size, sentence_length):
-  """Returns training and evaluation input.
-
-  Args:
-    dataset: Dataset to be trained and evaluated.
-      Currently only imdb is supported.
-    vocabulary_size: The number of the most frequent tokens
-      to be used from the corpus.
-    sentence_length: The number of words in each sentence.
-      Longer sentences get cut, shorter ones padded.
-  Raises:
-    ValueError: if the dataset value is not valid.
-  Returns:
-    A tuple of length 4, for training sentences, labels,
-    evaluation sentences, and evaluation labels,
-    each being a numpy array.
-  """
-  if dataset == DATASET_IMDB:
-    return imdb.load(vocabulary_size, sentence_length)
-  else:
-    raise ValueError("unsupported dataset: " + dataset)
-
-
-def get_num_class(dataset):
-  """Returns an integer for the number of label classes.
-
-  Args:
-    dataset: Dataset to be trained and evaluated.
-      Currently only imdb is supported.
-  Raises:
-    ValueError: if the dataset value is not valid.
-  Returns:
-    int: The number of label classes.
-  """
-  if dataset == DATASET_IMDB:
-    return imdb.NUM_CLASS
-  else:
-    raise ValueError("unsupported dataset: " + dataset)
diff --git a/research/sentiment_analysis/data/imdb.py b/research/sentiment_analysis/data/imdb.py
deleted file mode 100644
index f8160ca2f..000000000
--- a/research/sentiment_analysis/data/imdb.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""IMDB Dataset module for sentiment analysis."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-from data.util import OOV_CHAR
-from data.util import pad_sentence
-from data.util import START_CHAR
-
-NUM_CLASS = 2
-
-
-def load(vocabulary_size, sentence_length):
-  """Returns training and evaluation input for imdb dataset.
-
-  Args:
-    vocabulary_size: The number of the most frequent tokens
-      to be used from the corpus.
-    sentence_length: The number of words in each sentence.
-      Longer sentences get cut, shorter ones padded.
-  Returns:
-    A tuple of length 4, for training and evaluation data,
-    each being a numpy array.
-  """
-  (x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(
-      path="imdb.npz",
-      num_words=vocabulary_size,
-      skip_top=0,
-      maxlen=None,
-      seed=113,
-      start_char=START_CHAR,
-      oov_char=OOV_CHAR,
-      index_from=OOV_CHAR+1)
-
-  x_train_processed = []
-  for sen in x_train:
-    sen = pad_sentence(sen, sentence_length)
-    x_train_processed.append(np.array(sen))
-  x_train_processed = np.array(x_train_processed)
-
-  x_test_processed = []
-  for sen in x_test:
-    sen = pad_sentence(sen, sentence_length)
-    x_test_processed.append(np.array(sen))
-  x_test_processed = np.array(x_test_processed)
-
-  return x_train_processed, np.eye(NUM_CLASS)[y_train], \
-      x_test_processed, np.eye(NUM_CLASS)[y_test]
diff --git a/research/sentiment_analysis/data/util.py b/research/sentiment_analysis/data/util.py
deleted file mode 100644
index c8f8808f7..000000000
--- a/research/sentiment_analysis/data/util.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Utility module for sentiment analysis."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-START_CHAR = 1
-END_CHAR = 2
-OOV_CHAR = 3
-
-
-def pad_sentence(sentence, sentence_length):
-  """Pads the given sentence at the end.
-
-  If the input is longer than sentence_length,
-  the remaining portion is dropped.
-  END_CHAR is used for the padding.
-
-  Args:
-    sentence: A numpy array of integers.
-    sentence_length: The length of the input after the padding.
-  Returns:
-    A numpy array of integers of the given length.
-  """
-  sentence = sentence[:sentence_length]
-  if len(sentence) < sentence_length:
-    sentence = np.pad(sentence, (0, sentence_length - len(sentence)),
-                      "constant", constant_values=(START_CHAR, END_CHAR))
-
-  return sentence
diff --git a/research/sentiment_analysis/sentiment_main.py b/research/sentiment_analysis/sentiment_main.py
deleted file mode 100644
index 8b9ba5f92..000000000
--- a/research/sentiment_analysis/sentiment_main.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""Main function for the sentiment analysis model.
-
-The model makes use of concatenation of two CNN layers with
-different kernel sizes. See `sentiment_model.py`
-for more details about the models.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import os
-
-import tensorflow as tf
-
-from data import dataset
-import sentiment_model
-
-
-_DROPOUT_RATE = 0.95
-
-
-def run_model(dataset_name, emb_dim, voc_size, sen_len,
-              hid_dim, batch_size, epochs, model_save_dir):
-  """Run training loop and an evaluation at the end.
-
-  Args:
-    dataset_name: Dataset name to be trained and evaluated.
-    emb_dim: The dimension of the Embedding layer.
-    voc_size: The number of the most frequent tokens
-      to be used from the corpus.
-    sen_len: The number of words in each sentence.
-      Longer sentences get cut, shorter ones padded.
-    hid_dim: The number of filters in each CNN layer.
-    batch_size: The size of each batch during training.
-    epochs: The number of iterations over the training set.
-    model_save_dir: The directory in which checkpoints and the
-      final model are saved.
-  """
-
-  model = sentiment_model.CNN(emb_dim, voc_size, sen_len,
-                              hid_dim, dataset.get_num_class(dataset_name),
-                              _DROPOUT_RATE)
-  model.summary()
-
-  model.compile(loss="categorical_crossentropy",
-                optimizer="rmsprop",
-                metrics=["accuracy"])
-
-  tf.logging.info("Loading the data")
-  x_train, y_train, x_test, y_test = dataset.load(
-      dataset_name, voc_size, sen_len)
-
-  if not os.path.exists(model_save_dir):
-    os.makedirs(model_save_dir)
-
-  filepath = os.path.join(model_save_dir, "model-{epoch:02d}.hdf5")
-
-  checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
-      filepath, monitor='val_accuracy', verbose=1, save_best_only=True,
-      save_weights_only=True, mode='auto')
-
-  model.fit(x_train, y_train, batch_size=batch_size,
-            validation_split=0.4, epochs=epochs,
-            callbacks=[checkpoint_callback])
-
-  score = model.evaluate(x_test, y_test, batch_size=batch_size)
-
-  model.save(os.path.join(model_save_dir, "full-model.h5"))
-
-  tf.logging.info("Score: {}".format(score))
-
-
-if __name__ == "__main__":
-  parser = argparse.ArgumentParser()
-  parser.add_argument("-d", "--dataset", help="Dataset to be trained "
-                      "and evaluated.",
-                      type=str, choices=["imdb"], default="imdb")
-
-  parser.add_argument("-e", "--embedding_dim",
-                      help="The dimension of the Embedding layer.",
-                      type=int, default=512)
-
-  parser.add_argument("-v", "--vocabulary_size",
-                      help="The number of the words to be considered "
-                      "in the dataset corpus.",
-                      type=int, default=6000)
-
-  parser.add_argument("-s", "--sentence_length",
-                      help="The number of words in a data point. "
-                      "Entries of smaller length are padded.",
-                      type=int, default=600)
-
-  parser.add_argument("-c", "--hidden_dim",
-                      help="The number of the CNN layer filters.",
-                      type=int, default=512)
-
-  parser.add_argument("-b", "--batch_size",
-                      help="The size of each batch for training.",
-                      type=int, default=500)
-
-  parser.add_argument("-p", "--epochs",
-                      help="The number of epochs for training.",
-                      type=int, default=55)
-
-  parser.add_argument("-f", "--folder",
-                      help="The folder/dir in which to save the trained model.",
-                      type=str, default=None)
-  args = parser.parse_args()
-
-  if args.folder is None:
-    parser.error("The -f/--folder argument is required; "
-                 "provide a directory in which to save the model.")
-
-  run_model(args.dataset, args.embedding_dim, args.vocabulary_size,
-            args.sentence_length, args.hidden_dim,
-            args.batch_size, args.epochs, args.folder)
diff --git a/research/sentiment_analysis/sentiment_model.py b/research/sentiment_analysis/sentiment_model.py
deleted file mode 100644
index 586474992..000000000
--- a/research/sentiment_analysis/sentiment_model.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""Model for sentiment analysis.
-
-The model makes use of concatenation of two CNN layers with
-different kernel sizes.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-class CNN(tf.keras.models.Model):
-  """CNN for sentiment analysis."""
-
-  def __init__(self, emb_dim, num_words, sentence_length, hid_dim,
-               class_dim, dropout_rate):
-    """Initialize CNN model.
-
-    Args:
-      emb_dim: The dimension of the Embedding layer.
-      num_words: The number of the most frequent tokens
-        to be used from the corpus.
-      sentence_length: The number of words in each sentence.
-        Longer sentences get cut, shorter ones padded.
-      hid_dim: The number of filters in each Conv1D layer.
-      class_dim: The number of output classes.
-      dropout_rate: The fraction of input units dropped
-        in the Dropout layer.
-    """
-
-    input_layer = tf.keras.layers.Input(shape=(sentence_length,), dtype=tf.int32)
-
-    layer = tf.keras.layers.Embedding(num_words, output_dim=emb_dim)(input_layer)
-
-    layer_conv3 = tf.keras.layers.Conv1D(hid_dim, 3, activation="relu")(layer)
-    layer_conv3 = tf.keras.layers.GlobalMaxPooling1D()(layer_conv3)
-
-    layer_conv4 = tf.keras.layers.Conv1D(hid_dim, 2, activation="relu")(layer)
-    layer_conv4 = tf.keras.layers.GlobalMaxPooling1D()(layer_conv4)
-
-    layer = tf.keras.layers.concatenate([layer_conv4, layer_conv3], axis=1)
-    layer = tf.keras.layers.BatchNormalization()(layer)
-    layer = tf.keras.layers.Dropout(dropout_rate)(layer)
-
-    output = tf.keras.layers.Dense(class_dim, activation="softmax")(layer)
-
-    super(CNN, self).__init__(inputs=[input_layer], outputs=output)
diff --git a/research/seq2species/README.md b/research/seq2species/README.md
deleted file mode 100644
index dbe473131..000000000
--- a/research/seq2species/README.md
+++ /dev/null
@@ -1,187 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Seq2Species: Neural Network Models for Species Classification
-
-*A deep learning solution for read-level taxonomic classification with 16S.*
-
-Recent improvements in sequencing technology have made possible large, public
-databases of biological sequencing data, bringing new data richness to
-many important problems in bioinformatics. However, this growing availability of
-data creates a need for analysis methods capable of efficiently handling these
-large sequencing datasets. We on the [Genomics team in Google
-Brain](https://ai.google/research/teams/brain/healthcare-biosciences) are
-particularly interested in the class of problems which can be framed as
-assigning meaningful labels to short biological sequences, and are exploring the
-possibility of creating a general deep learning solution for solving this class
-of sequence-labeling problems. We are excited to share our initial progress in
-this direction by releasing Seq2Species, an open-source neural network framework
-for [TensorFlow](https://www.tensorflow.org/) for predicting read-level
-taxonomic labels from genomic sequence. Our release includes all the code
-necessary to train new Seq2Species models.
-
-## About Seq2Species
-
-Briefly, Seq2Species provides a framework for training deep neural networks to
-predict database-derived labels directly from short reads of DNA. Thus far, our
-research has focused predominantly on demonstrating the value of this deep
-learning approach on the problem of determining the species of origin of
-next-generation sequencing reads from [16S ribosomal
-DNA](https://en.wikipedia.org/wiki/16S_ribosomal_RNA). We used this
-Seq2Species framework to train depthwise separable convolutional neural networks
-on short subsequences from the 16S genes of more than 13 thousand distinct
-species. The resulting classification model assigns species-level probabilities
-to individual 16S reads.
-
-For more information about the use cases we have explored, or for technical
-details describing how Seq2Species works, please see our
-[preprint](https://www.biorxiv.org/content/early/2018/06/22/353474).
-
-## Installation
-
-Training Seq2Species models requires installing the following dependencies:
-
-* python 2.7
-
-* protocol buffers
-
-* numpy
-
-* absl
-
-### Dependencies
-
-Detailed instructions for installing TensorFlow are available on the [Installing
-TensorFlow](https://www.tensorflow.org/install/) website. Please follow the
-full instructions for installing TensorFlow with GPU support. For most
-users, the following command will suffice for continuing with CPU support only:
-```bash
-# For CPU
-pip install --upgrade tensorflow
-```
-
-The TensorFlow installation should also include installation of the numpy and
-absl libraries, which are two of TensorFlow's python dependencies. If
-necessary, instructions for standalone installation are available:
-
-* [numpy](https://scipy.org/install.html)
-
-* [absl](https://github.com/abseil/abseil-py)
-
-Information about protocol buffers, as well as download and installation
-instructions for the protocol buffer (protobuf) compiler, are available on the [Google
-Developers website](https://developers.google.com/protocol-buffers/). A typical
-Ubuntu user can install this library using `apt-get`:
-```bash
-sudo apt-get install protobuf-compiler
-```
-
-### Clone
-
-Now, clone `tensorflow/models` to start working with the code:
-```bash
-git clone https://github.com/tensorflow/models.git
-```
-
-### Protobuf Compilation
-
-Seq2Species uses protobufs to store and save dataset and model metadata. Before
-the framework can be used to build and train models, the protobuf libraries must
-be compiled. This can be accomplished using the following command:
-```bash
-# From tensorflow/models/research
-protoc seq2species/protos/seq2label.proto --python_out=.
-```
-
-### Testing the Installation
-
-One can test that Seq2Species has been installed correctly by running the
-following command:
-```bash
-python seq2species/run_training_test.py
-```
-
-## Usage Information
-
-Input data to Seq2Species models should be [tf.train.Example protocol messages](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto) stored in
-[TFRecord format](https://www.tensorflow.org/versions/r1.0/api_guides/python/python_io#tfrecords_format_details).
-Specifically, the input pipeline expects tf.train.Examples with a 'sequence' field
-containing a genomic sequence as an upper-case string, as well as one field for each
-target label (e.g. 'species'). There should also be an accompanying
-Seq2LabelDatasetInfo text protobuf containing metadata about the input, including
-the possible label values for each target.
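-
-For concreteness, here is a minimal, hypothetical sketch of writing one such
-record. The 'sequence' and 'species' field names follow the description above;
-the file name and the use of string-valued label features are illustrative
-assumptions, since the exact label encoding is defined by the Seq2Species
-input pipeline:
-```python
-import tensorflow as tf
-
-
-def make_example(sequence, species):
-  """Builds a tf.train.Example holding one read and one target label."""
-  def _bytes(value):
-    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-  return tf.train.Example(features=tf.train.Features(feature={
-      "sequence": _bytes(sequence.encode()),  # upper-case read
-      "species": _bytes(species.encode()),  # one target label
-  }))
-
-
-# Write a single-read TFRecord file in the expected input format.
-with tf.python_io.TFRecordWriter("reads.tfrecord") as writer:
-  example = make_example("ACGT" * 25, "Escherichia coli")
-  writer.write(example.SerializeToString())
-```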
-
-Below, we give an example command that could be used to launch training for 1000
-steps, assuming that appropriate data and metadata files are stored at
-`${TFRECORD}` and `${DATASET_INFO}`:
-```bash
-python seq2species/run_training.py --train_files ${TFRECORD} \
---metadata_path ${DATASET_INFO} --hparams 'train_steps=1000' \
---logdir $HOME/seq2species
-```
-This will output [TensorBoard
-summaries](https://www.tensorflow.org/guide/summaries_and_tensorboard), [TensorFlow
-checkpoints](https://www.tensorflow.org/guide/variables#checkpoint_files), and Seq2LabelModelInfo and
-Seq2LabelExperimentMeasures metadata to the logdir `$HOME/seq2species`.
-
-### Preprocessed Seq2Species Data
-
-We have provided preprocessed data based on 16S reference sequences from the
-[NCBI RefSeq Targeted Loci
-Project](https://www.ncbi.nlm.nih.gov/refseq/targetedloci/) in a Seq2Species
-bucket on Google Cloud Storage. After installing the
-[Cloud SDK](https://cloud.google.com/sdk/install),
-one can download those data (roughly 25 GB) to a local directory `${DEST}` using
-the `gsutil` command:
-```bash
-BUCKET=gs://brain-genomics-public/research/seq2species
-mkdir -p ${DEST}
-gsutil -m cp ${BUCKET}/* ${DEST}
-```
-
-To check whether the copy has completed successfully, check the `${DEST}` directory:
-```bash
-ls -1 ${DEST}
-```
-which should produce:
-```bash
-ncbi_100bp_revcomp.dataset_info.pbtxt
-ncbi_100bp_revcomp.tfrecord
-```
-
-The following command can be used to train a copy of one of our best-performing
-deep neural network models for 100 base pair (bp) data. This command also
-illustrates how to set hyperparameter values explicitly from the command line.
-The file `configuration.py` provides a full list of hyperparameters, their descriptions,
-and their default values. Additional flags are described at the top of
-`run_training.py`.
-```bash
-python seq2species/run_training.py \
---num_filters 3 \
---noise_rate 0.04 \
---train_files ${DEST}/ncbi_100bp_revcomp.tfrecord \
---metadata_path ${DEST}/ncbi_100bp_revcomp.dataset_info.pbtxt \
---logdir $HOME/seq2species \
---hparams 'filter_depths=[1,1,1],filter_widths=[5,9,13],grad_clip_norm=20.0,keep_prob=0.94017831318,
-lr_decay=0.0655052811,lr_init=0.000469689635793,lrelu_slope=0.0125376069918,min_read_length=100,num_fc_layers=2,num_fc_units=2828,optimizer=adam,optimizer_hp=0.885769367218,pointwise_depths=[84,58,180],pooling_type=avg,train_steps=3000000,use_depthwise_separable=true,weight_scale=1.18409526348'
-```
-
-### Visualization
-
-[TensorBoard](https://github.com/tensorflow/tensorboard) can be used to
-visualize training curves and other metrics stored in the summary files produced
-by `run_training.py`. Use the following command to launch a TensorBoard instance
-for the example model directory `$HOME/seq2species`:
-```bash
-tensorboard --logdir=$HOME/seq2species
-```
-
-## Contact
-
-Any issues with the Seq2Species framework should be filed with the
-[TensorFlow/models issue tracker](https://github.com/tensorflow/models/issues).
-Questions regarding Seq2Species capabilities can be directed to
-[seq2species-interest@google.com](mailto:seq2species-interest@google.com). This
-code is maintained by [@apbusia](https://github.com/apbusia) and
-[@depristo](https://github.com/depristo).
diff --git a/research/seq2species/build_model.py b/research/seq2species/build_model.py
deleted file mode 100644
index 9f4ae6b2e..000000000
--- a/research/seq2species/build_model.py
+++ /dev/null
@@ -1,506 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Defines convolutional model graph for Seq2Species.
-
-Builds TensorFlow computation graph for predicting the given taxonomic target
-labels from short reads of DNA using convolutional filters, followed by
-fully-connected layers and a softmax output layer.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import math
-
-import tensorflow as tf
-
-import input as seq2species_input
-import seq2label_utils
-
-
-class ConvolutionalNet(object):
-  """Class to build and store the model's computational graph and operations.
-
-  Attributes:
-    read_length: int; the length in basepairs of the input reads of DNA.
-    placeholders: dict; mapping from name to tf.Placeholder.
-    global_step: tf.Variable tracking number of training iterations performed.
-    train_op: operation to perform one training step by gradient descent.
-    summary_op: operation to log model's performance metrics to TF event files.
-    accuracy: tf.Variable giving the model's read-level accuracy for the
-      current inputs.
-    weighted_accuracy: tf.Variable giving the model's read-level weighted
-      accuracy for the current inputs.
-    loss: tf.Variable giving the model's current cross entropy loss.
-    logits: tf.Variable containing the model's logits for the current inputs.
-    predictions: tf.Variable containing the model's current predicted
-      probability distributions for the current inputs.
-    possible_labels: a dict of possible label values (list of strings), keyed by
-      target name. Labels in the lists are the order used for integer encoding.
-    use_tpu: whether model is to be run on TPU.
-  """
-
-  def __init__(self, hparams, dataset_info, targets, use_tpu=False):
-    """Initializes the ConvolutionalNet according to provided hyperparameters.
-
-    Does not build the graph---this is done by calling `build_graph` on the
-    constructed object or using `model_fn`.
-
-    Args:
-      hparams: tf.contrib.training.HParams object containing the model's
-        hyperparameters; see configuration.py for hyperparameter definitions.
-      dataset_info: a `Seq2LabelDatasetInfo` message reflecting the dataset
-        metadata.
-      targets: list of strings: the names of the prediction targets.
-      use_tpu: whether we are running on TPU; if True, summaries will be
-        disabled.
-    """
-    self._placeholders = {}
-    self._targets = targets
-    self._dataset_info = dataset_info
-    self._hparams = hparams
-    all_label_values = seq2label_utils.get_all_label_values(self.dataset_info)
-    self._possible_labels = {
-        target: all_label_values[target]
-        for target in self.targets
-    }
-    self._use_tpu = use_tpu
-
-  @property
-  def hparams(self):
-    return self._hparams
-
-  @property
-  def dataset_info(self):
-    return self._dataset_info
-
-  @property
-  def possible_labels(self):
-    return self._possible_labels
-
-  @property
-  def bases(self):
-    return seq2species_input.DNA_BASES
-
-  @property
-  def n_bases(self):
-    return seq2species_input.NUM_DNA_BASES
-
-  @property
-  def targets(self):
-    return self._targets
-
-  @property
-  def read_length(self):
-    return self.dataset_info.read_length
-
-  @property
-  def placeholders(self):
-    return self._placeholders
-
-  @property
-  def global_step(self):
-    return self._global_step
-
-  @property
-  def train_op(self):
-    return self._train_op
-
-  @property
-  def summary_op(self):
-    return self._summary_op
-
-  @property
-  def accuracy(self):
-    return self._accuracy
-
-  @property
-  def weighted_accuracy(self):
-    return self._weighted_accuracy
-
-  @property
-  def loss(self):
-    return self._loss
-
-  @property
-  def total_loss(self):
-    return self._total_loss
-
-  @property
-  def logits(self):
-    return self._logits
-
-  @property
-  def predictions(self):
-    return self._predictions
-
-  @property
-  def use_tpu(self):
-    return self._use_tpu
-
-  def _summary_scalar(self, name, scalar):
-    """Adds a summary scalar, if the platform supports summaries."""
-    if not self.use_tpu:
-      return tf.summary.scalar(name, scalar)
-    else:
-      return None
-
-  def _summary_histogram(self, name, values):
-    """Adds a summary histogram, if the platform supports summaries."""
-    if not self.use_tpu:
-      return tf.summary.histogram(name, values)
-    else:
-      return None
-
-  def _init_weights(self, shape, scale=1.0, name='weights'):
-    """Randomly initializes a weight Tensor of the given shape.
-
-    Args:
-      shape: list; desired Tensor dimensions.
-      scale: float; standard deviation scale with which to initialize weights.
-      name: string name for the variable.
-
-    Returns:
-      TF Variable containing truncated-normal-initialized weights.
-    """
-    num_inputs = shape[0] if len(shape) < 3 else shape[0] * shape[1] * shape[2]
-    stddev = scale / math.sqrt(num_inputs)
-    return tf.get_variable(
-        name,
-        shape=shape,
-        initializer=tf.truncated_normal_initializer(0., stddev))
-
-  def _init_bias(self, size):
-    """Initializes bias vector of given shape as zeros.
-
-    Args:
-      size: int; desired size of bias Tensor.
-
-    Returns:
-      TF Variable containing the initialized biases.
-    """
-    return tf.get_variable(
-        name='b_{}'.format(size),
-        shape=[size],
-        initializer=tf.zeros_initializer())
-
-  def _add_summaries(self, mode, gradient_norm, parameter_norm):
-    """Defines TensorFlow operation for logging summaries to event files.
-
-    Args:
-      mode: the ModeKey string.
-      gradient_norm: Tensor; norm of gradients produced during the current
-        training operation.
-      parameter_norm: Tensor; norm of the model parameters produced during the
-        current training operation.
-    """
-    # Log summaries for TensorBoard.
- if mode == tf.estimator.ModeKeys.TRAIN: - self._summary_scalar('norm_of_gradients', gradient_norm) - self._summary_scalar('norm_of_parameters', parameter_norm) - self._summary_scalar('total_loss', self.total_loss) - self._summary_scalar('learning_rate', self._learn_rate) - for target in self.targets: - self._summary_scalar('per_read_weighted_accuracy/{}'.format(target), - self.weighted_accuracy[target]) - self._summary_scalar('per_read_accuracy/{}'.format(target), - self.accuracy[target]) - self._summary_histogram('prediction_frequency/{}'.format(target), - self._predictions[target]) - self._summary_scalar('cross_entropy_loss/{}'.format(target), - self._loss[target]) - self._summary_op = tf.summary.merge_all() - else: - # Log average performance metrics over many batches using placeholders. - summaries = [] - for target in self.targets: - accuracy_ph = tf.placeholder(tf.float32, shape=()) - weighted_accuracy_ph = tf.placeholder(tf.float32, shape=()) - cross_entropy_ph = tf.placeholder(tf.float32, shape=()) - self._placeholders.update({ - 'accuracy/{}'.format(target): accuracy_ph, - 'weighted_accuracy/{}'.format(target): weighted_accuracy_ph, - 'cross_entropy/{}'.format(target): cross_entropy_ph, - }) - summaries += [ - self._summary_scalar('cross_entropy_loss/{}'.format(target), - cross_entropy_ph), - self._summary_scalar('per_read_accuracy/{}'.format(target), - accuracy_ph), - self._summary_scalar('per_read_weighted_accuracy/{}'.format(target), - weighted_accuracy_ph) - ] - - self._summary_op = tf.summary.merge(summaries) - - def _convolution(self, - inputs, - filter_dim, - pointwise_dim=None, - scale=1.0, - padding='SAME'): - """Applies convolutional filter of given dimensions to given input Tensor. - - If a pointwise dimension is specified, a depthwise separable convolution is - performed. - - Args: - inputs: 4D Tensor of shape (# reads, 1, # basepairs, # bases). - filter_dim: integer tuple of the form (width, depth). - pointwise_dim: int; output dimension for pointwise convolution. - scale: float; standard deviation scale with which to initialize weights. - padding: string; type of padding to use. One of "SAME" or "VALID". - - Returns: - 4D Tensor result of applying the convolutional filter to the inputs. - """ - in_channels = inputs.get_shape()[3].value - filter_width, filter_depth = filter_dim - filters = self._init_weights([1, filter_width, in_channels, filter_depth], - scale) - self._summary_histogram(filters.name.split(':')[0].split('/')[1], filters) - if pointwise_dim is None: - return tf.nn.conv2d( - inputs, - filters, - strides=[1, 1, 1, 1], - padding=padding, - name='weights') - pointwise_filters = self._init_weights( - [1, 1, filter_depth * in_channels, pointwise_dim], - scale, - name='pointwise_weights') - self._summary_histogram( - pointwise_filters.name.split(':')[0].split('/')[1], pointwise_filters) - return tf.nn.separable_conv2d( - inputs, - filters, - pointwise_filters, - strides=[1, 1, 1, 1], - padding=padding) - - def _pool(self, inputs, pooling_type): - """Performs pooling across width and height of the given inputs. - - Args: - inputs: Tensor shaped (batch, height, width, channels) over which to pool. - In our case, height is a unitary dimension and width can be thought of - as the read dimension. - pooling_type: string; one of "avg" or "max". - - Returns: - Tensor result of performing pooling of the given pooling_type over the - height and width dimensions of the given inputs. 
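
As an aside on the depthwise-separable path in `_convolution` above: `tf.nn.separable_conv2d` expects a depthwise filter of shape `[1, width, in_channels, channel_multiplier]` and a pointwise filter of shape `[1, 1, channel_multiplier * in_channels, out_channels]`, which is how the deleted code builds them. A minimal shape walk-through, assuming TF 1.x and hypothetical sizes:

```python
import tensorflow as tf  # TF 1.x, as used throughout this file

# Hypothetical batch of 8 one-hot reads of length 100 over 4 bases.
reads = tf.zeros([8, 1, 100, 4])
depthwise = tf.zeros([1, 3, 4, 2])       # width 3, channel multiplier 2
pointwise = tf.zeros([1, 1, 4 * 2, 64])  # mixes the 8 depthwise channels into 64
out = tf.nn.separable_conv2d(reads, depthwise, pointwise,
                             strides=[1, 1, 1, 1], padding='SAME')
print(out.shape)  # (8, 1, 100, 64)
```
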
- """ - if pooling_type == 'max': - return tf.reduce_max(inputs, axis=[1, 2]) - if pooling_type == 'avg': - return tf.reduce_sum( - inputs, axis=[1, 2]) / tf.to_float(tf.shape(inputs)[2]) - - def _leaky_relu(self, lrelu_slope, inputs): - """Applies leaky ReLu activation to the given inputs with the given slope. - - Args: - lrelu_slope: float; slope value for the activation function. - A slope of 0.0 defines a standard ReLu activation, while a positive - slope defines a leaky ReLu. - inputs: Tensor upon which to apply the activation function. - - Returns: - Tensor result of applying the activation function to the given inputs. - """ - with tf.variable_scope('leaky_relu_activation'): - return tf.maximum(lrelu_slope * inputs, inputs) - - def _dropout(self, inputs, keep_prob): - """Applies dropout to the given inputs. - - Args: - inputs: Tensor upon which to apply dropout. - keep_prob: float; probability with which to randomly retain values in - the given input. - - Returns: - Tensor result of applying dropout to the given inputs. - """ - with tf.variable_scope('dropout'): - if keep_prob < 1.0: - return tf.nn.dropout(inputs, keep_prob) - return inputs - - def build_graph(self, features, labels, mode, batch_size): - """Creates TensorFlow model graph. - - Args: - features: a dict of input features Tensors. - labels: a dict (by target name) of prediction labels. - mode: the ModeKey string. - batch_size: the integer batch size. - - Side Effect: - Adds the following key Tensors and operations as class attributes: - placeholders, global_step, train_op, summary_op, accuracy, - weighted_accuracy, loss, logits, and predictions. - """ - is_train = (mode == tf.estimator.ModeKeys.TRAIN) - read = features['sequence'] - - # Add a unitary dimension, so we can use conv2d. - read = tf.expand_dims(read, 1) - prev_out = read - - filters = zip(self.hparams.filter_widths, self.hparams.filter_depths) - for i, f in enumerate(filters): - with tf.variable_scope('convolution_' + str(i)): - if self.hparams.use_depthwise_separable: - p = self.hparams.pointwise_depths[i] - else: - p = None - conv_out = self._convolution( - prev_out, f, pointwise_dim=p, scale=self.hparams.weight_scale) - conv_act_out = self._leaky_relu(self.hparams.lrelu_slope, conv_out) - prev_out = ( - self._dropout(conv_act_out, self.hparams.keep_prob) - if is_train else conv_act_out) - - for i in xrange(self.hparams.num_fc_layers): - with tf.variable_scope('fully_connected_' + str(i)): - # Create a convolutional layer which is equivalent to a fully-connected - # layer when reads have length self.hparams.min_read_length. - # The convolution will tile the layer appropriately for longer reads. - biases = self._init_bias(self.hparams.num_fc_units) - if i == 0: - # Take entire min_read_length segment as input. - # Output a single value per min_read_length_segment. - filter_dimensions = (self.hparams.min_read_length, - self.hparams.num_fc_units) - else: - # Take single output value of previous layer as input. - filter_dimensions = (1, self.hparams.num_fc_units) - fc_out = biases + self._convolution( - prev_out, - filter_dimensions, - scale=self.hparams.weight_scale, - padding='VALID') - self._summary_histogram(biases.name.split(':')[0].split('/')[1], biases) - fc_act_out = self._leaky_relu(self.hparams.lrelu_slope, fc_out) - prev_out = ( - self._dropout(fc_act_out, self.hparams.keep_prob) - if is_train else fc_act_out) - - # Pool to collapse tiling for reads longer than hparams.min_read_length. 
-    with tf.variable_scope('pool'):
-      pool_out = self._pool(prev_out, self.hparams.pooling_type)
-
-    with tf.variable_scope('output'):
-      self._logits = {}
-      self._predictions = {}
-      self._weighted_accuracy = {}
-      self._accuracy = {}
-      self._loss = collections.OrderedDict()
-
-      for target in self.targets:
-        with tf.variable_scope(target):
-          label = labels[target]
-          possible_labels = self.possible_labels[target]
-          weights = self._init_weights(
-              [pool_out.get_shape()[1].value,
-               len(possible_labels)],
-              self.hparams.weight_scale,
-              name='weights')
-          biases = self._init_bias(len(possible_labels))
-          self._summary_histogram(
-              weights.name.split(':')[0].split('/')[1], weights)
-          self._summary_histogram(
-              biases.name.split(':')[0].split('/')[1], biases)
-          logits = tf.matmul(pool_out, weights) + biases
-          predictions = tf.nn.softmax(logits)
-
-          gather_inds = tf.stack([tf.range(batch_size), label], axis=1)
-          self._weighted_accuracy[target] = tf.reduce_mean(
-              tf.gather_nd(predictions, gather_inds))
-          argmax_prediction = tf.cast(tf.argmax(predictions, axis=1), tf.int32)
-          self._accuracy[target] = tf.reduce_mean(
-              tf.to_float(tf.equal(label, argmax_prediction)))
-
-          losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
-              labels=label, logits=logits)
-          self._loss[target] = tf.reduce_mean(losses)
-          self._logits[target] = logits
-          self._predictions[target] = predictions
-
-    # Compute total loss.
-    self._total_loss = tf.add_n(self._loss.values())
-
-    # Define the optimizer.
-
-    # tf.estimator framework builds the global_step for us, but if we aren't
-    # using the framework we have to make it ourselves.
-    self._global_step = tf.train.get_or_create_global_step()
-    if self.hparams.lr_decay < 0:
-      self._learn_rate = self.hparams.lr_init
-    else:
-      self._learn_rate = tf.train.exponential_decay(
-          self.hparams.lr_init,
-          self._global_step,
-          int(self.hparams.train_steps),
-          self.hparams.lr_decay,
-          staircase=False)
-    if self.hparams.optimizer == 'adam':
-      opt = tf.train.AdamOptimizer(self._learn_rate, self.hparams.optimizer_hp)
-    elif self.hparams.optimizer == 'momentum':
-      opt = tf.train.MomentumOptimizer(self._learn_rate,
-                                       self.hparams.optimizer_hp)
-    if self.use_tpu:
-      opt = tf.contrib.tpu.CrossShardOptimizer(opt)
-
-    gradients, variables = zip(*opt.compute_gradients(self._total_loss))
-    clipped_gradients, _ = tf.clip_by_global_norm(gradients,
-                                                  self.hparams.grad_clip_norm)
-    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
-      self._train_op = opt.apply_gradients(
-          zip(clipped_gradients, variables), global_step=self._global_step)
-
-    if not self.use_tpu:
-      grad_norm = tf.global_norm(gradients) if is_train else None
-      param_norm = tf.global_norm(variables) if is_train else None
-      self._add_summaries(mode, grad_norm, param_norm)
-
-  def model_fn(self, features, labels, mode, params):
-    """Function fulfilling the tf.estimator model_fn interface.
-
-    Args:
-      features: a dict containing the input features for prediction.
-      labels: a dict from target name to Tensor-value prediction.
-      mode: the ModeKey string.
-      params: a dictionary of parameters for building the model; current params
-        are params["batch_size"]: the integer batch size.
-
-    Returns:
-      A tf.estimator.EstimatorSpec object ready for use in training, inference,
-      or evaluation.
- """ - self.build_graph(features, labels, mode, params['batch_size']) - - return tf.estimator.EstimatorSpec( - mode, - predictions=self.predictions, - loss=self.total_loss, - train_op=self.train_op, - eval_metric_ops={}) diff --git a/research/seq2species/configuration.py b/research/seq2species/configuration.py deleted file mode 100644 index a4dd626e2..000000000 --- a/research/seq2species/configuration.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Defines hyperparameter configuration for ConvolutionalNet models. - -Specifically, provides methods for defining and initializing TensorFlow -hyperparameters objects for a convolutional model as defined in: -seq2species.build_model -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -def parse_hparams(hparam_values='', num_filters=1): - """Initializes TensorFlow hyperparameters object with default values. - - In addition, default hyperparameter values are overwritten with the specified - ones, where necessary. - - Args: - hparam_values: comma-separated string of name=value pairs for setting - particular hyperparameters. - num_filters: int; number of filters in the model. - Must be fixed outside of hyperparameter/study object as Vizier does not - support having inter-hyperparameter dependencies. - - Returns: - tf.contrib.training.Hparams object containing the model's hyperparameters. - """ - hparams = tf.contrib.training.HParams() - - # Specify model architecture option. - hparams.add_hparam('use_depthwise_separable', True) - - # Specify number of model parameters. - hparams.add_hparam('filter_widths', [3] * num_filters) - hparams.add_hparam('filter_depths', [1] * num_filters) - hparams.add_hparam('pointwise_depths', [64] * num_filters) - hparams.add_hparam('num_fc_layers', 2) - hparams.add_hparam('num_fc_units', 455) - hparams.add_hparam('min_read_length', 100) - hparams.add_hparam('pooling_type', 'avg') - - # Specify activation options. - hparams.add_hparam('lrelu_slope', 0.0) # Negative slope for leaky relu. - - # Specify training options. - hparams.add_hparam('keep_prob', 1.0) - hparams.add_hparam('weight_scale', 1.0) - hparams.add_hparam('grad_clip_norm', 20.0) - hparams.add_hparam('lr_init', 0.001) - hparams.add_hparam('lr_decay', 0.1) - hparams.add_hparam('optimizer', 'adam') - # optimizer_hp is decay rate for 1st moment estimates for ADAM, and - # momentum for SGD. - hparams.add_hparam('optimizer_hp', 0.9) - hparams.add_hparam('train_steps', 400000) - - # Overwrite defaults with specified values. 
- hparams.parse(hparam_values) - return hparams diff --git a/research/seq2species/input.py b/research/seq2species/input.py deleted file mode 100644 index f1636c875..000000000 --- a/research/seq2species/input.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Input pipe for feeding examples to a Seq2Label model graph.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf -from google.protobuf import text_format - -from protos import seq2label_pb2 -import seq2label_utils - -DNA_BASES = tuple('ACGT') -NUM_DNA_BASES = len(DNA_BASES) -# Possible FASTA characters/IUPAC ambiguity codes. -# See https://en.wikipedia.org/wiki/Nucleic_acid_notation. -AMBIGUITY_CODES = { - 'K': 'GT', - 'M': 'AC', - 'R': 'AG', - 'Y': 'CT', - 'S': 'CG', - 'W': 'AT', - 'B': 'CGT', - 'V': 'ACG', - 'H': 'ACT', - 'D': 'AGT', - 'X': 'ACGT', - 'N': 'ACGT' -} - - -def load_dataset_info(dataset_info_path): - """Load a `Seq2LabelDatasetInfo` from a serialized text proto file.""" - dataset_info = seq2label_pb2.Seq2LabelDatasetInfo() - with tf.gfile.Open(dataset_info_path, 'r') as f: - text_format.Parse(f.read(), dataset_info) - return dataset_info - - -class _InputEncoding(object): - """A helper class providing the graph operations needed to encode input. - - Instantiation of an _InputEncoding will write on the default TF graph, so it - should only be instantiated inside the `input_fn`. - - Attributes: - mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}. - targets: list of strings; the names of the labels of interest (e.g. - "species"). - dna_bases: a tuple of the recognized DNA alphabet. - n_bases: the size of the DNA alphabet. - all_characters: list of recognized alphabet, including ambiguity codes. - label_values: a tuple of strings, the possible label values of the - prediction target. - n_labels: the size of label_values - fixed_read_length: an integer value of the statically-known read length, or - None if the read length is to be determined dynamically. - """ - - def __init__(self, - dataset_info, - mode, - targets, - noise_rate=0.0, - fixed_read_length=None): - self.mode = mode - self.targets = targets - self.dna_bases = DNA_BASES - self.n_bases = NUM_DNA_BASES - self.all_characters = list(DNA_BASES) + sorted(AMBIGUITY_CODES.keys()) - self.character_encodings = np.concatenate( - [[self._character_to_base_distribution(char)] - for char in self.all_characters], - axis=0) - all_legal_label_values = seq2label_utils.get_all_label_values(dataset_info) - # TF lookup tables. 
- self.characters_table = tf.contrib.lookup.index_table_from_tensor( - mapping=self.all_characters) - self.label_tables = { - target: tf.contrib.lookup.index_table_from_tensor( - all_legal_label_values[target]) - for target in targets - } - self.fixed_read_length = fixed_read_length - self.noise_rate = noise_rate - - def _character_to_base_distribution(self, char): - """Maps the given character to a probability distribution over DNA bases. - - Args: - char: character to be encoded as a probability distribution over bases. - - Returns: - Array of size (self.n_bases,) representing the identity of the given - character as a distribution over the possible DNA bases, self.dna_bases. - - Raises: - ValueError: if the given character is not contained in the recognized - alphabet, self.all_characters. - """ - if char not in self.all_characters: - raise ValueError( - 'Base distribution requested for unrecognized character %s.' % char) - possible_bases = AMBIGUITY_CODES[char] if char in AMBIGUITY_CODES else char - base_indices = [self.dna_bases.index(base) for base in possible_bases] - probability_weight = 1.0 / len(possible_bases) - distribution = np.zeros((self.n_bases)) - distribution[base_indices] = probability_weight - return distribution - - def encode_read(self, string_seq): - """Converts the input read sequence to one-hot encoding. - - Args: - string_seq: tf.String; input read sequence. - - Returns: - Input read sequence as a one-hot encoded Tensor, with depth and ordering - of one-hot encoding determined by the given bases. Ambiguous characters - such as "N" and "S" are encoded as a probability distribution over the - possible bases they represent. - """ - with tf.variable_scope('encode_read'): - read = tf.string_split([string_seq], delimiter='').values - read = self.characters_table.lookup(read) - read = tf.cast(tf.gather(self.character_encodings, read), tf.float32) - if self.fixed_read_length: - read = tf.reshape(read, (self.fixed_read_length, self.n_bases)) - return read - - def encode_label(self, target, string_label): - """Converts the label value to an integer encoding. - - Args: - target: str; the target name. - string_label: tf.String; value of the label for the current input read. - - Returns: - Given label value as an index into the possible_target_values. - """ - with tf.variable_scope('encode_label/{}'.format(target)): - return tf.cast(self.label_tables[target].lookup(string_label), tf.int32) - - def _empty_label(self): - return tf.constant((), dtype=tf.int32, shape=()) - - def parse_single_tfexample(self, serialized_example): - """Parses a tf.train.Example proto to a one-hot encoded read, label pair. - - Injects noise into the incoming tf.train.Example's read sequence - when noise_rate is non-zero. - - Args: - serialized_example: string; the serialized tf.train.Example proto - containing the read sequence and label value of interest as - tf.FixedLenFeatures. - - Returns: - Tuple (features, labels) of dicts for the input features and prediction - targets. 
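
A worked example of the encoding defined by `_character_to_base_distribution` above; this standalone numpy sketch mirrors its logic rather than calling the TF class:

```python
import numpy as np

DNA_BASES = tuple('ACGT')
AMBIGUITY_CODES = {'K': 'GT', 'N': 'ACGT'}  # excerpt of the full table above

def base_distribution(char):
  # Spread unit probability mass uniformly over the bases the
  # character can stand for, in DNA_BASES order.
  possible = AMBIGUITY_CODES.get(char, char)
  dist = np.zeros(len(DNA_BASES))
  dist[[DNA_BASES.index(b) for b in possible]] = 1.0 / len(possible)
  return dist

print(base_distribution('A'))  # [1.   0.   0.   0.  ]
print(base_distribution('K'))  # [0.   0.   0.5  0.5 ]  (G or T)
print(base_distribution('N'))  # [0.25 0.25 0.25 0.25]
```
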
- """ - with tf.variable_scope('parse_single_tfexample'): - features_spec = {'sequence': tf.FixedLenFeature([], tf.string)} - for target in self.targets: - features_spec[target] = tf.FixedLenFeature([], tf.string) - features = tf.parse_single_example( - serialized_example, features=features_spec) - if self.noise_rate > 0.0: - read_sequence = tf.py_func(seq2label_utils.add_read_noise, - [features['sequence'], self.noise_rate], - (tf.string)) - else: - read_sequence = features['sequence'] - read_sequence = self.encode_read(read_sequence) - read_features = {'sequence': read_sequence} - if self.mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL): - label = { - target: self.encode_label(target, features[target]) - for target in self.targets - } - else: - label = {target: self._empty_label() for target in self.targets} - return read_features, label - - -class InputDataset(object): - """A class providing access to input data for the Seq2Label model. - - Attributes: - mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}. - targets: list of strings; the names of the labels of interest (e.g. - "species"). - dataset_info: a `Seq2LabelDatasetInfo` message reflecting the dataset - metadata. - initializer: the TF initializer op for the underlying iterator, which - will rewind the iterator. - is_train: Boolean indicating whether or not the execution mode is TRAIN. - """ - - def __init__(self, - mode, - targets, - dataset_info, - train_epochs=None, - noise_rate=0.0, - random_seed=None, - input_tfrecord_files=None, - fixed_read_length=None, - ensure_constant_batch_size=False, - num_parallel_calls=32): - """Constructor for InputDataset. - - Args: - mode: `tf.estimator.ModeKeys`; the execution mode {TRAIN, EVAL, INFER}. - targets: list of strings; the names of the labels of interest (e.g. - "species"). - dataset_info: a `Seq2LabelDatasetInfo` message reflecting the dataset - metadata. - train_epochs: the number of training epochs to perform, if mode==TRAIN. - noise_rate: float [0.0, 1.0] specifying rate at which to inject - base-flipping noise into the read sequences. - random_seed: seed to be used for shuffling, if mode==TRAIN. - input_tfrecord_files: a list of filenames for TFRecords of TF examples. - fixed_read_length: an integer value of the statically-known read length, - or None if the read length is to be determined dynamically. The read - length must be known statically for TPU execution. - ensure_constant_batch_size: ensure a constant batch size at the expense of - discarding the last "short" batch. This also gives us a statically - constant batch size, which is essential for e.g. the TPU platform. - num_parallel_calls: the number of dataset elements to process in parallel. - If None, elements will be processed sequentially. 
- """ - self.input_tfrecord_files = input_tfrecord_files - self.mode = mode - self.targets = targets - self.dataset_info = dataset_info - self._train_epochs = train_epochs - self._noise_rate = noise_rate - self._random_seed = random_seed - if random_seed is not None: - np.random.seed(random_seed) - self._fixed_read_length = fixed_read_length - self._ensure_constant_batch_size = ensure_constant_batch_size - self._num_parallel_calls = num_parallel_calls - - @staticmethod - def from_tfrecord_files(input_tfrecord_files, *args, **kwargs): - return InputDataset( - *args, input_tfrecord_files=input_tfrecord_files, **kwargs) - - @property - def is_train(self): - return self.mode == tf.estimator.ModeKeys.TRAIN - - def input_fn(self, params): - """Supplies input for the model. - - This function supplies input to our model as a function of the mode. - - Args: - params: a dictionary, containing: - - params['batch_size']: the integer batch size. - - Returns: - A tuple of two values as follows: - 1) the *features* dict, containing a tensor value for keys as follows: - - "sequence" - the encoded read input sequence. - 2) the *labels* dict. containing a key for `target`, whose value is: - - a string Tensor value (in TRAIN/EVAL mode), or - - a blank Tensor (PREDICT mode). - """ - randomize_input = self.is_train - batch_size = params['batch_size'] - - encoding = _InputEncoding( - self.dataset_info, - self.mode, - self.targets, - noise_rate=self._noise_rate, - fixed_read_length=self._fixed_read_length) - - dataset = tf.data.TFRecordDataset(self.input_tfrecord_files) - dataset = dataset.map( - encoding.parse_single_tfexample, - num_parallel_calls=self._num_parallel_calls) - - dataset = dataset.repeat(self._train_epochs if self.is_train else 1) - if randomize_input: - dataset = dataset.shuffle( - buffer_size=max(1000, batch_size), seed=self._random_seed) - - if self._ensure_constant_batch_size: - # Only take batches of *exactly* size batch_size; then we get a - # statically knowable batch shape. - dataset = dataset.batch(batch_size, drop_remainder=True) - else: - dataset = dataset.batch(batch_size) - - # Prefetch to allow infeed to be in parallel with model computations. - dataset = dataset.prefetch(2) - - # Use initializable iterator to support table lookups. - iterator = dataset.make_initializable_iterator() - self.initializer = iterator.initializer - tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) - - features, labels = iterator.get_next() - return (features, labels) diff --git a/research/seq2species/protos/BUILD b/research/seq2species/protos/BUILD deleted file mode 100644 index 5628d4c41..000000000 --- a/research/seq2species/protos/BUILD +++ /dev/null @@ -1,16 +0,0 @@ -# Protos for Tensorflow Seq2Species API. 
-
-package(
    default_visibility = ["//visibility:public"],
)
-
-py_proto_library(
-    name = "seq2label_py_pb2",
-    api_version = 2,
-    deps = [":seq2label_proto"],
-)
-
-proto_library(
-    name = "seq2label_proto",
-    srcs = ["seq2label.proto"],
-)
diff --git a/research/seq2species/protos/__init__.py b/research/seq2species/protos/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/seq2species/protos/seq2label.proto b/research/seq2species/protos/seq2label.proto
deleted file mode 100644
index 531c4ad75..000000000
--- a/research/seq2species/protos/seq2label.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto2";
-
-package seq2species.protos;
-
-// Summarizes metadata information for a dataset that can be used for running
-// training or inference.
-message Seq2LabelDatasetInfo {
-  // Summarizes all possible values for a given label in the dataset.
-  message LabelInfo {
-    optional string name = 1;
-    repeated string values = 2;
-    // Per-value weights used to normalize the classes in a dataset.
-    repeated float weights = 3;
-  }
-  repeated LabelInfo labels = 3;
-  // Length (in basepairs) of the reads in the dataset.
-  optional int32 read_length = 4;
-  // Stride (in number of basepairs) in the moving window.
-  optional int32 read_stride = 7;
-  // Total number of examples in the dataset.
-  optional int64 num_examples = 5;
-  // Full path to the dataset.
-  optional string dataset_path = 6;
-}
-
-// Summarizes metadata information about a model trained on a Seq2Label dataset.
-message Seq2LabelModelInfo {
-  optional string hparams_string = 1;
-  optional string model_type = 2;
-  repeated string targets = 3;
-  optional int32 num_filters = 4;
-  optional int32 batch_size = 5;
-  optional string metadata_path = 6;
-  optional float training_noise_rate = 7;
-}
-
-// Summarizes resulting measures of modelling experiments.
-message Seq2LabelExperimentMeasures {
-  optional string checkpoint_path = 1;
-  optional int64 steps = 2;
-  optional float wall_time = 3;
-  optional bool experiment_infeasible = 4;
-
-  message Measure {
-    optional string name = 1;
-    optional float value = 2;
-  }
-  repeated Measure measures = 5;
-}
diff --git a/research/seq2species/run_training.py b/research/seq2species/run_training.py
deleted file mode 100644
index f03bb09ec..000000000
--- a/research/seq2species/run_training.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Defines training scheme for neural networks for Seq2Species prediction.
-
-Defines and runs the loop for training an (optionally) depthwise separable
-convolutional model for predicting taxonomic labels from short reads of DNA.
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import time - -from absl import flags -import numpy as np -import tensorflow as tf -from google.protobuf import text_format - -import build_model -import configuration -import input as seq2species_input -from protos import seq2label_pb2 -import seq2label_utils - -# Define non-tunable parameters. -flags.DEFINE_integer('num_filters', 1, 'Number of filters for conv model') -flags.DEFINE_string('hparams', '', - 'Comma-separated list of name=value hyperparameter ' - "pairs ('hp1=value1,hp2=value2'). Unspecified " - 'hyperparameters will be filled with defaults.') -flags.DEFINE_integer('batch_size', 512, 'Size of batches during training.') -flags.DEFINE_integer('min_train_steps', 1000, - 'Minimum number of training steps to run.') -flags.DEFINE_float('max_task_loss', 10.0, - "Terminate trial if task loss doesn't fall below this " - 'within --min_train_steps.') -flags.DEFINE_integer('n_print_progress_every', 1000, - 'Print training progress every ' - '--n_print_progress_every global steps.') -flags.DEFINE_list('targets', ['species'], - 'Names of taxonomic ranks to use as training targets.') -flags.DEFINE_float( - 'noise_rate', 0.0, 'Rate [0.0, 1.0] at which to inject ' - 'base-flipping noise into input read sequences.') - -# Define paths to logs and data. -flags.DEFINE_list( - 'train_files', [], 'Full paths to the TFRecords containing the ' - 'training examples.') -flags.DEFINE_string( - 'metadata_path', '', 'Full path of the text proto containing configuration ' - 'information about the set of training examples.') -flags.DEFINE_string('logdir', '/tmp/seq2species', - 'Directory to which to write logs.') - -# Define supervisor/checkpointing options. -flags.DEFINE_integer('task', 0, 'Task ID of the replica running the training.') -flags.DEFINE_string('master', '', 'Name of the TF master to use.') -flags.DEFINE_integer( - 'save_model_secs', 900, 'Rate at which to save model parameters. ' - 'Set to 0 to disable checkpointing.') -flags.DEFINE_integer('recovery_wait_secs', 30, - 'Wait to recover model from checkpoint ' - 'before timing out.') -flags.DEFINE_integer('save_summaries_secs', 900, - 'Rate at which to save Tensorboard summaries.') -flags.DEFINE_integer('ps_tasks', 0, - 'Number of tasks in the ps job; 0 if no ps is used.') - -FLAGS = flags.FLAGS -RANDOM_SEED = 42 - - -def wait_until(time_sec): - """Stalls execution until a given time. - - Args: - time_sec: time, in seconds, until which to loop idly. - """ - while time.time() < time_sec: - pass - - -def update_measures(measures, new_measures, loss_val, max_loss=None): - """Updates tracking of experimental measures and infeasibilty. - - Args: - measures: dict; mapping from measure name to measure value. - new_measures: dict; mapping from measure name to new measure values. - loss_val: float; value of loss metric by which to determine fesibility. - max_loss: float; maximum value at which to consider the loss feasible. - - Side Effects: - Updates the given mapping of measures and values based on the current - experimental metrics stored in new_measures, and determines current - feasibility of the experiment based on the provided loss value. 
- """ - max_loss = max_loss if max_loss else np.finfo('f').max - measures['is_infeasible'] = ( - loss_val >= max_loss or not np.isfinite(loss_val)) - measures.update(new_measures) - - -def run_training(model, hparams, training_dataset, logdir, batch_size): - """Trains the given model on random mini-batches of reads. - - Args: - model: ConvolutionalNet instance containing the model graph and operations. - hparams: tf.contrib.training.Hparams object containing the model's - hyperparamters; see configuration.py for hyperparameter definitions. - training_dataset: an `InputDataset` that can feed labelled examples. - logdir: string; full path of directory to which to save checkpoints. - batch_size: integer batch size. - - Yields: - Tuple comprising a dictionary of experimental measures and the save path - for train checkpoints and summaries. - """ - input_params = dict(batch_size=batch_size) - features, labels = training_dataset.input_fn(input_params) - model.build_graph(features, labels, tf.estimator.ModeKeys.TRAIN, batch_size) - - is_chief = FLAGS.task == 0 - scaffold = tf.train.Scaffold( - saver=tf.train.Saver( - tf.global_variables(), - max_to_keep=5, - keep_checkpoint_every_n_hours=1.0), - init_op=tf.global_variables_initializer(), - summary_op=model.summary_op) - with tf.train.MonitoredTrainingSession( - master=FLAGS.master, - checkpoint_dir=logdir, - is_chief=is_chief, - scaffold=scaffold, - save_summaries_secs=FLAGS.save_summaries_secs, - save_checkpoint_secs=FLAGS.save_model_secs, - max_wait_secs=FLAGS.recovery_wait_secs) as sess: - global_step = sess.run(model.global_step) - print('Initialized model at global step ', global_step) - init_time = time.time() - measures = {'is_infeasible': False} - - if is_chief: - model_info = seq2label_utils.construct_seq2label_model_info( - hparams, 'conv', FLAGS.targets, FLAGS.metadata_path, FLAGS.batch_size, - FLAGS.num_filters, FLAGS.noise_rate) - write_message(model_info, os.path.join(logdir, 'model_info.pbtxt')) - - ops = [ - model.accuracy, model.weighted_accuracy, model.total_loss, - model.global_step, model.train_op - ] - - while not sess.should_stop() and global_step < hparams.train_steps: - accuracy, weighted_accuracy, loss, global_step, _ = sess.run(ops) - - def gather_measures(): - """Updates the measures dictionary from this batch.""" - new_measures = {'train_loss': loss, 'global_step': global_step} - for target in FLAGS.targets: - new_measures.update({ - ('train_accuracy/%s' % target): accuracy[target], - ('train_weighted_accuracy/%s' % target): weighted_accuracy[target] - }) - update_measures( - measures, new_measures, loss, max_loss=FLAGS.max_task_loss) - - # Periodically track measures according to current mini-batch performance. - - # Log a message. - if global_step % FLAGS.n_print_progress_every == 0: - log_message = ('\tstep: %d (%d sec), loss: %f' % - (global_step, time.time() - init_time, loss)) - for target in FLAGS.targets: - log_message += (', accuracy/%s: %f ' % (target, accuracy[target])) - log_message += (', weighted_accuracy/%s: %f ' % - (target, weighted_accuracy[target])) - print(log_message) - - # Gather new measures and update the measures dictionary. - gather_measures() - yield measures, scaffold.saver.last_checkpoints[-1] - - # Check for additional stopping criteria. - if not np.isfinite(loss) or (loss >= FLAGS.max_task_loss and - global_step > FLAGS.min_train_steps): - break - - # Always yield once at the end. 
- gather_measures() - yield measures, scaffold.saver.last_checkpoints[-1] - - -def write_message(message, filename): - """Writes contents of the given message to the given filename as a text proto. - - Args: - message: the proto message to save. - filename: full path of file to which to save the text proto. - - Side Effects: - Outputs a text proto file to the given filename. - """ - message_string = text_format.MessageToString(message) - with tf.gfile.GFile(filename, 'w') as f: - f.write(message_string) - - -def write_measures(measures, checkpoint_file, init_time): - """Writes performance measures to file. - - Args: - measures: dict; mapping from measure name to measure value. - checkpoint_file: string; full save path for checkpoints and summaries. - init_time: int; start time for work on the current experiment. - - Side Effects: - Writes given dictionary of performance measures for the current experiment - to a 'measures.pbtxt' file in the checkpoint directory. - """ - # Save experiment measures. - print('global_step: ', measures['global_step']) - experiment_measures = seq2label_pb2.Seq2LabelExperimentMeasures( - checkpoint_path=checkpoint_file, - steps=measures['global_step'], - experiment_infeasible=measures['is_infeasible'], - wall_time=time.time() - init_time) # Inaccurate for restarts. - for name, value in measures.iteritems(): - if name not in ['is_infeasible', 'global_step']: - experiment_measures.measures.add(name=name, value=value) - measures_file = os.path.join( - os.path.dirname(checkpoint_file), 'measures.pbtxt') - write_message(experiment_measures, measures_file) - print('Wrote ', measures_file, - ' containing the following experiment measures:\n', experiment_measures) - - -def main(unused_argv): - dataset_info = seq2species_input.load_dataset_info(FLAGS.metadata_path) - - init_time = time.time() - - # Determine model hyperparameters. - hparams = configuration.parse_hparams(FLAGS.hparams, FLAGS.num_filters) - print('Current Hyperparameters:') - for hp_name, hp_val in hparams.values().items(): - print('\t', hp_name, ': ', hp_val) - - # Initialize the model graph. - print('Constructing TensorFlow Graph.') - tf.reset_default_graph() - - input_dataset = seq2species_input.InputDataset.from_tfrecord_files( - FLAGS.train_files, - 'train', - FLAGS.targets, - dataset_info, - noise_rate=FLAGS.noise_rate, - random_seed=RANDOM_SEED) - - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): - model = build_model.ConvolutionalNet( - hparams, dataset_info, targets=FLAGS.targets) - - # Run the experiment. - measures, checkpoint_file = None, None - print('Starting model training.') - for cur_measures, cur_file in run_training( - model, hparams, input_dataset, FLAGS.logdir, batch_size=FLAGS.batch_size): - measures, checkpoint_file = cur_measures, cur_file - - # Save experiment results. - write_measures(measures, checkpoint_file, init_time) - - -if __name__ == '__main__': - tf.app.run(main) diff --git a/research/seq2species/run_training_test.py b/research/seq2species/run_training_test.py deleted file mode 100644 index 754d2e017..000000000 --- a/research/seq2species/run_training_test.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for run_training.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import time - -from absl import flags -from absl.testing import absltest -from absl.testing import flagsaver -from absl.testing import parameterized -import numpy as np -import tensorflow as tf -from google.protobuf import text_format - -import run_training -from protos import seq2label_pb2 -import test_utils - -FLAGS = flags.FLAGS - - -class RunTrainingTest(parameterized.TestCase): - - @parameterized.parameters(2, 4, 7) - def test_wait_until(self, wait_sec): - end_time = time.time() + wait_sec - run_training.wait_until(end_time) - self.assertEqual(round(time.time() - end_time), 0) - - @parameterized.parameters( - ({}, {'a': 0.7, 'b': 12.3}, 12.3, None, - {'a': 0.7, 'b': 12.3, 'is_infeasible': False}), - ({'a': 0.42}, {'b': 24.5}, 24.5, 32.0, - {'a': 0.42, 'b': 24.5, 'is_infeasible': False}), - ({'a': 0.503}, {'a': 0.82, 'b': 7.2}, 7.2, 0.1, - {'a': 0.82, 'b': 7.2, 'is_infeasible': True}), - ({}, {'a': 0.7, 'b': 12.3}, float('Inf'), None, - {'a': 0.7, 'b': 12.3, 'is_infeasible': True}) - ) - def test_update_measures(self, measures, new_measures, loss, max_loss, - expected): - run_training.update_measures(measures, new_measures, loss, max_loss) - self.assertEqual(measures, expected) - - def test_write_measures(self): - init_time = time.time() - measures = { - 'global_step': 311448, - 'train_loss': np.float32(18.36), - 'train_weighted_accuracy': np.float32(0.3295), - 'train_accuracy': 0.8243, - 'is_infeasible': False - } - tmp_path = os.path.join(FLAGS.test_tmpdir, 'measures.pbtxt') - run_training.write_measures(measures, tmp_path, init_time) - experiment_measures = seq2label_pb2.Seq2LabelExperimentMeasures() - with tf.gfile.Open(tmp_path) as f: - text_format.Parse(f.read(), experiment_measures) - self.assertEqual(experiment_measures.checkpoint_path, tmp_path) - self.assertFalse(experiment_measures.experiment_infeasible) - self.assertEqual(experiment_measures.steps, measures['global_step']) - self.assertGreater(experiment_measures.wall_time, 0) - self.assertEqual(len(experiment_measures.measures), 3) - for measure in experiment_measures.measures: - self.assertAlmostEqual(measure.value, measures[measure.name]) - - @parameterized.parameters((test_utils.TEST_TARGETS[:1],), - (test_utils.TEST_TARGETS,)) - def test_run_training(self, targets): - """Tests whether the training loop can be run successfully. - - Generates test input files and runs the main driving code. - - Args: - targets: the targets to train on. - """ - # Create test input and metadata files. - num_examples, read_len = 20, 5 - train_file = test_utils.create_tmp_train_file(num_examples, read_len) - metadata_path = test_utils.create_tmp_metadata(num_examples, read_len) - - # Check that the training loop runs as expected. 
-    logdir = os.path.join(FLAGS.test_tmpdir, 'train:{}'.format(len(targets)))
-    with flagsaver.flagsaver(
-        train_files=train_file,
-        metadata_path=metadata_path,
-        targets=targets,
-        logdir=logdir,
-        hparams='train_steps=10,min_read_length=5',
-        batch_size=10):
-      run_training.main(FLAGS)
-      # Check training loop ran by confirming existence of a checkpoint file.
-      self.assertIsNotNone(tf.train.latest_checkpoint(FLAGS.logdir))
-      # Check training loop ran by confirming existence of a measures file.
-      self.assertTrue(
-          os.path.exists(os.path.join(FLAGS.logdir, 'measures.pbtxt')))
-
-
-if __name__ == '__main__':
-  absltest.main()
diff --git a/research/seq2species/seq2label_utils.py b/research/seq2species/seq2label_utils.py
deleted file mode 100644
index b975b7f17..000000000
--- a/research/seq2species/seq2label_utils.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Utilities for working with Seq2Label datasets and models.
-
-This library provides utilities for parsing and generating Seq2Label protos.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-from protos import seq2label_pb2
-
-
-def get_all_label_values(dataset_info):
-  """Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
-
-  Args:
-    dataset_info: a `Seq2LabelDatasetInfo` message.
-
-  Returns:
-    A dictionary mapping each label name to a tuple of its permissible values.
-  """
-  return {
-      label_info.name: tuple(label_info.values)
-      for label_info in dataset_info.labels
-  }
-
-
-def construct_seq2label_model_info(hparams, model_type, targets, metadata_path,
-                                   batch_size, num_filters,
-                                   training_noise_rate):
-  """Constructs a Seq2LabelModelInfo proto with the given properties.
-
-  Args:
-    hparams: initialized tf.contrib.training.Hparams object.
-    model_type: string; descriptive tag indicating type of model, i.e. "conv".
-    targets: list of names of the targets the model is trained to predict.
-    metadata_path: string; full path to Seq2LabelDatasetInfo text proto used
-      to initialize the model.
-    batch_size: int; number of reads per mini-batch.
-    num_filters: int; number of filters for convolutional model.
-    training_noise_rate: float; rate [0.0, 1.0] of base-flipping noise injected
-      into input read sequences at training time.
-
-  Returns:
-    The Seq2LabelModelInfo proto with the hparams, model_type, targets,
-    num_filters, batch_size, metadata_path, and training_noise_rate fields
-    set to the given values.
- """ - return seq2label_pb2.Seq2LabelModelInfo( - hparams_string=hparams.to_json(), - model_type=model_type, - targets=sorted(targets), - num_filters=num_filters, - batch_size=batch_size, - metadata_path=metadata_path, - training_noise_rate=training_noise_rate) - - -def add_read_noise(read, base_flip_probability=0.01): - """Adds base-flipping noise to the given read sequence. - - Args: - read: string; the read sequence to which to add noise. - base_flip_probability: float; probability of a base flip at each position. - - Returns: - The given read with base-flipping noise added at the provided - base_flip_probability rate. - """ - base_flips = np.random.binomial(1, base_flip_probability, len(read)) - if sum(base_flips) == 0: - return read - - read = np.array(list(read)) - possible_mutations = np.char.replace(['ACTG'] * sum(base_flips), - read[base_flips == 1], '') - mutations = map(np.random.choice, map(list, possible_mutations)) - read[base_flips == 1] = mutations - return ''.join(read) diff --git a/research/seq2species/test_utils.py b/research/seq2species/test_utils.py deleted file mode 100644 index f02798fb5..000000000 --- a/research/seq2species/test_utils.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Utility methods for accessing and operating on test data.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -from absl import flags -import tensorflow as tf -from google.protobuf import text_format - -import input as seq2species_input -from protos import seq2label_pb2 - -FLAGS = flags.FLAGS - -# Target names included in the example inputs. -TEST_TARGETS = ['test_target_1', 'test_target_2'] - - -def _as_bytes_feature(in_string): - """Converts the given string to a tf.train.BytesList feature. - - Args: - in_string: string to be converted to BytesList Feature. - - Returns: - The TF BytesList Feature representing the given string. - """ - return tf.train.Feature(bytes_list=tf.train.BytesList(value=[in_string])) - - -def create_tmp_train_file(num_examples, - read_len, - characters=seq2species_input.DNA_BASES, - name='test.tfrecord'): - """Write a test TFRecord of input examples to temporary test directory. - - The generated input examples are test tf.train.Example protos, each comprised - of a toy sequence of length read_len and non-meaningful labels for targets in - TEST_TARGETS. - - Args: - num_examples: int; number of examples to write to test input file. - read_len: int; length of test read sequences. - characters: string; set of characters from which to construct test reads. - Defaults to canonical DNA bases. - name: string; filename for the test input file. - - Returns: - Full path to the generated temporary test input file. 
- """ - tmp_path = os.path.join(FLAGS.test_tmpdir, name) - with tf.python_io.TFRecordWriter(tmp_path) as writer: - for i in xrange(num_examples): - char = characters[i % len(characters)] - features_dict = {'sequence': _as_bytes_feature(char * read_len)} - for target_name in TEST_TARGETS: - nonsense_label = _as_bytes_feature(str(i)) - features_dict[target_name] = nonsense_label - tf_features = tf.train.Features(feature=features_dict) - example = tf.train.Example(features=tf_features) - writer.write(example.SerializeToString()) - return tmp_path - - -def create_tmp_metadata(num_examples, read_len): - """Write a test Seq2LabelDatasetInfo test proto to temporary test directory. - - Args: - num_examples: int; number of example labels to write into test metadata. - read_len: int; length of test read sequences. - - Returns: - Full path to the generated temporary test file containing the - Seq2LabelDatasetInfo text proto. - """ - dataset_info = seq2label_pb2.Seq2LabelDatasetInfo( - read_length=read_len, - num_examples=num_examples, - read_stride=1, - dataset_path='test.tfrecord') - - for target in TEST_TARGETS: - dataset_info.labels.add( - name=target, values=[str(i) for i in xrange(num_examples)]) - - tmp_path = os.path.join(FLAGS.test_tmpdir, 'test.pbtxt') - with tf.gfile.GFile(tmp_path, 'w') as f: - f.write(text_format.MessageToString(dataset_info)) - return tmp_path diff --git a/research/skip_thoughts/.gitignore b/research/skip_thoughts/.gitignore deleted file mode 100644 index 91cb861a9..000000000 --- a/research/skip_thoughts/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/bazel-bin -/bazel-ci_build-cache -/bazel-genfiles -/bazel-out -/bazel-skip_thoughts -/bazel-testlogs -/bazel-tf -*.pyc diff --git a/research/skip_thoughts/README.md b/research/skip_thoughts/README.md deleted file mode 100644 index b3a1de73f..000000000 --- a/research/skip_thoughts/README.md +++ /dev/null @@ -1,479 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Skip-Thought Vectors - -This is a TensorFlow implementation of the model described in: - -Jamie Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, -Antonio Torralba, Raquel Urtasun, Sanja Fidler. -[Skip-Thought Vectors](https://papers.nips.cc/paper/5950-skip-thought-vectors.pdf). 
-*In NIPS, 2015.* - - -## Contact -***Code author:*** Chris Shallue - -***Pull requests and issues:*** @cshallue - -## Contents -* [Model Overview](#model-overview) -* [Getting Started](#getting-started) - * [Install Required Packages](#install-required-packages) - * [Download Pretrained Models (Optional)](#download-pretrained-models-optional) -* [Training a Model](#training-a-model) - * [Prepare the Training Data](#prepare-the-training-data) - * [Run the Training Script](#run-the-training-script) - * [Track Training Progress](#track-training-progress) -* [Expanding the Vocabulary](#expanding-the-vocabulary) - * [Overview](#overview) - * [Preparation](#preparation) - * [Run the Vocabulary Expansion Script](#run-the-vocabulary-expansion-script) -* [Evaluating a Model](#evaluating-a-model) - * [Overview](#overview-1) - * [Preparation](#preparation-1) - * [Run the Evaluation Tasks](#run-the-evaluation-tasks) -* [Encoding Sentences](#encoding-sentences) - -## Model overview - -The *Skip-Thoughts* model is a sentence encoder. It learns to encode input -sentences into a fixed-dimensional vector representation that is useful for many -tasks, for example to detect paraphrases or to classify whether a product review -is positive or negative. See the -[Skip-Thought Vectors](https://papers.nips.cc/paper/5950-skip-thought-vectors.pdf) -paper for details of the model architecture and more example applications. - -A trained *Skip-Thoughts* model will encode similar sentences nearby each other -in the embedding vector space. The following examples show the nearest neighbor by -cosine similarity of some sentences from the -[movie review dataset](https://www.cs.cornell.edu/people/pabo/movie-review-data/). - - -| Input sentence | Nearest Neighbor | -|----------------|------------------| -| Simplistic, silly and tedious. | Trite, banal, cliched, mostly inoffensive. | -| Not so much farcical as sour. | Not only unfunny, but downright repellent. | -| A sensitive and astute first feature by Anne-Sophie Birot. | Absorbing character study by André Turpin . | -| An enthralling, entertaining feature. | A slick, engrossing melodrama. | - -## Getting Started - -### Install Required Packages -First ensure that you have installed the following required packages: - -* **Bazel** ([instructions](http://bazel.build/docs/install.html)) -* **TensorFlow** ([instructions](https://www.tensorflow.org/install/)) -* **NumPy** ([instructions](http://www.scipy.org/install.html)) -* **scikit-learn** ([instructions](http://scikit-learn.org/stable/install.html)) -* **Natural Language Toolkit (NLTK)** - * First install NLTK ([instructions](http://www.nltk.org/install.html)) - * Then install the NLTK data ([instructions](http://www.nltk.org/data.html)) -* **gensim** ([instructions](https://radimrehurek.com/gensim/install.html)) - * Only required if you will be expanding your vocabulary with the [word2vec](https://code.google.com/archive/p/word2vec/) model. - - -### Download Pretrained Models (Optional) - -You can download model checkpoints pretrained on the -[BookCorpus](http://yknzhu.wixsite.com/mbweb) dataset in the following -configurations: - -* Unidirectional RNN encoder ("uni-skip" in the paper) -* Bidirectional RNN encoder ("bi-skip" in the paper) - -```shell -# Directory to download the pretrained models to. -PRETRAINED_MODELS_DIR="${HOME}/skip_thoughts/pretrained/" - -mkdir -p ${PRETRAINED_MODELS_DIR} -cd ${PRETRAINED_MODELS_DIR} - -# Download and extract the unidirectional model. 
-wget "http://download.tensorflow.org/models/skip_thoughts_uni_2017_02_02.tar.gz" -tar -xvf skip_thoughts_uni_2017_02_02.tar.gz -rm skip_thoughts_uni_2017_02_02.tar.gz - -# Download and extract the bidirectional model. -wget "http://download.tensorflow.org/models/skip_thoughts_bi_2017_02_16.tar.gz" -tar -xvf skip_thoughts_bi_2017_02_16.tar.gz -rm skip_thoughts_bi_2017_02_16.tar.gz -``` - -You can now skip to the sections [Evaluating a Model](#evaluating-a-model) and -[Encoding Sentences](#encoding-sentences). - - -## Training a Model - -### Prepare the Training Data - -To train a model you will need to provide training data in TFRecord format. The -TFRecord format consists of a set of sharded files containing serialized -`tf.Example` protocol buffers. Each `tf.Example` proto contains three -sentences: - - * `encode`: The sentence to encode. - * `decode_pre`: The sentence preceding `encode` in the original text. - * `decode_post`: The sentence following `encode` in the original text. - -Each sentence is a list of words. During preprocessing, a dictionary is created -that assigns each word in the vocabulary to an integer-valued id. Each sentence -is encoded as a list of integer word ids in the `tf.Example` protos. - -We have provided a script to preprocess any set of text-files into this format. -You may wish to use the [BookCorpus](http://yknzhu.wixsite.com/mbweb) dataset. -Note that the preprocessing script may take **12 hours** or more to complete -on this large dataset. - -```shell -# Comma-separated list of globs matching the input input files. The format of -# the input files is assumed to be a list of newline-separated sentences, where -# each sentence is already tokenized. -INPUT_FILES="${HOME}/skip_thoughts/bookcorpus/*.txt" - -# Location to save the preprocessed training and validation data. -DATA_DIR="${HOME}/skip_thoughts/data" - -# Build the preprocessing script. -cd tensorflow-models/skip_thoughts -bazel build -c opt //skip_thoughts/data:preprocess_dataset - -# Run the preprocessing script. -bazel-bin/skip_thoughts/data/preprocess_dataset \ - --input_files=${INPUT_FILES} \ - --output_dir=${DATA_DIR} -``` - -When the script finishes you will find 100 training files and 1 validation file -in `DATA_DIR`. The files will match the patterns `train-?????-of-00100` and -`validation-00000-of-00001` respectively. - -The script will also produce a file named `vocab.txt`. The format of this file -is a list of newline-separated words where the word id is the corresponding 0- -based line index. Words are sorted by descending order of frequency in the input -data. Only the top 20,000 words are assigned unique ids; all other words are -assigned the "unknown id" of 1 in the processed data. - -### Run the Training Script - -Execute the following commands to start the training script. By default it will -run for 500k steps (around 9 days on a GeForce GTX 1080 GPU). - -```shell -# Directory containing the preprocessed data. -DATA_DIR="${HOME}/skip_thoughts/data" - -# Directory to save the model. -MODEL_DIR="${HOME}/skip_thoughts/model" - -# Build the model. -cd tensorflow-models/skip_thoughts -bazel build -c opt //skip_thoughts/... - -# Run the training script. -bazel-bin/skip_thoughts/train \ - --input_file_pattern="${DATA_DIR}/train-?????-of-00100" \ - --train_dir="${MODEL_DIR}/train" -``` - -### Track Training Progress - -Optionally, you can run the `track_perplexity` script in a separate process. 
-This will log per-word perplexity on the validation set, which allows training
-progress to be monitored on
-[TensorBoard](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
-
-Note that you may run out of memory if you run this script on the same GPU
-as the training script. You can set the environment variable
-`CUDA_VISIBLE_DEVICES=""` to force the script to run on CPU. If it runs too
-slowly on CPU, you can decrease the value of `--num_eval_examples`.
-
-```shell
-DATA_DIR="${HOME}/skip_thoughts/data"
-MODEL_DIR="${HOME}/skip_thoughts/model"
-
-# Ignore GPU devices (only necessary if your GPU is currently memory
-# constrained, for example, by running the training script).
-export CUDA_VISIBLE_DEVICES=""
-
-# Run the evaluation script. This will run in a loop, periodically loading the
-# latest model checkpoint file and computing evaluation metrics.
-bazel-bin/skip_thoughts/track_perplexity \
-  --input_file_pattern="${DATA_DIR}/validation-?????-of-00001" \
-  --checkpoint_dir="${MODEL_DIR}/train" \
-  --eval_dir="${MODEL_DIR}/val" \
-  --num_eval_examples=50000
-```
-
-If you started the `track_perplexity` script, run a
-[TensorBoard](https://www.tensorflow.org/get_started/summaries_and_tensorboard)
-server in a separate process for real-time monitoring of training summaries and
-validation perplexity.
-
-```shell
-MODEL_DIR="${HOME}/skip_thoughts/model"
-
-# Run a TensorBoard server.
-tensorboard --logdir="${MODEL_DIR}"
-```
-
-## Expanding the Vocabulary
-
-### Overview
-
-The vocabulary generated by the preprocessing script contains only 20,000
-words, which is insufficient for many tasks. For example, a sentence from
-Wikipedia might contain nouns that do not appear in this vocabulary.
-
-A solution to this problem, described in the
-[Skip-Thought Vectors](https://papers.nips.cc/paper/5950-skip-thought-vectors.pdf)
-paper, is to learn a mapping that transfers word representations from one model
-to another. This idea is based on the "Translation Matrix" method from the paper
-[Exploiting Similarities Among Languages for Machine Translation](https://arxiv.org/abs/1309.4168).
-
-
-Specifically, we will load the word embeddings from a trained *Skip-Thoughts*
-model and from a trained [word2vec model](https://arxiv.org/pdf/1301.3781.pdf)
-(which has a much larger vocabulary). We will train a linear regression model
-without regularization to learn a linear mapping from the word2vec embedding
-space to the *Skip-Thoughts* embedding space. We will then apply the linear
-model to all words in the word2vec vocabulary, yielding vectors in the
-*Skip-Thoughts* word embedding space for the union of the two vocabularies.
-
-The linear regression task is to learn a parameter matrix *W* to minimize
-*|| X - Y \* W ||²*, where *X* is a matrix of *Skip-Thoughts*
-embeddings of shape `[num_words, dim1]`, *Y* is a matrix of word2vec embeddings
-of shape `[num_words, dim2]`, and *W* is a matrix of shape `[dim2, dim1]`.
-
-### Preparation
-
-First you will need to download and unpack a pretrained
-[word2vec model](https://arxiv.org/pdf/1301.3781.pdf) from
-[this website](https://code.google.com/archive/p/word2vec/)
-([direct download link](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing)).
-This model was trained on the Google News dataset (about 100 billion words).
-
-
-Also ensure that you have already [installed gensim](https://radimrehurek.com/gensim/install.html).
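-
-The vocabulary expansion script below implements the mapping for you, but the
-idea is small enough to sketch. The following minimal example, which is *not*
-the actual script, learns *W* by ordinary least squares and applies it to the
-full word2vec vocabulary. The file paths are placeholders, and it assumes the
-gensim 4.x `KeyedVectors` API:
-
-```python
-# Sketch of vocabulary expansion: learn W minimizing ||X - Y * W||^2, then
-# map every word2vec word into the Skip-Thoughts embedding space.
-import numpy as np
-from gensim.models import KeyedVectors
-
-# Skip-Thoughts vocabulary and word embeddings from a trained model
-# (placeholder paths).
-skip_vocab = [line.strip() for line in open("/path/to/vocab.txt")]
-skip_emb = np.load("/path/to/word_embeddings.npy")  # [num_words, dim1]
-skip_index = {w: i for i, w in enumerate(skip_vocab)}
-
-# Pretrained word2vec model (much larger vocabulary).
-w2v = KeyedVectors.load_word2vec_format(
-    "/path/to/GoogleNews-vectors-negative300.bin", binary=True)
-
-# Training pairs: words present in both vocabularies.
-shared = [w for w in skip_vocab if w in w2v]
-X = np.array([skip_emb[skip_index[w]] for w in shared])  # [n, dim1]
-Y = np.array([w2v[w] for w in shared])                   # [n, dim2]
-
-# Unregularized linear regression: solve min_W || X - Y * W ||^2.
-W = np.linalg.lstsq(Y, X, rcond=None)[0]                 # [dim2, dim1]
-
-# Map the full word2vec vocabulary into the Skip-Thoughts space.
-expanded = {w: w2v[w].dot(W) for w in w2v.index_to_key}
-```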
- -### Run the Vocabulary Expansion Script - -```shell -# Path to checkpoint file or a directory containing checkpoint files (the script -# will select the most recent). -CHECKPOINT_PATH="${HOME}/skip_thoughts/model/train" - -# Vocabulary file generated by the preprocessing script. -SKIP_THOUGHTS_VOCAB="${HOME}/skip_thoughts/data/vocab.txt" - -# Path to downloaded word2vec model. -WORD2VEC_MODEL="${HOME}/skip_thoughts/googlenews/GoogleNews-vectors-negative300.bin" - -# Output directory. -EXP_VOCAB_DIR="${HOME}/skip_thoughts/exp_vocab" - -# Build the vocabulary expansion script. -cd tensorflow-models/skip_thoughts -bazel build -c opt //skip_thoughts:vocabulary_expansion - -# Run the vocabulary expansion script. -bazel-bin/skip_thoughts/vocabulary_expansion \ - --skip_thoughts_model=${CHECKPOINT_PATH} \ - --skip_thoughts_vocab=${SKIP_THOUGHTS_VOCAB} \ - --word2vec_model=${WORD2VEC_MODEL} \ - --output_dir=${EXP_VOCAB_DIR} -``` - -## Evaluating a Model - -### Overview - -The model can be evaluated using the benchmark tasks described in the -[Skip-Thought Vectors](https://papers.nips.cc/paper/5950-skip-thought-vectors.pdf) -paper. The following tasks are supported (refer to the paper for full details): - - * **SICK** semantic relatedness task. - * **MSRP** (Microsoft Research Paraphrase Corpus) paraphrase detection task. - * Binary classification tasks: - * **MR** movie review sentiment task. - * **CR** customer product review task. - * **SUBJ** subjectivity/objectivity task. - * **MPQA** opinion polarity task. - * **TREC** question-type classification task. - -### Preparation - -You will need to clone or download the -[skip-thoughts GitHub repository](https://github.com/ryankiros/skip-thoughts) by -[ryankiros](https://github.com/ryankiros) (the first author of the Skip-Thoughts -paper): - -```shell -# Folder to clone the repository to. -ST_KIROS_DIR="${HOME}/skip_thoughts/skipthoughts_kiros" - -# Clone the repository. -git clone git@github.com:ryankiros/skip-thoughts.git "${ST_KIROS_DIR}/skipthoughts" - -# Make the package importable. -export PYTHONPATH="${ST_KIROS_DIR}/:${PYTHONPATH}" -``` - -You will also need to download the data needed for each evaluation task. See the -instructions [here](https://github.com/ryankiros/skip-thoughts). - -For example, the CR (customer review) dataset is found [here](http://nlp.stanford.edu/~sidaw/home/projects:nbsvm). For this task we want the -files `custrev.pos` and `custrev.neg`. - -### Run the Evaluation Tasks - -In the following example we will evaluate a unidirectional model ("uni-skip" in -the paper) on the CR task. To use a bidirectional model ("bi-skip" in the -paper), simply pass the flags `--bi_vocab_file`, `--bi_embeddings_file` and -`--bi_checkpoint_path` instead. To use the "combine-skip" model described in the -paper you will need to pass both the unidirectional and bidirectional flags. - -```shell -# Path to checkpoint file or a directory containing checkpoint files (the script -# will select the most recent). -CHECKPOINT_PATH="${HOME}/skip_thoughts/model/train" - -# Vocabulary file generated by the vocabulary expansion script. -VOCAB_FILE="${HOME}/skip_thoughts/exp_vocab/vocab.txt" - -# Embeddings file generated by the vocabulary expansion script. -EMBEDDINGS_FILE="${HOME}/skip_thoughts/exp_vocab/embeddings.npy" - -# Directory containing files custrev.pos and custrev.neg. -EVAL_DATA_DIR="${HOME}/skip_thoughts/eval_data" - -# Build the evaluation script. 
-cd tensorflow-models/skip_thoughts
-bazel build -c opt //skip_thoughts:evaluate
-
-# Run the evaluation script.
-bazel-bin/skip_thoughts/evaluate \
-  --eval_task=CR \
-  --data_dir=${EVAL_DATA_DIR} \
-  --uni_vocab_file=${VOCAB_FILE} \
-  --uni_embeddings_file=${EMBEDDINGS_FILE} \
-  --uni_checkpoint_path=${CHECKPOINT_PATH}
-```
-
-Output:
-
-```python
-[0.82539682539682535, 0.84084880636604775, 0.83023872679045096,
- 0.86206896551724133, 0.83554376657824936, 0.85676392572944293,
- 0.84084880636604775, 0.83023872679045096, 0.85145888594164454,
- 0.82758620689655171]
-```
-
-The output is a list of accuracies of 10 cross-validation classification models.
-To get a single number, simply take the average:
-
-```python
-ipython # Launch IPython.
-
-In [0]:
-import numpy as np
-np.mean([0.82539682539682535, 0.84084880636604775, 0.83023872679045096,
-  0.86206896551724133, 0.83554376657824936, 0.85676392572944293,
-  0.84084880636604775, 0.83023872679045096, 0.85145888594164454,
-  0.82758620689655171])
-
-Out [0]: 0.84009936423729525
-```
-
-## Encoding Sentences
-
-In this example we will encode data from the
-[movie review dataset](https://www.cs.cornell.edu/people/pabo/movie-review-data/)
-(specifically the [sentence polarity dataset v1.0](https://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz)).
-
-```python
-ipython # Launch IPython.
-
-In [0]:
-
-# Imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import numpy as np
-import os.path
-import scipy.spatial.distance as sd
-from skip_thoughts import configuration
-from skip_thoughts import encoder_manager
-
-In [1]:
-# Set paths to the model.
-VOCAB_FILE = "/path/to/vocab.txt"
-EMBEDDING_MATRIX_FILE = "/path/to/embeddings.npy"
-CHECKPOINT_PATH = "/path/to/model.ckpt-9999"
-# The following directory should contain files rt-polarity.neg and
-# rt-polarity.pos.
-MR_DATA_DIR = "/dir/containing/mr/data"
-
-In [2]:
-# Set up the encoder. Here we are using a single unidirectional model.
-# To use a bidirectional model as well, call load_model() again with
-# configuration.model_config(bidirectional_encoder=True) and paths to the
-# bidirectional model's files. The encoder will use the concatenation of
-# all loaded models.
-encoder = encoder_manager.EncoderManager()
-encoder.load_model(configuration.model_config(),
-                   vocabulary_file=VOCAB_FILE,
-                   embedding_matrix_file=EMBEDDING_MATRIX_FILE,
-                   checkpoint_path=CHECKPOINT_PATH)
-
-In [3]:
-# Load the movie review dataset.
-data = []
-with open(os.path.join(MR_DATA_DIR, 'rt-polarity.neg'), 'rb') as f:
-  data.extend([line.decode('latin-1').strip() for line in f])
-with open(os.path.join(MR_DATA_DIR, 'rt-polarity.pos'), 'rb') as f:
-  data.extend([line.decode('latin-1').strip() for line in f])
-
-In [4]:
-# Generate Skip-Thought Vectors for each sentence in the dataset.
-encodings = encoder.encode(data)
-
-In [5]:
-# Define a helper function to generate nearest neighbors.
-def get_nn(ind, num=10):
-  encoding = encodings[ind]
-  scores = sd.cdist([encoding], encodings, "cosine")[0]
-  sorted_ids = np.argsort(scores)
-  print("Sentence:")
-  print("", data[ind])
-  print("\nNearest neighbors:")
-  for i in range(1, num + 1):
-    print(" %d. %s (%.3f)" %
-          (i, data[sorted_ids[i]], scores[sorted_ids[i]]))
-
-In [6]:
-# Compute nearest neighbors of the first sentence in the dataset.
-get_nn(0)
-```
-
-Output:
-
-```
-Sentence:
- simplistic , silly and tedious .
-
-Nearest neighbors:
- 1.
trite , banal , cliched , mostly inoffensive . (0.247) - 2. banal and predictable . (0.253) - 3. witless , pointless , tasteless and idiotic . (0.272) - 4. loud , silly , stupid and pointless . (0.295) - 5. grating and tedious . (0.299) - 6. idiotic and ugly . (0.330) - 7. black-and-white and unrealistic . (0.335) - 8. hopelessly inane , humorless and under-inspired . (0.335) - 9. shallow , noisy and pretentious . (0.340) - 10. . . . unlikable , uninteresting , unfunny , and completely , utterly inept . (0.346) -``` diff --git a/research/skip_thoughts/WORKSPACE b/research/skip_thoughts/WORKSPACE deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/skip_thoughts/skip_thoughts/BUILD b/research/skip_thoughts/skip_thoughts/BUILD deleted file mode 100644 index 3ab642cac..000000000 --- a/research/skip_thoughts/skip_thoughts/BUILD +++ /dev/null @@ -1,87 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -py_library( - name = "configuration", - srcs = ["configuration.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "skip_thoughts_model", - srcs = ["skip_thoughts_model.py"], - srcs_version = "PY2AND3", - deps = [ - "//skip_thoughts/ops:gru_cell", - "//skip_thoughts/ops:input_ops", - ], -) - -py_test( - name = "skip_thoughts_model_test", - size = "large", - srcs = ["skip_thoughts_model_test.py"], - deps = [ - ":configuration", - ":skip_thoughts_model", - ], -) - -py_binary( - name = "train", - srcs = ["train.py"], - srcs_version = "PY2AND3", - deps = [ - ":configuration", - ":skip_thoughts_model", - ], -) - -py_binary( - name = "track_perplexity", - srcs = ["track_perplexity.py"], - srcs_version = "PY2AND3", - deps = [ - ":configuration", - ":skip_thoughts_model", - ], -) - -py_binary( - name = "vocabulary_expansion", - srcs = ["vocabulary_expansion.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "skip_thoughts_encoder", - srcs = ["skip_thoughts_encoder.py"], - srcs_version = "PY2AND3", - deps = [ - ":skip_thoughts_model", - "//skip_thoughts/data:special_words", - ], -) - -py_library( - name = "encoder_manager", - srcs = ["encoder_manager.py"], - srcs_version = "PY2AND3", - deps = [ - ":skip_thoughts_encoder", - ], -) - -py_binary( - name = "evaluate", - srcs = ["evaluate.py"], - srcs_version = "PY2AND3", - deps = [ - ":encoder_manager", - "//skip_thoughts:configuration", - ], -) - diff --git a/research/skip_thoughts/skip_thoughts/__init__.py b/research/skip_thoughts/skip_thoughts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/skip_thoughts/skip_thoughts/configuration.py b/research/skip_thoughts/skip_thoughts/configuration.py deleted file mode 100644 index bc04d5798..000000000 --- a/research/skip_thoughts/skip_thoughts/configuration.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Default configuration for model architecture and training.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -class _HParams(object): - """Wrapper for configuration parameters.""" - pass - - -def model_config(input_file_pattern=None, - input_queue_capacity=640000, - num_input_reader_threads=1, - shuffle_input_data=True, - uniform_init_scale=0.1, - vocab_size=20000, - batch_size=128, - word_embedding_dim=620, - bidirectional_encoder=False, - encoder_dim=2400): - """Creates a model configuration object. - - Args: - input_file_pattern: File pattern of sharded TFRecord files containing - tf.Example protobufs. - input_queue_capacity: Number of examples to keep in the input queue. - num_input_reader_threads: Number of threads for prefetching input - tf.Examples. - shuffle_input_data: Whether to shuffle the input data. - uniform_init_scale: Scale of random uniform initializer. - vocab_size: Number of unique words in the vocab. - batch_size: Batch size (training and evaluation only). - word_embedding_dim: Word embedding dimension. - bidirectional_encoder: Whether to use a bidirectional or unidirectional - encoder RNN. - encoder_dim: Number of output dimensions of the sentence encoder. - - Returns: - An object containing model configuration parameters. - """ - config = _HParams() - config.input_file_pattern = input_file_pattern - config.input_queue_capacity = input_queue_capacity - config.num_input_reader_threads = num_input_reader_threads - config.shuffle_input_data = shuffle_input_data - config.uniform_init_scale = uniform_init_scale - config.vocab_size = vocab_size - config.batch_size = batch_size - config.word_embedding_dim = word_embedding_dim - config.bidirectional_encoder = bidirectional_encoder - config.encoder_dim = encoder_dim - return config - - -def training_config(learning_rate=0.0008, - learning_rate_decay_factor=0.5, - learning_rate_decay_steps=400000, - number_of_steps=500000, - clip_gradient_norm=5.0, - save_model_secs=600, - save_summaries_secs=600): - """Creates a training configuration object. - - Args: - learning_rate: Initial learning rate. - learning_rate_decay_factor: If > 0, the learning rate decay factor. - learning_rate_decay_steps: The number of steps before the learning rate - decays by learning_rate_decay_factor. - number_of_steps: The total number of training steps to run. Passing None - will cause the training script to run indefinitely. - clip_gradient_norm: If not None, then clip gradients to this value. - save_model_secs: How often (in seconds) to save model checkpoints. - save_summaries_secs: How often (in seconds) to save model summaries. - - Returns: - An object containing training configuration parameters. - - Raises: - ValueError: If learning_rate_decay_factor is set and - learning_rate_decay_steps is unset. 
-  """
-  if learning_rate_decay_factor and not learning_rate_decay_steps:
-    raise ValueError(
-        "learning_rate_decay_factor requires learning_rate_decay_steps.")
-
-  config = _HParams()
-  config.learning_rate = learning_rate
-  config.learning_rate_decay_factor = learning_rate_decay_factor
-  config.learning_rate_decay_steps = learning_rate_decay_steps
-  config.number_of_steps = number_of_steps
-  config.clip_gradient_norm = clip_gradient_norm
-  config.save_model_secs = save_model_secs
-  config.save_summaries_secs = save_summaries_secs
-  return config
diff --git a/research/skip_thoughts/skip_thoughts/data/BUILD b/research/skip_thoughts/skip_thoughts/data/BUILD
deleted file mode 100644
index a8b61bfdc..000000000
--- a/research/skip_thoughts/skip_thoughts/data/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])  # Apache 2.0
-
-exports_files(["LICENSE"])
-
-py_library(
-    name = "special_words",
-    srcs = ["special_words.py"],
-    srcs_version = "PY2AND3",
-    deps = [],
-)
-
-py_binary(
-    name = "preprocess_dataset",
-    srcs = [
-        "preprocess_dataset.py",
-    ],
-    srcs_version = "PY2AND3",
-    deps = [
-        ":special_words",
-    ],
-)
diff --git a/research/skip_thoughts/skip_thoughts/data/__init__.py b/research/skip_thoughts/skip_thoughts/data/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/research/skip_thoughts/skip_thoughts/data/preprocess_dataset.py b/research/skip_thoughts/skip_thoughts/data/preprocess_dataset.py
deleted file mode 100644
index b6f304f53..000000000
--- a/research/skip_thoughts/skip_thoughts/data/preprocess_dataset.py
+++ /dev/null
@@ -1,301 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Converts a set of text files to TFRecord format with Example protos.
-
-Each Example proto in the output contains the following fields:
-
-  decode_pre: list of int64 ids corresponding to the "previous" sentence.
-  encode: list of int64 ids corresponding to the "current" sentence.
-  decode_post: list of int64 ids corresponding to the "post" sentence.
-
-In addition, the following files are generated:
-
-  vocab.txt: List of "<word> <id>" pairs, where <id> is the integer
-    encoding of <word> in the Example protos.
-  word_counts.txt: List of "<word> <count>" pairs, where <count> is the number
-    of occurrences of <word> in the input files.
-
-The vocabulary of word ids is constructed from the top --num_words by word
-count. All other words get the <unk> word id.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import os
-
-
-import numpy as np
-import tensorflow as tf
-
-from skip_thoughts.data import special_words
-
-FLAGS = tf.flags.FLAGS
-
-tf.flags.DEFINE_string("input_files", None,
-                       "Comma-separated list of globs matching the input "
-                       "files. The format of the input files is assumed to be "
-                       "a list of newline-separated sentences, where each "
-                       "sentence is already tokenized.")
-
-tf.flags.DEFINE_string("vocab_file", "",
-                       "(Optional) existing vocab file. Otherwise, a new vocab "
-                       "file is created and written to the output directory. "
-                       "The file format is a list of newline-separated words, "
-                       "where the word id is the corresponding 0-based index "
-                       "in the file.")
-
-tf.flags.DEFINE_string("output_dir", None, "Output directory.")
-
-tf.flags.DEFINE_integer("train_output_shards", 100,
-                        "Number of output shards for the training set.")
-
-tf.flags.DEFINE_integer("validation_output_shards", 1,
-                        "Number of output shards for the validation set.")
-
-tf.flags.DEFINE_integer("num_validation_sentences", 50000,
-                        "Number of sentences to include in the validation "
-                        "set.")
-
-tf.flags.DEFINE_integer("num_words", 20000,
-                        "Number of words to include in the output.")
-
-tf.flags.DEFINE_integer("max_sentences", 0,
-                        "If > 0, the maximum number of sentences to output.")
-
-tf.flags.DEFINE_integer("max_sentence_length", 30,
-                        "If > 0, exclude sentences whose encode, decode_pre OR "
-                        "decode_post sentence exceeds this length.")
-
-tf.flags.DEFINE_boolean("add_eos", True,
-                        "Whether to add end-of-sentence ids to the output.")
-
-tf.logging.set_verbosity(tf.logging.INFO)
-
-
-def _build_vocabulary(input_files):
-  """Loads or builds the model vocabulary.
-
-  Args:
-    input_files: List of pre-tokenized input .txt files.
-
-  Returns:
-    vocab: A dictionary of word to id.
-  """
-  if FLAGS.vocab_file:
-    tf.logging.info("Loading existing vocab file.")
-    vocab = collections.OrderedDict()
-    with tf.gfile.GFile(FLAGS.vocab_file, mode="r") as f:
-      for i, line in enumerate(f):
-        word = line.decode("utf-8").strip()
-        assert word not in vocab, "Attempting to add word twice: %s" % word
-        vocab[word] = i
-    tf.logging.info("Read vocab of size %d from %s",
-                    len(vocab), FLAGS.vocab_file)
-    return vocab
-
-  tf.logging.info("Creating vocabulary.")
-  num = 0
-  wordcount = collections.Counter()
-  for input_file in input_files:
-    tf.logging.info("Processing file: %s", input_file)
-    for sentence in tf.gfile.FastGFile(input_file):
-      wordcount.update(sentence.split())
-
-      num += 1
-      if num % 1000000 == 0:
-        tf.logging.info("Processed %d sentences", num)
-
-  tf.logging.info("Processed %d sentences total", num)
-
-  words = list(wordcount)
-  freqs = list(wordcount.values())
-  sorted_indices = np.argsort(freqs)[::-1]
-
-  vocab = collections.OrderedDict()
-  vocab[special_words.EOS] = special_words.EOS_ID
-  vocab[special_words.UNK] = special_words.UNK_ID
-  for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):
-    vocab[words[w_index]] = w_id + 2  # 0: EOS, 1: UNK.
-
-  tf.logging.info("Created vocab with %d words", len(vocab))
-
-  vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
-  with tf.gfile.FastGFile(vocab_file, "w") as f:
-    f.write("\n".join(vocab.keys()))
-  tf.logging.info("Wrote vocab file to %s", vocab_file)
-
-  word_counts_file = os.path.join(FLAGS.output_dir, "word_counts.txt")
-  with tf.gfile.FastGFile(word_counts_file, "w") as f:
-    for i in sorted_indices:
-      f.write("%s %d\n" % (words[i], freqs[i]))
-  tf.logging.info("Wrote word counts file to %s", word_counts_file)
-
-  return vocab
-
-
-def _int64_feature(value):
-  """Helper for creating an Int64 Feature."""
-  return tf.train.Feature(int64_list=tf.train.Int64List(
-      value=[int(v) for v in value]))
-
-
-def _sentence_to_ids(sentence, vocab):
-  """Helper for converting a sentence (list of words) to a list of ids."""
-  ids = [vocab.get(w, special_words.UNK_ID) for w in sentence]
-  if FLAGS.add_eos:
-    ids.append(special_words.EOS_ID)
-  return ids
-
-
-def _create_serialized_example(predecessor, current, successor, vocab):
-  """Helper for creating a serialized Example proto."""
-  example = tf.train.Example(features=tf.train.Features(feature={
-      "decode_pre": _int64_feature(_sentence_to_ids(predecessor, vocab)),
-      "encode": _int64_feature(_sentence_to_ids(current, vocab)),
-      "decode_post": _int64_feature(_sentence_to_ids(successor, vocab)),
-  }))
-
-  return example.SerializeToString()
-
-
-def _process_input_file(filename, vocab, stats):
-  """Processes the sentences in an input file.
-
-  Args:
-    filename: Path to a pre-tokenized input .txt file.
-    vocab: A dictionary of word to id.
-    stats: A Counter object for statistics.
-
-  Returns:
-    processed: A list of serialized Example protos.
-  """
-  tf.logging.info("Processing input file: %s", filename)
-  processed = []
-
-  predecessor = None  # Predecessor sentence (list of words).
-  current = None  # Current sentence (list of words).
-  successor = None  # Successor sentence (list of words).
-
-  for successor_str in tf.gfile.FastGFile(filename):
-    stats.update(["sentences_seen"])
-    successor = successor_str.split()
-
-    # The first 2 sentences per file will be skipped.
-    if predecessor and current and successor:
-      stats.update(["sentences_considered"])
-
-      # Note that we are going to insert <eos> later, so we only allow
-      # sentences with strictly less than max_sentence_length to pass.
-      if FLAGS.max_sentence_length and (
-          len(predecessor) >= FLAGS.max_sentence_length or len(current) >=
-          FLAGS.max_sentence_length or len(successor) >=
-          FLAGS.max_sentence_length):
-        stats.update(["sentences_too_long"])
-      else:
-        serialized = _create_serialized_example(predecessor, current, successor,
-                                                vocab)
-        processed.append(serialized)
-        stats.update(["sentences_output"])
-
-    predecessor = current
-    current = successor
-
-    sentences_seen = stats["sentences_seen"]
-    sentences_output = stats["sentences_output"]
-    if sentences_seen and sentences_seen % 100000 == 0:
-      tf.logging.info("Processed %d sentences (%d output)", sentences_seen,
-                      sentences_output)
-    if FLAGS.max_sentences and sentences_output >= FLAGS.max_sentences:
-      break
-
-  tf.logging.info("Completed processing file %s", filename)
-  return processed
-
-
-def _write_shard(filename, dataset, indices):
-  """Writes a TFRecord shard."""
-  with tf.python_io.TFRecordWriter(filename) as writer:
-    for j in indices:
-      writer.write(dataset[j])
-
-
-def _write_dataset(name, dataset, indices, num_shards):
-  """Writes a sharded TFRecord dataset.
-
-  Args:
-    name: Name of the dataset (e.g. "train").
-    dataset: List of serialized Example protos.
-    indices: List of indices of 'dataset' to be written.
-    num_shards: The number of output shards.
-  """
-  tf.logging.info("Writing dataset %s", name)
-  borders = np.int32(np.linspace(0, len(indices), num_shards + 1))
-  for i in range(num_shards):
-    filename = os.path.join(FLAGS.output_dir, "%s-%.5d-of-%.5d" % (name, i,
-                                                                   num_shards))
-    shard_indices = indices[borders[i]:borders[i + 1]]
-    _write_shard(filename, dataset, shard_indices)
-    tf.logging.info("Wrote dataset indices [%d, %d) to output shard %s",
-                    borders[i], borders[i + 1], filename)
-  tf.logging.info("Finished writing %d sentences in dataset %s.",
-                  len(indices), name)
-
-
-def main(unused_argv):
-  if not FLAGS.input_files:
-    raise ValueError("--input_files is required.")
-  if not FLAGS.output_dir:
-    raise ValueError("--output_dir is required.")
-
-  if not tf.gfile.IsDirectory(FLAGS.output_dir):
-    tf.gfile.MakeDirs(FLAGS.output_dir)
-
-  input_files = []
-  for pattern in FLAGS.input_files.split(","):
-    match = tf.gfile.Glob(pattern)
-    if not match:
-      raise ValueError("Found no files matching %s" % pattern)
-    input_files.extend(match)
-  tf.logging.info("Found %d input files.", len(input_files))
-
-  vocab = _build_vocabulary(input_files)
-
-  tf.logging.info("Generating dataset.")
-  stats = collections.Counter()
-  dataset = []
-  for filename in input_files:
-    dataset.extend(_process_input_file(filename, vocab, stats))
-    if FLAGS.max_sentences and stats["sentences_output"] >= FLAGS.max_sentences:
-      break
-
-  tf.logging.info("Generated dataset with %d sentences.", len(dataset))
-  for k, v in stats.items():
-    tf.logging.info("%s: %d", k, v)
-
-  tf.logging.info("Shuffling dataset.")
-  np.random.seed(123)
-  shuffled_indices = np.random.permutation(len(dataset))
-  val_indices = shuffled_indices[:FLAGS.num_validation_sentences]
-  train_indices = shuffled_indices[FLAGS.num_validation_sentences:]
-
-  _write_dataset("train", dataset, train_indices, FLAGS.train_output_shards)
-  _write_dataset("validation", dataset, val_indices,
-                 FLAGS.validation_output_shards)
-
-
-if __name__ == "__main__":
-  tf.app.run()
diff --git a/research/skip_thoughts/skip_thoughts/data/special_words.py b/research/skip_thoughts/skip_thoughts/data/special_words.py
deleted file mode 100644
index fb76b7a94..000000000
--- a/research/skip_thoughts/skip_thoughts/data/special_words.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Special word constants.
-
-NOTE: The ids of the EOS and UNK constants should not be modified. It is assumed
-that these always occupy the first two ids.
-"""
-
-# End of sentence.
-EOS = "<eos>"
-EOS_ID = 0
-
-# Unknown.
-UNK = "<unk>"
-UNK_ID = 1
diff --git a/research/skip_thoughts/skip_thoughts/encoder_manager.py b/research/skip_thoughts/skip_thoughts/encoder_manager.py
deleted file mode 100644
index 00b220245..000000000
--- a/research/skip_thoughts/skip_thoughts/encoder_manager.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Manager class for loading and encoding with multiple skip-thoughts models.
-
-If multiple models are loaded at once then the encode() function returns the
-concatenation of the outputs of each model.
-
-Example usage:
-  manager = EncoderManager()
-  manager.load_model(model_config_1, vocabulary_file_1, embedding_matrix_file_1,
-                     checkpoint_path_1)
-  manager.load_model(model_config_2, vocabulary_file_2, embedding_matrix_file_2,
-                     checkpoint_path_2)
-  encodings = manager.encode(data)
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-
-
-import numpy as np
-import tensorflow as tf
-
-from skip_thoughts import skip_thoughts_encoder
-
-
-class EncoderManager(object):
-  """Manager class for loading and encoding with skip-thoughts models."""
-
-  def __init__(self):
-    self.encoders = []
-    self.sessions = []
-
-  def load_model(self, model_config, vocabulary_file, embedding_matrix_file,
-                 checkpoint_path):
-    """Loads a skip-thoughts model.
-
-    Args:
-      model_config: Object containing parameters for building the model.
-      vocabulary_file: Path to vocabulary file containing a list of newline-
-        separated words where the word id is the corresponding 0-based index in
-        the file.
-      embedding_matrix_file: Path to a serialized numpy array of shape
-        [vocab_size, embedding_dim].
-      checkpoint_path: SkipThoughtsModel checkpoint file or a directory
-        containing a checkpoint file.
-    """
-    tf.logging.info("Reading vocabulary from %s", vocabulary_file)
-    with tf.gfile.GFile(vocabulary_file, mode="rb") as f:
-      lines = list(f.readlines())
-    reverse_vocab = [line.decode("utf-8").strip() for line in lines]
-
-    tf.logging.info("Loaded vocabulary with %d words.", len(reverse_vocab))
-
-    tf.logging.info("Loading embedding matrix from %s", embedding_matrix_file)
-    # Note: tf.gfile.GFile doesn't work here because np.load() calls f.seek()
-    # with 3 arguments.
-    embedding_matrix = np.load(embedding_matrix_file)
-    tf.logging.info("Loaded embedding matrix with shape %s",
-                    embedding_matrix.shape)
-
-    word_embeddings = collections.OrderedDict(
-        zip(reverse_vocab, embedding_matrix))
-
-    g = tf.Graph()
-    with g.as_default():
-      encoder = skip_thoughts_encoder.SkipThoughtsEncoder(word_embeddings)
-      restore_model = encoder.build_graph_from_config(model_config,
-                                                      checkpoint_path)
-
-    sess = tf.Session(graph=g)
-    restore_model(sess)
-
-    self.encoders.append(encoder)
-    self.sessions.append(sess)
-
-  def encode(self,
-             data,
-             use_norm=True,
-             verbose=False,
-             batch_size=128,
-             use_eos=False):
-    """Encodes a sequence of sentences as skip-thought vectors.
-
-    Args:
-      data: A list of input strings.
-      use_norm: If True, normalize output skip-thought vectors to unit L2 norm.
-      verbose: Whether to log every batch.
-      batch_size: Batch size for the RNN encoders.
-      use_eos: If True, append the end-of-sentence word to each input sentence.
-
-    Returns:
-      thought_vectors: A list of numpy arrays corresponding to 'data'.
-
-    Raises:
-      ValueError: If called before calling load_model.
-    """
-    if not self.encoders:
-      raise ValueError(
-          "Must call load_model at least once before calling encode.")
-
-    encoded = []
-    for encoder, sess in zip(self.encoders, self.sessions):
-      encoded.append(
-          np.array(
-              encoder.encode(
-                  sess,
-                  data,
-                  use_norm=use_norm,
-                  verbose=verbose,
-                  batch_size=batch_size,
-                  use_eos=use_eos)))
-
-    return np.concatenate(encoded, axis=1)
-
-  def close(self):
-    """Closes the active TensorFlow Sessions."""
-    for sess in self.sessions:
-      sess.close()
diff --git a/research/skip_thoughts/skip_thoughts/evaluate.py b/research/skip_thoughts/skip_thoughts/evaluate.py
deleted file mode 100644
index e840d9da9..000000000
--- a/research/skip_thoughts/skip_thoughts/evaluate.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Script to evaluate a skip-thoughts model.
-
-This script can evaluate a model with a unidirectional encoder ("uni-skip" in
-the paper); or a model with a bidirectional encoder ("bi-skip"); or the
-combination of a model with a unidirectional encoder and a model with a
-bidirectional encoder ("combine-skip").
-
-The uni-skip model (if it exists) is specified by the flags
---uni_vocab_file, --uni_embeddings_file, --uni_checkpoint_path.
-
-The bi-skip model (if it exists) is specified by the flags
---bi_vocab_file, --bi_embeddings_file, --bi_checkpoint_path.
-
-The evaluation tasks have different running times. SICK may take 5-10 minutes.
-MSRP, TREC and CR may take 20-60 minutes. SUBJ, MPQA and MR may take 2+ hours.
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -from skipthoughts import eval_classification -from skipthoughts import eval_msrp -from skipthoughts import eval_sick -from skipthoughts import eval_trec -import tensorflow as tf - -from skip_thoughts import configuration -from skip_thoughts import encoder_manager - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("eval_task", "CR", - "Name of the evaluation task to run. Available tasks: " - "MR, CR, SUBJ, MPQA, SICK, MSRP, TREC.") - -tf.flags.DEFINE_string("data_dir", None, "Directory containing training data.") - -tf.flags.DEFINE_string("uni_vocab_file", None, - "Path to vocabulary file containing a list of newline-" - "separated words where the word id is the " - "corresponding 0-based index in the file.") -tf.flags.DEFINE_string("bi_vocab_file", None, - "Path to vocabulary file containing a list of newline-" - "separated words where the word id is the " - "corresponding 0-based index in the file.") - -tf.flags.DEFINE_string("uni_embeddings_file", None, - "Path to serialized numpy array of shape " - "[vocab_size, embedding_dim].") -tf.flags.DEFINE_string("bi_embeddings_file", None, - "Path to serialized numpy array of shape " - "[vocab_size, embedding_dim].") - -tf.flags.DEFINE_string("uni_checkpoint_path", None, - "Checkpoint file or directory containing a checkpoint " - "file.") -tf.flags.DEFINE_string("bi_checkpoint_path", None, - "Checkpoint file or directory containing a checkpoint " - "file.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def main(unused_argv): - if not FLAGS.data_dir: - raise ValueError("--data_dir is required.") - - encoder = encoder_manager.EncoderManager() - - # Maybe load unidirectional encoder. - if FLAGS.uni_checkpoint_path: - print("Loading unidirectional model...") - uni_config = configuration.model_config() - encoder.load_model(uni_config, FLAGS.uni_vocab_file, - FLAGS.uni_embeddings_file, FLAGS.uni_checkpoint_path) - - # Maybe load bidirectional encoder. 
- if FLAGS.bi_checkpoint_path: - print("Loading bidirectional model...") - bi_config = configuration.model_config(bidirectional_encoder=True) - encoder.load_model(bi_config, FLAGS.bi_vocab_file, FLAGS.bi_embeddings_file, - FLAGS.bi_checkpoint_path) - - if FLAGS.eval_task in ["MR", "CR", "SUBJ", "MPQA"]: - eval_classification.eval_nested_kfold( - encoder, FLAGS.eval_task, FLAGS.data_dir, use_nb=False) - elif FLAGS.eval_task == "SICK": - eval_sick.evaluate(encoder, evaltest=True, loc=FLAGS.data_dir) - elif FLAGS.eval_task == "MSRP": - eval_msrp.evaluate( - encoder, evalcv=True, evaltest=True, use_feats=True, loc=FLAGS.data_dir) - elif FLAGS.eval_task == "TREC": - eval_trec.evaluate(encoder, evalcv=True, evaltest=True, loc=FLAGS.data_dir) - else: - raise ValueError("Unrecognized eval_task: %s" % FLAGS.eval_task) - - encoder.close() - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/skip_thoughts/skip_thoughts/ops/BUILD b/research/skip_thoughts/skip_thoughts/ops/BUILD deleted file mode 100644 index 896d54db7..000000000 --- a/research/skip_thoughts/skip_thoughts/ops/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -py_library( - name = "input_ops", - srcs = ["input_ops.py"], - srcs_version = "PY2AND3", -) - -py_library( - name = "gru_cell", - srcs = ["gru_cell.py"], - srcs_version = "PY2AND3", -) diff --git a/research/skip_thoughts/skip_thoughts/ops/__init__.py b/research/skip_thoughts/skip_thoughts/ops/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/research/skip_thoughts/skip_thoughts/ops/gru_cell.py b/research/skip_thoughts/skip_thoughts/ops/gru_cell.py deleted file mode 100644 index c4bee46d3..000000000 --- a/research/skip_thoughts/skip_thoughts/ops/gru_cell.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""GRU cell implementation for the skip-thought vectors model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -_layer_norm = tf.contrib.layers.layer_norm - - -class LayerNormGRUCell(tf.contrib.rnn.RNNCell): - """GRU cell with layer normalization. - - The layer normalization implementation is based on: - - https://arxiv.org/abs/1607.06450. - - "Layer Normalization" - Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton - """ - - def __init__(self, - num_units, - w_initializer, - u_initializer, - b_initializer, - activation=tf.nn.tanh): - """Initializes the cell. - - Args: - num_units: Number of cell units. - w_initializer: Initializer for the "W" (input) parameter matrices. - u_initializer: Initializer for the "U" (recurrent) parameter matrices. - b_initializer: Initializer for the "b" (bias) parameter vectors. - activation: Cell activation function. 
- """ - self._num_units = num_units - self._w_initializer = w_initializer - self._u_initializer = u_initializer - self._b_initializer = b_initializer - self._activation = activation - - @property - def state_size(self): - return self._num_units - - @property - def output_size(self): - return self._num_units - - def _w_h_initializer(self): - """Returns an initializer for the "W_h" parameter matrix. - - See equation (23) in the paper. The "W_h" parameter matrix is the - concatenation of two parameter submatrices. The matrix returned is - [U_z, U_r]. - - Returns: - A Tensor with shape [num_units, 2 * num_units] as described above. - """ - - def _initializer(shape, dtype=tf.float32, partition_info=None): - num_units = self._num_units - assert shape == [num_units, 2 * num_units] - u_z = self._u_initializer([num_units, num_units], dtype, partition_info) - u_r = self._u_initializer([num_units, num_units], dtype, partition_info) - return tf.concat([u_z, u_r], 1) - - return _initializer - - def _w_x_initializer(self, input_dim): - """Returns an initializer for the "W_x" parameter matrix. - - See equation (23) in the paper. The "W_x" parameter matrix is the - concatenation of two parameter submatrices. The matrix returned is - [W_z, W_r]. - - Args: - input_dim: The dimension of the cell inputs. - - Returns: - A Tensor with shape [input_dim, 2 * num_units] as described above. - """ - - def _initializer(shape, dtype=tf.float32, partition_info=None): - num_units = self._num_units - assert shape == [input_dim, 2 * num_units] - w_z = self._w_initializer([input_dim, num_units], dtype, partition_info) - w_r = self._w_initializer([input_dim, num_units], dtype, partition_info) - return tf.concat([w_z, w_r], 1) - - return _initializer - - def __call__(self, inputs, state, scope=None): - """GRU cell with layer normalization.""" - input_dim = inputs.get_shape().as_list()[1] - num_units = self._num_units - - with tf.variable_scope(scope or "gru_cell"): - with tf.variable_scope("gates"): - w_h = tf.get_variable( - "w_h", [num_units, 2 * num_units], - initializer=self._w_h_initializer()) - w_x = tf.get_variable( - "w_x", [input_dim, 2 * num_units], - initializer=self._w_x_initializer(input_dim)) - z_and_r = (_layer_norm(tf.matmul(state, w_h), scope="layer_norm/w_h") + - _layer_norm(tf.matmul(inputs, w_x), scope="layer_norm/w_x")) - z, r = tf.split(tf.sigmoid(z_and_r), 2, 1) - with tf.variable_scope("candidate"): - w = tf.get_variable( - "w", [input_dim, num_units], initializer=self._w_initializer) - u = tf.get_variable( - "u", [num_units, num_units], initializer=self._u_initializer) - h_hat = (r * _layer_norm(tf.matmul(state, u), scope="layer_norm/u") + - _layer_norm(tf.matmul(inputs, w), scope="layer_norm/w")) - new_h = (1 - z) * state + z * self._activation(h_hat) - return new_h, new_h diff --git a/research/skip_thoughts/skip_thoughts/ops/input_ops.py b/research/skip_thoughts/skip_thoughts/ops/input_ops.py deleted file mode 100644 index 51b03fc5d..000000000 --- a/research/skip_thoughts/skip_thoughts/ops/input_ops.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Input ops.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - - -import tensorflow as tf - -# A SentenceBatch is a pair of Tensors: -# ids: Batch of input sentences represented as sequences of word ids: an int64 -# Tensor with shape [batch_size, padded_length]. -# mask: Boolean mask distinguishing real words (1) from padded words (0): an -# int32 Tensor with shape [batch_size, padded_length]. -SentenceBatch = collections.namedtuple("SentenceBatch", ("ids", "mask")) - - -def parse_example_batch(serialized): - """Parses a batch of tf.Example protos. - - Args: - serialized: A 1-D string Tensor; a batch of serialized tf.Example protos. - Returns: - encode: A SentenceBatch of encode sentences. - decode_pre: A SentenceBatch of "previous" sentences to decode. - decode_post: A SentenceBatch of "post" sentences to decode. - """ - features = tf.parse_example( - serialized, - features={ - "encode": tf.VarLenFeature(dtype=tf.int64), - "decode_pre": tf.VarLenFeature(dtype=tf.int64), - "decode_post": tf.VarLenFeature(dtype=tf.int64), - }) - - def _sparse_to_batch(sparse): - ids = tf.sparse_tensor_to_dense(sparse) # Padding with zeroes. - mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape, - tf.ones_like(sparse.values, dtype=tf.int32)) - return SentenceBatch(ids=ids, mask=mask) - - output_names = ("encode", "decode_pre", "decode_post") - return tuple(_sparse_to_batch(features[x]) for x in output_names) - - -def prefetch_input_data(reader, - file_pattern, - shuffle, - capacity, - num_reader_threads=1): - """Prefetches string values from disk into an input queue. - - Args: - reader: Instance of tf.ReaderBase. - file_pattern: Comma-separated list of file patterns (e.g. - "/tmp/train_data-?????-of-00100", where '?' acts as a wildcard that - matches any character). - shuffle: Boolean; whether to randomly shuffle the input data. - capacity: Queue capacity (number of records). - num_reader_threads: Number of reader threads feeding into the queue. - - Returns: - A Queue containing prefetched string values. 
- """ - data_files = [] - for pattern in file_pattern.split(","): - data_files.extend(tf.gfile.Glob(pattern)) - if not data_files: - tf.logging.fatal("Found no input files matching %s", file_pattern) - else: - tf.logging.info("Prefetching values from %d files matching %s", - len(data_files), file_pattern) - - filename_queue = tf.train.string_input_producer( - data_files, shuffle=shuffle, capacity=16, name="filename_queue") - - if shuffle: - min_after_dequeue = int(0.6 * capacity) - values_queue = tf.RandomShuffleQueue( - capacity=capacity, - min_after_dequeue=min_after_dequeue, - dtypes=[tf.string], - shapes=[[]], - name="random_input_queue") - else: - values_queue = tf.FIFOQueue( - capacity=capacity, - dtypes=[tf.string], - shapes=[[]], - name="fifo_input_queue") - - enqueue_ops = [] - for _ in range(num_reader_threads): - _, value = reader.read(filename_queue) - enqueue_ops.append(values_queue.enqueue([value])) - tf.train.queue_runner.add_queue_runner( - tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops)) - tf.summary.scalar("queue/%s/fraction_of_%d_full" % (values_queue.name, - capacity), - tf.cast(values_queue.size(), tf.float32) * (1.0 / capacity)) - - return values_queue diff --git a/research/skip_thoughts/skip_thoughts/skip_thoughts_encoder.py b/research/skip_thoughts/skip_thoughts/skip_thoughts_encoder.py deleted file mode 100644 index 79c47c588..000000000 --- a/research/skip_thoughts/skip_thoughts/skip_thoughts_encoder.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Class for encoding text using a trained SkipThoughtsModel. - -Example usage: - g = tf.Graph() - with g.as_default(): - encoder = SkipThoughtsEncoder(embeddings) - restore_fn = encoder.build_graph_from_config(model_config, checkpoint_path) - - with tf.Session(graph=g) as sess: - restore_fn(sess) - skip_thought_vectors = encoder.encode(sess, data) -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os.path - - -import nltk -import nltk.tokenize -import numpy as np -import tensorflow as tf - -from skip_thoughts import skip_thoughts_model -from skip_thoughts.data import special_words - - -def _pad(seq, target_len): - """Pads a sequence of word embeddings up to the target length. - - Args: - seq: Sequence of word embeddings. - target_len: Desired padded sequence length. - - Returns: - embeddings: Input sequence padded with zero embeddings up to the target - length. - mask: A 0/1 vector with zeros corresponding to padded embeddings. - - Raises: - ValueError: If len(seq) is not in the interval (0, target_len]. 
- """ - seq_len = len(seq) - if seq_len <= 0 or seq_len > target_len: - raise ValueError("Expected 0 < len(seq) <= %d, got %d" % (target_len, - seq_len)) - - emb_dim = seq[0].shape[0] - padded_seq = np.zeros(shape=(target_len, emb_dim), dtype=seq[0].dtype) - mask = np.zeros(shape=(target_len,), dtype=np.int8) - for i in range(seq_len): - padded_seq[i] = seq[i] - mask[i] = 1 - return padded_seq, mask - - -def _batch_and_pad(sequences): - """Batches and pads sequences of word embeddings into a 2D array. - - Args: - sequences: A list of batch_size sequences of word embeddings. - - Returns: - embeddings: A numpy array with shape [batch_size, padded_length, emb_dim]. - mask: A numpy 0/1 array with shape [batch_size, padded_length] with zeros - corresponding to padded elements. - """ - batch_embeddings = [] - batch_mask = [] - batch_len = max([len(seq) for seq in sequences]) - for seq in sequences: - embeddings, mask = _pad(seq, batch_len) - batch_embeddings.append(embeddings) - batch_mask.append(mask) - return np.array(batch_embeddings), np.array(batch_mask) - - -class SkipThoughtsEncoder(object): - """Skip-thoughts sentence encoder.""" - - def __init__(self, embeddings): - """Initializes the encoder. - - Args: - embeddings: Dictionary of word to embedding vector (1D numpy array). - """ - self._sentence_detector = nltk.data.load("tokenizers/punkt/english.pickle") - self._embeddings = embeddings - - def _create_restore_fn(self, checkpoint_path, saver): - """Creates a function that restores a model from checkpoint. - - Args: - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. - saver: Saver for restoring variables from the checkpoint file. - - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. - - Raises: - ValueError: If checkpoint_path does not refer to a checkpoint file or a - directory containing a checkpoint file. - """ - if tf.gfile.IsDirectory(checkpoint_path): - latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path) - if not latest_checkpoint: - raise ValueError("No checkpoint file found in: %s" % checkpoint_path) - checkpoint_path = latest_checkpoint - - def _restore_fn(sess): - tf.logging.info("Loading model from checkpoint: %s", checkpoint_path) - saver.restore(sess, checkpoint_path) - tf.logging.info("Successfully loaded checkpoint: %s", - os.path.basename(checkpoint_path)) - - return _restore_fn - - def build_graph_from_config(self, model_config, checkpoint_path): - """Builds the inference graph from a configuration object. - - Args: - model_config: Object containing configuration for building the model. - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. - - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. - """ - tf.logging.info("Building model.") - model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="encode") - model.build() - saver = tf.train.Saver() - - return self._create_restore_fn(checkpoint_path, saver) - - def build_graph_from_proto(self, graph_def_file, saver_def_file, - checkpoint_path): - """Builds the inference graph from serialized GraphDef and SaverDef protos. - - Args: - graph_def_file: File containing a serialized GraphDef proto. - saver_def_file: File containing a serialized SaverDef proto. - checkpoint_path: Checkpoint file or a directory containing a checkpoint - file. 
- - Returns: - restore_fn: A function such that restore_fn(sess) loads model variables - from the checkpoint file. - """ - # Load the Graph. - tf.logging.info("Loading GraphDef from file: %s", graph_def_file) - graph_def = tf.GraphDef() - with tf.gfile.FastGFile(graph_def_file, "rb") as f: - graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name="") - - # Load the Saver. - tf.logging.info("Loading SaverDef from file: %s", saver_def_file) - saver_def = tf.train.SaverDef() - with tf.gfile.FastGFile(saver_def_file, "rb") as f: - saver_def.ParseFromString(f.read()) - saver = tf.train.Saver(saver_def=saver_def) - - return self._create_restore_fn(checkpoint_path, saver) - - def _tokenize(self, item): - """Tokenizes an input string into a list of words.""" - tokenized = [] - for s in self._sentence_detector.tokenize(item): - tokenized.extend(nltk.tokenize.word_tokenize(s)) - - return tokenized - - def _word_to_embedding(self, w): - """Returns the embedding of a word.""" - return self._embeddings.get(w, self._embeddings[special_words.UNK]) - - def _preprocess(self, data, use_eos): - """Preprocesses text for the encoder. - - Args: - data: A list of input strings. - use_eos: Whether to append the end-of-sentence word to each sentence. - - Returns: - embeddings: A list of word embedding sequences corresponding to the input - strings. - """ - preprocessed_data = [] - for item in data: - tokenized = self._tokenize(item) - if use_eos: - tokenized.append(special_words.EOS) - preprocessed_data.append([self._word_to_embedding(w) for w in tokenized]) - return preprocessed_data - - def encode(self, - sess, - data, - use_norm=True, - verbose=True, - batch_size=128, - use_eos=False): - """Encodes a sequence of sentences as skip-thought vectors. - - Args: - sess: TensorFlow Session. - data: A list of input strings. - use_norm: Whether to normalize skip-thought vectors to unit L2 norm. - verbose: Whether to log every batch. - batch_size: Batch size for the encoder. - use_eos: Whether to append the end-of-sentence word to each input - sentence. - - Returns: - thought_vectors: A list of numpy arrays corresponding to the skip-thought - encodings of sentences in 'data'. - """ - data = self._preprocess(data, use_eos) - thought_vectors = [] - - batch_indices = np.arange(0, len(data), batch_size) - for batch, start_index in enumerate(batch_indices): - if verbose: - tf.logging.info("Batch %d / %d.", batch, len(batch_indices)) - - embeddings, mask = _batch_and_pad( - data[start_index:start_index + batch_size]) - feed_dict = { - "encode_emb:0": embeddings, - "encode_mask:0": mask, - } - thought_vectors.extend( - sess.run("encoder/thought_vectors:0", feed_dict=feed_dict)) - - if use_norm: - thought_vectors = [v / np.linalg.norm(v) for v in thought_vectors] - - return thought_vectors diff --git a/research/skip_thoughts/skip_thoughts/skip_thoughts_model.py b/research/skip_thoughts/skip_thoughts/skip_thoughts_model.py deleted file mode 100644 index 9a9a43a4f..000000000 --- a/research/skip_thoughts/skip_thoughts/skip_thoughts_model.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Skip-Thoughts model for learning sentence vectors. - -The model is based on the paper: - - "Skip-Thought Vectors" - Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, - Antonio Torralba, Raquel Urtasun, Sanja Fidler. - https://papers.nips.cc/paper/5950-skip-thought-vectors.pdf - -Layer normalization is applied based on the paper: - - "Layer Normalization" - Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton - https://arxiv.org/abs/1607.06450 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from skip_thoughts.ops import gru_cell -from skip_thoughts.ops import input_ops - - -def random_orthonormal_initializer(shape, dtype=tf.float32, - partition_info=None): # pylint: disable=unused-argument - """Variable initializer that produces a random orthonormal matrix.""" - if len(shape) != 2 or shape[0] != shape[1]: - raise ValueError("Expecting square shape, got %s" % shape) - _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True) - return u - - -class SkipThoughtsModel(object): - """Skip-thoughts model.""" - - def __init__(self, config, mode="train", input_reader=None): - """Basic setup. The actual TensorFlow graph is constructed in build(). - - Args: - config: Object containing configuration parameters. - mode: "train", "eval" or "encode". - input_reader: Subclass of tf.ReaderBase for reading the input serialized - tf.Example protocol buffers. Defaults to TFRecordReader. - - Raises: - ValueError: If mode is invalid. - """ - if mode not in ["train", "eval", "encode"]: - raise ValueError("Unrecognized mode: %s" % mode) - - self.config = config - self.mode = mode - self.reader = input_reader if input_reader else tf.TFRecordReader() - - # Initializer used for non-recurrent weights. - self.uniform_initializer = tf.random_uniform_initializer( - minval=-self.config.uniform_init_scale, - maxval=self.config.uniform_init_scale) - - # Input sentences represented as sequences of word ids. "encode" is the - # source sentence, "decode_pre" is the previous sentence and "decode_post" - # is the next sentence. - # Each is an int64 Tensor with shape [batch_size, padded_length]. - self.encode_ids = None - self.decode_pre_ids = None - self.decode_post_ids = None - - # Boolean masks distinguishing real words (1) from padded words (0). - # Each is an int32 Tensor with shape [batch_size, padded_length]. - self.encode_mask = None - self.decode_pre_mask = None - self.decode_post_mask = None - - # Input sentences represented as sequences of word embeddings. - # Each is a float32 Tensor with shape [batch_size, padded_length, emb_dim]. - self.encode_emb = None - self.decode_pre_emb = None - self.decode_post_emb = None - - # The output from the sentence encoder. - # A float32 Tensor with shape [batch_size, num_gru_units]. - self.thought_vectors = None - - # The cross entropy losses and corresponding weights of the decoders. Used - # for evaluation. 
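-    # Each list entry is a Tensor of shape [batch_size * padded_length]; the
-    # weights zero out the loss contribution of padded positions.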
- self.target_cross_entropy_losses = [] - self.target_cross_entropy_loss_weights = [] - - # The total loss to optimize. - self.total_loss = None - - def build_inputs(self): - """Builds the ops for reading input data. - - Outputs: - self.encode_ids - self.decode_pre_ids - self.decode_post_ids - self.encode_mask - self.decode_pre_mask - self.decode_post_mask - """ - if self.mode == "encode": - # Word embeddings are fed from an external vocabulary which has possibly - # been expanded (see vocabulary_expansion.py). - encode_ids = None - decode_pre_ids = None - decode_post_ids = None - encode_mask = tf.placeholder(tf.int8, (None, None), name="encode_mask") - decode_pre_mask = None - decode_post_mask = None - else: - # Prefetch serialized tf.Example protos. - input_queue = input_ops.prefetch_input_data( - self.reader, - self.config.input_file_pattern, - shuffle=self.config.shuffle_input_data, - capacity=self.config.input_queue_capacity, - num_reader_threads=self.config.num_input_reader_threads) - - # Deserialize a batch. - serialized = input_queue.dequeue_many(self.config.batch_size) - encode, decode_pre, decode_post = input_ops.parse_example_batch( - serialized) - - encode_ids = encode.ids - decode_pre_ids = decode_pre.ids - decode_post_ids = decode_post.ids - - encode_mask = encode.mask - decode_pre_mask = decode_pre.mask - decode_post_mask = decode_post.mask - - self.encode_ids = encode_ids - self.decode_pre_ids = decode_pre_ids - self.decode_post_ids = decode_post_ids - - self.encode_mask = encode_mask - self.decode_pre_mask = decode_pre_mask - self.decode_post_mask = decode_post_mask - - def build_word_embeddings(self): - """Builds the word embeddings. - - Inputs: - self.encode_ids - self.decode_pre_ids - self.decode_post_ids - - Outputs: - self.encode_emb - self.decode_pre_emb - self.decode_post_emb - """ - if self.mode == "encode": - # Word embeddings are fed from an external vocabulary which has possibly - # been expanded (see vocabulary_expansion.py). - encode_emb = tf.placeholder(tf.float32, ( - None, None, self.config.word_embedding_dim), "encode_emb") - # No sequences to decode. - decode_pre_emb = None - decode_post_emb = None - else: - word_emb = tf.get_variable( - name="word_embedding", - shape=[self.config.vocab_size, self.config.word_embedding_dim], - initializer=self.uniform_initializer) - - encode_emb = tf.nn.embedding_lookup(word_emb, self.encode_ids) - decode_pre_emb = tf.nn.embedding_lookup(word_emb, self.decode_pre_ids) - decode_post_emb = tf.nn.embedding_lookup(word_emb, self.decode_post_ids) - - self.encode_emb = encode_emb - self.decode_pre_emb = decode_pre_emb - self.decode_post_emb = decode_post_emb - - def _initialize_gru_cell(self, num_units): - """Initializes a GRU cell. - - The Variables of the GRU cell are initialized in a way that exactly matches - the skip-thoughts paper: recurrent weights are initialized from random - orthonormal matrices and non-recurrent weights are initialized from random - uniform matrices. - - Args: - num_units: Number of output units. - - Returns: - cell: An instance of RNNCell with variable initializers that match the - skip-thoughts paper. - """ - return gru_cell.LayerNormGRUCell( - num_units, - w_initializer=self.uniform_initializer, - u_initializer=random_orthonormal_initializer, - b_initializer=tf.constant_initializer(0.0)) - - def build_encoder(self): - """Builds the sentence encoder. 
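-
-    The encoder is a GRU (optionally bidirectional) that reads the embedded
-    source sentence; its final state is emitted as the thought vector.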
- - Inputs: - self.encode_emb - self.encode_mask - - Outputs: - self.thought_vectors - - Raises: - ValueError: if config.bidirectional_encoder is True and config.encoder_dim - is odd. - """ - with tf.variable_scope("encoder") as scope: - length = tf.to_int32(tf.reduce_sum(self.encode_mask, 1), name="length") - - if self.config.bidirectional_encoder: - if self.config.encoder_dim % 2: - raise ValueError( - "encoder_dim must be even when using a bidirectional encoder.") - num_units = self.config.encoder_dim // 2 - cell_fw = self._initialize_gru_cell(num_units) # Forward encoder - cell_bw = self._initialize_gru_cell(num_units) # Backward encoder - _, states = tf.nn.bidirectional_dynamic_rnn( - cell_fw=cell_fw, - cell_bw=cell_bw, - inputs=self.encode_emb, - sequence_length=length, - dtype=tf.float32, - scope=scope) - thought_vectors = tf.concat(states, 1, name="thought_vectors") - else: - cell = self._initialize_gru_cell(self.config.encoder_dim) - _, state = tf.nn.dynamic_rnn( - cell=cell, - inputs=self.encode_emb, - sequence_length=length, - dtype=tf.float32, - scope=scope) - # Use an identity operation to name the Tensor in the Graph. - thought_vectors = tf.identity(state, name="thought_vectors") - - self.thought_vectors = thought_vectors - - def _build_decoder(self, name, embeddings, targets, mask, initial_state, - reuse_logits): - """Builds a sentence decoder. - - Args: - name: Decoder name. - embeddings: Batch of sentences to decode; a float32 Tensor with shape - [batch_size, padded_length, emb_dim]. - targets: Batch of target word ids; an int64 Tensor with shape - [batch_size, padded_length]. - mask: A 0/1 Tensor with shape [batch_size, padded_length]. - initial_state: Initial state of the GRU. A float32 Tensor with shape - [batch_size, num_gru_cells]. - reuse_logits: Whether to reuse the logits weights. - """ - # Decoder RNN. - cell = self._initialize_gru_cell(self.config.encoder_dim) - with tf.variable_scope(name) as scope: - # Add a padding word at the start of each sentence (to correspond to the - # prediction of the first word) and remove the last word. - decoder_input = tf.pad( - embeddings[:, :-1, :], [[0, 0], [1, 0], [0, 0]], name="input") - length = tf.reduce_sum(mask, 1, name="length") - decoder_output, _ = tf.nn.dynamic_rnn( - cell=cell, - inputs=decoder_input, - sequence_length=length, - initial_state=initial_state, - scope=scope) - - # Stack batch vertically. - decoder_output = tf.reshape(decoder_output, [-1, self.config.encoder_dim]) - targets = tf.reshape(targets, [-1]) - weights = tf.to_float(tf.reshape(mask, [-1])) - - # Logits. - with tf.variable_scope("logits", reuse=reuse_logits) as scope: - logits = tf.contrib.layers.fully_connected( - inputs=decoder_output, - num_outputs=self.config.vocab_size, - activation_fn=None, - weights_initializer=self.uniform_initializer, - scope=scope) - - losses = tf.nn.sparse_softmax_cross_entropy_with_logits( - labels=targets, logits=logits) - batch_loss = tf.reduce_sum(losses * weights) - tf.losses.add_loss(batch_loss) - - tf.summary.scalar("losses/" + name, batch_loss) - - self.target_cross_entropy_losses.append(losses) - self.target_cross_entropy_loss_weights.append(weights) - - def build_decoders(self): - """Builds the sentence decoders. 
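-
-    Two decoders are built: one reconstructs the previous sentence and one the
-    next sentence. Both are conditioned on the thought vector through their
-    initial state, and the two decoders share output logits weights.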
- - Inputs: - self.decode_pre_emb - self.decode_post_emb - self.decode_pre_ids - self.decode_post_ids - self.decode_pre_mask - self.decode_post_mask - self.thought_vectors - - Outputs: - self.target_cross_entropy_losses - self.target_cross_entropy_loss_weights - """ - if self.mode != "encode": - # Pre-sentence decoder. - self._build_decoder("decoder_pre", self.decode_pre_emb, - self.decode_pre_ids, self.decode_pre_mask, - self.thought_vectors, False) - - # Post-sentence decoder. Logits weights are reused. - self._build_decoder("decoder_post", self.decode_post_emb, - self.decode_post_ids, self.decode_post_mask, - self.thought_vectors, True) - - def build_loss(self): - """Builds the loss Tensor. - - Outputs: - self.total_loss - """ - if self.mode != "encode": - total_loss = tf.losses.get_total_loss() - tf.summary.scalar("losses/total", total_loss) - - self.total_loss = total_loss - - def build_global_step(self): - """Builds the global step Tensor. - - Outputs: - self.global_step - """ - self.global_step = tf.contrib.framework.create_global_step() - - def build(self): - """Creates all ops for training, evaluation or encoding.""" - self.build_inputs() - self.build_word_embeddings() - self.build_encoder() - self.build_decoders() - self.build_loss() - self.build_global_step() diff --git a/research/skip_thoughts/skip_thoughts/skip_thoughts_model_test.py b/research/skip_thoughts/skip_thoughts/skip_thoughts_model_test.py deleted file mode 100644 index 7bd64326d..000000000 --- a/research/skip_thoughts/skip_thoughts/skip_thoughts_model_test.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for tensorflow_models.skip_thoughts.skip_thoughts_model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import numpy as np -import tensorflow as tf - -from skip_thoughts import configuration -from skip_thoughts import skip_thoughts_model - - -class SkipThoughtsModel(skip_thoughts_model.SkipThoughtsModel): - """Subclass of SkipThoughtsModel without the disk I/O.""" - - def build_inputs(self): - if self.mode == "encode": - # Encode mode doesn't read from disk, so defer to parent. - return super(SkipThoughtsModel, self).build_inputs() - else: - # Replace disk I/O with random Tensors. 
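-      # Shapes mimic a real batch: ids are [batch_size, padded_length] with
-      # padded_length fixed at 15; the all-ones masks mark every position as a
-      # real (non-padded) word.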
- self.encode_ids = tf.random_uniform( - [self.config.batch_size, 15], - minval=0, - maxval=self.config.vocab_size, - dtype=tf.int64) - self.decode_pre_ids = tf.random_uniform( - [self.config.batch_size, 15], - minval=0, - maxval=self.config.vocab_size, - dtype=tf.int64) - self.decode_post_ids = tf.random_uniform( - [self.config.batch_size, 15], - minval=0, - maxval=self.config.vocab_size, - dtype=tf.int64) - self.encode_mask = tf.ones_like(self.encode_ids) - self.decode_pre_mask = tf.ones_like(self.decode_pre_ids) - self.decode_post_mask = tf.ones_like(self.decode_post_ids) - - -class SkipThoughtsModelTest(tf.test.TestCase): - - def setUp(self): - super(SkipThoughtsModelTest, self).setUp() - self._model_config = configuration.model_config() - - def _countModelParameters(self): - """Counts the number of parameters in the model at top level scope.""" - counter = {} - for v in tf.global_variables(): - name = v.op.name.split("/")[0] - num_params = v.get_shape().num_elements() - if not num_params: - self.fail("Could not infer num_elements from Variable %s" % v.op.name) - counter[name] = counter.get(name, 0) + num_params - return counter - - def _checkModelParameters(self): - """Verifies the number of parameters in the model.""" - param_counts = self._countModelParameters() - expected_param_counts = { - # vocab_size * embedding_size - "word_embedding": 12400000, - # GRU Cells - "encoder": 21772800, - "decoder_pre": 21772800, - "decoder_post": 21772800, - # (encoder_dim + 1) * vocab_size - "logits": 48020000, - "global_step": 1, - } - self.assertDictEqual(expected_param_counts, param_counts) - - def _checkOutputs(self, expected_shapes, feed_dict=None): - """Verifies that the model produces expected outputs. - - Args: - expected_shapes: A dict mapping Tensor or Tensor name to expected output - shape. - feed_dict: Values of Tensors to feed into Session.run(). - """ - fetches = expected_shapes.keys() - - with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) - outputs = sess.run(fetches, feed_dict) - - for index, output in enumerate(outputs): - tensor = fetches[index] - expected = expected_shapes[tensor] - actual = output.shape - if expected != actual: - self.fail("Tensor %s has shape %s (expected %s)." 
% (tensor, actual, - expected)) - - def testBuildForTraining(self): - model = SkipThoughtsModel(self._model_config, mode="train") - model.build() - - self._checkModelParameters() - - expected_shapes = { - # [batch_size, length] - model.encode_ids: (128, 15), - model.decode_pre_ids: (128, 15), - model.decode_post_ids: (128, 15), - model.encode_mask: (128, 15), - model.decode_pre_mask: (128, 15), - model.decode_post_mask: (128, 15), - # [batch_size, length, word_embedding_dim] - model.encode_emb: (128, 15, 620), - model.decode_pre_emb: (128, 15, 620), - model.decode_post_emb: (128, 15, 620), - # [batch_size, encoder_dim] - model.thought_vectors: (128, 2400), - # [batch_size * length] - model.target_cross_entropy_losses[0]: (1920,), - model.target_cross_entropy_losses[1]: (1920,), - # [batch_size * length] - model.target_cross_entropy_loss_weights[0]: (1920,), - model.target_cross_entropy_loss_weights[1]: (1920,), - # Scalar - model.total_loss: (), - } - self._checkOutputs(expected_shapes) - - def testBuildForEval(self): - model = SkipThoughtsModel(self._model_config, mode="eval") - model.build() - - self._checkModelParameters() - - expected_shapes = { - # [batch_size, length] - model.encode_ids: (128, 15), - model.decode_pre_ids: (128, 15), - model.decode_post_ids: (128, 15), - model.encode_mask: (128, 15), - model.decode_pre_mask: (128, 15), - model.decode_post_mask: (128, 15), - # [batch_size, length, word_embedding_dim] - model.encode_emb: (128, 15, 620), - model.decode_pre_emb: (128, 15, 620), - model.decode_post_emb: (128, 15, 620), - # [batch_size, encoder_dim] - model.thought_vectors: (128, 2400), - # [batch_size * length] - model.target_cross_entropy_losses[0]: (1920,), - model.target_cross_entropy_losses[1]: (1920,), - # [batch_size * length] - model.target_cross_entropy_loss_weights[0]: (1920,), - model.target_cross_entropy_loss_weights[1]: (1920,), - # Scalar - model.total_loss: (), - } - self._checkOutputs(expected_shapes) - - def testBuildForEncode(self): - model = SkipThoughtsModel(self._model_config, mode="encode") - model.build() - - # Test feeding a batch of word embeddings to get skip thought vectors. - encode_emb = np.random.rand(64, 15, 620) - encode_mask = np.ones((64, 15), dtype=np.int64) - feed_dict = {model.encode_emb: encode_emb, model.encode_mask: encode_mask} - expected_shapes = { - # [batch_size, encoder_dim] - model.thought_vectors: (64, 2400), - } - self._checkOutputs(expected_shapes, feed_dict) - - -if __name__ == "__main__": - tf.test.main() diff --git a/research/skip_thoughts/skip_thoughts/track_perplexity.py b/research/skip_thoughts/skip_thoughts/track_perplexity.py deleted file mode 100644 index 637eaf2c0..000000000 --- a/research/skip_thoughts/skip_thoughts/track_perplexity.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tracks training progress via per-word perplexity. 
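-
-Per-word perplexity is computed as exp(total cross entropy loss / total number
-of target words), using the decoders' per-word losses and padding weights.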
- -This script should be run concurrently with training so that summaries show up -in TensorBoard. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from six.moves import range - -import math -import os.path -import time - - -import numpy as np -import tensorflow as tf - -from skip_thoughts import configuration -from skip_thoughts import skip_thoughts_model - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("input_file_pattern", None, - "File pattern of sharded TFRecord input files.") -tf.flags.DEFINE_string("checkpoint_dir", None, - "Directory containing model checkpoints.") -tf.flags.DEFINE_string("eval_dir", None, "Directory to write event logs to.") - -tf.flags.DEFINE_integer("eval_interval_secs", 600, - "Interval between evaluation runs.") -tf.flags.DEFINE_integer("num_eval_examples", 50000, - "Number of examples for evaluation.") - -tf.flags.DEFINE_integer("min_global_step", 100, - "Minimum global step to run evaluation.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def evaluate_model(sess, losses, weights, num_batches, global_step, - summary_writer, summary_op): - """Computes perplexity-per-word over the evaluation dataset. - - Summaries and perplexity-per-word are written out to the eval directory. - - Args: - sess: Session object. - losses: A Tensor of any shape; the target cross entropy losses for the - current batch. - weights: A Tensor of weights corresponding to losses. - num_batches: Integer; the number of evaluation batches. - global_step: Integer; global step of the model checkpoint. - summary_writer: Instance of SummaryWriter. - summary_op: Op for generating model summaries. - """ - # Log model summaries on a single batch. - summary_str = sess.run(summary_op) - summary_writer.add_summary(summary_str, global_step) - - start_time = time.time() - sum_losses = 0.0 - sum_weights = 0.0 - for i in range(num_batches): - batch_losses, batch_weights = sess.run([losses, weights]) - sum_losses += np.sum(batch_losses * batch_weights) - sum_weights += np.sum(batch_weights) - if not i % 100: - tf.logging.info("Computed losses for %d of %d batches.", i + 1, - num_batches) - eval_time = time.time() - start_time - - perplexity = math.exp(sum_losses / sum_weights) - tf.logging.info("Perplexity = %f (%.2f sec)", perplexity, eval_time) - - # Log perplexity to the SummaryWriter. - summary = tf.Summary() - value = summary.value.add() - value.simple_value = perplexity - value.tag = "perplexity" - summary_writer.add_summary(summary, global_step) - - # Write the Events file to the eval directory. - summary_writer.flush() - tf.logging.info("Finished processing evaluation at global step %d.", - global_step) - - -def run_once(model, losses, weights, saver, summary_writer, summary_op): - """Evaluates the latest model checkpoint. - - Args: - model: Instance of SkipThoughtsModel; the model to evaluate. - losses: Tensor; the target cross entropy losses for the current batch. - weights: A Tensor of weights corresponding to losses. - saver: Instance of tf.train.Saver for restoring model Variables. - summary_writer: Instance of FileWriter. - summary_op: Op for generating model summaries. - """ - model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) - if not model_path: - tf.logging.info("Skipping evaluation. No checkpoint found in: %s", - FLAGS.checkpoint_dir) - return - - with tf.Session() as sess: - # Load model from checkpoint. 
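-    # Restoring also recovers the global step, which is checked below so that
-    # checkpoints earlier than --min_global_step are skipped.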
- tf.logging.info("Loading model from checkpoint: %s", model_path) - saver.restore(sess, model_path) - global_step = tf.train.global_step(sess, model.global_step.name) - tf.logging.info("Successfully loaded %s at global step = %d.", - os.path.basename(model_path), global_step) - if global_step < FLAGS.min_global_step: - tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step, - FLAGS.min_global_step) - return - - # Start the queue runners. - coord = tf.train.Coordinator() - threads = tf.train.start_queue_runners(coord=coord) - - num_eval_batches = int( - math.ceil(FLAGS.num_eval_examples / model.config.batch_size)) - - # Run evaluation on the latest checkpoint. - try: - evaluate_model(sess, losses, weights, num_eval_batches, global_step, - summary_writer, summary_op) - except tf.InvalidArgumentError: - tf.logging.error( - "Evaluation raised InvalidArgumentError (e.g. due to Nans).") - finally: - coord.request_stop() - coord.join(threads, stop_grace_period_secs=10) - - -def main(unused_argv): - if not FLAGS.input_file_pattern: - raise ValueError("--input_file_pattern is required.") - if not FLAGS.checkpoint_dir: - raise ValueError("--checkpoint_dir is required.") - if not FLAGS.eval_dir: - raise ValueError("--eval_dir is required.") - - # Create the evaluation directory if it doesn't exist. - eval_dir = FLAGS.eval_dir - if not tf.gfile.IsDirectory(eval_dir): - tf.logging.info("Creating eval directory: %s", eval_dir) - tf.gfile.MakeDirs(eval_dir) - - g = tf.Graph() - with g.as_default(): - # Build the model for evaluation. - model_config = configuration.model_config( - input_file_pattern=FLAGS.input_file_pattern, - input_queue_capacity=FLAGS.num_eval_examples, - shuffle_input_data=False) - model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="eval") - model.build() - - losses = tf.concat(model.target_cross_entropy_losses, 0) - weights = tf.concat(model.target_cross_entropy_loss_weights, 0) - - # Create the Saver to restore model Variables. - saver = tf.train.Saver() - - # Create the summary operation and the summary writer. - summary_op = tf.summary.merge_all() - summary_writer = tf.summary.FileWriter(eval_dir) - - g.finalize() - - # Run a new evaluation run every eval_interval_secs. - while True: - start = time.time() - tf.logging.info("Starting evaluation at " + time.strftime( - "%Y-%m-%d-%H:%M:%S", time.localtime())) - run_once(model, losses, weights, saver, summary_writer, summary_op) - time_to_next_eval = start + FLAGS.eval_interval_secs - time.time() - if time_to_next_eval > 0: - time.sleep(time_to_next_eval) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/skip_thoughts/skip_thoughts/train.py b/research/skip_thoughts/skip_thoughts/train.py deleted file mode 100644 index 445f31c5a..000000000 --- a/research/skip_thoughts/skip_thoughts/train.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Train the skip-thoughts model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -import tensorflow as tf - -from skip_thoughts import configuration -from skip_thoughts import skip_thoughts_model - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("input_file_pattern", None, - "File pattern of sharded TFRecord files containing " - "tf.Example protos.") -tf.flags.DEFINE_string("train_dir", None, - "Directory for saving and loading checkpoints.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def _setup_learning_rate(config, global_step): - """Sets up the learning rate with optional exponential decay. - - Args: - config: Object containing learning rate configuration parameters. - global_step: Tensor; the global step. - - Returns: - learning_rate: Tensor; the learning rate with exponential decay. - """ - if config.learning_rate_decay_factor > 0: - learning_rate = tf.train.exponential_decay( - learning_rate=float(config.learning_rate), - global_step=global_step, - decay_steps=config.learning_rate_decay_steps, - decay_rate=config.learning_rate_decay_factor, - staircase=False) - else: - learning_rate = tf.constant(config.learning_rate) - return learning_rate - - -def main(unused_argv): - if not FLAGS.input_file_pattern: - raise ValueError("--input_file_pattern is required.") - if not FLAGS.train_dir: - raise ValueError("--train_dir is required.") - - model_config = configuration.model_config( - input_file_pattern=FLAGS.input_file_pattern) - training_config = configuration.training_config() - - tf.logging.info("Building training graph.") - g = tf.Graph() - with g.as_default(): - model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train") - model.build() - - learning_rate = _setup_learning_rate(training_config, model.global_step) - optimizer = tf.train.AdamOptimizer(learning_rate) - - train_tensor = tf.contrib.slim.learning.create_train_op( - total_loss=model.total_loss, - optimizer=optimizer, - global_step=model.global_step, - clip_gradient_norm=training_config.clip_gradient_norm) - - saver = tf.train.Saver() - - tf.contrib.slim.learning.train( - train_op=train_tensor, - logdir=FLAGS.train_dir, - graph=g, - global_step=model.global_step, - number_of_steps=training_config.number_of_steps, - save_summaries_secs=training_config.save_summaries_secs, - saver=saver, - save_interval_secs=training_config.save_model_secs) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/skip_thoughts/skip_thoughts/vocabulary_expansion.py b/research/skip_thoughts/skip_thoughts/vocabulary_expansion.py deleted file mode 100644 index 0d6c8e2bc..000000000 --- a/research/skip_thoughts/skip_thoughts/vocabulary_expansion.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Compute an expanded vocabulary of embeddings using a word2vec model. - -This script loads the word embeddings from a trained skip-thoughts model and -from a trained word2vec model (typically with a larger vocabulary). It trains a -linear regression model without regularization to learn a linear mapping from -the word2vec embedding space to the skip-thoughts embedding space. The model is -then applied to all words in the word2vec vocabulary, yielding vectors in the -skip-thoughts word embedding space for the union of the two vocabularies. - -The linear regression task is to learn a parameter matrix W to minimize - || X - Y * W ||^2, -where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1], -Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a -matrix of shape [dim2, dim1]. - -This is based on the "Translation Matrix" method from the paper: - - "Exploiting Similarities among Languages for Machine Translation" - Tomas Mikolov, Quoc V. Le, Ilya Sutskever - https://arxiv.org/abs/1309.4168 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import os.path - - -import gensim.models -import numpy as np -import sklearn.linear_model -import tensorflow as tf - -FLAGS = tf.flags.FLAGS - -tf.flags.DEFINE_string("skip_thoughts_model", None, - "Checkpoint file or directory containing a checkpoint " - "file.") - -tf.flags.DEFINE_string("skip_thoughts_vocab", None, - "Path to vocabulary file containing a list of newline-" - "separated words where the word id is the " - "corresponding 0-based index in the file.") - -tf.flags.DEFINE_string("word2vec_model", None, - "File containing a word2vec model in binary format.") - -tf.flags.DEFINE_string("output_dir", None, "Output directory.") - -tf.logging.set_verbosity(tf.logging.INFO) - - -def _load_skip_thoughts_embeddings(checkpoint_path): - """Loads the embedding matrix from a skip-thoughts model checkpoint. - - Args: - checkpoint_path: Model checkpoint file or directory containing a checkpoint - file. - - Returns: - word_embedding: A numpy array of shape [vocab_size, embedding_dim]. - - Raises: - ValueError: If no checkpoint file matches checkpoint_path. - """ - if tf.gfile.IsDirectory(checkpoint_path): - checkpoint_file = tf.train.latest_checkpoint(checkpoint_path) - if not checkpoint_file: - raise ValueError("No checkpoint file found in %s" % checkpoint_path) - else: - checkpoint_file = checkpoint_path - - tf.logging.info("Loading skip-thoughts embedding matrix from %s", - checkpoint_file) - reader = tf.train.NewCheckpointReader(checkpoint_file) - word_embedding = reader.get_tensor("word_embedding") - tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s", - word_embedding.shape) - - return word_embedding - - -def _load_vocabulary(filename): - """Loads a vocabulary file. - - Args: - filename: Path to text file containing newline-separated words. - - Returns: - vocab: A dictionary mapping word to word id. 
-  """
-  tf.logging.info("Reading vocabulary from %s", filename)
-  vocab = collections.OrderedDict()
-  with tf.gfile.GFile(filename, mode="rb") as f:
-    for i, line in enumerate(f):
-      word = line.decode("utf-8").strip()
-      assert word not in vocab, "Attempting to add word twice: %s" % word
-      vocab[word] = i
-  tf.logging.info("Read vocabulary of size %d", len(vocab))
-  return vocab
-
-
-def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
-  """Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
-
-  Args:
-    skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
-      skip_thoughts_embedding_dim].
-    skip_thoughts_vocab: A dictionary of word to id.
-    word2vec: An instance of gensim.models.Word2Vec.
-
-  Returns:
-    combined_emb: A dictionary mapping words to embedding vectors.
-  """
-  # Find words shared between the two vocabularies.
-  tf.logging.info("Finding shared words")
-  shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
-
-  # Select embedding vectors for shared words.
-  tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
-  shared_st_emb = skip_thoughts_emb[[
-      skip_thoughts_vocab[w] for w in shared_words
-  ]]
-  shared_w2v_emb = word2vec[shared_words]
-
-  # Train a linear regression model on the shared embedding vectors.
-  tf.logging.info("Training linear regression model")
-  model = sklearn.linear_model.LinearRegression()
-  model.fit(shared_w2v_emb, shared_st_emb)
-
-  # Create the expanded vocabulary.
-  tf.logging.info("Creating embeddings for expanded vocabulary")
-  combined_emb = collections.OrderedDict()
-  for w in word2vec.vocab:
-    # Ignore words with underscores (spaces).
-    if "_" not in w:
-      w_emb = model.predict(word2vec[w].reshape(1, -1))
-      combined_emb[w] = w_emb.reshape(-1)
-
-  for w in skip_thoughts_vocab:
-    combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
-
-  tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
-
-  return combined_emb
-
-
-def main(unused_argv):
-  if not FLAGS.skip_thoughts_model:
-    raise ValueError("--skip_thoughts_model is required.")
-  if not FLAGS.skip_thoughts_vocab:
-    raise ValueError("--skip_thoughts_vocab is required.")
-  if not FLAGS.word2vec_model:
-    raise ValueError("--word2vec_model is required.")
-  if not FLAGS.output_dir:
-    raise ValueError("--output_dir is required.")
-
-  if not tf.gfile.IsDirectory(FLAGS.output_dir):
-    tf.gfile.MakeDirs(FLAGS.output_dir)
-
-  # Load the skip-thoughts embeddings and vocabulary.
-  skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
-  skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
-
-  # Load the Word2Vec model.
-  word2vec = gensim.models.KeyedVectors.load_word2vec_format(
-      FLAGS.word2vec_model, binary=True)
-
-  # Run vocabulary expansion.
-  embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
-                                     word2vec)
-
-  # Save the output.
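-  # vocab.txt and embeddings.npy are written in parallel order: row i of the
-  # embedding matrix is the vector for the word on line i of the vocabulary.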
- vocab = embedding_map.keys() - vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt") - with tf.gfile.GFile(vocab_file, "w") as f: - f.write("\n".join(vocab)) - tf.logging.info("Wrote vocabulary file to %s", vocab_file) - - embeddings = np.array(embedding_map.values()) - embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy") - np.save(embeddings_file, embeddings) - tf.logging.info("Wrote embeddings file to %s", embeddings_file) - - -if __name__ == "__main__": - tf.app.run() diff --git a/research/steve/README.md b/research/steve/README.md deleted file mode 100644 index 363be719e..000000000 --- a/research/steve/README.md +++ /dev/null @@ -1,94 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Stochastic Ensemble Value Expansion - -*A hybrid model-based/model-free reinforcement learning algorithm for sample-efficient continuous control.* - -This is the code repository accompanying the paper Sample-Efficient Reinforcement Learning with -Stochastic Ensemble Value Expansion, by Buckman et al. (2018). - -#### Abstract: -Merging model-free and model-based approaches in reinforcement learning has the potential to achieve -the high performance of model-free algorithms with low sample complexity. This is difficult because -an imperfect dynamics model can degrade the performance of the learning algorithm, and in sufficiently -complex environments, the dynamics model will always be imperfect. As a result, a key challenge is to -combine model-based approaches with model-free learning in such a way that errors in the model do not -degrade performance. We propose *stochastic ensemble value expansion* (STEVE), a novel model-based -technique that addresses this issue. By dynamically interpolating between model rollouts of various horizon -lengths for each individual example, STEVE ensures that the model is only utilized when doing so does not -introduce significant errors. Our approach outperforms model-free baselines on challenging continuous -control benchmarks with an order-of-magnitude increase in sample efficiency, and in contrast to previous -model-based approaches, performance does not degrade as the environment gets more complex. - -## Installation -This code is compatible with Ubuntu 16.04 and Python 2.7. There are several prerequisites: -* Numpy, Scipy, and Portalocker: `pip install numpy scipy portalocker` -* TensorFlow 1.6 or above. Instructions can be found on the official TensorFlow page: - [https://www.tensorflow.org/install/install_linux](https://www.tensorflow.org/install/install_linux). - We suggest installing the GPU version of TensorFlow to speed up training. -* OpenAI Gym version 0.9.4. Instructions can be found in the OpenAI Gym repository: - [https://github.com/openai/gym#installation](https://github.com/openai/gym#installation). - Note that you need to replace "pip install gym[all]" with "pip install gym[all]==0.9.4", which - will ensure that you get the correct version of Gym. (The current version of Gym has deprecated - the -v1 MuJoCo environments, which are the environments studied in this paper.) -* MuJoCo version 1.31, which can be downloaded here: [https://www.roboti.us/download/mjpro131_linux.zip](https://www.roboti.us/download/mjpro131_linux.zip). 
-  Simply run: ```
-  cd ~; mkdir -p .mujoco; cd .mujoco/; wget https://www.roboti.us/download/mjpro131_linux.zip; unzip mjpro131_linux.zip```
-  You also need to get a license, and put the license key in ~/.mujoco/ as well.
-* Optionally, Roboschool version 1.1. This is needed only to replicate the Roboschool experiments.
-  Instructions can be found in the OpenAI Roboschool repository:
-  [https://github.com/openai/roboschool#installation](https://github.com/openai/roboschool#installation).
-* Optionally, MoviePy to render trained agents. Instructions on the MoviePy homepage:
-  [https://zulko.github.io/moviepy/install.html](https://zulko.github.io/moviepy/install.html).
-
-## Running Experiments
-To run an experiment, run master.py and pass in a config file and GPU ID. For example: ```
-python master.py config/experiments/speedruns/humanoid/speedy_steve0.json 0```
-The `config/experiments/` directory contains configuration files for all of the experiments run in the paper.
-
-The GPU ID specifies the GPU that should be used to learn the policy. For model-based approaches, the
-next GPU (i.e. GPU_ID+1) is used to learn the worldmodel in parallel.
-
-To resume an experiment that was interrupted, use the same config file and pass the `--resume` flag: ```
-python master.py config/experiments/speedruns/humanoid/speedy_steve0.json 0 --resume```
-
-## Output
-For each experiment, two folders are created in the output directory: `<env_name>/<experiment_name>/log`
-and `<env_name>/<experiment_name>/checkpoints`. The log directory contains the following:
-
-* `hps.json` contains the accumulated hyperparameters of the config file used to generate these results.
-* `valuerl.log` and `worldmodel.log` contain the log output of the learners. `worldmodel.log` will not
-  exist if you are not learning a worldmodel.
-* `<experiment_name>.greedy.csv` records all of the scores of our evaluators. The four columns contain
-  time (hours), epochs, frames, and score.
-
-The checkpoints directory contains the most recent versions of the policy and worldmodel, as well as checkpoints
-of the policy, worldmodel, and their respective replay buffers at various points throughout training.
-
-## Code Organization
-`master.py` launches four types of processes: a ValueRlLearner to learn the policy, a WorldmodelLearner
-to learn the dynamics model, several Interactors to gather data from the environment to train on, and
-a few Evaluators to run the greedy policy in the environment and record the score.
-
-`learner.py` contains a general framework for models which learn from a replay buffer. This is where
-most of the code for the overall training loop is located. `valuerl_learner.py` and `worldmodel_learner.py`
-contain a small amount of model-specific training loop code.
-
-`valuerl.py` implements the core model for all value-function-based policy learning techniques studied
-in the paper, including DDPG, MVE, STEVE, etc. Similarly, `worldmodel.py` contains the core model for
-our dynamics model and reward function.
-
-`replay.py` contains the code for the replay buffer. `nn.py`, `envwrap.py`, `config.py`, and `util.py`
-each contain various helper functions.
-
-`toy_demo.py` is a self-contained demo, written in numpy, that was used to generate the results for the
-toy examples in the first segment of the paper.
-
-`visualizer.py` is a utility script for loading trained policies and inspecting them. In addition to a
-config file and a GPU, it takes the filename of the model to load as a mandatory third argument.
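-
-As a footnote to the Output section above: the `<experiment_name>.greedy.csv` rows
-are plain comma-separated values, so they can be inspected without any project
-code. The following is a minimal sketch (the experiment path and the 100k-frame
-bucketing are assumptions for illustration, not utilities shipped in this
-repository) that averages greedy scores into frame buckets to trace a learning
-curve:
-```
-import csv
-
-# Columns written by the evaluators: hours, epochs, frames, score.
-buckets = {}
-# Hypothetical path to one run's evaluator log; substitute your own.
-with open("output/Humanoid-v1/steve0/log/steve0.greedy.csv") as f:
-    for _, _, frames, score in csv.reader(f):
-        buckets.setdefault(int(frames) // 100000, []).append(float(score))
-
-for b in sorted(buckets):
-    scores = buckets[b]
-    print("%4dk frames: mean score %8.1f over %2d evals"
-          % (b * 100, sum(scores) / len(scores), len(scores)))
-```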
- -## Contact -Please contact GitHub user buckman-google (jacobbuckman@gmail.com) with any questions. diff --git a/research/steve/agent.py b/research/steve/agent.py deleted file mode 100644 index 25069e29b..000000000 --- a/research/steve/agent.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import print_function -from builtins import zip -from builtins import range -from builtins import object -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import numpy as np -import tensorflow as tf -import time, os, traceback, multiprocessing, portalocker - -import envwrap -import valuerl -import util -from config import config - - -def run_env(pipe): - env = envwrap.get_env(config["env"]["name"]) - reset = True - while True: - if reset is True: pipe.send(env.reset()) - action = pipe.recv() - obs, reward, done, reset = env.step(action) - pipe.send((obs, reward, done, reset)) - -class AgentManager(object): - """ - Interact with the environment according to the learned policy, - """ - def __init__(self, proc_num, evaluation, policy_lock, batch_size, config): - self.evaluation = evaluation - self.policy_lock = policy_lock - self.batch_size = batch_size - self.config = config - - self.log_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["log_path"])) + "/%s" % config["name"] - self.load_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["save_model_path"])) - - ## placeholders for intermediate states (basis for rollout) - self.obs_loader = tf.placeholder(tf.float32, [self.batch_size, np.prod(self.config["env"]["obs_dims"])]) - - ## build model - self.valuerl = valuerl.ValueRL(self.config["name"], self.config["env"], self.config["policy_config"]) - self.policy_actions = self.valuerl.build_evalution_graph(self.obs_loader, mode="exploit" if self.evaluation else "explore") - - # interactors - self.agent_pipes, self.agent_child_pipes = list(zip(*[multiprocessing.Pipe() for _ in range(self.batch_size)])) - self.agents = [multiprocessing.Process(target=run_env, args=(self.agent_child_pipes[i],)) for i in range(self.batch_size)] - for agent in self.agents: agent.start() - self.obs = [pipe.recv() for pipe in self.agent_pipes] - self.total_rewards = [0. for _ in self.agent_pipes] - self.loaded_policy = False - - self.sess = tf.Session() - self.sess.run(tf.global_variables_initializer()) - - self.rollout_i = 0 - self.proc_num = proc_num - self.epoch = -1 - self.frame_total = 0 - self.hours = 0. - - self.first = True - - def get_action(self, obs): - if self.loaded_policy: - all_actions = self.sess.run(self.policy_actions, feed_dict={self.obs_loader: obs}) - all_actions = np.clip(all_actions, -1., 1.) 
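-            # Actions were clipped to the [-1, 1] box that the wrapped
-            # environments expect; keep one action row per environment pipe.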
- return all_actions[:self.batch_size] - else: - return [self.get_random_action() for _ in range(obs.shape[0])] - - def get_random_action(self, *args, **kwargs): - return np.random.random(self.config["env"]["action_dim"]) * 2 - 1 - - def step(self): - actions = self.get_action(np.stack(self.obs)) - self.first = False - [pipe.send(action) for pipe, action in zip(self.agent_pipes, actions)] - next_obs, rewards, dones, resets = list(zip(*[pipe.recv() for pipe in self.agent_pipes])) - - frames = list(zip(self.obs, next_obs, actions, rewards, dones)) - - self.obs = [o if resets[i] is False else self.agent_pipes[i].recv() for i, o in enumerate(next_obs)] - - for i, (t,r,reset) in enumerate(zip(self.total_rewards, rewards, resets)): - if reset: - self.total_rewards[i] = 0. - if self.evaluation and self.loaded_policy: - with portalocker.Lock(self.log_path+'.greedy.csv', mode="a") as f: f.write("%2f,%d,%d,%2f\n" % (self.hours, self.epoch, self.frame_total, t+r)) - - else: - self.total_rewards[i] = t + r - - if self.evaluation and np.any(resets): self.reload() - - self.rollout_i += 1 - return frames - - def reload(self): - if not os.path.exists("%s/%s.params.index" % (self.load_path ,self.valuerl.saveid)): return False - with self.policy_lock: - self.valuerl.load(self.sess, self.load_path) - self.epoch, self.frame_total, self.hours = self.sess.run([self.valuerl.epoch_n, self.valuerl.frame_n, self.valuerl.hours]) - self.loaded_policy = True - self.first = True - return True - -def main(proc_num, evaluation, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config): - try: - np.random.seed((proc_num * int(time.time())) % (2 ** 32 - 1)) - agentmanager = AgentManager(proc_num, evaluation, policy_lock, config["evaluator_config"]["batch_size"] if evaluation else config["agent_config"]["batch_size"], config) - frame_i = 0 - while True: - new_frames = agentmanager.step() - if not evaluation: - policy_replay_frame_queue.put(new_frames) - if model_replay_frame_queue is not None: model_replay_frame_queue.put(new_frames) - if frame_i % config["agent_config"]["reload_every_n"] == 0: agentmanager.reload() - frame_i += len(new_frames) - - except Exception as e: - print('Caught exception in agent process %d' % proc_num) - traceback.print_exc() - print() - try: - for i in agentmanager.agents: i.join() - except: - pass - raise e diff --git a/research/steve/config.py b/research/steve/config.py deleted file mode 100644 index 4a6da98c3..000000000 --- a/research/steve/config.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import print_function -from builtins import str -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -import argparse, json, util, traceback - -parser = argparse.ArgumentParser() -parser.add_argument("config") -parser.add_argument("root_gpu", type=int) -parser.add_argument("--resume", action="store_true") -args = parser.parse_args() - -config_loc = args.config -config = util.ConfigDict(config_loc) - -config["name"] = config_loc.split("/")[-1][:-5] -config["resume"] = args.resume - -cstr = str(config) - -def log_config(): - HPS_PATH = util.create_directory("output/" + config["env"]["name"] + "/" + config["name"] + "/" + config["log_path"]) + "/hps.json" - print("ROOT GPU: " + str(args.root_gpu) + "\n" + str(cstr)) - with open(HPS_PATH, "w") as f: - f.write("ROOT GPU: " + str(args.root_gpu) + "\n" + str(cstr)) \ No newline at end of file diff --git a/research/steve/config/algos/ddpg.json b/research/steve/config/algos/ddpg.json deleted file mode 100644 index e76c10698..000000000 --- a/research/steve/config/algos/ddpg.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "inherits": ["config/core/basic.json"] -} \ No newline at end of file diff --git a/research/steve/config/algos/mve_mean.json b/research/steve/config/algos/mve_mean.json deleted file mode 100644 index 729bccc62..000000000 --- a/research/steve/config/algos/mve_mean.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "inherits": [ - "config/core/basic.json", - "config/core/model.json" - ], - "updates":{ - "policy_config": { - "value_expansion": { - "rollout_len": 3, - "mean_k_return": true - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/algos/mve_tdk.json b/research/steve/config/algos/mve_tdk.json deleted file mode 100644 index 222fd40c3..000000000 --- a/research/steve/config/algos/mve_tdk.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "inherits": [ - "config/core/basic.json", - "config/core/model.json" - ], - "updates":{ - "policy_config": { - "value_expansion": { - "rollout_len": 3, - "tdk_trick": true - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/algos/mve_tdlambda.json b/research/steve/config/algos/mve_tdlambda.json deleted file mode 100644 index 3414dda5d..000000000 --- a/research/steve/config/algos/mve_tdlambda.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "inherits": [ - "config/core/basic.json", - "config/core/model.json" - ], - "updates":{ - "policy_config": { - "value_expansion": { - "rollout_len": 3, - "lambda_return": 0.25 - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/algos/steve.json b/research/steve/config/algos/steve.json deleted file mode 100644 index ca2bc0395..000000000 --- a/research/steve/config/algos/steve.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "inherits": [ - "config/core/basic.json", - "config/core/model.json", - "config/core/bayesian.json" - ], - "updates":{ - "policy_config": { - "value_expansion": { - "rollout_len": 3, - "steve_reweight": true - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/algos/steve_cov.json b/research/steve/config/algos/steve_cov.json deleted file mode 100644 index 4dbf46e19..000000000 --- a/research/steve/config/algos/steve_cov.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "inherits": [ - "config/core/basic.json", - "config/core/model.json", - "config/core/bayesian.json" - ], - "updates":{ - "policy_config": { - "value_expansion": { - "rollout_len": 3, - "steve_reweight": true, - "covariances": true - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/core/basic.json 
b/research/steve/config/core/basic.json deleted file mode 100644 index 411e7b65e..000000000 --- a/research/steve/config/core/basic.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "updates": { - "output_root": "output", - "save_model_path": "checkpoints", - "log_path": "log", - - "agent_config": { - "count": 1, - "batch_size": 8, - "reload_every_n": 1, - "full_random_n": 10000 - }, - - "evaluator_config": { - "count": 2, - "batch_size": 1 - }, - - "policy_config": { - "algo": "ddpg", - "hidden_dim": 128, - "explore_chance": 0.05, - "batch_size": 512, - "replay_size": 1000000, - "frames_before_learning": 10000, - "log_every_n": 500, - "epoch_every_n": 500, - "backup_every_n": 2500000, - "frames_per_update": 0.25 - } - } -} \ No newline at end of file diff --git a/research/steve/config/core/bayesian.json b/research/steve/config/core/bayesian.json deleted file mode 100644 index ea7d95543..000000000 --- a/research/steve/config/core/bayesian.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "updates": { - "policy_config": { - "bayesian": { - "ensemble_size": 4, - "train_sample_count": 4, - "eval_sample_count": 4 - } - }, - - "*model_config": { - "bayesian": { - "transition": { - "ensemble_size": 4, - "train_sample_count": 4, - "eval_sample_count": 4 - }, - "reward": { - "ensemble_size": 4, - "train_sample_count": 4, - "eval_sample_count": 4 - } - } - } - } -} \ No newline at end of file diff --git a/research/steve/config/core/model.json b/research/steve/config/core/model.json deleted file mode 100644 index 485146ab0..000000000 --- a/research/steve/config/core/model.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "updates": { - "model_config": { - "transition_hidden_dim": 512, - "aux_hidden_dim": 128, - "batch_size": 512, - "replay_size": 1000000, - "frames_before_learning": 10000, - "log_every_n": 500, - "epoch_every_n": 500, - "backup_every_n": 2500000, - "pretrain_n": 10000, - "frames_per_update": 0.25 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/flagrun.json b/research/steve/config/envs/flagrun.json deleted file mode 100644 index 09ecc7cde..000000000 --- a/research/steve/config/envs/flagrun.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "RoboschoolHumanoidFlagrun-v1", - "obs_dims": [44], - "action_dim": 17, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/halfcheetah.json b/research/steve/config/envs/halfcheetah.json deleted file mode 100644 index e0c9b3897..000000000 --- a/research/steve/config/envs/halfcheetah.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "HalfCheetah-v1", - "obs_dims": [17], - "action_dim": 6, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/hardcore.json b/research/steve/config/envs/hardcore.json deleted file mode 100644 index af372b28b..000000000 --- a/research/steve/config/envs/hardcore.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "BipedalWalkerHardcore-v2", - "obs_dims": [24], - "action_dim": 4, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/hopper.json b/research/steve/config/envs/hopper.json deleted file mode 100644 index 012def185..000000000 --- a/research/steve/config/envs/hopper.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "Hopper-v1", - "obs_dims": [11], - 
"action_dim": 3, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/humanoid.json b/research/steve/config/envs/humanoid.json deleted file mode 100644 index 39aeeb295..000000000 --- a/research/steve/config/envs/humanoid.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "Humanoid-v1", - "obs_dims": [376], - "action_dim": 17, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/rshum.json b/research/steve/config/envs/rshum.json deleted file mode 100644 index 0ad54b2bf..000000000 --- a/research/steve/config/envs/rshum.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "RoboschoolHumanoid-v1", - "obs_dims": [44], - "action_dim": 17, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/swimmer.json b/research/steve/config/envs/swimmer.json deleted file mode 100644 index 0fcf2f32e..000000000 --- a/research/steve/config/envs/swimmer.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "Swimmer-v1", - "obs_dims": [8], - "action_dim": 2, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/envs/walker2d.json b/research/steve/config/envs/walker2d.json deleted file mode 100644 index 03ed94f74..000000000 --- a/research/steve/config/envs/walker2d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "updates": { - "env": { - "name": "Walker2d-v1", - "obs_dims": [17], - "action_dim": 6, - "reward_scale":1.0, - "discount":0.99, - "max_frames": 1000 - } - } -} \ No newline at end of file diff --git a/research/steve/config/experimental_setups/speedrun.json b/research/steve/config/experimental_setups/speedrun.json deleted file mode 100644 index b34a9b706..000000000 --- a/research/steve/config/experimental_setups/speedrun.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "updates": { - "policy_config": { - "frames_per_update": false - }, - "*model_config":{ - "frames_per_update": false, - "pretrain_n": false - } - } -} \ No newline at end of file diff --git a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk0.json b/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk0.json deleted file mode 100644 index da54f6310..000000000 --- a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/core/bayesian", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk1.json b/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk1.json deleted file mode 100644 index da54f6310..000000000 --- a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/core/bayesian", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk2.json b/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk2.json deleted file mode 100644 index da54f6310..000000000 --- a/research/steve/config/experiments/ablations/baselines/ensemble_mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/core/bayesian", "config/envs/humanoid.json"]} diff 
--git a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda0.json b/research/steve/config/experiments/ablations/baselines/mve_25tdlambda0.json deleted file mode 100644 index b9e3dcd4b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda0.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.25 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda1.json b/research/steve/config/experiments/ablations/baselines/mve_25tdlambda1.json deleted file mode 100644 index b9e3dcd4b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda1.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.25 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda2.json b/research/steve/config/experiments/ablations/baselines/mve_25tdlambda2.json deleted file mode 100644 index b9e3dcd4b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_25tdlambda2.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.25 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda0.json b/research/steve/config/experiments/ablations/baselines/mve_75tdlambda0.json deleted file mode 100644 index 7366ba77b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda0.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.75 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda1.json b/research/steve/config/experiments/ablations/baselines/mve_75tdlambda1.json deleted file mode 100644 index 7366ba77b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda1.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.75 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda2.json b/research/steve/config/experiments/ablations/baselines/mve_75tdlambda2.json deleted file mode 100644 index 7366ba77b..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_75tdlambda2.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/mve_tdlambda.json", "config/envs/humanoid.json"], - "updates":{ - "policy_config": { - "value_expansion": { - "lambda_return": 0.75 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/baselines/mve_meank0.json b/research/steve/config/experiments/ablations/baselines/mve_meank0.json deleted file mode 100644 index ce7d9b1ea..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_meank0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_mean.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/mve_meank1.json 
b/research/steve/config/experiments/ablations/baselines/mve_meank1.json deleted file mode 100644 index ce7d9b1ea..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_meank1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_mean.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/mve_meank2.json b/research/steve/config/experiments/ablations/baselines/mve_meank2.json deleted file mode 100644 index ce7d9b1ea..000000000 --- a/research/steve/config/experiments/ablations/baselines/mve_meank2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_mean.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/steve_cov0.json b/research/steve/config/experiments/ablations/baselines/steve_cov0.json deleted file mode 100644 index df2e8a0d8..000000000 --- a/research/steve/config/experiments/ablations/baselines/steve_cov0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve_cov.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/steve_cov1.json b/research/steve/config/experiments/ablations/baselines/steve_cov1.json deleted file mode 100644 index df2e8a0d8..000000000 --- a/research/steve/config/experiments/ablations/baselines/steve_cov1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve_cov.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/baselines/steve_cov2.json b/research/steve/config/experiments/ablations/baselines/steve_cov2.json deleted file mode 100644 index df2e8a0d8..000000000 --- a/research/steve/config/experiments/ablations/baselines/steve_cov2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve_cov.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/ablations/horizons/steve_1h0.json b/research/steve/config/experiments/ablations/horizons/steve_1h0.json deleted file mode 100644 index 48b6730b7..000000000 --- a/research/steve/config/experiments/ablations/horizons/steve_1h0.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"], - "updates": { - "policy_config": { - "value_expansion": { - "rollout_len": 1 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/horizons/steve_1h1.json b/research/steve/config/experiments/ablations/horizons/steve_1h1.json deleted file mode 100644 index 48b6730b7..000000000 --- a/research/steve/config/experiments/ablations/horizons/steve_1h1.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"], - "updates": { - "policy_config": { - "value_expansion": { - "rollout_len": 1 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/horizons/steve_1h2.json b/research/steve/config/experiments/ablations/horizons/steve_1h2.json deleted file mode 100644 index 48b6730b7..000000000 --- a/research/steve/config/experiments/ablations/horizons/steve_1h2.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"], - "updates": { - "policy_config": { - "value_expansion": { - "rollout_len": 1 - } - } - } -} diff --git a/research/steve/config/experiments/ablations/horizons/steve_2h0.json b/research/steve/config/experiments/ablations/horizons/steve_2h0.json deleted file mode 100644 index 48b6730b7..000000000 --- a/research/steve/config/experiments/ablations/horizons/steve_2h0.json 
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/ablations/horizons/steve_2h1.json b/research/steve/config/experiments/ablations/horizons/steve_2h1.json
deleted file mode 100644
index 48b6730b7..000000000
--- a/research/steve/config/experiments/ablations/horizons/steve_2h1.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/ablations/horizons/steve_2h2.json b/research/steve/config/experiments/ablations/horizons/steve_2h2.json
deleted file mode 100644
index 48b6730b7..000000000
--- a/research/steve/config/experiments/ablations/horizons/steve_2h2.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/ablations/horizons/steve_5h0.json b/research/steve/config/experiments/ablations/horizons/steve_5h0.json
deleted file mode 100644
index 48b6730b7..000000000
--- a/research/steve/config/experiments/ablations/horizons/steve_5h0.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/ablations/horizons/steve_5h1.json b/research/steve/config/experiments/ablations/horizons/steve_5h1.json
deleted file mode 100644
index 48b6730b7..000000000
--- a/research/steve/config/experiments/ablations/horizons/steve_5h1.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/ablations/horizons/steve_5h2.json b/research/steve/config/experiments/ablations/horizons/steve_5h2.json
deleted file mode 100644
index 48b6730b7..000000000
--- a/research/steve/config/experiments/ablations/horizons/steve_5h2.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "inherits": ["config/algos/steve.json", "config/envs/humanoid.json"],
-  "updates": {
-    "policy_config": {
-      "value_expansion": {
-        "rollout_len": 1
-      }
-    }
-  }
-}
diff --git a/research/steve/config/experiments/goodruns/flagrun/ddpg0.json b/research/steve/config/experiments/goodruns/flagrun/ddpg0.json
deleted file mode 100644
index a68ee412d..000000000
--- a/research/steve/config/experiments/goodruns/flagrun/ddpg0.json
+++ /dev/null
@@ -1 +0,0 @@
-{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json"]}
diff --git a/research/steve/config/experiments/goodruns/flagrun/ddpg1.json b/research/steve/config/experiments/goodruns/flagrun/ddpg1.json
deleted file mode 100644
index a68ee412d..000000000
--- a/research/steve/config/experiments/goodruns/flagrun/ddpg1.json
+++ /dev/null
@@ -1 +0,0 @@
-{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json"]}
diff --git a/research/steve/config/experiments/goodruns/flagrun/ddpg2.json b/research/steve/config/experiments/goodruns/flagrun/ddpg2.json
deleted file mode 100644
index a68ee412d..000000000
--- a/research/steve/config/experiments/goodruns/flagrun/ddpg2.json
+++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/ddpg3.json b/research/steve/config/experiments/goodruns/flagrun/ddpg3.json deleted file mode 100644 index a68ee412d..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/mve_tdk0.json b/research/steve/config/experiments/goodruns/flagrun/mve_tdk0.json deleted file mode 100644 index 8da85dd37..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/mve_tdk1.json b/research/steve/config/experiments/goodruns/flagrun/mve_tdk1.json deleted file mode 100644 index 8da85dd37..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/mve_tdk2.json b/research/steve/config/experiments/goodruns/flagrun/mve_tdk2.json deleted file mode 100644 index 8da85dd37..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/mve_tdk3.json b/research/steve/config/experiments/goodruns/flagrun/mve_tdk3.json deleted file mode 100644 index 8da85dd37..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/steve0.json b/research/steve/config/experiments/goodruns/flagrun/steve0.json deleted file mode 100644 index 21d329302..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/steve1.json b/research/steve/config/experiments/goodruns/flagrun/steve1.json deleted file mode 100644 index 21d329302..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/steve2.json b/research/steve/config/experiments/goodruns/flagrun/steve2.json deleted file mode 100644 index 21d329302..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/flagrun/steve3.json b/research/steve/config/experiments/goodruns/flagrun/steve3.json deleted file mode 100644 index 21d329302..000000000 --- a/research/steve/config/experiments/goodruns/flagrun/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/ddpg0.json b/research/steve/config/experiments/goodruns/halfcheetah/ddpg0.json deleted file mode 100644 index 
fc9d9eef2..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/ddpg1.json b/research/steve/config/experiments/goodruns/halfcheetah/ddpg1.json deleted file mode 100644 index fc9d9eef2..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/ddpg2.json b/research/steve/config/experiments/goodruns/halfcheetah/ddpg2.json deleted file mode 100644 index fc9d9eef2..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/ddpg3.json b/research/steve/config/experiments/goodruns/halfcheetah/ddpg3.json deleted file mode 100644 index fc9d9eef2..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk0.json b/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk0.json deleted file mode 100644 index dcae7eb48..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk1.json b/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk1.json deleted file mode 100644 index dcae7eb48..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk2.json b/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk2.json deleted file mode 100644 index dcae7eb48..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk3.json b/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk3.json deleted file mode 100644 index dcae7eb48..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/steve0.json b/research/steve/config/experiments/goodruns/halfcheetah/steve0.json deleted file mode 100644 index f2fd36d3b..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/steve1.json b/research/steve/config/experiments/goodruns/halfcheetah/steve1.json deleted file mode 100644 index f2fd36d3b..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": 
["config/algos/steve.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/steve2.json b/research/steve/config/experiments/goodruns/halfcheetah/steve2.json deleted file mode 100644 index f2fd36d3b..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/halfcheetah/steve3.json b/research/steve/config/experiments/goodruns/halfcheetah/steve3.json deleted file mode 100644 index f2fd36d3b..000000000 --- a/research/steve/config/experiments/goodruns/halfcheetah/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/halfcheetah.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/ddpg0.json b/research/steve/config/experiments/goodruns/hardcore/ddpg0.json deleted file mode 100644 index 3dce87b15..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/ddpg1.json b/research/steve/config/experiments/goodruns/hardcore/ddpg1.json deleted file mode 100644 index 3dce87b15..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/ddpg2.json b/research/steve/config/experiments/goodruns/hardcore/ddpg2.json deleted file mode 100644 index 3dce87b15..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/ddpg3.json b/research/steve/config/experiments/goodruns/hardcore/ddpg3.json deleted file mode 100644 index 3dce87b15..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/mve_tdk0.json b/research/steve/config/experiments/goodruns/hardcore/mve_tdk0.json deleted file mode 100644 index 095d8763a..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/mve_tdk1.json b/research/steve/config/experiments/goodruns/hardcore/mve_tdk1.json deleted file mode 100644 index 095d8763a..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/mve_tdk2.json b/research/steve/config/experiments/goodruns/hardcore/mve_tdk2.json deleted file mode 100644 index 095d8763a..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/mve_tdk3.json b/research/steve/config/experiments/goodruns/hardcore/mve_tdk3.json deleted file mode 100644 
index 095d8763a..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/steve0.json b/research/steve/config/experiments/goodruns/hardcore/steve0.json deleted file mode 100644 index f09420852..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/steve1.json b/research/steve/config/experiments/goodruns/hardcore/steve1.json deleted file mode 100644 index f09420852..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/steve2.json b/research/steve/config/experiments/goodruns/hardcore/steve2.json deleted file mode 100644 index f09420852..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hardcore/steve3.json b/research/steve/config/experiments/goodruns/hardcore/steve3.json deleted file mode 100644 index f09420852..000000000 --- a/research/steve/config/experiments/goodruns/hardcore/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hardcore.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/ddpg0.json b/research/steve/config/experiments/goodruns/hopper/ddpg0.json deleted file mode 100644 index 4916ab116..000000000 --- a/research/steve/config/experiments/goodruns/hopper/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/ddpg1.json b/research/steve/config/experiments/goodruns/hopper/ddpg1.json deleted file mode 100644 index 4916ab116..000000000 --- a/research/steve/config/experiments/goodruns/hopper/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/ddpg2.json b/research/steve/config/experiments/goodruns/hopper/ddpg2.json deleted file mode 100644 index 4916ab116..000000000 --- a/research/steve/config/experiments/goodruns/hopper/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/ddpg3.json b/research/steve/config/experiments/goodruns/hopper/ddpg3.json deleted file mode 100644 index 4916ab116..000000000 --- a/research/steve/config/experiments/goodruns/hopper/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/mve_tdk0.json b/research/steve/config/experiments/goodruns/hopper/mve_tdk0.json deleted file mode 100644 index 40663e8b9..000000000 --- a/research/steve/config/experiments/goodruns/hopper/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/mve_tdk1.json 
b/research/steve/config/experiments/goodruns/hopper/mve_tdk1.json deleted file mode 100644 index 40663e8b9..000000000 --- a/research/steve/config/experiments/goodruns/hopper/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/mve_tdk2.json b/research/steve/config/experiments/goodruns/hopper/mve_tdk2.json deleted file mode 100644 index 40663e8b9..000000000 --- a/research/steve/config/experiments/goodruns/hopper/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/mve_tdk3.json b/research/steve/config/experiments/goodruns/hopper/mve_tdk3.json deleted file mode 100644 index 40663e8b9..000000000 --- a/research/steve/config/experiments/goodruns/hopper/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/steve0.json b/research/steve/config/experiments/goodruns/hopper/steve0.json deleted file mode 100644 index 708ce8913..000000000 --- a/research/steve/config/experiments/goodruns/hopper/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/steve1.json b/research/steve/config/experiments/goodruns/hopper/steve1.json deleted file mode 100644 index 708ce8913..000000000 --- a/research/steve/config/experiments/goodruns/hopper/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/steve2.json b/research/steve/config/experiments/goodruns/hopper/steve2.json deleted file mode 100644 index 708ce8913..000000000 --- a/research/steve/config/experiments/goodruns/hopper/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/hopper/steve3.json b/research/steve/config/experiments/goodruns/hopper/steve3.json deleted file mode 100644 index 708ce8913..000000000 --- a/research/steve/config/experiments/goodruns/hopper/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/hopper.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/ddpg0.json b/research/steve/config/experiments/goodruns/humanoid/ddpg0.json deleted file mode 100644 index 3bd27e7d5..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/ddpg1.json b/research/steve/config/experiments/goodruns/humanoid/ddpg1.json deleted file mode 100644 index 3bd27e7d5..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/ddpg2.json b/research/steve/config/experiments/goodruns/humanoid/ddpg2.json deleted file mode 100644 index 3bd27e7d5..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json"]} diff --git 
a/research/steve/config/experiments/goodruns/humanoid/ddpg3.json b/research/steve/config/experiments/goodruns/humanoid/ddpg3.json deleted file mode 100644 index 3bd27e7d5..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/mve_tdk0.json b/research/steve/config/experiments/goodruns/humanoid/mve_tdk0.json deleted file mode 100644 index 542ed8d80..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/mve_tdk1.json b/research/steve/config/experiments/goodruns/humanoid/mve_tdk1.json deleted file mode 100644 index 542ed8d80..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/mve_tdk2.json b/research/steve/config/experiments/goodruns/humanoid/mve_tdk2.json deleted file mode 100644 index 542ed8d80..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/mve_tdk3.json b/research/steve/config/experiments/goodruns/humanoid/mve_tdk3.json deleted file mode 100644 index 542ed8d80..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/steve0.json b/research/steve/config/experiments/goodruns/humanoid/steve0.json deleted file mode 100644 index 835b3f621..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/steve1.json b/research/steve/config/experiments/goodruns/humanoid/steve1.json deleted file mode 100644 index 835b3f621..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/steve2.json b/research/steve/config/experiments/goodruns/humanoid/steve2.json deleted file mode 100644 index 835b3f621..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/humanoid/steve3.json b/research/steve/config/experiments/goodruns/humanoid/steve3.json deleted file mode 100644 index 835b3f621..000000000 --- a/research/steve/config/experiments/goodruns/humanoid/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/ddpg0.json b/research/steve/config/experiments/goodruns/rshum/ddpg0.json deleted file mode 100644 index 9fd98d11e..000000000 --- a/research/steve/config/experiments/goodruns/rshum/ddpg0.json 
+++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/ddpg1.json b/research/steve/config/experiments/goodruns/rshum/ddpg1.json deleted file mode 100644 index 9fd98d11e..000000000 --- a/research/steve/config/experiments/goodruns/rshum/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/ddpg2.json b/research/steve/config/experiments/goodruns/rshum/ddpg2.json deleted file mode 100644 index 9fd98d11e..000000000 --- a/research/steve/config/experiments/goodruns/rshum/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/ddpg3.json b/research/steve/config/experiments/goodruns/rshum/ddpg3.json deleted file mode 100644 index 9fd98d11e..000000000 --- a/research/steve/config/experiments/goodruns/rshum/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/mve_tdk0.json b/research/steve/config/experiments/goodruns/rshum/mve_tdk0.json deleted file mode 100644 index ade2434ee..000000000 --- a/research/steve/config/experiments/goodruns/rshum/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/mve_tdk1.json b/research/steve/config/experiments/goodruns/rshum/mve_tdk1.json deleted file mode 100644 index ade2434ee..000000000 --- a/research/steve/config/experiments/goodruns/rshum/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/mve_tdk2.json b/research/steve/config/experiments/goodruns/rshum/mve_tdk2.json deleted file mode 100644 index ade2434ee..000000000 --- a/research/steve/config/experiments/goodruns/rshum/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/mve_tdk3.json b/research/steve/config/experiments/goodruns/rshum/mve_tdk3.json deleted file mode 100644 index ade2434ee..000000000 --- a/research/steve/config/experiments/goodruns/rshum/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/steve0.json b/research/steve/config/experiments/goodruns/rshum/steve0.json deleted file mode 100644 index 510854fbf..000000000 --- a/research/steve/config/experiments/goodruns/rshum/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/steve1.json b/research/steve/config/experiments/goodruns/rshum/steve1.json deleted file mode 100644 index 510854fbf..000000000 --- a/research/steve/config/experiments/goodruns/rshum/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/steve2.json b/research/steve/config/experiments/goodruns/rshum/steve2.json deleted file mode 100644 index 510854fbf..000000000 --- a/research/steve/config/experiments/goodruns/rshum/steve2.json +++ 
/dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/rshum/steve3.json b/research/steve/config/experiments/goodruns/rshum/steve3.json deleted file mode 100644 index 510854fbf..000000000 --- a/research/steve/config/experiments/goodruns/rshum/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/rshum.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/ddpg0.json b/research/steve/config/experiments/goodruns/swimmer/ddpg0.json deleted file mode 100644 index a94fc7c52..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/ddpg1.json b/research/steve/config/experiments/goodruns/swimmer/ddpg1.json deleted file mode 100644 index a94fc7c52..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/ddpg2.json b/research/steve/config/experiments/goodruns/swimmer/ddpg2.json deleted file mode 100644 index a94fc7c52..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/ddpg3.json b/research/steve/config/experiments/goodruns/swimmer/ddpg3.json deleted file mode 100644 index a94fc7c52..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/mve_tdk0.json b/research/steve/config/experiments/goodruns/swimmer/mve_tdk0.json deleted file mode 100644 index 142101178..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/mve_tdk1.json b/research/steve/config/experiments/goodruns/swimmer/mve_tdk1.json deleted file mode 100644 index 142101178..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/mve_tdk2.json b/research/steve/config/experiments/goodruns/swimmer/mve_tdk2.json deleted file mode 100644 index 142101178..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/mve_tdk3.json b/research/steve/config/experiments/goodruns/swimmer/mve_tdk3.json deleted file mode 100644 index 142101178..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/steve0.json b/research/steve/config/experiments/goodruns/swimmer/steve0.json deleted file mode 100644 index d33583283..000000000 --- 
a/research/steve/config/experiments/goodruns/swimmer/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/steve1.json b/research/steve/config/experiments/goodruns/swimmer/steve1.json deleted file mode 100644 index d33583283..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/steve2.json b/research/steve/config/experiments/goodruns/swimmer/steve2.json deleted file mode 100644 index d33583283..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/swimmer/steve3.json b/research/steve/config/experiments/goodruns/swimmer/steve3.json deleted file mode 100644 index d33583283..000000000 --- a/research/steve/config/experiments/goodruns/swimmer/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/swimmer.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/ddpg0.json b/research/steve/config/experiments/goodruns/walker2d/ddpg0.json deleted file mode 100644 index 81fe2ff56..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/ddpg1.json b/research/steve/config/experiments/goodruns/walker2d/ddpg1.json deleted file mode 100644 index 81fe2ff56..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/ddpg2.json b/research/steve/config/experiments/goodruns/walker2d/ddpg2.json deleted file mode 100644 index 81fe2ff56..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/ddpg2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/ddpg3.json b/research/steve/config/experiments/goodruns/walker2d/ddpg3.json deleted file mode 100644 index 81fe2ff56..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/ddpg3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/mve_tdk0.json b/research/steve/config/experiments/goodruns/walker2d/mve_tdk0.json deleted file mode 100644 index d8420effa..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/mve_tdk1.json b/research/steve/config/experiments/goodruns/walker2d/mve_tdk1.json deleted file mode 100644 index d8420effa..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/mve_tdk2.json 
b/research/steve/config/experiments/goodruns/walker2d/mve_tdk2.json deleted file mode 100644 index d8420effa..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/mve_tdk2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/mve_tdk3.json b/research/steve/config/experiments/goodruns/walker2d/mve_tdk3.json deleted file mode 100644 index d8420effa..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/mve_tdk3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/steve0.json b/research/steve/config/experiments/goodruns/walker2d/steve0.json deleted file mode 100644 index a98c410ca..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/steve1.json b/research/steve/config/experiments/goodruns/walker2d/steve1.json deleted file mode 100644 index a98c410ca..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/steve2.json b/research/steve/config/experiments/goodruns/walker2d/steve2.json deleted file mode 100644 index a98c410ca..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/steve2.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/goodruns/walker2d/steve3.json b/research/steve/config/experiments/goodruns/walker2d/steve3.json deleted file mode 100644 index a98c410ca..000000000 --- a/research/steve/config/experiments/goodruns/walker2d/steve3.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/walker2d.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg0.json b/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg0.json deleted file mode 100644 index b7280d71e..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg1.json b/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg1.json deleted file mode 100644 index b7280d71e..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk0.json b/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk0.json deleted file mode 100644 index 73252566b..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk1.json 
b/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk1.json deleted file mode 100644 index 73252566b..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_steve0.json b/research/steve/config/experiments/speedruns/flagrun/speedy_steve0.json deleted file mode 100644 index ba5708f1f..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/flagrun/speedy_steve1.json b/research/steve/config/experiments/speedruns/flagrun/speedy_steve1.json deleted file mode 100644 index ba5708f1f..000000000 --- a/research/steve/config/experiments/speedruns/flagrun/speedy_steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/flagrun.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg0.json b/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg0.json deleted file mode 100644 index eb07f31dc..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg1.json b/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg1.json deleted file mode 100644 index eb07f31dc..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_ddpg1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/ddpg.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk0.json b/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk0.json deleted file mode 100644 index 51a3bdcb5..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk1.json b/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk1.json deleted file mode 100644 index 51a3bdcb5..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_mve_tdk1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/mve_tdk.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_steve0.json b/research/steve/config/experiments/speedruns/humanoid/speedy_steve0.json deleted file mode 100644 index 0d2bfaa4e..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_steve0.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/config/experiments/speedruns/humanoid/speedy_steve1.json b/research/steve/config/experiments/speedruns/humanoid/speedy_steve1.json deleted file 
mode 100644 index 0d2bfaa4e..000000000 --- a/research/steve/config/experiments/speedruns/humanoid/speedy_steve1.json +++ /dev/null @@ -1 +0,0 @@ -{"inherits": ["config/algos/steve.json", "config/envs/humanoid.json", "config/experimental_setups/speedrun.json"]} diff --git a/research/steve/envwrap.py b/research/steve/envwrap.py deleted file mode 100644 index bd88c3035..000000000 --- a/research/steve/envwrap.py +++ /dev/null @@ -1,106 +0,0 @@ -from builtins import object -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -try: - import roboschool -except: - pass -import gym -import numpy as np - -from config import config - -MAX_FRAMES = config["env"]["max_frames"] - -gym.logger.level=40 - -def get_env(env_name, *args, **kwargs): - MAPPING = { - "CartPole-v0": CartPoleWrapper, - } - if env_name in MAPPING: return MAPPING[env_name](env_name, *args, **kwargs) - else: return NoTimeLimitMujocoWrapper(env_name, *args, **kwargs) - -class GymWrapper(object): - """ - Generic wrapper for OpenAI gym environments. - """ - def __init__(self, env_name): - self.internal_env = gym.make(env_name) - self.observation_space = self.internal_env.observation_space - self.action_space = self.internal_env.action_space - self.custom_init() - - def custom_init(self): - pass - - def reset(self): - self.clock = 0 - return self.preprocess_obs(self.internal_env.reset()) - - # returns normalized actions - def sample(self): - return self.action_space.sample() - - # this is used for converting continuous approximations back to the original domain - def normalize_actions(self, actions): - return actions - - # puts actions into a form where they can be predicted. by default, called after sample() - def unnormalize_actions(self, actions): - return actions - - def preprocess_obs(self, obs): - # return np.append(obs, [self.clock/float(MAX_FRAMES)]) - return obs - - def step(self, normalized_action): - out = self.internal_env.step(normalized_action) - self.clock += 1 - obs, reward, done = self.preprocess_obs(out[0]), out[1], float(out[2]) - reset = done == 1. or self.clock == MAX_FRAMES - return obs, reward, done, reset - - def render_rollout(self, states): - ## states is numpy array of size [timesteps, state] - self.internal_env.reset() - for state in states: - self.internal_env.env.state = state - self.internal_env.render() - -class CartPoleWrapper(GymWrapper): - """ - Wrap CartPole. - """ - def sample(self): - return np.array([np.random.uniform(0., 1.)]) - - def normalize_actions(self, action): - return 1 if action[0] >= 0 else 0 - - def unnormalize_actions(self, action): - return 2. * action - 1. - -class NoTimeLimitMujocoWrapper(GymWrapper): - """ - Wrap Mujoco-style environments, removing the termination condition after time. - This is needed to keep it Markovian. 
- """ - def __init__(self, env_name): - self.internal_env = gym.make(env_name).env - self.observation_space = self.internal_env.observation_space - self.action_space = self.internal_env.action_space - self.custom_init() diff --git a/research/steve/learner.py b/research/steve/learner.py deleted file mode 100644 index 8a4c074cd..000000000 --- a/research/steve/learner.py +++ /dev/null @@ -1,272 +0,0 @@ -from __future__ import division -from __future__ import print_function -from builtins import zip -from builtins import range -from builtins import object -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import traceback, threading, time, warnings -import tensorflow as tf -import numpy as np - -import util -from replay import ReplayBuffer - -class Learner(object): - """ - Generic object which runs the main training loop of anything that trains using - a replay buffer. Handles updating, logging, saving/loading, batching, etc. - """ - def __init__(self, interactor_queue, lock, config, env_config, learner_config, **bonus_kwargs): - self.learner_name = self.learner_name() - self.interactor_queue = interactor_queue - self.learner_lock = lock - self.config = config - self.env_config = env_config - self.learner_config = learner_config - self.bonus_kwargs = bonus_kwargs - self.kill_threads = False - self.permit_desync = False - self.need_frames_notification = threading.Condition() - self._reset_inspections() - self.total_frames = 0 - - self.save_path = util.create_directory("%s/%s/%s/%s" % (self.config["output_root"], self.config["env"]["name"], self.config["name"], self.config["save_model_path"])) - self.log_path = util.create_directory("%s/%s/%s/%s" % (self.config["output_root"], self.config["env"]["name"], self.config["name"], self.config["log_path"])) + "/%s.log" % self.learner_name - - # replay buffer to store data - self.replay_buffer_lock = threading.RLock() - self.replay_buffer = ReplayBuffer(self.learner_config["replay_size"], - np.prod(self.env_config["obs_dims"]), - self.env_config["action_dim"]) - - # data loaders pull data from the replay buffer and put it into the tfqueue for model usage - self.data_loaders = self.make_loader_placeholders() - queue_capacity = np.ceil(1./self.learner_config["frames_per_update"]) if self.learner_config["frames_per_update"] else 100 - self.tf_queue = tf.FIFOQueue(capacity=queue_capacity, dtypes=[dl.dtype for dl in self.data_loaders]) - self.enqueue_op = self.tf_queue.enqueue(self.data_loaders) - self.current_batch = self.tf_queue.dequeue() - - # build the TF graph for the actual model to train - self.core, self.train_losses, self.train_ops, self.inspect_losses = self.make_core_model() - self.sess = tf.Session() - self.sess.run(tf.global_variables_initializer()) - - ## Mandatory functions to override - def learner_name(self): raise Exception('unimplemented: learner_name') - def make_loader_placeholders(self): raise 
Exception('unimplemented: make_loader_placeholders') - def make_core_model(self): raise Exception('unimplemented: make_core_model') - - ## Optional functions to override - def initialize(self): warnings.warn('unimplemented: initialize') - def resume_from_checkpoint(self, epoch): warnings.warn('unimplemented: resume_from_checkpoint') - def checkpoint(self): warnings.warn('unimplemented: checkpoint') - def backup(self): warnings.warn('unimplemented: backup') - - ## Internal functions - def _start(self): - # fetch data from the interactors to pre-fill the replay buffer - self.prefetch_thread = threading.Thread(target=self._poll_interactors, args=(True, self.learner_config["frames_before_learning"],)) - self.prefetch_thread.start() - self.prefetch_thread.join() - - # start the interactor and data loader - self.data_load_thread = threading.Thread(target=self._run_enqueue_data) - self.data_load_thread.start() - - # initialize the learner, pretraining if needed - if self.config["resume"]: self._resume_from_checkpoint() - else: self._initialize() - - # re-sync everything, and start up interactions with the environment - self.interactor_poll_thread = threading.Thread(target=self._poll_interactors) - self.interactor_poll_thread.start() - - # start the clock - self._last_checkpoint_time = time.time() - - def _learn(self, permit_desync=False, log=True, checkpoint=True, backup=True): - # this is to keep the frames/update synced properly - if self.learner_config["frames_per_update"] is not False and not permit_desync: - if not self._have_enough_frames(): - with self.need_frames_notification: - self.need_frames_notification.notify() - return - - # log - if log and (self.update_i + 1) % self.learner_config["log_every_n"] == 0: - self._log() - - # checkpoint - if checkpoint and (self.update_i + 1) % self.learner_config["epoch_every_n"] == 0: - self._checkpoint() - - # backup - if backup and (self.update_i + 1) % self.learner_config["backup_every_n"] == 0: - self._backup() - - # train - self._training_step() - - def _have_enough_frames(self): - gathered_frames = self.total_frames - self.learner_config["frames_before_learning"] - return gathered_frames > self.learner_config["frames_per_update"] * self.update_i - - def _initialize(self): - self.epoch = 0 - self.update_i = 0 - self.hours = 0 - self._last_checkpoint_time = time.time() - - self.initialize() - - if self.learner_config["pretrain_n"]: self._pretrain() - self._checkpoint() - - def _pretrain(self): - for _ in range(self.learner_config["pretrain_n"]): - self._learn(permit_desync=True, checkpoint=False, backup=False) - self.epoch = 0 - self.update_i = 0 - - def _resume_from_checkpoint(self): - epoch = util.get_largest_epoch_in_dir(self.save_path, self.core.saveid) - if not self.config['keep_all_replay_buffers']: util.wipe_all_but_largest_epoch_in_dir(self.save_path, self.core.saveid) - if epoch is False: - raise Exception("Tried to reload but no model found") - with self.learner_lock: - self.core.load(self.sess, self.save_path, epoch) - self.epoch, self.update_i, self.total_frames, self.hours = self.sess.run([self.core.epoch_n, self.core.update_n, self.core.frame_n, self.core.hours]) - with self.replay_buffer_lock: - self.replay_buffer.load(self.save_path, '%09d_%s' % (epoch, self.learner_name)) - self.resume_from_checkpoint(epoch) - - def _log(self): - if self.denom > 0: - logstring = "(%3.2f sec) h%-8.2f e%-8d s%-8d f%-8d\t" % (time.time() - self._log_time, self.hours, self.epoch, self.update_i + 1, self.total_frames) + ', '.join(["%8f" % x for x in 
(self.running_total / self.denom).tolist()]) - print("%s\t%s" % (self.learner_name, logstring)) - with open(self.log_path, "a") as f: f.write(logstring + "\n") - self._reset_inspections() - - def _reset_inspections(self): - self.running_total = 0. - self.denom = 0. - self._log_time = time.time() - - def _checkpoint(self): - self.checkpoint() - self.epoch += 1 - self.hours += (time.time() - self._last_checkpoint_time) / 3600. - self._last_checkpoint_time = time.time() - self.core.update_epoch(self.sess, self.epoch, self.update_i, self.total_frames, self.hours) - with self.learner_lock: self.core.save(self.sess, self.save_path) - - def _backup(self): - self.backup() - if not self.learner_config['keep_all_replay_buffers']: util.wipe_all_but_largest_epoch_in_dir(self.save_path, self.core.saveid) - with self.learner_lock: - self.core.save(self.sess, self.save_path, self.epoch) - with self.replay_buffer_lock: - self.replay_buffer.save(self.save_path, '%09d_%s' % (self.epoch, self.learner_name)) - - def _training_step(self): - train_ops = tuple([op for op, loss in zip(self.train_ops, - self.train_losses) - if loss is not None]) - outs = self.sess.run(train_ops + self.inspect_losses) - self.running_total += np.array(outs[len(train_ops):]) - self.denom += 1. - self.update_i += 1 - - def _poll_interactors(self, continuous_poll=False, frames_before_terminate=None): - # poll the interactors for new frames. - # the synced_condition semaphore prevents this from consuming too much CPU - while not self.kill_threads: - if self.learner_config["frames_per_update"] is not False and not continuous_poll: - with self.need_frames_notification: self.need_frames_notification.wait() - while not self.interactor_queue.empty(): - new_frames = self.interactor_queue.get() - self._add_frames(new_frames) - if frames_before_terminate and self.total_frames >= frames_before_terminate: return - - def _add_frames(self, frames): - with self.replay_buffer_lock: - for frame in frames: - self.replay_buffer.add_replay(*frame) - self.total_frames = self.replay_buffer.count - return self.total_frames - - def _run_enqueue_data(self): - while not self.kill_threads: - data = self.replay_buffer.random_batch(self.learner_config["batch_size"]) - self.sess.run(self.enqueue_op, feed_dict=dict(list(zip(self.data_loaders, data)))) - - def _kill_threads(self): - self.kill_threads = True - - -class CoreModel(object): - """The base class for the "core" of learners.""" - def __init__(self, name, env_config, learner_config): - self.name = self.saveid + "/" + name - self.env_config = env_config - self.learner_config = learner_config - - with tf.variable_scope(self.name): - self.epoch_n = tf.get_variable('epoch_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False) - self.update_n = tf.get_variable('update_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False) - self.frame_n = tf.get_variable('frame_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False) - self.hours = tf.get_variable('hours', [], initializer=tf.constant_initializer(0.), dtype=tf.float64, trainable=False) - self.epoch_n_placeholder = tf.placeholder(tf.int64, []) - self.update_n_placeholder = tf.placeholder(tf.int64, []) - self.frame_n_placeholder = tf.placeholder(tf.int64, []) - self.hours_placeholder = tf.placeholder(tf.float64, []) - self.assign_epoch_op = [tf.assign(self.epoch_n, self.epoch_n_placeholder), tf.assign(self.update_n, self.update_n_placeholder), tf.assign(self.frame_n, 
self.frame_n_placeholder), tf.assign(self.hours, self.hours_placeholder)] - - self.create_params(env_config, learner_config) - self.model_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) - self.saver = tf.train.Saver(self.model_params) - - @property - def saveid(self): - raise Exception("specify a save ID") - - def create_params(self, env_config, learner_config): - raise Exception("unimplemented") - - def update_epoch(self, sess, epoch, updates, frames, hours): - sess.run(self.assign_epoch_op, feed_dict={self.epoch_n_placeholder: int(epoch), self.update_n_placeholder: int(updates), self.frame_n_placeholder: int(frames), self.hours_placeholder: float(hours)}) - - def save(self, sess, path, epoch=None): - if epoch is None: self.saver.save(sess, path + "/%s.params" % self.saveid) - else: self.saver.save(sess, path + "/%09d_%s.params" % (epoch, self.saveid)) - - def load(self, sess, path, epoch=None): - if epoch is None: self.saver.restore(sess, path + "/%s.params" % self.saveid) - else: self.saver.restore(sess, path + "/%09d_%s.params" % (epoch, self.saveid)) - -def run_learner(learner_subclass, queue, lock, config, env_config, learner_config, **bonus_kwargs): - learner = learner_subclass(queue, lock, config, env_config, learner_config, **bonus_kwargs) - try: - learner._start() - while True: learner._learn() - - except Exception as e: - print('Caught exception in learner process') - traceback.print_exc() - learner._kill_threads() - print() - raise e diff --git a/research/steve/master.py b/research/steve/master.py deleted file mode 100644 index 4d0847472..000000000 --- a/research/steve/master.py +++ /dev/null @@ -1,85 +0,0 @@ -from builtins import str -from builtins import range -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
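
The learner above paces optimization against data collection: _learn only takes update i once total_frames exceeds frames_before_learning plus frames_per_update * i, and otherwise wakes the polling thread to fetch more frames. A minimal, self-contained sketch of that gate (the class and its names are illustrative, not part of this codebase):

    import threading

    class PacingGate(object):
        """Toy version of the frames-per-update handshake in learner.py."""
        def __init__(self, frames_before_learning, frames_per_update):
            self.frames_before_learning = frames_before_learning
            self.frames_per_update = frames_per_update
            self.total_frames = 0   # incremented by the polling thread
            self.update_i = 0
            self.need_frames = threading.Condition()

        def have_enough_frames(self):
            gathered = self.total_frames - self.frames_before_learning
            return gathered > self.frames_per_update * self.update_i

        def maybe_learn(self, training_step):
            if not self.have_enough_frames():
                with self.need_frames:
                    self.need_frames.notify()  # ask the poller for more data
                return False
            training_step()
            self.update_i += 1
            return True

    gate = PacingGate(frames_before_learning=100, frames_per_update=4)
    gate.total_frames = 112
    print(gate.have_enough_frames())  # True: 12 gathered frames > 4 * 0 updates
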
-# ==============================================================================
-
-import multiprocessing
-import os, sys, time
-
-from config import config, log_config
-import util
-
-AGENT_COUNT = config["agent_config"]["count"]
-EVALUATOR_COUNT = config["evaluator_config"]["count"]
-MODEL_AUGMENTED = config["model_config"] is not False
-if config["resume"]:
-    ROOT_PATH = "output/" + config["env"]["name"] + "/" + config["name"]
-else:
-    ROOT_PATH = util.create_and_wipe_directory("output/" + config["env"]["name"] + "/" + config["name"])
-log_config()
-import learner, agent, valuerl_learner
-if MODEL_AUGMENTED: import worldmodel_learner
-
-if __name__ == '__main__':
-    all_procs = set([])
-    interaction_procs = set([])
-
-    # lock
-    policy_lock = multiprocessing.Lock()
-    model_lock = multiprocessing.Lock() if MODEL_AUGMENTED else None
-
-    # queue
-    policy_replay_frame_queue = multiprocessing.Queue(1)
-    model_replay_frame_queue = multiprocessing.Queue(1) if MODEL_AUGMENTED else None
-
-    # interactors
-    for interact_proc_i in range(AGENT_COUNT):
-        interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, False, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
-        all_procs.add(interact_proc)
-        interaction_procs.add(interact_proc)
-
-    # evaluators
-    for interact_proc_i in range(EVALUATOR_COUNT):
-        interact_proc = multiprocessing.Process(target=agent.main, args=(interact_proc_i, True, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config))
-        all_procs.add(interact_proc)
-        interaction_procs.add(interact_proc)
-
-    # policy training
-    train_policy_proc = multiprocessing.Process(target=learner.run_learner, args=(valuerl_learner.ValueRLLearner, policy_replay_frame_queue, policy_lock, config, config["env"], config["policy_config"]), kwargs={"model_lock": model_lock})
-    all_procs.add(train_policy_proc)
-
-    # model training
-    if MODEL_AUGMENTED:
-        train_model_proc = multiprocessing.Process(target=learner.run_learner, args=(worldmodel_learner.WorldmodelLearner, model_replay_frame_queue, model_lock, config, config["env"], config["model_config"]))
-        all_procs.add(train_model_proc)
-
-    # start all processes; interactors run CPU-only, each learner gets its own GPU
-    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-    for i, proc in enumerate(interaction_procs):
-        os.environ['CUDA_VISIBLE_DEVICES'] = ''
-        proc.start()
-
-    os.environ['CUDA_VISIBLE_DEVICES'] = str(int(sys.argv[2]))
-    train_policy_proc.start()
-
-    if MODEL_AUGMENTED:
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(1+int(sys.argv[2]))
-        train_model_proc.start()
-
-    # idle until interrupted, then wait for all children to exit
-    while True:
-        try:
-            time.sleep(60)
-        except KeyboardInterrupt:
-            for proc in all_procs: proc.join()
-            break
diff --git a/research/steve/nn.py b/research/steve/nn.py
deleted file mode 100644
index c87c6eb83..000000000
--- a/research/steve/nn.py
+++ /dev/null
@@ -1,189 +0,0 @@
-from builtins import range
-from builtins import object
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. 
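
master.py above hands frames from the interactor processes to the learners through multiprocessing.Queue(1): the queue holds at most one pending batch, so put() blocks until the learner drains it, which throttles interaction to the learner's consumption rate. A runnable toy sketch of that backpressure pattern (the frame payloads are placeholders):

    import multiprocessing
    import time

    def interactor(frame_queue):
        for t in range(5):
            frame_queue.put([("obs", t), ("reward", -1.0)])  # blocks if full
        frame_queue.put(None)                                # sentinel: done

    def learner(frame_queue):
        while True:
            batch = frame_queue.get()      # blocks until frames arrive
            if batch is None: break
            time.sleep(0.1)                # stand-in for a training step
            print("consumed frame", batch[0])

    if __name__ == '__main__':
        q = multiprocessing.Queue(1)       # capacity 1, as in master.py
        procs = [multiprocessing.Process(target=interactor, args=(q,)),
                 multiprocessing.Process(target=learner, args=(q,))]
        for p in procs: p.start()
        for p in procs: p.join()
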
-# ============================================================================== - -import tensorflow as tf -import numpy as np -from itertools import product - -class FeedForwardNet(object): - """Custom feed-forward network layer.""" - def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False): - self.name = name - self.in_size = in_size - self.out_shape = out_shape - self.out_size = np.prod(out_shape) - self.layers = layers - self.hidden_dim = hidden_dim - self.final_nonlinearity = (lambda x:x) if final_nonlinearity is None else final_nonlinearity - self.get_uncertainty = get_uncertainty - - self.weights = [None] * layers - self.biases = [None] * layers - - self.params_list = [] - - with tf.variable_scope(name): - for layer_i in range(self.layers): - in_size = self.hidden_dim - out_size = self.hidden_dim - if layer_i == 0: in_size = self.in_size - if layer_i == self.layers - 1: out_size = self.out_size - self.weights[layer_i] = tf.get_variable("weights%d" % layer_i, [in_size, out_size], initializer=tf.contrib.layers.xavier_initializer()) - self.biases[layer_i] = tf.get_variable("bias%d" % layer_i, [1, out_size], initializer=tf.constant_initializer(0.0)) - self.params_list += [self.weights[layer_i], self.biases[layer_i]] - - def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None, reduce_mode="none"): - original_shape = tf.shape(x) - h = tf.reshape(x, [-1, self.in_size]) - for layer_i in range(self.layers): - nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity - if stop_params_gradient: h = nonlinearity(tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i])) - else: h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i]) - if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1)) - else: h = tf.reshape(h, original_shape[:-1]) - if pre_expanded is None: pre_expanded = ensemble_idxs is not None - if reduce_mode == "none" and not pre_expanded and self.get_uncertainty: - if len(self.out_shape) > 0: h = tf.expand_dims(h, -2) - else: h = tf.expand_dims(h, -1) - return h - - def l2_loss(self): - return tf.add_n([tf.reduce_sum(.5 * tf.square(mu)) for mu in self.params_list]) - -class BayesianDropoutFeedForwardNet(FeedForwardNet): - """Custom feed-forward network layer, with dropout as a Bayesian approximation.""" - def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False, keep_prob=.5, eval_sample_count=2, consistent_random_seed=False): - super(BayesianDropoutFeedForwardNet, self).__init__(name, in_size, out_shape, layers=layers, hidden_dim=hidden_dim, - final_nonlinearity=final_nonlinearity, get_uncertainty=get_uncertainty) - self.keep_prob = keep_prob - self.eval_sample_count = eval_sample_count - if eval_sample_count < 2: raise Exception("eval_sample_count must be at least 2 to estimate uncertainty") - self.dropout_seed = tf.random_uniform([layers], maxval=1e18, dtype=tf.int64) if consistent_random_seed else [None] * layers - - def __call__(self, x, stop_params_gradient=False, is_eval=True, pre_expanded=False, ensemble_idxs=None, reduce_mode="none"): - if is_eval: - x = tf.tile(tf.expand_dims(x,0), tf.concat([tf.constant([self.eval_sample_count]), tf.ones_like(tf.shape(x))], 0)) - original_shape = tf.shape(x) - h = tf.reshape(x, [-1, self.in_size]) - for layer_i in range(self.layers): 
- nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity - if layer_i > 0: h = tf.nn.dropout(h, keep_prob=self.keep_prob, seed=self.dropout_seed[layer_i]) - if stop_params_gradient: h = nonlinearity(tf.matmul(h, tf.stop_gradient(self.weights[layer_i])) + tf.stop_gradient(self.biases[layer_i])) - else: h = nonlinearity(tf.matmul(h, self.weights[layer_i]) + self.biases[layer_i]) - if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1)) - else: h = tf.reshape(h, original_shape[:-1]) - if is_eval: - h, uncertainty = tf.nn.moments(h, 0) - if self.get_uncertainty: return h, uncertainty - else: return h - else: - return h - - -class EnsembleFeedForwardNet(FeedForwardNet): - """Custom feed-forward network layer with an ensemble.""" - def __init__(self, name, in_size, out_shape, layers=1, hidden_dim=32, final_nonlinearity=None, get_uncertainty=False, ensemble_size=2, train_sample_count=2, eval_sample_count=2): - if train_sample_count > ensemble_size: raise Exception("train_sample_count cannot be larger than ensemble size") - if eval_sample_count > ensemble_size: raise Exception("eval_sample_count cannot be larger than ensemble size") - self.name = name - self.in_size = in_size - self.out_shape = out_shape - self.out_size = np.prod(out_shape) - self.layers = layers - self.hidden_dim = hidden_dim - self.final_nonlinearity = (lambda x:x) if final_nonlinearity is None else final_nonlinearity - self.get_uncertainty = get_uncertainty - self.ensemble_size = ensemble_size - self.train_sample_count = train_sample_count - self.eval_sample_count = eval_sample_count - - self.weights = [None] * layers - self.biases = [None] * layers - - self.params_list = [] - - with tf.variable_scope(name): - for layer_i in range(self.layers): - in_size = self.hidden_dim - out_size = self.hidden_dim - if layer_i == 0: in_size = self.in_size - if layer_i == self.layers - 1: out_size = self.out_size - self.weights[layer_i] = tf.get_variable("weights%d" % layer_i, [ensemble_size, in_size, out_size], initializer=tf.contrib.layers.xavier_initializer()) - self.biases[layer_i] = tf.get_variable("bias%d" % layer_i, [ensemble_size, out_size], initializer=tf.constant_initializer(0.0)) - self.params_list += [self.weights[layer_i], self.biases[layer_i]] - - def __call__(self, x, stop_params_gradient=False, is_eval=True, ensemble_idxs=None, pre_expanded=None, reduce_mode="none"): - if pre_expanded is None: pre_expanded = ensemble_idxs is not None - if ensemble_idxs is None: - ensemble_idxs = tf.random_shuffle(tf.range(self.ensemble_size)) - ensemble_sample_n = self.eval_sample_count if is_eval else self.train_sample_count - ensemble_idxs = ensemble_idxs[:ensemble_sample_n] - else: - ensemble_sample_n = tf.shape(ensemble_idxs)[0] - - weights = [tf.gather(w, ensemble_idxs, axis=0) for w in self.weights] - biases = [tf.expand_dims(tf.gather(b, ensemble_idxs, axis=0),0) for b in self.biases] - - original_shape = tf.shape(x) - if pre_expanded: h = tf.reshape(x, [-1, ensemble_sample_n, self.in_size]) - else: h = tf.tile(tf.reshape(x, [-1, 1, self.in_size]), [1, ensemble_sample_n, 1]) - for layer_i in range(self.layers): - nonlinearity = tf.nn.relu if layer_i + 1 < self.layers else self.final_nonlinearity - if stop_params_gradient: h = nonlinearity(tf.einsum('bri,rij->brj', h, tf.stop_gradient(weights[layer_i])) + tf.stop_gradient(biases[layer_i])) - else: h = nonlinearity(tf.einsum('bri,rij->brj', h, weights[layer_i]) + biases[layer_i]) - - if pre_expanded: - 
if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant(self.out_shape)], -1)) - else: h = tf.reshape(h, original_shape[:-1]) - else: - if len(self.out_shape) > 0: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant([ensemble_sample_n]), tf.constant(self.out_shape)], -1)) - else: h = tf.reshape(h, tf.concat([original_shape[:-1], tf.constant([ensemble_sample_n])], -1)) - - if reduce_mode == "none": - pass - elif reduce_mode == "random": - if len(self.out_shape) > 0: h = tf.reduce_sum(h * tf.reshape(tf.one_hot(tf.random_uniform([tf.shape(h)[0]], 0, ensemble_sample_n, dtype=tf.int64), ensemble_sample_n), tf.concat([tf.shape(h)[:1], tf.ones_like(tf.shape(h)[1:-2]), tf.constant([ensemble_sample_n]), tf.constant([1])], 0)), -2) - else: h = tf.reduce_sum(h * tf.reshape(tf.one_hot(tf.random_uniform([tf.shape(h)[0]], 0, ensemble_sample_n, dtype=tf.int64), ensemble_sample_n), tf.concat([tf.shape(h)[:1], tf.ones_like(tf.shape(h)[1:-1]), tf.constant([ensemble_sample_n])], 0)), -1) - elif reduce_mode == "mean": - if len(self.out_shape) > 0: h = tf.reduce_mean(h, -2) - else: h = tf.reduce_mean(h, -1) - else: raise Exception("use a valid reduce mode: none, random, or mean") - - return h - - -class ReparamNormal(object): - """Wrapper to make a feedforward network that outputs both mu and logsigma, - for use in the reparameterization trick.""" - def __init__(self, base_net, name, in_size, out_shape, layers=2, hidden_dim=32, final_nonlinearity=None, ls_start_bias=0.0, final_net=FeedForwardNet, logsigma_min=-5., logsigma_max=2., **kwargs): - assert layers > 1 - self.main_encoder = base_net(name+"_base", in_size, [hidden_dim], layers, hidden_dim, final_nonlinearity=tf.nn.relu, **kwargs) - self.mu = final_net(name+"_mu", hidden_dim, out_shape, layers=1, final_nonlinearity=final_nonlinearity, **kwargs) - self.logsigma = final_net(name+"_logsigma", hidden_dim, out_shape, layers=1, final_nonlinearity=None, **kwargs) - self.ls_start_bias = ls_start_bias - self.params_list = self.main_encoder.params_list + self.mu.params_list + self.logsigma.params_list - self.logsigma_min = logsigma_min - self.logsigma_max = logsigma_max - - def __call__(self, x): - encoded = self.main_encoder(x) - mu = self.mu(encoded) - logsigma = tf.clip_by_value(self.logsigma(encoded) + self.ls_start_bias, self.logsigma_min, self.logsigma_max) - return mu, logsigma - - def l2_loss(self): - return self.main_encoder.l2_loss() + self.mu.l2_loss() + self.logsigma.l2_loss() diff --git a/research/steve/replay.py b/research/steve/replay.py deleted file mode 100644 index 989cc0b2a..000000000 --- a/research/steve/replay.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import print_function -from future import standard_library -standard_library.install_aliases() -from builtins import zip -from builtins import str -from builtins import object -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
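
In EnsembleFeedForwardNet above, every sampled ensemble member applies its own weight matrix in one shot via tf.einsum('bri,rij->brj', h, weights). A small numpy check of what that contraction computes (toy shapes, random values):

    import numpy as np

    batch, members, d_in, d_out = 4, 3, 5, 2
    h = np.random.randn(batch, members, d_in)   # one input row per member
    w = np.random.randn(members, d_in, d_out)   # one weight matrix per member

    out = np.einsum('bri,rij->brj', h, w)       # same contraction as above
    # equivalent loop: member r sees only its own weights
    ref = np.stack([h[:, r, :] @ w[r] for r in range(members)], axis=1)
    assert np.allclose(out, ref)
    print(out.shape)                            # (4, 3, 2)
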
-# ==============================================================================
-
-import numpy as np
-import pickle
-import multiprocessing
-
-class ReplayBuffer(object):
-    """
-    Stores frames sampled from the environment, with the ability to sample a batch
-    for training.
-    """
-
-    def __init__(self, max_size, obs_dim, action_dim, roundrobin=True):
-        self.max_size = max_size
-        self.obs_dim = obs_dim
-        self.action_dim = action_dim
-        self.roundrobin = roundrobin
-
-        self.obs_buffer = np.zeros([max_size, obs_dim])
-        self.next_obs_buffer = np.zeros([max_size, obs_dim])
-        self.action_buffer = np.zeros([max_size, action_dim])
-        self.reward_buffer = np.zeros([max_size])
-        self.done_buffer = np.zeros([max_size])
-
-        self.count = 0
-
-    def random_batch(self, batch_size):
-        indices = np.random.randint(0, min(self.count, self.max_size), batch_size)
-
-        return (
-            self.obs_buffer[indices],
-            self.next_obs_buffer[indices],
-            self.action_buffer[indices],
-            self.reward_buffer[indices],
-            self.done_buffer[indices],
-            self.count
-        )
-
-    def add_replay(self, obs, next_obs, action, reward, done):
-        if self.count >= self.max_size:
-            # full buffer: overwrite the oldest slot, or a random one
-            if self.roundrobin: index = self.count % self.max_size
-            else: index = np.random.randint(0, self.max_size)
-        else:
-            index = self.count
-
-        self.obs_buffer[index] = obs
-        self.next_obs_buffer[index] = next_obs
-        self.action_buffer[index] = action
-        self.reward_buffer[index] = reward
-        self.done_buffer[index] = done
-
-        self.count += 1
-
-    def save(self, path, name):
-        def _save(datas, fnames):
-            print("saving replay buffer...")
-            for data, fname in zip(datas, fnames):
-                # the buffers are pickled, despite the .npz extension
-                with open("%s.npz"%fname, "wb") as f:
-                    pickle.dump(data, f)
-            with open("%s/%s.count" % (path,name), "w") as f:
-                f.write(str(self.count))
-            print("...done saving.")
-
-        datas = [
-            self.obs_buffer,
-            self.next_obs_buffer,
-            self.action_buffer,
-            self.reward_buffer,
-            self.done_buffer
-        ]
-
-        fnames = [
-            "%s/%s.obs_buffer" % (path, name),
-            "%s/%s.next_obs_buffer" % (path, name),
-            "%s/%s.action_buffer" % (path, name),
-            "%s/%s.reward_buffer" % (path, name),
-            "%s/%s.done_buffer" % (path, name)
-        ]
-
-        # save in a child process so training is not blocked on disk I/O
-        proc = multiprocessing.Process(target=_save, args=(datas, fnames))
-        proc.start()
-
-    def load(self, path, name):
-        print("Loading %s replay buffer (may take a while...)" % name)
-        with open("%s/%s.obs_buffer.npz" % (path,name), "rb") as f: self.obs_buffer = pickle.load(f)
-        with open("%s/%s.next_obs_buffer.npz" % (path,name), "rb") as f: self.next_obs_buffer = pickle.load(f)
-        with open("%s/%s.action_buffer.npz" % (path,name), "rb") as f: self.action_buffer = pickle.load(f)
-        with open("%s/%s.reward_buffer.npz" % (path,name), "rb") as f: self.reward_buffer = pickle.load(f)
-        with open("%s/%s.done_buffer.npz" % (path,name), "rb") as f: self.done_buffer = pickle.load(f)
-        with open("%s/%s.count" % (path,name), "r") as f: self.count = int(f.read())
diff --git a/research/steve/toy_demo.py b/research/steve/toy_demo.py
deleted file mode 100644
index 859a86f72..000000000
--- a/research/steve/toy_demo.py
+++ /dev/null
@@ -1,430 +0,0 @@
-from __future__ import division
-from __future__ import print_function
-from builtins import range
-from past.utils import old_div
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import numpy as np -import scipy -import matplotlib.pyplot as plt -import seaborn as sns - -### Hyperparameters - -NONTERMINAL_STATE_COUNT = 100 -NOISE_AMOUNT = 0.1 -TRAIN_STEPS = 10000 -Q_ENSEMBLE_SIZE = 8 -MODEL_ENSEMBLE_SIZE = 8 -HORIZON = 5 -TRIAL_N = 10 - -### Helper functions - -initial_state = 0 -terminal_state = NONTERMINAL_STATE_COUNT + 1 -nonterminal_state_count = NONTERMINAL_STATE_COUNT -state_count = NONTERMINAL_STATE_COUNT + 1 -final_reward = NONTERMINAL_STATE_COUNT -colors = sns.color_palette('husl', 4) -plt.rcParams["figure.figsize"] = (6,5) - -def step(state): - if state == terminal_state: next_state = terminal_state - else: next_state = state + 1 - - if state == terminal_state: reward = 0 - elif state+1 == terminal_state: reward = final_reward - else: reward = -1 - - return next_state, reward - -def noisy_step(state): - if state == terminal_state: next_state = terminal_state - elif np.random.random([]) < NOISE_AMOUNT: next_state = np.random.randint(0, state_count) - else: next_state = state + 1 - - if state == terminal_state: reward = 0 - elif state+1 == terminal_state: reward = final_reward - else: reward = -1 - - return next_state, reward - -def get_error(Q): - losses = np.square(np.arange(state_count) - Q[:-1]) - return np.mean(losses) - -def downsample(array, factor): - pad_size = np.ceil(old_div(float(array.size),factor))*factor - array.size - array_padded = np.append(array, np.zeros([pad_size.astype(np.int64)])*np.NaN) - return scipy.nanmean(array_padded.reshape(-1,factor), axis=1) - - -###################### -### Main experiments -###################### - -# Basic Q -if True: - print("Running basic Q-learning.") - trial_results = [] - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[state_count+1]).astype(np.float64) - Q[state_count] = 0 - losses = [] - for step_i in range(TRAIN_STEPS): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - Q[state] = reward + Q[next_state] - losses.append(get_error(Q)) - trial_results.append(losses) - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="Basic Q-learning", color=colors[0]) - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[0]) - with open('Toy-v1/baseline.csv', 'w') as f: - data = [] - for frame_i in range(result.shape[0]): - for loss in result[frame_i]: - data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss)) - f.write("\n".join(data)) - -# Ensemble Q -if True: - print("Running ensemble Q-learning.") - trial_results = [] - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[Q_ENSEMBLE_SIZE, state_count+1]).astype(np.float64) - Q[:, state_count] = 0 - losses = [] - for step_i in range(TRAIN_STEPS): - for q_ensemble_i in range(Q_ENSEMBLE_SIZE): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - Q[q_ensemble_i, state] = 
reward + np.mean(Q[:, next_state]) - losses.append(get_error(np.mean(Q, axis=0))) - trial_results.append(losses) - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="Ensemble Q-learning", color=colors[1]) - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[1]) - -# Ensemble MVE-Oracle -if True: - print("Running ensemble oracle MVE.") - trial_results = [] - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[Q_ENSEMBLE_SIZE, state_count+1]).astype(np.float64) - Q[:, state_count] = 0 - losses = [] - for step_i in range(TRAIN_STEPS): - for q_ensemble_i in range(Q_ENSEMBLE_SIZE): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - - # MVE rollout - target = reward - for _ in range(HORIZON): - next_state, reward = step(next_state) - target += reward - target += np.mean(Q[:,next_state]) - - Q[q_ensemble_i, state] = target - losses.append(get_error(np.mean(Q, axis=0))) - trial_results.append(losses) - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="MVE-oracle", color=colors[2]) - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2]) - with open('Toy-v1/mve_oracle.csv', 'w') as f: - data = [] - for frame_i in range(result.shape[0]): - for loss in result[frame_i]: - data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss)) - f.write("\n".join(data)) - -# Ensemble MVE-Noisy -if True: - print("Running ensemble noisy MVE.") - trial_results = [] - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[Q_ENSEMBLE_SIZE, state_count+1]).astype(np.float64) - Q[:, state_count] = 0 - losses = [] - for step_i in range(TRAIN_STEPS): - for q_ensemble_i in range(Q_ENSEMBLE_SIZE): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - - # MVE rollout - targets = [] - first_next_state, first_reward = next_state, reward - for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE): - next_state, reward = first_next_state, first_reward - target = reward - for _ in range(HORIZON): - next_state, reward = noisy_step(next_state) - target += reward - target += np.mean(Q[:,next_state]) - targets.append(target) - - Q[q_ensemble_i, state] = np.mean(targets) - losses.append(get_error(np.mean(Q, axis=0))) - trial_results.append(losses) - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="MVE-noisy", color=colors[2], linestyle='dotted') - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[2]) - with open('Toy-v1/mve_noisy.csv', 'w') as f: - data = [] - for frame_i in range(result.shape[0]): - for loss in result[frame_i]: - data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss)) - f.write("\n".join(data)) - -# STEVE-Oracle -if True: - print("Running ensemble oracle STEVE.") - trial_results = [] - - oracle_q_estimate_errors = [] - oracle_mve_estimate_errors = [] - oracle_steve_estimate_errors = [] - oracle_opt_estimate_errors = [] - - - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[Q_ENSEMBLE_SIZE, state_count+1]).astype(np.float64) - Q[:, state_count] = 0 - losses = [] - - 
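
The STEVE variants below form, at every rollout horizon, an ensemble of candidate targets, then combine the horizons with inverse-variance weights: confidences = 1 / (var + 1e-8), normalized so the coefficients sum to one. A worked numpy example of that reweighting (toy numbers):

    import numpy as np

    # rows: candidate targets for horizons 0..2; columns: ensemble estimates
    all_targets = np.array([[10.0, 10.2,  9.8],    # low-variance horizon
                            [11.0, 13.0,  9.0],    # high-variance horizon
                            [10.5, 10.4, 10.6]])
    estimates = all_targets.mean(axis=1)
    confidences = 1.0 / (all_targets.var(axis=1) + 1e-8)
    coefficients = confidences / confidences.sum()
    target = (estimates * coefficients).sum()
    print(coefficients.round(3))  # [0.2 0.002 0.798]: noisy horizon ~ignored
    print(round(target, 2))       # 10.4, dominated by the consistent horizons
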
q_estimate_errors = [] - mve_estimate_errors = [] - steve_estimate_errors = [] - opt_estimate_errors = [] - steve_beat_freq= [] - - for step_i in range(TRAIN_STEPS): - _q_estimate_errors = [] - _mve_estimate_errors = [] - _steve_estimate_errors = [] - _opt_estimate_errors = [] - _steve_beat_freq = [] - - for q_ensemble_i in range(Q_ENSEMBLE_SIZE): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - - # STEVE rollout - Q_est_mat = np.zeros([HORIZON + 1, Q_ENSEMBLE_SIZE]) - reward_est_mat = np.zeros([HORIZON + 1, 1]) - first_next_state, first_reward = next_state, reward - next_state, reward = first_next_state, first_reward - Q_est_mat[0, :] = Q[:, next_state] - reward_est_mat[0, 0] = reward - for timestep_i in range(1,HORIZON+1): - next_state, reward = step(next_state) - Q_est_mat[timestep_i, :] = Q[:, next_state] - reward_est_mat[timestep_i, 0] = reward - all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0) - - # STEVE weight calculation - estimates = np.mean(all_targets, axis=1) - confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8)) - coefficients = old_div(confidences, np.sum(confidences)) - target = np.sum(estimates * coefficients) - - Q[q_ensemble_i, state] = target - - true_target = state + 1. if state != terminal_state else 0. - _q_estimate_errors.append(np.square(estimates[0] - true_target)) - _mve_estimate_errors.append(np.square(estimates[-1] - true_target)) - _steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target)) - _opt_estimate_errors.append(np.min(np.square(estimates - true_target))) - - losses.append(get_error(np.mean(Q, axis=0))) - q_estimate_errors.append(np.mean(_q_estimate_errors)) - mve_estimate_errors.append(np.mean(_mve_estimate_errors)) - steve_estimate_errors.append(np.mean(_steve_estimate_errors)) - opt_estimate_errors.append(np.mean(_opt_estimate_errors)) - trial_results.append(losses) - oracle_q_estimate_errors.append(q_estimate_errors) - oracle_mve_estimate_errors.append(mve_estimate_errors) - oracle_steve_estimate_errors.append(steve_estimate_errors) - oracle_opt_estimate_errors.append(opt_estimate_errors) - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="STEVE-oracle", color=colors[3]) - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3]) - with open('Toy-v1/steve_oracle.csv', 'w') as f: - data = [] - for frame_i in range(result.shape[0]): - for loss in result[frame_i]: - data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss)) - f.write("\n".join(data)) - -# STEVE-Noisy -if True: - print("Running ensemble noisy STEVE.") - trial_results = [] - - noisy_q_estimate_errors = [] - noisy_mve_estimate_errors = [] - noisy_steve_estimate_errors = [] - noisy_opt_estimate_errors = [] - noisy_steve_beat_freq = [] - - for run_i in range(TRIAL_N): - print("Trial %d" % run_i) - Q = np.random.randint(0,state_count,[Q_ENSEMBLE_SIZE, state_count+1]).astype(np.float64) - Q[:, state_count] = 0 - losses = [] - - q_estimate_errors = [] - mve_estimate_errors = [] - steve_estimate_errors = [] - opt_estimate_errors = [] - steve_beat_freq= [] - - for step_i in range(TRAIN_STEPS): - _q_estimate_errors = [] - _mve_estimate_errors = [] - _steve_estimate_errors = [] - _opt_estimate_errors = [] - _steve_beat_freq = [] - for q_ensemble_i in range(Q_ENSEMBLE_SIZE): - state = np.random.randint(0,state_count) - next_state, reward = step(state) - - # 
STEVE rollout - Q_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, Q_ENSEMBLE_SIZE]) - reward_est_mat = np.zeros([HORIZON + 1, MODEL_ENSEMBLE_SIZE, 1]) - first_next_state, first_reward = next_state, reward - for model_ensemble_i in range(MODEL_ENSEMBLE_SIZE): - next_state, reward = first_next_state, first_reward - Q_est_mat[0, model_ensemble_i, :] = Q[:, next_state] - reward_est_mat[0, model_ensemble_i, 0] = reward - for timestep_i in range(1,HORIZON+1): - next_state, reward = noisy_step(next_state) - Q_est_mat[timestep_i, model_ensemble_i, :] = Q[:, next_state] - reward_est_mat[timestep_i, model_ensemble_i, 0] = reward - all_targets = Q_est_mat + np.cumsum(reward_est_mat, axis=0) - - # STEVE weight calculation - all_targets = np.reshape(all_targets, [HORIZON+1, MODEL_ENSEMBLE_SIZE * Q_ENSEMBLE_SIZE]) - estimates = np.mean(all_targets, axis=1) - confidences = old_div(1., (np.var(all_targets, axis=1) + 1e-8)) - coefficients = old_div(confidences, np.sum(confidences)) - target = np.sum(estimates * coefficients) - # target = estimates[0] - - Q[q_ensemble_i, state] = target - - true_target = state + 1. if state != terminal_state else 0. - _q_estimate_errors.append(np.square(estimates[0] - true_target)) - _mve_estimate_errors.append(np.square(estimates[-1] - true_target)) - _steve_estimate_errors.append(np.square(np.sum(estimates * coefficients) - true_target)) - _opt_estimate_errors.append(np.min(np.square(estimates - true_target))) - _steve_beat_freq.append(float(np.square(estimates[0] - true_target) > np.square(target - true_target))) - - losses.append(get_error(np.mean(Q, axis=0))) - q_estimate_errors.append(np.mean(_q_estimate_errors)) - mve_estimate_errors.append(np.mean(_mve_estimate_errors)) - steve_estimate_errors.append(np.mean(_steve_estimate_errors)) - opt_estimate_errors.append(np.mean(_opt_estimate_errors)) - steve_beat_freq.append(np.mean(_steve_beat_freq)) - trial_results.append(losses) - noisy_q_estimate_errors.append(q_estimate_errors) - noisy_mve_estimate_errors.append(mve_estimate_errors) - noisy_steve_estimate_errors.append(steve_estimate_errors) - noisy_opt_estimate_errors.append(opt_estimate_errors) - noisy_steve_beat_freq.append(steve_beat_freq) - - print("...complete.\n") - - result = np.stack(trial_results, axis=1) - means = np.mean(result, axis=1) - stdevs = np.std(result, axis=1) - plt.plot(means, label="STEVE-noisy", color=colors[3], linestyle='dotted') - plt.fill_between(np.arange(TRAIN_STEPS), means - stdevs, means + stdevs, alpha=.2, color=colors[3]) - with open('Toy-v1/steve_noisy.csv', 'w') as f: - data = [] - for frame_i in range(result.shape[0]): - for loss in result[frame_i]: - data.append("%f,%f,%f,%f" % (frame_i, frame_i, frame_i, loss)) - f.write("\n".join(data)) - -# ### Display results -# plt.title("Comparison of convergence rates") -# plt.legend() -# plt.savefig("comparison.pdf") -# plt.show() -# -# ### Display secondary results - error comparison -# DOWNSAMPLE = 50 -# colors = sns.color_palette('husl', 8) -# for i, (error_curve, label) in enumerate([ -# (oracle_q_estimate_errors, "Oracle Q error"), -# (oracle_mve_estimate_errors, "Oracle MVE error"), -# (oracle_steve_estimate_errors, "Oracle STEVE error"), -# # (oracle_opt_estimate_errors, "Oracle minimum single-estimate error"), -# ]): -# result = np.stack(error_curve, axis=1) -# means = downsample(np.mean(result, axis=1), DOWNSAMPLE) -# stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE) -# plt.plot(means, label=label, color=colors[i]) -# plt.fill_between(np.arange(means.shape[0]), means - 
stdevs, means + stdevs, alpha=.2, color=colors[i])
-#
-# plt.title("Comparison of errors for oracle dynamics")
-# plt.legend()
-# plt.show()
-#
-# for i, (error_curve, label) in enumerate([
-#     (noisy_q_estimate_errors, "Noisy Q error"),
-#     (noisy_mve_estimate_errors, "Noisy MVE error"),
-#     (noisy_steve_estimate_errors, "Noisy STEVE error"),
-#     # (noisy_opt_estimate_errors, "Noisy minimum single-estimate error"),
-#     # (trial_steve_beat_freq, "STEVE beat freq"),
-# ]):
-#     result = np.stack(error_curve, axis=1)
-#     means = downsample(np.mean(result, axis=1), DOWNSAMPLE)
-#     stdevs = downsample(np.std(result, axis=1), DOWNSAMPLE)
-#     plt.plot(means, label=label, color=colors[i])
-#     plt.fill_between(np.arange(means.shape[0]), means - stdevs, means + stdevs, alpha=.2, color=colors[i])
-#
-# plt.title("Comparison of errors for noisy dynamics")
-# plt.legend()
-# plt.show()
\ No newline at end of file
diff --git a/research/steve/util.py b/research/steve/util.py
deleted file mode 100644
index bf0abec0a..000000000
--- a/research/steve/util.py
+++ /dev/null
@@ -1,164 +0,0 @@
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-from builtins import str
-from builtins import range
-from past.utils import old_div
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-import numpy as np
-import tensorflow as tf
-import os, random, gc, math, re
-import multiprocessing, types, shutil, pickle, json
-from collections import defaultdict, MutableMapping
-
-def tanh_sample_info(mu, logsigma, stop_action_gradient=False, n_samples=1):
-    if n_samples > 1:
-        mu = tf.expand_dims(mu, 2)
-        logsigma = tf.expand_dims(logsigma, 2)
-        sample_shape = tf.concat([tf.shape(mu), [n_samples]], 0)  # wrap n_samples so both concat arguments are rank-1
-    else:
-        sample_shape = tf.shape(mu)
-
-    flat_act = mu + tf.random_normal(sample_shape) * tf.exp(logsigma)
-    if stop_action_gradient: flat_act = tf.stop_gradient(flat_act)
-    normalized_dist_t = (flat_act - mu) * tf.exp(-logsigma) # ... x D
-    quadratic = - 0.5 * tf.reduce_sum(normalized_dist_t ** 2, axis=-1) # ... x (None)
-    log_z = tf.reduce_sum(logsigma, axis=-1) # ... x (None)
-    D_t = tf.cast(tf.shape(mu)[-1], tf.float32)
-    log_z += 0.5 * D_t * np.log(2 * np.pi)
-    flat_ll = quadratic - log_z
-
-    scaled_act = tf.tanh(flat_act)
-    corr = tf.reduce_sum(tf.log(1. 
- tf.square(scaled_act) + 1e-6), axis=-1) - scaled_ll = flat_ll - corr - return flat_act, flat_ll, scaled_act, scaled_ll - -def tf_cheating_contcartpole(state, action): - gravity = 9.8 - masscart = 1.0 - masspole = 0.1 - total_mass = (masspole + masscart) - length = 0.5 # actually half the pole's length - polemass_length = (masspole * length) - force_mag = 10.0 - tau = 0.02 # seconds between state updates - - # Angle at which to fail the episode - theta_threshold_radians = 12 * 2 * math.pi / 360 - x_threshold = 2.4 - - x, x_dot, theta, theta_dot = tf.split(state, 4, axis=-1) - done = tf.logical_or(x < -x_threshold, - tf.logical_or(x > x_threshold, - tf.logical_or(theta < -theta_threshold_radians, - theta > theta_threshold_radians))) - - force = force_mag * action - costheta = tf.cos(theta) - sintheta = tf.sin(theta) - temp = old_div((force + polemass_length * theta_dot * theta_dot * sintheta), total_mass) - thetaacc = old_div((gravity * sintheta - costheta* temp), (length * (old_div(4.0,3.0) - masspole * costheta * costheta / total_mass))) - xacc = temp - polemass_length * thetaacc * costheta / total_mass - x = x + tau * x_dot - x_dot = x_dot + tau * xacc - theta = theta + tau * theta_dot - theta_dot = theta_dot + tau * thetaacc - state = tf.concat([x,x_dot,theta,theta_dot], -1) - done = tf.squeeze(tf.cast(done, tf.float32), -1) - reward = 1.0 - done - done *= 0. - return state, reward, done - -def create_directory(dir): - dir_chunks = dir.split("/") - for i in range(len(dir_chunks)): - partial_dir = "/".join(dir_chunks[:i+1]) - try: - os.makedirs(partial_dir) - except OSError: - pass - return dir - -def create_and_wipe_directory(dir): - shutil.rmtree(create_directory(dir)) - create_directory(dir) - -def wipe_file(fname): - with open(fname, "w") as f: - f.write("") - return fname - -def get_largest_epoch_in_dir(dir, saveid): - reg_matches = [re.findall('\d+_%s'%saveid,filename) for filename in os.listdir(dir)] - epoch_labels = [int(regmatch[0].split("_")[0]) for regmatch in reg_matches if regmatch] - if len(epoch_labels) == 0: return False - return max(epoch_labels) - -def wipe_all_but_largest_epoch_in_dir(dir, saveid): - largest = get_largest_epoch_in_dir(dir, saveid) - reg_matches = [(filename, re.findall('\d+_%s'%saveid,filename)) for filename in os.listdir(dir)] - for filename, regmatch in reg_matches: - if regmatch and int(regmatch[0].split("_")[0]) != largest: - os.remove(os.path.join(dir,filename)) - -class ConfigDict(dict): - def __init__(self, loc=None, ghost=False): - self._dict = defaultdict(lambda :False) - self.ghost = ghost - if loc: - with open(loc) as f: raw = json.load(f) - if "inherits" in raw and raw["inherits"]: - for dep_loc in raw["inherits"]: - self.update(ConfigDict(dep_loc)) - if "updates" in raw and raw["updates"]: - self.update(raw["updates"], include_all=True) - - def __getitem__(self, key): - return self._dict[key] - - def __setitem__(self, key, value): - self._dict[key] = value - - def __str__(self): - return str(dict(self._dict)) - - def __repr__(self): - return str(dict(self._dict)) - - def __iter__(self): - return self._dict.__iter__() - - def __bool__(self): - return bool(self._dict) - - def __nonzero__(self): - return bool(self._dict) - - def update(self, dictlike, include_all=False): - for key in dictlike: - value = dictlike[key] - if isinstance(value, dict): - if key[0] == "*": # this means only override, do not set - key = key[1:] - ghost = True - else: - ghost = False - if not include_all and isinstance(value, ConfigDict) and key not in self._dict and 
value.ghost: continue - if key not in self._dict: self._dict[key] = ConfigDict(ghost=ghost) - self._dict[key].update(value) - else: - self._dict[key] = value diff --git a/research/steve/valuerl.py b/research/steve/valuerl.py deleted file mode 100644 index 4819dd08c..000000000 --- a/research/steve/valuerl.py +++ /dev/null @@ -1,307 +0,0 @@ -from __future__ import division -from builtins import zip -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import tensorflow as tf -import numpy as np -import nn -import util -from learner import CoreModel - - -class ValueRL(CoreModel): - """ - Learn a state-action value function and its corresponding policy. - """ - - @property - def saveid(self): - return "valuerl" - - def create_params(self, env_config, learner_config): - self.obs_dim = np.prod(env_config["obs_dims"]) - self.action_dim = env_config["action_dim"] - self.reward_scale = env_config["reward_scale"] - self.discount = env_config["discount"] - - self.hidden_dim = learner_config["hidden_dim"] - self.bayesian_config = learner_config["bayesian"] - self.value_expansion = learner_config["value_expansion"] - self.explore_chance = learner_config["ddpg_explore_chance"] - - with tf.variable_scope(self.name): - self.policy = nn.FeedForwardNet('policy', self.obs_dim, [self.action_dim], layers=4, hidden_dim=self.hidden_dim, get_uncertainty=False) - - if self.bayesian_config: - self.Q = nn.EnsembleFeedForwardNet('Q', self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.hidden_dim, get_uncertainty=True, ensemble_size=self.bayesian_config["ensemble_size"], train_sample_count=self.bayesian_config["train_sample_count"], eval_sample_count=self.bayesian_config["eval_sample_count"]) - self.old_Q = nn.EnsembleFeedForwardNet('old_q', self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.hidden_dim, get_uncertainty=True, ensemble_size=self.bayesian_config["ensemble_size"], train_sample_count=self.bayesian_config["train_sample_count"], eval_sample_count=self.bayesian_config["eval_sample_count"]) - else: - self.Q = nn.FeedForwardNet('Q', self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.hidden_dim, get_uncertainty=True) - self.old_Q = nn.FeedForwardNet('old_q', self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.hidden_dim, get_uncertainty=True) - - self.policy_params = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) if "policy" in v.name] - self.Q_params = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) if "Q" in v.name] - self.agent_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) - - self.copy_to_old_ops = [tf.assign(p_old, p) for p_old, p in zip(self.old_Q.params_list, self.Q.params_list)] - self.assign_epoch_op = [tf.assign(self.epoch_n, self.epoch_n_placeholder), tf.assign(self.update_n, self.update_n_placeholder), tf.assign(self.frame_n, 
self.frame_n_placeholder), tf.assign(self.hours, self.hours_placeholder)] - - def update_epoch(self, sess, epoch, updates, frames, hours): - sess.run(self.assign_epoch_op, feed_dict={self.epoch_n_placeholder: int(epoch), self.update_n_placeholder: int(updates), self.frame_n_placeholder: int(frames), self.hours_placeholder: float(hours)}) - - def copy_to_old(self, sess): - sess.run(self.copy_to_old_ops) - - def build_evalution_graph(self, obs, get_full_info=False, mode="regular", n_samples=1): - assert mode in {"regular", "explore", "exploit"} - policy_actions_pretanh = self.policy(obs) - - if mode == "regular" or mode == "exploit": - policy_actions = tf.tanh(policy_actions_pretanh) - elif mode == "explore": - _, _, exploring_policy_actions, _ = util.tanh_sample_info(policy_actions_pretanh, tf.zeros_like(policy_actions_pretanh), n_samples=n_samples) - policy_actions = tf.where(tf.random_uniform(tf.shape(exploring_policy_actions)) < self.explore_chance, x=exploring_policy_actions, y=tf.tanh(policy_actions_pretanh)) - else: raise Exception('this should never happen') - - if get_full_info: return policy_actions_pretanh, policy_actions - else: return policy_actions - - def build_training_graph(self, obs, next_obs, empirical_actions, rewards, dones, data_size, worldmodel=None): - average_model_use = tf.constant(0.) - empirical_Q_info = tf.concat([obs, empirical_actions], 1) - - if worldmodel is None: - policy_action_pretanh, policy_actions = self.build_evalution_graph(obs, get_full_info=True) - policy_Q_info = tf.concat([obs, policy_actions], 1) - state_value_estimate = self.Q(policy_Q_info, reduce_mode="mean") - - next_policy_actions = self.build_evalution_graph(next_obs) - policy_next_Q_info = tf.concat([next_obs, next_policy_actions], 1) - next_Q_estimate = self.old_Q(policy_next_Q_info, reduce_mode="mean") - - Q_guess = self.Q(empirical_Q_info, is_eval=False, reduce_mode="random") - Q_target = rewards * self.reward_scale + self.discount * next_Q_estimate * (1. - dones) - - policy_losses = -state_value_estimate - Q_losses = .5 * tf.square( Q_guess - tf.stop_gradient(Q_target) ) - - else: - targets, confidence, Q_guesses, reach_probs = self.build_Q_expansion_graph(next_obs, rewards, dones, worldmodel, rollout_len=self.value_expansion["rollout_len"], model_ensembling=worldmodel.bayesian_config is not False) - - # targets is a 3D matrix: [batch_i, start_timestep, end_timestep]. here, we reduce out the last dimension, turning - # it into a [batch_i, start_timestep] matrix. in other words, we are taking a bunch of candidate targets and reducing - # them into a single target. the four options here correspond to the four ways to do that reduction. - if self.value_expansion["mean_k_return"]: - target_counts = self.value_expansion["rollout_len"]+1 - tf.reshape(tf.range(self.value_expansion["rollout_len"]+1), [1, self.value_expansion["rollout_len"]+1]) - k_returns = tf.reduce_sum(targets, 2) / tf.cast(target_counts, tf.float32) - elif self.value_expansion["lambda_return"]: - cont_coeffs = self.value_expansion["lambda_return"] ** tf.cast(tf.reshape(tf.range(self.value_expansion["rollout_len"]+1), [1,1,self.value_expansion["rollout_len"]+1]), tf.float32) - stop_coeffs = tf.concat([(1 - self.value_expansion["lambda_return"]) * tf.ones_like(targets)[:,:,:-1], tf.ones_like(targets)[:,:,-1:]], 2) - k_returns = tf.reduce_sum(targets * stop_coeffs * cont_coeffs, 2) - elif self.value_expansion["steve_reweight"]: - k_returns = tf.reduce_sum(targets * confidence, 2) - average_model_use = 1. 
- tf.reduce_mean(confidence[:,0,0]) - else: - # MVE objective: just take the last one - k_returns = targets[:,:,-1] - - # now we have [batch_i, start_timestep]. if we are using the TDK trick, then we want to use all of the targets, - # so we construct a corresponding [batch_i, start_timestep] matrix of guesses. otherwise, we just take the targets - # for the first timestep. - Q_guess = self.Q(empirical_Q_info, is_eval=False, reduce_mode="random") - if self.value_expansion["tdk_trick"]: - Q_guess = tf.concat([tf.expand_dims(Q_guess, 1), Q_guesses], 1) - reach_probs = tf.concat([tf.expand_dims(tf.ones_like(reach_probs[:,0]), 1), reach_probs[:,:-1]], 1) - Q_target = k_returns - else: - # non-TDK trick means we just take the first one - Q_target = k_returns[:,0] - - policy_action_pretanh, policy_actions = self.build_evalution_graph(obs, get_full_info=True) - policy_Q_info = tf.concat([obs, policy_actions], 1) - state_value_estimate = self.Q(policy_Q_info, stop_params_gradient=True, reduce_mode="mean") - - policy_losses = -state_value_estimate - Q_losses = .5 * tf.square( Q_guess - tf.stop_gradient(Q_target) ) - if self.value_expansion["tdk_trick"]: Q_losses *= reach_probs # we downscale the various TDK-trick losses by - # the likelihood of actually reaching the state - # from which the guess was made - policy_loss = tf.reduce_mean(policy_losses) - Q_loss = tf.reduce_mean(Q_losses) - policy_reg_loss = tf.reduce_mean(tf.square(policy_action_pretanh)) * .001 # a small regularization to make sure the - # tanh does not saturate - - # anything in inspect gets logged - inspect = (policy_loss, Q_loss, policy_reg_loss, average_model_use) - - return (policy_loss + policy_reg_loss, Q_loss), inspect - - - def build_Q_expansion_graph(self, obs, first_rewards, first_done, worldmodel, rollout_len=1, model_ensembling=False): - ### this sets up the machinery for having multiple parallel rollouts, each of which has a single consistent transition - ensemble_idxs, transition_sample_n, reward_sample_n = worldmodel.get_ensemble_idx_info() - q_sample_n = self.bayesian_config["eval_sample_count"] if self.bayesian_config is not False else 1 - first_rewards = tf.tile(tf.expand_dims(tf.expand_dims(first_rewards,1),1), [1,transition_sample_n,reward_sample_n]) - first_rewards.set_shape([None, transition_sample_n, reward_sample_n]) - if model_ensembling: - obs = tf.tile(tf.expand_dims(obs,1), [1,transition_sample_n,1]) - obs.set_shape([None, transition_sample_n, self.obs_dim]) - first_done = tf.tile(tf.expand_dims(first_done, 1), [1, transition_sample_n]) - first_done.set_shape([None, transition_sample_n]) - - ### below, we use a while loop to actually do the iterative model rollout - extra_info = worldmodel.init_extra_info(obs) - - action_ta = tf.TensorArray(size=rollout_len, dynamic_size=False, dtype=tf.float32) - obs_ta = tf.TensorArray(size=rollout_len, dynamic_size=False, dtype=tf.float32) - done_ta = tf.TensorArray(size=rollout_len, dynamic_size=False, dtype=tf.float32) - extra_info_ta =tf.TensorArray(size=rollout_len, dynamic_size=False, dtype=tf.float32) - - def rollout_loop_body(r_i, xxx_todo_changeme): - (obs, done, extra_info, action_ta, obs_ta, dones_ta, extra_info_ta) = xxx_todo_changeme - action_pretanh, action = self.build_evalution_graph(tf.stop_gradient(obs), get_full_info=True) - - if model_ensembling: - next_obs, next_dones, next_extra_info = worldmodel.transition(obs, action, extra_info, ensemble_idxs=ensemble_idxs) - else: - next_obs, next_dones, next_extra_info = worldmodel.transition(obs, action, 
extra_info) - next_obs = tf.reduce_mean(next_obs, -2) - next_dones = tf.reduce_mean(next_dones, -1) - - action_ta = action_ta.write(r_i, action) - obs_ta = obs_ta.write(r_i, obs) - dones_ta = dones_ta.write(r_i, done) - extra_info_ta = extra_info_ta.write(r_i, extra_info) - return r_i+1, (next_obs, next_dones, next_extra_info, action_ta, obs_ta, dones_ta, extra_info_ta) - - _, (final_obs, final_done, final_extra_info, action_ta, obs_ta, done_ta, extra_info_ta) = tf.while_loop( - lambda r_i, _: r_i < rollout_len, - rollout_loop_body, - [0, (obs, first_done, extra_info, action_ta, obs_ta, done_ta, extra_info_ta)] - ) - - final_action_pretanh, final_action = self.build_evalution_graph(tf.stop_gradient(final_obs), get_full_info=True) - - ### compile the TensorArrays into useful tensors - obss = obs_ta.stack() - obss = tf.reshape(obss, tf.stack([rollout_len, -1, transition_sample_n, self.obs_dim])) - obss = tf.transpose(obss, [1, 0, 2, 3]) - final_obs = tf.reshape(final_obs, tf.stack([-1, 1, transition_sample_n, self.obs_dim])) - all_obss = tf.concat([obss, final_obs],1) - next_obss = all_obss[:,1:] - - dones = done_ta.stack() - dones = tf.reshape(dones, tf.stack([rollout_len, -1, transition_sample_n])) - dones = tf.transpose(dones, [1, 0, 2]) - final_done = tf.reshape(final_done, tf.stack([-1, 1, transition_sample_n])) - all_dones = tf.concat([dones, final_done],1) - - actions = action_ta.stack() - actions = tf.reshape(actions, tf.stack([rollout_len, -1, transition_sample_n, self.action_dim])) - actions = tf.transpose(actions , [1, 0, 2, 3]) - final_action = tf.reshape(final_action, tf.stack([-1, 1, transition_sample_n, self.action_dim])) - all_actions = tf.concat([actions, final_action],1) - - continue_probs = tf.cumprod(1. - all_dones, axis=1) - rewards = worldmodel.get_rewards(obss, actions, next_obss) - rawrew = rewards = tf.concat([tf.expand_dims(first_rewards, 1), rewards],1) - - ### TDK trick means we have to guess at every timestep - if self.value_expansion["tdk_trick"]: - guess_info = tf.concat([obss,actions], -1) - Q_guesses = self.Q(guess_info, reduce_mode="random") - Q_guesses = tf.reduce_mean(Q_guesses, -1) # make it so there's only one guess per rollout length, which is the mean of the guesses under all the various model rollouts - reached_this_point_to_guess_prob = tf.reduce_mean(continue_probs, -1) - else: - Q_guesses = None - reached_this_point_to_guess_prob = None - - ### use the Q function at every timestep to get value estimates - target_info = tf.concat([all_obss, all_actions], -1) - Q_targets = self.old_Q(target_info, reduce_mode="none") - - rollout_frames = rollout_len + 1 # if we take N steps, we have N+1 frames - - ### create "decay-exponent matrix" of size [1,ROLLOUT_FRAMES,ROLLOUT_FRAMES,1]. the first ROLLOUT_FRAMES corresponds to the index of the source, the second to the target. - ts_count_mat = (tf.cast(tf.reshape(tf.range(rollout_frames), [1, rollout_frames]) - tf.reshape(tf.range(rollout_frames), [rollout_frames, 1]), tf.float32)) - reward_coeff_matrix = tf.matrix_band_part(tf.ones([rollout_frames, rollout_frames]), 0, -1) * self.discount ** ts_count_mat - value_coeff_matrix = tf.matrix_band_part(tf.ones([rollout_frames, rollout_frames]), 0, -1) * self.discount ** (1. 
+ ts_count_mat) - reward_coeff_matrix = tf.reshape(reward_coeff_matrix, [1, rollout_frames, rollout_frames, 1, 1]) - value_coeff_matrix = tf.reshape(value_coeff_matrix, [1, rollout_frames, rollout_frames, 1, 1]) - - ### similarly, create a "done" matrix - shifted_continue_probs = tf.concat([tf.expand_dims(tf.ones_like(continue_probs[:,0]),1), continue_probs[:,:-1]], 1) - reward_continue_matrix = tf.expand_dims(shifted_continue_probs, 1) / tf.expand_dims(shifted_continue_probs+1e-8, 2) - value_continue_matrix = tf.expand_dims(continue_probs, 1) / tf.expand_dims(shifted_continue_probs+1e-8, 2) - reward_continue_matrix = tf.expand_dims(reward_continue_matrix, -1) - value_continue_matrix = tf.expand_dims(value_continue_matrix, -1) - - ### apply the discounting factors to the rewards and values - rewards = tf.expand_dims(rewards, 1) * reward_coeff_matrix * reward_continue_matrix - rewards = tf.cumsum(rewards, axis=2) - values = tf.expand_dims(Q_targets, 1) * value_coeff_matrix * value_continue_matrix - - ### compute the targets using the Bellman equation - sampled_targets = tf.expand_dims(rewards,-2) * self.reward_scale + tf.expand_dims(values,-1) - - ### flatten out the various sources of variance (transition, reward, and Q-function ensembles) to get a set of estimates for each candidate target - sampled_targets = tf.reshape(sampled_targets, tf.stack([-1, rollout_frames, rollout_frames, transition_sample_n * reward_sample_n * q_sample_n])) - - ### compute the mean and variance for each candidate target - target_means, target_variances = tf.nn.moments(sampled_targets, 3) - - ### compute the confidence, either using the full covariance matrix, or approximating all the estimators as independent - if self.value_expansion["covariances"]: - targetdiffs = sampled_targets - tf.expand_dims(target_means,3) - target_covariances = tf.einsum("abij,abjk->abik", targetdiffs, tf.transpose(targetdiffs, [0,1,3,2])) - target_confidence = tf.squeeze(tf.matrix_solve(target_covariances + tf.expand_dims(tf.expand_dims(tf.matrix_band_part(tf.ones(tf.shape(target_covariances)[-2:]),0,0) * 1e-3,0),0), tf.ones(tf.concat([tf.shape(target_covariances)[:-1], tf.constant([1])],0))),-1) - else: - target_confidence = 1./(target_variances + 1e-8) - - ### normalize so weights sum to 1 - target_confidence *= tf.matrix_band_part(tf.ones([1, rollout_frames, rollout_frames]), 0, -1) - target_confidence = target_confidence / tf.reduce_sum(target_confidence, axis=2, keepdims=True) - - ### below here is a bunch of debugging Print statements that I use as a sanity check: - # target_confidence = tf.Print(target_confidence, [], message="raw rewards") - # target_confidence = tf.Print(target_confidence, [rawrew[0,:,0,0]], summarize=rollout_len+1) - # target_means = tf.Print(target_means, [], message="\n", summarize=rollout_len+1) - # target_means = tf.Print(target_means, [(1. 
- all_dones)[0,:,0]], message="contin", summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [continue_probs[0,:,0]], message="cum_contin", summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [shifted_continue_probs[0,:,0]], message="shifted contin", summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [], message="reward_coeff")
-        # for i in range(rollout_len+1): target_means = tf.Print(target_means, [reward_coeff_matrix[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [], message="reward_continue")
-        # for i in range(rollout_len+1): target_means = tf.Print(target_means, [reward_continue_matrix[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [], message="value_coeff")
-        # for i in range(rollout_len+1): target_means = tf.Print(target_means, [value_coeff_matrix[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [], message="value_continue")
-        # for i in range(rollout_len+1): target_means = tf.Print(target_means, [value_continue_matrix[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="rewards")
-        # for i in range(rollout_len+1): target_confidence = tf.Print(target_confidence, [rewards[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="target Qs")
-        # target_confidence = tf.Print(target_confidence, [Q_targets[0,:,0,0]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="values")
-        # for i in range(rollout_len+1): target_confidence = tf.Print(target_confidence, [values[0,i,:,0,0]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="target_means")
-        # for i in range(rollout_len+1): target_confidence = tf.Print(target_confidence, [target_means[0,i,:]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="target_variance")
-        # for i in range(rollout_len+1): target_confidence = tf.Print(target_confidence, [target_variances[0,i,:]], summarize=rollout_len+1)
-        # target_confidence = tf.Print(target_confidence, [], message="target_confidence")
-        # for i in range(rollout_len+1): target_confidence = tf.Print(target_confidence, [target_confidence[0,i,:]], summarize=rollout_len+1)
-        # target_means = tf.Print(target_means, [target_confidence, action_lls, tf.shape(Q_targets)], message="\n\n", summarize=10)
-
-        return target_means, target_confidence, Q_guesses, reached_this_point_to_guess_prob
\ No newline at end of file
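The `target_confidence` computed above is the STEVE-style uncertainty weighting: each rollout horizon proposes a candidate Bellman target, each candidate is scored by the spread of its estimates across the transition, reward, and Q-function ensembles, and the normalized inverse variances become interpolation weights, so unreliable long-horizon targets are automatically downweighted. Below is a minimal NumPy sketch of the independent-estimator branch (the `covariances` flag off); the shapes and values are toy stand-ins, not the real pipeline:

```
import numpy as np

# 5 candidate targets (one per rollout horizon), 8 ensemble estimates each.
rng = np.random.default_rng(0)
sampled_targets = rng.normal(loc=1.0,
                             scale=np.linspace(0.1, 1.0, 5)[:, None],
                             size=(5, 8))

# Per-horizon mean and variance across the ensemble dimension,
# mirroring tf.nn.moments(sampled_targets, 3) above.
target_means = sampled_targets.mean(axis=1)
target_variances = sampled_targets.var(axis=1)

# Inverse-variance confidence, normalized to sum to one, as in
# target_confidence / tf.reduce_sum(target_confidence, ...) above.
target_confidence = 1.0 / (target_variances + 1e-8)
target_confidence /= target_confidence.sum()

# Combined target: confidence-weighted average of the candidate means.
combined_target = (target_confidence * target_means).sum()
print(combined_target)  # dominated by the low-variance horizons
```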
diff --git a/research/steve/valuerl_learner.py b/research/steve/valuerl_learner.py
deleted file mode 100644
index a3c6308f8..000000000
--- a/research/steve/valuerl_learner.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-import tensorflow as tf
-import numpy as np
-import os
-
-from learner import Learner
-from valuerl import ValueRL
-from worldmodel import DeterministicWorldModel
-
-class ValueRLLearner(Learner):
-    """
-    ValueRL-specific training loop details.
-    """
-
-    def learner_name(self): return "valuerl"
-
-    def make_loader_placeholders(self):
-        self.obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])])
-        self.next_obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])])
-        self.action_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], self.env_config["action_dim"]])
-        self.reward_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]])
-        self.done_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]])
-        self.datasize_loader = tf.placeholder(tf.float64, [])
-        return [self.obs_loader, self.next_obs_loader, self.action_loader, self.reward_loader, self.done_loader, self.datasize_loader]
-
-    def make_core_model(self):
-        if self.config["model_config"] is not False:
-            self.worldmodel = DeterministicWorldModel(self.config["name"], self.env_config, self.config["model_config"])
-        else:
-            self.worldmodel = None
-
-        valuerl = ValueRL(self.config["name"], self.env_config, self.learner_config)
-        (policy_loss, Q_loss), inspect_losses = valuerl.build_training_graph(*self.current_batch, worldmodel=self.worldmodel)
-
-        policy_optimizer = tf.train.AdamOptimizer(3e-4)
-        policy_gvs = policy_optimizer.compute_gradients(policy_loss, var_list=valuerl.policy_params)
-        capped_policy_gvs = policy_gvs
-        policy_train_op = policy_optimizer.apply_gradients(capped_policy_gvs)
-
-        Q_optimizer = tf.train.AdamOptimizer(3e-4)
-        Q_gvs = Q_optimizer.compute_gradients(Q_loss, var_list=valuerl.Q_params)
-        capped_Q_gvs = Q_gvs
-        Q_train_op = Q_optimizer.apply_gradients(capped_Q_gvs)
-
-        return valuerl, (policy_loss, Q_loss), (policy_train_op, Q_train_op), inspect_losses
-
-    ## Optional functions to override
-    def initialize(self):
-        if self.config["model_config"] is not False:
-            while not self.load_worldmodel(): pass
-
-    def resume_from_checkpoint(self, epoch):
-        if self.config["model_config"] is not False:
-            with self.bonus_kwargs["model_lock"]: self.worldmodel.load(self.sess, self.save_path, epoch)
-
-    def checkpoint(self):
-        self.core.copy_to_old(self.sess)
-        if self.config["model_config"] is not False:
-            self.load_worldmodel()
-
-    def backup(self): pass
-
-    # Other functions
-    def load_worldmodel(self):
-        if not os.path.exists("%s/%s.params.index" % (self.save_path, self.worldmodel.saveid)): return False
-        with self.bonus_kwargs["model_lock"]: self.worldmodel.load(self.sess, self.save_path)
-        return True
diff --git a/research/steve/visualizer.py b/research/steve/visualizer.py
deleted file mode 100644
index 825f1a238..000000000
--- a/research/steve/visualizer.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from __future__ import print_function
-from builtins import range
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import numpy as np -import tensorflow as tf -# import moviepy.editor as mpy -import time, os, traceback, multiprocessing, portalocker, sys - -import envwrap -import util -import valuerl, worldmodel -from config import config - -MODEL_NAME = config["name"] -LOG_PATH = util.create_directory("output/" + config["env"] + "/" + MODEL_NAME + "/" + config["log_path"]) + "/" + MODEL_NAME -LOAD_PATH = util.create_directory("output/" + config["env"] + "/" + MODEL_NAME + "/" + config["save_model_path"]) -OBS_DIM = np.prod(config["obs_dims"]) -HIDDEN_DIM = config["hidden_dim"] -ACTION_DIM = config["action_dim"] -MAX_FRAMES = config["max_frames"] -REWARD_SCALE = config["reward_scale"] -DISCOUNT = config["discount"] -ALGO = config["policy_config"]["algo"] -AGENT_BATCH_SIZE = config["agent_config"]["batch_size"] -EVALUATOR_BATCH_SIZE = config["evaluator_config"]["batch_size"] -RELOAD_EVERY_N = config["agent_config"]["reload_every_n"] -FRAMES_BEFORE_LEARNING = config["policy_config"]["frames_before_learning"] -FRAMES_PER_UPDATE = config["policy_config"]["frames_per_update"] -LEARNER_EPOCH_N = config["policy_config"]["epoch_n"] -SYNC_UPDATES = config["policy_config"]["frames_per_update"] >= 0 -POLICY_BAYESIAN_CONFIG = config["policy_config"]["bayesian"] -AUX_CONFIG = config["aux_config"] -DDPG_EXPLORE_CHANCE = config["policy_config"]["explore_chance"] if ALGO == "ddpg" else 0. -MODEL_AUGMENTED = config["model_config"] is not False -if MODEL_AUGMENTED: MODEL_BAYESIAN_CONFIG = config["model_config"]["bayesian"] - -FILENAME = sys.argv[3] - -if __name__ == '__main__': - oprl = valuerl.ValueRL(MODEL_NAME, ALGO, OBS_DIM, ACTION_DIM, HIDDEN_DIM, REWARD_SCALE, DISCOUNT, POLICY_BAYESIAN_CONFIG, AUX_CONFIG, DDPG_EXPLORE_CHANCE) - - obs_loader = tf.placeholder(tf.float32, [1, OBS_DIM]) - policy_actions, _ = oprl.build_evalution_graph(obs_loader, mode="exploit") - - if MODEL_AUGMENTED: - next_obs_loader = tf.placeholder(tf.float32, [1, OBS_DIM]) - reward_loader = tf.placeholder(tf.float32, [1]) - done_loader = tf.placeholder(tf.float32, [1]) - worldmodel = worldmodel.DeterministicWorldModel(MODEL_NAME, OBS_DIM, ACTION_DIM, HIDDEN_DIM, REWARD_SCALE, DISCOUNT, MODEL_BAYESIAN_CONFIG) - _, _, _, _, _, confidence, _ = oprl.build_Q_expansion_graph(next_obs_loader, reward_loader, done_loader, worldmodel, rollout_len=3, model_ensembling=True) - - sess = tf.Session() - sess.run(tf.global_variables_initializer()) - - oprl.load(sess, FILENAME) - if MODEL_AUGMENTED: worldmodel.load(sess, FILENAME) - - env = envwrap.get_env(config["env"]) - - hist = np.zeros([4, 10]) - for _ in range(10): - ts = 0 - rgb_frames = [] - obs, reward, done, reset = env.reset(), 0, False, False - while not reset: - # env.internal_env.render() - # rgb_frames.append(env.internal_env.render(mode='rgb_array')) - # action = env.action_space.sample() - all_actions = sess.run(policy_actions, feed_dict={obs_loader: np.array([obs])}) - all_actions = np.clip(all_actions, -1., 1.) 
- action = all_actions[0] - obs, _reward, done, reset = env.step(action) - - if MODEL_AUGMENTED: - _confidences = sess.run(confidence, feed_dict={next_obs_loader: np.expand_dims(obs,0), - reward_loader: np.expand_dims(_reward,0), - done_loader: np.expand_dims(done,0)}) - # print "%.02f %.02f %.02f %.02f" % tuple(_confidences[0,0]) - for h in range(4): - bucket = int((_confidences[0,0,h]-1e-5)*10) - hist[h,bucket] += 1 - - reward += _reward - ts += 1 - # print ts, _reward, reward - print(ts, reward) - hist /= np.sum(hist, axis=1, keepdims=True) - for row in reversed(hist.T): print(' '.join(["%.02f"] * 4) % tuple(row)) - - #clip = mpy.ImageSequenceClip(rgb_frames, fps=100) - #clip.write_videofile(FILENAME + "/movie.mp4") - - diff --git a/research/steve/worldmodel.py b/research/steve/worldmodel.py deleted file mode 100644 index 613bc6cb3..000000000 --- a/research/steve/worldmodel.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import tensorflow as tf -import numpy as np -import nn - -from learner import CoreModel - -class DeterministicWorldModel(CoreModel): - """ - A simple feed-forward neural network world model, with an option for an ensemble. 
- """ - - @property - def saveid(self): - return "worldmodel" - - def create_params(self, env_config, learner_config): - self.obs_dim = np.prod(env_config["obs_dims"]) - self.action_dim = env_config["action_dim"] - self.reward_scale = env_config["reward_scale"] - self.discount = env_config["discount"] - - self.aux_hidden_dim = self.learner_config["aux_hidden_dim"] - self.transition_hidden_dim = self.learner_config["transition_hidden_dim"] - self.bayesian_config = self.learner_config["bayesian"] - - with tf.variable_scope(self.name): - if self.bayesian_config: - self.transition_predictor = nn.EnsembleFeedForwardNet('transition_predictor', self.obs_dim + self.action_dim, [self.obs_dim], layers=8, hidden_dim=self.transition_hidden_dim, get_uncertainty=True, ensemble_size=self.bayesian_config["transition"]["ensemble_size"], train_sample_count=self.bayesian_config["transition"]["train_sample_count"], eval_sample_count=self.bayesian_config["transition"]["eval_sample_count"]) - self.done_predictor = nn.EnsembleFeedForwardNet('done_predictor', self.obs_dim + self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True, ensemble_size=self.bayesian_config["transition"]["ensemble_size"], train_sample_count=self.bayesian_config["transition"]["train_sample_count"], eval_sample_count=self.bayesian_config["transition"]["eval_sample_count"]) - self.reward_predictor = nn.EnsembleFeedForwardNet('reward_predictor', self.obs_dim + self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True, ensemble_size=self.bayesian_config["reward"]["ensemble_size"], train_sample_count=self.bayesian_config["reward"]["train_sample_count"], eval_sample_count=self.bayesian_config["reward"]["eval_sample_count"]) - else: - self.transition_predictor = nn.FeedForwardNet('transition_predictor', self.obs_dim + self.action_dim, [self.obs_dim], layers=8, hidden_dim=self.transition_hidden_dim, get_uncertainty=True) - self.done_predictor = nn.FeedForwardNet('done_predictor', self.obs_dim + self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True) - self.reward_predictor = nn.FeedForwardNet('reward_predictor', self.obs_dim + self.obs_dim + self.action_dim, [], layers=4, hidden_dim=self.aux_hidden_dim, get_uncertainty=True) - - def get_ensemble_idx_info(self): - if self.bayesian_config is not False: - ensemble_idxs = tf.random_shuffle(tf.range(self.transition_predictor.ensemble_size)) - transition_ensemble_sample_n = self.transition_predictor.eval_sample_count - reward_ensemble_sample_n = self.reward_predictor.eval_sample_count - ensemble_idxs = ensemble_idxs[:transition_ensemble_sample_n] - return ensemble_idxs, transition_ensemble_sample_n, reward_ensemble_sample_n - else: - return None, 1, 1 - - def build_training_graph(self, obs, next_obs, actions, rewards, dones, data_size): - info = tf.concat([obs, actions], -1) - predicted_next_obs = self.transition_predictor(info, is_eval=False, reduce_mode="random") + obs - next_info = tf.concat([next_obs, info], -1) - predicted_dones = self.done_predictor(next_info, is_eval=False, reduce_mode="random") - predicted_rewards = self.reward_predictor(next_info, is_eval=False, reduce_mode="random") - - done_losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=dones, logits=predicted_dones) - reward_losses = .5 * tf.square(rewards - predicted_rewards) - next_obs_losses = .5 * tf.reduce_sum(tf.square(next_obs - predicted_next_obs), -1) - - done_loss = tf.reduce_mean(done_losses) - 
reward_loss = tf.reduce_mean(reward_losses) - next_obs_loss = tf.reduce_mean(next_obs_losses) - reg_loss = .0001 * (self.done_predictor.l2_loss() + - self.reward_predictor.l2_loss() + - self.transition_predictor.l2_loss()) - - total_loss = done_loss + reward_loss + next_obs_loss + reg_loss - - inspect = (total_loss, done_loss, reward_loss, next_obs_loss, reg_loss) - - return total_loss, inspect - - def init_extra_info(self, obs): - return tf.zeros_like(obs) - - def transition(self, obs, action, extra_info, ensemble_idxs=None, pre_expanded=None): - info = tf.concat([obs, action], -1) - next_obs_delta = self.transition_predictor(info, reduce_mode="none", ensemble_idxs=ensemble_idxs, pre_expanded=pre_expanded) - if ensemble_idxs is None: - next_obs = tf.expand_dims(obs,-2) + next_obs_delta - next_info = tf.concat([next_obs, tf.expand_dims(info,-2)], -1) - else: - next_obs = obs + next_obs_delta - next_info = tf.concat([next_obs, info], -1) - done = tf.nn.sigmoid(self.done_predictor(next_info, reduce_mode="none", ensemble_idxs=ensemble_idxs, pre_expanded=True)) - extra_info = tf.zeros_like(obs) - return next_obs, done, extra_info - - def get_rewards(self, obs, action, next_obs): - next_info = tf.concat([next_obs, obs, action], -1) - reward = self.reward_predictor(next_info, reduce_mode="none") - return reward \ No newline at end of file diff --git a/research/steve/worldmodel_learner.py b/research/steve/worldmodel_learner.py deleted file mode 100644 index c36a50f6a..000000000 --- a/research/steve/worldmodel_learner.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -import tensorflow as tf -import numpy as np -from learner import Learner -from worldmodel import DeterministicWorldModel - -class WorldmodelLearner(Learner): - """ - Worldmodel-specific training loop details. 
- """ - def learner_name(self): return "worldmodel" - - def make_loader_placeholders(self): - self.obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])]) - self.next_obs_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], np.prod(self.env_config["obs_dims"])]) - self.action_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"], self.env_config["action_dim"]]) - self.reward_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]]) - self.done_loader = tf.placeholder(tf.float32, [self.learner_config["batch_size"]]) - self.datasize_loader = tf.placeholder(tf.float64, []) - return [self.obs_loader, self.next_obs_loader, self.action_loader, self.reward_loader, self.done_loader, self.datasize_loader] - - def make_core_model(self): - worldmodel = DeterministicWorldModel(self.config["name"], self.env_config, self.learner_config) - worldmodel_loss, inspect_losses = worldmodel.build_training_graph(*self.current_batch) - - model_optimizer = tf.train.AdamOptimizer(3e-4) - model_gvs = model_optimizer.compute_gradients(worldmodel_loss, var_list=worldmodel.model_params) - capped_model_gvs = model_gvs - worldmodel_train_op = model_optimizer.apply_gradients(capped_model_gvs) - - return worldmodel, (worldmodel_loss,), (worldmodel_train_op,), inspect_losses - - ## Optional functions to override - def initialize(self): pass - def resume_from_checkpoint(self, epoch): pass - def checkpoint(self): pass - def backup(self): pass - - - - diff --git a/research/street/README.md b/research/street/README.md deleted file mode 100644 index fc2c4d01c..000000000 --- a/research/street/README.md +++ /dev/null @@ -1,268 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# StreetView Tensorflow Recurrent End-to-End Transcription (STREET) Model. - -A TensorFlow implementation of the STREET model described in the paper: - -"End-to-End Interpretation of the French Street Name Signs Dataset" - -Raymond Smith, Chunhui Gu, Dar-Shyang Lee, Huiyi Hu, Ranjith -Unnikrishnan, Julian Ibarz, Sacha Arnoud, Sophia Lin. - -*International Workshop on Robust Reading, Amsterdam, 9 October 2016.* - -Available at: http://link.springer.com/chapter/10.1007%2F978-3-319-46604-0_30 - - -## Contact -***Author:*** Ray Smith (rays@google.com). - -***Pull requests and issues:*** @theraysmith. - -## Contents -* [Introduction](#introduction) -* [Installing and setting up the STREET model](#installing-and-setting-up-the-street-model) -* [Downloading the datasets](#downloading-the-datasets) -* [Confidence Tests](#confidence-tests) -* [Training a model](#training-a-model) -* [The Variable Graph Specification Language](#the-variable-graph-specification-language) - -## Introduction - -The *STREET* model is a deep recurrent neural network that learns how to -identify the name of a street (in France) from an image containing upto four -different views of the street name sign. The model merges information from the -different views and normalizes the text to the correct format. 
-
-![Example image](g3doc/avdessapins.png)
-
-Avenue des Sapins
-
-
-## Installing and setting up the STREET model
-[Install TensorFlow](https://www.tensorflow.org/install/)
-
-Install numpy:
-
-```
-sudo pip install numpy
-```
-
-Build the LSTM op:
-
-```
-cd cc
-TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
-g++ -std=c++11 -shared rnn_ops.cc -o rnn_ops.so -fPIC -I $TF_INC -O3 -mavx
-```
-
-(Note: if running on Mac, add `-undefined dynamic_lookup` to your `g++` command.
-If you are running a newer version of gcc, you may also need to add
-`-D_GLIBCXX_USE_CXX11_ABI=0`.)
-
-Run the unit tests:
-
-```
-cd ../python
-python decoder_test.py
-python errorcounter_test.py
-python shapes_test.py
-python vgslspecs_test.py
-python vgsl_model_test.py
-```
-
-## Downloading the datasets
-
-The French Street Name Signs (FSNS) dataset is split into subsets, each
-of which is composed of multiple files.
-Note that these datasets are very large. The approximate sizes are:
-
-* Train: 512 files of 300MB each.
-* Validation: 64 files of 40MB each.
-* Test: 64 files of 50MB each.
-* Testdata: some smaller data files of a few MB for testing.
-* Total: ~158 GB.
-
-Here is a list of the download paths:
-
-```
-https://download.tensorflow.org/data/fsns-20160927/charset_size=134.txt
-https://download.tensorflow.org/data/fsns-20160927/test/test-00000-of-00064
-...
-https://download.tensorflow.org/data/fsns-20160927/test/test-00063-of-00064
-https://download.tensorflow.org/data/fsns-20160927/testdata/arial-32-00000-of-00001
-https://download.tensorflow.org/data/fsns-20160927/testdata/fsns-00000-of-00001
-https://download.tensorflow.org/data/fsns-20160927/testdata/mnist-sample-00000-of-00001
-https://download.tensorflow.org/data/fsns-20160927/testdata/numbers-16-00000-of-00001
-https://download.tensorflow.org/data/fsns-20160927/train/train-00000-of-00512
-...
-https://download.tensorflow.org/data/fsns-20160927/train/train-00511-of-00512
-https://download.tensorflow.org/data/fsns-20160927/validation/validation-00000-of-00064
-...
-https://download.tensorflow.org/data/fsns-20160927/validation/validation-00063-of-00064
-```
-
-All URLs are stored in the text file `python/fsns_urls.txt`; to download them in
-parallel:
-
-```
-aria2c -c -j 20 -i fsns_urls.txt
-```
-If you Ctrl+C and re-execute the command it will continue the aborted download.
-
-
-## Confidence Tests
-
-The dataset download includes a directory `testdata` that contains some small
-datasets that are big enough to test that models can actually learn something.
-Assuming that you have put the downloads in directory `data` alongside
-`python`, you can run the following tests:
-
-### MNIST for zero-dimensional data
-
-```
-cd python
-train_dir=/tmp/mnist
-rm -rf $train_dir
-python vgsl_train.py --model_str='16,0,0,1[Ct5,5,16 Mp3,3 Lfys32 Lfxs64]O0s12' \
-  --max_steps=1024 --train_data=../data/testdata/mnist-sample-00000-of-00001 \
-  --initial_learning_rate=0.001 --final_learning_rate=0.001 \
-  --num_preprocess_threads=1 --train_dir=$train_dir
-python vgsl_eval.py --model_str='16,0,0,1[Ct5,5,16 Mp3,3 Lfys32 Lfxs64]O0s12' \
-  --num_steps=256 --eval_data=../data/testdata/mnist-sample-00000-of-00001 \
-  --num_preprocess_threads=1 --decoder=../testdata/numbers.charset_size=12.txt \
-  --eval_interval_secs=0 --train_dir=$train_dir --eval_dir=$train_dir/eval
-```
-
-Depending on your machine, this should run in about 1 minute, and should obtain
-error rates below 50%. Actual error rates will vary according to random
-initialization.
-
-### Fixed-length targets for number recognition
-
-```
-cd python
-train_dir=/tmp/fixed
-rm -rf $train_dir
-python vgsl_train.py --model_str='8,16,0,1[S1(1x16)1,3 Lfx32 Lrx32 Lfx32]O1s12' \
-  --max_steps=3072 --train_data=../data/testdata/numbers-16-00000-of-00001 \
-  --initial_learning_rate=0.001 --final_learning_rate=0.001 \
-  --num_preprocess_threads=1 --train_dir=$train_dir
-python vgsl_eval.py --model_str='8,16,0,1[S1(1x16)1,3 Lfx32 Lrx32 Lfx32]O1s12' \
-  --num_steps=256 --eval_data=../data/testdata/numbers-16-00000-of-00001 \
-  --num_preprocess_threads=1 --decoder=../testdata/numbers.charset_size=12.txt \
-  --eval_interval_secs=0 --train_dir=$train_dir --eval_dir=$train_dir/eval
-```
-
-Depending on your machine, this should run in about 1-2 minutes, and should
-obtain a label error rate between 50 and 80%, with word error rates probably
-not coming below 100%. Actual error rates will vary
-according to random initialization.
-
-### OCR-style data with CTC
-
-```
-cd python
-train_dir=/tmp/ctc
-rm -rf $train_dir
-python vgsl_train.py --model_str='1,32,0,1[S1(1x32)1,3 Lbx100]O1c105' \
-  --max_steps=4096 --train_data=../data/testdata/arial-32-00000-of-00001 \
-  --initial_learning_rate=0.001 --final_learning_rate=0.001 \
-  --num_preprocess_threads=1 --train_dir=$train_dir &
-python vgsl_eval.py --model_str='1,32,0,1[S1(1x32)1,3 Lbx100]O1c105' \
-  --num_steps=256 --eval_data=../data/testdata/arial-32-00000-of-00001 \
-  --num_preprocess_threads=1 --decoder=../testdata/arial.charset_size=105.txt \
-  --eval_interval_secs=15 --train_dir=$train_dir --eval_dir=$train_dir/eval &
-tensorboard --logdir=$train_dir
-```
-
-Depending on your machine, the background training should run for about 3-4
-minutes, and should obtain a label error rate between 10 and 50%, with
-correspondingly higher word error rates and even higher sequence error rates.
-Actual error rates will vary according to random initialization.
-The background eval will run forever, and will have to be terminated by hand.
-The tensorboard command will run a visualizer that can be viewed with a
-browser. Go to the link that it prints to view tensorboard and see the
-training progress. See the [Tensorboard](https://www.tensorflow.org/versions/r0.10/how_tos/summaries_and_tensorboard/index.html)
-introduction for more information.
-
-
-### Mini FSNS dataset
-
-You can test the actual STREET model on a small FSNS data set. The model will
-overfit to this small dataset, but will give some confidence that everything
-is working correctly. *Note* that this test runs the training and evaluation
-in parallel, which is something that you should do when training any substantial
-system, so you can monitor progress.
-
-```
-cd python
-train_dir=/tmp/fsns
-rm -rf $train_dir
-python vgsl_train.py --max_steps=10000 --num_preprocess_threads=1 \
-  --train_data=../data/testdata/fsns-00000-of-00001 \
-  --initial_learning_rate=0.0001 --final_learning_rate=0.0001 \
-  --train_dir=$train_dir &
-python vgsl_eval.py --num_steps=256 --num_preprocess_threads=1 \
-  --eval_data=../data/testdata/fsns-00000-of-00001 \
-  --decoder=../testdata/charset_size=134.txt \
-  --eval_interval_secs=300 --train_dir=$train_dir --eval_dir=$train_dir/eval &
-tensorboard --logdir=$train_dir
-```
-
-Depending on your machine, the training should finish in about 1-2 *hours*.
-As with the CTC test set above, the eval and tensorboard will have to be
-terminated manually.
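For a rough guide to reading the `--model_str` specs used in these tests (the [vgslspecs](g3doc/vgslspecs.md) document is the normative reference), the MNIST spec `16,0,0,1[Ct5,5,16 Mp3,3 Lfys32 Lfxs64]O0s12` decodes approximately as:

* `16,0,0,1`: input batches of 16 images with variable height and width, depth 1.
* `Ct5,5,16`: 5x5 convolution with tanh non-linearity and 16 output channels.
* `Mp3,3`: 3x3 max-pooling.
* `Lfys32`: forward LSTM of 32 units scanning the y-dimension, summarizing (keeping only the final output).
* `Lfxs64`: forward LSTM of 64 units scanning the x-dimension, summarizing.
* `O0s12`: 0-dimensional softmax output over 12 classes.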
-
-## Training a full FSNS model
-
-After running the tests above, you are ready to train the real thing!
-*Note* that you might want to use a `train_dir` somewhere other than `/tmp` as
-you can stop the training, reboot if needed and continue if you keep the
-data intact, but `/tmp` gets deleted on a reboot.
-
-```
-cd python
-train_dir=/tmp/fsns
-rm -rf $train_dir
-python vgsl_train.py --max_steps=100000000 --train_data=../data/train/train* \
-  --train_dir=$train_dir &
-python vgsl_eval.py --num_steps=1000 \
-  --eval_data=../data/validation/validation* \
-  --decoder=../testdata/charset_size=134.txt \
-  --eval_interval_secs=300 --train_dir=$train_dir --eval_dir=$train_dir/eval &
-tensorboard --logdir=$train_dir
-```
-
-Training will take a very long time (probably many weeks) to reach minimum
-error rate on a single machine, although it will probably take substantially
-fewer iterations than with parallel training. Faster training can be obtained
-with parallel training on a cluster.
-Since the setup is likely to be very site-specific, please see the TensorFlow
-documentation on
-[Distributed TensorFlow](https://www.tensorflow.org/versions/r0.10/how_tos/distributed/index.html)
-for more information. Some code changes may be needed in the `Train` function
-in `vgsl_model.py`.
-
-With 40 parallel training workers, nearly optimal error rates (about 25%
-sequence error on the validation set) are obtained in about 30 million steps,
-although the error continues to fall slightly over the next 30 million, to
-perhaps as low as 23%.
-
-With a single machine the number of steps could be substantially lower.
-Although untested on this problem, on other problems the ratio is typically
-5 to 1, so low error rates could be obtained as soon as 6 million iterations,
-which could be reached in about 4 weeks.
-
-
-## The Variable Graph Specification Language
-
-The STREET model makes use of a graph specification language (VGSL) that
-enables rapid experimentation with different model architectures. The language
-defines a TensorFlow graph that can be used to process images of variable sizes
-to output a 1-dimensional sequence, like a transcription/OCR problem, or a
-0-dimensional label, as for image identification problems. For more information
-see [vgslspecs](g3doc/vgslspecs.md).
diff --git a/research/street/cc/rnn_ops.cc b/research/street/cc/rnn_ops.cc
deleted file mode 100644
index 8e004d91c..000000000
--- a/research/street/cc/rnn_ops.cc
+++ /dev/null
@@ -1,538 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// OpKernel of LSTM Neural Networks:
-//
-//   LSTM: VariableLSTMOp (VariableLSTMGradOp)
-//
-// where (.*) are the ops to compute gradients for the corresponding ops.
-
-#define EIGEN_USE_THREADS
-
-#include <vector>
-#ifdef GOOGLE_INCLUDES
-#include "third_party/eigen3/Eigen/Core"
-#include "third_party/tensorflow/core/framework/op.h"
-#include "third_party/tensorflow/core/framework/op_kernel.h"
-#include "third_party/tensorflow/core/framework/tensor.h"
-#else
-#include "Eigen/Core"
-#include "tensorflow/core/framework/op.h"
-#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/tensor.h"
-#endif  // GOOGLE_INCLUDES
-
-namespace tensorflow {
-
-using Eigen::array;
-using Eigen::DenseIndex;
-using IndexPair = Eigen::IndexPair<DenseIndex>;
-
-Status AreDimsEqual(int dim1, int dim2, const string& message) {
-  if (dim1 != dim2) {
-    return errors::InvalidArgument(message, ": ", dim1, " vs. ", dim2);
-  }
-  return Status::OK();
-}
-
-// ------------------------------- VariableLSTMOp -----------------------------
-
-// Kernel to compute the forward propagation of a Long Short-Term Memory
-// network. See the doc of the op below for more detail.
-class VariableLSTMOp : public OpKernel {
- public:
-  explicit VariableLSTMOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
-    OP_REQUIRES_OK(ctx, ctx->GetAttr("clip", &clip_));
-    OP_REQUIRES(
-        ctx, clip_ >= 0.0,
-        errors::InvalidArgument("clip_ needs to be equal or greater than 0"));
-  }
-
-  void Compute(OpKernelContext* ctx) override {
-    // Inputs.
-    const auto input = ctx->input(0).tensor<float, 4>();
-    const auto initial_state = ctx->input(1).tensor<float, 2>();
-    const auto initial_memory = ctx->input(2).tensor<float, 2>();
-    const auto w_m_m = ctx->input(3).tensor<float, 3>();
-    const int batch_size = input.dimension(0);
-    const int seq_len = input.dimension(1);
-    const int output_dim = input.dimension(3);
-
-    // Sanity checks.
-    OP_REQUIRES_OK(ctx, AreDimsEqual(4, input.dimension(2), "Input num"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, initial_state.dimension(0),
-                                     "State batch"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, initial_state.dimension(1), "State dim"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, initial_memory.dimension(0),
-                                     "Memory batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, initial_memory.dimension(1),
-                                     "Memory dim"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, w_m_m.dimension(0), "Weight dim 0"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(4, w_m_m.dimension(1), "Weight dim 1"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, w_m_m.dimension(2), "Weight dim 2"));
-
-    // Outputs.
-    Tensor* act_tensor = nullptr;
-    OP_REQUIRES_OK(ctx, ctx->allocate_output(
-                            0, {batch_size, seq_len, output_dim}, &act_tensor));
-    auto act = act_tensor->tensor<float, 3>();
-    act.setZero();
-
-    Tensor* gate_raw_act_tensor = nullptr;
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_output(1, {batch_size, seq_len, 4, output_dim},
-                                        &gate_raw_act_tensor));
-    auto gate_raw_act = gate_raw_act_tensor->tensor<float, 4>();
-    gate_raw_act.setZero();
-
-    Tensor* memory_tensor = nullptr;
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_output(2, {batch_size, seq_len, output_dim},
-                                        &memory_tensor));
-    auto memory = memory_tensor->tensor<float, 3>();
-    memory.setZero();
-
-    // Const and scratch tensors.
-    Tensor ones_tensor;
-    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {batch_size, output_dim},
-                                           &ones_tensor));
-    auto ones = ones_tensor.tensor<float, 2>();
-    ones.setConstant(1.0);
-
-    Tensor state_tensor;
-    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {batch_size, output_dim},
-                                           &state_tensor));
-    auto state = state_tensor.tensor<float, 2>();
-    state = initial_state;
-
-    Tensor scratch_tensor;
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_temp(DT_FLOAT, {batch_size, 4, output_dim},
-                                      &scratch_tensor));
-    auto scratch = scratch_tensor.tensor<float, 3>();
-    scratch.setZero();
-
-    // Uses the most efficient order for the contraction depending on the batch
-    // size.
-
-    // This is the code shared by both cases. It is discouraged to use the
-    // implicit capture with lambda functions, but it should be clear what is
-    // done here.
-    auto Forward = [&](int i) {
-      // Each pre-activation value is stored in the following order (See the
-      // comment of the op for the meaning):
-      //
-      //   i: 0
-      //   j: 1
-      //   f: 2
-      //   o: 3
-
-      // Adds one to the pre-activation values of the forget gate. This is a
-      // heuristic to make the training easier.
-      scratch.chip(2, 1) += ones;
-
-      gate_raw_act.chip(i, 1) = scratch;
-
-      // c_t = f_t * c_{t-1} + i_t * j_t
-      if (i == 0) {
-        state = initial_memory * scratch.chip(2, 1).sigmoid();
-      } else {
-        state = memory.chip(i - 1, 1) * scratch.chip(2, 1).sigmoid();
-      }
-      state += scratch.chip(0, 1).sigmoid() * scratch.chip(1, 1).tanh();
-
-      if (clip_ > 0.0) {
-        // Clips the values if required.
-        state = state.cwiseMax(-clip_).cwiseMin(clip_);
-      }
-
-      memory.chip(i, 1) = state;
-
-      // h_t = o_t * tanh(c_t)
-      state = scratch.chip(3, 1).sigmoid() * state.tanh();
-
-      act.chip(i, 1) = state;
-    };
-    if (batch_size == 1) {
-      // Reshapes the weight tensor so the recurrence becomes a plain matrix
-      // multiplication, which is more efficient.
-      auto w_m_m_r =
-          w_m_m.reshape(array<DenseIndex, 2>{output_dim, 4 * output_dim});
-      // Dimensions for the contraction.
-      const array<IndexPair, 1> m_m_dim = {IndexPair(1, 0)};
-      for (int i = 0; i < seq_len; ++i) {
-        // Computes the pre-activation value of the input and each gate.
-        scratch = input.chip(i, 1) +
-                  state.contract(w_m_m_r, m_m_dim)
-                      .reshape(array<DenseIndex, 3>{batch_size, 4, output_dim});
-        Forward(i);
-      }
-    } else {
-      // Shuffles the dimensions of the weight tensor to be efficient when used
-      // in the left-hand side. Allocates memory for the shuffled tensor for
-      // efficiency.
-      Tensor w_m_m_s_tensor;
-      OP_REQUIRES_OK(ctx,
-                     ctx->allocate_temp(DT_FLOAT, {output_dim * 4, output_dim},
-                                        &w_m_m_s_tensor));
-      auto w_m_m_s = w_m_m_s_tensor.tensor<float, 2>();
-      w_m_m_s = w_m_m.shuffle(array<DenseIndex, 3>{2, 1, 0})
-                    .reshape(array<DenseIndex, 2>{output_dim * 4, output_dim});
-      // Dimensions for the contraction.
-      const array<IndexPair, 1> m_m_dim = {IndexPair(1, 1)};
-      for (int i = 0; i < seq_len; ++i) {
-        // Computes the pre-activation value of the input and each gate.
-        scratch = input.chip(i, 1) +
-                  w_m_m_s.contract(state, m_m_dim)
-                      .reshape(array<DenseIndex, 3>{output_dim, 4, batch_size})
-                      .shuffle(array<DenseIndex, 3>{2, 1, 0});
-        Forward(i);
-      }
-    }
-  }
-
- private:
-  // Threshold to clip the values of memory cells.
-  float clip_ = 0;
-};
-
-REGISTER_KERNEL_BUILDER(Name("VariableLSTM").Device(DEVICE_CPU),
-                        VariableLSTMOp);
-REGISTER_OP("VariableLSTM")
-    .Attr("clip: float = 0.0")
-    .Input("input: float32")
-    .Input("initial_state: float32")
-    .Input("initial_memory: float32")
-    .Input("w_m_m: float32")
-    .Output("activation: float32")
-    .Output("gate_raw_act: float32")
-    .Output("memory: float32")
-    .Doc(R"doc(
-Computes the forward propagation of a Long Short-Term Memory Network.
-
-It computes the following equation recursively for `0 < t <= T`:
-
-  i_t = sigmoid(a_{i,t})
-  j_t = tanh(a_{j,t})
-  f_t = sigmoid(a_{f,t})
-  o_t = sigmoid(a_{o,t})
-  c_t = f_t * c_{t-1} + i_t * j_t
-  c'_t = min(max(c_t, -clip), clip) if clip > 0 else c_t
-  h_t = o_t * tanh(c'_t)
-
-where
-
-  a_{l,t} = w_{l,m,m} * h_{t-1} + x'_{l,t}
-
-where
-
-  x'_{l,t} = w_{l,m,i} * x_{t}.
-
-`input` corresponds to the concatenation of `X'_i`, `X'_j`, `X'_f`, and `X'_o`
-where `X'_l = (x'_{l,1}, x'_{l,2}, ..., x'_{l,T})`, `initial_state` corresponds
-to `h_{0}`, `initial_memory` corresponds to `c_{0}` and `weight` corresponds to
-`w_{l,m,m}`. `X'_l` (the transformed input) is computed outside of the op in
-advance, so w_{l,m,i} is not passed in to the op.
-
-`activation` corresponds to `H = (h_1, h_2, ..., h_T)`, `gate_raw_activation`
-corresponds to the concatenation of `A_i`, `A_j`, `A_f` and `A_o`, and `memory`
-corresponds to `C = (c_0, c_1, ..., c_T)`.
-
-All entries in the batch are propagated to the end, and are assumed to be the
-same length.
-
-input: 4-D with shape `[batch_size, seq_len, 4, num_nodes]`
-initial_state: 2-D with shape `[batch_size, num_nodes]`
-initial_memory: 2-D with shape `[batch_size, num_nodes]`
-w_m_m: 3-D with shape `[num_nodes, 4, num_nodes]`
-activation: 3-D with shape `[batch_size, seq_len, num_nodes]`
-gate_raw_act: 4-D with shape `[batch_size, seq_len, 4, num_nodes]`
-memory: 3-D with shape `[batch_size, seq_len, num_nodes]`
-)doc");
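For readers skimming the kernel, the recurrence that `VariableLSTM` documents is small enough to restate in NumPy. The sketch below is not part of the original file; it follows the shapes in the doc string above, and the `+ 1.0` on the forget gate mirrors the pre-activation heuristic applied inside `Forward` rather than the equations as literally written:

```
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def variable_lstm_reference(x, h0, c0, w_m_m, clip=0.0):
    """x: [batch, seq_len, 4, n] pre-transformed input in (i, j, f, o) order;
    h0, c0: [batch, n]; w_m_m: [n, 4, n]. Returns the activations H."""
    batch, seq_len, _, n = x.shape
    h, c = h0, c0
    hs = np.zeros((batch, seq_len, n))
    for t in range(seq_len):
        # a_{l,t} = w_{l,m,m} * h_{t-1} + x'_{l,t}
        a = x[:, t] + np.einsum('bm,mln->bln', h, w_m_m)
        i = sigmoid(a[:, 0])
        j = np.tanh(a[:, 1])
        f = sigmoid(a[:, 2] + 1.0)   # forget-gate bias heuristic from the kernel
        o = sigmoid(a[:, 3])
        c = f * c + i * j            # c_t = f_t * c_{t-1} + i_t * j_t
        if clip > 0.0:
            c = np.clip(c, -clip, clip)
        h = o * np.tanh(c)           # h_t = o_t * tanh(c'_t)
        hs[:, t] = h
    return hs
```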
-
-// ----------------------------- VariableLSTMGradOp ----------------------------
-
-// Kernel to compute the gradient of VariableLSTMOp.
-class VariableLSTMGradOp : public OpKernel {
- public:
-  explicit VariableLSTMGradOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
-
-  void Compute(OpKernelContext* ctx) override {
-    // Inputs.
-    const auto initial_state = ctx->input(0).tensor<float, 2>();
-    const auto initial_memory = ctx->input(1).tensor<float, 2>();
-    const auto w_m_m = ctx->input(2).tensor<float, 3>();
-    const auto act = ctx->input(3).tensor<float, 3>();
-    const auto gate_raw_act = ctx->input(4).tensor<float, 4>();
-    const auto memory = ctx->input(5).tensor<float, 3>();
-    const auto act_grad = ctx->input(6).tensor<float, 3>();
-    const auto gate_raw_act_grad = ctx->input(7).tensor<float, 4>();
-    const auto memory_grad = ctx->input(8).tensor<float, 3>();
-    const int batch_size = act.dimension(0);
-    const int seq_len = act.dimension(1);
-    const int output_dim = act.dimension(2);
-
-    // Sanity checks.
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, initial_state.dimension(0),
-                                     "State batch"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, initial_state.dimension(1), "State dim"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, initial_memory.dimension(0),
-                                     "Memory batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, initial_memory.dimension(1),
-                                     "Memory dim"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, w_m_m.dimension(0), "Weight dim 0"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(4, w_m_m.dimension(1), "Weight dim 1"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(output_dim, w_m_m.dimension(2), "Weight dim 2"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, gate_raw_act.dimension(0),
-                                     "Gate raw activation batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(seq_len, gate_raw_act.dimension(1),
-                                     "Gate raw activation len"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(4, gate_raw_act.dimension(2),
-                                     "Gate raw activation num"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, gate_raw_act.dimension(3),
-                                     "Gate raw activation dim"));
-    OP_REQUIRES_OK(
-        ctx, AreDimsEqual(batch_size, memory.dimension(0), "Memory batch"));
-    OP_REQUIRES_OK(ctx,
-                   AreDimsEqual(seq_len, memory.dimension(1), "Memory len"));
-    OP_REQUIRES_OK(ctx,
-                   AreDimsEqual(output_dim, memory.dimension(2), "Memory dim"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, act_grad.dimension(0),
-                                     "Activation gradient batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(seq_len, act_grad.dimension(1),
-                                     "Activation gradient len"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, act_grad.dimension(2),
-                                     "Activation gradient dim"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, gate_raw_act_grad.dimension(0),
-                                     "Activation gradient batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(seq_len, gate_raw_act_grad.dimension(1),
-                                     "Activation gradient len"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(4, gate_raw_act_grad.dimension(2),
-                                     "Activation gradient num"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, gate_raw_act_grad.dimension(3),
-                                     "Activation gradient dim"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(batch_size, memory_grad.dimension(0),
-                                     "Memory gradient batch"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(seq_len, memory_grad.dimension(1),
-                                     "Memory gradient len"));
-    OP_REQUIRES_OK(ctx, AreDimsEqual(output_dim, memory_grad.dimension(2),
-                                     "Memory gradient dim"));
-
-    // Outputs.
-    std::vector<Tensor*> collections(4, nullptr);
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_output(0, {batch_size, seq_len, 4, output_dim},
-                                        &collections[0]));
-    auto input_grad = collections[0]->tensor<float, 4>();
-    input_grad.setZero();
-
-    OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {batch_size, output_dim},
-                                             &collections[1]));
-    auto init_state_grad = collections[1]->tensor<float, 2>();
-    init_state_grad.setZero();
-
-    OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {batch_size, output_dim},
-                                             &collections[2]));
-    auto init_memory_grad = collections[2]->tensor<float, 2>();
-    init_memory_grad.setZero();
-
-    OP_REQUIRES_OK(ctx, ctx->allocate_output(3, {output_dim, 4, output_dim},
-                                             &collections[3]));
-    auto w_m_m_grad = collections[3]->tensor<float, 3>();
-    w_m_m_grad.setZero();
-
-    // Const and scratch tensors.
-    Tensor ones_tensor;
-    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {batch_size, output_dim},
-                                           &ones_tensor));
-    auto ones = ones_tensor.tensor<float, 2>();
-    ones.setConstant(1.0);
-
-    Tensor scratch_tensor;
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_temp(DT_FLOAT, {batch_size, 4, output_dim},
-                                      &scratch_tensor));
-    auto scratch = scratch_tensor.tensor<float, 3>();
-    scratch.setZero();
-
-    Tensor tmp1_tensor;
-    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {batch_size, output_dim},
-                                           &tmp1_tensor));
-    auto tmp1 = tmp1_tensor.tensor<float, 2>();
-    tmp1.setZero();
-
-    Tensor tmp2_tensor;
-    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, {batch_size, output_dim},
-                                           &tmp2_tensor));
-    auto tmp2 = tmp2_tensor.tensor<float, 2>();
-    tmp2.setZero();
-
-    // Uses the most efficient order for the contraction depending on the batch
-    // size.
-
-    // Shuffles the dimensions of the weight tensor to be efficient when used in
-    // the left-hand side. Allocates memory for the shuffled tensor for
-    // efficiency.
-    Tensor w_m_m_s_tensor;
-    OP_REQUIRES_OK(ctx,
-                   ctx->allocate_temp(DT_FLOAT, {4, output_dim, output_dim},
-                                      &w_m_m_s_tensor));
-    auto w_m_m_s = w_m_m_s_tensor.tensor<float, 3>();
-    if (batch_size == 1) {
-      // Allocates memory only if it is used.
-      w_m_m_s = w_m_m.shuffle(array<DenseIndex, 3>{1, 2, 0});
-    }
-
-    // Dimensions for the contraction with the weight tensor.
-    const array<IndexPair, 1> m_m_dim =
-        batch_size == 1 ? array<IndexPair, 1>{IndexPair(1, 0)}
-                        : array<IndexPair, 1>{IndexPair(1, 1)};
-    // Dimensions for the contraction of the batch dimensions.
-    const array<IndexPair, 1> b_b_dim = {IndexPair(0, 0)};
-    for (int i = seq_len - 1; i >= 0; --i) {
-      if (i == seq_len - 1) {
-        init_state_grad = act_grad.chip(i, 1);
-      } else {
-        w_m_m_grad +=
-            act.chip(i, 1)
-                .contract(scratch.reshape(
-                              array<DenseIndex, 2>{batch_size, 4 * output_dim}),
-                          b_b_dim)
-                .reshape(array<DenseIndex, 3>{output_dim, 4, output_dim});
-        if (batch_size == 1) {
-          init_state_grad.device(ctx->eigen_cpu_device()) =
-              scratch.chip(0, 1).contract(w_m_m_s.chip(0, 0), m_m_dim) +
-              scratch.chip(1, 1).contract(w_m_m_s.chip(1, 0), m_m_dim) +
-              scratch.chip(2, 1).contract(w_m_m_s.chip(2, 0), m_m_dim) +
-              scratch.chip(3, 1).contract(w_m_m_s.chip(3, 0), m_m_dim);
-        } else {
-          init_state_grad.device(ctx->eigen_cpu_device()) =
-              (w_m_m.chip(0, 1).contract(scratch.chip(0, 1), m_m_dim) +
-               w_m_m.chip(1, 1).contract(scratch.chip(1, 1), m_m_dim) +
-               w_m_m.chip(2, 1).contract(scratch.chip(2, 1), m_m_dim) +
-               w_m_m.chip(3, 1).contract(scratch.chip(3, 1), m_m_dim))
-                  .shuffle(array<DenseIndex, 2>{1, 0});
-        }
-        init_state_grad += act_grad.chip(i, 1);
-      }
-
-      auto gate_raw_act_t = gate_raw_act.chip(i, 1);
-      auto gate_raw_act_grad_t = gate_raw_act_grad.chip(i, 1);
-
-      // Output gate.
-      tmp1 = memory.chip(i, 1);
-      tmp1 = tmp1.tanh();  // y_t
-      tmp2 = gate_raw_act_t.chip(3, 1).sigmoid();  // o_t
-      scratch.chip(3, 1) = init_state_grad * tmp1 * tmp2 * (ones - tmp2) +
-                           gate_raw_act_grad_t.chip(3, 1);
-
-      init_memory_grad += init_state_grad * tmp2 * (ones - tmp1.square()) +
-                          memory_grad.chip(i, 1);
-
-      // Input gate.
-      tmp1 = gate_raw_act_t.chip(0, 1).sigmoid();  // i_t
-      tmp2 = gate_raw_act_t.chip(1, 1);
-      tmp2 = tmp2.tanh();  // j_t
-      scratch.chip(0, 1) = init_memory_grad * tmp2 * tmp1 * (ones - tmp1) +
-                           gate_raw_act_grad_t.chip(0, 1);
-
-      // Input.
-      scratch.chip(1, 1) = init_memory_grad * tmp1 * (ones - tmp2.square()) +
-                           gate_raw_act_grad_t.chip(1, 1);
-
-      // Forget gate.
-      tmp1 = gate_raw_act_t.chip(2, 1).sigmoid();  // f_t
-      if (i == 0) {
-        scratch.chip(2, 1) =
-            init_memory_grad * initial_memory * tmp1 * (ones - tmp1) +
-            gate_raw_act_grad_t.chip(2, 1);
-      } else {
-        scratch.chip(2, 1) =
-            init_memory_grad * memory.chip(i - 1, 1) * tmp1 * (ones - tmp1) +
-            gate_raw_act_grad_t.chip(2, 1);
-      }
-
-      // Memory.
-      init_memory_grad *= tmp1;
-
-      input_grad.chip(i, 1) = scratch;
-    }
-    w_m_m_grad += initial_state
-                      .contract(scratch.reshape(array<DenseIndex, 2>{
-                                    batch_size, 4 * output_dim}),
-                                b_b_dim)
-                      .reshape(array<DenseIndex, 3>{output_dim, 4, output_dim});
-    if (batch_size == 1) {
-      init_state_grad.device(ctx->eigen_cpu_device()) =
-          (scratch.chip(0, 1).contract(w_m_m_s.chip(0, 0), m_m_dim) +
-           scratch.chip(1, 1).contract(w_m_m_s.chip(1, 0), m_m_dim) +
-           scratch.chip(2, 1).contract(w_m_m_s.chip(2, 0), m_m_dim) +
-           scratch.chip(3, 1).contract(w_m_m_s.chip(3, 0), m_m_dim));
-    } else {
-      init_state_grad.device(ctx->eigen_cpu_device()) =
-          (w_m_m.chip(0, 1).contract(scratch.chip(0, 1), m_m_dim) +
-           w_m_m.chip(1, 1).contract(scratch.chip(1, 1), m_m_dim) +
-           w_m_m.chip(2, 1).contract(scratch.chip(2, 1), m_m_dim) +
-           w_m_m.chip(3, 1).contract(scratch.chip(3, 1), m_m_dim))
-              .shuffle(array<DenseIndex, 2>{1, 0});
-    }
-  }
-};
-
-REGISTER_KERNEL_BUILDER(Name("VariableLSTMGrad").Device(DEVICE_CPU),
-                        VariableLSTMGradOp);
-
-REGISTER_OP("VariableLSTMGrad")
-    .Input("initial_state: float32")
-    .Input("initial_memory: float32")
-    .Input("w_m_m: float32")
-    .Input("activation: float32")
-    .Input("gate_raw_act: float32")
-    .Input("memory: float32")
-    .Input("act_grad: float32")
-    .Input("gate_raw_act_grad: float32")
-    .Input("memory_grad: float32")
-    .Output("input_grad: float32")
-    .Output("initial_state_grad: float32")
-    .Output("initial_memory_grad: float32")
-    .Output("w_m_m_grad: float32")
-    .Doc(R"doc(
-Computes the gradient for VariableLSTM.
-
-This is to be used in conjunction with VariableLSTM. It ignores the clipping
-used in the forward pass.
-
-initial_state: 2-D with shape `[batch_size, num_nodes]`
-initial_memory: 2-D with shape `[batch_size, num_nodes]`
-w_m_m: 3-D with shape `[num_nodes, 4, num_nodes]`
-activation: 3-D with shape `[batch_size, seq_len, num_nodes]`
-gate_raw_act: 4-D with shape `[batch_size, seq_len, 4, num_nodes]`
-memory: 3-D with shape `[batch_size, seq_len, num_nodes]`
-act_grad: 3-D with shape `[batch_size, seq_len, num_nodes]`
-gate_raw_act_grad: 4-D with shape `[batch_size, seq_len, 4, num_nodes]`
-memory_grad: 3-D with shape `[batch_size, seq_len, num_nodes]`
-input_grad: 4-D with shape `[batch_size, seq_len, 4, num_nodes]`
-initial_state_grad: 2-D with shape `[batch_size, num_nodes]`
-initial_memory_grad: 2-D with shape `[batch_size, num_nodes]`
-w_m_m_grad: 3-D with shape `[num_nodes, 4, num_nodes]`
-)doc");
-
-}  // namespace tensorflow
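A custom op pair like this is normally consumed from Python by loading the compiled library and registering the backward op as the gradient of the forward op. The following sketch assumes `rnn_ops.so` was built as described in the README; the repository's actual Python wiring may differ in detail:

```
import tensorflow as tf
from tensorflow.python.framework import ops

rnn_ops_module = tf.load_op_library('rnn_ops.so')

@ops.RegisterGradient('VariableLSTM')
def _variable_lstm_grad(op, act_grad, gate_raw_act_grad, memory_grad):
    # Forward inputs: (input, initial_state, initial_memory, w_m_m);
    # forward outputs: (activation, gate_raw_act, memory).
    _, initial_state, initial_memory, w_m_m = op.inputs
    activation, gate_raw_act, memory = op.outputs
    return list(rnn_ops_module.variable_lstm_grad(
        initial_state, initial_memory, w_m_m,
        activation, gate_raw_act, memory,
        act_grad, gate_raw_act_grad, memory_grad))
```

The gradient function returns one tensor per forward input, in order, which is exactly the output list of `VariableLSTMGrad` (`input_grad`, `initial_state_grad`, `initial_memory_grad`, `w_m_m_grad`).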
z#hi=GqeVWD5cIwH90g)U&IDFH3@C+H$zI}xfB>h=$tY1SFo)f0Eq`bD=Xq_RluL;R zVXf`c?jE8@jp)n}qz_+&4=5c`Ay+e4Q}kK{euv!t{$|oH{ggUTi-q~2eBd%>n3;or zYo>^-5ZH-y!@o7k!B^bQzq|zB@C4udI;Y*UjIPFe0dc?gY|;Zn(Y53J`M^tIvnm;( zP~&bOM9Wkd0H+&*`*cdKrFWl1a986eDG4*DwNVQ8Y#G;t;3k;X6JFvO^IlKa3CaVj z=gs-8K-m$J_vPdNArHq5_U34Uc`Qav?^&Jx?4_lR^`VWuj@^I%Rzoiq`F=<6WTh*B z#|TXfH@)G}SF8(G_ew!slxr5qBEQ<|3O_GDGZdm97il=_Q2K_-!wHy!#QIQvzZ$62 zk|0_p<-=6gdc;McvplUc`k7p*jR7>)-d3AaN4tAwu)c1k6qg0-+u)`nQtzdtxn?c|F@{`NlYA#o&x^dL35iGkI|Y*QtPm+nV*~)CW7k7&JdpY# z%k}}#>-1bBt;tkCVKqh0i?N&-u%R)u-$W{7j+7@|7m9cqG&2!2L#2<$YpzYu^#)&3 zx1@r}rZ?V_FJLtIHaE0uis*BUl0c{5dlON-w+)dj=JnEEf5}XfmscdNS=O^2`QGk( zUh@ztYUsq`-*aDdzJGVx?KB+XwjYKqiqyXUnt{-@5rR6T(+?|1Hcy^6>CDlmV}>&n7~F3l2@ouA`-G-A5LJS=-c zD>APEA0Cl^vl7?T3T`#Ktsa^LJ>v>9Lg_Z_&)L+D@G04$yOU1ID_TP{D=M7?O3kj2Lgn`v4 z5}BW}{_pQBV0|VvOqon0Of+}AUuIHHrTNf<8Cz<_^YimW|6=rUIN@ho5PF#%Rr6B6;b*$4`*Ib)vNhEa10HNuz&hV%gB!EyB7aT`^f5!G6 zrnLL~%kNPnyy?())tl=vNsnyiHEJ_gVjc-Jt%kMI-O)jg!DuF}{deTbWD4AN}SB!;wsEG#6gi$bTkmUN>;#1!QU1(?|G zY?&X>RIv=)uePQVYCLU>zw^UmHa;-G_65oYqEPg@9z(P2(j6~30ru(hdm})nLMpoMJ zW5L!Vb@XZvBayc_bJWY3grc)n-2(;f0Hb7WeM#wDW_r-K8&ds911XPXe(2JCzIy%m z1c8jEMIG#~yqWohG)UYVf);4JzY!TscO2(?8XO<7l#NovDu5|;a-auu0Ljk%yh};Q z7@hh->YY{^o?blZFm!W>K{!6CJ#uq%H2ufI*IdCsiJwt3u6We~6AtZgxSs|WxTvpN zfgQ6o!C!y%0kdn+oLd}BNdqJmd86}!$p5=TJ=iGkz4lc@kXB10?0iLPOJ)_yqYwZJ z;36XP%oUWgPL$;fv!>mi1!}PY;Ffuf$HPmpPr^TwSr||6&nqF?y1Ke!6O7>?H)PnpT1wUhUH|=Yv*{1apEFnuHN8#aYc~JjI(>%L5+=r zvy_va^P1grM4_-*@cDd0@HhZAfA!pmLg5!#!?(B7GjwIb7E#@O>1OcXMcApUXWY^Y zxFPoa##!PZ3>dITSGy7}^2y2~OkZj^(qB+8FZKyyb0@)_wYB7t^nQkka$s%8=oc&1 z79v{Bx>{DRxCIi2$kwqonegz~!+$NxWM&=G^+wCfD&%$+Vp-bqyqIk=mI81~y9xx^-$+@Xa=SJ6)31Tq%l# z+kTOgTudT&FIz@VQ)89?J6sr0XTjlC2e|lw5>-S-gB#;$#^~9L?3rq8=c5? zq&7KnChab6l;;h{Dq!=&#s8QavEC~)&YI6LUx1wAPs?$=!M4<1X0#6M^g=$f#K1fP8+cl(^Y^~bQ!QvO?;}bK4P(hfnNboF>E`lJ}r<7eRlw6d}eBr!ypQmw$x;m|_Vr#oJlMr(u zgey98Z`Gz%GB0mfP@3aw9Nj%>cXw-mN(%zjb$@k9j*r#RtH46UQQdcWqrU)~O7+as z!1KaKvg&tkARzr$k)dJH9N@ENmWtzTj=zWx9(JbWnRizNuP#o{wtpCSY})33BXDw@ zkM6ALRjl#UnPZDt-={5EH`fX55i-clHy|8%e&Yi8ZO~$o_O-jb!30w=Kv=sboyV-l z%Fn2*Tbv`GPc=j4wd5EfXmcG4=I|EsyYFGZT!a{ba}7Zvi0&+5cIrf2q!-Js>dseL zW5hxQ_iIk(Y>67xpxc zw2zLg>CUQ&pNZZI;dPxbMX0bAn`$63ac0<%AY5Y#+(>)?#*T>*>V~lyfw)3#6!j9c zOB^OA@e*w6Ug?V7QeWGc>0j}tG5vD9bqF#GfOHCYP0NWvRb^zkIL-=vP%}T5cH(aQ zDud9xH;oevt+4rwK|wRXX68VjajTO8ceoI6+#i`cOhn!oui)aiv09n5|7LB^wS7jA zHahyy{Arf7OGwP#IW7`ao5wiQ|ZCwJDgcSG8?SSKL|2 zUI&b#q$H4TZ=qmvD_OEr?}d3lP;NHNt#>-w za12Mr6K1LsR`+!Ng=X|X&)d&!s(_Q+9>-6M5r;=lnzn@qRKH6T>Kll?D6C*jCMm;7L#3mM!E8a|~&)MSYMrJLEbV~1>;j`b4Gfh;lb91?(Qs3B>AZ0zVQan5l zCC0~DiK7DI9|skEdDtvPr{hWP!Sm*~&fU8ro=F~0^a$uA2R&NXdBBa2%*2xMzTZQr zOY%w@N+mSuEy#t4tl=g{Rv4L_;((xvvfR*OlZIK zP5VFlN2TAPgXfPa1y`qctrkda%pQpwk5gdfDAX0=Gb4lXRF`@B)gRHza{${iDz_+iWxRH}Bd8uG--6r(P(o5h+VUD2AwT+V z6<4HYm4f)w(qB9;%6bMd%78up`()u={5zL-_l%q#xCGsnO)qp3k~cSejscAJ+FCO% zHQ{sSVO@)sSllY@$*(1k335_mY*3K!Ic(n`0wWbLiT`8>C@aMd4l15{M=zOh5jzP) z(j(c^3-9|FWjY8|$)=@-oz*JsxmEnLI&! 
zm|3+^ffOTU6y`%hQS!C<$QKsiDTraZQ@-i)V|IJP2pI6!f#b}G5e0!2e)|RK>+?fo ze0&U(9?txgEg%Ac-kB^z`id0l&+4ka zKriYmZa$xoC^su3m~9BSoDVM>z{*UHJwm&8?C%eB?3WyU9Q9Ay>nN^FI*KFh?ri)$ zZg?O6*_a!X&OPQBf{6?5WwBCbh;}kLqpW^Lg?c^S@{gwo)DHv!*<@j{=~>g?6M>YB zx8;z<3RQ(EMR-j?SrG;j7&P!k^}bHkN0(?+LQ+zNU~dUkg)?XRtiG!R{ox8G1F(|l#u)4ZB z6bcnmQ#S#`QaYCIK_Dqo$K13R_ZNy@1-On<$}4{s3xmbW=l>Q%r9dHb+X9}DUz}zX zySPz69?{Km-2&gh;mVBI*+h;DiH(UxD~t^)r`(r|E#!reWC8i~Sf$e^Nb&jRYFox8 z{T7J-yd=VIx}%74X*5=m5t>#MHR=ZZH=g-1^3WYLxl}IWzVz=p&iF5I+coV}3vVkmK$+(D*mxeLyFiMVgC zE-=ck^u+ebY2xuWEZ*Oo*lW|UxBItaYtWvs`{x*S9Fw4l9)ueE+10j}SHkfF3z2TD z%-&n)?ux?I_-ckF&Jee3#HF8n-qd}P*IOjaaW}HVojLGk!lgW;j1AO^X^n??N34~* z&E+a32G6DdC}SI?!Z2m~IRxj#ul5Tjfopo)0bcmUNt~X z#CxvwyHh6uZmdq9`5>b(HS4zQ1*Du1RI~DZB=})pnH?WM<&@y#C{z#wpbL6ReAt-V zch>_iZUoxh){E!izyM%FPej*qJv1?X^hsug`ekiHEQC$lEP4(1Cd9%*2y~I-A||b| zaSIGk1r$qR;DU;e_8+bZIh-$$O?~@(@LAEd{=Ms3(TmtuZ8*}ShU0Vv`I1FS&E$81Gy(o2Rbsh#ZZz8zFQu;WtsIWsm+TR+=B?jCiIAPZry{A`;Y(V?v3fhICI zL2=R9AzgF2JnC22@`2@sCsww>wAeS_%hRoozcoe z)Zl1@h$u>0=(bTHo)twp-`w1cj?mYWeqys(8eWU2nk3@sO#_52oTd#}oaIt>+!q3f z58%u8TD<_@+b0UvqyIj~hS^BHRvLISO~9)R5YF#y5#F7X;WEToyK;rdtcI3;!5%61N&8Z`FNg&iQA| zafdT_Q3z)D`I)KuogyV!P=*%o8ualih{-r0hy~lBP*qqU-H^jjFzrbC>`33$Se!Ih)}LB=@>-UE=#RL^3O-w(&nhhiJ~6tFD=RCP=A$c< zcIT7G7wEY5QgH~;y@oHmOmOsH%#&coY83grCgg;+Cd2-778s!1lH}zn{1Vyfu3SQk z>~Ono%uD>$q8t=`83Ld>>-hwSd+RnaP`*EeDPM)&%*~~Nt<}x*dV>p2yD9@)TVb(e zjOmZTQsUlYNHCE>Se>uHk8W-f$&B_jW{T6K6s;B{W@I76>kGvju7Q6;QgjSu`ea{5 zkBiR1`8eXk1&j@iz4Kp9m+vGJX^UV=erZ`27YOC$tyq!C$40L(?&%$M$jH=5qcs>k zEj$T2$Gq0(nHEjw|`&M=VyoeSuOXMEqO(?G__B-tp+}3)`*5YpKT!A)E{r-+bsQB9n?Sy zEkgKH{buu#IV;_^doJR_FPh_}!_e_-j-%Bf-pV&;Brc+%@L!r5FgUjF@% zi%YN5gCGJ8KcmRkiEWK|+$m*hIs9W=6SLsKa|Z|GcLik{u8pM~m|L6_O!kxyB83qy`Vi+%?)PEMsu|8VbgJ54U2E-QeO~F?L;##MkwClMMhdT}!!H4C^TOJ3r5k4n(CTk`i_#rx|T;%+L7`K7^>kyd^)+w75TfK3qBNo#Owz;*BQ%ZN{tf zLt||GK;h5cy6Wb$6_4! z;d-}myaADKUz>Ga9OZOF(~&n1skIvEMjLbZGt0#J2VMf*M@rybpxl^;cDbD%^-`^^ zr(Z$jEfxjLy9?ELQ?l-XWQ43X?=nFcZcLSbApfAtyR_7|yfuZKJ&01Un9`W!2l&%+ zrc1%gUMX*odr!TKeJ42l)lNjey|~%V?pEMx~(q0{Bl!Hj&%ve5{sCCWZMdd3+)FmQ#XTsV+l_9_03mYJCm z3U2gk>)_kTPg!qCZS}@iI*&tZVPOGjowuaUd5k;%W2>EaDhc?^F|z9?(XC@Om17UL z98D;#H3>p@w$StI#6Z?xZF*;{jE@sCJER(e~8d{OiR+jP@w%Dze;JE1eu zA%W!9?`^oqx9-(JldjK2=H?(rm9lIUuR<9E-C|0?Cgvy8${Y}#E(*)20$}jSZcHW#UN+)HY0-#^UjAkEAG2XXPPXyV?RDJEn3 zS3-=?&zXdy)##Z7CB@;*xzqi`nw)346}jjtJdS1~uryz$Cs!Ea{TbW$LIf+781zUr zpsizjLzRS_nVXwi(JsltR9_A@`=1`SI-Wi=%{tQnqEc0qgix1k38Y$hgqSdhjx}yCq zYSd*&Q|W0rLc9mW9)z2W6jRTG-NJ@wPcvCt8LSgT*S80}oE)B?J^~6d;S)_%YNWI1 z>U|NX5QQ2O+JrUzdw~TEX0Pn&$0Sbo4L#-%pp^DF^c42~+1=jyv!K}`V~3JgNSB-w z83ZO8kk+w(MGCcR_NU)xxQ(C85+%Pzczuc$%N(II2euaew6fgIo@8)?zT8w3uJUJx ziNA?jqyH(Yt%mFo)P*9(-nRhA5`!G|oy&G78n!R41 zp-p5mx!oNETKyN2pq~uG#8zy5ahXw&x4KKW){__cgyNg4ou*(agzlM)+lZu!ei^(a zHX5At5ACr%@t?N!{Me(0M-MBd1lGnnDYBB_wg7~{vE>e)=LVnF5mhKn&W6OXU7j_>Mpy=`T&gL|yA*tZpsLkoW1y|46T5;`SHI1r6>x z4lnI*%x4lyh?N})tN1f{y)y~>hjj*t0r9sakz!r;g?Td-?K|rgawc4N<_ByC4mO9A z4@#0b>EF38wv>N|6j_Wmr6O{HGjqnKMltGOZ?Jzuw}40;*c zpkkN}e)5fShfbo_&j<-K;^eMqEo)(WWoscq|9*(L<=^#5l#haHqPhuyeMFvvH?$m0sqMNpThA3b`G%7@@u2p-Z+1_qB+tWmz+mKDV-C4Ef5OoqY;E zKK-WAP=h<}-(31!ct4Fz&^0ujP5gX)dN2Spgfsex^E}$;2qY$|9m0x^nf6bPmfU`p ztq7n(2ESiyWfQx_VVXZP+51`H)@L2LXXQ{HDRgxwNQ53J9&HS-NEgq7WtUy~jFG~k z3$7Jbgnv%=_GXHK`iM%YGIL0NUm=SY+KlSYC+EZORk;U|K2wSY^h;zS!)y2d=n@G} z_mVs^yokAHHfJTU3FK`;6ZMK^4xbXAa2-PDk{V)6>|JS@3*&T-$?vFVQNJHFMxitU zYpr>bHag$?6yvJ^J! 
zp?Sc>U|i`lm|ATGtCt;q9}MTbDEC?dhcALar}$-RUmIrQhSEVIMKYaNWdSgxzKYl6 z9Q$a?=tPI^u^TT||H)FUOX&&dnpCcJW~?4{`d95F?0vQTqEbY>K6d3C1^9r|jDA~% zems_&>Go;HO>YH_?7{5G*Z8Q%#EIRbJ8G}AJxf_ky*743&d^`@DBi~<1Mfw+Zt3Tm zTnHIw)tOM-WX3k^|)a;F9 z_N?_NK|_?ckJ({>P-^BL?u7t09nz;XXbv_^kJ*&&&NHPVCg+fx!BUv+mhZD)0FTF|+7HYZJXS`_a4g^l`anTijY>guL+Hmg zTf0dr!OgvCX`nvX@|bL%Ak!wsOLdkwWHSBp=4any6BrZtNOVUd|W_ zjpp&}2D9@hR>;){ZWcQ!yfT+Th4U_;$}nBewieMKn2z|xYxzi+X;m4!&eN&vit-5I zltB$^=dS?9LP)`@D%yj{q_^bR+dj)J{N@4bWzar88~q2d}n^pjRb zHuIx;DWSv?hsDibl|Zr7%iia7WV*drRYR2}o7%slM=1yN z_vbq*ROGvgAFSu6hn^#aa6ON8b~p)?rb!AF!UEq8449f!>Ki=vun`x})B`wTx|=g{ zWE3&aG;skr_LiSW>d%1N7DMB**iHpB2EvhH=m10;MucmgW|6dJ5=b*=;Sli%#(Ae zWGkYLz@L8kSoOHv?7~mx`~Jxo?i-b1c!Rw*;v0W7i?V_4Q><6Kx%K8|bBQl`!nb{* z)hi!cdI`A*2R-L+I|hqVNHwTT`n{+-Xrj6N-+!9)ZAWgh59iMYK_!VV_@?0kC=n>&!Ec)GiezJx{lQR?fq&Cv$JZhP-*8@l0b(6|hk_lF@LX4-Z2f8dNJy)ifjaHTF| zq&{4rFD1Z&8hzQ=`Ns&HlceAoqvlQBl8Ov3&*#-YUuQ!OHZxU7sx@k_z_$hNx4j&Q zLeS&kyr?H$>Cam19@9ujy2!7)^U^*@wvM;?g=&4 zExuH}xFS;n_!ddNNj6(XPp-a5$mWG!HAcL2u^m=1=vay@53{%x`>rFoqIUieO`KWn zP{+~2$WLrlj@6e)9I1AxA-3XVqX{5z~~jw35KwZ})op58>vi?s6TRNHL{ zpwxVjf0!vR-SAq50+&%TVc|C%6?e{R8KRtEA%=kGTHM89j*<9g#^P3Sph=jq>Dq{E zBh#@v&bjx(Iuz|`3xLUo0? zd(Uel16^;Zk78=+&FX5Pt_`$RQAH=OmeC;xmFgCO~}tAgZUbd6OtLwCi#r z4yQY;e!F!cgyKyLNN)-%yBOXx-4g5)X_8v2r+ZwtvgFuYM}2X#Mj(#riIlVlzjHIa z6sl?GaIQ#IOnE%#?%&PLfs$kI)cj$RL=?EoL7`iw&EMX-;6&)RRg~pgqRtRG8hN?u zs*o&WqUlW1&iVs?_7j|(m!W3--G)`DyMvr!zxa&j7?JvJ!*ivkZxF_6UW5`D4|(haifu{g^mP@0Gn9cFC2 zOWqp-z19t;*{&JrPw7c!6mGW+?~xFUwso|`8_(>;7g4+k_sVpfY zu zRe;mbQw07&H+!B=ujV^Zy~rW9n7bLd^Y`x?_y5Jod?>A_WFE}LlN{mWK8Y8 zxw7*%X>Mcg)r>VHsa0udVD$KElQ3zOOBd6rlM$@Rj{Rm`Sa`#O;AkBzbj8MsO@LWt zo+S&Q-JtLOQ%w5LDhummhyW$(Jq6s-OJQATEgyB>UO;W{QJy+71YGTrN2R3|?gSQQ ztlv0!vJ(sjbxbpsGZq1v)LB>hh#crrIw|tMBFjetd!driD(h2?#P;!l1fDg`ygiO@ zV}wZ3UB~c?c0NBGR9q`vT`}*RR;xdwP=i_3RW#r4i#5YsPlp;Y?^0NcA6U`NQc~{J zQGDOY_fs>Bodq@lXKfLyV^zc>XOtdlkjw2D7#w(s%T-KR9ad5;DVTYX)RD1N;r(Tv zvQ8(?_>b19cpNq#EgebLk&<@=kACkn-z?{?CAWW0j!W3wT@qX)QD+W^qA_Tk(A>gb zU5~}lo$mnV3pB3u5jzz2^ow?GZGQOl8q*HHKWbjH;NHBpxuuf7R`vtj4NkM{ZpH4F z3*jM-jSjAEOQwy)$@GN?osx>Hk>~tNwkdWBUXg)fMI7wfFY4WzHs9QBI0KC4cRnRb z&CSj&Yi&G@Cf~f#=|m;mr3XQ^;E1Y0Q(RhFbhsAPkb?X&aXs@?`y?A!yE3kP$jbKc z-c%;tXnb4=iw-Ca%@p8}5-`L9J|bQPSOD$pDDPM7r7#-$n;feAE@!866PvU0-f+-Q>I_N5=`AVq6{ z+k#q?w4!XH8bS|9<3zDpZ0tBH+toNxyAm6%$<^5jGi4efujS_JTe{^|)y za^~;cUsMPC}J$)S5RS3zp!bp)Rmq-H}dk9I%j_hLs!1P zDE+D!1k4645Rvsg8!qH!hJD!(boI93S&>0g0JsM-Rye7Qy;E@#cB(A**15C*-NdA( z@nO}I1BQnr&s3EN_GAJnvyyZ@M6=Lic%FFS> z6*rOTZ@~YsHB==1?wKhF6dk!TPW&*y)XvUo+nbDVUU}1Uv|4#22RjODkE^EBNCz^w zV_OviYI+kGY?uW zWnbcGKKA;if7i6B{PoBAUPkqAdKLZ&D~)t9HFnsdcCVh%HX&|TcTiMVR5VV&4H)&b zP2@=%oTu(+)A{cXOu)t9hy^bNUj>rC&*DV!Xskc3(N($rf>h=+Fpp`mtA{%%4XFf? 
zWKRV{jPbG=>-pjDE&)PsAy~j3W;ZIp1WR(<5tw5_J3H=I-b8_Qx*(=PHCEuxo5mK% zlxJy|Qd-q@)FJUGE}$MC<|TML!fYn*mdppNJEJWKb2ZTI20y!q12OQ@`S9$^1;06I z1m#FoOps1v)u2iv`cZ;pc)_z%W>OB;4*yU*YD51GcpR4c`c!YO)L%Fr%zNpD z_!bw8KhH3p>K7Z;9&d=Gx3n@M^@826vZMN6m1~2%l{Oa!MhC9{uYhzmTew?vq|W~Z zGpoMHNbL<$mC)YGCRI1O6A{;X$mlqXmF&@K_DDP`r>?cf#%%rC+8v#UmuK?|{Tvy{ zXgd#`F4^>t+62Vvgi@$GU&|#kKgF%}3FpWO%8iQF&Sfd8vxiCDd*bCT>IZ#lXZU;a zBqSY$yHyo+2j_f;n!79!^|)E?HtIKSYeJXp0{HOJ68!9`vD&)f5=}*}hJY6E=J~~8 zd~S4(6T}<=$|te2sW+YuLwxe!CG4L}u#&XwF;}|d86W{D)=gfL_p$*xt>C_{OPd6z zaYPnOQ0?bUAipcvuM;V7SIb07^6Ag)Y$YEw89&{~#vA~_P~UKk4S^yeKkZ5o^+hd~ zY=1(PM)MVGYA2>T6_HbMD2R+YS?PAm^Yd7&xEr(rpC+DiyC@|qFWMwf)7udrNIzVZ zmKrJG7<}oWTjCa@;B+plY`l6)S%<_{#U3NU$xeTNu)kqac`05#Z2q6$A zXqoE6>N3cA`oZg>4Tr$hK$v#dgoHZYnk0@q;iS!W^&OiUO#a8sj7Ya}Pm;1%Omy_H z9J_aE!v$(C1e+^a<74@z&f=poH9W{#4JdRrG>(qmG&cFH1MgN-WkwwjbtDlD2)|T@ zkJXen<2CpoqIA?eJDhvQJ%K1+!*Uvp1*M z;+lt5!Ucl^CWKriW0p_>M({FWt~mQY>vpDy9^F649FZ~7q$sxLr;J>~>-1i}{oz*gAonA}k%WvPs+`*qu)eiaNC4m|{ z5DoWb=UoQ_o=>qnz@VZhuaBXjnMff=wh{1aHn-cHBY;m#%K@wn<&}{a1tZ@rpew-0 z{4$srA6q9>Rmm>cIx%&f=B!|Bj;bzvAtJ-)?UOn=xw+!+{2GWDoiH5P~jA`h4TsGR;}(2^dq4!TMNedz7l7Tv0MAz5NS;~ z=k*AMQXcAJOTF@Obz|d0{5byd0~rf9V%%eoZ^VU3n)id_nUaz`6=|Ig{!b0pT>~eT zbeAHIc9WV^6k`sJ2X>wgw9W_I+I@WXWx1fRgvE3@V|@Z z{ZBzvxM)XBx5|!}u6P@Z5rti+pNC14d0C&{=HH(-$jziwJO0+xeYyQ>*2Dt(PFd5p z-vSHnNIKSN=qUv||4VuJK4m*ezmm&B?o=KUY}P+$+$W|}@cie`z{jqa;gVN4qft#F zvqr{)FR@HQ)Bj*FPWd83M`0}#Up9~zB7Ph;zUGQo&iQ|z7#OWlrZ+xR=a{v&dAmH~ z)mr@KilRySjCP8ZOjnZ@zX2fg`??@cd+YAcu_3Ug@+ni4#l5mjFHai_u`CW%^~A&X z_z7z_1rGc6g0l|nlAUrEQgx@j*&xAoYen{>ehmU;5L4ua-8E7TOBC2 zfBB(joc5Hp?`;7$EhH;ML&aBDGPm`MhRysdQiAi1@4EpUo!tC#U{debxch8itd6`| zIHsRl(MMsWoS4V4(dx$j^f#xfyo~YJ@;Q|QI!Vj8FSSrd!QK5y^QKjWd7muyRRHgE zKTRcpy%Ei9V{_64Wy!97^gWPp)fI1 zl=aS{u|&VJIj9j^fZUpP#b4#L`1&YlLKb1mGf|6hQAV} z)9OW^`!e6j|L+AD%$zSs`;yT9nKAi7$p)M4{k#j1p z-nMmXt(4+!!I+YOB?PRk?hZNSU2hu+-rXILaGAH4=jV2PHr30^90pUFcKb025(#Dj z7DQkUB&9@S8p;k+D&e?pZ8@*=wfcq`%6{s*VZ24X5Pmi5z#}lg5g9R%@rG6t=9DD~ z=M;p2K;m}emm-EB-g@|MUokZZ3_!P^j6}CS;#W2?Iudv|f+O7eApt-D2sbu2U)*WEzi~BFsLPfHk;L<+!#KYC=3PPYRnO;h@46qS z-7qFJA5+gJ!&55bX})rqTp-8Lv{hU0#`5~xSC4TA>Mx%^|M1tJ&fCl3^^SAORI0P# zx!RYf7tS;+^~>M?!{5%QW<9?9!?zzkeW9jIR{$wjsuWKpEvJL zh+w#FtGj8q#gsC7Y^CfDht%5Bwsj)RLrEzCZWhKeAp<8Hrd)RP@MhZY(ln-}`E^mS z0-i6Y_387cAK(9QU7x0Duo~)t>Oe+_#5{~6vs}$0Dk6!9yL$U3I;s!`^2LwR^ia_AN#9T@i&T1BH?2PCD$$FrOfx0_0V__i#pa66W zwQ4~xrzItsrb8KqQc5Y~kaH4d09CEqN{HT3C`m4o(v6NA;DN#90cf|jue+*Rv*YoY zPzIcs`)X=P%{?3>M@bJ)=>o%V*)@suVT-Mf?-nMI`6wHH262_d%;dmIQfgG*2{cgJdhd;c0 z`mfu%^=?RTt*)l-c?aG zx(nrL43Qv2z;PVz_Xq24{l@G-1ZcqlQPmJp5=bIXbdU-5EI1$tQDR^P_fS9tL`Tdr zl)aJbv= zr=6pKb+{#o!I6m|QINLYE!+YU-{x?F!a}>=7I!cRTooY9jl@XNHO6tmh-F!N>-*#J z#v*gGlt_fkBfPs)fQrc9{QmpjeET*R+D*gbo8P?shTi}A=lf$CQ~F>3<)1iw-WC;* z>AO!K^zJwc?mqmnmBN&rSW_AyWZRl`BW5BN7UslMQ_fP-n2Oa}x3z99Z7mS%TG72} zcyr>!oJ+o$%MKVwND+wCy4CI4s&4b3N^?QrqQp9uD_z z_~py#>G^qU+PC2FfRKcsUYBjHTSP!fx9|uvSx68-z)%WHn*HllGD3Q_^V6#BnpoZ~elH|SQikNOoD3~^%;d_BDYi4cH^ zIx-LdCE$!H6EO;S0I()DbvJi#zZqGCXaQknU{t#j)2*AqOw5FV0Nz2FFaiPz*zs%5 zCOn9V0YJOP6TA#toLob zM9c{Qv@>{?(mDv3a|Z#9ZSDqDTdf}Fu2|GwddjjpirQ{nfH=Exx7sxVgF#Zsz?gCdq?hM&>wOpoBHZfQdw0W}3NTo} z^Rk#)Df51_lxb?Mp-5Wy=V5c4!8Z*W`lL8O$)LkS=yLYC&zT!8{1 zH~|FbocD)uf1J{oQc7m7U4c*%yBioFA(%NhI+Bo3%0!t62~>la1ULc#0~rD!EF3~4 zB+*a`3kSe$#!p)diyqdcms~`05gvz<{a*E(?Uanr^G*R37eMfB*JdGWqg!SuP7j2UINJ0YGnU ztBnJ1s7XLcQufpCbX`$kv5m*OOYb*5nQke;&!0cPyqwLf ztDE~&#@%kpDOn^hgc=cufWiWZbQ|3=M?xeB@G_J#4(qzQXOUcnP^-PEyE_mg4T-m{ zy_~;@+(uDlzaJH2o|o$~2TmoAIf*+4G-7MZ^2_^A|M1W6z;wPmeSZ3Qe)-WAL>}5Q 
z{!jn$e}1?-{NMliKfSc4zy0l7bQ|&`rLt{59q;d^g9KG{*{(L9uBX!(DG{gLZkI)f zIp@;1KIEKBArf`8=2Qv>r(iX0t){|2?(S-qfWmJ{TkVaOk1aG*Z5ZX--`u_X=Kjsw zSEUThiC}vA1-(8zJUsmQhriZM7d_SHLO`CgjFg*jLWAhOuIu$WtLjh&yfH5V&E3qY zQveQS9CyuKJ8gou#8AcDUwpnx*E>jL|*0!x; z>u!p~nJBRgh~&4}Ox%`G;x@8GfZ+6N=mm&woe*XrN=yV1;Z8SjqJM=bB79wCq8{cJ z+qMA-VMwBbi?A?pI0SllzwJ8(IA8#RsRpzBnqrldlB66ES>$ei9Cn!`WRNNEQW5}| zFN=4F+;H-gv2ZS$fF;*wEm`}*^z3h^ZxdEnFMpB!v%*FXOG z&s(iuUe1@fZEFuFX4c*#^aWg`tIk8Q{cdNyFQ=!uUO5s0r6fr>O@#vHYrW31Iz!|D zV#=-i^ZBB!Z>>wtn>HY1p2lI??;lFZ;LXh(!7RQ!J-K<3B3xcQz9!6W)|!zhCmMIV z{eJ3N&*zgnlyO8lPq}#bR@dI^Zn#Tn2mnfMh}%{jscT=BYCRmo+%&B9-g=9$oU__3 zQx!MlErJsh5(7jT1{RPJ90ap3Or;E2vbjfh5ji{@@{j>xsmtl*G@s@O8^+|see2P0 zqJ1Dx1bOdfW@-?is_N!$M0oSA0>dI45QtJq!!8eHK*F}RK#Pb0xpxl-a{vz(@$go= z^{v-NL?Qx#7NKU%>aDo}0K{B|B1FCS4cfZYx;CvUQ1~c>913+^FMtbgiL+bucEd0B z!0?W1H0EXxZ5%RtzVg-Gz(bNOwXJ=`jPDRN0Yi+4|4_QQdTjN?SW$z#24%Y3cZrC!#@ zH?PB8gtI`9^s1ZJ(62K=oIueeaR8X>Jh$B_Su+HPV=^-UYKmL)VL#s8kIl?N zx7uz|Lo*A|GG-}oxjz5=!4Q2Mw{>~0b4?^4`j4^d+sE%t+snuKr=_pwXM6Mbdx!CQ zy}tGX6PIy!J=N=Zndj|tnmJ{n(>Rutgh+@90MtFD#FBbdQy>W@Nla)4yP=ewq=F#` z;1TAk-Yo*UwQ9mVBzZXQzx(#>@Bi@Kt5^G+hHY)V!QJ@nt5*+qyJHak@rR$EFZI*M zQ(}Je@L;VnC!vhtFW2XJo>eWvk%E{+QddPl2<$y5XgBVMk;|KJe){oO%W8dP6KkG~5rWNS_i%(T zKyU*h4`84hGKz^9gXv}s-d=GW9_ns@1_-7%wiF-`fe2RZO|4!oyQv@vi`@P#xPw7B zf`tQ;L4Z3l284y9Be8tVkvjBn24E;0zG}CK&`9I`fm3Y0)a!h_f85`XMN;1JWqZD? z=ViHmIem$MyZk^DbE{l++Kp>Bh^D>w*j`@N>#{gxqEM>>uHLWftmsJN%gb}E@#^lI zaJ|mw8R479Z$5td^rt`lSbh8a{PODXxIWf_M?x5;e7ht`a);XLOKWEgHQ?qQkfwdv z-;Z~9(~zbjXL<$3vU+rLBPw)twHD5gdTXw+A^tU7<`{)V7m(0MwPagsdZ;rOtBd#J64 z;eP+{+i(7^Y5d#2{`XJ6T)%yNxE}_fElYWPcvs4BdO3UAO}pK;)tA%fr>7_D+u^vU zfLiOeY`t}D8U{e2&58Yby$~hd6)HqXO)D~lTTYzE(xCw(a$=Tst9t2ez3SG)$*XU3 zt@|2&1AiEk07W8dpuIOH(6aHEEUee{yso_gOP$6EJW|PBD_II$9U|07!OSk_D<__& z>2Tcdr=3W8d478T)6d%D@!@srwOU7}>98M5Aw;ui{RI*8P>2YbyV~`-G{gOLG^QJ2 zJrFHiT>;4>2%+{(k_=Pc?T6YYOm!I9oNGh40)TX7CNe-XKpfQSSau5DR7C0i^{c9V zKA+aD3Th%nKvPnz%QFA@kMmD|1pxg1`}^I2t=7&FsQk_n8&k_%7* zBQ%VT0cf|&pqro*U{gDt&cD1*;P_(^g48x`w!M9K_wJilln@Z#ynR#l@$_`s)?PQ& z-Vuy&jjrpqm@5mVG^COllMtg&K!n3>j;K(%o5q;RJkKAl&&zpfb7RlUj!3yg9w#70 z;goXgc0N6=Thp#VPATpl57T%cOn!p|BTHfuLZ*Pguji9`e4Prpb#1=xyax~_7LH&7Wcu1K z4C;k<9@)ZHU8l51q`CIfe3`8c_u1g`vi(#nY1}^?_Q%8P_doyj!>14CcD|nPj`wd~ zfA{9~n-8B}j{Ez%Zn|x|MCa3&d0F0n`edOIj2OTX;;YvlmO`NV^QT{?yuLpC_0#(g zA3uG%yFc1Lh6kI|JnK^Dj~_0de|bJXFZ&|DdHeQ!zHZC;yYK#gqTZwDvMfu_TV}gs z?)_Z2jLh=Bs~wv%04b1=1VIjhMjQzI2bu`_MFKRFprN1v8%+>m6KIib$Y!yN-C0@b zBQhdBcg-=|thF>aQSA90U7T~aS?~Ki7vr<*-pQ~3o&SsDEnR#>9?ey8D>xnZ-BsT^ zaWAj?%W(INd-Is@j1;LpJlvXiZFKiYY3PUDMJZ5EkAoj`6YrzQ`f$Fhrln>9F)&m` zN8&*fTz~Xz-@TM_I-MuUw$76Lm_dB<$R<=vEz2^cl*Wtwe6E0nqN>Z0eeXtHZ4Gr= zstIw57P+XLr<#~uKctdbLS)t$=& z0Io|hqpAYyno>$B_91|3^Q zNcPne(}wHIwP4#ECA^$7K%36_Jgv1FvO^zY3a$^XGdC|Ptmaf5SBF}Z-WQ61iHHf= zu)2*I#ik7$v6+@4NW{VU;K^g+ghZNfS=S;WrnN2SyaeaaG^T*Ui0B=eF*yQoNDKf9 z1je>j1fviHw#moY_)OBlawdB)sR-Ix@xfB3EWJHH%ByO4IaJ-jF zfPu-2Zs=onNFk-X%&pYve#vWjcXxkzv)k>HBioO|_-uIo{Ar(JALGS^bAAl&`=^(e zzxR_L{?3oTcaqcp@xS%Qc}`V_i_44an{{0t9v+ru@`x^kyyRtCLJWB=rsz1-R;_9- zf>5O?IshO7MzW2?NzVGQ55doVndjWJTD5te&gWCd3?kd#q(#=#8j>5w{&c)WhUh#- zuJsHCB8{mJ-Xl4S7NRpi^r!?1T1yeprUf0|Jh^s6WhoE$kEhe@J8uZ=*)b(hk(SqG zJr@&DFlBR|m%R2Vjk_^7d6-VnR1lb56*LduRC$nE3(A)nIVhoSGrPBLjGsmAC8 z;OWif55D*8_1CXH`}C{h!`;<&3~4wY9{<<><$t#M`TzO9|IwfP@xQT@fBZY&`mLY- zwCb#c;Uu&Jtix^C&wUy>w-0!$HsZC05TSlWbt+k>eAjR1EA`Ji#x-pGI53MSq z5x0gliwY<(IEM(%QRt}aP>s&hd|YyU^{xnx!{ylZk+1uq-aOmKetbNhkm~qXK7-|0 zFRx7+i)(@b_w&*4Y5dmn@BipqJjD3frympVpFO+Y4e9^=XMgtf>-!Kw&Z;76jhp7R zEK7DFpuyVK$J4a@BCERNG+$p`-#y&==&qjZFP`s0O!NIob330;58s>)Zy(2DWVn3y 
z_HajKIv2Ebee?7rqOz9O=FD?xLaEYt`*2tfZ-OIE&IyOEOR0yd({c52Acay3nG0^H z^Vw?7^LjcwWN9vj{eCP9Jcf12fCgRaIcQ9{>(a$#*Bb4{UKV}e^YJi`7rTBpju%Nl zACHIQkqNL=H4&deKXynKkjr5%>+HS1N#j%mjKGu#$37*`BCQlSolg%B57RWKPP;CL z$RYBYvkMS>T52t=E=$3?61~&XKyaFKk#;_x0Z~+442Wnd>#}%H{Wv(@NJA98Bcc_g zwjxrQK|!UpHUCDqid?jfy(1$I!HLxnWVj%+p=h@7pRGH?rwuS&? zrPQ*P*!9jQzqUnkZKX=1Rw4M!B<2u#3pQ_aa%N=2rgiJAsVOw5wMc2&DWhX&mNqk9S9;EpqO1%ja1Ejfi3lR;kp6 z2qDGLhtleKo%g8+2TI63x|kGziI5rDNi`8s4(I@l&8i`GeTT6_BEy_I-|zd>dDw82 z4nqi+mp6{b$9s7=yf#i4g{{t~VG>GwYx zyX)7V-QB)9v2)U(&T_n+Keboy-oAafF976&?OohI`QVTL#h-oW+0Q=u_TG0tyFK3h zSO3|6b9H$!rroff7@4DTJ^a#MA{G_Im<4&ZlKMk}nXSUF@HMwXPp- zLO*8_D=34*>CTqv3}`@+Iv?2i6gwA_DX1X=u{z0%88VVpd-L_XbvnNM@S2FEA~py> z9s!^tF?)?ExWN%pEh2)T%0TG7cZh5b$f`=^npp)yh{HH+K2()fS`!gR1R%d5&KQt^ zZUYqqHAn2ot28hGLr2cJV67@J7?~*%BKhqSd6Ri;(~yAPcd_f!IQA(f&khXD;5SNd zXYV63O~4!y@U|Z7c1P-B3W6Tln9+q89g~wzeG2^Y$+Pc$_sKrG+q-W*`Q)?v*ArH` z=!Pd(H{ZOwbJzjf!~NH{Z!ex+Ui3C#p3X~-6r?#HJMVdm)vGqOmP?zacK_h!+(Pt3 zz(LRJzYl|pst8@=s}G;=)BeTH^Y!ugtH1i2Z{ECZS|5%kb$RpV?vto<4jhX+ofnZ~Z`FJcpUxVtm6)@lrHnwQtFUbkEXluLEQ9vKjAd-Dy<%%fv= zWQf~`jVyRqYGVgJun&yerrC@LLkP9$HfJI7H5czoQPx(6F@XBk0F6`ZOIeAm)tpOo z>{1G0*n^wvTC+7~^yb0ThrkZJ_e8vz#)&xf-7xmK%uC6eDZJ}@D5W)1XID4Zv5(f8 zWA=fYDMiN+$ot?N5e4sv`FvUtX$u1)f+-_5A42a^*QL@XA1Mu?NWi0`PoKow%d0QHcs-qu z2+;L0`3~8cVnf3Q(I;tItDxgRzEyqw>h)J&P7n7_uCH&_qo};!Z~+np@SbBAVwV)8 z^&A7SI>*~7d73gHc3rr>x@@&B=cUX=r8W`I9)6=IB*g7_XzwgO+X?^x zAOJ~3K~$BaNZSn+AeaJd={jyEs9kvT?lSV;7T(y zGJ{R?W2R=bMI-C#|hZ_A40j<*@(3w|?@Smrp4iKg{*n$tL@55E7wmtTGJ*I)cfY`(r8h54AL zQ&|V?z+sF5K(w6m@z#yqx1a9YRXCm=PRofsh&HU^99@jv)y2M)S`Elq*M;!xno50p zw=SREnGFW0r5zsDGK=FL0IW4O4GhfY87foN8prfB^*(r1+Kt#fHjN^YfAw6<*xnHUWipf-W6h-)q|kXHev&CmlZP0c{0Wm}n<*_%jd zwbcdy-V-|_X6GCr8N7cL6|4y{6MzyLlLxp&s%A~lktf3{kQIve-R|bn`Q`rIa=5SG zyowjsaTv#&r%yh7_Pv)E-!4bHKfYF#ejI%2)+Oh)mSr_EVzWg($_s#Xej>QoV^HAH2m?(NqH(W7&0z)kZiz+`@4T0?W@DL5CJL<3U?ZWzX^ zopY{Kd3-#sc`>Ih1>i6r7FwF_&?SiEkWq63ML<=l4t*E7NjbMNmlc8|Z~@~u?0oQt z!^3HwlkZ4@0=R$zpsJ3XAcoL4Vae^u{^IKD#?ZB9rdD!GL&{mfAq50b*Tl67iYS<6 zMN(#;b6M+I?vLK}%Nau&YQfvv91;SR6l&-EU>CMnxrh6^gEo9}^Yq2_2mkp${CmIp z`WN5(*`+goedUhtR#{f4)~wcb8M#lbn zd~osQXCMF8&wjq&*(aZU9C}}^4A_^osd0?K*cP1|FPBf#u}C#vIrV`ck|?8yaqPMK1!34&$%s3JoZB$_g&w!rzM}z zSVU@W0QltUV(50`&C}~~xfr@nKmYaP@ezZ@fgOisDz>a4_N}z4Ee3~(o&iKzd|CNd zfAy8Y7yWK@&bibfb4tO?TBdr|i+-093(D>JQKWLG(3j8|d-fhq%lX~i9jGe8)zdKz z9wi+gj(IH-RI;f2%SrYLdGK2ngSs>2+rege2Yz?iJG7a z0+0_bMNBCTF$CvKwJ8w-J7*2FDNs{YCJ>V?TC<@oaa#+K+-aUUnTO7>R9<#t-}jy) zyuQ8v`pa+HTK{bbwH!<3+IV{RfblGK9e2KJi-Q|)_H$i5@%`y|Sm#QxltyU~Xwy^> z9Fg5e8^*{hFd;J;=ELc9clYkY51$X0y*=9E z=i9s2UmvPuI%L^By~x>Ia8=hngndVQUoT_+{)^|>KmYn&|LV=F$FJV5`B9~4t}Gsu z-rRo!=tBx=_w=LdmoKj_0e|}(Fe|XNj==(-!nx~8A1XGM07gdNd@PbjR^etuo{@}l9sBq8RBj?Y+_82 zYE^>wL*IKBKrv&Mk%1Es6WzW$u`z|mxoBLskD>PFzQ#9I|Mei|yYARZx0h8Ls zK~)5UT54YMI%fpTIUf#(5Q0lCgg^{=&ZP;8TCRDn>$=t=X5<|*G-kh{@Kvly+a#TY z?3h4h)556AwlP#yQ80r|83g21m6>7+B2~@2bATYN)+(w1<~SMv1&?Te+~ipGLcc1_2!13^Vzkm7SI$d7AaASHr+}35q-RK;7 z@9LDN^Fju~q6S@v%E$^*zy!e&lJAFL(AF}yjYdS|h@F9|kz#EoYHMz#)zo)l>O*8_ z2<8Bk5*H<7Kxit!=tGKqr%2>{aIR~qSx$o=`!4$U`1shWogR;ZOW*AsIFA9$MB5g+ zL8KYKIAN=6^K5~d%Q>cP(-#xs& zf9*pkDiIBwk(eX%hHI(EJUP~wLT+U(lUP~HbaVY8#YBv?S>Jgv5v*PCfzsi1xqEvz z-gq{*K3`s5eY6{f4pVga-S57*e|`7r>(AePnOmtTbSA`1^Lo0t9Il>!YpUn&%AP*` z9&u>;aR2byGu{1F`RtRg{-=NZ-*(;fi+}yu_kOVa^5eh$qd)p%B0!{NIUSF)1Ll;9 z7_fA`>xORVdr@trfdC^_$+grF0<+r>sYnKC)A>BjlMjP7Tj#170vmZ}gsR94=y2>i z$I){vxvqqW>Kv<)2?CMS=6r~$10i+NTFq;D_x842?47D=i(TBoE{@%hMya|iS*%UR zbLd7O+wzsx)CfQffn4ae7E5i?s;U83L&PTS;chm$ySVJjlBF3DQVb!nI#Fxd&=We( 
z1Z>;%0*RQInSfQn1V9xCK>>}ni&_ITG-LwD4jjDaa1+8Nr2;V%Q8PhfL=+WmW{L>> zUh72Uh1pb?lF*oJvDQ^WU&$E}kA`2INRd`y0zqEM`4%b$Gp=|$>Ps8y|~ zw}<;?())0G{|2~vj_X?6p(-jnE4i$Ttn+nU){~vTma|r<&Rk^R<^G%Xc+8+Kh7Q>Q zsCU@)!+0@{A%L_#_8&aG>2_%;xsbJYxAQcCXvvE;BXlbIa5&xHA70&k_Wd7xH|#s> zaJCh!A_cK#HJe6N6_gO%&E*YJkXV&2wI+Jk_#_Rg2<0yzX8t-STjp*IMUR$O>RnVssgK1z7q$h22FrUd9-5 znMffd^pQilLfEY9o*_(qE_pd0j~;!^ShH+4qiJ52X=!E%NMi_r9XnIWEvrb1U6-6g zN5Ba(Z^C!M)~J*sS%8$iM{;0Ta>1>6Z-fqGjD%oPYiWk0SWp{+ILCb-x-pHE#!Fvo zbY-pjKsF z@|mY;I+t4WQVH2b4opNa*L*roXE_H5CAV5s6;zGE%(g}?8iFyJpdlh4@}_qI*j`Wo zz>Rqb&M_D&AUhs>8pk1|;JursX`W}q)>_VaTL}pf0G&xgBC{G(z|Jjsp|x_t559f# z;rBmuk#n9rxm`b`7;0l^02v#QcNjEaddk2%+J|9oZJ2ia;s|rpjaCp<_EK~qw#6K|NFoH zcXo0A#jn5q(|`UiPIiw(V0u1GpMCPhy7oi5T<1n1wRQPB|H;pvzZ}1O^EGu@)$R8e zZ{9r|PsfXW|Dzv$hmhXBdCYSq>i+a!{?#x3{lCkN@|sV_Ltaze0f0chTP zX;v#Bl8S_o_%ik}ic!t2wx-SQ%7a5&YUzd{?S`(4U_74}B=RY84AOuIk+7E1#A>Z; zS&FPkhK@I+i-Ho6prx+!#HG~3ajIIXF2Ja3b6uC>%smpP}< zF?*saqOFN4apU*cMN>pFfX$(Z0M0oxYptOH;QLb`*n9Tq`(@HhR5ogenlS?r84+$8 zF!WnO@4eg5@V)ntU{hisf{1{<2fG7+E7nZOr>^S_JU8@N-RbcFo&>CcEQi^HJx-6J z&5`pQS`p`6KlUl0s(kh7C#ux<7Yu$JFJlS}sH!wx2lp1BfKU@LZ{CNPf{#32Bq=S% zba{O}jQy0)t1idIsr7wIfjO^>Dil#AE!<8^{p|Bkhw;h>|ItSu^}&~e{_@r5r_+43 z?+ih}q_w-#6z^#*?cHH%8P?LIRAwJS$oaICtc?&tt9(AMQ>(yqI2}Iw`paS1i%}Z7 zu^S}fINYJ$ipf?jRg+qBE}1g<6yCgf8_8de*Pg?a*SV}ks`s($lK0#te;(ZQcrG>j zy^o1c=kt25$AeVBH0*^NBLQY|Rp5lMgw(neQ1j{dSjv2HISzeSN-LUM1ILM-+cGaP zh1vq6Z?Jh;01-|PkH_ZZ4G96au|eGNfQ$$#RkVT;ipcRi-9J1mb4jin z!+^%acyW5TcOC%%a$NEF=mIh(bIv502v5WQ2>Ots~QDEL<0g-*_!GATa<|9zk7G8Rgf$3`Qj=PVp&UGRs>i}<={-fhvXbb=eLt`aF|P3mfUg? ziA=8Rx_;@S zH84;WCEShsD(cqK>Zz0lUQE@@kN{!xjDvt0LmYevL-1U(2#O;}DGgmDg+uVihcjas zE<<##Sj)OLEVqxhCwud=-~P$X^C$aZf0Ol>zx;TunLG^_WAIS`ux*zJKK=O9FDaYq)y3sh^-kZY)DWX&zacGwDD?5muReSGD7m$7|LDoOSS`|;ma0#m zKks%u6npjcyW`{etIuz5?@#KacD0sPaxt)eh|akXY1}2}oqeC2})cKysvGa80re|33z)gwtM8HEs88<(}Tqt5escY8mkq2!9ds#;2A z^+>>mh_F$r3{1B@>;`(=L>7N5onU}Ugn;iMlxC`CP1>g5MdR&x+C<-z85V}4WWT=%1HZZ5I?Dy5(o0fb)@D9>+J~)r9!tJ}qXz>XM+oWqd3h%U8N6>IFs;)KzZBIzcn1Agfx2d@Q-vHB`n>il*4J ziyKqBiKwb`j+uarnH~GqR74bojg8F2L?#p}82{ z?sDjcQd|1I1JhP90=2ME!1s6OZ5oyIzm*5xyykh_pu|CC%t6ksq zufKjg<(>$^4ADhzprWPJ zf>68#4&ItEAiu{tZvQ8m0kd}jOqnCG_g&v<1FagpPr-R+V)V@2(B)aA%JJb4_UYxz z=Psprx%m8x&!4=wdhyZIAN}A*m*eFpzxw>!-+sy5{rZ!yZeP7+Lr_E_s|E_pE+&uI zLI6NIO-pTdef7aI$(LWe`q7WRwR>>`ZhCb)4SVtIqzzA=bbs)>zcW7J|L-sU_iDrR2Kckk$x@HPpJU=XyHiwklH?f=6VuEpAOj$Vez41?JOod^n$`bL?FgkdaA1 z4WzWw;}O+b(ewFyKAi}ll{%EljE{%oc{x|B>>U|7aH!Yq9hfOpj z-0q!V!@h|s03-gbv&jMCQXiLMssGW7XX*4%mm_QA^Eyp;57YE`oDa-Fn~SlVveg1Y z4#^6Xz}yyM6a~>zs4!F>-M**L8n6Jbilg^7*s3Z|}eS z>h*-`ojtCz!a7`a>-qMJFTQyF?sgn6y-$6A@xikfLmZHCx}3{0efiZVtZao!rX>rb z5D&R7Lb;q3-?dA;kYenE+!jKWCdb3!e6G{%tFTP6M(8X@4<$U-0 zD!JJhsy~QU?~u?~%~{2QsdLWF^K_o)-NjAc@BDt;?fa(Ocm36xAK%?dUTdCC4{zsr ziT%EjGlbpHvvX@!)p|ZXfI2O~D331P?7NgewM5@Y_4a(Csi{vLeFN)+@@*HS*$>e4FI$m5k)_Ec0|jN_A;_523VoZ9m$7JE?~TDPd>cd zfAjY3`Q0}U$CJcFm(PcbD>_ZzeDQ^YVr!`&+Sa-#d1t|K=we9GM6ETcT3a>KLsr&5n+tS4GmW{_vacT(CM5I~+tIctACJ7nmG)28AYTS(UsRY1LUljO_Mt3d2Pjuby56H_F5L=tH}C8m_M{&9Lvd zzlJ~mn}1oSd6{N#-uKC&zdb)Zp5|$mmW^FNBs1_nlv+w@H!p5}{@cI1EK|`#msq6q zJ$?7PKaCxi^YqQDSI@3)e*EJfe*1e*=c51S-~RQB>FMi#f18Gwy5u~ALo=}k)3Tf& z&-1jb>*~F~7*0*$?Opbv8@l9Tr?o1mn5&KFv*m(P#MIb(Vy?NNBSNgTGP%dw`;pzU zmeZW?AMXHl9OKpXH5(nn>3m-AZXb^ihqrIuIOn=9h3J{Rn#@x+7Byh!i5)wNE|RiV zML{xS1Zi4w78UQ=1s^(x&d8x!bBN#o$dIU%3|7EZ%&$21nTP+O}lbkV70Ya3uq%uH3m0Kjc5Q&lyqt-02id5asE zfwU&7|G`|i3E-G{+c$V05P6Hm{N@K>Qo$6#n3)I}uu29(5kcC_mEhh>-dJ*--n@Oi z8*TtW0M65VJRXmS)zFW_4PwY^%d3d1!H01e2;n@PJo%xIAx37OTx_#NpMLU#AAj`0 
zxBj1h@=w~Dk8fuX0p%3pxKB6x%Q5tu6E@HD+MSj}bt%()1QanmFS%Bo*CismT5>KS z1jk;goTsHOCAm0sJDZD(URDKABQsR6wn^C$bUGbQ57jxBV%%<)^4f?nIKw1LHnm*z z{_!w~OMRa@2S(n7R-`QFvQ#v7sAN@T$$2`@GEb*@Ix+A;nPfVycdzdr^!(!b11Z~+BLLcp6s_YfguolBTVx{uugg5wChWoaP!~fA(E|XqYSv5?Oo=Qd){wk&A}&vOnlUJ7mL?_+ z{WeM&cDt*m*UnR|1qsMe(^jRibBcgOLMZr z&|udEDBmTH~zgp_`7`^KmMCf|MJiO?4SPOKR=$^z8`cIZ4j6M zkt|EqR?2c(DHd~cNJJ^VxO$Ee-<)o5p7?j~zL}SWz+GMsFJ5*k*r%c2r_W!1IzQyz z?}8uS9v-W-=QmekZmnE%TaRU#t2Pe=1UN0rPk#2F{`_}-{`%`zzxwQM&Dnd$4Bxza zU1Zhe@!9_AfA`=2*F@)EeEcVGZVz9-I@eat=am?*Z!U)2U}(qFc{)tC=Tfrg(C?B}m`;U^ zRRGKiNvXvf<^h*_>Ka=EP%r>bq~i%BP>bINUw8V{W;%Dm}zLcDwb45WoHIyZih5{nH(2R>>)+ zWhnq~;BrpD7^)Z#)>(%J6nIIg6(F^!#Pu>Oiz+fR+p5$AS*37G!rqzQ83NNGB7yff`0ulgmt6VAc!MlFwJJGhJ!cE8H zc-%ie9qy~j?e4mZ9SSOg0+J6)Wtm3b`DtF>f7pX6iO&-`uUY z+ze^xZ)yXsw$+v&8b@yp5obna zau}oK=*UImY1MlJAcCnX8tZ-ZIkj4WbsU!=#OR|TSGnY>Ik8a!QEk;2sLSlgdNR-| zAz8vMM(2#0mP>606c|;&38)%F)>-QvUY-g<8yrJb=M?4X{&;>`q>+^&w}RRr)Gk~f zr?FX4EW6zc6mJ_T7(gMeORi!+j=FEw-OmeH@}gqFb-p)Ih8S!Jg1qD;tyB<2S*->jGv&1HqmQxcyHu7Y z`uP{1Pv_<7@gSl`RZ&G*R8-_6Q{!?h5RuU)V!h#_oR^YI9W#JS@`NLoS_4i+Y^K|#PRJ!Wd?$SEJ z$fYSOiB_()NWbcDU#(t!aRo@uu6D2bQTC5t-7RCv4G;UYzVWexl8g7(hvOmFvKg+e zuI?VDW0P*Mj}M3a{lU7yfXiieu}c|BE{3QHHz|O!y;|Lzwv`Xhp1mShrj(nUTRUFg ze(|sW^6N<-$^} zcf+a|Rboo#Ddjw$CV}EyB|Dv0aamTmbUt7$wdR&`u1&ho_i?Fc0On(-nu4;bxwK3OC^Ks*s?bDgt&Le({@so6qVn z?jP>Qah|7A3!e>Fm9C4y536AlhQ5i^R!HKi+pJ>WhS>;VGj&=JskCA}DG6X>u7qp> z2snC+-T@Zsrb6;|uD zh4^$h8SCTFHHIWbLFbf0@5v!LH#spO3h|nMyR|NzbHW4c-RJo=ljpy?;PNbxQ%31}8 z9CNPXEQ2C4H?)M8_FHQWkwLIz5lEC3fLpUfwu_>y2&m0>Ye&#(p3dVqB@~NbL~Dqa zkss?JTiL$Y1|I~PW|jh6i?xlcQ8cdegZF(1t;+GV)S`)t*5b6)+C))KxGd}{)?Dn1 z@YxqHfA!_(ke0i2_CvV;;J^R&+v&6rG{Gtqi~&OyEg@pp_udM#U(zTHcaL|aHBQ(P zE6Ly99sb|1f4h|Nb}3wF4epcO?w6l^`lA;wOy%#=(ON<^U5x9Y!>)vAVjp7&R?Jg7 zh?UR{eGKC~KOEkLzVEtD1ZtM`dV9SI%e)Zc{^1}5h*Vo;Ze&}Xl8CYBZPzt28H-xz zOy~lP={$`iG!4!$vo@}TWGvL0&eJ>u?|oPeYQntCaR^->(vqZNQ_O|EwY5~0#?p?* z?4#+2h?bb!^pI6-sl3N?D;VP{FM8EoKVCn+pY}J$?iZhZqPArD{^>Bf;NpPC>3wcL zdOSZqOn0|0Hm|?91seYA|NKAw-CzFx+uuJ}vRP{bSp*eK!<1?nCv)1DtKF{iUFG&- zdj)HK{P4~e({En-PHS0muIJPDbE}UJhZUHmE-P>U=|B2;bMn>SfA#w2_47}@czFM? 
zfA>vG)!HGQOBcg7bZ@@;`@h+;?bOA(-r?iH&=BODWsxSrwW=bkFf(36&&v|%->IPRaRRv5Wr*nFnE?cXkF6LYO@KuKR4?2$ zLb{NUKkf@r6p31Eo(L3JsR;s@zF!f=zLy`~zpvxutrZq);&x@bV1%Vw*6LVHZPUP>3NM3Kk_06i(@#(_?p4_@8>=$Oq$tArzJXzbU2XtnAeSJ9WmvN~ARndh$SY265 zmUf$+w^o~&Xs@5$TJ4=il}f^O$&z6?KL8;Edxwgqfm*_{l$53rLhv|3wT$PSQYN{y<{B0?);EUZ`IPEt&~7%{ zo9mkx9HP9te+vp>h+Q0F>;dq6KCgCb=i{@R7=t_Ma++qSE-jT4E3sL56;o23M?sX> zIcKaft2D{O-7(EI=MrP+hqa;3I(s_ucKv*slQve#q3`oFoeu|Z`rO1Cx7n>ObPiR6 z4s5g(ZL4*E+>hfq#&~^wy@~5who&OPDdqkB>8>_sT^~A7w8rOYf`;UJ5LFRGV+cbC zF?MWtE=x)EydS-F&Vw}plrFA*5mr$(sw_EYgUEbYX)6mmvOoYTDlNp|y#*jqWe{LN zQFGaAI%`F&A}E%stzN2nkenkzG6-OYRS<5iFgCG>0zzbfpg>gtosO`g1zl^|v>bJC z#v4IFU9I~US2uAJjHm^imm&lyX|1v-=Uj|xAG-`@JTH7o=*S$Fm!Gb-x47;5t!ia2 z+K*EgJb-v;PFR^)K`YnNN}86ZC*D6UEZDDt4NzLE#riI8xBd&F)8Tn(SI(JkR-@#*|@KjtDxV2$aYZ%k`` zqop1DXsvOCuC&TDy?grMZ~yKOfB4&RJe%TnL{@WI&bxKC-$q*leE2oX8!or|ks z#dg|myQ|&q#q;N%e)=hbdHAreO$t|Q&i}PW%T7?iqWnPwfo<-HzNJbS<5WICtpgHASk-?$|+bJqJTNf1pu2O3u zf>KqJ6fH=iLXu&w3s`0qPmu^)smpSq(|H6c3|vcXMx+`9W55^-mLyeUEZ3IGA_Ya7 z0tz!2PsUZQEMhDGp#i!)?D0q!4uJ(Wv&uqf3v#UY_1wft2G4Q#V8`nyyT@I7&K1B7!(yPwNB^dFdmvTaHiG< zhS88S9?_I6)3R6}$@0Y`q=JA{N_qeO?zn$?_wK`Hb;UqwS`cV=wdt-lLysbi(9&4$ zyHh^Cs}m9t7_3}V$$x8t#A!k`~H8xmll`n>70449EHD0tx;WFhsb49fVhzcTsk_d}5;YtP@ zc}pL`YM09%x68yw)><;Ia5e16T17I}C<=f!sjW2@@rVS3Bp<^=BDGZ2$#)^zsHoFC zX=^oSZJ<@HAr=LL!U7IY3j%9sKuXrnM$Js1rn$CJY zrovT-$(mZrJYh3qXR=0>1sbZbt4@8UcyO6r1V$>>}kA~jqM-F7pi?RuJ~^XZs#Lc}K72s!7um}N?$>SGADZmw^p zWi(E{KOT?g>8!{7-#)yb&SUL&?d9tY+IDw%H@2*vid5T$ra0$CbSyQm zgMa<%#g{*NWvrg(c`4K5{w#l-yyGRcWu9AGhIO#U*W8Xz=bP)>7oU7`Giy!r|Nejc z%hUeh@UZ{G*MEoKeVdoscYzEDH$^#3r!gHq^_$rDO-~=*?`xs$>iPaSzIppje>(it z@BS|5EX+;ee4blNghtEPZE6mpQ9CcwEF&Y3nw)c)Cs4e(eO9&AoVJ_St5tmY$@a4^ zKU=MLIm7;`%t!GMzI=9@(y6yLSaj9F6yb-`L~MugL}f0y2U z`?eo8hvWFmpMAOYU2QN;ZGURx1WRjxXJa5ZV+UVU70|lQ6*0!L+Nua4l(sYyX$m$X z6CeXOX+>F?0gTo9-uuMC0!WNh$FxS;|QSx1P{8gsjt z;n1RruofZ+YBjjaixTI>4G$F&7iZdfZkJstpn5D0+?ln3Xi zwbu8&^Bx6q&dglua(%UOjtsSD*SEUU-VFEO?jNS2a&ZU9c{$~$qczPK;o8op)A4w8 z##$l}v&>8EBM~7@9*YiE=4?;?RGbGabI~g3{y&JK7m&kmuJs|jWFhE zOs8pPp=G`GeOz;K$NN_6nU7qV!612RBD}~vGeZN`xtPY2Rx{^iKb_KC5Y1`1uTsgt zZns_aURz1yQ5qw}+$vehwU%)a?^y6~oC%$?aU91|DniZL0JI;v-PP84>ib@#9FC8W zX^2Dc9rb+!F3c({27?cMzcSV{3W7LmKuj8ELjeT)p?5CiRvUtRY+e^~2Ep`x8)LT` zg7Ipo9gcUtx9jc7+K}c(W@=gtr~)wyB06goFO>yqjP=1I8>rx@P`5shCm^u{{+Iv7 zKmEtQ{_J|Y{oU{XudAEk!-uDNemp&m4tLh*G=2~jJLICr_3m>U!dHL%;r@8YN&u#* z7J+7j9QHo`=*utu{LlZ<^|LT8=WX4jCet#_>3osCt@YkpA~+qF^El0?@~c1jwW3*j z`~BOurJnxmXFu~{^;D|X5?mj9pO-l|45oLhj=IfK)7Rg9$1vXC@2ilpI!!Hv?f#G& zpDp5QJ*>ip003m@SI6^HX)PU&7t%Zt%*$*n0W`I?ABKujRjFm~=@-BF$?dbX@7?kA z^zbmA_r=@z`R6ZQJm2ir@%MlG&Ew;VTb-uqvXpfpv^oo@b52w^7iSGJwkmVU<5bI} zXlzY-d^(?(d^pYLR4Qu&an5+>V;_xG=eadjsNS1CbbYr9zHb#cQ-etj5i1ZHGR6Wl zVrfE1fQnSPsR*d`CIs6DAAw2}B}>jCk>6f*UGHPG){wQfHJFdn;gC|QeT)VW6p@ew zM5J+1DTz$aK|nszWI&JskPKZ)W>8f{Rg?jQ=wgoq)uu`Sm%b)Zh0F4fhzwc#(VT}w zA17;NX41xuMOXz?KSDP2@=ZYLA9Pl*7A#jaE^?mtkMn|WZnjsiUp((zG**pO!0ICG zwjIGLcIa#np>diY9v|nDoO7XzFK@3Uk1fxg*OX@v>3z5ILo~fHok5$LNNLa*YQfxe zX)IVk=cnXj==`wieTZm%|FbV%r8)oAH{X^j-D=H6-`u}tH7Mjrwtdvr(ws8aD#cni zgaM5BAnWzu17$9NhLwm+Fcv_Bn>Az%scMYTT0b5aAfqf#rzhi3+Wh?a#s_-3`{2-V z%D_#ziB#jPi8H5_36x)6pkVaOG>FVQ#nuL!bymn3Tx%oM5O-vs%ZkQ ztuB?+q6cq+@4Pk1*t|?xq#=s4R2aFkAuFofSOHVbVD%CYRD~)cqBaKSVw#sK715F* zXI1-9)5UNvPLFsPR*Dtr7G(pPlDD5(;Z$KAulTjWRFQyYw(k6BwHFyrgoq-9{r;N?CF{pW2cq z8<$ds*eU3e7MjGl(Dl7@QK~XZh$e(+Ii-|*us)a&{dTuzDWP*+Tz4@N8U%z(k)Q|w z?6$W<=z=>YVF>-u=hM+ytAdEu#V&L!V;5udluozX zm#b}ph+=R6_y(A4?3zUlvm|M9=Oel^_eY^3!N zW8>3#`e0nTy}fZ^aMmlZ+gKR2%AB&mB7gI@Up*X_ZTC8u*s_*fR@==#`H%l>Wh@oW 
zW%};Thtl%(&GQ#8UQToR;fJqP`1R{ouU@?%%6E6)9gg?GckA0{tw4jkL2ry5=LABn z@^L(Fy1`fs#*#r)Q-!6~xh?dxFwPXXaRYSBnx@jlh*dfhKKu03^=6IIPTf#js%d$1 z|1Q^@N(#>RVHK<{OLY`9dJ{~)*#KfRv|9H+{^@6b{^!3T%XbfVH6L0j+pFRF>gM(H z>+7v^j_yC)><<&5QDlb3sz55B4Ka&i_0GmtTdT;53Miz;+2F|qwN=0Xi8>681PfFV6$K<#GA5FB zAY^Ru5n~LiegJ5qs;nV)sC5~a=``moM#(!HjB|zzD2i&~*0>a|T-(JRO?v6MM^FU1 zsQe20vCRe%0OSIgSG{;nF8$vC`VS>=WXM{wmz5!X%+w=YmY|}{te0BU3%~||SuWXr z5xJmf?06p6yP+a4Ilupq|AD9o-4-k8DI zy392vuJw34TI0GnY}aAEPCi&{wlOGasd>rE!~SSUo`3qv+W?F=H&>jk9ycl7031B^g&l@9ysBG^Vu7vlTJ()5EIUozJB;F~%T?H5!I+^=kFv zi`Q4H)n@4HoZo!??*7Ap087eLu!1Re}(i z7-Q@LBAm`Ag7Z8txmC$Y8-rn0HDIlyloB`Q))bm?qyV4-fR+dp&>H2Ypa_W8ptTV@ zP+{hlnkwpLA)eFj+v|=i@S>H`_3D({gUoLf5UgYiBH~iCtW; z*V96TA^P4Kt3YhP7`lDFjn}*3*1Wh5*EbvWVVp|mHfU4j!j*HWt<2*zmDW_%h2Q|^ zB`M(b^_GDj_fO~f>HhIUaBa5^-gnnmx1X%8(zrYob28JsKYeu=(V9!?5-FHQ<2<)g zTh4h|SZf?~HSBx{iIa~M*6Zzdz2vkk3z7WEZ=etLKcU!Q11BE0bshh0|f0P7elc*A1AVUl15I?Sy|n%l$OX*w5UTdrpoL^4S3a5(Rtts1g*?tI+;^MCPw7A$7n zajB>Mm_`Ubpz*EJTxu0HAzs}+UvHhUBbUH#eoF{%d z-8D%?3Zt6n!Vsg!;DiAggBa^V2*GuI_~g~|&%bOpMsCqBR<0 zaNeuB(0BZJWT46{0E89|X{(ALga*lmfUHJLYc1M1PmQG&7C`jQ4Sh5Yd}oaX=S<)C z0Gw2fq09K&#m*R;vxuk&2{Sjg215*+oogIJnoDb*dJjI8^YL^%Pm>xofs8Z8I}imU zvDfW3u7_?skNc-HC>iu6)p?#uuFG7dad3_XW6@oZP!>gmOV!#cx5n0xxrj_4e1Vsu z2&e@DGN7U#cQ%R)N{WoCh)M(>Yb^liBWT9_U)xpy03ZNKL_t&_(nrjwqN-fN#I0RU zD24z3=O6kL0lRc8vx|*+UZeDsDjWMmYoKq>qJG#Ev#TZgvtWhxG#f#V6%94)bVgHoJ-NrYTWo{te zT2&R+=4`mWUVr-fS+w2?F3X&!MN0!i&igi>Z&ufH9Yu)1o=@`~JzcGahxzI8{;_2p zCs{^d2`q}*Ky|fU)kYR=AbWMS8rD51eE-AuANEhJinIz`HbWeSn>4r4l(|m4oN^i~ zq8rx3dbhq9yI9&h&GRHV7i*n?co7^_p;i{*k`1B|P_QPq3JAuKA)=2(TpyQva{yrPS7%YFT%^bKZbu1!6E@P*GV0lu*GLV+fQ8NiI$=G5{)|wK3J= zqFJqP`c-^-IxM-A%38A-@Y(a>H~;u2+iQ22o|eo~VA!OZxz3G@x!K%Ytu}_-Sn~kb z$Jnpe>#M7~r;O-~i^dukgANw+a{u%=9`?I2T6xiR-4IrUG@o;WWp3F~2qAhi^c`$g zQ1g;^v8etn;)^Q@9*Bdd3#*)0vNsTYGMQwsQ_RMk}-rBf}PIuG)_FFezQ^(RzYiD z{^;3%^q>6c&%gZQ`}_BA-W|ByiI_KU%Q(l=QYhBzts?+K?%{r#rsdh~m9-}4vv)2a zXcmih*KM2BbWU0dQ3WGFw7I&yeg2BTy?^`N{{FFZzL48L9`;22U~CX_=5#(jo>n(q3~oA4oJ1BsO|t_}wl~I^5I_0s^WXfFUo{?$MI!g%!(E8f zS^-^3IX~RJ8OP=B=^>Q_psg-k*hc3mYp#Z5}t(#wjhxcHONAEQo?AGPYJr zZI#&zlLC?|DFcM<8j{4 z<>W%xZ2P|NV~ie%tu`&^^SPCB6rT3Sl=9t&M}x=!F~qDQm8D>yVI9`XX`E66QD^OH zcipd6b;?Ul_wOHWub=(wN1uHE{r9;n`@8R!RGnRyGQYcjyFVHWYE+jBOMyxu*skVe ztgbiPo7;hcJRF|pTsY&?-66()7*<`^^_w10jKSEu;KFh|zq|Y4{fB)wtZsJKK2j@= zU^SkX=Gr{B)_};hrcDtVX`*G%5F{=6{oTHT4?W&&ZomBe$G4mNzy9stScdE2$^dbq zve@jsscq=i2PzhL2o`c}EYTVTHAVY4tfKqklV_W&n9KBZeDq$gx0~SHc$%!C+uK*$ z?e^r3ZYb8q`A~m1{vjQ!B?YR%hOhqk)iSnW9TG4IS#QW75*V<(53PwRNY$onfv_2^ z00of=38Xb{Ni8&}tQh?o=&_!Jw81x=VfkUySV1YWlEj6VBLNi-;H;j z8=m&l+=}Zv(9VQlj0I=?;A)+3p4~p*euA+m+o$_+%p%s}xz2U2nuozs7mNmHqBRAf zaH}ch)&d#V4-Sl^1z@HaR<6>qdEv*9B7I3GIe6&Z*Dq;XviGm%BWrOYb0^1(VwfFfYQBA`_f z<~b#&jT|oCVijr%P=f;&HCDFUZhN&MOCkDUyS{$ro_{)AwQh4gTwlHX(U&ir%X69b zhYwFr_aWHM?9NN&CdN55Xza%vTZo-=E>F4SVum12Uw!`KFrSZy|dKKfM3$ z!`;J&H`V+0{P|CZVMqSMw}1HuSga?z>cVE@&Zi~Uc6HNr#^t$5R;VPYFrsnJvxTM> zNVK65^6qe2O1T3uWof~f(~@h-eYXO_TGP~KA5qcRE`Ije=XsvLdH=1;xFV;{emFen zU2E06ym|HcmtXGh+WYU%&LX2JrFkDi=%>S@L*r0VK3mV$lSenD^Sk@^xpL@xb>=u8 zxg=n7b+hfeZna(;>&korW4N>?SO zdb}GCPv`S#3c-%&MF15|WhEd)DqPWO-*-NE=g1hldv=?bc0Nu|4@V?x(6Lq`@!m#f zH|tgCJL}0fyUg-*pO*y27-wUQ(OScejRR|S210Vo4M~;AI8V?ZnZiQa1e);}*KUw* z%9*H%rq-a=T-yA?zeY6Yae9Bh-yfFQS#Ex_T6M$90g(;Mw9HeQ)1nNUVG9kEFTD}O z4W$VHA}J}d2n#D>ZWW-RkSeQQ7#ji~0s9I7Xf8()0VvTw81_H_<)U|s2s0=Wph{~h zA|e<5fT-{#p7^o(>4KsH0I;UBx#eJVyBcn{JM8`Yhle>&C7BZa)pomGZ(A+<(+G%T zn$PK6d0B7!tE=s*-?hffi?KB9)~oGGYK5wKO46j|A`Ki}YJXgeW;bls{jiF`IZ;JW zvLUS20~i|9eB2+t{PB-A!!~&Llb^m`_2J+C?)QKFH-Go^xQwS7egEv{`MT?55ltsV 
zUs8HnmdtFuzr`~`2?bq+}!rRW4TuGQ~JDrYNP4Lops}S%wPC1R+UGz!Q zGVPxpKrr}D0p>Dwn;3%UNBHpeT{_RZ>)n^X{MFmL$G`knzdhV9eHcJw8t0W?yWor= zi;RxAX-*}VT5|KKTD7Fss)B%`w3?@h)e2hlo$vgLivk0@Dnv+RRS=led>s9ZafT|OL81^s z*G2M1YOA@LCYP2M>#TEL6q&P387xq6aTTwwcR&Bd&tJW~S#>DoSj+h|9;Rhl=H;}^ zsa6ChqIpU&+UvL}bA7D)^WnH#?^wu$=vU;h3odG|iAzcI=dWM<Ey7&a}M zRd0g@+gNIA4J%tQ7=&!orlrBLvSMjq~nm{p&yZ=@(yoa(%lMXsy-Ll46hJ z`Dmm6{3o};nsI&ynwQh@-4EaIkB@Qv(#HRfs5k4CCdVACT;#Cm^Zg6Z9jRpfAc^8FaIv@!hiYI-~8#n{N(3fzT~0& z{XhKO(7y&n$eZ@&4a=@U?^t=-=} zy!!M-5c%PUH?8Y=o`nM^j4@3ifWzbR^zzfsr)f$OT4~lo2jHfGAWuU~X_!(F+8_3B ze)w^c?cKZ6+jsYQyt{pVyPf3m?tTpMPWb-)-P6PQuwOaAd@4kVi3nRM1;JAa!dcQb zPbowZk?k<`Ns;5}wAK}Z#4+SF$o2KO-A%(ZIbo~)csSqP&HMeyyiQ|Mbp(dX+zpUe zzyVp1ThmsWb(7NS{@k9PW)>8x)06;#vkc5g0T9p(OaVe~@jS!R!}7FW))_p@;b6lQ zp6#xyt#QhE%m9buVRk1|j2u8oLA9ASt5sE%7=auR3|%i;8E|*|Xl=sF9MK8E$+07L zysRo*dJ%v?1c5PpOibOpo3p9Abv3*EuQGEY)P8XW!#{Ns|LI3cuiYD2)7I*`I-dc$ zp!ZhFX`Y8;094hr`Y;X-?1<{VF7^W9J8bcb#Yc#J+M84FuwQ3Ci^_y>Y<8HSdK6!bADgW$`epWjC`qyu9l&k0Y zvsbUV>*IPHC2ix>hvY8E3xL+5$ETGD=3{SXbRq@sT52itzLt|ddwwI3psNMQM46{e z-rb+~>!J3=0c;pSm7?hWNtYR|Gld|5w7~Jf^mJBP8mbXrZO01q{@r=qfBR>D_RCkV zKjUFTL)vVgJ)ZPjDy{LjZgY5r)Ff$;KIN1HMRsy{xIesqczdqr(|V5Av`IS zJdGrLo@WtZWDkKUxyOkH=oTr)ki}tcwW+;(b6}}o|Itr=`T3W>{NzO*z+r}r1hiik zW4V3t8F7W>c)ymjnuRD)9+uu7^x^oFV*;f_;qzBFFRpb=n-nr3t;-y?@Z}dj+3rSz zE|SCR&mJD*yZaBe*v-|;XIJoee4M)u9R*^y;1B@{xQ%J>;`{sgVVXjMyb0H@uT10b zzWKJ48OWc%p0>MJhEi*N+&|9CnZsZK_w(_?{Reh9osQ)=%OH+e`r6G28Pre_(Md_w zw3MZ-SKI3;O=#Y&ZMVa8m4{7C<1XNKe$e}Ot95Lkoz1`sHI2s1%!bh0YNcW+CMhCO zbN}((+wqETKgl;&c`0@G>}I;!9nMck7R2}aA0O`CyXt&@c)H)Sq+z?!jv9(1UaG9M ztkcB*;$MD+vgTo!Pw@SBkFUS}lYjQle{rs-hr_d%uXitBZ8D9I9}Z0Epa01({>^{) zudk^ml&(M}PjNmT9^T`5 ze^||i%~gsiPbss6n5XU4RjU%?^=>!haczx5h(nGc&#PVEy!!HLJS~rJ-@gyR{`>#t z&q|%Ydvh-gzkl=X_dmX$hF4~8y$@N+T5D|~@v_cqEgZsHTB%)uB#{&>^=vh5rzsEP zF%#qYR37$+hxt$zXNur_fA{#i-+Z{e8K~=& z@;sMr!8)qCJ7tt1UG=0P@7y1w2HQ#SO+ z!|Ca8I-Qr}u>;UJOet|KT4Sf2V;E)w^y1c-0&=sa-SKo@Yp=_!^K5y{oXI7MBt;*Up{UqVtxGY24~k1j%DWJaV5(VN`N5DoA$EbLvatF`Wb7!CeY zPye!y;~ycY3{%)}D!biwv)ldphu?HHreI|)b6e*1VVW4wC_Vq=b&dfb)S~a_$FkBG zPqoxlR|hrdWU$z(1PO=T_W6)M?A?0@Cg`=W8R7~UOQ~`kWxx~%XueInJu-MA3iK~rJPfYbUjA0VK=@I-7*KHOEds&4$XO)8^Dg3qC_`iK&wnN zNf2R>Bxp{+pnf{RfBMhgN=i$0i1yoW-yhFw)7`Sl)7!`FOR491Hl{8y?lw8Gfm^B@ zKha#<`@=)u)GijcK?b;%X`JHuROa&vjEYMP!Mv(3&z|pgyBCbKKRmsEc$7@9KHZhm z>3;3ZvFz9Ts*iV1EV8{CLX^wZzcnQcVAfr`sk;MRTBt}E`SK7sB4Ut`k}bUmCL}Qf z@8IqnV)A4zT}yXD6L3L5Bw!glafjyU;0zKv%3Ku?tD-`5WJ5;EEI^U~K*`(;trLKO zhQz~^w$~FNHrGpLPmCxrBXfw}T`(pNWaJ?}F8h8iIO25s{EMIeY`BVDPxt#nw^gkl z=e5)xB_)Z&Fr*AsVXo~kFL#fqX^Y`niJMO#5)$9M+}vEg6u||7t(&V65`Zwl?e*^Y&FiLXyP)$TJ%4dCZqhgn({>OE z&9u~t4E|uPw1@k^N4s$CD2?tdRGW_HxU%YrdZ~*kY z-Tv;2FF*O|Pd@wZ$8WF4oz7lQhJw@Z@~2-t`}2SMueLjQ|L!-ReE#|E%N+-%5E1a> zJwW?7PA^~IhM3;nKRiAhzkB>%yOCQpZEnYT`Syo5N}!n9GQa=st(sk@&1O5DPN%ia zd5W*T_~QEdI>iJ?YpdPu#~<%w+`f4B{CIr)?YG~Z>tl8M{_gF1KA#R6Wd_IV?aqy~ zJ0VI+ho`4+f42ai@)(iD9SGxgvkf@9tFF2p*Tdt9EX?zI_u+jf8pe$7+B-rg3AOdA zdOgM4t3jI@dI-{DB8+Lmo9j()>2z-QPmkn~B&fP7Fa$Lj(?-%rJi3#15m`5*ma>4g zO^QjPAQC#ElVx-om~OW(@Rm6uv5!fnO^PY5rD-#5s7lXYJfEgTXZR zB^~JMHgcpPM55>__2k;Us`hSRXw7xiQr6x@f+3NC`GwI9m&MV`rYJaG!cJENy)2rN zxa&WOEC3vl0S(EJ=mPMz3kySg?_OK2wUkno&E5GAT0=FXkNGG9fYh}GPz8P1pG>H) zFfXNBy>LI4xE!AL;OZOzeVV58`BYZzwfAyA$Mc1?9AXra5Q2Aq+8^ubi6NX%O^pR& zNFzy&(Kple_4Q2-K_G#_X5)i4D+G^KsZP8ZiB|xLNO3^r>3oUH79Zq?cxB{fflOMs;;p)It_VRY0I&lo+RwcEjdo+-?BSG4;}yg4S@I zsnt+h>qW2!Yp}YxvGoBUQI9di?YP-&&hu$)MQiOv`fitQub!Xg^KX9l`1{}AZLWvw z+s%ttuZFx?i~jiIcO9w&F6(+ao$Ff2aTAB_Fg?4zX|07EF$5>Z0L+9;k(>l$ib#f- zx(*_-*1p!liBk^qy42G`gKeHgRbNUqbP0kKtu!?H^yi<9V;piq^YdwadOSZooKADQ 
zxw%O(=A71gAg}M=-amUbym&dTWmZ>ft*aHSr~SIlzRYlS`z41kpHAhtN6S{!3NXiY znS~O99FOy9dAr@EufP7|fBWZuHlL28xTsHIEVUf&N?RXvm7jd_v)k>byZrpa{N4B8 zK5VadH`kvI>D|-e!_U6{c}M&4yYCMBV(UJ{eEaefq20URJ~dhhV~(k|wyx*Lhf^5# zEMW?1o2I%hAXHPMf^%OmneLd6%DkK4b$6>P>c^z2Sxfg34w1IcS&R*OLKw*r63 z(>QK6!?a0tt?%38{=-9APxG=~aH%P6AsRw0vuNEQXGC7k>-kid?)yW3dg$!L;2o90 zyrg5tl&F?-Q{|L`gynJhKmO0Z{nh{dJDuB`AD$4$-Sw3^rvZded)=fV<@5)(9p*fk zK`mOFn|W7LZ~<}UVk{^EjtGH612b>nCNz*RP0bz6z*vv%K1EmLEQpD z%;ISC+_n31Hcz}i73^Vu(1%Aa)pb4UxwnN?-D`6Lb08p4V2X?+z`{%vtkt!(R=W0! z#OnfScLh%(!w|-8PD27G4elc`0wT9=hKd9s2Z_PJS}P9D6sDL&_~Not_tsmfy>vQB5E4`6<+1(t;fM2jIGj|0hBT}Tw^PEHVXf>u1kKE2P=LPO47)tKD}V>2>|yj# zYVecKWihk0tcPV8auMP(FSRHF2FX{uX8;g_Aco#+44!D1;(*&wpQ$eP_;_qZ3E^~D zj>moNEd)9(rp-$M7HF;PpZ51p`>t!sBO>OI$yrP5%^KFaw%%&K-UtVD#$c=jXhApx zV8Ill1Oo7swhs^c)9HAg%W0WeG!4TrOm}Y=ZR`DAD@Ae`2-(d{`yYMvGelpPa(X&K zA-1#|;>|V+GCDJWgVgi#`+xk92z7n^{MD^*efG(&;C4Bm*utyZ z+w0Bqzwh7Nzj+hKlm^#xT~~kqem`!z1b+MW!}T?4Yx~Eic|S+UYS@cB-YsROzxa>8 zc`<$Wum0Ol{`#-J{onq>f7o1&?;qct>pX3yfLrc>Vmd`?v2tynWw0@E}%`K~VQYuk0mfCX(d4 zFP;zQrGEI~?fHCO4S>VxD4|5M>RP=%E~m9EwKsEc5w5MgeE#{@fAaa4zj$>xAAbDt zJ)=zH)w--de*fm-{$X7sgq&~y03ZNKL_t)}A{=8xq!3fcdE5=(egDRdmZfR;bN9ns zBGb~mDP3I+DTnh~PUkr=d-r|WFU$Qn_1B-=gb=BVDyb(1IUe`trQzHu4hFQ8&JYT8 z>}cMLHMS5qu>>M6-HCzOdB_MoItjQtxw4`X`eua7zKFP6x7r9SJ>Trq)zHlR`HS0| z>u0A^sa>aO<8A=XC?QCSezk)b!DG)lac#{M ztjfb%)hf2*O&S6z0T7r(X3niFT2@u*J(_3qM5d$*xw1L93X1j)eyPo)0tgZz8iFD; zuZqe{2+-XViEs!M(TUvA4FDXtYOk&L4y~J+1R0#2&`rCU3V94c&k;!d00^W*U_#?A5kWQ;&~+!w1UL^jJs z%hk>6+n2klsVNAl5cF0Fo!tCl?<26zwP~0bST?|p%IE=!ns1h?zwXE~A=eJjz4G}R>2qZDaU@0gI-`#(B z_xA0(H~Mh@;A>~50OUX$zd|#LZ?9jBTPziXimc_)LjWZ3cm403l=hi`TiXkB}xq)He5M#t!x`Rw z<4DqU+iV1c+R~IV@G($mI1ds{if|?ZV&81@cDq5B-WwCLlhn`~Je=t=QCEwFHVcO& z$+{8>8es3NMYY!19M-iwF|PHg^j$MGR0~Ap$V6kl3Npoj#_O{eLTPh_%$rTtqZQj)rGO!BLy)0YM~bffyMK5eV-O6goCb75 z0I1~}sO8MD$H(vC`)_|6V_KI|Y6V5dG>sc?lgQ+?SruO)Q%V72TGmoly?fWL zO|;v?{dj_s%KiaGoCYV#7sm_hJrK433n@8fyTdiET~b&;j4 zKzceXL=_xjj3}kHMm2L~tn0Ad#_{Dg<@4!2CErcEtK9$fpWU3#?H~T}A0GCP%r=eb z=Rf-@Ci|QJ@|*qRpqymv!x} zw%XR3OP`?9NiTJ%>TY^jwO4Gf@nW1Pp)8PA(|5H_%p#9UY}nUxcxJDS49bb9I2BOxKEyM6TW!ey^i z5ggsz(cK(TtwSgB-nIAE)zuup93YCQE86A51n%aS)-(Ws#F3jV*5~bDBy~N$$uaH5 zU5b+<*IvxavYrgMQD>rgJ*|Dd-t4Af2o$D}hGci|&p&?uK^xuN?3_#BZJaoU=h!jH zNDx%j@ERh{OU>g}8*&r^Q77?sB@mcdy_+NnpcvA1Uu-AOtE(0lrrk6Fc<-!0b1!8* zJJsGXc@RcGXxb6MzyP_mDlA~Z9f82qT)TU7P?i`m36lt=8~~g}IFg8D8jOfi2$3@f z0V75VMA^r|9ZiT+ngS1q89kd5NC9*bV#2_TuC=UQtFSO)tqZl~d@wt zC^Ga+QupUl*vzrc2Wj-D@H}Um&`~8pa=eaUU zkgyx42FfFQHSCT^-f^>?PI|=N1TY3h^wuf?ryMv+Tl?`iuV?%Em(QO+`*ivK`*mL@ zzWU_mvvHUn4|QF8^hS`Fb8Cj=z0^%6krY6X0!vg-KtRT_9>!^4rruWxAp|kg{lhU{ z?QC_gH9II66NAd}NmE2>(;#s^<>h2dS6_d9^ZfSZ{o~W~m!EYl9BrLf@HnImkj&@P ze2(Cwsc*I$0NjopJc}hE{_y^&fSVT@_n!*}mjgp}e>k-3(+_e3n{3P80)ZJmjrXG1Toarex^b}naJ zi}{LW;TkLx1oW%z4cdtShyaC$EZZ^7ZEvl_6icbU|MC6%5AS2nn|M{utJXfRRjXd{ zRTO0RA<)Y&zP!1)-EMD&tr!+>Iv@9CDWGH~WogUFpC0G;>-$$PpWoc@AKcQaM( z^L)@|b8F+cdG$K~>7Rb_^5rWKI+wNd+Pqt{dg|_=-Bq<~Z+%_Yb17%_0*!hD>(*Ph z?yXntt#@tB&WJ5upSzT*t-D)c0U#$tav|X7e1-02$`;TJ0Z;*r(AdnPYzQ2Qh$tWs z*2s?H5_;*R9EgyJ2B74aMsO!}2I64O*xeeTwQ)08?X_F)wa#kwv)kZSqJ$hEuzBf- zOn@v2+_V=k2KZ>RJC;!973m_qqU6L*&12!jgHZw)99Yggwr;Ud5CdlB+yjyE|oOz?=^lqHVC)tDzHwZ_kG zwlU-&BSElkV+db<^40T~pH8>Ci2Qf(*MITv|07FQc29&N$mA~b;ZRClmQcG6V|M@O zarWNL$pMIog%F{XdOpuF2}%Hmvh?NLAMTGLBXcw(vyK4Tmu8&>LzF{18X6HY8krM# z019}ykefljpyC-(gd`AxXs_DLx+}n}W++67D4H9XBI?I>Y=?{V;Ii38%pw9r=4Rk5 zq5yB+-*3igoQ9MFaj0!Z4c)!g*1GjpSuKx&**UfpL1G8@)=KT>=#VKU3L%ID02WSQ zr0Q-=L}>ytX9$8!AdxsG36Yp>t@G1|$Mccbwbk>Y%YxvgX)S8qQj9NN-eOpvj*rcj zA#a~Qm-Eth+Z?vOhe}AM&t(Sy=kqzqmwmepF&gz0&0Do&$F81IBvoi 
z)4(B^)3Sf~tH1m!v%iTkMGmx!%R}|()mmL$t0{292F^h;VI<$Q)#j$?91@q_9eYUJ z+qPDAT-=qUBcgV93*~7|fzdHZ$U`I~Yf#H_zB?K)#qoIb)*9-Ng_pzHx+1ed-i=qK zo{z_RJV>iZJbIbA+F+U2{lj@aoz1{4oKEZhupCZv%r#CHMn0WS=lgR`15~Q5 zv%4#WkcDzz?dikGR*B=}flLDBX*K3{|@2;-BF4}8vwO2=%)1I|?soJeMx~f7mGY7CrUQz}C)9M}2Nr1rH#S~3~ zAu17}lTgSpgCm&q=7@qD@1RJGgutAq3>##C)@CFXT?MBYLJF6PptfF?vMkF|dzn)B z*s%XNA#@~y%YBZJDEh@&aY3{Z1B${$5)OFTzX7#=0mlIn5CMx+)k`TB0RZfBvN~KG z-ypT9L}2zLGDKR7YN@S)LGOc!Tm=fkfd~_C#(Z_ThzbQn)aK>9PSY+B2}p_QcSC&^0K;B2f)L6 zuGULy+B-J$r7rV)UY4cNvT$p)_f~5m45rj7_O7R8HRzY$kcSiq864ducM6@cGxf3( zKw#iN*tCo}Tv~+zsvc4fY1eB7z%fT=QHA+f-+g%hVSTsx7rX6l zhX}HHY6cEJ0T4+u|XpoSIf_ruArUo6%DMtx`E|JJZ<9R_bDMCmQ(PJi4byqVraN!6n zD1yKWIH3^qFc6WtcSIphf7qF}VvA!J=0Fl8AaHLT8k-}N2uBTCYS+~eNGN*m#Ezs$ zdf3A^P)N<&VOcM+aBphsQkGUdaQDT-*u9Hvn4Q;wKeIb;{ zC_;ioTRVB_ol-##T}_a*w#xXl969FR{jjdudtDomkcom5%aCDdu8#-k3XY)YdVvYa zvJ^xgw?iIsYX(@A(NNs<>3AGcB5^{D2{;3jqd?-EbF@yXrL7G?P(&yUAv3X=?WT+n z)C)&!W`-C;WQyAQEk>7sy=DPPlj}O4pAJu}Yb&MdW{PoarLD_aYGZSfu&ni)AHJ_% z9TUf7#4lgI8s_cN7f_HZ9yX(P%{lCLo7?N%vf!_N^WD2QADqa+mX#foLz&Nea7!_z zGyuuwHV;?WtZ=TKmu73J){UT%H3%q#lEMZ(KE9v#@BR@WF3liD71G2>Jn16B!!YC& zVhY57*ja!GDKH90W=4w0!b~s#IcJo>n~cH~!yqBDAd53P8W6I`@YK1~x-Ls8i)sgO zH@p0yRZkx-&wgpz%oKa+ZRy%NAhBLtZC+b7#pYJ3)wY(RW+;sf8&bOz<3$nz8dz;- zLbrti$1E_2C4t0^K*2o)NHMl*aG8yO1G-JCaG3ILhir>8ZjwJv=3IYb!S}!$(&^sa8 z+AA$fjDo^>GqhFex~c&M4k^Z%nPQNL7`%cKfv1%F&;R)Pi=VuH@%+^|Zb5-j2$3(G zdGh_LMoj1crdmNe69r@d)PC{UsCic{<{b!!JctCQ2q4{!6F4$qCXPV@5Lzv@Hfx3* zK{`+;B+~yMl+?SKI|V@`Xbx`Zz#>f4E&&Gu5IeAYR&(O!Sh_YdFz4Rf0RbH9Ae{l- z5S57$q4rv;HV0FK(>YXg*9vYxMu?!=%-0TWf4G|uy=!p*W?^ukpjwzE2d=IPK5ll? z?#9i}=My@`kgs;zpM3hoW^?`W`tbhlAy6R0TH89GkH^D2&!rRq&Ju?dkLQlaty-IV zV1`CZt0-qjdiLU39)?nC-ffKn_(B+jLmq~4iZSKQ=7qJVy)D}@fBEyTKL6^gkT<0_ z_AWrKg2Ail4xWA(6s~kg>MC=)n+C^FkNyLc0vP3LINQ$LSl)) zsD@QrD;>?zFcMo}7A9c?aI*j`0IqJ(i3Y-ehzgaXDIp*w0(9>#9T5d>5s5K?d#g*=iePT$s2GJVp+ZMh?@hYmI0OkE$WD(>zx}WO`0HQI zPqoajtn0Zm6|HM)!c1(2!~6TEFP}YY72bVVa<*5mwnH4x-_#_VIEiZQwmjT@`06J= z|7TzS5dr`0-~HWpzx|F=loVo49Ww@xNz*O~LP{y;oJZ!UKwjHbPH|mM`||-bp$}{v zBPp_YKWg)~5>bpAq9B@DHERw^1mHk`kugiiF$NJgS5pTh z#?2O!0EHA%$|=OmAzC7VP0i720D#t)+7>mP=c?9gEv?07ebFsLfVQmVtbHM2sui?@RRjiS zaB%E@IPegW&B2lJA|O=rKSZ6Ez0gZf(9D%g49oz52$h(KiRdDx0yi|W3pvsafPfqU z1OP6Kc~#YAUV^04X(0|*SJx>HstVv}AV7}#ba==$36pB;))BF_VjWp0)4r|?_5d<5 z3Yc5hp={xS}QTM z6ys*ZLcFwCOa1?ddb1|klI%KfPwswB3_0Do?oczT3ylUK8AuSu2b!c;nd#r^MP@L8 z6hXm60&0L==w=T!*PU{TjEFP1??DeILH&RXv$PX&-1pjRed{m`TpcagX&N1{8-{t< zAIXpab1u!g*5+xR$Et>CWLM+W8M%=w#O>|fZHyj(lsA3v8ncbnjVJ|r0D5gxswr{F zrL2qgZ90w`N{Mq?<G*1ws#sMsx5kuQtYmEvLQegs|35lEt znUILMwmPc?Kyz=vp}NQ>OMUe;*RfV0M9f%=YQt>AwY91q1X-QTz|GAtKo&K&2$n>3 zo>d`qA4#z!!N{Vis9JRv+Hdz}4iF1w!vpF1*At2o8>p#4^zTP=Z zt1hj&b(gknNSAMR$9}jwT$#2uYp9!ft8<-I)dOhRvUaO3-Qw-_jg6W>)~i*n<1~%6 zPQyg~TuKx5kj_OT#%2&ra)oL3fGNIz^WJ84z*nzcefh-~uRi)5BoQIVH35fVD7NOD z*a;y*z*=jIrwPKVjwOM*Y7>!!4s)Grt<_XY?$_&*fJK&SRF>=>=#V6>QXdi3w6>;Z z?m!-lRM8CqmdQRaEe?gIK?x#JBmf9(z)dj#*~bYZ0F$F2NT?E8Ny$PD%=});LO}QM zW=-`zcq2`?deo+K(^)5oDvky`E&U;`OSwU+EdrbSNYO&4>vub=#)O3<+BDQwXN#af zH+Nus@Zizu>FIQH!#F;9bnz#D{4oOyl@@?x!+3if;`HRz=bwJK*>+$5;cI6_D5lY^ zOX*f8+bys1dIkN4A}kERgLq=DoNLJiF&%ct;V^`n^s7?3RoAck6*>||o1uCHBLt!d zK|&-`Wh3wvxC06z6A3|za9r+g#AfQv)o7Ript*;-Yd`>e`$mHkfFsYufgDJVitdy! 
z_d*Z`0EvlNR$Uh6lqIK>@+AET=#nU95($=I21$`p&O#|6ico?+6+}X0A|@09CU+qO zRQEX?90QO6+EwY60zn|?*KYHq4JF>8e|Y+lWWfI^R0+N&f)Yg!Km=T~_R1H-F?f?K0 zkkH$Lre{LwN?Br<01*zZre>{LH3Ol*SZYcFy+RFABvh-g3?vzoVHQS*08narRXbL0tBpxvJAS3vCA2w*|p!@HNL#tY`el4r8rvCSw!lf z*TXfU=aN_J6%dcLIZ)ScW{m6MazE}*)*XTT)qGOY-CDWBO{}ExVx&khXA6GWF&Sl zwaG&fgE2v{MC1|;L*309gog!la>@!~fD8taRiUY6K?=5+>f{8Lq;ZBUNk|}oknTIJ zJce0kwXwCyn}DPwB_S~c(=@s&3RB9nwqcw!Cqj%m9V4LYQMgkB4>AA`a5ZpE$#cg! zqd8EJI)<`NLL~zOQUFlSXBHZ$RUMgzW~7SfDaEEs9&i{C0!(dDVm7OD)kAHQd3A@r zq}|Q+AHM&-bYwo?+)c-69;ShXSAD-aTjAhtZC%RQ>2}+1uYbJJxuHYpv(~xIBVwFv zHy?fe$w!}lv^iO++4b!m7);~T#_6~_PKT++6b3mVrFD|d9e(rG@Bi?-*N1T)rdcur zFgk>CYYhmOEi7rMs)sL9irS25NB|Uw80JlNSxV)ukh1wn8qxGI_F%#;Zb%pi=H z_eq$WDnt+qcsK&2a91D;G;(B=Aarnb_k}4z3;=hkgDkDfe{QpUl@<9@%pdGY*%FMsywZ+`#nVc6$_o0HQAXQ!KOe)H~!A76hT zMO0g>Ep^Et9=&?-=-E2w0IUebD(|kx`OsYA;^79U+*}lnAmpG{vztGh|Myq}$}B zq$DYcbesxFhf=sC;MHnfN-7<*1e289GfT=uybhW--H(#6aK~N7R5p$wc769awM4Gro=G>PRQwjt}I2^~@c883}o_j|?R0Irn zQ=0-*sLwf(!tI;8tJn2z-^LjnP)Z>~GzUj!WFcp0pt3mf37J{V-JBhX#X!}Yg;5|c z50y!hWbCZ;~gvHq5 zPT;5zj*X-d2_iy}>LS!&fK#k-VRQnuYEBU02#^Vu&?pnT6yYePFI_UPx!bN!==|dR z^n81=I$3uo(=@2-H(&pL*fkafZgU+ct&V9w)ClM}?e4CSZN+`8&x!~F&Q=Scxh4Py zH&Ek95*)EWW+AV1001BWNkl)^K>{wO2(Z>!LSZ6lqO)s1Ub)s zK<8#KjC&3}+dd#%Gm22lWfx(f9E@QQ=#U_#6aax?K^W8#VQ>jbpb!?3`n)nih?>I` z8%r>Cf+UH}Ye6OeH$#ZTLQDay>fnIBM2tKf$TDeSg_Kyt9OpnLPHyPn%@trwNC2H+ zBWmkyj=;-Xa8p1`DZ3dVK6v=F+R4rB+jp1ODUYT&O%qMjZ&s_5&FQ1Zm)Dnbn^R_S z$}8OV>BZy6o01OuINjLP4~P9Qj7O_FOha1b)6Hhnufn~xmizMYC$Bh>Ya52U%hy-m z|Ni@VU&*MHb(c57>uKNKz5elfHxd`loke0~!Gyu{Ji9qDOHLh&))^QiNrYJhfU}4s zzSp_^o;jJP%);YcT5Ayc{KZqE?x#23|M=6_Cnu$( z^KslifByLNgw=;>9&c_hFRyRLHeX%8ONqj%ba}Peym;~O8I=N4gn zuVnHflSD*f>Y{6{&U0-5DKRkti7|Kp1rT`vSYVhtG;i*P5FS3sVn_scLPpM+z#YsH zF$X_^f=CQm1Dd%x3?YFO7C1X4jLH+*;PaKWFyH_T63KUCqr^!Ns6o)uMF!@0m0n7KrE(?q=>pX=%bpj7XFc ziwF@0n0pJ(NO1qj5lATa#Q^{S0ze)P$hjm*K2H1F+v}Tv#9UHZ!h&wi)Cr~J6-Anc z=77@oOn}hFdT{h^odrrXAYR6dS&oQDbp(pr)r&&xTDt|HRBS$G3u z5rE_X=77QA;2uFlC@5J_J-{5*z-PqxWxrV6n1=W_3T$j`=VchJ;Kdm-Pxe zEaDpV8#rN@JG(`ILzp?4lw1O#YKt(3pI~`j_u~f3z=`j zCNzd|oQ{1hMB-sSr;>7_u|jw>_0X9p1c`|#cY!ihV+hK^OaW-_$XQ4LfQ-A6CEeEM z35B7QTmp0sGZ28v#6Clv8!|!W=IRbYp{=4X+$bPPE@E|d(>`?!$*Kc6f=5(s)quw4 z+O$Zf2p|Ap*T679ipVvB1A1lwVhne1V>B{QK*RfAQzyvBT^)nNz}gHpU2O1l4V#h} z!C@vc0EcDv2#@Bo&BQ@+uYc3RQD2rNP%P?2xdqc<`^VRGG>8Q=@S*kTxS*X!#KWscZrYEgOi8DINV*{9EQW; zu+QFg+)+*uJXsA#ro3LABm#&!HFGOU1(|y40>HJYxjRsRA-Fjwc62nrN-48>Q*3}J zSpu`8pd==bsfDWpGc&4-qt6*=_1I98<<0wV^|Oepc^}oF;bSy1rtS>4V7Fy&=;^RA~NE? z@C1nf$y#gEJYSqY&1Ca*r*^~u_rla57{M69y}3`WE}XLz;$no5IS_=HkT^?gUae+k zAaZvAM+{*Q5l};PA`s!OO97Y*<;+0Qv})74-SK#6o722mpNZrEEUSLC-8i)U@it(} z(m#5z60uraN~wsT+uhCa-PP80S0 z(^C3eZ~!NSi;K-P#&kU19j0MQRny=NW{2haM=pSBZo2GBsDT252be0{mz|=ANl^zf z^6(%g0d!CT2xLIC03`7sa{xpItPz9=^4}#UM+ijBselC`289D4Qy_$Eotg(YFcTx% z(wRa5Nf;3w0u==%+}$~lWpN}UK&W{T0&@_BAO|=Ah;WD?FSa0^8$}R8LR@wk!5+{E zQKdixnz;orFj5AY5z{nJBzb+Z-t9-TmI(m@K_8uO9-MdDjyKmqDJahs=DwUiezLv2 zd2@CB!{xg-eZSow;=AwP?e^2>Uwrzz-~Px*f>K0|`|<7V_5SYi>gsLXo6iMfv)yd9 zj=T5!{ncI7Q8-(|{XFoo-}HSFn$(G+g}Xyj3TF=_kU#<=4{|3lBTkIy0csE~XzUyw zHcf5~A~zHYUQI`Js2Il6pIA^jMkfp`lvm~^K$MJuBB*&Z1QNt7Hdbd-kJ=P- zCQt^dV8Cn&%>!B$fUeI$s;vP4N0}yybkhzlta-`g)3_=W!5d=%P z+3qXs1CVGjiY>Dz1Awq|5V*S|a>@cZG3Cw@B?9zjNPr+j+4XR^JM_Fukn21j_WN-< zqDMVeV9)E6bHs3~zsd{rT0QE@A0u>?UWMQ^!w+MtWQ4UfYgUC4vI#h2; z&{{>*XcHl22SMZ{QSdm-W*Q!px4~mH0O%lA$OgzlTT5Vdsx4}5Tso2-so)%L5$Hf2 z2{hZ!;~qQ`r@YMs+(xgnhFZ$9?zfUghE}y1P|37jrPaww$+gX$P#aohXHGi%;cnJS z-a<%#!@vyC%#c8Ymu#OjpB`jr? 
zu2Ho~N?qTlz8v@CSf|99^HMyR2+7sZjVLWiRu52e3=~9XguoC;o+Bv0B{LYhb5K&F zAi#nEq`@eZm?(e%j>9C(Nityy!Fia?86XiQ;ZBVL0o+~CNOGSOmZsw{)&Y!(9HTb$ zK;mU6NV#}Uf(ULAZUV*$QOxc%@J&=VF5FRFd-uZ0n%vh1{lo1!o(!PB3!)@LrQs`+nYCU(k)ay9EZaPA3W>U z{mJRN^v{R!WFB|NeK>*XaKFL^Hp9a(-tLFns{w44mK;$8ix9C^Yt;j2mA0q}T7nkc zI-hL1^`<{rt!;m|?#hGh!#+Di%(FUb5JFCBW??5~9qNpXfGG(fEDe`aAY%&9KwLaZ zjHG5F*upIC`5u5|&D?`9nTJO}Pyk!#f_@|xVG#%c3~dlWNQuSF!NUO}!VnO{37Q2F zAtNnZ3I?DEaKvCnL2?ixSiCX_%b#OtyMNWW13*Gbh$LVU=%El8OQAVJI0ax3#!_oX z9*xw&43c7Ab}AWwh}50K5jZhsNoBh}N$HvanZQilW{9av`8dyahyA28^);<}!OO=N zr+0Vncf0o@FdT>L>$X{6Jb(TK*t>p1#GKPu=i$vyfB511-R;3fSd|SMwec|8t?490 z>(z;akO&ERSgqp>-f#;I^xo#bKsXVBBEoSQumjPF7t9){hx@&xM+8Y6!CR=#6A`*N zGtW^2CZMEdiC6BfHdr`fsWWSk1t*RGJQ}j(1`Qb~F=4dKEhDIzduCFT2v&77bQfAo z2JV0%Qk4M_DFvemsZ&sZ5i=Dv2-84yK;pbOv9W_-bjJurBtXeI2-Mm%#SFP+0gukx z6kse!IfhEP_N1Xb6T7jIG-86+w>sgna~} zFGdhygjl}Yh@E7|iYS5s%EyDYI%V;`?}UH==6Sxoy)kn)80T37y0RgrS|@Gb<`hE6 zeJcGbZP(pgahQBmuVyVc7BDITEK}{WQdrU>VE_oiv)So-rDicsNCHtwfQSf%s;gQ+ zwEN>c%qwyw66AHn`lv{eK_qy!>Ok9U8brA(YYz%HK&3?Vh zSf5fqUy<4k~)DiK6(DZpZ>`&o}6z^wk5j#XJ5QnKU}@Oe*3#` zfAi+*`*pX~y|zPp^Y-oS-EJ6$<8gw>M1_SHQr}|Z#6UEFh}IfdxH%Cm$(snL#WNUW z=0;K0EAEgew!Dp04a0pIWiW_<8JP*tBa{f%o9@}O$7aK@JE{%Urte>W_t=KjW|ce41v>7T zF;v0Sn$~%~)BS!ozI%OnyqiP{5kZpGYUOs{Guq4>`l|0a7YyoCP6eJme)Qod7ax3Z zw%_j~0>MM4?a4N@P6H&n%kh(lnMtDsLNF5v22q&lK$1L&Apzh#Tax5h zksPBCvbX9@JM* z!SC*lIh~+Ukwl?y-(Hq=Nhx!VV?B(s50h#uVF_Z5q=3h!yM)wt>$#~5ly%o_%jwyc zgC$s)N?H|3;rpeH=wDLN5E07`3jvq{kSxGW5VIr!Y}*Z=oOL4kI7~OU^EiM-Mh>8% ztcZYi*K>lkT6I(>0|&&F&Jjd|hec}ueUUk|&e|w)7VyMTkOGtEuA8RGL5w1T2|*Gw zqBgDVh&E&HAqxOvo5_uXHP9?sb-)O6Ko5kd;ucKa=Ps{!*iB{%2sts42o-?l=paOb zR>ct!n?p6XaCJb62q!Xj0%2Z4@^L>vQaB(Y6C~j*MTD6p0xWzst)YhQpv(4@0|A!V z5MkkX0|;P*HcCc80=U|q595A3jCGu+qs>z-WkggDCxj&2T0I=@EXV^X6_C`I(yxkY zOrs4)>&1{fB}j?PBhb{WIoH5IWDtNLt4jka0R@Dag?q|klVVCUZ16K~^yq?E;oB)9~>(wS_;dHo- zfS3(VR^4jNU6JbQj=7?%{QjqR z2GMV~Paa=LDGVSYxyXRyc3U27&oIbZ)Yi;uv?=2S)>fw)ULXLCHr+}s+GvQ3f#HM! z7RC-NxGF?R0w5vVIGcg6v^HY`Qxxdh;V|J|5k^JHYaJ?p)~YAl%~XR!L^kHOSyK_Q z!P=~2(~CzJ!?-(18*NpHL8&igoW`8fcxbi3>2^EmvEO$4@s=sIk=Omk`8Fj9O2_8z zKu9)DIdRj7%#4D*;@Y|u?2qqP`Jy(eK|qEE6qGnYf*QV?^lZB(DAq8g?r^y3dS;+z zNI~RA5bBELND+WRP#u_1eM+LO9g(3PouOmK@mPeOtyggve*_z8-t=M{>c`z=mR9sg zd41R)``ojH>;C2Q$KUR*)|-w*w2jC8JWLJ|l#IB7YbuarXi_y zUDtKVQ;LXIwKWgQ>@iJsbG}NPx%xB=96=-ipr#8B)x*ru!hIR-mmtG(CIcfaj-=qk zxhoo=4uK62yg52^>bOV3nY0S#$qgb(=_tY3HuoLB)YEZjDQ%xVJI_5KRUk_#u@~}Z zTH(9zf4n;0K6twM#VM7;7q6Z_x_&+W$N%)zr1jy$lb`+k zLf`N zczdqX#S2AHTW!_3z(KD5pb%z$~1%Cl{vkC!c)y z7ys(dFP>ghpT{~*T1T2sHf4t4{k!YC-Elunbu^gvAALXzeS?t00?b^^EMR$EAR}NP zh=?$yOjRe!$t|kRU6GX(2eOHelZHv(_ueM3mNR0`!dQX81x%*nT-(fDBC2X_oHv8k z^X+DP)`bG+v`Lh^KCN!F9@<zp9463cl&iY_K&@%3u; z{kL~ITM(K}EMf)$LClN`?hz6ujJBW~0`BGC7HHuBFpXnty5|05b21HWo-_$`vcg#1 z-QAp?t)D(SI}H0;r_y)No_&zJ2fzJ)|4)<-nKNhGncx)pK(U)63Uy&5m`!1xdj{3wL!?5nLi#Oh5})%mD)}0tp$xAv_F15XAzmR$lY-mk+-D*~^zNFSOR* zKDe+u`KR4a9QeVrM}PLGA3l5d_~#!U|Mow8_3Piit;dwt>yz{Di!UC%eD(P1uKweH zeRFl&UVebjUpzLmVVcn;qVw5w9;fNB+u7T&p+tvP7hyAG0hNekUk^+e> zRFGl}kI1>qY9d~U4QL2y;F!42xgAC!2h?T=!V!Y18r35x&@8t(MvryI2OScFyAnt! 
zEoX{3G*`SYt{@6G2)8iv00@u7L_~>Mm=}{N0yb?;TQdy|T43b&X3DV82mg8>Sk*@ZE?o~8Zmxce=4x&MUAMZqy)9j0N441lk=!(f z<9xCGm(pNjWCBBT4+>W!T2z1_W?}ZIB#fLMKR(&?7iWF12mk(?>$`)OzI*um;mZ#; z$?b5Zzxvg;V|8Xcy;y(w`HPp&&L#8@zk6d2U;gpam!DkZM9d;AtxW`Fx6}Xo@8-jA zZaC4yM-M*!=!t|9@a}Th4ac)JfB3-}&Cw-O1J1Ht^HzeaT5E)odlFW3fVoMOOsh_E zG9GUH+dte5b*O6E>hAXT-Me9`&YT_0LjyH}lZa45asWljgaHxG)A`x@@#9UuS-*I+ z*{*oqC!2k^w#(bM|NNWZzJLGzqtmAkE>1R^lT{Bt{P4r9kxMF(6`;-@n|ym1k9AC` z0LB91U{0Jw3VXPzhK3LWM1)%?F_Um>ZRx3)=N1CyVCJSyECEqXmu55sBmh__ql5$i z6a)e2eNIX}ds?sMv(KM=`HPQEFV?&L_}zE!|KY#<{_T&qub!`d@#P19@fSZ+h+lp6 z{eS#Vzq+}(eR%rh(Zk1o^>1H&{$mU5&%XHNlg}P(*Uo^btB%b5fS7iN z`JaFN)8)Gzp}+d*@xS@2FCRQc$LVkX%dZ2rcL)3IlMny$Up+r7{LA!*pMDxK;Mud& zfBjc4ADyMI{)vZU{r-2CXJ_e;|K#DPpLK`3*+%>E%{wJ~{_^DKfBNi`j~)hE)j*Us zPhE#~Xos6Ot4>NuwN`*YT#_~DfPfO9%L-@N)h{(_%O=OcYirD8b+MDXCn-&UiPTKZAtE3ac!2|%`%^r}?u7CZ%|MK@g?Z!6$?9V^@U;gJ`oSq^;0P$>dR4ycvwT=4MP^p^8 z001BWNklNbw^aew*t-HyxI`S}A6KinO&_|vE7C#P}P-Ce(# z4+m@}OxVnwytP@!7>2_*&zrPbRuX}MYbE_Pdai)|^=$p0CzUKYaJ_+c&p|iBsM_eR}@$KY9Au=a1fA z)yv(e;D7eT$(MipqU)r!7Qqy`X|*~`)BCHd`QQJ|n{QsfS*_D&pS}E}kDrOx>&vU} z-|l|>^;__G{N(h}<5LIS-Ryt*;j$i$d;H+hwqJAI50^JK94A1bOq}ZWA%Wv@XvaIn zyq3(}28$pPMWwlFYf&vgb!|#o%_@SMA(Oyzr*rdgCp6GtUbcw;QcRD5i$f+8Ft`U8 z1cfat4+lUN1S5;)f#!zElPeQi{V4Me3@mYHN|g z7{XMtIPw)Sj{WiLUp@ZOAO7m<;r7FBkDBYo5*rX_y%Q9L0-&hM<^a<`EWdQKFGy-F zGb6G9m&)^MUOziuKUppyO`AgAsKl}cV{_M~HJcE7& zlM&XHpPWpE0Y!=cf)ZKpoOjkymaJNe%IP2?1gC3(^Byf6cE|h2uf`#@P1|`lb9T|x zRaIA2>}F}RK|Vz)fKZ81_;i5>EUFgKu#Q)SJZz0CVSn^q&o8PcFAzw7cYJpH0e^G6b*xX%tCi=sZNmwm$gHg{*Z!>Y%Wmm) zCR3Gru%wHbH3dD`c+lj~Q9ZA^nr(;~1H_QDkVXRuDrb;Ne>|vKYpmvAt+kGrjhvR+ z1xf)q<+zEN4XBEWs-&2jrdhAoqVfLW{`O&4*Iiq$jYA|JLn_m83d8=WDVdT&Oi2Vm zoH4Vr^{i|0*RS6e1f4jA#AJQtQ+WJv^_A;Jzt&R6kp^24%G7z|pIjVv<#vA*)tT>@ zn3#x+VW$cROhlGIPmn_W$f+Va(UQ=DLGs|}`Lo4;|BwIPt5;{uZ1&Z+H~-{+`%m}R z_x)j9ESlf@y`TR4Z+~_;4uAT`zxv|W-^B^5S%q@=yMOn${_Fqd_o}Y?_M7WJ`*;7> z_kZx@AO6Fi{^*C7IR{WS&Q8}l6|9gM`XFvFCiQI4Y zQyB3-{P9~~^WXU|Kl}ObJ|Fk>yZ3u3*HcL6=iTrB{^!fH`u&H;fA%kb5mKoQKYh0R z-QRgNo8i0f4*$mfl_}Y2{Pe}*_y5kb#WLnZG1~&uGzBJY8ozm%>VKQ2I7oEFSXAKD z*iq4}3PJ>Ih=KmI`3I4Z2$TSbP*O+_cN?+jx8`};U)oRBS8Yjcb_fMyu_xZe%X zIuNRjMWQ4b02uKUUn+orAPT2Z6C$dRp3*g-sRZv~v20eW)>{psFD0*+-Q~s7J3oy1 z=6e}yp@A(g2JxxRU_SbK}{_77RBtl2z5GGMLahr&Mlnf#gixgDN3Mz># z3n~yA@6T54_dmOQanVVSySovo!~kO`xrF7yP3PTm+1>Aky7ukNH8lXH!!YjllW|57 zg>-upHdh0xOS(-tPJ_LX zZDlR18n9zdL(~WcEOV{St78T+gw5(~b?i48vPELYNg>Bztey%hrzcm)j-m2=8dAHe zB0`h3R+i)VVfX%FH#Ey-Yu&GY{m1{$7k?GCf7sq0Cz;0a;jTX(`_18gN@Lr)y75z= z_xo5x5Qv!oG7=#WvPENoLA4ZxSPDZyG{^uX%0d>%P>Nv;CHK=VM(DcEEZMSZ8OJaV zg{fxDr;IfbfdU~Apuk56R?#%{*)tH@)v{hMjklx0<8nqXF3vu5(>U(-heOKaylNI5 zpDpLJcJA5NHJvZpx~7}W?s$CckB^kY%&F(Je>@xxt+ffLH>MnWI2`-_IOG(vjDklg zq)XR2=L{$+CA`1C-EIz2#2OlhW0DCdQ{gP2Lf#6Uc*TNBN(3j^&_}II(JV3z zaR~ixH>OdV+B){$*8&)(baUM0l2c3snXp)kwO@EQGgvQI%P{1SLqebX2o9##aJ^0FrUUKtyab6-9ue1wfgNsFW-S zsFBCx=sW6W^~+a_^|`PcE}qcy=k>S0fzW3J1k}aSFW1$(H;Yd{d--@T4!cqwKKtx) zz9K_>f3tc2_4RN4=;h_Ap0zngIUa&9?KBQKThk%|mYh_zs=4ygb&8k_>uT2g;QOEa z>M!ng`(0%_5J4&;Y3!3KE2wp;s%j>2uCk1%*&={rKKU~DkN(Tdw^u)P+}X1^L&P@IeMs{UoM|sp6@?=;My(c^?9ZHyN1z3 z$zc8Ztoh+5T|Apl*?jS4e}62?_3DQ|KL7dW7yHeqc=zr@xZCIT+5ESE>)H1|ZM&LM zkd!dy6eMbz_U~`szkPr8?n4Y?5kxJjC$@qpozf4gCvM&elxh$yJ7x5lr7mG$w$7%m~za7WXkj}dL z$@w{=yF7-QtH&`EZs79S`e(oM<9wWUxBGAO2I0iK078VKk_(7x5kj%2Y6vqcl&q&b zsQ@W45nVo6E|v{*WDE0|Z7bgoC4{`$>_Wb&Tv zgvOvF#9Sn!L1V!guGuf_{GxjD$-LyDbaj1Z>fFm|5Q#m*D4S-P)>!9)r$o3e9 ztk$BjY)ghxiV&ayA_4(ri6s_{iFNt(`NhkpwrR(3^jtk%Jzun=HT%19_nU|Dw;%RT zo-JOUSLcgW-R!5hdH&<4v*mhU;3mZPyXj#v7xl9hcH7O(U;OIfU;OzSiD~K8g3m+$ 
zcz@KqCOW1vI4`YRgw(PNKl%KV*I!;Y>}$Ui*{W~I&Kl4rAWg>31UU3zpsH{^7?X_} zf+S_gIgJsJr-U1JXWzZs{EL6}>o0!u!B;IHu@uDdd=9j^MdYDw^Zn!|r~wAIGU5M^Ko?9H(42=-4un^$X)17+{vl+C{rq%r8*= zG~~_ge*1Vp2IEjj4AZb31X8oCXJ>P@=H_lMchh{)Ef>TnSYX%>yY1NbLmbUygU~55 zP8lE=_=}{qgsD)v3~XGMO(YW0e|z$4^(EF*}3L$7@+vCe|_sQPh&CNkl@&d z?N?vC&mpw4c}fS&d0x$nOy0X6e6Rl5PxgQECwr6E4m{Yx%(Lb!q^N~*$tpr?ya z4AXz{51v>{?YwFl7anDQGu^*Ce)Z+%=IU6GE}pJd=QX=>IBrG?Bm1euspL4~KuQ%U zT4qmJq=0G$FsC)<$ys*Fb`TH}#uTB5AQnj7Y;k_RXqrkyrj)ICZ8CQH( zF~sd-05!g<7}a1g5Dk^pYWDKklQ-L=YK=lR7n4khPN71Y6Zy6_MvLe)O~cSnDMoK) zu_RRLW~NBCWSYibp0_Vvw6j_WFou%FD<_qCdik{SCWSB_Lkuy;m`f2AB0(Y%K~nsi zP#*!F!pqLFwbmNTSV~q^A;ps(4ocBnfJs#W2;s#4N5w3J$aLx#8qA_4kKq{Nu5dKf zYE^ZbXR2BxY`2d$ch^y%Zf5PQYnz6wop+?h0|Jfh^ zk!3!H@t_d|ms~9utv45fFL3p zGS&bB8359lQ<35fRNmOCsbtHm<2_ zUpH=bR?X&)EtN#h+u1_aXO687+q~PA-7b$)Xj{9OH`cSMy7Ti{GZTeryV-7Tx8s<{ zDJH>E{BfJ!ef{C?X1l8E*0wrLDo~UWyt9r7kie0tJ95usDu6BHOw**NQoC}cRW3CO`a^4@jrGDb*AhvQV8wW)+Di2<>QY~Yk$ApUs9 z6jjnwv;sa3+1LnzisqEB-hM!$B9NshfGAyE-Nl@|H=wTeZQn;tYU{dfS_B_PAb>HH z$IWhgze5!3Ib!Ba=jRJG!OmQ%+?4YEv4`Ty;P=}P7wfqe*bn3F-4KE^&L_fxAOr%M zMT&Vlshi%;8XMv^sQTdiJxvRf_N&7IxtkKcX! zaDB5|t=DH4PcEKce)rw6XnFGDe7R~x^Y$^m{&s)<^s)~JFa@36-sXS)&wg?BW*m>P zZZPP+TUNIZd&J6HJnr`dj!8=aP2u8#e(=M^ul~{xyNRhGE{p(Rb1EgNAx31wttIE6 z7$Jz%*v=DF7E%-e?V1V+fYnz1(1(BdhhH4R!Bn|!y3OOmeAZ>*Zdpr-gtm}~WFk~1 zJ#7l5#2g)|G0w1IGz>&VQG`U;s&ihrD5Mzrfr!!9RU@U6P-~W{PsCDNXMhch6y5gy zyfs-y26(u;dHrs?KT@+?Eq}zh6$X9v(!Y9sF>C7WsVDTXs(_sdfc*{`zLy&gjSU_QfK9@5$Ni z?!7nF^dZPz^m2BwDo+;8ds}~Uw!U2P%;cL*dHuz9c#Q6Rnh4mHDow-w_{}%lha0du zm9pj81XicfmCi;Z#D<`b1xp34jh{ZBef!>e4GuN3*U(C^okbgVz-Lpk0T2?K(R}ZG zZISD$(P*r24nr)QjY~>X0?9b`d7Q%WC`g`&v`B?qcV_}VBNYWSW|{_OGNwtfw+2OQ z&f>jA1Ku09T-ObXv51hSAd!g*$P`P6sYuQ#A!H;=a`N1emO_RAn6doLjFeK*af%_8 zRA5gy#gM$6mBOHIzn}UsHO<;*L+bJX#}MLiTztPdJDV?;6%azboN-qf~|!jeQu2?a%@ur;$;wVKaFH5aUnX&P)=uPQl~5K78r-nM6F z%dXwI&d%pEW9;xa?GK}(sp{HUn`N|BnO{)u$A);?`|ZPi-nJ(w9J3{&44`D2wplD@ zY_Q~#O3nfqgifwxIk_;S2dkhUsG8XnqbG4as*rW=^2w~8+k3T@XG`KKGjmKum8@D4 zHLR*YDhNc#$Rway#kr)A3z90{J-kij*bj$ee>;uokW;V4RJP(-Zv?6)7K|}Y`S|?P zSMB-w_CNgl?RKA}8AGhd7ZEbdMyjd>rIcb$fr-#EGdXL(A~65}J41wyBk?5mfR2!7 zkic+znj9Ic#%9QLDp82w$F3WRs-8v;Y?@Ns6r!u*V&!IYBROw3Q{#jvwez@MyKk@U zVUOF5luV|>XV2H?=L>~;xtv*__e0!%IQI9^sB7nqS~o>70GBgrc-7ikCVf2Y!_+2x|CnMx55iv|lx6jDN*&}3t?b%ikNfoE`DI&s zg|fMMeDmgdiimU;(y%mhg}6VA{RnL4xs2oFL}@q{Ya@a;=q%2x(UK@Z@{K%wSwDT* z5se0u5JJ&m7;`L$&KTBGP_*^+BaA7gQ^7Tth^i7LPJ?aQ3`2(FkRd}_J*lh7gmSf9 zO5Z~&`NUC4abT_tk)q-$B2hG{oFaq-YKVZ5mnd#O)enE`)AfAOIyW81 z_iwMakNd|>pE9H@%72s2Vzg|mwcehZ;wQH(e$3e`0unNuWNARe))1oeJfHi;qOR&L z^zPmJ@Z@<-OFvsxib;Gu)nWb`+^R(SQY!ACBX(JI$ zBLX=~E>MaoMGLAHQ7r^0PzsVFAQKXh(YoQ~x?9h?5~UwfHMeYev23-ZsZY}=!;sF- zy0eQ_(_ZA<7RvNPO~TS#Ez{^ zqPxSm=7 z3ZPmfPejQ8I&_t*icFX@{^vZRB_zrs1#*%dnPxUS+va$E^Y-fI>&0T;Eli(wDUQ}R zOnNx%@~~}u1!5H8eBI8vxo0>W$7xIjWDK#HnUV{l@!rp7t%y#eVyTIU4U!==NS0Ma zpg3#1_f1>1v(9(i&IwC(xc1XsZMiY*5Q9hrssKSi6|?{< zK$2CB=;`}GfS?7WD4sByh$wh!K+7qLjZDUp0;rY~30cN5gfI+4KMwP*YZyxE%Wl{m zHpj;L<399#n9tVE`@{aAkaMt&pal;pZ;l%g(P*i(wPkNB(Krmft()D3ce^-S&z-M! 
[GIT binary patch payload elided: base85-encoded binary data carried by this patch series; not recoverable as text.]
z4lqvj-UUIOzB2v8uHtqgOZ!DB@2i;yqc{95@yQL-Wfb|p^GDAsR2~&^44a)bA5vj% z)(oUi_4TAzV-BVJ&E{-=ACLECk}@PmFD;DxR(|TI#l=Di;oBWaBGTKJ;Ns^<nPqJkhxY!Bs^H;#sK@yL@=weG^3ZVT^?FgSP*4KfOPGM1E%U6t{|6h3 z6mQhRF3fh}@x@PZ44S3^cZoS>t{fHJ(V^U4_BmHpJa2*U1wM?~Gk@zFwxqBpMUn#N z)3X`rlMgT^mEbdnjjs;`sq!*40pk#y2bP8V^Q;w8XiYcAZj#2gnfxL+?fB4xz_M@( zx2>d_5Ryh@{&aPG_ObV!vA+JV%bqb*f0#|7J*xG}3}B?HYEh zPhR2vD8R~cXt%aL;|ptx!3HaxtKrG9K(mdWFa75jOLO+FRq$CEb;j9W;){`*4}i@z zK&@jod{NGtBIIU#ccSCd}Es{fyrYT{|=Q7OKCwtNNLeJle}r+w8M! zXh<+R;(gp|;csPw*ZP;k-5bJ>k!=JbM^rRN(kwmki$sG#HpgbIXL!$~{myyt2&#R;OU`n#TY0aj&B0h!6 zB&chS-01YDb4on;?3EOKVA7~8ZI*9a2wpZEkd4RyMJ<*eZfX8GT6_%xJFh{BIC=Ga zPEDQDD&d@_U8t0{>GLbGPuI7FuX-n(A^4I8PE;(WC=)KIW`b3+EI?B6i_}n$f*RsO z+178(p#v1xKdzG>Q&V%kvRJ0)AKRC6G1U?Qn1!-BF>#=`P~-cp8oUZGj}=oXZD~R#cWIAg)MCn zX}$3#wH~OkTWafx)F?ZcB`>E@?xihjbro6~e_fo6`chJ^JZNx#w`i0Z zw(Sph=j8ra^Uk7w>h1o(Q}iS6d~kjWd%_Qp7F+#QFy=;}hMv}QV%743hSO0@-%(KSP{Kr)`T6~vLzoL@52CiZFL^IFsNyjjfZckHXDD}(( z7JCNl{c-n(Nv5I@7kDpeqZ~a(vId?UqU7DclWQwc{xA-*Q50TRdyX!Ou#fFv+xg7; zS^u@N*_dhVh>y{dxZE!#R&079w(tV~HI4pqSxRzE5X+cHpQejtJ1%7@(bbr$=CGO% z?-kk9ePO!YH28Z4sV?&DJ7%8WF6qsxvwnrdp+x;<%lY)s!D#CVd<}BZ&6lB=Ydt~4 zsfN<4FYD~|<6|@vaW1;#(xkgEJeS;!0?b_TjR|tatox22;MJ5~VXfHK5FWXSfW8!B zk%6jrlelW@noZhbDl_?NB(LgA4__&gr5ouj&F?_h~X#5R2vYvAA4iqq+>-8zQ}EgBf?!!e?JqR`3ow1SxKK-@mz8 zdGM8SVUR=by#;b@?GL_tHuy)W14@2QQ_pDc6<&a7t~rq$eKbJuLxS{9ykZ?LHKLvUc=B7@3s_ zZK928RmCiTQjYeO@o{>8s~YUr-R(!p=1cvou= zV*hD*R}vOHv)=zUx(rpmra8zE2C6-27-x+g>=}raA~`b#Z8I8^Ks9Y|e#0 zoT2%ttN=gk^GOBt^%k~Nq!RP}Ik8BGq~tO>>ca&dUuy-98DZCFCeeUZvub)Fts3Zkqjo z{7z~V*PCs&Wh-s}a74F0Hq#RReTI){nH0r4f2#H@3TcxGysa|#qu#Q27`hR3iNJ7KV9quR-1hDx;NV-+elxJ z%Th03^>F!&fPma5^ZuPGcpv)QmDD`={?W~|3%sq0Gc(D#413ha&Ci4>DO*+nAZAYupI$*uK0y$5#rN?LyV==& zkTL+Squ4Gry*oZoPN0*QliyG=i$~*5M~mNHy?TmdJyRZ0SJBxiqJp2!KrwQn9T90Q zg90cf#k392({^IN$zKp#Qxq2*Nb#?L?Rn`vEE&P{O&-royPH^_3U9(vHsjlAb#2=u zuf@MStFw)%P8zED73GIXaFe8KOGRU5zL&SR^>g=@SWSFf9uumrTYWmt?nA(pJx|;y zupODfN%QQef6fKmWO$#vP4{o$!XDgg!#B~O&=3Cx3H%fxstZxVA*s+eTr56`&_ExP z!2iv3YR>A>i|n5#8560OMZ#egO(Nh{?zd?Woe{nNZ$Qidbd+e(QbTm(lNg?5B|v(` z$@P5w5qG)+Qpf7!J||1JqnTZ={SqUSMJ2 zTVP?~BoGl`p4d)Nf5*ZiqjgkP)_ta|4AAv-vvYK|#lljLO*bHXnKnq5-|?C1-WveU zizja3v|Bhu2H0B`oDZ>|#HLWa+!^>3?xfTke%};d&s`w2{5~li9dj-%KB&|$d0*{!LrY4yGZo1zZ;zBJB# zNwp8Ftc;8Vg$bu2nMFvf)MKggN7c99*Bx<>Gr3o=LRFsRcHQ@=;gBc5su&!kUdL8+ zi1II%fymqgVO6(K-NOrgr{VsQ%;~v%9GOnaY>&#tlVO}tP21WiM=YLaeqv}DN8zOR zk;Okk;q;2F#Dd^j@HXDGM!M z~t%0ChS|VfNbMgL7VW_owSky~R8;^JDHy>WFvP zD)Az0ECaHnk{nBS%%(g$5MdxOeWwG(;tF*>wJebiQ6rIQWB`Byl&+d;JhDPh5@Iom zr>{t#KnEBTL|yH;>krIQpLNOsLIzmvmu6v11$bhIeQg%5NxD~Z%7+Y%ZcZ1gf%m2s zH#iY|MvrBNj6Nsah_ze4R!}{>aZPNhqP+Qe&p3>=_e~Xe`u@(tDXgq%#&T+>UhQWG zhL1q$ue&y#^!L=x~#+1f~{+t}{8M5|T@H+8@;}%5I&5sG4 z)rpfdf>+Y^BTo$LeXXgFSF5f1m~*7ZNsizdKUT#f-Rx7Xeo36A9x<#<9mJ3UoE0HjK3@RDlSa8bI(cJlnl@LP>HBM97b& zp(MhC@6@SUb$t%OgJ3yFfw|aT0n(e zM5&xApa=g@fys(m@W(Vvykrk>^#p#<$b!>7mX3B0wrkfgGRJd6W z`e0Tg`Q@un23Z3f|$=?%9&2X^gICjO<@w0nwXB zR(3V6yz-L^#_kC7UBx{&z)37ru6oc!9ZpOfDc;5MM5HLUD%m~Poq8|FtcxmD-G*x9 z-Ke`c*FCX!=-9G%$XM^5+8zokN{TI8h?&W+?d(7p+!TK#LV9#b0{8%KbiIVDhQS=&)+Vw%4^c`K847N53;=^B$K} zla?4?dqqIvw9Dp5J`lkK10n)|J#d zeVzZ!q|l@y@a08|XGdIk+_$(&5@Xxc=TSq%>A=Sk-@RS|e-r(7{{8AZ;mgXe?Yf-b zvcF%HdHjx>JpGzyuw7*B#n~d1r);jesL`Rm|BCy2&Nsp``>&bb*eUZPUQINXsqit`Xgd42Dq-{yG;(i5cEtE& zL0#6H+c(*7w(FSc*c?}9GiLScHyrlI+rQ2$m<6?7`Hh~xKUC{mL@0Uldw=p;-`@G@ z_?vO^_XPRm$j|vt0cMTn$wmFY;N!?b-#3*y1r__>#t@5zA*y$jB?J>W*}ZeatuPkh zaSD&`$Y2Qdaoaj^sme(`d#KpPbiPm$*mvlf%i;uhs@pkBW)kHo_u8hjf1G_N!* zE&;B6tr5*tt;`(BtajlEaZ*V>;WCi`2W_!4(J_fi@eW6&I``^^U-YCR`<rN0>;?9OgMlljnb7)x 
zk}K>p`m_-)G}nIK+^Xef=jrtBPt@SC=II=224%a{d;0y@zf080!|2LztWBU*td-Kg z z`G$K($aMbh#N`;}AXOHOe4$*d+!!rSTuVk1Eup1U8mVcdfU<0Q)&ywfz)`_dD^K>L zb)?H?zCgZQnH>0bU%%k#%u|7PZ(~c;#^U6u_4Kms?uKp(8b60*H z=QZaQk+H<2G!>8Dav#w!r>Q^lcy{%kp!7lqDgDE@W%YPvtIT`;hNyZ;k%MiznBe zI0kK*ZK1Yq0?lt<-)<)qCgd^hi*bsc*(cfb+W)r2EJCdg(Ij4WIBD&P-B{jys&%@4 z?nL@_22{jXk>T+i_Y)N{l|;y&gO3xBCF$=T?v0)eR}S0>bGwfIZ2k_$2ODn~jT+1B zLzEOdh0uo=xfdg(WOV$d<89*s%X?1;K7DvP5}!y%FR*8Nuj%t!UR%fYUyGzJq;Qf& zfq3(&m!FO6K7ZXPg%!MwOOK1{_050^EjHtrcbflf_+^R8NH^DLdSsyCq5;=Xs9Y&; zxvZTZ%L!pK%4=zHNu2n|>J;mQ@8njAeq->em0?^yIWi#<;l7S+&~kW{eq6G%kEU`1Tc9QtN_hI72ogfxcBflY5i%MlI+G!P^Qb z6xSDF@bPuVKF8kRBHGz)ZXv_mB>3AxXAWLN?fI#Bux;zyO|oNG!O^{D_zk`d!tznrWs!;+t#7-W2yw9kye4e>3^}ayN6v#|fw9*Uh)+KlG-q>$qSh>LNQnIcQ0?iNItYd|E<-{Z`l;#@vArI{%a%O8{fE{b>tjZK!iOH z4s}n*53b%*zG43^-xBD2L3_7#agaOqc`T>HFwpUm>GEV*{d7Q`!gwIV3}1M9eDWpM z!VY0*2GKsnE|%^G8#keEfSR*Uv|)FVw)HT(Bh zVf|SWo?3hG+`B5X_fr==*>d;5fUzl-x)xR-by5PE8$07g4gLEzLfDMSOpeY26~B%} zTTZ3jwn|v?kxte}7m;0|9BE9LK||f4sQ$Y<=0CXy4qjgFvO+?>zP^IKVuEg-_Cg{u zGBQHKqC%pg0+<#8V2G=irJsN+nDbwQ{Kq&dwqR>dM|UqrH&?*laV@RfyuIWeJoroK zKY#x9owk0C|A&$*_}|mQOi<|W8zB)vVWIyV8`Bl^_o=L|qo1v_p^Br6tt%KqLtaEo zN)q%>hyV8K|B(DIU5)-vS4j!c|Jw7vy!m%ekkH>5{FfR1i(UUb#qdj>1SIsIyq702 zC((V0Sq>IQ6`j9VoWE;>SvghAAD(|*G56RR4)>++Nn&9sVm(t)dg+I~6GYfb`}8Oz z48V3T>2r8^XlS98VsGd>?E91YKRfbEJxkxk88sDPKi!k=b&uA^p4ej4R>JO8evFBJG~B;t>z%ef=xZ0T z`p${$U!t13j!^Ke-NwZdWeSS1tYV ztH(pPZ%h9XuN2j#j=LM7kzAQHS>HWP@9n9PGT_&k%YOC{(C;z>p~{x~MF+tBR!l^D zfSczt>sZ%K0i>NzX}|o#2VI}3vc#2eo*tjw$E5_NVBVH~+Ai}^|0}y;R*HrEx1&Sp ze2#NX9$w>0gg@o4cJ#@*W9unxjdXf`{!C<+`c+oh{uUvCAyg{qGOK%ay8F#zwa+8P zfGJLKjp)PEkf#eWj{xzC&v_W_MQl~`MLQ9bItn*70V0-NcsWwe(^a``E$1uo`dofF zZ`%C5M{6ue_}|nGH2D60W$-2V_F_Z$70HYB5vj#G28IoOuYfVRv)tY-40p&)x#Plv zuP;%V<}c=7e|}qFz51a?nyXJeW-69Kk2ww&zam0K7&pk zEO#p-!bYDekT!D>Y0dF6^i*;e1d6(l&Ty@O_vV_lcm3K4PDI}R`C_!KGSBVO;YDOI z@8VijTJ<{QbjDu4vg6jD1l=(GezwbMRsHtH654mGE$sSb95cxlu#@U}^*(<69W^JvPXr<5=qvGG+rif#L6DkV zW;&i?)x_u62m>?7qmyxEO4OR8ku21zH*PYD-Yn}9xpzX^Z!E#vXULVIY`~zNly3f`*^3HwjNo?WK zhnR1xM2%o{e1EceBR@wJD2e7u9m^et9(tN}mURqWxYu3bt0;o7;sMzE(kSSk^WpT0 zrj@uCGRD(Uj%-h>y+>B#97WgF@tpA1M{+Vp<1gbOpdO~eFkHg*r{4{^>exG~_^4F9 z2U0j7iQmejI;F;=K+R#o9r79JuNy846BUJ7_H5x9wtU|aL)bX(>s6#F^l`;%$qj)| zmuxVb6Yupp^n3xr^4aMVgXE%vqQb>kTl0N6iT2h6=%!nehcKB*2DW8Ki&N%`cExb@CiKMT`uVM}@l& zmL>|@u708^5sKpJ29|Qb{kjPK`#>u$ZV&j4Y95|UTPJ^UNrvnIBW%Vm`HOYKg1-vz zQ>8rpMTV3>NJiCb*b>7C26wumNcGdWwBmMS+iT7-jE|(%xDj4$55nv;)jnmdmENCl z;xF_*o^W2$`e>{EzO!;c=!D#NGOss3d_Z+cW>}jR*M7c28eqmadgPTR;%URC_xbgY zH+p%p-aC)3C83T3c)VYqr$0U=8_y6hE-7Oy?a7eB7834CC6}#Z^382KH@4SyVtb3ISSl=Sj(5#etWqst|fkpF$M4S@N9fO+#6!^p46KxRKnFj z>|6tCbwAT+OLykWIk7z|xjflM);`n23KH$Kcst)xFEDmDCn+LdH@K?8cCm44{RMqPr9OEBHCccJ1i22y>_6>2}$*el}!%a|BQ<=on- z9pkD66!BHIzOUWo?xP^nS{gXrc*mKE1}lNDbF1S$rPYWx zCYM65oe~%aR8&)_r86z6l=1eJfb}DKRqm zf%B2UR!P~g*mRnM2bx0?bORg?;SJ3i+T&8vcYcdOXZ5m<2vgrjG))|}?tnmrtW|I~IZ&SAO zt@|H3n~Ip3CElvo(W|^6T*=khe5O54EKSdRi3P@~S^4u?u(TlRDb|rS8@OJw8%2o3 z+Z@3dsyClf`gP%$zi+aCSOhjWhR7?To3)W=zgF+Z2E+)M2ALRgEpX!DW0ohSi5b$$q1j%N-db2Mfe? z^H~h?1tzyOF#Dlu@&r2$Sc4>`l=)SwW-zi}+d`TJ%ZIf?r`K!l=IjkZ{C1xS-mIRc zBD+hGb}8E7`TY!U>KrT)wsxgi^%^Nk;MJclf;Vk-POPqlxt`0_-uZ_{>DDu%%cI}N`vZ!?&ePid9Cd)B#p_8;9RxaQ$ILqa_jdSLIc4h zwF~@pI;rrGBV(Py4ohznLH3mXe7H|bR_ALaDVPFY$1pXQPa<9iiN9NCIKEFlbes4b z#ii?ncw9BM#5)>ya%nNs_eyU{HbF)nyfq?>F@ez%G*%=jPUy9glk_F?!0kk(#F1rLQPQ1HuL^R;*+CYu_3TCNwD;egq2yARBHu#!hEjZc@-sk zX%t3H#Jt}-?}i&OziS&QXgb29d_{y^kqyWh=`Y?m-@WNBj>=6Cv#o>zEiSaf0?a5! 
z-Mf+)+>Fn6*_1+|k4&Eu1GohK%wfFLM}I=2AVt;qYxYkG7psAcP?Ypxv7W2UQsnVZHTIcH zY5G|d@#u61p*q{e+TsjtMGFU60P#F-nfQwFoski(K}e z%;^kcACsbqYx+EsqDSCjLEA94W;7nKYF=`FfBa@4vW0=(y=%1~H)XNRpz@we5z83% z@$UdU>_4`~H11d9dh{mlVQyDh8o6E5+L`7{X^S6mZ`PW3dnFTY#v}wy&WerS)l)dz zr)cAZBr12he^G$fOet|jy>B#W3VmI15iY7ebB=eFMt)~*%yYpm`8$vEoC4~`g7jJj z1l+gb)Ds~K=1&$38sN%5QuIGxj&WJ63P_+?PxO%AHQF=`fbDS1kS*4X4|9KcgYk zHaXN7FeSS-Jk}n7$O%S&Kr#45o$v}L3E!sq%7yu3G?9T_xO~@3g!BzK3kDg&E8}md z2nnmla^_m?DL7c3gA_QKD0#t=ZwNtTWm4S(%va?Jd?7V^V-n`$>>V=MTK3lD%N4xc z=PALv)g3YNHX6-SnkI;3g4N|+@VGw}s)*Dt(Ch~Pjb$G^=+{_Wyy7M&5dgMx+)TkH za}Lq&0tCKj+$$I_A^77 zE!@aG(w&WG^OInB-@0omd2S7Em{{PjsW;qoFZpVE4?U((uqS2K8X@V{ezESw_6Oi$ zI^NZV+>svk6Q9}V*QID`JLn>9Ais_%T5jjqwBCC@gVtw+BFq9tGEQ>FogI|euRA$I z){`=J%b-*%ozr5V-Qgg=XbM4m}!nQ4K((+y!l;~w%S@;C5mZmGQ zagXZaxG{hm1y`eMnhwKyKhC7z`z}-%0qOJ7W%r`e+yLbl^90s5%<8Lk$s-6 zZO_ibhMSm#09*<87e-Tl+R6*HTOX_Zd-Gy~FDbzZd1mw$eZ9Yb?0MgqN#h-yv#i}S z2Esq6fH$2BVmPlEY8uD5zlw{a+B#~Pv`YB+=Ze&~7 zFs#oHx6g7{;fy{oHsD9MbVN)cWhwnpx8ET-;ZkqxiIwA&Y*a?0v%k!6U}sj>0WULa zU+>M@61EMCP~DRt3*ubU`7Y6SfrtvinSrYF=D?@yRioG)7HB^g9M&FL3TDpfzG9*C zZnyT>WC+C-o($!i0#q&CDg<9PMWWpF&=tERQR1EXivL^EoAFpd%9^EFa$PG%R6Rf3 zb9|QVXnAT?f*bxFDepxE$GPICls~tS9W$o4jc^vRkyp3-$$rW{J-S}IU_++JzAE%X zC$;jfCy2akmBIQr?a*tN;VRTQ-g8EqUd#+!HL^KTntZ}a{c`Oz%Y3PKtF*xNf{2NM zFp%a{2eJ^uNIxO4@01sKY{vlUZh_Y?zeB<@`}Lkyu;7i*4-*S+j6R!cODQt(NEMd< zjLhHO4-2m7@r=5~|B*c&t-)zr;8EPO<5TaswUN^-Elg=xqQpJyr>Cvos|xVKq7y!N z!UPqOIYb7{#P*D`qYb}!e{3R?pK)9RJRog>3LlYthlu|ok&a}HH`6A12ZMM=v8b|o z6{$-r=~=$FP>L-ix`#ujyO1T`MA|usWapvvNtu~;H=3J?d10xRx@|?sLb$J+;j6^a zw8nKD9|7EJnod1gAVyg*^o7Un_Y(VZR2%kdV>Qang<-3PCvywlbPQKJ+a|@lgclNx|7_!_F6l@)m4GxxnBl%j+YS1&>ZCk7+&B;rZ6bz+;jz zgU!bjIK>lL2nMms{DRTyT-}Yw2Nx(7{R&q5%r1n!?oLCeV=t&B-C*eF^ z6)+}dBfr39<#g^gy8h0xA5|J6EU%DF0-f@>8gY7DH+adm>f&a>MlKs_Z&RZz+cY zDb*zcsvV%DJxu#>Zq=I(Y1APDvjs3WPg(KT65&9PqH-Q>GCKsgZCa3-jO8WoO&to+ zdqM0unKcxihFvb)9^ip}VZyBAST1`&gm<372Nag*P;-)0W1>h??0#I6l8s5f6~6D4 z<=WD?uwcR$BmrIP^rJ`x&=94Wi1ycbH7koIcV9wMr=7TCuDMthxevu8bCi7*A90jh_66pqpwh5gBO-~zv3K*~~P%}Fnca;Q1h zHgl^fjDC;Oht`@u1(+e%NFF$5{ksq+ZipIX|&w=?`3|#_c%B21yW*qMx*0@RvNr`eTEHg#Tq~w2u6%Kw@@G;_LP@N+`LO(u-7fT z<-0bwP2e%1vC59W28j>RbnQ@B(WnqL{!FQ^$>m^9%d#&H=1Y4-H)*hEUU6}+WIrX- z#4C$!jl3oXlFaHsqz7|mVrT` zd9dhM%K#tP0a#GafH<4=UrHDugC#{w&bbA8$~C%StT!7uPV-ca;BOPBfDD%msf@KB{%QRyqx#3Z>;+w{LX~_F6NZj$1<2J9%5$r!O5TD2 zN&GSR<^XPuQ3R!?Lap=e1CM2P@Q{Sy2lOw+e0u$BiZY-cyLA}I|3;ug#1r|&^sjC} z$}QJMDk~Y7UsPPI^iq2J-THYqND1DXiXnSTS=Uu!;#2})T96m32=>6sz|YSy$*`5( z@#{`{22VG~J0gKuWwoC7M6TE7w_@WEG0~|6hww-8Z|-#s@O{0$!eo^M!m11giu@pi zDeR_ZM3=lI;(NQ4cc%;3Dn@S1CK>(FqUyQ(rjtjU=?jW{`>$a*QpQ#$CjN#F2ZD6_ z=a8asNxh_qG7}2mgKFrEmh@^yC7PR!QOP|q&0OL7h=Fij;y~Ga{;6E!)(sg@P>9^M zAIW~Z%dKB-lWmUqR_?D_YsxcLAO=pt&89#{8yZr;g2yNU8AJ(W$99;cwSPTOD#XHa zvUmD~IYA80X5Dk#R#36y!G4fEW|$S7Nx9JOeg#UNgB}GY(t9O1QfUWjV(l+=7I!l+ z?FoG|9s`|A1FkWwR)AlXXfu(Iagoi=@d|UbHs2ew+Xp zMK*NvWTF&gU?Q)?mO56m_b>78fZ$wM$S&v@_%v1I^dI$$?u{Cq5tFD+BF_q&Q35fS zJ5|u*)Unm4u$eLgkG!=a)WKwpB$=DY-;#f=;&oIA`5)iXc&(EV=K=8xfRIbajtJN3T*OC(wV^kUL zYWOeF?!5#I{}t}^SECyup9+-Z@%xykLXJ-F??(Uh;Ms^tLlBZV0dFpsGdFs}LROp7 zO?Y>jnzZPhGB`4zfnMB~QPu9IjRnEh{M&D0uGm%GJZFTD))58?y*d=rlGSD>D>43{ z5R=^bzy^sqHP?x&!6t(G7)kJ#s=uHyAr0*>1%ugT7FjtbVrnDQCg*yh z=gRo!MbGzo2Tdc7F1)5^WJ7MR>x$N)B6D;;Zth z=+X}&lM6vY6wY&=ydMMmP+u&8n-gk#wFR0U%&p)=xc^1xg$R+z5Tw=PfLoHkYVP3)IhU8Ss{TsIjNyeu4y?e7d+3Jeq#tXS}<+&=4ifF&0N9)j8LbahcnRRnL$Po#K*S}1Rqs2>k- zAN`{i{TQ{dmAtUbUTU#)5gA`Nx8j+qCc(B^;e=#I(${Kapy-@3-Vmd&yF3inFvj|&TbOp|B2s?>X z?Em2|>AmcM7GWcr^EEQJx9sbPs`8JF|M7cdEEj6akFH0!Hiknga*_qsTg+xGA*;=O 
z|5A%C?`+)Mu^W}(6rEI;);YEi|0|}~{T??a@_rQ3=+#2Jr~gyJQDVU>$cmhMTVVsUvRnZi&k z@IZJv_kcC|7qrfZ!$0|n#p(dbyQ}@Fr_hDlrE1{>O5fWydyT_peGp@_U~=Ns;Drgr zg)3GL{E&M;_lV%o{fxlAc%_2$Jw3PNfcyI@<}ezZ)14&M?u*SxR)?#f#bgCvBO`2)mH%?O;?o zZ(V2dJu&uXK{&+_z(YQ>d$o}z^pK!MPrlsw6Mr(UG!)b+K`#~jjPdB^UcxtnK0QHA zbC{F-`GTA;^~sFUMm2hr;EYA%13dkRk#uzzg=^g#Wcjg)k9gi+ffb0JbEiu6XPSGX^?UL>R$3l~}@|ZrP9DV#YLp9-_70;nx54HBCxat3p>Wc!vgT z3LfrUgDtMtk;2dUvzkbg@R^2csr>PoT>S9w_uVJuo9vdU$vqzh0M!b+W8R}jB|ki6 zo$O`j14v$*WFn7G30*v^2&^p$8G)HppkrpzWC4srQv1NL$^x5SBc!G~9LOoX5Jt1l z)<;dMZk9VIV#%WJ&b8lmW`@*Z;}beQM9w?*ctOH?NN}JbMTRmQ2Y-@~NXidrINB=S zbB>OdVLwHs0Mak&LmkaZKWnYrNpcUOzHnQ0*dy&8OvRns91XgzE_XM*9kG=uxENWD zuwK`?7~uNT694Qt%2(eL8puuA>4V269fZMj_*5K_ieIzT2VQn1C$042z{rS`==QVL ztHB3&LA|c5+D(pFW>!Vp^GbcTT}e@gr$=EP8}07Uzz96gB=E8Hp{Yhb<-l1t@deMe zpu16}WoPVKp4dP)C;V%-RG7)d34v>vfnZx#Jky6>YsFlc(^_-gIg>mi{l1Aid=q0E zLKSh>NCG?A{a~@)H0u;LIK)Sx#R0lgs5D>d9n_#__9Ysa+V7kXJXTAatZnm#W5XO^|=>ZoY zF<}9E3~o>t0f}4^9s-0_E{PF>AU|MI>qTi6S9&pSZ>uQ0yiMhuL^6!Vy%7~KLRkQh z-^htcix1VMkSAWv58L@>Y&MRI3;qH789QW8&7VI^W*Sy)aSf^0Q)T=ym^5DI$xsp^ z7*)HlR_(XJyMbmfa=i&8Nf1c5i}YM*kN)jp83?Otn5bk|f|#*PQ&=Zi&%>6CQ+(VnFI1vk5f1lk(?jSaHR1R-F=eOIG=+~)Oo_F(>Q!BJQo#(p$| zM%R@+<=g@s#Ed=rGx+5HrBANTB+S#&5NrJbdp37WCLt3!`8VbK4W|mkTSU8vfijSJ zod?pGbhD4rqxR3&plcP1=1 z`Pvx<_pyF;LfFadimQac92kE__J$|>Vv@`G77a@1WVDE*h^o)8Hv4r?NTVAAZg?67 zj=}DSNTug)WE=V}C%D!vZQ{T6WvgbGncrl(`KhsA7!&a3YkzWcz*U&X zIjXu#<&9Yo16OLyG511PL7CJJsp{J~&-m~cU= zge~eO2nlspd327OdQ);KlHm4CsNC6;)eK+(YK@4jgg|g(Mo{GY?PPDLB6s7R9s+O; ziCWH&rhf{78c6vM5g44%G3nU>GX*Qq5=3xI2Fw7+A6$E0g*S96n4K3=F7d9#Uv2JT zQFg`R9`UQ~c;Vu<>U}_VZHSG5tdk>QzEj+DloZZQh^{~P%_fBfj!%x@LOK&Z>=!ng zhl5Q`m>{I$ZxCXWgd4m-Z_^wvaqu2Dc-TN{v(pnU+n<~nc*c5SXVprr@luM&cYv(0!hFo9u+ua_s8yhjCkDyL`SUC;YiQv`23>wij+Ula9wm?;5=9f zULg>kJ;3bSrW=2Rh7s~Xm!FaL3!xpB&ZX=f*GF?$c-Ev$}B$_Dsw{U^|aFMQB}M40|e`AK5x;ZREZCV*2E*e1o*QU7IA6^)l#K z4v7g&x+Fg~cV2+{OfZ27<`DG>a!#m({&hbDv99zt?A}6PobV~a1l*xzN6ifKX=}Wh z3#pDs$(^S0d}zKR5aseCLPS`5GlV11ux=3BzHU%zu4|OOLuRlAfqJy*-d>hwDnoL_ z(VX8XSC`EfWh#dMb1ATK%+gwap*gWVn(;qUQ5ZcU`l?8LG|w$Wwl}J;X(xUEI>QGq z!J^8raM=bH_QXo*sbaSl5!ol!+wKZk{@W^HTh2sDLUN`Lf)O>+pFiet`G&2tJsz=d zy*wcd3iTuqC`r`8E4Ij$@i5(a={?c7czfwd7d#h5&$-t%DUiGmqf36(a;z=8^*JFg zFjyL!(Cqw^Af0#Ep?kb6Z%^^|*XZ?KoG~C*nLKX}{JdW**p{Gq1QdL9Ay%q^O>Z=r%x`iU_ zGkB(bq@DPaCtNJC%d+yD3hOxg!A~Jeo1>JjoCWO`2zo+eMO-NkY3XH&@M*?4e38{; z9sUQVpk~TF`>blbLk@?9qM!uyVL$uWCgmD^vH+Bqh?`%rXwkyG)1aQl)^iU;bh1j@Wk}$dZ&l z^rV<`0PBy>g_}l?bh0eL*pcdu?N_WVBfXU39u$&e*d6Wb#oJY241}uHXJJe-zy+f* zt=m`$q%M}^sNSjMXvFP?n4rSVDnbpD{>fWwiyc1irdSCmnv&f#;6Rb@a$GEjUss_Y zkrmYZ?38fpN`1Ic&*koeP*wsz9#9QeWw_1?F>C)MPss15g^!%BcS0Or_aCyhS|@qq zEcEPY1#gReAg-me&h9nY!~iHiN^Wv6P=9F3pX~xfei|8T$RLOQlb;2_zZ>|j5QIU^ zdc7%Zj@IubNR|tz8YyJk0^yQOsICer2*2$Kuj0g*n(|`nB@Vb}7vslmAleaxDhikc zg~D)3|29@#M2{2C`blLoZ-6t~HeF!XYvqk5Oh`X;^(LmDMb#}jRmzoeo7T)4+dSz< zWVTQ+_`SDVMHA7)7)trFo)DNYofKf#at4&F!05`wPsO|IDffYm(ljHp;^SsYU;0%%Z$ssexjWqQBsBPCe5P1F)*9q-JJOn8?MxwvR7rVEdqG#*&ItT zIK0%ilV7O~M|sG;zx?+8rVN9nvXE&Ht!A)K{%%mI4cx7xv5&dM8_omza*1^g@`#H@ zb34sftsY2r-ONEY@_3O=Fm?+pO_7qvufF@`)O0X%qnn6ryXPA{2Q>G2P6pctA%E5I)%+6d*d3etX`p;4frJp3eK4pS|Dr?W1hrDQ_PwEyzlCpa+8nMq}#p+Hr}p z8B4}2heAyxF*2R!EIU})Dkb#2UVHk`|BB`qNR%14u8}2=|C9I+{XzdFKAPG8lP<5O z`Hh2Rq8|oWnby4QaQCLc*)g7xaDG;-Az)L;FS($5_pX3r#K;xHwJ|km!H9 z?1 zF52;xU#-VGHjh#Q)L=PBDSZva4a4qMLE$@L=S-F0QNy}85mq`?0bd@-*$&Z)LbC^2 z>+^<_XqZ1sL8Hq#qX|YRWJLR|KYZUkdO;j0xAKm2hn%LiIOcZo?kYr5c=eP${{0i# zL02S8s0#*?M-HnA^M!BvJ}m@shT<#Gl_^oYg0LCCzp8M`i|Y-V12`fZ1hVtwY?2rO zXmQP0Y*j~687wEChriMd%#6Yt?09hQ6NklZ>c4z=`@xRCWr~;l?acNPS1yDwbY7kA 
z<-MGDB)i4SC7&U0dR@PY0jqzwMBa2qv(7yf*U^4I!oPD>0Wut*yTVQqV` z6pzJ{Uz{WxH*6IAs3ON&WwQdhH`D?xRQclhQN(_fh4~!;1yM~7nD#I1+=lH3CwPzc zFMziHpv^Y8?RLIPT&j)r<`woT~K!zJS%9+$+s> zX)Y(4>h03EzrPnD$&7Vzt1MH0dH%aPLGM9mDQH;3{fENl7hDBuo%n;a2MgPm#?%&g z;#hF?%~DJagAKr9RHg)jACBVE%u;NE#sFibgA4UbwHZ;5FDY3LEf(^ht!vtf3avh; zq=$wbIK)FlEEd!hKQj*=;d_l3)&7H=Burj&c<~=)77wWmn?1h-F-) z{nXGcf)kBTRO49ApQe9Ay!sC!|8wz2m^3w}VXgz8!2hL~L!B0fDO8IV>Rt7JspO#I z#xzW%aryG)e=GOEyo>!?91?5QNb&lADow!>%D(wiR_QQPV_WBfl>Dw=N>T4R{}F@4 z(|fs87=l5PKgOUKeJ49YLNGh^3%wH^VDY~QQc+-&!=k&NY4H9})g~XncvOBjSBrPr z7&kFaw9QJjYHmM40~{))Pi4xAaq9**^Mb#_!-NcpS6d<+y*;ER1!s6slELG)GG+N zg|sY%lm4nZMCF1q`uX-Ep}q>cjId4tFvl08-$S}}X!W!D zny8xIhDj>Y+!RQ(TQkatSnHix_=; zid#kYHTa)OBi+%z;xA*Jcm=UVloXj40fXxAjovgLj@jn{mzIcDr zvwBJdYjB+4;tVMWI<4NQ8_kg#o9Z8ZC1vWj9CbXvnLN!}K0x{Q3UgP7!T{SzFp*^D z=CIgU-N=$&2;P(GtNTi`80((!{q>LM8X5dWa;MV~1Qaal7%Lo7ShBdAmeAE@Tjl$TdRm1 zBE^-b1_%>y;=EE^IFX5@>)iBSsA-#jJ-PS;_KnKZy?G>oZL;(VQP*Muqu3?}e%-KZ z>H9c|O3~uCo&y}>UYoj5B&|^66I})u_pXbI>E5_J zXvodB`7Y<(bUHBT(hJ&szaI<0;Se7x&=-F?s&E&WbGH&N^aAR$yNSu>)mSU9@66P? z$WVNKK)DK&sX=dym11zhxJF)oQB<{#yObO3hzZsz3ZBbX{}cory$fx&E`9djSNJbO zcqDKP%!9m#&bxWR{||F-8J1Q#(!P#dJm?BBei} zoj;Kr-Yv^b@2O*u@BPd3cRhgZq@r9RKRDjQrI!rE(-(f!+urJwg{2h6`_Pv1fay!Ms@|-Ds<-^+W_Kjg85^~%Qj#YM_ zPJ#Y6BDYCPyS5#Ij=?+J)We|RM%DO}P`C&#(|ggRQj#EMI<30laI@j3L{7F}{229H z@f{`N%@?vBxa>4KPCuLaa+vwL4tUHTO;}{dtuzAn?zOaqEXqqgFSns+m^b)bN5^8d zzkN8{SJ_z(o zW}!%T)8FYMSr zTOKws!)~a@g%9Y7h~OoHFw}7__+rv}0KaSy`gHzk{w{0-e4s}?jOzE#ctv-U>tIwH z4v||`;Pn_^K1%Sko!v|G069oCjOP7gCd$*-`1D#Qb?z#^gZpiC9F=L2W73_NQz`fF6^s2YJ z#+-*&q9yTjyk+wF=Pv}iRyBmX!?l-G6>w|@-nDCWr)?JTxaJc(nsZ#dkw5kEcrSKe zT=a;6a&&?`zsGF@Y9uoJZf;}t$GgT5HgdT;PbtfuiqvGSN<5EvSQ7aHX%a(*VpU>K zHJ}(lx7A*{!y953f}oqR@DXg_~R^(bugDCUq>f_@VgDksuye2PC=H>?XBhZS! zY`s<*CBDUCKM_DMSZ<{TKVV+s1xnDi4;WC@VSFd=r1>f5UG1xgRHwilon8Q=YOSSM zMgmF7fEiz{eVk@QZY(h_Mdd>EiiYBCJXZXCj*cfCXVFKAj!$sMKqgw+DhBFYRi?_$ zsvuH&q0+k_ji|6O{Qg8pG+f*`D;Fpd+tt67KyQ_GWVeu;!qsuL>Egb!@<}p=aOuj2 zp1lfrUTP7ik70g(w{$T}GxsyY73?*CAF6MifG&Ih$Fsng506QL_qmCPh*#T6e)xLQ zc6mi}u*Xykr9d~WDOcwf+;pJ=VA<=V+=1cgw)cR?n*^=;r?OHT3_l`8qIiFl?Ng_o z(;zPK-`lnn{VXd66|9Qki=53d^1~qb-0hG5~!8wiUQ-% zHVcqn9^jg15O5#p{vd<*_Rf4{{KJz%-rqV4ad0P&?mV_&vuglgGfN+XBwupQ|MY^OE zP;4`ln2 zsY|#jRKGP@B^gZ2$D5%kH`Ndr?O02yIt{+W&gJs#m0A5r8w23yJL0JO(Q=3L29_LKQafGN5*IS>zD4dE> zXNcmPjea+_gPjgEL-jn~cr1Zi{DDI=`DL-Hq~iOLnqUW>nmd;Z`PJ{pK4wv1Hl%^x z;AfKl#`F}y^s_?QPgKVuI zM*f=9GN_x3XEl*RGFtBaeL}sBtI}7Wjkng1D(%;r4%b}f)av?V)^Rx8Zhe_>m$t-s zSLWBS_UEmAb63YTl~b1W!o@zZkj=$pm+fzS`sXb5yhS{-AVC0txklHnm+V)i`=#^b ztM`PPlN{!sDLo~j%jpJy>~RU`S**@Sf>i`a&UhuMUv~KApIHEo)y)IEqYqA}KrY~# zz%F2@f#{a2kR>64r^#Hel;?c3POk&t;+i;>1BlYM_K!_)WB)pT z*?+))v41jb&!WmYI%&L`9>Fx2@iuW^U(1NnIE7<{wwDsSEL1G5eVOpO$TYs-b+qn;uBq608J^<#)?0PW$!9uM9#6h`PP>>UH~FG{^&))oH);4!IC0{wC@f z1V416%5vKUA8AFW(=15oH7MMtFqV8Cl}gj0PgRHSd1tKKW3tu;Gnn}LKA?<-p6^tx z4J*nr)w?G6;y-lQbKU>80&O%`_}LZ3ysn$mXI2uNrB=}dP_?np`$x7UTlH26K}>UQ zHQpkj$-1yP-MPYnhQnEsMpN&D1Rs0cIQT4D%71Wm2)dGa^Y@f`A2IXiB-#d8pu(uW z87-I*=W^WEWHPD?=OAq>mWL96Bmq+i1CUJz+QWn!s}vW4+N8A>_~EJCnT~;MW2Y8S z?91%GCvj_x#aN+PtELbfO@=v>tx4k5!=b(m0A-_Oc+cR~Ypoy5*bPDOpO>bf`WX@s zdY7Pa@8%+xm5*?4X6=Wz3N(s1MX&f0#ri(HmO z)M__Fj(jMQihQ&CwGmu}Vz#T04+4)2L*t?ODx`FZ54EMv80)J3AL13hCtbsKcTGnW zjmltu+KXU9LNS_`g<>k~Mzwj+ ze{1I4kn?yvgz~P7tuqN&*X&}gJ~pqB1*K;9M!Tl-pFU{W@hxjx%Y_Z7)jhF{WK6K7 zvV1&|AYC+@Y$ny(`a(Pwmn5(ft5x=SmA!ewA5RfCAclSzn93Z8vZnN^_yY+jrK?#W$i*>p?e#iy?&l*I48QmxRffIzcB5)J zXZO)z#MXqdB6 zVE2XalDRlz+iB$}6ZjSivS$hMPv_q&L5|C~g9!1&Ee#g-ke6s#qD_3U(k!2B~zV!CN5Z zVAy57r4?2I;llY$n%U?(TfO|3{$O@(4L$T)zc;T+8mm?O%`6dSg=Q*Nst%{$gl55& 
z)?Td_WaPbBky&HsBSW+D2hHrrYh_=q1gx4kW{H^l^Ax{5q1S%9Kw&!DfLCQTcYj3i zNgiYq!1UFaonzT%Ul5(r)S@d=|EHso1x`zeM*Xk(B)K=J46WJ}>`G~us*>L05PC`I zJ}_!iy~n7eNR;T0S}eoTLT&Nh_{k3N`1{Tfw{G^Yb-Jyj&Z}u(R%czTz-uD}Gd+cD zsJO58hegkVurBraeds-u@(?^`FE*=Lz3-)5BPDSy+^8)Gf{GuJF`PxbvweFb%VRLA z6})rA({5IkWL<_p{ttiFMR>ES27j&UhyS#ycUwr&oi~ccVjjZ77T{{xVWScp3w3^f zY1R4+M)eWRv)5lSK|)S*d@%;Pmg5{m6*5FnzgZz}9(kifWwI}xO!7)$Vc zX2ZDxU|t6ue1Tw9nd_M?C+G-!5j7(CFTxM~W)0vp{=}I(1)0FjRR_UFh}u&TNe+wv zf8QqWD{Rx#;<6*1t=&jZJ-`KTREBv}w?WTf)7$$M+f%FjHQv#x4K`r&!nZ%ISXC~C za80g1H%wJ7z!@&q`ld@&%xW?i8^T@QC`o-uXcc=b`4vzYJ9+wFLfm7>z7epQKU{kN zAI5J+5Y=x1jC?sUYI!jJ#kY> zJ0l#i+aZ#4+&rE`QeyX0MlGkGM=aL!MZ;O$dwBc{LE|7~#l_esMTLbDpUkk2 z!^?*bGwVu{RPeme`~KwJpA*GIWlPQ|v#SVKI(3xkuK4Ffh5Lep_h7yn{D2e$2~*&7 ztd?UzLERDnvp4=z>-VD4)@r*Cf%fXv9}#_NX)mtDQSpam(cnc9G#w`F4UNpv>R(_p z^a)M?PCKi6Hzt*^l-aN+J&>U(UfX$1?agqDr6%{~r`O*U)@K`WrDLB3H~{L>?g~oN zrO1`YB+X?Xe0KHvu2%jU$$nLCzeD@gdQ5iEwkI8`>B5HCD%))iob?+x4@1n~UZzw0 zp`E+}pr7GZ>(9S|#&;pNc<<3P9m~CMARWs)tYJzeGvZb+Rx`+I0 zvBc1KHsrzE0GDmX>DnY4Atq*huY|J*asuO2+Q(EC2Va|gPvjU*7Yr``m z?g-^n%0fA)y`dgw`?=dvFm5hDM!xeKfeaz#MxR3-odxk+V}E&|X+LtrVmU2I@!jVL zx#j9`NY<{4syPShgdg$rcO{3d9}|B;_xui~xER7VA8j%sf?!BFyfL(bGwHGuZMb4C zTO*OXgYSuYM==kXg6e}Zpr};)^ug)Q>tIsqApZvP!WaI&IDZlN)Q^B~IC@3(J?B93 zZJOA$(O4QWnMhD-6wPQhTq)ggf#fCe){n6c?-g;{4~!o`SQ+nyOU!yC(H}iN0#^sX z>I@8sD?9BlDvyNwKfQV4e0`Z$Ii@Y$TLyokP^4b9IlufpNfv~7k&`*TQXvQq*Grd^ zriq5v8?%S3KQ6Z&JyJ&s#!kHt4u{Ybr>f7#TQ)smJlN}XpA5>25?c0UY|UPt*!8l; zWKHc`kdT0r2-AY~R70>?{I6j08}xc!Bb@_UEH^~^Og2OdK1KF>uH?(E3V&s}x1Np_Lh!CL zxLNx2-CMy923D)tX$Fmh+cXmc40F{zK!^PG-2aTF!E-H?mc;O{cG+?6fyI*{CAUlX zKO%|sFVXU>RLJJ^r#D+Jg!rtcSY{`bvdL0>g%cln%ePmD95OK7iDik&53yQ6V)s=O zjELx&xIop5D$0_KQFpfKX8EAk1A<+keFI4}%S1e@S+rC0=h?C)TB;$@M>ay6OLqjI zZ_EY%dT+5~xJ>^Gw`o*kZZeF91Ip)Wk_AYrmE68fvmCq~ICVsCz+iwq)=~1`7(7_H zAyV}|sBk5~r7~$p_cLR#P?%>&Hbz>*-m(G+y4|^}0S&(*sTA*e<=5&UtRLZej5T5> zxlJ^crBX0@FqVQdUFz1N)^+7o?TZQpelIaDh5ci^UO*uemLMf4$-w7BI>NILhf{I* zjRo1lSSBRW1v<#QY`AzIcQEXAus{fk?*4_?V4|#f|9X1Hh|ge=8HA1ZzSt^ikPHCZ zTc6)%Z4w(xj`QE3ff@2&f%u;9GI5>4@3(w+Dq(bMw!^F%FQGs3s8pmi7N_#c98mVb z4}7jcDjrcpj<1B;(#f8(pOpzS?)sp+q`}uc;)d9HB!{%3_$U6e@io4jm4H4rMEt)# z7Nwa@5DWOCU>9L5#|qK-n1>-w2I_L~ zqjnj8z>Y0HU_+8*|H8$UUHKn%0w^F;N7Fcm?+Ye7z02A!I$FX-RN9VVjHu2LCo$O2 z?II{O#1C;Qu;3@dyNQkT8efk30|eD~P51e&b z6O~;25V6t!;K?=HJ=-<56=*F*y;AHtWJyxNm|nB?qr*P6H3W!j= zF}|QGP@uL6tFR0I8=?vh&Z%7+0fs!jN|E#tL3b&_GqY%IL`P{N;GP#ax!9Z#F0)Z~ z7Ux5aN}DAS(yp8~W!!1y7py2N<)xrh+%{b!SJ`a1FK+ajq@ zw@^sGqoKBN>Iz*bqnfruu1lO6Gw{&9r;<_(iL>N_9hWWkc?#0^-V+Si3Q&$3AS`WK zjpR`j>=Wycv=#(ZX8Shr?GfP8?JnL3_;d#t#}`7RMy~;Js8It-^b*bjiQw)HhnOJb zQPzf_o8n^m92IXLe&$WzT&@6MvDXW%1{178a>ld?L;y?V?HVHnrsJd?eBTgP(X?SC zv=MTNXIwI?JNWL=-{MshKfkqC?HbFJm8O->EaQiuOZbfRiaN_Rb2VQeST!|q8JeN| zK0qNYxDzV0iw``vK)!0uGeid1c6GrTaX?%mDz%;6^044bUp^79DTWz8eNK|b{)`jw zpVeqfQRN<~bg$q9RBlXRmEq-As1rU|YeQzpDbp976zw4C5$ogQ^9I9y#2u$?Xh7j& zZLH_|^8M0uB2WM?R`FxQq6oe|zT=#sY++LI(z7X!~BN@ z7evqnV3g{Oc?;2S)dteXCA+o^e;|OT4}NrpGE%5UmK4hHJbNC{3^|PYB646L-{9E1 z-1S-CB`D*}QpRfV7{$eCJM&mgn&`pk07navf}c?65)CNEAHM7RG*ujmGo{l7fQ5MO z;tBvnnqlDmhdlV3PaM@jTU_e|#kA1Tk}qNQ7OS;OoIknCzgw);?=CI2EJt>xQ zMZ!U0JZ889S0@Rmdz}1_ZMK#ZtR#Drd2-rWF%zXx3D}s9fZWMQa|&49dOK#Hl*M!{ zRa8L_NNgm9xqMRlLc!dGU>KHBl{ zaT+p8!su<@ZIfIsDh}@Lu9}RgUGC-ApwyKFjN~&e-Sf8$Q*)Ma36|?x&nD>^Gu|2l zC{fnL4Sy&a^iaTTv@q>VF@9i?W5XgQIcd0eOa1j+)^;0taPjOwkd&_^OCs>Hi-gkS zKERfu=Nk>bl6h)jIU|_cxqj@oy1^8~_euO`_%OJo?@8|}I<^?D^Ya?gr;u$hX^=2^ zsW>mky|Oz4&DZ*ybvX>zr%Rm9hXWo1P;~DU%6CNq)Dtjk8}Cw+EH^!L0>J#;z{_=o zYf&zzdfabT{%3(+;8lFL;Jzyyqg7IvldJ-|CVZ7aV#_m_1rmsB0w(Jb4wo(ze_RUk 
zg_s{K_vX5tJK&wo5)-gnrf-z&$O2H5k&}!s$hBGOQ}cnNnxsMsu#Q`j4HA-5o7Hpz zaCnCAN$GT&G-)9T>NXH*qWa-uX}HoRTIx61k>$!h>V~85EcyS6xdhIf?b>oo5SRJc zB$Hh_ok>Q=+o->GO7RDjLOs}RsSM;*+3R3rU=A2rIdi}XY-(XhwmGxahxmzk3m*Hn zMcs0I_h3x(x#b=3TSj(30~dSpp+VnuaE8gXuY6Dj)3yn?A{es85{x$wUxcUAygGlT z{U>by_NP~R?@(Iw5I1$MY{^FQ4m7^nh&caRORMbTEn*JR6@fWex+NATxZ=Ptvsc{Wq&?R0iz=}18Gkf<t(HM+DZX)pxtcpp#1pQ8DTS{#x0h2 z>U8V?!EC1oQM^Y(wec6BeU{*hFh`(_~7S!b^&fQB=#HLHf&amGVSw|UXxT+wr zcyWX>-u5j5kZAo3AGMHXyvsV1$bXmal40t#(ibYUT(+e1fTY!`&#q~qktE`Y$B!T8 z*8z3F|7)4H`TDaRY?L&M^X#^eO(DN+N~1eL887cYW=YzsQ2p*nXgyJncNwt%9ss|$ zx4>_l_d=W;iQt3Htv!8ZHWqP!hgtk$)p$e?8iwAp=(b4XE&Kpg75S6xm^e`ZZK(LQ zf3!%WSA*VBDyEf#2ULof00RaO-it12$Nif!C9OqGujX9v;sYMIQ6BI4s|QcyFa1W| z*njF7`{Pt-D{}oTNleP4+PUI8tJm(2_cN2udXEb~wnr%3_6+tL!{Af~<*hA9`1mE+ z@A{TK;--TZyo=T(*#@*6;0B!-lmuQz2#Q6@^yj+F5Y0_Y^x^rgPM40op3h0m`|TQ= z4aN4Qf8l!Zr{2^!z^qG{jdfb!+th|mZce-^F~LW~oNOL=Q(Uyfxa_#+2oFQ)KOruW zljM-haev-+eK4yci5-lz&0wsI0sT8i>0l;ev@TGWH=Zr?C6FNBPf*2*(+2 zI$8*wI;rT~v~U*|-u!bU(hIH`>#qzQSe{#4xnxxwt155=<>y({lv(g#4^U0>+AEZz z!U67v0y?^p;SX8iAVR-W`;)%kf7*#(b8uVYV};afp|xqpERR1oUl!Q4Yx7mJc$fBb~*EVlBAycI$tiL{Avo3Cn z{Gzg-RvuiHAl%6k9TnzuW5QR7`|e3rPTW7FRcGg4cxjF*S_g;Yt5U2fyprNURisui znn*D@J)LyXnYeP`F?+0IOFZ3FGN^zo+|oNPxSrT3Yi+1+y9!~Pkdz+BFbNP8hV=yrmdEtACA59U@{@N9a{ zRGKhvm5Gu8lzLTpGE2uid4Sv!?Z?Lc3jmITv7kO6pNr?+OykmJmh)eJ-~Omm?4O~G z>SMAMG{`iO@n4xX(`=-sY&M%DO%~P!XeBW9AYan*`2Lz_N09T6s zV?P-nL}J9O-+1=+9rIzvb2F)2>w%K&h^_&s1lyLBx29@N8i|CLeUUJGigTrh>n0Ws z+dV%;3mv$b%9k{65#0G?mjI~9XQDJ)5m$s?Vlw-EQNYN>K!*u~nCH4x;DQ$Q*L)!S zJ|d7t4}bfJ@i$Le;jlGnC~D+0{sRg??7lvJ-!8(SBls#L*hdRs&N0oQ?(tBECh~nA zIGkYLv@Jd))yVDLA-7#_m#U{IXFT}cuF!yU9aV(0Mc{_G#GS1UYTSQru zuXsX`AR^&-yg4zPue?lF6^6kQ{Nx>_!_siBywCbrpDQhggzH(hV1~$EV%}H%R@J51 zffOu`@P|)M)hZ07LBdE>@qAx4aqevo;};>B99sBkvW@kz60_NpJ-1P^jSJ()5H6cV z#NmQ5VfUD|s zd4d5fRyPGSnLwiYNhYjM|m-NC0N0pIYFJU*v}7rfXPn1-viFFKf2WGcY*gWrhuH#v#4xK4xh`sNjUF! 
zNo&>7UF?>ZnfoCdZ%%RZo=sPGc1KV|6ln>Hy?ZwN|Mhv`jOl;;hcgz{2^N`K%2C_b{(naYvj{$%ljl8C7J2)aXy+IBkRaHWK3TyaEe*E;I; zrp#5mrMxrTwfdQ-Vt-1a&|QoBZEm2-E2doXb_wix=pBHDGa~*G4W}dMK3)^Pad$p{ zcggAx;7Xx+!QRqbn#qtM{z@ZTlJmOwaHUCvoE{uE@7a$xoTu|}&>_lr_O?~$AU+Ib ziW6h-&L{!7)O!CyDaO_(gNr6f1l>^H#n<@1LamIiHmg9DKwpOH;Kslr{|C!OonWgV z4VWPP>FvYM>vY3<&2_`q12Sc)l>aNgirOML7|1P-sGBgt7k}k;Efp9JfD!bR7ybQ| zzpR98PFC^tg8_7+Mu|4mNv``ymDUTSU`#j+i?uEv6u+dz<{Wr-0Bwx~n5F$&ixBH) z;yBxIW;Y#k#qDCW2LK&0>^_eFnb$a%}8BlAXYN7aNb^|6->jC>?LQkzj z8%1?)qecW>vd9NWZtoVv#(DbzH^aRQP~wtVq4+I5102;Tl`^LufH{PgS$Pz{2_d@R z$jIW5TZ4U>38C#AtUu|s+bkoLMnC>h*c(xsb#v3bf`D;dI zW6Y}5pfdDtRv5n$|oKPcJe>VGlr3qHUgX4HfCss_|1sp9FJf`lxnr#aybr~m6q zHYcyQ+}+N%RPw-#Zah6O*Wg7S&=YvI%ERkAPGs?JeKOJ~XeVU)F>425Gp|SEIgFjL zpGp@#K$*suUgU?SS zB=eU{P;-||jxxy2rjl9BaJCvxX2XW|pv>#TYEb5g)iND8819I-Vq*cfI`oG8f5Uj=ZvMXCzDwPDy!2J27Bn|ZR{FMy zCDHLB|3Wd0*L%Lyr>@_vk*|t>9j|oYYXp=dz(?h~REO#pX2|h}11jMhMhiJu95ywY z1V-;5<2-Q41y@N}fPNMVKARdRcj#Bnx}Ve84a;ju_g7)hxV!M>1KQjTX?#RwPIqgcDhnY>|3 zKTTeK;X@dtDG93P(xy=x(Zqu zo7@0}vM%-Feq6?vo9m&>6u%WQ0LC$oK1trK1Z`PKi&$ww{jdz+at}GZ!a!VxY+#@Y z({+o`xdO-T$L%vF9qC2YI&Rl^ecgeX$<-40TJVAt$+3~;9_V^O9NSN1`N95DuxeAq z`Dt;3$@nen;}P3HTVwlB{h@Lmvo0$n0kJ|tO^I488)jUAsSs-fTKxb>J_|jZG~YV| z7AR86QN5k(-)R-QU zqx(QcGwVWCJ_izQ9E-_H|H*1zZ*3PnJi54(g3qoeA3l?1`1=|4#0s2mR>)?uGypKV z3@VHk=w^Vs*%L&cuqwR%cnoeL_-cI08dSDjrLq=sv1`U4rG2Yr2bE>LMkh~wo%)_Znu%3p1 z!dq}C+HW>lDXJ(JT`XOoNQ_H&r8hzLWcmP|=VI55A_VVrHA5CO4}|t2SMQ^>_?euf z`W&+hc-K|#G+8jUW1im9lrMB4Y#H7MaE^6Z-#vMja}`%Vxfl&yOqf}Ort=cWwk4B8 zWfeEzRH;=J*n$ym0%7V|%x&BkMK8@%?vVvhmA6Tv=hc6$6%tMDDb;O-htf(Ep%u!q zqisLr&{L2gF|iv-6`}7kfxW2~4hLX)c61X1V&@S)=V1!v!@5ffw}(pxFBp%K zTlc|$zq5C8)#_EICTc4T?p(T`E4i#@VWC>TqqR2_>;`=7Rpoz>_T<^C9a`0K>;c>#J>^d)Ex{b99{X=)jb6oXkrj);;r&dh$>g{WUJ)Uy5YfBx zb^v+#IPW~|Z5<5gf1$+hZ$Og?SNep|gLCFr4iydn(;{TF3SBQzO<67u&6IEXNg4$R z4%5YQ!qQsPIl>Uo?D7Ll|0HOq16TCHcWXk9eLR;dnh>(u_52NR-%gA~BH{+FC+g5P zAlcUd=bqVobwqjKR5;@WZ3nA3Kf*aMzMQrD8O3v%tvU+LR<#I;=RO`|F34~n^uc?~=ppcb`qd~Q4|%xKkB6kP&|@Ybt_a+ba&@{Oi9bSB z`W;=4`GAbO;G!X+oo2p~M24~9N0QbhJd)_;%#Pcx_$;f8b=Oy36tJb^WdUSxen6AA zq#o-KL#rkN=rBUJ>q`mKf%H#KUZd=GdmS^94Y4$Z7XX9MDrC_-dBsI>qZ|0!ZqOyU z(=b#)^Wz-yLj;j0!k{+#s^Jgg4DA^J{E+Z(T!yU(aoB-dAEJA4c-LDnFeq|R(MR1_ z2BGG6#>SG1O$X^7Y_Q*8n{053I8*xuuWe@Q5XbkqvfgsW*`-||7@un>1XKY{;{hIG zwM(Ox_#>P6Tk`NCufkR%Hx{-uG=wKAe=HZDBO2oX*9Z79q$!}Rxh<1RLy`NRGAV%E z8e2M{zj$2jete*iJ`)QDG}DjtL`ssIWD>j@aB4VXs-pd{KWj1fXbkTqGK9_yQQTNz`m^hGyC#y&m#HfL>QQpe@65` zyUNOxmocPz^Fjz2!vU(5|9f%nm7Q!^bvm@9NcJB}imMP>?MX&Zy!B;gtg)y^vtJvW zNPHY~$sG@LMFSfpp;;hODIP4l41zh0A}-q#4-XzEHJw|&GjW7npm^-EIq*S-o% zgWw_?6t(KLvlY>P!0)Yva9IKcHMLFkrSHGjcsVmm=u}HT(AUl@;Pbg-DHm({%kUg& z0v}=tI{uXFdV=sOR`86mf%pA=E(p~68)1z8QJMrizn#blXV2Z?v@uf32 zqCLq=FvP70r-V(ytHPL7l>J=$C0wUvUj)g|^B-y;UBB7EhG5 z9Q3wcO9T_~Gzpyp8JBd@+r_v%Vh>G~#Yy1v6_U78!X~RNG&-w5sG}GG`Bxq`O_QwK z{x{sb1WWvnHiUd~*|I4b3Oa7AJj(c!6!?d)dlJ|gj(~L2#Wv%b!}WA`7<9NstJl32 zULOsd6}R8nwOwE4Yb4}(^j0_9 z6ltBm&>hQ>xs=V&sG1tYj*2QhA(1RU$yl{nb8W;90J(fCoiGi1n1Y4NM=@^O-2CayN~b#+(ESZ` z1>PvI`)M%hg#S%F(K{@Du)-ikXs;AA4A^jHdXm%tRQ6qhMx+F%(pM`}R=BSpbb-w!~jNF{0 zlj@eIr1*iyLbITQb^d9faH32C^y2da)iIU8BDjGqol+Vc23lAWmC$95XS2Xhk)z7OxC?Ij`|z?* zRi2a>Zzx+j12na_oTy{P#U6C3<%|{`XIuwO2)4TN*7O!`%db|1?lzY%u1NMit>)u+j`R6)9JZ zO)}IuZ@^w_jyx<#C-6TPR;Ik`4SH_FMnP`}6*YCjkjBD&p(sq9W_KVT!jl3uq%^?I zgkw-h2uTS>bL>ip9<6gQnf`!#pr&skO?vdx>x*b3$k54-sVPA1Kv5eY8cVkSW%`wu&T#uQfi}is%HGj3ccF!!Cx$ zo-rw8q9Lh+jMR@TUc!V6Pfjb0Xe+3&IpzazN;+SX!PQW&kVd<6&213)HI1da*cB>Pi2-O7oU|75{5E z-AUlD@5dVUZP)4uO{WSFKT9E^%au!mJ(k)8tXsF#(`BqT4V%UU7+3tDS>p3%+xN$( 
zXlj+l9B{AatiJ{(OjLLnArf$yEVdIjW^XCQZVP~0h=ZrFvpToTzvtPHpsW@JI3l5b zV5y^iOZ-a>`x?#yn;YGZ%)(L)LMy_<@;DbL<8 z?|6#~l;tdY9;=wG53iwa`C`snEVSLplfpAX!Z*B*ie}L1rOVPaNK&?L$SM5($@54`txEp5=X!4bRh<3KYe?$J!7625IFVmQUqViaMn9% zJLi=QOu27isK8x}nY*8Ce=atsUA$*o=}X~@BZ zd(};uHv zHb$CF{cN;{3oj6!`c69#iXmsLmt)d*BNWz$?eoY)&0vq$lqFi9@mkLQP-5-OUu6>6 zmOg=}Ez0NU<;5U=M(5{Q^_lum^Hp`8IVu;@8Jp$$1TY3; z#Xpn<|I|a=|juK;v0JAYhX-g=`uLJvjcgO!tE5VUbEdkEa;xm z?vMFq${dI!M+QNQ*ZY*$GAX?N;S`eDJh-^OE*3wPVjo_f==3M^oJ$YjIQ3qXzxz@S zW0su(`%z?MEtI-$)ZY+78T^e@DY%TfF6?dkx3>utFlPI1ckP+PdXkUabJQA%sL=r-je?S1Wj8N)s)BYo;}Dx>!+6tCr5fW*)Rq; z-&GJRBM)CbP-k0$EqOVD5SKS8*~)H}-8HS8IdF#<;7sMOtdw*S_P=Vl-=5?zA6{La z$AOE;=2?4$zfrH)0ZF&kF!k3^uQmjlXOyRaOQ1xHRiq7um!aAiarxPrF5G>}oG4Un zIV3SB$~O&C9xzBy=X2?)+00*N(f@&_ zhm|yVKB^cNg{-eH-hyD&qx|n@j5^apDaa(Vzx}N`M(Hstx=ZzJBO>qaOS6Hb*R+eH z#WO7@f#ag2?O$|UtvAR05p8U6Pu;$R@DOiKIkr5RY=E#|j}~hcye=vw*PTI;@XSyu zR5A9xH;Y&|?{=#AKxvo1Qt?l?z&^lZ@u7x6-aNfHdk$a#8Dr;{2ACD2o4P+v}9v~sGV&*jkl2!Eo>&!kyo*J^;B+Mv_4)d#%E`$F8)xz#WlBxd%%_9A8)qN6eC(R_aDR|Xv2Q&w&T zig4^}OpE(fWD@<#P|qQ<=+0<0+wgF`A2GEw?QV|-1)n{ZLZMf>)7GTDHoYQ@joRx} zVbm# z3gbTSvR`)uR1tv+v;qx^3AmCk6p_`G<*oJku?0O0GC}Fb55kYyes-%6fl|Rf%!NNm zY+5g-^PX%K{N}Orm&(R;1Gv#ZuLIJ&fGW5VMexI_&f#7yb>CUN0G5d|| zczd&cxR2>_6fjejCW`7DE4005uh2>+>UESd^s;=62xL3Bh^bhVccqju_<^s@@*p)J z$}@h_IRy6E@P7HI-f|Bf729#5MzDCK_2H_G>0A?+RxNr7wrZx>V|*?LtcoxbjVXQo zJ|^jIFT36OSJIdjDka5ITorf}Vjn$M4umvlrhmLMMH&w<&`yrBb?TIZ=RfK*Qfm|i)aDpthK0Xx- zV=@k8!+Gl!Mn3I6OqNt!cmlq4(jbu!xAoU3S~Z2_J|g{QLHyVfApQ&PwslLh$kA;s zhp4*fz@JNW)OoSxr5AXgq&4QB5*ruHd!-gYJxy@?W%`{Kz3lf+ciACq;|ZcD@AHL8 z23OTlSnU`*GJA`}UgzRK26KSMzuPX5Y?W;XK2uGfKjMA%}qD^4^QHj`qz^Vm*wOB%mVoTBnY}EDJGC&?z99D?eD1Enfv`_ z1EX?Qyvny>22$u@CR4jo4H@qbMjs89GT9$!tVeq;5B5T44s5e=@I9it0QrVX)By+r z-GJI>XbX^*tG&iZffJ5=3BD)Uc*@;hF37-BRUF%Gx%JyxN}kJkwMDn|zy9ic^; zIavA>Vc`RN`agToA?KmtuNLiT;b$%?vgm)bQI6OK z}k)T#4DPsen{)Le(=#LN&fdHLcKzxJDp%O%zNG^vdNi1E9o1Q zO$&2AH%W7NDXPI?AqmU&>_w`cg(5gMgsSO9(eUQF%lS=M%5TQPJx4;@m+VXM0pDcD z^UGiwhO_*d6#K>`(V9B_n|Bm0&Is(NwsBJKLUbc?q|8kf%%_eSq>0^R;B<2eP0XoN zn*A>eyW6%Bu4~RAx)1W=r{l1R-qC%w%h$)4@w?f8xF5HoPXg(<11tJZ^J@ z;5k$4P8000lFNoo3wgN_;HjMpj+pO19I+c452zHD1*a;*gmL(bhm0uciz;wTd(Et< zUiHdT!I;66&W6OU98#ZF0S6YKlRxg7|8Phw4djLp)xM3cjKnsu_QZ|+Fr}Edtk39p zHr(j8H~=C_E%rX~DyF`AGWp1q4*jo?J^(V1q0i*C_|6~}-|?}HEBvM3OOM)(roHPFDgHnEs7xjoF@o;{w zwVWq{F#Zha@V0qRnr7GK=?*4`v$#K{rVhCF+b5n(1V>XY{6{6Jg=L;;Nj?Xv5gCLg zJtN}Xv@VLtBQQkC>p#dN^jDVUp^kBb3jz@3m&V1yLi#E#C-^t9{F&sV>L8!#O? z?Iceuup=;QA~ElBRp$VgnFyL4A{V zLD-&e229JNM)$(KwL)k zucm%VWP<>Su8l-;HQ26?tp44<`p%BgJIrNsJJ{FalhOn05zMQ6-L+4rw8{3_OJ;Hi z#>`XQ<5B#nY{KDXzA2f+sNqM*ym(NF*cz*0NL>(6oX-(#UuQW}XIMu$Z$gWt%1}29 zjBk~eFCzaGT{KF0K)EbV$VJ%#^xb!4W7yq9bR0;aJlYPVMoR24{ zm5Mb@BtEvBEooG?OA%$N)mV}NjxR1oBTbNly3dp8(=<>W^@GgKc;!0}3YM5CLzC)z z@vZ0a2>!u5l7MA3Y*{m*R((c%uIrSXgKfXJtilR$pyZV5B z_BjR<WoLqNf*j#MRtu!HIv!-YOdT!>~V} z;_GgpuM=~=+8lzBG&!d}|9`l8r|>$vXbp75Nn_i#Z8d0Y+jbhONgBJcoivSYqp@wX zapUv#-+Q0uIp;39NG{eIbIvhd4IY6pw3_Q9$LV4oeAw(18BnR*1%g23w%@VU|9j|r zlhkJab#;_rQMlNLqy(Vlx3yn$sUh4qIiDU5Q6bQ9OvuFjk|~U? 
zwz?HB);y19*#vz&@&U48E)cB0%w+I}!_7(l(&V{V zvc|ZuTq*yR!(tpJYjZFvwamEx$9&mLlv=d`BLM&M++TeN0d@ouq`KhE{{^-JVU9mdfn=~x-s2>B|u)HIywx(S0eJ)+=-habKEA!=0#=1mMrQLIqyfBRwRH_n?} z5gt$tO!HPAGqnA74BWquPK^;f;q#J7*?V`c#h35@HSqV!;6_K2Z&83`N-Ec%PYgmA ztcJ^W@rU=Le-w~`yUo!!vH!Fao|`_<=L~~_{We>!s%W!vgIQtF$##Fr83-hs4~Z;$ z02S zh~E8kAk%?-Iq>|JsUk+!g0w;u;I_#zJG7zXfoEmA2yU+6U&HaRMyn%q1$&k6lo*&I ze@LO2u7%&*iw45i=jj9%(RnXFa(?AYUpg4e{ZW2QT_nVKXj_66U#9+8S~f`!)!-81 zjvC!)dU~{GeSO~FG3FRoCUTHFv7_c^+kHGwDe8j~&B~>!{ok->1y*BWQMAkP-&3sg z|7*8}=s-U~I>T`vX)BSgcdPBTO9s%s3hp2aR;ls*r~RZIz#^{*2-H~2M-x|!Z3u;Y zzYWEG{MhuVMo#jzm;91C31C$hnIsfB0$zB=gUM zk=;NRglMfTKuE#t2F85-D7Znp5wS^FWbR1jYzsZB+K=>VSV@%Af^bu$X;*hfbjaRQ zS6^Z+g_hvC_Iz)tgH>J7&vpE`qefuHAks`Xgrn6c`T@3l-l~c|OIkFQl_m61#Wc4n z(~}FgwkI7jU40K88Q0lgeSH;r2vsiGjJE1m!xUWWA@>#yR@1!-TBdTuD*Wt-;a<_z zXy|sDm?sE3gp*nR69CKp>yFvkRh%=NKt_%RGIDP^Ogn-zdG5wGizg9z50U>ji5V86V}c$4fJ<)n9`X6@(OMy~=)2Eej01Vt~9ooJQwmqux@u86G~frUpj0(;JgfLGShEcXhIG zfDqt6W7|zAHw3(=!7sFBQia3Vfr9t}X*s`+H`3DZctRw=12?(@gcltCG>ArS3##<8 zGr}!z$56{^^k%A0H-MJ6CkvfuHo%SaH9ac7Ea^~eS_4; zd=;hK_}>9iFD)GS;7$x&>~Dd}3(94wz|xJ%hxTXCPbssB78iY72FtuC69}EPo`{nO zd!_-axd8Sy=K4Pmftd|%Z?w9GXOjLsNWG%*y4hd-3Lbc$43lRcNAy;1yYw(lnp8+J z5{YS>Zg!4?$NwM>2#09DENRp0>JbQfXAQ-?gz48=&|+#YTY}VoGcCKCtLEjy ze@&=e&lvf~UwxosK3lMsh_cL?mqY1zk>sS)SXIWSn(lOXc*BJ8%*!Qr#FNcfw-_pu zU*Kh`BsRpZ z$?*VM=S8xWJJ`qGLjk4@>pye~Xk{S;eL|FSfa@e*RUf0==2}@2xa^py#88MT@%EIa zH|m}_gaxm7YJM)d{wqXE&|(TC!z$naM+UN|Nt1b9sq6(dWO~NDVhQ<09Xl7o`G1ck zkn>L=8#Q{}a<{56fR?rE4_#Ctb#{=MzujGYVeS+GhzFMBz|e?qlwUM(z*}ez|6cDw zN*O&n-Jhxl516&OOya})t%__^>$N5Z4RWUoZqVIrJFwxsEg~7XZIYFjzisygk?pBX z<_P+fY1Nu2OhLRE_4)Xm%e+OQjDZuH& za?7H&a z9*MC~JT_cY!BAhUw|jQfTAaYz0L$~>upz~Ac&PnKj$k{>k>Q8Z(WD~};$fE`3>@08 zm3m8aFL;%7w#jWgITXMVok^$3r6C9df~Pg`Yw@_igE9L3qmdZxcna@U3M{L4iy$L2drMa~pswboM~sa)ew$n&Cip5v5rdLr`ss%A4CuP1h#upDBxS zi20`yjHWTnD?qhmaDj*m>zI2HTfm`d1+1<*9yM5c5fZJAte(yw^U&$LK;Z7aJc1MF zcGmsHiXe?y0%hba;wUS^BpO)3CbIIfH|BOWr2p*l=QHmKxjlpiSATpr4TKT3;Mp|S zdf9VR(?isMt!f1e$~m^4n=DHTgM#+#kye~wsG(VE_=bY>%FBTH)HWZcuJn)qnvqw zIaq*FqNCbH^xN)A013w?SQcj{%psUciJWM&raTkodlL zTv;tt3<-zEbN4$8G1GdZOLUI5xtsSubl(zN`WUElt8wg|u z&H)r*84%V)u@;EPg^c*X&)lZInap*S9dgQ9PErv30FUo~k^7OIn#<_P_WaemGjE2C zP77*EZIBt5NQpJVY%a%4@&93vKj6T<_KCY~;R88|RR6@F>T6zKsTHxn_Xw#nGtbr0 z>}{7-9)+jSysslZU(W>)eoqPDg6R|_`zydQeU@VI31@P6Xu_{I-@Zlp|3%3;ks&uk z;#kKx(A8%2UzeydI3Ez22L-&;$-BZiR1E{>$};BS1VQbNTVPE)x?=I`w8{lM3so-y zl=2w^^It{H*}QJdk{L7WX*i19xa?QOfM_;zn?z-`E?->gxbgStMa^k&qkBQL4xj%p zG2n~<&x9k=*_Wa9HWxZ_^~zK!V$66SX%sidJ8sJ?$@Hq~O=9)k3VooFiN8dYB?U>6 z=IfLy$WwEEoXlCy0U2)TxIqtD5BF!>9Px{CIiev#dC-WbN}-=T)B~o^)?2w<`xY`} zQ<+5XPdmk&mQyIB^wHq0fq>4@*+x@eZg?PSEgg$FXK$MVfrRDe9cY5b;z>I-{X#g0 zfg$LWEiS?w)FiWXK3``3M?JZP<}$ zhWw%b4Q>=OEb*=&8R?&^o==@tQ9rI2?efX}wTltU^+dY}Y^2R|jaTRO%s8BZtg64C zWF69^Q>-18gaHhs!3g27i}R_fcmQ!V*XPivd6c!0!6vM@Q!Ac*hFWXSvwfSSEHa zROjbq%+n>8h#z}})>Eo#IUGMAe=d^mcm;_MSxl_5$t+!^4Onwj?*l^#uhR2TZnsj2 z9B%8|EFPzUNrx-6`FL8;_&%`bg8%+fFwkP^T^T~_X#6SZ-@wACbB9lVP7uT6IhUU@nE+aJNAGc0 zodpnsja7s4ou2EKYT17jRpdlzq+|a70#RFAZ&IWH#Elo1NMr@|{Zqt3z(+(etKTB7 zrHLZDnT&$zQHMxf;V&cbnrRw-=f7;#il+@Sh>v+eTGihG%MC(cjBvnQIdb&h3V<_@ zRT>0hd7%Csv1njRJJ+|2%Y`q^Cu<41R%@&5I_k!|g=)oit9~rZY7-~JSb_gYyGHx9 zP%MTtdEkAU9lAWqwRGkjNg&^HRq>N30NkeRnO}wf$dg=lzj^!&LFAL(0*1Qa!||sG zUgMfnpX*&po8=lxR|K|Hpb}* z7WL053e)$MD8y`)I=%oF+HdrfW`5J6-e6}z&?^8s=Z2rE7Q#A8wtt&NzgvixU%iE{ zf>*6qP}Bnb`%-;s&3||Qn;q!1Z)B+M@f2%okB*0lsh!rU$~eDn{$}o;6je5^=ust- zu6uu2Bj#|~$1y-cq6I5b$a%$taaLxU1v!P%1|Q{CPullk(V$F>qxtsPV~|Y%X4;dt zM=5sN(vAHx-SsJrba z3-A?~-DGr>jjtolbATB&Lod5fqqs9wp!BlRBNJu4xY%wKia^`NxcMF?Nc~4X+U>bp7iN 
zhd2s@amPElLEdePA73iNv_V+7J*H-$rqRqntE-3G2A^BrDgix6F>oxA>-0TMuh!-E zKJ7H2H0x@iIA7$5r8w`OJG0$1-747v3ZS0}7yz>s?)`s^&983LXf|KC{37^zl6vZk z5~ywhx3bgU7g5UnP5IZ`m(1}#oQJ^TeMlv zX;kRH@E*dN!PV=gZis?ol^|^T-r7Gu9XS$m1wLMFS#oU}SR#Gy+Rl_UwBlD&^E3sk zX+vUl_Src{Y$FJWD#h4jk~f>f>NL*QP@2VvDE~N8&#p6`0YQlWfnIl~llDd#lT;+b zcghqx#x864`Uy+?eqKB%>Z52&<=OGVd`y!<@E%7!fz3i5wWUv6#;J$0(qzXEU$7Gk z#t3~g+T=EfIgaLxJyGXf3OhT_9_IX;qGBIZ(1S z0-btRMBR^9+Vz$yi-*$4+HP{~&e{lk=W>Hz&jo9?t_%8KZJgO&;U2jzC)2Tr2I^>o zeqJv_?w0?5&iWBIM0ikcfKU*yWk3vYBwP3lcs014$Ts680f?1J2993~7exbGll@EF zQ1nw`7@!#f5{1Eqr}gU0ppJBdO%f?&tfq4Oe!gzCdJ$TgK8xulTi#XT1==tE#C>aj zMtfsfJ)~jP11Q@G+(GjQU#n_O)W})QeJ3wZ@I#* zHtZBfSshcQ$;Kh;4tOIpHKa$8ZTnyl5=Gx^hQIpG2S4n&E0op^s4Yz4dQ2eJfuX?y z%ZTK*=$xi6nrFGxcC85tlWxDGY%~Nx@i#zDxq^&pPrmIX0=w6Ez8V>HJ`+2H8NI)z zkB8|qcU%9y?{RMdtAWZLmk#Ov^{d)KEani8<5@#nN`#{r-iUR8TMO+ISPQ1pUrDEKK&YQ zL5CcohPiR-v;3L{(lbXdbfe`S6kfK>7lThNYTX(wy`ho!l9=5Rx{UZW&`BhqII;WK zG4;W(06V%)J}`0G@t`AB%|S&v4xzh0ofsY4w6+)y@dfs&$D{QpB`u8!GF-jz^lq6! zXQX0-r1md=fM08|hv#PvolRliY(|&b5Urg6G+I?)VU1MMvs! zFbAE#M{J1k1W@hp$6Yt&P@-b$rHzl0uql z{NqEQdRD#r5s=q`c@e^>niXaM>VWC)Y;}wEA?0h?v6$AGI8`X)L#(@wWqzotJ=(!R znsxjosgp&xkjk|nOj{fyXsvPygeTBaU2^$-C=z7EshG2l1giL+6c7W_8u%w(%64cn zsvwtX%@~U%^l7P}u-or7)+Dh4$^w9GPJIS$I}v(H0Q8+{a0Nu8$>lB)akk`8Y)w2o zD`|la(<|L((Kjuh5jo<6@O|h6TZEQ9F*i8&om(v`C&wWVJNNBl5EXQaFdk*mRe-A( z>`=j#Y}Jh$R&iA-vR;W}IzDk3b65`nk;xk?J9Sim1o68KYWUtB>U$KcWZ+XUf!9T1 zq*1|}F5}E5jGuw?huIUu{3E-iwF|1_A$EgPSt47bk5pt(&YZ<*7=V@8HkHm7!ApTO z%J8V@TmZ0E+bQ{J?a#Dm|Eme$ zc}AyJC;?n%-TXE?ycZ?@iV{-}GweN{KZx|;un4Y4SMBq9C~^XasaL1Lm^h}Pk$`u~8N%$*?D zt}StpvDE7yf;vcwzx+d88f0p2wcHmgW?)-$2PTP|)m=&HLVjl~)gB~rUTOB-6})fD z;5ERqB+CjCa^Au@RwC@#gVBE_Y-qTK-O@<+yz{yX+v_+d zdLkCpD41o&Xc~JALraG04$X2!1B3t1xMDf5R*NNE@zEYKP9}L7&GX#TV;Q z$MDuwt-*{3zGL`LcUg<6SEri*sCF;+RbV)ya267b!l<45IkTHMRMxEEkxHP@-dk_Q z`?m2&tafqaO6b%YtfnjW$B{1}f&wBOkD(R}(bcr!rfhU^VSgLVdTf%ZzVz_pBj*oZbt++W65jD59n_j5!IsAF=~ z2$ZM<=0-*UicnkL8IAAN7WBQ@w^r-;sthv+{<+W!a{h9)G4*pC(H9^ksCcg2vQ$A3 zSj)wo4v};sG#48YVeAod9@U{&t>5l3qpB!KDg5dA9%yNghhp&MW}6`?W#gkQVg&ou z(+dldl%qUdAXDCfrB3BWqFaMcmT~;@!w+jkT3ySO9OUuf*|8C-elwC>e zbB;Cf_Z-7@(>yN3G~syHBQrgog5YCPnNhSqcE#%GnLe3QlZ{T5oRRX2u`uEiT6)W$ z=1wYU94cOqa~w~#R9aQ4{;!TiyX0m!-_bFM>D@`_MYh6eZtqU!I*!ZAXx{P10<2tf z1Uwp9r@PPsQWGiUrAJktxcn_nt4*Rm^xf0>w-e7a?~9}F-0W|gB4aN*MZ060y~@Vp z8Q~gFlR{+rR-aF%1D32e2eb@c=Njkc7QT2|KSF0h1RWOl|PHSfAJ$QpoXj!}y3^OFKNrwtn z!V&U;8{?|1Yv>;b)cE*GL@A&V%ec^YD0<84_Amg>!x#t|#5)*2X%R*{JQI3#g7wM$ zbo&W1xT%x!f&Sm0g!&dgs*}i#_i8V1@llJ6zyxBcRKbF@ky-KU$S{~wC0%=wyKsQ`+#im!D~T;^q)_1wuO?BB#xu=T(obi@ z7q&9A_NURWUU_nVcu`ne;5a`*rbkzjn%;((7Za zuW2;KhmHqC0(V6DJl0#{5EjHG_+IhAY63nw)3zZYn|$uTJ_D(0Y_5j{xz-^v_)xXZ z4HQN(-ot-arXT1V=6>TL7Fs{`V-uDSzv_guS-N^0nhYf0src0S5haJtZ5(tH?@+;y zsZA(sNNKpKV2ko8_GU^V4|N$^V)iohyGw4J??SlG-CF*-H6cO$*$fd0yTV0G5$vef z3|9LrB(bW*t~QMUX20l27piZ_Q}*dcm5Sq#*8IYjVbC(jxh z#s5(`^<}wnQA+oWffxt`ngZTE6;@NKxDC%RBO5;(Tq#f0O%)ds_2=9+j+NZn>w1O> zo!K@ITkcn_l(7J)*D2=XRP@*47lQZUFq6wuTipMh?}+_ssqdsr(PnEt-kPJ}FthUG zgz$c6yKp+Z7G%S=uhtl|_wsTOaKa}+z0ZdA#47Uu%Kdub%#5HO-k5@r2RmT7&-w|~ z-?MkdW~)hw%)BvIW+~+us$R^7<$6nGO!`BEv4?8fVe2=s%orIrv63>g`P(FFD*$q~ zx_GTucvI={^n-jEw$&r_8g)*u0$% zb_r#ZHdnHKbmheG@8`2kS$NQhIb0RDthrs>=o97A`mj+!?$dZ(#9!}BG)y7Q?7?jSJV}*hQ!0innkXIru>2NJSCjES-VT+_ism zV!FggVK~ykvHR#wwLTqgdXsmnou4mp6lEIUbF4gIeQ0KIya^A!tipdhZbyA-^n|@a z_&Rr)LTj~F!DFxywDrU6U0h(w+;P<+8Os&#%p6D(`1g68{~rLKa`kH)GWvWY^}DrQ|mMwb6=*TQ9&8~UZBANs{uBv97H!5b`vRR|6>;c{6+?l zb8t(pm4mpEd#n_*c=G2VMJq6@WxlUij{gW2`e^)>iT6T$a0PR{?eV)+7Gwh+ud=+_ z@Aun*vW)50q#XH=3oVy5nL>3~?M-#z-9}jzSw>{asGLt4s|V^B``%0qHU-Q|SP4CT 
z!)AjgKm(0Cy;?__T-a>XQ(!U}^T#AxXLNGGYlyDG(Lum!yA~Dfn%DCr#J?P#;PIxd zKGWuW-U|PU;{ZC(x_VHFREubR{$9!IVpEff+<|M#9OaR&=Z|c}XY_`)hSOILLP8ja zeCr*+_xqw6lUm@VF+Ru%AA4&X*akWT%P@&quH6%+)2^fF_3fEPqHpj#E9MF z6D&|TQqVWuKGV3g$C^@MxrIYq!z%VldbHjaDq6woRE~DKVF+H~Wt@dM1)LWU3rW4S z)a#(-F4GDtg;pX-06&btqXTI;c2ACIf1g1>>)1#HP7C3lrPi1G@RT81h(|T3-01%y z?;EGo?Zo{S(mFh^h)yOD)&a9M9*5Px2tLF|IB?lKu=X1J)ec!e(K7%2MI1lW$k3`G zM#$|L1smfNrWPe0vyj{8%FCtn9c7|02ngy(zZ)vh7sez#@C zVqQ@Dr6WHJ{i^EO0zt#qFni=AUOHaaIyY4b)WFFWcCK!+6vL4|(DE9>bVE9dM7l-a z(_AwSD03N(<;`M52ytAJm!{jHQV&UT>|&#fDkt``**aVOXLhKyDV3+RtsA@Xv;pF4 z;vN`>VXWykIRpv&ku-2=@&~WNJP?+Y_g|Etwfkv}V+f{)F)!rW73yJumj7{6`Er(W;tJTuXG)pkNoT~s&Kf{6Ij*en zWk{=CX`^k*wT-9PZlw<5{(L1J(C_!+V)rbALA(#kq4DJ0kc6Oue4YSpg6fgE#~5#N zj#o+DVvhN5jhUVx@)m5srIh4VRAtWLg^C2S3q!z_hIS2RB)YtUsnyf;;}-XWoRxZ> z2E-dk^L?Z7PJBf$YWJt4&ujyJvBw;0f6K?`e>u`J;eL`4{nimWt2#0CB*AvEl=(1b zevrBF$>)E}c5l){Nx|#~w;5f_d~kM~r{8yI zrocta->DhvG!pm+(lv5nsz{Z=tBcg}pVCIVg=p5F_x9c97V>3+OsYH%fc{)TSSQ== z?q8QuGJx+d4xo`q$5=#my}ur%VAM}}$mWu%qv7#%;Y~rDTq$~2&rz%^2O^wiWY2|t zd;1FT(f)1*uyp`W^kUJ?cIR+L*>RxdX>`V@M2cY>Gmft02xv6sk9kuIBMrk$*h?Jk zX$~EVW&s-q=%?Qun9*|qt964gMlqR!#~ng`7Pf)a=YTiPBpIDzw9&sr^Vq98U`$MX zy^y_^F`97iBq)7@&_Cb#vE2KGG#01(*ir9dn+IIqm?7ox+ ztWM+UNS3s|S(C&qyW3k}h|PB%KfAdS-c-E>&6~%qk3)ryu~bhrY98-99{2O*B!I8d z#fpp(lmd82(^E>*7C3PBhWiGA4M4C1gY3yeR`V^&3Sy7DL&6{UrSAuFT4en) zn6YHm-xmSE%@h29DP<)ifD<qDCG2Pm{eDA zyMXM5%(XEw!(KPYjzO}t>^T8v(1$k24#dw6D;bIoiaIo|=O~%tt17nOcF=+5#@_tN z=@9AsJy^dXx%`4IDGm)3+Nzm!>QZ@o~P%*rB8F&YmXf5RXN|_B@C=KQY>_=yxCn$wkT$C{d9j{ywx1xO%PS# zPH_1$d;WY2r`g)x=B-QG|t{6Y%TN8}&| z)bmS`vRBfalyJ9^mbfqaeSm!g#3G1V&uQquF@AV?tFB{|_){Btm_tDTk&S8OBhDW*1*?lgOhV@)!ED@++y`>!EkS*?!Q2bUQkHd=>2%9FPq7Q$jwiZ^YY@8jcX9z zCR1l7(~3@BcmpoE1`u`LOKZbZF}-g{fRt9>K@YIvPSKy}i&#JmZtrGe1P$0Q*Ot_iVRIEU~bHvojaJii=E5 zZI{oZgFKyXEd1gUMf@d$asm-cPSXQeBwOvcwdC^ z0KsG47=99;-`m`PSx-0ESgKR%lYpZ!&yc{VKjWJqaukkc(2BKh+G=oIlmOyaFSR_= zvE&-p&TUs2VGexyn2Jp$kSE!PANa-#>kbuUtC2@clgH=TJC15_v0#T&N?_|GDzY>x zmMI{;FEdW|{$VQL+JIoYuFNX7xm*`Kp38g!2a(sPPTh{E^*!5;WDGATl$pH_(gDM1Rg0g zk`-lAU&POwOIw5!Z@*Bs(Un)a7w}|;R z{>}*XtN?IWe{EH7f{}B5js?`;vBqs*@&nf-|?VELKWe4436hQi)Rm>3=9|2U#h85VcwWev# znjAOsMun!o~P|{T%M-zH|#Houb*foB&Oq( zr3S6=3d*)~t(;5T8*jV7L9_YbkH@l}@;>baz!AH=Luzx%_+spCprBP5`4hit&4~_5 zD=Pl*4Wn><%HNzUkI)^=!heJl9|qAo{2MzXd5nyec$4E+Hj`crgo6vOsl$6EfZ(AGtK6Z}hHs0CS_ zr2t--i$xywI|;n)C`kArN)dxO-M9?HDzZw`b$X82lO6p9-VrGjUCQ%jBN!40X7!V4LAi;&6i#9IF?F z3iidn(VD)S+i0?OWPpT8MG+Us3rXVWi_8O_Y^W8(Ai~wGldOodKj(}9)J_BPEe0_Y zY-7fL>0;-X8{5~Ut^TYTYwLMM&PJX>c;A`_hqn0cs6K=s*|5kM9XGcIW%Vk{NS}W) zC&aDaAcmqzeZbOWbd_7}L% zCezSWX|oG+%GJnX$UPYeoO=77EnpDhks>kCkTrrb}-B>(DJ)<^*O)m>54HlcN5 zSK+6TO^=)i5zl@wm)ThICFS)-$`QPM;SHd+?(}5Gy?1De*F^5Z@Q$spQde69CU31dXslhpn7Gu`ji3%ZdE^S?s@M@eJj zmeN$Swmgxq-ucL_opnm_xJhoVINJh8FP$A$$uG`Vjll?8OGW{=FUfBixZukb)te;D zigWKzUXb=^yhetM56!>2jHf?Lwh*a@on~z<0wS+`&Y8tq4JGtLuMlPw$n$9jMz>rU z+XZFj(gKmiT2Fb)USKBuRlXLD2HfCej-$n!eReahUdHcb^2AKwWXA^}cIIa0$)u<^ zlgF@eUy3BY)X?ASDMyZ3WB&{XXIICeB;A0gXK9qpHOX~1lu4PC{;?qTXt^cDk1FyP zd&IDC$>m%R1?bn2f}txLG~<`1zKDeQ?-JVHCzQ3{MFt`As_o)5pluVdd#z^b2YKGZ zxxISNB@lWz8(IS@DKp$0z!n#&2qS%x=K(VU^4r}E9G~+sT{G`$@zQyCLpZb5;;Yr$_Z+5EqnP?ga&^c?1`9W_J2R5cfnT#Ji!-mlb_uoG)pK<)$ z16CtI5FgRbQRMr0rf0C!r$pbNev<+7-#grrpz7^7%!g#B@Ix=+aU847$|UoP?29OY z>Uo}bCml5dxE#;=aaCK6=n@OU+-)6*-Dxz+BigL2&%Ta7D~|ZhkFvbswAcYPsmX|R zNPl|d8TCKte1ci>3@GY5iS2+1Uu4LPe>TuIss8GY|B+6g&RBt6Wfc^s$;%yzaHjRn zwNnHf?HPUSC;aZgGeY~Kak zcI)+02smz7+cSNdR_6T=ROz)%cr3d(vLfK!Za_?)B;r9xdi0;wNcF0%d0E%B_2} z+7Y<|>yM`Nj$A)W?~R)GRMK{2lEDt!;Npx3{a9i8aq;{q3G(wTu@{v1V587T1^+Y8 
z`Wcu{A;<1Y4&#xPS{&yq*5pu*ZO#z5k>rt9w+`|*2oU{RW*FW_d7;OPtwo?j6)O>w zKouuZ9lzw67hRT|ovx1#e}hEWxmVK6aqb!K0pcBmAc%fZ<`4eQeAw+TF4Id}xyU}4 z#OL{ksRdVnAM{^UL%hXil;=bBWSkUB@yi2@wj|CnSA0-ZFG9{HzO5j?%@5*tSa);} z^ZTv-f=;%YXgP~46TgwJ6wi7`3&;jkaN>fEI;u4qXrU;-jWlg{D`H#p@kY{~Z(EZw zmQRhZQUEXfyC4TO#;+c>7FHnUD zg&w0Bze3ZVLID!DU-WA0ThBrP+KU1bSxSzGWyfIb%Geaell8Irq=9N4wNe*m96#9~S>BaI$Niv_~YAB7Kr z=1j$&7o{`b5VC_-wR;84t9RPK90G=6MDAmBi&S-ffOmo{F8v!-SHp5NE zYepuKk|L{O%os2M5uSIAImhQ*JZh^yCFsIwIfJ{u{J0pHASG`PD%#&yY;q37P>C>l z-kry)D7f;|*Ut^zGOd5^);v^QPhV5G1|XIR`@XI@UsQLm>MUrdh;M)`!NNm1E=u=8 z6PXGA-k)M*U>vh!agX*_2KpEx&godlcGN9 zD<|k*;dB4BN|ngqYsOripPHE{u_u9=f+2z@RTx(*yA^Y8jizl{1mB(WJLog+UzE(( z(Qc$Gh#vT?O%wkHKMST(i0XX*bwpdB9Z0b z|H}f1D_H?E<()j%;S!JM&?c+cprbz+&kU-VzuTB=MI}_60-TJBf<;_H>mV&Z$Kxdh zzYgxloJBmxF)^JtXgb50JIGP0UgU(&K!rRbs^DqOp=?JhBGtvX2s|G31ML1m4&tkO z-qEjgJRs=+;G5X-i7q$*(j7eho0}y|Na>^II}vmmZ@?;;0il(IK}7dNG0uS?z-Pu} zK7woQl7VGGexo_x*Uy3 zwNwN1UC=9rx_kfLEh=kt#)I1-cOJzQCql6zc<(BFXIONuS)J#9=!KX2a`K@gB)f zT)udU#V_L~ig1jw=Y#Al0WhZNTM(!N{>ne5e^8o9wu!v<5nT@?=(gyaC=(GJ^%4-T z;za(-J~%!AO@EJw$Om*W-{!qwD+Fy986xB2N`c5sWdOMzG43_CIjzD;!}^&A9EAMB zEr!Srlfnl^pKA^FQH4Sht^~HfkTn+}#JPoWK2^o~v!nUT=b|xU0dy2=b@OvU+QG;H zbuvbgJguEHkB5kBpV1y%i~^t%Rcq}+XcZ`;QYQ+sO-$Gc`qea1YL1(q3}w7?%fCjc z%6C3T=Fu*p5|dHVTa@Mwt)6dlT_{B-Qhgc6bz(gfu$|J#{8l*vUoMs3M3%Fo8KBZ) z^+0Oz#j3f9^>ap_TygOxzH&0F?QDEm@_4R)y#LdzhZE=-HXEl*uUNt15?8(KDQzYB zOfi&QD!uuMGOo4?EB51FWll*HkMBf#SmGRsn93Oe!NW313i?N8XLdGThYfq{wi=F4 zAo`)Xt7%JQLOgiD*;^?sE`-Ujz-tf6Rji$cSn|Q?ygrLsg2D&<5Yme-?ALIkGg{@{ zZ+GWWGs)ROHJjgd^fhLJ2%V9%S-5y;ZuS!vNt1}VfvH{UD3s(MW^tC9l~*0MO0aa2 z$*{>P0P_*F4+j1W{LOUWB~EZDP+fkFiJ28J&ZT$Rb-(+UzCxK80UcpK0RGK)zk`>p z#XM5NAGb$kM<^xkG{IWccYUAhpbMlcKQMKYp>mBBV!O%};=v)E;6w|`1gtsWRAFV@ ze5{44kzeVzGbLba^Cs$?V)978*0x}G-}pnurZ!dA%ZRke#`*p}9CWfAC_uPA8-s!2 za3Rr74{6MITR2Z=4sOq0hfm~Mz-YL_gWHg)IrVRi^^!B!8JmU)rZ-$GW*#tT;Y{G3PpZshR| zziWr4mciCSNSzhG*^l8S@~yzGQ0wm&sx|mE{5cFxymDX|Q$xELSRa7s&cJ@#sRt%R zquSmeKO`1dIi<2@82V#uFvsJOag9l8HOx@HNYw9XrP819uCwiDFsMUmAfl{awKK!T z$wBHRA+}Exa>U_2Z_r8(%=4PxZ0NAcW4%8;gU;_ z_b@WNE{})wjy$}OTu1t5pS_o}>Sck$%jhU~y!1;ntSx1iaE+IBj%zu6@_5*H z*Wo&6#sI-vo6irX_TUyw`ht(4ex40|qhKdy8lkR4IU|%N-PDgC z*2-02&ZpeHzlEHdDNWsVxK=gLRH>=UP`3IK$|DMCbC$U`{% zis~wXVdlmc&C{zS$@j)Q7W`&DPU;9o)ym+BfO!(kSjBv50#;K%-FM4P0Mc4+0!QZj zbYlS-&fxSld;;<-Bhq>Y+~vP(AJtm_S`9UB%L#CxRJc4pD;g|}1SyVQ{Bg4ftI6>m z>HaG+rO)07xS9bapaqw}sx=^%_j)z6R*+tJWAVnvCP^qZa7MK3B@>AfbG-aKFxvh1 z^AbcpS=W;;Wtl-#@UF%S;jDiv6$gZj?%&jD7~K8&E5!lt;w|puxs%@mn}i?+@NW*! 
zzB-b*13Ly9FxaTtE)=DTmTazOiL?-?)B*k9E&rx~S)MD0hHz=#4{mPXzMq|Jg^Sn^ z@@ZQyg?(*vnQ1a)sjH@|Y{CaCaq(N%S2Tjz|ELLZ;XPg}uFhTdB%iVz|2ELX$rZRV zqDFoLJ_22K!StQgBb5rr*}Lue-Rku-tIc#t&6*(oxBD)Fx`aKXzH^dOx`6OrZT)%$ z56fkfZkkeON`XV$cG4%l;}~;?`ob<8ZpUwx>sZHRKoFq5n_P=vYy=!KoEE6Y5OHPS z$c<0SuhjD0G>D z5GDobJG$8KNg;M0YCthI#j_-1+YS7_NJ%H^sj8|}00v+Po+0p=_opq%nODsKHk!&- zwX*c6Si%m8Qp(H_GS@+!m9n$=u~&@}#@&s#F$9cgNod0ZUz~af{=Mb!IQb1h-1<)b zKWnAy;596e-9|&Cs|`04h9mkZjB_)FzoU51LoQz)J1i(ViB-!3y60qbB&Uq-55D_6 zso_kKKp9B6=upk#zKxmwjJp~pD2u4=9lh(-$Nria=l`picL2@HX%=4Q*oJxEnHCD3 zIY)0Ir2M>tm17J86m=6?$U1(DHa>~|jBL!o4o6a?vV-dZyeNpY11E&jJGj9{Wlind zuTsJybStXU-Hti4FKCXyRC%X!eDjSTM2{3YO-}#LBeoS_mNc8Nq*dQ};84Y?8;WA%MQt}EF{Y21=1W=r_~g%@*2~qDYga7c z{eQ?=J!0$k6jtE0m`Z|Hoxg(V{JDkGd2F`d%75$ydtSc%?mcglk*o8_KwDw|yp&?i z;pbK71qmugaY7TtnzdeTg6&-IYWcp~JpvE2d5~@`?kO5w+s@JZJ?R9T9(U+rMCxIU z!w#xbXI#Vi%wPy_nvLfTIw`H{TBTv+kVN@cssmu`?6gyaIr4<{j@O_c8HU$xThvF* zM_C1YLZ0L(pPhj)t-h5cr*(9O7lLDNFVtz|K`5$-PndpobmYxx5G#uF-oMQAYR%t(pAkwGIt^}ADqoiDlx+TVP%!n~ZY;}^6;4Gnf6{g(eI?i` zGBR>k#!Sle*UiY3KC2#YjIW^ezJ?CxRtd0vTMM<~HH+M}Z#U-QZNd(b@$I5!NQs|a zSJnyo&^}%<1FR32K2rnK@(FN{?@D|-BW2U+R?5lQtx0n4a2L>B`83sxCn&_zE0onz zg397Q55bUuD-?fGkDa}YIi*xHY-1}|TeevE(reN&7{NDIxxklV4bt%7pjr)VrO)M= zdiUUdd*2fxLLA*^tv|!b3HuaAHwHI!H8pNF)Te!^J|M!ahHwukG#!vJgJ)ng(ht2! zfXT>LFES=r&(-S|65p&wGHiPc1C9$`iy8G#>{u`1NAg^HXU-7U&%XD}N`ZXf4lmJx zL{7{-`+t~v%b>ctCR%snZXrN$cMZWd5ZpbuLvVt-dw>AJ-QC^Y-QC@TyW3swck14I z{!+DnP&Mb8-J?f8148wU(~oaMCG~FxFl+u%34N~dU(uc-`hOMepsoUmgdj0zt#Lnm z8<6^C-3c;!e+TMjz1Ry+?)25|JK#Y3tyWbyYw@tH^&CIJ5yE?^bjo}O=ihw+ z|I8+wl+xK|3O$LfX$0JZ4F)1h(};AfM5BzL_Dpy)o74BX-|bap2(&)JueFcRm~&MS z2~H_k6kI|HtA=7N=uTLCO8O(E3l_LErWrDxolOpy4yyfQ*WVXuh&g`-wIJv+9MgKk zL|N{QsaYpVFwbq+ZPu*DD%Tl&;&wTv@|s?3@Chnk>li}|gLfyvLv4KV(Nz&ieeZX{ zWs9qNfV=b{J7Arpf1oD<@xuQ3s}Oqj;qGI^c@^|#vH0wB#YIlcFlF0Mgm~vt_;hG4 z=Kv}ANIeUq{Z9MCd?)Bc0=BKp@aYGxso7JdKm zIgH($8MEj5a}{%N`L;eR;F>VGfUP=BO1g~p4GhP!G(p#NV(vc8`HZYDeI_~_=KEHB zhIlWBNl}%4j2jYi!l^~}QB*wXKt*wWRjSvN>NqYJuDODP%pd^gtethiw>E;4n)4SS z8Kou^me*3q(r3*Vg_lRpS@&EajYps-VD*7n>F|BN870}7{lJ{Us957WCb|qt$W`~{ zyxv_pwqyI763e~JvgN&}=C|B!mrvvINz;9vrEuPGX3i8nb(VKOXsyf2<%6?};;2 z{)E);Z_7od;LEzhS*6xF$-!$&S)R#P6C6NS|~#hqfgVe{@cJPss1L1JQN3zN5T! 
zj4O{J;ZXL_{0JYxW%s&#%4Qq~#GBK#svGo^)t+ur8l^OFjpZ;QFM>LxuYJFES;bn}aPIz4BwZ`@zeA|A@1@P;(}?MtuGToniKRb^ce>vf z4&WNqy**V)ifif62v*J(kXI0Gk;7V)o-CvsOLeTgRpezdU~N~(J`45@_nw#8=cLcI zV&5%P6fh&_ejo7cZEbU2H0Yura7n9U?kl~9P#4(XZ2Um~8}7&BW4nWKKnyfrMN;i+ zn-I@zUtede>`8v8&~oRJq@~7Gj0qN%y2acnrQdXb0Tv!X)__FP&)dNmk^tG)v84nBUbb zP0&-YLiZejRsX8MxG!Vtd=Ze|2WxD&7?^;Z#M+4i!$gVZl-ov4n}?Pq85gRyJfEW9 z`M9&%TNkPf$OC4Tq1rp1Zl^fJOmGuMkeD|J89BXP5En{p7#2thos9c7Ol#KH09`$+f8&Tc<{Tqdre;^Lh6wjX7EYGL;Y@-vp8}h(%XWdz@k_n$ z0sof;u-KxO6!dRtD-;L@MS!QxtWis57_a;;4}~hn zj)spz95}=59318P86moPbj6`v&NhGhK7wJF?*pjrPFfQ*m#x79wa;*qJEI0AMy+Gq z4)Xve?Q)LewV>b@kA10jU(Z}Z*0jBba=lBgc370qJFXoiNHTkPgY%j+ECaddcGWME zNt2<7Pu+a*L1oE6_8((!bs_>+y_&z$e%+hR4I52J7=NR#I}bV+DNejONRb}4EP?{nVU{VT{*aa4Y*&`_ae?1lBG&i^jvPF-%S<(;y~IqU57i;Pz_S*P)19< zIvzPH8s3U*@|cNE40~+JA(rKAuMOU-_LgAO`Ig9gny4k_2r&nAlr;`Az|b3Pz-&pf zHDfC467h)N$kawwUg*M$oY4(zOi88cGQ}sWF=LB^i(egi6#qgoW=b@^Y;HwzW@_0I zcudzFH9ut(Ii*^_6C8Ywj{Ssd89$L9I*0}-c!w#eWm#i`P=bBL8 z%rk2&cQ;Uvj`)_t5}`Y<-h1aa?vK-l$621mju&`KHI(XVrRd+i!Wi^cS$F8DBEpj$ z)o#FHM4sP?Q8q!J70;b#%|JIn00R%7syp7=iQd0vSCsxoZ;MWd4r9i$8@rt2~! zrX*L+5|+k#C%lbZ>LysZ=3g6zj7^r?_kDPx!BL2Zi<$OE$AcZ4pRK#HQ#QI~tw*JuJSx zxQc1e>X72q&o7beHBw`>N}8xFRwm?sf-P*z)ol!88QA24D7}JKD61- zQ5;OS--Xg z(Lm6#)ynA|Q(G-Y9*lh*j*$z}pNC?Na5{kM1x{IwUF1j)?xnFrpF0Z7=PlDtMtL=< z8D2jRrhw|w;cRk&$7109Dm@0 zX=Or?$Sm$pPNkU{0})Uq1i5*SGGeKs+siMVrBB||0wS+uj?q&C;>5)cBv4TRhttEg z%eBN67%xHpQ{ropa?!K@ZTsp*og7IExP@e3r%%>pb9!PCaA&b4QehcLxzCW_vln`g z4Ep3$rW7)tFYYBZ+iKo#*bYX>ReX;Ci;OA*-D5*Noz_1aZZzg>^`@dJxAUL9BLD&j z>+1B#iXu_v~T`zc2a zgZ7ag*m_~BI**l`wZu=I7o_S1PLty4C&m6S4}mv1;Bm=0-FCLzM;|gM=o|M2_N<7_ z6sz^8<1f9PEuI$mllv7O@Oc3MUy+QBEt6blMT z8Qk_I@#(zj*WOC3_p}IOUU+(GZ$bKvlSn+rF09j3;enC_ zj2o<*-s$Tdh+rX;cPrQ`uhkpGOS{$@xN`8Z?qZAbwXcm*MD@U2*ML<*;;3})DGrdN zrIBKm*~PqTfO6%kjUXfJxJ&2v6Q#ru?+ud7OZo5A4WoZ-Lkwy8;an(5rA*+g(8HC> zc9;z^rldYLxH?5Cb-QlTss8+cv@-j#n26VI8kkCZM_B;0l*@K^B~uiAxtsEh2BvVl zmp$~vu{X*VT>)?AATT`(d7>^XDek5RK8G{lp+xULp3dDVr5~Po^#2!y)wu-Fy(dLL zjc873Z|tRWs3rQ5BE>LA6H`|i;&$?7(fcDU5<1{!>qN7V`zOKtc1wogwlmS?JIw(- zAlml*Eheb;gp0&x3=v^#v*rCHhU^Eg!)h%~ok|`<3Z0~!KHZ^Su{zmBqeRQ{TbK{7 zAl2&#$JpM!c<=+xQJ=onT7_jSMN={MgVUjnoI#}q9sW*`16g^!_LzyCsIj2{#geHY zr8(mVdy$$7mkd4qirP4kv|RefRCwt!`|sGzV+e*e#n>VhaDb!4DV$;+#|q_({wVXK zoMyLmY@5lm)Tb+ph!pbsSu5mb2H?pY zjQruIHa%vxFBEUN?N<{p1nA0-`#K|OmGc@4kejdmE%BiHo+qK3?@3rR~ zO%@Xmg@m_pgudH}f!v_3GCBYTa)a$u!@rEncuW?^`=H^Zkg<1ItTdJYM2{d6 zm%l9N#!>h2Z@oN`OD&>Pb-4!s(;avGFVGYFMJqaH zx~PNAi%(qQ?10dpXhoX`jy+-l7wDcrW+h|B?P}a2OH*;vKi9HnPO{6FrrP<^vLJUq zA}WM$jE#@aZS1C)wM*qX><{@tc+7miI`;l}=&|SL9ntJr1TGV?q=q~&j<4jR=Mpc0 z>Iv*eFUP$LHhNK12cjx3@u91qH(BeId7voK@pRa4lG&bP?B*&fc~@>z<%!sh)pL%g z>RdLGueOujr=59{j~CI>cVV}^^sBO8puFFGn*Igc`Qf=Y!d0t7$S{jCsB^p)4m0OP zu+rbeN!cZOSg^j?GJWH;IiOT`MNM1M(jBLn7@<_dMt=)JG%jyd<>EJ5W2q9xHqmCf zT-*PZ;_bdA?^ueQRFV`PXs}^NiLYM5@wQJqH=l8itxrLQB)-HaGM`ICWpm;x#jJ(6 ze5V7_tTXG_o*YrB&RnRTnE|XRE$3++<{}PkqvzPS(I}j@C>Z2ai|8Pb2igPm zYd+Zek;dybI<0E+24Wd_jC--m&!m4kJc{%Ef=~bDI(_2j!P;qh!xV&_z4Tn5GEbPC z%q`Fmj-qYh9%Y3{`0>%GJf#tqTvd-IU4QVB2edS~F`zFF;*bS|f4+{Ff`m;w3#O50 z%By;vZIwdyl{jP7+uG=Ry`g&rC}arLvunhi&M&LP!P32FY;L9O!k>U`G5)&vvSV?K0 zjlZ|?`Ake2Qk3&lpdOKN`cvF~0G(aFo# z^z0uvz8gD6ckQd5fTNMrS_YLY8{abt9X}--4o$vfmiwd@uYiyxXTNL2F0aGcWI9FS z&7Q7b@MCMWyTjZTf8?rsXo~F>mgzWQ$!(ZFe*9J9SI@f;6PGH~=HUX)xPFrSWxAZ9 z3sT~2w0iGpPQNmk?y>dH!X(W+L>#8Lpsu>X&%bqH4G#ve(CK^aAU>$GD#Po^Nr4LC zZrwlKnLrUn*UhT4jW+OE1L@)olT=H2`=hV zai^L!!qEWbsY{87IPlonHx5FFGU}B^8Gm@K4H&L(NFl3~_H>}kFU=&pb6qH(Y*cOw zBlnKvPrV90&XR3q^`KE>805Jq4tX7mPkHF3@Cjf&HZ|DlOe@F#*f*m%Uhq#1wC2cl 
z(TXOA55>vA(b3L}z@Kjt_)Uk)u}VCrGVoIP;1I=gG5oTzpmAsSmsdZB+v&{(Bc5cH z3P&A}@OJV!)DT|br~G&AEoqa1QW4_dND%|@=rt@FOY6*=8P33o%YGVAp4~KEZO%MM z9D!Y*CBH!&oQQHcU)z;LE&0=pq~taQsQ-oZSO(D5E)92pYSW1L{tXdqc@ndx&CZLR zATgy8T+uoJv5;gy_)q`NJyOYcGXJ0Yg3tNdMpzwIB6f;(`Hg)QTVe&6VVJ29{#gst9zn~MUPbee~<8GppmDy?$JVDJ1d?9MqzEpokzdLr1T zx;TxfAQfqP-(DD@z@C*d*P8sTrIj4oaQX91brSrX{dz*UYKouoZHh~*HN$7ETjTzL zK9u%gqRwhjMO1vo0J!oEry#M_J-w|;$j{%!KJX{pvaw&5e}%#YVw(#0jq z;Ursl*tQ&+Mcw(Z9voP?_^CvPd*~hRU)k7qtK^wC?&4+9Y{$f$?cjT1*L60*mfIiq zQRzCc@NV()+$1=aBxmo1(y1v6IXNQbHR!D>Y2V)=I11`Do5oa1#U8LY)@O&`>wT9=}3$UCl(OmC!aKdvAY;%;iMlw#L^Vy^CM1M zrN#Iv+ax<#n#-5I1z|01Yp#kL6(Eg%^1OU*OSoFm8DR9;#BMH`&imc40~!OlkV@6i z%POKDa4>Z0T|u_f7ni$X$Cwy>N)a2L!JCsu+YC(lq7{09`@6EXqmrTBex(N71Wb>? zsXK?rSuAilNbU~wZ5;~o@g$ujNya-~AhK}pC!^G&rV8P_!kk@{owmXw0A?*vYi22jl{bK_&UOB%PnC38@L;U&W#^}9(^YDGj9N%GDpXZOvoYD}$hE%RB;<~lsvBt5oKfr{+$jQvD+X3*r7H)amz$nLSh>x4=qmTIF?HZeqF z+NJH{d}+z%v``noJA(kJZFq-6^4ah${UGTvi{aRP#)zk~=IME{Z!D^k$&X)Et718) z$E&f*O_w;&3+?B>qJHhL%QAmwn&VX@?TEN>nt`1JoL|jb&X;Jt65R;gW^8@M?+u|{okMx{O0z^y zQkG(E-JKj~=smPkJfFTADf6*?Dihkf`fVTPJ$)!*F7opb;q)W+@k?YO=Eq>OC|k|c zVa@8CS{~EwjLkZ?S+te#1Etzh&)24oUGG|$KYYjAg4b{b^C3SUImU9i(x|mq9B$58 zvk+zTrB8w<{MmePqEgw0#FNtClRfD|0SYtr;fEcF+?_KlrV+Ud=*~%Os~s{JlAjsM zcDMvGSS=^mM|6q*f?FM$$)m%h;b;Emwe(>-#pN{X8FFv(rTU2SJ+bQx6itM08&A#@Cdf19IBSWAZh4c0GMM2m zzqpuTE+%4+Q51*n6bRsTUbSJ;Ia#BJYjw^Rs{P_@b-LOPX3_f`QocQ$d^lcjHJaw} zId<_3`$b9&A-Dp{AK8I(BUVK9e$TkmH!|FF{x3AFJ3EOx`**6C`*KPSe~s3-FkREb z;7%m>v!i(%mxaowqS$x1^w&#UE~dIidoBSB7c}nXxB{a~Gqz&g`Ixq(7%%#VEShXs z^?SG#dC6n^cEkKjh4k?Ia zO2F?zFj~Wtst*hV&ZHHgGs)TT_Vr>tG@I?vF4i(?lb3)}xeH0~ZkL+dP$1u~Z% zJxuZPcb@=sa4n&^$9&DivdrdpbNhwwYBh?K)mm7-Ggwz1S-o2Ws2VnofPO69L{8#% zXEaQ{9mgU(MPL~mQ^5YD9FM-?AeL=8sQ&>m_)i*H9c zsGnzUBh`OQzbrJ!2v8U-Z<2nw|0a-t{dQpBog0y&1mMs8oBlM4a_cD8VA$xs{!*6? 
zr#o+(i2b2CW+*`nrhETbunTs{$__HFB9pm+rcqTG@bQrY;e)U1yqs3s616MzD`-KR zKvDq9qB9o9DFT4kes+TFNN@lA5_X4=2VjsZi3CjLEVyUg`6ge9L(Ff&P>I_n;CFyr zFbob`-LFv^Z+A0Fz21;`c&0aZ@^vk)2`R*G7Os#7{IqNI$dm%i9HXc44m7W>k2y3xC_y2C`0@1$T4p--znu+0}jg zJ3!{etEyl11fEe~=$zn^i1NdRfnd!bz&zVegmN2ls)x5}fQ0%3}UK_h_+Y+`faLcq}@`R~_LD`YdbC zP8TEuT3hlp zC}ubo0;2PCTJFjdkOgEL2OaOoO0_`p)y0_&8tg-}{vQOH7SH3m%|0+F7uqH#Q23-= zk$tN+nx+M&j6;5)nv&mbbY3esuG<~i7c?;sFl>LYE{Nt#e|dRCCScb7B+%usQ!^_x zw3&~UvFn5Ynq(`vAH==*_Wzjrs;D@drrW_CLa^X2K?4jH+#y(i1b2tQ-I?Gn!QCae zySux)OK^8M^M3Z9+gWR_p6;%yEwzJszw%``fPb_(O2D0lZeti{C!gDpJ3k!2+IDMk zC@uYnck&CI!{wQ|b#@Y&&~)>eq2j;kK~8$o%&d6%ol()+B^5R4Kq^ioq1CKcT2kp3 zDT+`?4tYwT^wQh$->=u40E~RXq`c0)%MCWC^Qmws36o1T2ZxZ{V|&QFJ-YkNeC~Lm zLb)f9>FBsZ(AmKj`hfQKxFKsoFf%9J)@_6xG*274blr-Sc)I!gz$px#NhpsmKRrEzWhz%l8aKPBwJjCnP{4YzjFdFDSFZJ|=wP4&~v9G*|h z|B>yLTuNcVn$Hw)Bny-@cW9@=MEJ3n=7@*R_#U;GUmo~;u++Kpc5fdcN7; z!3dLPB9K^)wok@`Orc=DEb&9nqHVqkVPLQ%e{2HBCS(1=f835X1^gQwsQfu;4C;Py z^V_eXT4A92LD@3i0d1wr*s0XRm8JeJvIQF}KhK2Is;?KnozOCr`ucvL@lK0qN3;3e zJNPe;U40y#B1_}5Pj;{S;*IATxCE91MW#M~EaiI9v`et1ek1)oU(x(!6$uaXk&QEp>iTx$7^(zEQY=w$c9L=wZI z5n@JHX17*n(HW@Lu&?kd9wlun#_*Zy@j&NYi7hCQ9xVqE6etLBJ^1{+N#GKhSUG&i z#Kl*59~jjP`Cs}*3pzO9>lfSsn~roDNF&VIfx=C0KAl%*AIIT;wKr12dDU5Z~@F>}HNO-?DH}fo)e*b8&mP@2Ko_{U=w2Xh#g>XJOnQikt8cQY&op{ww0ujGH zBQ9CRRpAS|J^Z$Zj~88$7X92bl#r2U>?S2x`DB1(Y+4s|H9Fcwh)AG)0Y$*q=Vdr< z%&(m;e^DLb>?FOf2wnL*`4$0O#|Bm@)Ljj{$DN(w419>w$oS|doIpa5y^b#ra6Y(e z`^L5$y}Nt2Jv=UGM07@dsFfpP@)s&_{Nv&sSPcEy1$xz)jpu-bG2kfV zBkm%7soIa7xcacyRlHPmO}J-V-HxFmxJG~X5t9_y1{`2Ql3j`te3SJl5D0$1s@ znIJc&3u!i-*@~r(d~Gs-?3iy0xrBuze|{6EELgCezTw{MB5E=ia(}X$*R6ezJaFUA z^(B^rebiiRy`yTIIi@AvD3XJ+$Vnq}*TB~J~-Y{J zQi`^F-NOTpZ*l1=_O?gS1Z@x7@h05{cGLa(Or5A?ccl*Y))38fh)wtScP;VAG3CO@ z2FuAn4<{q_1=Jeuyfy5sazd|D;(*=i4ZymM~X{S8zWm1d&lU-)wLgTPX z-LYY1qO>lfV;?)@qq8m(0bsnND4F05EnR3uji*aZ<fJvW6le4RCBeH4EJKZm1ucn zUe1q5;6C^|=~#rrohZqgicI0t+_BV#>9%`;ZqA0CMHw5nYGs;fv52^g6o6V4Quyq= z*mOGILn&wWMZxpb``c#5e$}NY*df5j8^z*uDD`WT0AO*$hp!ikuI3u4DXqz7vXqD? 
zKZb{AzirLC{K1$D?YNDbf`17~k_w<=nQ?OQVlgUs!#^|lQ5asY4<6608&ox^1GF(9 zf9`YeV_~gSRLwS^qa!=?z?^vY&euQq(7)qCMY?6VE0;d%a@0zE!wDmdnZgF%(Sf&3 z@jL-9pS@*wj>c^xXcbi=*VK#v z?a%=C_qh$83&7LVTgQRIbE?3n>I^Oz$qs9f%CN0JC2(bzv%fjvy}|#$-hr6w$-Dsn zcpW37YMJUFQ@|_bto<44?|fPACF>XJy)W+U8hBPPZ z2u0Tsi**espeoJ69u1MESCL)!TX|j8gU#_4OZ?zzeOu*+vb36pACrUjHKn~thO9o4 z{Z6CtbCQ}!KKSTvs^KUn+vJ_I003U5Ewy{VthvfWe6X*c>A>)@kho}O6cRKO5^4FY z5q%tHzi8X@D+{r*PR6um#@OxZ8tcOs?a78)tr#mJWgH|k27tZ4Ypt###%-oG8&hwu zZ~7bH#ZS3r>{zMjxnu!L0A4eF8Q?`;(i%|hN9|yX3rw9n9!sTjIP*h-s?4GV(QR~g zKFOojMq$I(7)VD4g3lR>`-wrCG9M!Y7eWl+QEb# zWZ)&kaeEL9;B=cS#v4@#;Vb!6jU_+5`$C>MYdCIVAK)s8_vngYvGxl6oo4zKpw~s{ z@Kqs$Z&f);rnwzAwo_| zgJV0i6izG788aqq1A4RGU{3c>0$5KNc+)^IIP0&usu zH8Z-=$zw22OsASr+8^}g8sTFp;e(n#_*ISTZnc*kOS1Md$h0xN4THARyHcmS3gw+b<71{0l2Zyo5#l$QF+p_1q2ST zsnf!KPm1%U>TU#)zfcKo2)r)3Ry=dfv5oK+gYS;-KR)L?$|f}d7Q2rZs}S_D!lOSc z(>ROmauokO*6@xZdOU*E?wj2Xt2q~Zzkp)Fxrpb z1qR*3rWv2SrmP9xEI+JTu4hw~soDD5c#aNC8b=QnPhDEhj=k3AgaJzgQbv&9s$F!= zEE(TF7m-r{l};`^V}G?>(MZDU8K;Pd$|SjgX^-CwL4WMQnpQEgzo>_cXfB*1uFNBiQ)7#Nf?`-J8RUJ;Um3BB;1ww~$2C8mED; z-$a3^E5fnVZA};9aF5n!d(Xz=`|k`_a_l@ra=JCD=E2Q9$ASG^;A7iQSr|?;-Exti zCOX??S#C%n@petlw>0kgubGabZQDNz%|Fw30eheLuV4`5D25h_EbYi`I?FmKe@NG6 z;#wNgJa)004twg_jyBx8S+QM<>u?UbbOR* z#`mw;ZZe|Wv zyf0|SBe}pj+aOVbk*jpVV5f-*`DaTLB8;`ordR!O+_zbdfh3nNmt+9DOY#LR^sAF4 z3(owL_rxo_A=w@p_)J0jCcBeEK1hO@fGE0&6=w|BOKM34;BL@-j*DldQcL zV!1dhKnNB9*QsxaSQw$ovVA!1os74CH9Fal4FRn)h5Gm1xqd~fco-+p>91+OJDqz( zU5)VwCnQ)sw`X)VgpGoex3$-Ot=&tt{HJE6UgHcO9S(>2bZ{~r0b!l&Oo0nqOV!pl zSl^1DX#`MeCC;J1BuHv+=+%d9DIT;fg}+*}jK)I0&gfE>fHX^dUQwUZ{8hXK=0JfAt35 zC*~)Q7SQ?va856PHhh3$q!v#=gO926;N*g*>@mFLd#}X2C9O24@#ICe!Hr&F%D#z* zvtaE+)Xl+!@C9*d|Gmz0x(XADg^;HC!JuAnbbgcFm8uI=Ui-3Dj4tOGF3+S+02_Lz-jpjyK%){Tf^bP|NG7FIdd0Cms~Ym<=l(3%=6vmmH&= z!#iY5YoEI^3ThR|A%b4ts+3RV@L`qwy^wc-QEbrKC)pw8z#zr?qWb3Zq3LSb#v8*| zf&*AGCJa+)5|a1o0jOMSzRhPf3=BAZ?v+$na*?76;fB*wZ2$3x$~Sh5T=YqE*_CT1 zymuwdy9%muI}SPnvsxOVd`jf7VDr=#Ryq~hS{88!%$lD}@ z%DG}MIq7Az?&m*EU!nUsg;82Dl-kr_7{9z6;NDo0?Sz? zm*bh`#bORbp`R{*Jcf-A?z5A$w8YFHc0>7DsslD)qBj{Mn~jH3GxI)MwI7`<4Zw%a zE;C&39Zr$vIL653P^ql(&>KjrArKO3-`9MR<;}2As&*9*eQ>RT=ctJ6dpm1 zMXQW89rz=W6Bf=VVOC1qzS(3rV@Iz`9)gUG=42BoJ|TRQ*SjdYCv`<)jyBi@#GJ! 
zgC8t#5W+KD*^AeBDJs_ll&Yi5TSO$?9l4vd+}f%?Ngw?wA`vSAHyR`aDC8PsBw_;vJbRg##3n z{=}?h;nqwh7SaphR384=)4uyCISnLwU$i`$JfHJN@?z)v!t3QC4wRM7!geNf&)$t~D?l z)mjARZZ^hJf4x9h0^WkUx}Taz>9vy&p7d*{kf6l-ukrZsRw=xy&hLr}L;;xhDpdeA z<1Z)#gt-~I!(k{MWmT>k=DFBR$+5iOp~wN1?-_hD4m8%=j&|?F*PkLLw?jf#ujxAU z1=-3z?lI@93N(ruxe)suc4J#y#ZojXxRRAGgS3aImY_M==PnZ;-bacA4Kedi* zuX@}4_R0(BKlB6kvYqS<@>;9+u>N+=Eh4pPb-tD0lrQ8if|?{LlsJI}PeR?LV3oAC z&i^{Ap|matJ*cZ&h1L^xxlB6+KJ{(b4n;i+3<YotQVhdkCXqj zr40iXAx5k#8i<==aur+QYzJ!gBCV?CO^t(_#9PJjq`=(UncBD_EuoN~XNf7EWY_KW zc3}l4sALZaI4CrT3 z=8*@UvfH65Sk6;IRYE_6^*Qtm`vHrTxr^u?d?)Ztuyp|~)y}16esp0Gz}m$a z172%+te8FXm;33qnypEV33$dO#^M2@gw7^5i()VPd0kfUPKorHdP z8t>FSnr!zuDi5m21A*{3(y|9rj!fEB_{NM^+wc7fSG*dN4u#hdy+NJBlI4W8bfOr1 z+R1+9mABzh-w;jEWHQXIO*h7OZe&N6S{>hVMs@ZVA|4%DWoGcnu}4YmjVjS}&h1!a zdj@eR%lI$S(947uU7V3shHkK$?m{a7byM}zILx^!=?p=@8Dua=MZC(uh-h%H9Tuo9 zLhR%9==hXIT#urAtBcTcU5^7tl3yKTT4^ePao&FM5%-*{8b9;913}Z_TLv7F-}BX1 z|NfP_Aru5lxKN#Mb?439muWf|YbFHm%gBa^qgfS}B1q>N6+Su}S3QC7dT5vhsbLVJ zkyW!4gYBJyH0TilihHHi4U{7ATJq(&(gaE!s*>NMUC3&gPb{8Z8%K#~(-Oim%mibK z9!79Uh%>R<@0P4KsUrUR#D$lN!+?YY}-XC|t zJp{oy4?Nac0x-_+i!bsl0Ly#a8PQJFVC;H{urG45mVjQGPj|GEH`*3WNby0!0A4m% z8qN=Ffak>;A}In}7^7RNd5cZyZBozzzs= zT(ZuNZlXK?$p}Md+ew|~9qwU{&y<2Q`zf}o-vxo?;O=OWglDbT>$=do03%5q753A+ zpc_f_(S;|PbDkZK0vD-i06wBwFvXw=ldJCyjP1wB#@?jRC|ISK$`LUxD>ElxAfIQbf10Abb=HvbwX;`lgMM&ejgnmEEak>Orpa1S~NgG$e~? zR%qenoz73s4aGRO^|1!Bj(^|Xo-i9O>>yiJsAzQL@3Kt0NDT?C;V7vfa;km zw&<9tH3NA9HROR6y#&vwnyCy6^hP&p5J{R<{KC)2o!XP&YPkxFxcgA-0YIrNVe3~6 z0~25C7Cjn7^O)``%EHg4XN-{twU0ky@o(apCO<|o^Igz$*3v|PA$P-&MRibDH&)~pojOpDbh&k%n48v7@ z22X>{Z_Nz!s1d`jp z&atqn08YYL=C3KoV~p@l4TbZ*QfLd5uRTUE6wRZ}xy>WqRarhN;DL1*$bNE00b+@H zlaJ7^n@xWzT;NtE3KebY<2?5*AcM90MsZK{>s2sqQ^<9@PE_RN>6qr-Q$Ji@$Lo9G zjANgh&?5WGSsOJ$BF`P3n+EnTZ=p#660+z5I{`@e7rjsQ);)reP-;u9@P{|}&RE)B z>9^9DyP9x?4h+d{yl7?(on&)a)E){(&e!6dyo;6VG7naNN|-6ZBPx(FMny^b*<7>U zGS}nGTrSh=jzrtdSbcq~4#f4rvQa+$2_J{SnWw=m_}Tk4BO4N{7>Lw|42IM5xL?Eg zLaR&sM=>{xy(KQL$MOBu>W}F}VUhQ1vr(b!rcai$+AVo(kMUiN576Hgr!o{1w`jWTq)WLE5NHfN3;a;;L?v-^+8|6ap8CW3Te{b zCLz4SKj0stl$*qRRI#f?;A@>`$-tVOU!^vG9d_LG6j=oHR=E*n;sGr0h?32Fu@x(YcY+^GMeF(5bxv;j3D^R%Wl>r5g zwRiftD^*+f!EAL&sqye_1{y-X!#MtgY&_M4wi1kv;C>r7nKT`qfyXwn&vExA!VN=LqUNf zd0d#0`=zDB)ElPsHwV%f`Y$m;BS|d^G;~s~3bYLW%4FJ zFRWMa9g@m^&Y@f&8`$)V)o?wJ{->`1nZMkEB#F5=# z`3nZ&C`R(@)Y+Ox(&t~kwGhj^``cs2XsxiJqW?)8DSdp&O?{8y~M&Lc^#V{ z^H7&yy3H2%x#+)--=Hc?rN~E;{y$LQK9tXI>-rjjh{p8$`A|E5FF;)0yNLz>gYNz{ zuhWVgnkW^rY-0qSL>xWN@AvN`OF4|bbQr1HmDvcXE|sSNhKW~~?si7H&bAz8A{^ho zeWSqN#S!{l+Bb-w?7dr0!9b8iRV?ArIk1gD0645XlKemr4zoOFO6G`+0JRk@3SQz) zSp*%*VMov&4i4;9B@eg`3UlXISwQx1XnD^RivlZqpfjjyZTCDIDvZIkMvpQ_dzUVs z7y+=|^F(PbWVPq2exYoqKlQKYBUK1@$t}<9C{7CH&Mkg4NPoQba>PAdboHejss3>N znI*M?vcTUW5Sf4iUTQVBuarzk`70ON3?;ekmqW1k+}6@e2KQ;Dzr0yI?Ie;d-fSwz{~>=KoIeoW|6+Zt$Odw&Fmf)GY^#iG z=-~1gPbf=^(oOX-GF5yyRE&R2E77Qze1%4q{>DsYz?;G9Tbj{MPB@DSSU;YuA`)WHvYl)yf<&=T@C!$F@Ghi z703qnFYN!%dJE#C1wciryNA0DPD4u~o5Op$&uz4l{zex@tYDxUXsj`n0 z9p$X$EiNxgSu0WpJ!><|1Uo?aoYZhwviM3msz06D<}GYz@nY*{v2^)2n(_+rLUxHW zbXHleSPsXxo|?W({Lzm%=!IgBiEouN$-qMN3)&zQbJGb zy+$DWTr+?DscfNM{ARq_37#$kfOD#S@ee7etxl9u1>e)Lw-r1@&EpEynX*9Z84MN` z2gFTk1Vqfb_J2y*`cgBGPe1-tJ!yX5x;Pi*uNp68rM-7m{YOBMZ8rC1b7H< zyULe%OXzNvSG4{H!|ScZmxh{ltQw3dBuH#8Xh6gB%>8#4Kt{;O_kyf{4G|sl;%J@2 z>|?pd5X1$!CC?f{N=StoUW>HkjIT-0W*MvhU2!kZchq-*Y{hZMA;?x4XxJNLIIKHW z9^oyg|?)|V!ai2w66grzP3VeF;T7v7Kj)wwc04_+jUlzZ6UWo;u z7T(g@AL?dnWo2^z62G~Od;ST9ogfRThCp7F&r_d;gmfD7sm)0yX`X34|HDPb$0KcV zP^UfSQD&GHGy&Mj1;7C=+&dD3Huq}(UnLx6fgmi>uAd4e0JwzhYLQ`S!D90{Ju$cE 
z0_p=Lq8=B_1$AG4^UzlzFZ%E6>tt{BB5qab{5+~J$QKEPzF_yTNQ9Dv>eN^YMf>bR z0uBi5zqZ;BxcI;|6KQ4NqSrQ3L^^}OjFA= zqLUs4QwcqYb6-VrC31~BuydI8!ylC(WMCW6nn3SRw=TC^>v$zvZvEOgBuv;) zE-1^eIJKT>YMuY}`wm}IE1&Ji&o%F0E7jV2RWTREgx19;IrZ4TZ%ffruHK&LdLB>$*D z_;ZJO8)2f6FcqF&rbnb&oJ42~b$MO>fUZ%;^e7YTL=zh?_Fd+4-9s%fxrA;x;yfgqSWm|tD)1m$UT zcsmKe@sh&2x}_f>T~Z4r9;jlEx#3VgeQh7kz1ipN8%NoQ@~!tUhIjEjvjO~niWUcS zk=m^dn}Y1AbdynHJ!8y;USoEr+W$qtec$KQKdS)n!Fge>KF&ECmMHETk2tnX<%aIbu-UZIx<(|XAf9dKW0hNCPc z$Z^eDY|rT3UGy%b9DrpY_LhG*izcN+bH4`t$Sl5B z_P$VVbHSu6hf*U)!q|aSjbZd}Mncol_gev2@!rO3~2t%Qs9QBn4FW?8g?FVC^lV@d`;*z5sOij(AxzP96 z**N28mHA;N#WN)f!K_wl0E#c^n+@DpYV}gqTB{YD#X6^!<~$vlHX;_pXP>22w3yF_ zhw-vC@zOKKge^BLzJ{Pnx=*e_fpw{LCdqg@d#|G_D=3Hd6?wa!R?wiH*2=7NNMn7Q zQnVL7nJtIO(h$BX;_)O4z8a#daJr{oHBSqPJ6$p3v%<1do5aXKPfaQa{kPr+!gIWJ zM3H=zORRacck!XE@q4Qx#?bN+scDwjjJOV4X~ejlDO`mO=CfJtf-(&thERGP}a zORC1TQuON3Mdp#}5kBjb?b~Wp#pP0q@P13y<+PpSf7xzbAr{BG%bnmS5P< zpW|<~N(f!qYKQ56FbS!$AE=HtF?fBw^8E}J6IN4qxS6L3_@gs2LJ3`Y1^*Gv&$47g zt9R9L!~>Amt&&@69)a-opgOO3Rh-onpwWO=m@f$1HAtI;m6=KadkWO}82^zKw;NM+ zU1Z6ucUMB`EI_9G^0`;5rxnKj?EFW_if`NZ(8hk&w+S*2Ofr{jIW)q0dCN0#%|)YV z=?mY$*$W$jKuUwEMevP4L4I#}sx0^aYYxI2{*4hTGXPLb zsV->Fft|-pC)-2y4Mw!Iw%|J3WfKh`dl5epnxGw@_hyR|ssr>U$OVfYlTmbl6Zldvvkhi!6vvq}HT?0e{T0`NB`x z#&OHs9&$cqfGb$=cDA`!Yx|@kUk#aOR}1u}em4~jqXk4blVjDpsJ=CGi7_Oo8VGj3 zW#QZQi{3A8vB#ZPQ)r!uJiBf;{=8Y3=BFak&)U%Z1Zc5PpY*0ZOnGJo`3?)^nfV1T zl&?$avP&v*SU3B^i~>mXcFdxJ;6S%*pe$?5Q2_e?~Gt!B)+;7^u?eMalp_+6EN5vPfTB0W&%ZnDKq6{Ylyo3z8e`L{3mf&0PzMKPC^5x>Rjbm(3)kL`xqSD6Y z)an{SW*D^2(3pkGKE{953HM?s<}rn+Oe{!|VEJE>*eUy`NZ@VX9xu%NQ5M!LU~95k zsaH&3R6o2L@#Cc3bL1MW$X1 zbYs&iu@M?KQ^D-9qy)g&*9fkNjvOHT%5Fbb$>PyH$u<^*J%{tlD`Cz@aF5G67hIG} zeG?caR-g09`hvsa(4`(q2a0@uIQla7V1%d8eq5Dv^Xw~H3%c(3QMpL%Xc|PMRS&2Q z6ljY~{9|jyZ1E<>>K%2HlV3e$`1%{QnzX$-U{26{H4G)LrNY7PYr;oZ+-sciZlT3+ zzUQ8~7 z^6JUwH8ko6^qJOSzNXb>G7QJuZ6S5waTis zd-9}n+xc3|776p%`@d%fm$-T6q89`f{Aa@y~YO2Z*@ zd!S4H4O(+7Z9t^oSQ1;AJjlzVvO9M7yjtX`UnkKA$*q4__KvV~-#K1L>u+{lv1$ki zZ)$IHrTSq03K6dHI&3GYFV%co*DQ|C_-k6Ve{fc0;ij{C^`kB5+AmZ0I0s%&`(>lE zw+gvLr7VZdc-Zwx$!ee1dX+n4Hd_>d1O;tqx9;;z*D-@D0nnEI3xYc0BvEJqe=kb< z`_XGB(bN5|dUKid=i;?p~ zYQh2q=+EU4Mb-9lVl_OY8R86dcWO%`7wFgUaw(gQnR2~xWQ0G$!_$3S_D5rfYYLMx zb#8~IY#pTXA^O@c*_aml0mJs%gOc@l^w)SA5hf8ECvH(wH&G2!8BC^sreOAp#FAHU zwy^{W2#S2+`yKa`_BTP>a9_qka_NN;SUHhf8q)HL7of1w;dI5DLe9%`{*c0@OV>d} z+J8CaKC0InlBb^VuqkiY#k;nd5q>v|-lnsG5Ijl0l51yV!#D&I~1!|V>0 zhzgm2Fy3dOD#vy7o?5YzaaH!yIooLXaqleQ@X2;vG~az7bgiDGyt$a5tb9S#`io!T zmSj@MV%^@STem~sKS-m>wB*Z);-iu;x`jlDzLQ+D4DzR$CHl|FAN~+exk?nrnl+R1 zVOd@GeHWa5O8EV|8|wR1#~e7`M!!M;ZTmV7w_jAwf!dF>z_eK2J9?{kxR<(K)lEi4K1--Peo3M^E!30!CQ z(%|jrI<@afTrjQJ11n4rAOb)kXCRbTd5!!*alH{)30msoy}Us(3lWjk18q?sArhEL znUw|YlPWE0G!7p`MKD<5V`Gkfu}y$pTW%=C;0r*0M0^^iI9nL%G~+5)i=sy zKhCZA*Sr1vqaYh*@WCNqf9YZfO_Z-ra5&|__K z=Z&HfV;6@TQAjC*jtt@Jgs-Qy6#rc=l>sU`qED>soH)e+7C(nUzL(^d51IV~j7jak z%4}degmQ2CA%uWubJLsQEjd}+7!zbx5-0--lTu~LeQ0EGMEhjn_BBRGBlG`i;kC?v z7z7+}+ur!^(eguc=QMsoZN)RT<~BD=AxZdE|3Lr|#USA1d~`hh#1;ARaL@;5ISM4qpA zJCPp%^Tqu3=~4^McnRd%xKn>O{5VZ@-!?hv)tMEje(N8Bl3>`mn}p3qEhFbGT|ak3 zLJpzuQb?|;uY8wl#DmX8XOQHcg3g)&AX=fL7ku9=yt#&C9B!9r!v3I^dO&nIm@zq` z6C3u_p&Vs2rjm?j$%s;@4_T-P!Mo&)7lROK$>+Q8w%%1HTR*2FZ925H&wX}TFO?tE zOo-^4saEn0@xG9)9Kv&Q`tP^GwK{R0kaf->$8f59XfzP$`*kJS%xE-GpS1+NRN&wa zQ}Xh+0$z_5We)aVSC0Vvlf43nXG5I@rRK5+a@p9yRUMmPl7>Grb8BY!|Hhl?|BW{a zY+$oNx6TqrICx-&%ONF5R{|>_-h|LY*Uux?9Bc*m}Nrh(C|&c%h=&uK*j z$ORn1#mw7vIyoA?W|>q;)xdN@k9u7k4Y!6ensUpGs9(Ugt)3}ZzbjArn;5M_FE!P$ z#K4a+;kSxI+JYR*(#128`puRx;9Fgmde}-ze9nNoN`(0GVCL`#v7w zVNHX|)e4Q3%(Ryk=mNCFgYoeT>B>#vl>^l87z 
zB=&82xf}l4Mm{zYSn}?pp!MC_>{jGCw;MYm(j>-aCSvsxR7{}S{I@!2Nao`9a=CL( zlVEiSy!to*7^eN0X(vWPgIP|OnRtaWEK#|~`S?2L%kc=+*&Pl5i^0N3YhYlWp=1j)*Y)LNMp@n&VA4`fc?_|3skoWuN zThLU3ngboR1o=LsqQF=EDCuO{lh~>cas5$?{CF{C6I_{+y3b7yS=NfKv^BQOlPuMOHR} z_dk5#5%RhIgAqY92B<{wAR-9uJ6^YyQo|NX(<;Dn;EMA=b&6nJRB09l8x6^|FU`pVVJg>j+I|&pX!*y+L8);Qk0>7!$$cV#4Y&L8 z-j0sLlf(of=n3y0v>^;I?;P$dH>*fr9O`B>nY6hXIXO+UTp(Xij?fHU))#&y&73&J z67@DT<)WeGvx3;sr1&l{tS2N4)dh8oNGY=5h;1I;Vx4a07JXcu)Nvm&J24OAUf-3< zTa*7R6n*P&4G4*dnPA$3xK=tcK%4_3f=c{T6H)-izf5WE@p>t}WqLt{nys+Jg09Hx zN34PWBBXj@Iwc4*Neiu5PuvVoxjpuYB8*67KU|<%U-r&PIQ_Q{^~YVmQ2eV1xeMe2 zVc`CS)QYIIAxyp8Jn+PT|32pYPcXLrpNoM8D6JGkMpLV~rk!#E1OY;_rh=oXH4DB( z{KNmy(M7u*7j3%T$`C1!a_*;KS4;D@D)JmV{$61 zGW^qBPhG3cWN?|IHo131<_)~c;~ZqJ4v$RrPZbF$b;rngJi2tC zfSS7&za<&}HyqyjZ_!E`0YI(Eqg%c>E%5k$pL1(G`0gCtHw*&uqs{aZPFf2z+=gCF z(x-P>Y79ex%?1v9g-S*dBwsXLDk$)rJemBma_oVCyMGjTiMf6$!7c)7ksAE!(m!@O zs{mS1ot!oWYBJBM@Wnz`*&R$_YIuc(tsnvWZT z|Bt=5jLL%T){33OO_9wTwZ2%%bB=aeau)Bx(hqYZ}9Za~qd< zH3F|w4@ncWIkC4eg?rt~rzG!>N0!&$LuaoCZgD64p5tkCGW)A-4pmslj1(kb>(y?& z$E9N2bRO@lm{;A_T-%E*&2072{KR;`l<8(tdQcW#`=KeLNQeH(&w59$LgZWQk2F^= zH%K|8b*YlOwsE|coE2N5+Y+LZ9Hc73*nsk>8S<(@ucPTPqrkG)xHFkB<3H#cQ& z%GWltmzmg}j+^A5AEwx&{q>>aT?u->12b{<^qF~t!3Xy`IwS%l4S(BucS--W+26MA zz+JepfZO^Q#iMn%f(T>1%0^brSG+!PP< z`5dn{g+|xQ0YW1dVbEySrCw@pW_?T_tk6@g@x6`dw&b)n8{enZ#>%S zwRlpFYBA+M0lygx>dpX?WX)AHB8k+Mj4&?8C z+TRXj3Ze|uRAzSlYcyMko)M=CFjgGum&_{SKk@OCuBm56p&$ArIqMoI9lytVpk@q7 zMdSPUjQu6$NWKJ<*IJeBz0CpknRy>NxytqNn%|4B^)c^+OOZ=47JQHi%9@wUs)8Vb zC0cQl5N)t?FJ|oC-p9d$$K;8xGaT4Y@Tm&5c$rdV45H0?;~pB0lYy3c7#4DtHd8t2 z*s(e~M+;gPVF(btixRO7ba)7PC)!VI^pZz`hUPzl?upVh)IxkaNiE)6cZVy99&`jE z+;xB}w56m}E8o#H+)Fi`-2rTv2+5@Y&96pFd7?E>p8h6H0w%(Sw|NE8IpTR)!xv9; zlsP!hk-xGKngI^0g4@4P>FLY#teoGiipY{J?$2wP9RmTNPw^jl$l=&ui9C$9K_9Tp z!6o(M4v&`hoJ-}}T$Ra-#iL)v%cCcqee<2qHx`Y$2Z{R%7EKV&lhVokB@fL8p$B7) zz1;hVsu&2*B*7s9G<9ewoU-3KY6vnh<6MGy=fnTfmXIu(CI^S9m#!4d{H;mMVK05| z5uX<)AHH2HqvAp?4uevD4vhhiL1U)ZMD%W&XY^Uv@}>t=p?3Y!x&~A30Onlq&CbPb z;c>@4RDZCWCDm=cHSR#TXMTPD(^Gt$QbKbQsj38}aZRoR+<{5SG_9My;x3r>u?(>f zB%_erA9k2hC%Rl%Rti%;Ab;lU0C$m;pgK{-0t zPbr}h^gESKP}qO>e}MA|A=jIo4JeE-A1a`j`je|C2UP{;E-ocdGv6{)UMde{+o_ev z>JaclD7RTrp$j4CaF93}DMjz#UdAD_;}+pFeYz1AnDpYaENd1D?=A?xuOIE(0S0m8 zQ-Cnoe%n~?enEB8(F_A0c$a1-H`)C?-j1BHADQO$Teh4pg2O0{-Tif`qm1jZ24?*g z)&8h6Z@i=MRSWy36&EDC^rvfoMkhk`hbjdyIRBJfeooyi{`zTaOSqEw!h^YGDEM!5 zn8fp|!Ub7OAUvc_D{uoChB&NonZD9?vdx%{wR{hO2o7>>%4%5QHVJn<$A|bQYuHC1ceXQ$BI~dLh+`O~HenCG;4*CHrU%R}?w7;v&IQ=v zD@SZ|a~|PjE7w7~f_*+H$a^ebnGvW8Dp(tCeNz+i|nPyT{t2u?sVIu-YT{OET zKja{;K>7>=i%9d}>!?N_W0cRY)t0;cQ~|M#KzfRiSjNmO!64IL#RiPs@#9zb%`?T3 zG)gi}pDwe6!>rd`{Lx7l?V{QEVVK6fRYs)*H(> z0v0tH&($UyU!4@Z1YByntzT5t>q$aC#bcju4W%(^g%(yNf`u~f_qWjzlv0+({2h|` zJ%WNF;uetH+^$Cw0|3?cGp_5_a9;8?+suG};qzpCak&^*$uQFjF6xV8i zo4%s-RqyCw^!eOENbe=$bv>BUAIXZm+7;dRI+iSM4NmGEr);jX;J2?+c94`7{}3J0 zCk>fJdj2x?ZkHGSZ8B-?MWs?%){CEH2z}6F=+t-w(}WsCn9UT?scJm$p-cxCG=Z!J z)~t;BVx%htH~oT;ycQ|*vhVrHE$_}48I-y&+Hd) z1V30o-z14|#vgzvMH-(~56Ihgz(rJQp8)+eeC2(o! 
z#xT2+CD9K8AaMXM`2jyTt|j$--t#^X9^Ip9aOvGD>l|Jevm1w_rI22`+gq^DE7jxH z1<%gra5GcO_6lOKJ6I$~;qx&l{ufFk1pr?dUZd8nFfVE;4l&2PaPqR?d%Ut73vGSV zTB_r&kS3Z6JTDVzvraE`RY+Zs4OB31i*aWT&c%Nc$&KA3Weh7Wdt zK7OV9ey|HupUv1!qp82lsW)XFkpg>=^0CiMTb%-J&(pAS;8@#eWC@twCc7KILO2(| z*9m?*qt+I1P?~B)*_VTGH_3P}z*Mt{Jz#u0%9SP}q%k*DM)(tm=b+dnHuF+E>L1A* z_`Y8s|IVu6mKT1)=t-BL+d|_zw2XY^LtH5^1MpbB&IOyCv~kaNpzeK{sfL(D{G3TT zDf@?rz3QA7?#F$w9M%ic>b0o4IJhxN4>?rrpx3R<6i&-8l&g~^SNosxzO|JoNPl}| z)L4h4xeMm@hYn?K=7-iSoFMS@uWh$5XWotr$yP0k6+pyXr3b ziFQ^z7EgNa`@Y1R$#i%KPSB0>7z@GmFvHxODJ!mykaGe02rRQ>J6F=08~vUF@4*m4 z-$-B>%J2z)xugrDZYy5()z%{&vsp0q7*pltAA9L_PNkHeBB39L? zS&7ZQ$iBvn(8hJ=b3(>IlfPx`C%f%h3D)x;lL4XlHiuEuI`w~`iw7#Jls}3~L;jCr z;lDHAwR@I-eu|2e54~d=vr$)!|EhQV#OrpaBtSk_S;jEr718^v?E=pYe^QUsy;)E! z9pXVu192=S+7B5m72o#zv=4Zrm#0Byv>LZYla$6*LLnbc4nuY3xok9{9K)wZ7z5IcOmdGY$)_nHPvaek1 z`39NZ`*YSWduhjhaW-kJB9u~!ekQFiSX^qZ!89Itn z8MM6r&;u%_@A#;e=*!%pG(R?%=>3ljK{_a z5#w^7LEQS3*lHG02|DHsqzgXe@%=J0#&{o1%wy0M6_(^#@P#f^`aqPa+yxIwLgO7F zi!>Bd$0LyF0K(SEbOTm$6zo7)M;| z>x!j8)=*-Vx1F)%%+oik$bX42f}>Jwn5z5ZLty&PB0{b-hQdzy+eRTQCaJ)W)>K{S zlT#Me$|cY8$d$>(qG+4U%g4To4x7?pn-No^5na%ZN6c2)vTBhpF63y`2wm8x*7l)# z|Hk*gBk3lkWHjm%Zu^#-tmg4ia5D&WD*Em%=4SswyVt&D7o5P z6{0;|wk?W9@ihpsQJ25UY97jT_6jL~6D-9raJ~mCHd}@{hKuw%OqNn_(v#4|O_RYU_35vzcB{5ng4pO2rpNh;gx=7m>t=>^LUpGVjDc zdpwmixb-Aur-yV#uyB(dRK&IYiRe*jic*S8<3~2Fr_} z$?bbc$|Fkkv?O$wo0WidPe|jll`fT-4wVuOKK2iWYvY!8h!7U3yIi}A2@**TDylA= zz&ggjIy940x|YIz{Rq)tND*IWolTXUK$M^+{El3S{LYFfqUs={V5QLgDsNO z0i?ib*1^h8I|=G1QwJSLaaWoc3oY(tQ_erN18A9Cn|Wr2ck><%R+T`hzCMIB6r8_> zs$R=T$wEl?d}D_zHDEKzUR~eyBE2}d#sSW`hl|(U0%3R3A+92;o33i}?Akjnn~SV_ z;IL(V<09>SF(7l7s$zV;ciEV=8#U2W488|ZwO(ZaOYri}b|R9%*lt?!4$=zrW??&d98cFl&^3zrhRzfvhquT;PRZwWLnZsSIjae=_s zPjjBjHbk=>P<_PEop#x+^_rCPs7s*p&J2I3i=3{sSvI`7SRODL&!3e~eQ&ddzA_hz z9{WK>-D;{dRx+8VZ;rW}P{qM%`gc9{9ZH#y?}PMv=EfhF?07_k0%cqlh+Xd<^Fm~} z$aksZ**Crz$eaw&LvR|si(gwz?LbN-jJiVD5s=(27L9bA@9(ay7J@nVkX|E@>o}Vq zE<5+KBMlg#)j(fN*Eo{i>k)8%biY1Bh-39+Yb^EB9oEsz(2Ig95GH%^;)V7ptLNPf zWV%)`drjmDXi>pjlwd{C&tMz7kLWL3twj?;=XlN=&3In4e`y50wy)RjoFV?91|7XH z7uUPd%-z>WRdEJ174}2Dd1My$A8fZ)CL2d2G6-+|zFu*$`5!YL+wqNCPI_NU!b+0x zwgh@7Vr9~clQ>qmbcd@8m@2;67IlNe{O&~2Un->&L^>GrDmV z9-rA4#uLt#@$*z0mHFr0juRWf-k+|OT9!hif%?YGWDnxXyQ`~@vo+^}=KZMxfiUA5 zj%tuMTeQlBFQw~Tb{pv%ZcJ4Wu4NAztEoQ`&pC7>x;W^q={@&akIY`sx7rkpm*)*+ z=4BP!g+`?tKrXgk7A@oYExEJaUfIgePX+0zr#%utZp$y9te$)1Wz~Ram*ySO#*> zMC~0e@Zmaxob2gx!Lg(DLV^2j7T0YXB@S$oB?0WSn!*Vp$kF0{h=Ez;IdHir1I2Y8 zo#!bKC>km_AQ+5c2Ym6)WX1S{uMjhG@CFz&2k<2-wlc#^Y{=SI8j~)r_aia|7>Ot^ zo8(SmNco1ih^0`cv2Q~2cH4_S*{=0gz=~FwPvZPkJKt-JWo!z0)e(~XI=}BE4>G~; zFMA<)+!_#0)>gE+J#{(>vF$d9cYIJT2xYyK2P$9L;gwno30m5bq}!o*62B)#2+bg4 zYL{2uhCg> znip`JD)@1PXk|~gm>Ijx=j+2ggKn<(kyZJ-y40Kq>OVZYxm=%jE1D_NizZ7<_PFw; z&!wXBca9_GaOlQEG1p9YfZ(V`6~pHaRx^2rygXJt7kKQNch%(Yr+f%|@sjCnNJhe! 
z2~AFXQdEt{&j=$SxidUD9^IFk>Y2DnAWGrUfFv8AFn!bCJn>}hSC-sOy6=t5;{_^7 z7qBqLE4;b-Vvv8grBoe`lm>$1Ja}g^UmsXx0-G4Zd-tgTmO80%uSX^LyOd{7dtx zuBb(0GX!QDcKC1c!85$A(HseH_|vppm69W-i6wld4dzcHpvu;42nM+XnT}RRtuuBF0w62Ky+$bu;2Trr7L*zvL?{eLj8Gz}?cwOXla=juqTV1* zRca$Y2LOtAtOPO>RUFnU+wg&;%aEzD+bGHwlaWrUAjCt;mb-NJa2DSu4F$IfzgC7G zk(n&{-7X)9DU@?I_I}BnjK#Uaw0h7+0s{f_g7$1LU3aRUd+a;%1_?Ub>AX-2QO4K5 zUC5ehN9D)?T0MJLzNE$WQ%Ht;ek9K6SS7o){*#h)!aEb>-1lx$V8!nnO5fA37Dp3t z;}os!2@5UaXRqVid|P(1(&h?v_AxjeM8K9{?h0R}yeZ~hoU5J)PDe{kZXBzo7E2TW zzm}(+0C-^p?6-V~@xAq0nl*j)KN7Tpy@v_4c71vxH%$YDK^I^ODLT)@X6Q-t)Ho2l zBVb79#imt$nmfA_@toPhg|fo;m66BE(7L&m;y2UrdJ)OBaohD*DT$npC1usv4MyEu zzkbRf$8ozdXw`uA;QS!%_IV~JJ_ppPmn2^&mec3TWWZX$j6Dy9wR(Vf8(^D(MoV(IT_7T8wfV{Q zca%Lmk(f=vYN=gN2G!~BIdC{H(HaIp>owK!VG&WA;mlw+MHl< z`dRMI>2uED@|K!EifqDQSnAA|xjOC)#eFu*WaB3#W;Cz(m$W=@BfsF%_m?__^9|Qk+kB`V4r2K^hh^+x z`E?48Ks5*?m%^<4aRC*z{|!x+u%WC3$^Zom|VGk$lqp z5QcQexFo{++SGXUw{?%{Ib{EgKLNiNPwnwhAK>uUdOPC}P+oP_k55<-<(OUk*D2_X zB3Saz!Y=u)OGP8QX(TOBO6VTeuf>U~Pmh^gzfX&?5aVz9!UYOn0dIi z7P1g1U1BIfKZ#fBxI5KWpa5WE#`pW(zEPPpx=f#cMo}iwJC$c&-5+~vwfLc;US`*V zxjASPa9HNg-~186DGM@UHCFK>+4>-no@#>gd}rQ)$3heL7seRGymis>MGN}iIC`Vx zv7JG+GUg$Fc@(M$WdH~Otb-mO6@KGgNotxRpcK~>86&NRAi12meVi(F{!Fb^)tJpVO)-tOb~&>#zYZT zEs-s3pZm8bRI)LXR-S$ok_iNn=l>8U(i`O&`nlW@64(>!G+AtLvz$roi;)LK`0UQd zN`Bv&slmse*5|Eibmkwm4w{h9Na=r=KY{pOx2~I-SG$?S>KyialBu%d$Sktbw@>*v z8KX7|61Y1KD}2qY`teZM6*$KUHPyL-+t=Kt=MnvL`*~0~migcfB<4bKDE-vl z-T2j`_&4wuUD8E5WgW*1#BF-T-`f}Q-cJ}*BzDW2Fs?)on-z~<&Hk8IC#JfYichme;NBTd+ z7>vHYq#8x%VR{cY@Q3xBw|e{6HS@PQ#Svy-eMG;D%XEG+Q*+K=zE^g)lm`2|dj~$S zx@rkmID=Vx{GHO)xR$6EO|(-7?YLH!FbpU zGNa4i5JSmffP!id=+S^=>u>#wuYlzBApS#d2pJ1d299iy1)GaCkK;YB{XMwK zck_o91scVU%?gKw+`xA?iKw)H#jo@(zV&xFLNkGk-P$O;Nm^6P@mTO~STiAkJKwtv z&2!CbxOn*oOMAGW{0q2mCJ>h0kRq*z4(rxh6lnqoUELG#FNK1M$THwk%T__&YsRJmD!}_8v$#v z#*{-{2#ldI%nh^0#MEWgh^_DEtItQG@msqnc=q}h~UFH z`e{9lh5zT5U?Ms9-~#u$6;JOZ|APng4>a-*4@enwxsjAkTM)U2xJJevOp)IuF4NJr z6MfQUJpPvy{@>Yyy=@D9s5nfaiRahbIKVVgV69-H-2Y8@G01W8%=`~>*MH(>fBD=g#i`sM!2Kafm^t{*NkU&2ClW~ZH72v? 
z=1u;iEAn^dV)ZAIgUXC^G1~+0J7t{F6oJ$zx|^`n@rflGNJI8r!mp_Qj1~TNK>wS4 zudO8hW)PSw?C<|O27&r#Afjw4VsLr18W9oEdQXe&6v_sMT_nrEcn^tm1x40Xlb51# z(wN?&ps7n(-WCE+yWp{%o}cQ-0e|@zorqdi*jNR(>zVXgZ!9w;N4xSCI_Ku0r3)9n z#q~EI@8QZaTYnE!6zRf6(yGW#-pQWM$)556r|p*C{iKx3z~s3{o@&zX0X2|KEgcU# zJ_3u3GRcL4eaiIC_huvB=m}D*mKrN3DIq<8a+De%*cT}K49%9A;Y!I(O7$_CvZ)*96^R4Uv93_!$prZLO7qNRx$h|T2u7$c>P>9SEuw=`T9Ni zpb>;Rn;%pP9;f34>_PTLIxT+LnUQPj-(a8RHQ*^lUW<*oij5BF_6__%a!vXgwcbW2 zHR`JHTS49wRT=l5wLgJfPsF0@+Y10^A^q_3+A4_HsNAAwjLTfLv!#1e_ce2i-V*O= z-$qKrN{>X+s_(Ikj=q@Hm z{8`6D!2<{1-d4=^=ucOoJp0`7pZG6-cO}&RC0+^sH{;d+#HimD?*GK7|Fc7<#!@4jp7y7t(huZRN^ZJ z7-5>%7R`A8?L-Id$Mr0o7MCau*cg`w$w%`^*^`yHjZ_l5@_SnbVtz< z&YOcKHO$GyGP>ODl|7YS`&Y1RUblUnE{@}rky>%*16p@`s-Aze0eRYUQwYl!Ys_9z z`lsLbj6Pw#5VhxpaP~2Fdp(a?1yCy!K4jxyQEvjFZQo>PLMMV=i|0c+rS*4V#(V~4RcH{q)(uUVQoNxZIR4wOJOup zsKbxFV+=O0CZI=@$;0*yXi%9sKMV$Xkshy3nHAU%zaQ_WhSUDOH4Q+#tBtgwhzR-N zjn*V?SEPl_T$ONIX0Q4?mq%cwm6o~OCA2|jXw%R0c0%&YqQL&d>7n&xsTJouRo`q( zem#fP?$KX$u9UwCV`)$<4;L&aZS!QLQJ;vfMCCY&AixSzD(3G<@4a3Q3p~(wwnmJe<~c{Pg!MIzUhiA+gN{^+ z>1h6&;i7tgFr+c*wZC`R(pUOJ-vM-9j4whWf7_N2R@bN8za#@x#i0}JhD6>@19hoR z`F+>_lrffFGU?UE3VtmH%hpIg*1Kz!uCRVsnP^O4KW;gSVfSj}J}*TB!rtVx`-9&j z-p*D#B7o!u>*Ang09q{i6*^6c<*Z*Lz-M812HEYmBIq=A#C4z3s+PWN3=V01xoUa# zyGJI6KPWVM`-`iNaPj1sT6qdpas?;~j>pg^IX3$MrcO6rJ(4Mfuvc4Ru+SNn7PS&u zuiwQ`A0?52c$f0yeurIeZ~czw^Am#oZWMu4DZ7 zs^k`=ceXEIf|jdX#K$i6THMM_3DAuus%$eqs9YDF1xTh2c=(*KRNx(K`^Ug|^qKG` z4b>yECB89u#3MSdrLa$@@X&E`y#HAGKtueEzzxhkE|x-7W$bQ#XlW@IYKOhxb+BVN zSc~8HS=EsXkjQeD3-~mlTy8qb(#Yfu?Riu5&AX@)2Z~V9tKxW_{?WvDn9y-F`gik| zpmeGE_CZtfw1t@N7gu6P_$+}(ry?jSiwC)O`w`byK_W1#sN3gqbC}7FXPWO_i>u{E zS7cKOidl+(=xl9E$TsST?0vXnglzBmG|;rcJ( z6fcX=0*1gQ_fVgThoUdCAR}M!H>;sPJNfos^N02Kca~8Krk@r7GgJnUGMjhC-Vx(= zFc|eGhyi$;snpg6ss!%uWg5gi`k%Bj;I3ru@9JWS>uP(0cckOEE;*^CpYj@XM}&b) zyTiHexL=yX`AA=!gWHt{^f2lS1C@ly^=6E*VBf$-yk)sIS}hCz_6;*-DK~pn6mip+{ZB9LZDKtk5qlw9=DbodTnf$rH*k^?cg58zD(U? zUQmIoLcjWfGl%Q;)qGi8Sy=Yt2jUMpY1L}0h|2-i7xb~UjQjdg`g4vR^9je-eNihSJkE1m z;YJLxX8Pqk4SQ=fdT-&VMtsk52TF1ZPiGgsUl~-7OjA!xjkUK!#h--AM(ZqlX;j?B%KeZ#DVt9-+NRNC35&li$yKmn24$6o0p_fi zQ3{jyqDkcY@#&MI5lPPXcKD>k>_?q?Z5M~0Pbia|TzV$+1dJN%Z;pC&N3#_kU$Vt8 z8U^1>bT2e{$LCjZd{n7GOupSQ(g-l_cATqqK1vvHku{WKjBrTJDfHDt3VxI9A{P52 z%S294d;9vMk{lVPH!Wvs)f-2dRlLSVL87 zxILSW&rBqBWUi{j2Xu-CJSO8%+wy8JYV+i8Ask9?o^F^{Guy6-7tK0MQ|-)^D{M0D zEUok2J6oORiel`{L{Q1KAbfEQC^H-Xtl==J;Q_qD{Zj@q)?BsYfDs{Ud_ zr~=2I^zNSTc0w@(lZVYyL*4!KRJZAqV{?Tl582tYmDXYMeU%t_aFBUCTM!DJ!-iLx zR|MG?D+JtC17o7VpDQL%RQwUtM5!C(?h?wmTOML8@04Y%77IMxpW-pDWhgse{6yqQ zH}D($j=xheKd#Fw{wXACVXE0T@Y7%yHE+8?Mn|=aKhe?dQ>N^!OYPeUCY8n9XL9ei z-i&)m2t;sx+CP<+mhSGIRnJ1o(IzA?pUlYerFjEw5QR9>nf<6-@G&O3PRPN#-1Q{a zA}X|e9SK`ES{l06Jg7jX&gJZ!d{my}SbF%8j5?yO4BE*s@fMha_T>FWX6@U@>jZ^& z%C@M=ytys-4IZ%gve~NRm0x(KepVmzPq4X(=@-PjlItF!b|BM*VwYvjE-}~L#XNz@LE__tc(%j4rwJ3}%g^SE^!6ic%=M>RO%O|G z76}TYQfSlq&TihAbeEXp^%$?1Aaygu?H|fxAJNK0ko0W}RW88#>i8_nqhC069xTqI z81}|cNiE}%_Q@_L*uMHkiYwD$O4xsdBp7@XaPt;-xWzgSiu7qy^mj5wI#N8Lc(#nM zrFtCNBZ#XFdo?8jB2n7v*;ts)O zQ7$l&ZnF#Hs3}%*N_K6ls#S-}*B?OMWQgf(PQ%G!(=76EWN#62d6(=Ouk<7ka!JR) z9~O7y9+7ag2fcZ7iJ&L&m9EdLh=M2!&^=UFFoSu|tLPDE6muicS>A-L#~P(dUW3Dn zFnRN)L$F*TL70pd?Jrx}e}ni>w)A9vIg5J5pi#zDz5q>mb%BU^tJaT9{r>$y_GcTP zuumiM_&tED+D#b(^1a2}?~^6>bOVc2l+C*p_bMDVZwM9P@W#?Y?mVnlBlT9O;@!-b ze%@GA*fmal>n0g+Gz!F&H9T`DmXLu{n*EesDV|Gl^5A-`b+1tb@502;w7UrMal!Ie9fqW}hwGe!P}?DF z#K4$Fptk*)i3>g0t|LB78Ym$+bsED@wA`NaGM!0?imwE?KK;Vwad%wuV+31I3st7RlwHsET<_jETzcVx` zPH#lHFC^JkRI^yMkU)MCM7r2L_!d9SkJv|i(JTlPF-<&Rri*vpY$gRWVl$n}AVZsO($PeO_PwxsAo8BUFBEQhA_yB1Ax?sLzDU=@#Y5Y9! 
z{mqnLr@yyug zGq&_NE<Y`AFAYon?epM1;Y8JQQbhHJ7gV=sXvnV>lh_)%nlvu1O#9uH9U=D`Uoj zIzXPtZt$`0QtNbUh=-l1*re9&mQt&g=k;}z%~Cfmm3c4JxL43e*@cMxBAiuh!dN=i0%Zm%^u(aSlGC9o5a3yW<7P9J3#gVQPSq3O)j<;e@s~YTS~) z)N_H#9WO3k?YEvlbUGx1jpcHp)ufwk5tg_RNn4a{7d+@NsuCYRvCh;-<@KCy@`(&s zPf~nFz@e*|qb&F!zMrpm*RxQVbF&zZd_h?I*loTQ#MY7S)z++T5kUlV_FbcJvEN6; zp>0u`YP*!qt{u*!j$beFjOVN<#E+Er&Q8w2s5j=tUY+B&o0uQYkB=nT7UkdAZFIDS zc)F>Jt<&qH6ITZ{3bBL}wTi4VuEurPHO9UFLVQi)Ev;E}#vm7-DUv=>^xLPog=okJ z%CC7Tz->HIV2&-PqDe#V)=?#`XVv!GQPi#*gBGm@(Lw#{JPJRnY}d1A zJ6Y6=-=h$*Nt@T5j)fiPqfv;*JT2ilXoS%S_`Jet)KBERlj9MA+oW~j#f41%RevFV zozSZG;}6lgQ+kEYX}yqBt6`zHu52kVV=(&QJHE?cj4%(x{MoD{6f2-c$L@}89xf(# zPXnB#*>ZM*fOb zPkuh|jv}GJT3Wj+MX~sPnV09q>*;cZgvz5VK3_AnF92A`ZfQR&8`lg++i3DeVC}D( zaR&Pu4bEJ5%c8~J1l6wh#Z#WUkA@r(Y=}*g4amM`VIUiT|03(qXbUz)P`xO~jU9xF zk5mr6uxniNGK}>>Qx=nEPqoJ`S76(D$>`6gN|E&`&tS+DKL}0PvrjdWyp+1iu2Hr^ z{V_Nr>`BujU#t@&UQRh8_mjs-LvvWV*7JMH_K6>4JSSKWnmmLXy7VJ&vVw^cN_ zJFRe`vj-VL~pk|3&|;{*Yq%30W$G10o~8-a9c|&tjbNomqF4R z2^vPYiGO~Xq?=|!TkyjXlh+fjlZUi1X}{uJK70@=N0@D97-}0W|Nm@{EiVvnHsK$f zddnu4`NV5aXx>nH1wy44(P+y1)NATbdE!nFT0*`Y_Y-1=owgGmH{%y~d_f?o6w5CQ zMrYFNf`(OZ8HPjWX5uME<20Nkv2N5^s@~2=)M_ApW?5&S(CA;8Uw2j!R9cNeI2ppl z(3b7yQg3o?*O>9R+hO+nBKWAdOiI=KNSr)kxB4GX3i7x?=@wc{6_S}kYW1%ex? zUKPV6%0VD;a=MVo4O^nox=ej?y;l?1##(=vXzj3r-iq5$8KV9Qf;JA4Xqlbj9?I22 zde2LD%Wg6w2mGnDRT|=|PpSEmh$r>aw2b ztK0f{x%KAK%C862{MDS>v4u@LJ}(vmshbxqd=zq{OCGr((U|f%HktyP4y9ymL)e-Z zOg*h&5Qm7}{+vc{+-Wx3$zU30j?J{ic;IgNv|@UfAUul=CS03(3JeRp=tsHw4#x^t>Db&kaKI?5bky_N2U*J&o90(;knSDq)K(hoJ@mqzGBKL~lD zO?;TVBAJ&VnWjgEn!j3dVY4I`h#%M#7q&6*Yj?rytG zeuYANrTDV}{J&z&wiIjjhi^krYWFPpq7@JSms-S1Fz|J&#i!i$Xjk@?06 zZPyspGxqQdCp=UTFUD~H5z)^wU=+iwD}TU@5v$K9J~rixJvEgA$>fGi1}Aa$r@We} zQqTT{poMD+zlSF$6B+&$ZQ`oJMxk2ZA;zUXcYLU8qx1F;iC2qmi!l!J=>PE=c0qFe zY|&Z_A(ykVo|F8S55x9o5{UMLY2uC?@^EU7WhVCtO@vM75-1PmlSWO6PyA2MTR>zF zaS%%FmD+=?=v^C-EvRYz!&Il@|JhUp|8Gq-@Y@V<;^pe)4^N@vtcS0$qW*VMI&eNA zP{vAJ_~kv)G>Fq3Yez>t^npRFb3oZ;n)R$YF#LF}FMJY>glj_JtjIYZplC_wDpK8F zExyP~I$)QB0f8s)Fr04PWeusQ3F46^EbA}VbmJ|`1M2$2FA<+lR;>0!Bfs7USYG{C z+YHr0?T^Ye4teI=B-}15y)U$iUx-D02J2zg#MGZED23OUPcjrO`VoH71e1y>QQRij z=@De~VK%Zq1COieig|BcrCve>+y*u49j)yT|} z&HygX_{{wxQ|zPc;y_>w7t=9dn$_GAFJ&pCV_kyT@(AN4W4Xwq#OKp|7pfHbpXfZ2 ztsAdy@}v#Lm|xmGHb2BL28xn&5$IS#NZOE#yX$=+DL)~F3Dzgjtft^!I{4(PlnACI zPuYz~#4A^F0Uygzaz#e2Jlj zHkeM3Ep`}lHS1LGf@|U=&$q`1YHS;*6EieUE(58x6l*55=H1CxBXh)7M3R63ljJCp z6=WYiBQ8{7dSf)Br0Fb>ak7b#(wh)u5Qf6aXFYLgF4bLX9Fri^$wJ8%sITFh2lr)4 zFspeCvNls?+g7t*Ps4NBCFNpJV?6ZiBgcOI^`7QxGyC~cAbt1Mm4h(PaWCWWoJDaL zh359xE53{YR%huZ>MOcVUrGK3hdE~>A{S(Z&68|}oK|G|TIpCxbYhNQ*WMBN86$HM zm4T0rEIg?S)CF=lrRK{cj=D#sj%cGZlL(0mIZ%bJ>p7L>y%~s^4P!txi^Tk0yxd3z z`q%eV*jS$6x7g+ItH4j^GtVTX!Hl6QF)DFrOFY{P{yqX6*K$w7EylsaXU{m~fzMt> z>!wlS?J7}Ea*wIjO(qE-N&A1n)W2ebR+u1&@J5@c z9J96!MpI^0?^ECOVh*U}%mB|{ZEYvS%j#Opd9~bP*7=IWQChR=N+fCZ=$CC8jAG`Z z@w_8Kps935T02U~=`7iIiioZ=T2U_!`G}_@9glxDkEIaVHMW=e#oL0PbJ2876d_ln zb@mN$;oby;kteL%v{pAwg~fLN$a$sr|0f3mqHISL`!)H(^qs6`m4%vPQhf(;gi^l8 zH=`=9)0qvgQ4_YQ4hiPwP%?fZ5##+bEM!0^cq-V6z8l zItxT!@hJPC>t2?w|D@OFW8o29y(U-JMvblKZHB9wt+qeDdz_zbmqT3z;3=NwGV8_W zZaYwc&du>QkB-_AxZ`?rLf2oTfGc506$KbTR728Jt@Q)Q06(Wlj{eQ#d9Nl`SU9%*xT!`cSM5PtOPw3Oq< zTd)Z4HGoN^rH=?CIM=%s1=#Boe1w!T{s1xgBEd4LY+lmW(JDxY%B1_bkT!3c#!BrX z+Xw?zcPw{OcAaI`POs>6Ol`C{$))9cKWM~Y!yt?ma1YG`FC$oxzcG~Nf6*z#5}2@U zR~IDjhYxz=)k98 z(bqBY=c<;8nwO1Y90o|4k3{51xc9!7nSS%K)_*w$Myp|L+b||p} z+~=vv#rj$1(Vze|8v;y1;iSCoAJ3**Wo%9@WaFy~_eJwf31@6kNos%KG3blgXBA0M zD*<3Z4@)Q{#Qg=~^_taUQAZL6@Lo9^(-riuIFSZ5CJ=sUpxY0dy!N=iO_AZ=!1U<3 zx=jeGp8`6x^!)!UY6 
z?O&@zj;b!#c)J*Awd%Ppx|Qlld;}}A#8sqH(2O}t7g1Sf2yV8NqJ>o)=iRNyb2xt4 zULIjLjn_NqTTkfBBi&te*fgmaX*}I_?jT+^Qy365;F$oB^|bfx*|q9>rLDoFpDjkCwmr!8L3ub3{gmux1c)=PnC*a=j*3g zcpS&)5IA_8Y?v9id!D4p5tNhVsdx5J1b@paH*xv64Y2RJr;_XPBmx!MUtAL?lieu3 zDJ0qvRDX7DFBzyW)IrxF^gOR2B(N|~D*)9DMvFI7o%Qz-LwqN7dgxat7(T;x*fvL< zf*kRM_4Fi(CgNH1W-C-`s-)`e`&jByT!&x%f@Ji%YWCs=8<)P+gc*IXu~hlbqK`hnsw$0_WajJE>H7-fqJ*-QNs^kURIq zvue$Cf*u5PtOoIx^N>J8B+TQ)ftvkSQ84%8TApbvr@yvY_1eX=YJ^Cf&C}ht2a~K* zi0pqIhVi^F;5^%wmMW3tI+KF9BUqN(yPj_46bM`im&j+MYXKVGvMigK{2b(N*J?ZQ zEekk3OT^wntp0W+koK`mYBfH_?|&4O?nl>n$!?|ARR&x4ybhLy(l2PN|j!s^j<~jB?wXiXcT!nc=#Ra%Pz z-8&r#3ak&HE`p*~UbG8+tx#`5tZe-#J*$KDnm2L-3ego3?f+n%8|~{RvK*!z+Wu?3 z@_`|sHXrZC466N1DWod_SAeTNoUbJ)DxXa;AI>Njg)k0VOO$C|f#Do!p}gl8=p)yc zn*6BU3LTgagWrp?##Wnmw}@hYIC7k-|jOl=?%arW-Z8 zLh(tLZZ>8OS1e{x+82{rN0OzTm#||eZ2R@ommEj76krW!GCq`gN`7CB9N6Snwl;ss zZEjB~y0&nrh_HPT!!+CroUckxc?Q%Y<%gfYyKu4{H=%l!raYjO5viXe=JRz7oxpiS zUYZ46hEB>__ff+1<(%^X(?d!#j#x-;=ZmvZxpPL~1^NMU+cC;9Y%jw(uerD%w&)c|6G#A=Yg{wJ5pQm;>|3=RN*~md++uh;7rTbhmEj zJ&w|O3HnS{H_rB+pI|eWL5KPGxa!1^s%)c9Y=9eqB0UbchI4cTI@ixWr%uG9sh0gK zH9+PPbhfxCdr^?_n!xrdrsVq)p@ks|%jm1i-Ag=(%@v4x+OdLbJ=M6fF`QKit({j) z8xzJ1BiSEi`_Pl2Gx5l#UV8F=WiLo4gH4=_le0zy<;G4S`pi0;Cnq+Zwx=y za#hr2dZ+tq-zb=E)}y1#G?qC?GuXeHm5_2H@_rlZ_oEyrDP|dTSel^mT587lD1zX$ zR9W^wA!AF{l`TWW&+zL4Mk7tkIXMJ<%)0c1Y64d!QRBRoVWo2Pduqcwu?T_tw!mG~ zf8RmP!RhI3HV5t(!juODRS7#02iLI*>eLgWmZXTH8O=&6%$1@Cd+N^U7|dt+ZEY^b z*8^Q`RA*t}@0$A?noqW@&(un^=Qg*4c{1!nevMY+Ycw#BvHx29)}X_&LlNY(rPeJ zlvRh3nAY|X7;DWJF!Z;jBdaNGSd zh_gR33byK+6s7pZ|MW_z(b4IR;z#|2hnSHNc2rs9E5EakgW^OB%H6T^F@9ugdr$1PsNur-(%o zVNz_=Ui^jvxVCMXo%_kgR0EzGNhQs%fldXVD!|UnTk3Ph?dsU(FH=cb2zX{~VR60D z49k3ac@sNDm}PfhC9m4da1{Pdnlpj0!d6&pDt8pmv!R&FHONnV*= z;^}E&PmR&hl(^nG=&?nORo+%H!MuR=s0mSwR<}*IpP%`CC&7_5g}v~*RI9PeaU=1Z zlVwyTcIm@JkWopn14;yZXLL^QOkzbX3+Is!QF~S1ALkwUC3%}ddvnD$NQQEcn9o49 zG}drj;l5F+|K+N7?D*I<5O*><#Q9-!4e_4nl9~d_c_7K-{B^%e@|Z%-zKqWHjs9m1 zg8g;h+{P_h1J_;%)mcCh{Zp7WZe?cYKyugCW3NY>*@3ukY0P&sGLdDZXiO(~UdOxR z;mA(@M5Df(|H@DD)ac`Rr>ARCX0soF1Sz9O+b%1{DJXl#mkk&j9wPQq_a?QF-xhe&u0ih^$1Wrr(vqz@GB#CnX|2 zDDKj)jqdZAk)@I9l81@cj=QnvjSN+ex1)-j>A!YSo8OFUs?Si*dbOgK>Q-sIKrNt1 zjnUI|>m4is3Qk<&tNd0jiNt$=k=6PhR9_P+6&)tbIQi-1-73!r#N$e$g4Zf3{Fi%O zL8^|E1(LdpjTeTe(bQ=EyIu#3UG_04*JTFYI`@83#BI+e36EAOBI-9E)n?WE4o26( z$f28|>u9EobVFuI&7^C4ELC2{#svNpPvYCtWkVUa&VRN;SMxWeK;kcjw#RJk#Yr;U z=X&^o8__klT9QzT(x9JAMXI)+>(L_-%(sZOu}N{E485@RE)-l3RriC(+!-b!cVJ7E zOcs3h48nD*id4ijT8Gec8d3%1)B9bmx#MQKc0$DTo6gv&yD|Mn&dwg_cN(QNZbno@ z71+^2s_rw_bahS@_#8}sK7NNy}uJ#&)|o2zo9q3Vmn zu-K-f5Su5p4B}pnXNJ)X)!?at`y(~YAji#Oj3GdM{3RkL04YZ<$3Vwd zZZrfNtsJ3{#!;Ux^Y(P&3g{_jdp@!m-B(OL6S3MZiSjXg@eaEpHBQ(~3Stx!r~e2n zj0PvIEQ9{ndvzCtR4I7J9X6ongnq(~~Y5f0b$d<(^1-@R8!+*K5FG}2yIEK`ni!!8;KP&l#P zJwD>3lV4!m>j~sX@ywN>!=+nH ztmdRj=@oN{l0b$jiC&`8IIkUUmiZjC6i5=|Rp*_ZOWON^xVCA(50n`v(nlOv<{Hb8 z76+%x>I(w}xC!u@-{iB6Uf3#C2Wy8%n40_!6{(7Kha`JSbKPK@bS<__0*P>LU%E z4`4|(A+ue)rk+#p4p&)6;cG_FEGFT-`xguMFp}f2qf0yYFTlI=MkPznW~n}#q-HA* z7C5E$?R;5?`5=(;NIPmvosC_nqA5e{w?)p zV;8~AL%?Cs2e-Yc5~K>e7F7~WM|K7)Elw8nRwtDo0tcifJ)U-Ytm*49s7v+7<<;sDk`id%+#K|M z{gE(Igx<|OTroLK*1mSRyPacZMvjTk-d_4x-^)k`t|`J2#2k3TV4DLIIRn0-4fgi) z&!@EQSTgdSUiB-Qp>0PET95yPOsXEU@V&X5WvU4$`(2tTlgYFgbS`%|Y^NAFAOfo&f*6Y>pI;dCp{BC5*&@R=2%%4EK2*HH7y|$1D+<4s|30G}? 
z%3ZEL)X8^9;56%_lfQ#m6g&g8;arwu#jRq^~f@87cr>^ z-08Ex74J~~)m}QYYYx&;3{#3LVsPoz`H?@^^29+&@B6fp6RS!%pl=dGr0B@xr@QGOhGd7P~bzBNQ{G0k;wSaXaYvj`<*aUk7W|Ri7_ChX z&G2(KNS5-a!%LhoZGVmRYO~bAKQWua6}PZ{`IML>xk*||WrSy;XE79$2db-XA~l1| z(+ceEPu|#luAe)WY|uR$;3-y~Q@t%$f_4JS#G ze7ghm5gY8z>M~BF{K)3hUV=EB`b}6>&53Y8sb}mn8C+pL@)r!#TpzRm zaABkuDQwM2KOZBrg{2)x=Ak17ozS)VpVo7J0p3O`|NTlX(As5}>SB(#l_BD5jlMGA zJFd%+hgs%^NO(|ZAZn_CT9(l;O>#mR<#AX=n1IFCEcP)w>Xa`jBb*3!MU@!jpa4=NCcmFvaJ!9G`uMW$; z4XpQcZ?YX)cCj}&TkD(M^!I88JCUY=Z*+Fgvm`N3(JqOS@$oBBAGvzJGdQiyNs^xO zRV4d=$G$(c4)6Bf;uu~m(rii-5dH(?kGsUXmQwP$l7r~zVtnx`7eZzYIkky4)5}`$ zeXrDFpvNV^jQ_2@8PPQ5Y|%y=v`qkbtDHKYxW@>KR)pmr2IhuIb#;j~O_HnxBafo* z(hv}k1u8$1)sgd_2gQ7|$a}p2yT{~){k%W5QtR!!{(uQDNsk*u@Ll8S@A9IY;*SM$ z-eBQhQ`11ZdCFmEyi`V;NNVOA$_@rth^XE#c|C0eCJu@Vy*fnUCT_3Tfi) zI$l6~%-XHe23KM#PhHa;$_`--f@OEFF(u&90%oDSUE6ox(9j0-aw8$J?YYStNwxg` zqYE0o|Dy5mf`$wT@xL@=ctKx3o-Z10)$&I;-)tx8th>a_3lD~8(n+?dBy+m-m6;$D ztT^OefnU!iu@bA_5giHE)(nOB)jN%GG6|Nwlxo1!c+5+PB2aEgjV^atK0m2M)ND1SH#P&p0 zCqWa1&HKJeMrFosu|V9dgOqJ2wka`t^wr*dh`+*cFXSyFg_!y3);<_7;_J}IlNC4A zbM!*J>LcqS9Lmu<7mqDU@^Zd5f7@AkjJE>Xi_9+kI3C+v7!#ruFWYiK=-vcXN7cMilZh*O5-pOo4A89k|s+)+km+L!W2DdGvO(*TB*%vJyE zQz8q8^yabXeA!(3U2&HV-=n9c>*$X!H)3%)G=bn({fjv#6M}1q zD;)KET*jE19dB%0ST1|qqhPQ2)gJc6C11SY_GN>C*2I2!(sY$DsilBlTOli5XnnE( zd-6Q{--AIiaZ(BE#))Ae|bBEov1Dn z486NeI)c~iU@CkBc*6T&REKqtaC19B|K-QZilmgtjY^k$;oqolO^=KdmZm+9n<87+T3 zK~(2TgS`8EZ@oR~&1ZP5Gx6AUb)IIC4cJ&zNLS^25M!&e_ba+CVUulbVDcPjSG%OU zp^S%_ndYA3%*(>_D)y`fMgLh}=RTT?N1?e;3)~xb&vD=RUJXZ{cj05$`ZxA; zGYL(G@XAYnrk3jToHA^Xd*Riz5J}X0;)AT^tN($M%Wvnwt19 zcPqHs9#^MwPdx7C{cp#tUi_uR$tQfST>KFH4;lfZ&c31UY9u$`d;I1)2xy`fia5Bi zIxtZaxT+|O+5c8O{8WK#WjbtX=1{iBfqeP}IkVZFi8XjvE(^B>C{#3MIlS_OD-$(M z2qnQd)X*#6n(i9vrMQ^o4k4;Tsb!W%cS3%#eooB&SgJDXbGu~9=?eG24u(WLC5EjGmOuQ)K?c}k#M?f9!L8bP zbWCf78bASU_F|2j8n%i4qT$KY!d(|(Lu(Uwfi)zg?1abZ7shcz^TBSu^mO7QJbt?P zwq3R#65%&`GfsxZ;Qbb%QkkBadI$O?zQr!rf_BZYuoriK_T^Oy388Ckq!&=>Oz)_S8W0OUN^EJo$ebKnMi- zBiBEE`a|>If93*?a+dVU-k)dv_n$yC$2TlOpWuCSJni;pR&Bhql$;-&;AFYH`gEl diff --git a/research/tcn/g3doc/avg_error.png b/research/tcn/g3doc/avg_error.png deleted file mode 100644 index 0b421824df123276e9de098e958a5e81860a1d20..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 98633 zcmeFZWmsIxwx|sufe;{AaF+lHZjC#^HArxR1=q%!U@;_ka1ZVd!7WJR?i$=(8|yn+ z>zsY|{q{QdIctCUaev%?9-5wAGOI?7sv6_1F*{I2NgCq`@e>3D1Poaj$=3)7k8ltW z5KkW?1K)_93=1P5pg*&ekWi78kf2m?vVUi3V}^hr6BzvlMLn_`KczVn3rC0Y!E15* zkI%Lqq`g7hdP_r#C>|J&rM}yh{?S^j<0GE_BUJ}x-$Fbze0;?BUXZ2&^JKdApzZ5&fzP%a`;TUBuu>Tfvn-Sm6iAt(T*en=uOQB{j}oI%OE{&jw5&YoMudPiOC>8vs#%iyHJ8P)3S}* z)ZU}!=K}JA{QgunW6nk@QMBeYb{g>&63^qF9Lb8wxj$^T!ZKV3XPx7Cis+L*W2`>d z2=H7=Ktx7h_*H%@PBrl%j%pzIEXL&h50!1Syj=>wv{ zZEsy*lHFcG;BxA<^Qor>ns1|QlFQoCZ#GpI2L!d=mb%+NagC0T#0hv`RiEc2C_8v6ZE|5jEs(EGVZZe(XBFT)1pPU)|(KLsF4 z2C8H_~D!}WgFv~57g1tNO zNnGoL(Ef+njYY|v2;VC7U7Z`uWCm?MJDw%7TreRNKUaxASA>Ed{A%Y!*pygwma>0a zTk`0ax_~rOU+fkwl${dciHcvnINBy6&D*CZWhhE5WTn0$j}fsck!e51J9LbSfmQ`k zm=NT}*cl#Kw@Q`b?LK@jipTpf)EC(SCkZLGg?s3!-3OFX7LQhWHUz&GwvUvwpZ$ig znI$RM#R{?Bw?8_Pyoq3=eIhIQErLtZHz49s7f0r^7ZN|H>m#gI->g1#dRqI@nerms zE)x^-$y1y?>T7;RUG58hY!|V{VHGaq_EVk5@dhgOGwQup_R{K>EeTFWR z3KAH8F8Nu#tl=)?joAFbZ3XG_&4ZBd5~b8N*dI|*KXd&g6=zRNDh+c;a=_kCH28@X zA!CZwA28rxNQc82a1vM$FdOL7UeW&47}GIalG-w&P=-A7F-~HX^h-KhDl*z+Dwl5_ zvJ_okO})y6_Rjs18cF$GZ}ie@w2w-c%{uHl!qmC*pO44# zC%?=t&fd>S$y&&0${M!F=E7$VjFHw=EXYpI5z6*bNoSm9c*N)!jnAmTFwW@mvhlU1 z3P|-_GbK;&yI!%UI;_sA`Rm88d0$JO=$b{yed$4s7I^t7-&sRo{PDQWxJEvTdP#1R z3Qb;oKCHlT{Oj0xZt|O*G!ti>ax_rg1dK)zt2`k*%34;AN{rvs9 
z38|HFI(ItpIy;rbmE@KyGchx2RT~xuLruB!A_iU!*X{$C-;Sg^7bnGBm|W7G*LQY@ zEysz*#z!&6`iJM!-y2jLhNVGAdWUAeyXllDrxqXN4Nfk8_mP5QW+RU##&^v1HMEej z4Y5Dwz#`cNS#T19xHyBknYao$JxwLqo!I5LXWow4aJ?@T0UzHU+KXK61UD(>D7Y0MIWHvPQIM(U?(F)EYDXxow1jC7B5RjAvMaZ6gR znJb$<3W}_V9Lk%>8=Rivn}#HwkP*1V%EW50#Ok9K3l|6JE7y5DgPnT80?X&I^;Pe) zuMsZ@BKzr1=nj(SR*W`V&!_f=*Sb#?&ga&q*UWx(oadi<{Nylp)V|glY-DcWY{2wr zJ}x`Mz9PMV9PF&u4_y>etP^a$S)N=kS*O@(9)~;n8j0$NDz=DzH~DqbQX%&F`_Ok6 zF}IH{pZHqGTal;+sL&)>KEIa8kf`<(My)`{_2X6)%lWLJEwW}*BU?k+K=nZKp>$!G z`!h@YcoNvG`tn#;9-dn2tLazF0ak(8(t}@xv4ozj;8xJBixol&d4KRJ@!5Ikxy)U= zH(!=t7Jm*zMZ+zAZbg5LOA;v~>nMBu4LJvXa(xb2!ug$64 z|2m~3#f2+AoHW&w62>X;QNNp90aa-_hNI>fam~{FKwm09mEw@ zd1mRgWw5ojb?a$(r+K#%{5?3C=zx=kbt8<;PsAuX{X6A0>w4_>njsE(j!!4$sb+Pt=VXNWrj}fD_m}o=!n&)riZRLCAMM{#i#12NQkBwUg`WY(qv9$gBgcA6eU%oansK$kx$RnW*sYxw^vW!+1`zV4GBRBnQhp zPEKSRxjiq=Huwc9orw9*RDGmWw|S^ zuSas5e3PIVb3nDnRR7rdPApbZ;%Z6#Q!zaoJm zNtxH@3B{^_=$TiZcrR$?np$^4V%aYupk^Lc^M1Lyg*1usX|orRri*o8j@ z6GX;c>sV$@6=sqQjfR(AO6$j}P3GA(?@!PVE0do(co+!d3Ej@=o(7*6-qhZmY!jTb zDxE(%t%9@fg)PMLvHD*J!PRfUvy3Nipm)Z%L8;HMnJ%Se*cFk`}^?$ zOzn-$Sln$LfZPZOg6{mlM_V&zBT9E$8#^$+yAbssIrxFk_g}M8Q~r^~*;E& zKig^MZuwt#vIGA|w}1|^-lwp#v#_!Lqi&$6;QhD!DwghMHd>OFwq|x<;2y&49DKZj ze-!war2o3*KNZ#fucF*+Z2w&HpHlv#q#)~k5B{k~e~RmmZvlA;KM`d8hwO!)>{X|| z0)~UsQc_tR`2XO3Y=GAl@W=2U|AEiG%|8frAP5Me2(pr5>h6fU`lxZwI;QVx9%8G6 zOQ<|mMN}1gMVp>08Q`9Fqw4L`Lh}%dI-SA5_bbYG?2vSK9F) z@&HYt!8ZA;X8a?||8><6TGQe`dp4BB2hR1ry&B6?%gH2j)YPhRpxT?R@Gq`ee!{;T zK$GO(dv>rlTRjS;@KM-E0za*?nI7qC;%HJ9hJDUg&l6Wn;7W7dn;G89Nmasosm_MO zD*EBS85zoXBDM(`y(%lbWRLB<9!XQR($`sXm2X=gY|m6NHl9uBI^Uj6zPFkzF%(G< zdJl3;nyIpWKXC?jI9c!S+1;M5pclH{txzvBekGDa=9=ttcanN~xe_+|85MUkm~-F| z@1UP&F;m#WwW?`|JX%|FnaFvt0cAas3Om>$lXy`99<%heQ~9+Ng0!f9bucD z2rFxtA3BjZ1~J!nDEa%szRuTEw{jPkFc_@T6Xv8-ZAV7N>)>lOmMcb|>d8@MH=pqG zO|cES(CiP}jEhB|R1QwgtyUC@&k*}y4ul8)qLElFzGl0GKy4eTH27ZFy){9kg6=9t zNgQThU+ye6`($Qjw&PydR?p*?xpGPYl3ddsgp}syGH^Jpc6DL!%wcu+_lJ-h)H-EZ z&2ltQ#~5KTW1gU0T;H4wh9x=tL}l0dRt4LyKX_R{ zOlth`Ux!_k&&*e0FBXIYDJuw(1U;Jmd}Q+UZjIH4kCvvaw3N+wkMi}#`s%9WR+(K@ zXesJ>FKe@8@6Wq(n`w@!O$s3o=ez`t4jJdsoyLm=HLr44t(bgef8BT&lTZH8$obE% zZSFO;kmNzzinMOJ>MG8@f|sGFZ+$WNcaBi4Y^v-Rg@HS0*6q($vwllXF7iI{@%&Y` zkpvBNQY4(8@T;9)t%OS_u+2y+{KZ7<&L0k9h-vLyGtTSPLh+{X%e^fMDA70k;FtOx63tsb(zPRRlY_6 zKDXT*uTHG}i&b59SbV_5KE>7Tfse8YH1xT3tjb(bzvt}INS1u}jBW0=fkW#f^%4Ui zyxtiyY26<-s@2LT6+}6TiBc{*6CvmR?AqCMRaWCKXFxt>(AC*ozU*69E3e*+stT2( zHm!?;#gc93t&uOGg2!EP?pM%U)r`$0e+tcN`QyEatG1)2t8K<*-DfaZn0X2$ds&Vm zRoIJP-)-K-bWbKz*S=AMprCoqZ9Uw+(Nqu^c>}%czLQu(Z&?x^JN21T7V}gu*E#0! 
zKAy?mo}SmY{5ype#$3+Z(Gd}8S8dhYi{L?_v3fWAj@B|LdBIhe;2{`ikHUL_CJdLS zarX@8Ox~na;ceYojP9me^EH{!#T-~}@?C#Igz!G88tFB#(x|Wa*DfD**3H;AzO(7W zUi>&n>Oc~sDnp;*o}RyR-XKkG*ms=@zcKeX8-HWMTNOq_4m~gHzBQi@71*zdD;wmk zJ&1Yn%69fgh%jc<20<8hXGisvf&0>z{6W1`5zm6Vfn))^31yL6_5S2umU;!D$f&UQ zMoTMK6~-M;uI{c6U?MUM*$|1NDKL_cx4c$qd66el=(vnH` zm$2_S$*u`KvYu$QX1l}Hi{1rx^v$Ou@){L=L}APm;YZb$qf(9*(5T>?>;3w$6!&FR z?#V*9J9b7+Xxqsd&|nph6VD~SSfn-vuwUTSH z#cdP=s5b$R2T%mpA%#=mosz1<=D*zfzOi9N@zhz4(pBwrLUvdeo1g+NOoZ4wFr$3F zrmScm0eA=zC*2dMt#tZ=rMb{GtB(E7M3EIr8Ye#d$Kp(%Kh`%vucSJ(kf2%89%VIdf^&a)U(@H zY5vO&r3MWWz2wS1KF8hk8rL_o1I#?8M4`nSeR1qGUum?PajJUjj-!JK%aHwr;?0&4-GbuK5Va^(c zWMeR_ZD4~%r&1<0d)cD!iuIu8?9(fhCH<@`y$wTw)Co?ijQaE4v#awxk|i=P0}2r@ z$I%aGE{Sdpvr`-+)JcZ!yR%1!ZVSv-@i(e6ebp6DPuf#Un(uVTNCe#-g6U#D6xf-q zH>7WX28~jD1Un)0>2HdU65kN!%<7pncnVaRaY$Vo!Ixun_n=2)e`S7%QX@a8uZkyV zzR?piXx9<&N#EOgRRC(A3K&K^3B;v;iEFt%mN(2{3)()}tp-_-N|Tt*WmjFRB=<`7 zY^3@W+ljoZ@SQCs37qJClx}9zQ_9{o*t_E4SzKk!V7w=)hWy zx+jROa9(#?Z|4iype{roE|wB9J8^Cy8bs zm58+J#rVv8ZmC5>Ux9A(ROOPBw^XulEHrSUGc*F}IBt1%xt1l2qPSPj(j)@5QTCI< zi)GS`6b>8op^l_e{QARWFI4U96wZbZ8sB5Tj7u%c3o*NnTL*I^TcfbR)lF0`P5=u= zbH-v|Z^LS#hK1zPW=5x;z{_aKbxL7$?+|9AQ}&{;_mp(4&TY}F`sGH`3E{7!5@=bK zKvY4EcO_(yz40oY@0W+59b|#y;v53C1*gRG*c*JV9pn4BfiSryO=qFtYQl!CEU0vM z2^Gm_H{RP-zk~$qvhsa(YH^<$8Z}WIv?M<;sO_{jC>xZhzJY>+p-Cqhkg^R;bFv%c zzp`V0b&9@~qDGlSGg~0b5%GavFOL7b3gKB$c}3xMs@h(s2s-|W#=7}(`+##$Fk{=2 zv(L7y00YVPb)kN}W^x-cYNE|m8`^u&lif~K66U#L7sia@Y=sQKe9&S(hd2&W7`Ru+ zcA|k7WV1w+1RDY--+cxRIO}`k6DK~N2_M=x4yCmfchp}OPo%xlyR+tbv3on>R{*X~ z1i93(-U!OlD>v-)di1)q*F|&r(JwG}kd`gA)Ks#!uKS-!O)_LscwdSwRoY2wPC5Hr z3SQfF#~OIZj<0M`9zz2^^;TNG<56}ItS?J*`&ia$1}}B&PC6nyoMjdEnfJl4S^oeV zr=XXzHDG5wHzz!<84}iy3*UhcP3N1UDJ;b9Xc4@$^@o=7yIdPc6J^$Wq?iBr5V>dm7uh z)OzJGre=uLg_wE`^E-Im3TlL=LGh|>=gEuOq@d3a%B#W*7v{U`I9YZ;P8uTE$SJXU zg9|>%ZM^WKV$DVqH8}LKE%MG?&0509DxQ!b-y2KLNU@WKiR@fEff>k7(0YgP;_m-p zE#SsEdk{lVW}|SFZb0HP0q!zpE6r;WhM;tk#Ot^4&18L7&Zw}8v`%~B_3HBS+Dyo>;Iv7p7>D@)=~U~R^m0niwBlz!x>MVdW;Sft^ZAIcBU^RD^{8dI`&rN+_(!YRjpH)x zunkiKKHvH5q;<~1;H*}O>1x7D|6nqPI^z97*# zkx2b?w}2*OgQPmgzI8J0^yV$NcW>oOM=|DPh0p2K_?|^Y5;Ul@O+c`gRdD}wssj4! zAZtsWnyhkt0FvYK>t^M~qD^DtlceAlS%oJFr_WT^DA-`o4Aj4OYqD@DB5Yx|E4Z!u zo!iIz_^WVgzhnJ}9{0+;5=D8IQHuVc6#F_-pVq%}IB88NRcIneM&A)6GEM|Ni5gzg z+dLLez`&~Z@rc;_F*WY@p*X`wO@$v)eG$E5 z8L2Y4LT1%4J9f8ZF1o#P#kxq>Ll<#Zv*f@yDrjNy2{tMqw>BoLAos8_; zv=^Ql-6_yp$g`h6OO`z7G8P1`M{IFSc0Uawb>9eI_>eg}>4&%|*SC09OI%%?)4PY} zF)vJlIlv&)RsFF6F;fO(Z}5W53dA@c!ii|1iY&qpn#jzU4+u9S;X zJrwlHGf_q+k#3P&m?Y628x$mgHk*gF1DBqRTsz&o(_Cw_+OX-^n_bCUqk-wSD;_hS z!OOqu?97zN2?}%zaf0`U}|-yj>5y+Qt>h_ndg>+n8odymZR(Dtx-{}NU+h? 
zNEZ826?XP@v76#C^I;Y&Dw{RIKIdryW5}!Ol2oI-)Y<&rTEm+Mc>A%T(E;>!k>JlD zoHKjdt{tuOqinDrh;7~}q}wO=I5Xhf$|< zS^^@1xDhOZibXtl=ojnyfe|?uX2-uPP2&Ze*Dq=p8i|BAx(7}xb#?l-I+ox7H)zPj2(@DJa7zp=aAPXBOalD&Aetb2=GUDB z7oSAzkhLvKH)juEU!M07`An&g3TU}3o>d?bu^xzyQGlAY*0v;}8&0fQv0xm-h z)=d1xpesny$!zwS^nt^~$%o!~_hqMIYo0n6gVD}p@W3G-YtyB1pHE+Dm|+fLx99U- z#u=H_x|>|YTVU|}_C%5#$ohl8ESo1s1pketF3yfqW!V;_y+IOkhDLKq2w&Ma*gCj* zvnXtBxNDUmMuO~WB1%H+$#OuqJ({D;l`@a@>s5oDKf#oz0(`xB(d#04xyugA3Dy-s zqftvj4x(~K2Vnxn(UOJUHFL<7o`Buc{N5@t=pd4*-hjE>ON0sdRGA~il&MLZjLDGw zzRR(xNZ8JC?G@==0c6?Z%8XHOfUd_E5h>lJ3S>jLWq#zy zS6IfN3$fPyh=sUZ+m^g+@qt4Rrp7ibM!#OKc=~0kku0EEOiCU2$(x45I*fT9L(ofQ ztv23VB(t9+R>#ZVO=d`AavPl5FO-=tJ}ZrgHLn?TbP*6KlUgoxb{>~gg`GK1ei2=tB1t?t0)$;saO)ry})?DTb4wOj?F z$t1rjiA~Iu+YD@v0BRy3YvQuty+^8sM!*tG9G}1nny$?2KXNuqLQeBO;k-~i;2*Wu zTt7*oAV2Vi(xn!_M(pifJ6Ck~4r*3HEewf71|Kz&MC@>eox{>hqL9%J-rA04D^`+$ zMh_ZJl+I=?7mk#K>W91HrruVaO4VoaIW$D4Jkteyu$&kA+T zwgPBEzXQ~Sw$Pk;Bv1UylWju)+Tl)&qSUZX{7lY&Q=$Z1k9IV{%hC1goT;oMlw)4V z#6UJ6#f#p&pDdE3$y}a6&VtU2p2n%|tF84!C#Q|s_eY*72pliZpP8n9A+8{(u2U-a zb`;uo%aV)6Iq!bjTc?rS?>b&BNjkM7(5zd(x;Y0PeBPUzNn+nH4B=hBNV6iwZ9o6C z!hZho)78<)bq`EoySBT5edAW1k7vA@a zYy|p`5|}ex+mlzvOc!)YSe^_S!mr+x-(?_bSKExjZwBGHrGf^#d?6bVX5={VwX&`A zVQ|1n2v}*`aSwFP7v{1#^q~+=_}zVj*V`$P1Uc$2+5BOfXiu=D$gh#2r-{Ti{IndT zBOzhOBZi<)Q-tp_avMCGZjR;4UA0Hll!aa)p*&T8S5Z|T%2KTfMo|Sdp zWe;V?V*#@9V#Q~s($aaaYVsxSut9uC2LSPBgC1NLhqP-J>Aawc=y&w|$EnCan;f6J z5rW&A&`w{N}$X|pQ5HVZT+wc;ZQ1oCA>D)9#v5-8;9NBa|_ZTvUgM*w=y zl=c)@GaS*i1`)HgE8YzQY9`rm5^4`#pG{z`+f1*iTdVc8C#zmSR-WLL?)KP{t1OtP zK(lNQz&33}$RDeYk^bcFx#U`8&i)!$PSpiG<++9blF$7#MUXO%+ocBmuYPn3EbAX`t`2;6 zcu4W_@s+0CI95a75tAY7@x$+ef@n^MXLUDwYX#J&ef@P`uSAA@C5Oh2v6TWS-wIfq ztcaxxnuw$ZFhBr6zcG?*%`(fS!s)U- z_M#>CF&-CE^UX0DvsS5Z0*}2CuqFojvA%qnc443`2KTfMc}W8M`DHA|3Jb zlr9;jAHnTJK89l8>>DfE1-KqP|5;(3+2<$yw8o<1)QX>ll6%U+V=_SN&nmyQ`J!ib z`EWQJM`_9Q4nLWENu<*%6P1RWA$b>>5&iiFg@(Q5=phZeD2D`kbeD{v#r%vQctq~% zSIBt&+56N+UnT1|tdg{k_xur21ILs_dft;UWv(#<^F1nBWuruTah!^tO!yps#HsRU zm7Jd@8;;p#J3|C~)#5o7yj2F>K@fNg@57RK4#JVQn6NI`x}DU_n>g^!r%4`86AO^R zn!!ZE!O2VYj=JCnTBu40exlQDB#QicRfLG2T@JXH#TV;n^rE-4Jwt)2)bQ?MIq|zs z5rn_oKSmHz##8ypS#NbV^TM>tRkP?a$ivoxb#fvLqK8dX1~U>ulosOBH4fB?k}Zb#@wXIH1DDa&l< zz=4+~EWAH^Lvs{b)|^oxz>SWLDt>JQNcwCv?AQPW&64kliINy2A9|PQ*Bg2Xda1xK zm83-uMc>l!f+g=eu`0r+^T#+Mkt(y{AklKRs~AWv``Z_%xfvQ1>rimiAti%VGq55k zD=Cm^XBLE|`pzeEe*b};8Q$Na8yq+^5szOTOEgb%q7MV8%c$0Y-kcLAP zIJYW2>XYUpSfO6EaBx?#^^-BdFW_O?EgL0O#MPEGq~sGYTwmC8zDJWHn^L~Iiv%V| zJD@L`HJaT+Po&cC_4_!Lw1{?9gfI{>lpc8dMr6#uO5@ntXSf-X@JEB6!6b9F-p<81D7@hF+Qx6^1Eg?7M;r zyqWqD_Vmi=oeqYaZpR&{?=<=+iN&4^^8E&tfDi>54lGKt3LR67U%@eilUeHTqJF~! zSu>+$F({2UAUrFwYTWofy4kb9{2P{vsMv_h{-4aJH%N=Q6D50#33ZEO9KS~li(WsX zQ^@YBtQw^k)$*hlHGj{j-7KBho>pJu7fh5i)#BZjE$026diAa}=!9G@V)M@m>@374 z=bdzZ&!q6VdWUj*Ho&UB6MMn`r5N73^_$vzG_T}wb3UlCGhZwc?|~sq^LvJ^AjWh! 
[binary patch data omitted]

diff --git a/research/tcn/g3doc/loss.png b/research/tcn/g3doc/loss.png
deleted file mode 100644
index 44eaa6d6fd835ced99fd347517d5fbd54c4dc336..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 105220
[binary patch data omitted: 105220 bytes, deleted loss.png image]
z2pX!ai1K3_cA`lmbpnAm1NtiLT++xruoy8<5h-@Z*ojN?cwUfJUh{!_!}?UT+gcN) zD<7!Qixg>~`+h|}>@FB3L1xX7jc|}=$Gh7#Jm(6e@|obv9edU`HPdqo4a#uQRcYmp`7g`AKFCf)~gCEqI~l zK=QyGNZxanyUIEkJBK&?6ZDmqoAuBRBi_DdBKq~tvYrzs+?zmYI`&o#Xq(UjI`p>x zR>OWXW#DtF%6+3pe19JqiArZoq9@gsFm_cGH~Y1$tY)Z{+Ntrwx+a1$%T_eCEb;jf)Rsi4pvO^;U{p zQVpS*(RQGu^IdQDYg|7t0I`Eko*07HxTL}2)Jg66Ah?N9vh|l`_r!UxvPQs0a%PFA z`*ZF{b(V>S01iO=f5!oQ2BFAfc%sj=dXLhyQMdMB*5_ubMbfM9PTDC+cjhQ{|;OO{cxjZa%0BbdYJdRjLD0S zBnNjrnSrs`vt?IL1)e!QJ%w0t8|h3lRw&c}iqf46S< zt-t>SmQ43@6psew;Ps~6Y7`nTH#E6n7ISKW2GX1vhtd+TWck!4Jz*@g2Bte?0sOrs)0k0vlpjXI&OX$tSQORMp3d)Qd{L!0%78JfDEg-@LGpJ-f zo{@9y#7Z)HjId7}Js=nYTSi|+gTj+JqyvixpWPZ>q8@6#O5LNis>dx^2Shy_O#kn= z>{c+7zNs%A&$lOe<$3))nDCCrO$kJo9S?JXx}h+5A`CszXOEr%407Y?mg}eVR*0kP zHRxgMknN{kN&Y*{f}ZU9eHtwfOrzz^J*CJVht||bjgQv*#P2?`6Eg)-)c$`L#yfyT zP=$NK7rT2}C+a+t@OqbVF4~i+gqC9!9Q|~NXkmS{)=~4R&b|nU^v^&32O{b}aV@}m zh+j^PhX%Fr{$Zq#6JZZ-v>f|Gd@|FY0#AubsaM0|`ozODZZJO;?v7d|i737y)k+2W;Ro^Bj|oExHM zdy;eF+2eF$A0!gQ^5of3 zMNj|PoDVF%w{vDoF6a>l73)x-FuI+iH!S$~ZG;{S-4X+3)c=ewc?CCr^y%q4i|@tr z0=5HAZ-FM+d3HTEM`89KA5R~;ysr1FC8Tq$ z8uQ(^FK%NpoP1VmJIHm|q1*jjf%&v@wL~+Krj9)$^|Iz`2|kaB8)XMzf;imxQI|?E zyTWk?K>L^O{QvU7d}pJ)X{AmcgZVgVV?hB79WVsbLOD!H*wtx{ZR6zl`5A%5>bH~} z-9jePlBlT8`I7!_pp#FzYMr(VdcVMC!R_Fc%@srfXo{i7R)y`64)sR)CHW7y8y%6t zr#IHqm6~LM$(P)f!%4s{eRPq0PE=|)v-Go9e95ztAxapC>NvhtD=e`4N9eEH0}>VT)j3xXyjvpiXBxEg18hjp2%>mGq{-wuxMC+YeO2 z*g+X%fEMJppEqB*d{U%EJzgJ^T^{kcDq@*Z*9}P3n*V@QRe2|j&@DJox9av$nE+f@ z6M3o!MLHyD#9Onu`c3H>9K&_7hxBKUtOuAwQHZLln@Li{UQ8CUQ#|ALI-E}=Fsa_q zKB(=JRGj#cB@K5V>hZtmJ0W-Z~jC#EdgpR@J>AJ!#6Iju5LpOD$u*gR`(m0#+Aqc+_>kw!-O(YDz@ z|75y&=G)rnpyJmmqlh1l{s-yrzkew3JWHc4li2-UujF{RL|vy@Gx78OTI+7H(|k1# zpVEGn(JS-ny`mLiKFrdGToSa5Crj!FK6PgUXcfjmhuXwX%-=*9I_a@r#do6BjuIYa zt5`}&Z4?(AzA}*%sntvaCP+53=!E;;4uD$*nT>;2CO1Szv=(N!*ZXpm=AV$O-qyaE z(llIGUP1r%^%Bj=?`9G43@)ExMS*N8-ilQo8(d&|<@VeJh)(lB1Q6dI=g;ZpmUej` z%p^%W&(sD_!cT##N*vqiO)-yxh%b&}`nBFG5st~g*8)ckxwk%^0)}H`HC)~=;q{OV z7d-chiZHYA0i6HgMiM&9y=T*<2I>p6D!7>(3&?^~Wi~2arSVdayqcYN5nM@D8d-V- z4lB?L2MC{ERrMnMWhd=UH28tb7N(7uF|R^7P+$Cw_OSP=c7OEEW6Zf67{2}p2Mrba z%+gZB^!wGoLt_3Z3o&}osqB#ArIc@o#KDvtp0Sgl_YRMC8^{}S3%cUdthaxoqz#fK zv27mk-Wyj$qWi9NF`${RWvQg{hv4Nmzlgt(GU#@JK^m;etVQw0*+HXAGm&mPuw`I8 zo3P$QtUUU9s^Pa8#7sE>m%igZ=8W^;HxG%c_cDNmg!SgaL7bXY_GaI!<5!>&keT&6 zXTeP(?W1;3dU(aW?@SX2{m>RV)Yyx<%r!Tq49X3Nm$eT?5Gm3yx}?=cH8D(ouu=Uz zeqH<=p9FZYPq{UO7P4ztkF#Fc#Noyz%DlHT{=lvZoV&p6^TiQSg>=zND+Xp-#@K!0 z;0KDptP6b=NE<{P=tcx@)#6?KviC*zt@aW`CX%=%yyE^DR9XB*=7QZPnk}5 z^|$$s_8u_b(O%(v&Rj&y{6jLnM}q>LpA;~$Ae8_cdFu-_pldHOv>G5l#tbk~_7`Us zJZ(Ex5gs@nG#xpgO#Drs?_}|7+Cmc%@F-pRY z^0eQvdLDcK&S;cN{>Eq+obwm+fWOGPG2(OYAI&Fv`c4kP8-$uerHveDr=l?AtzW2BGq6U@x_9QgVy=ll_#L02|wdI%y zeOq$%pju7RnBf$gSxWzwx%YOITb}iIHqPgFZO*53-`oA^DjJmWmacL%%z!lJR|*a4 zOo&4_KnX)I*K~}PAe8|7rth;KGrRw-)n-YN zis)mXq0-gWdtY98+*9vEz*6;44LuL8&*fWtpCryvwr`}*E!}0tI(-+W|uS{uLcd(OTZYV z<+Mn-HN>F&A;^(`HAec!um2amN5B-h-nT^F=i6o!!CaiVWe?8E^3379+9cb#DQp}t zyfHl-{h5EKoZ^Q$a^1wDF@OmU{)&x89+~g}7|X5*5f*dh(8gLZ;NF0A}o{y<7ZNUDiY&RJ&cz-tK68 zs+r_9?WSpQxJ)_i{AR5z5p6?DDO8K)_l2N79yloQ8Q1?@d^G=)FV#rG!?}SR2-f{; z9)adlKJ$Od41qvFekOGbZ7A_X-G7%{pwL;vbF5F4h_L6uwsm`DOk^}fFYNST%;}^h z)~-yAA7M{^cF+cd)`qhG@qW0Ds24hX@wq<(c7ss84qjLkbRO;PR37G*7x%b75gwuV zZa}g!g3berj6jU@7VqC4;u2c4#w2`ZBUo6Y8H6_bqjJ=eqwoNl{cZ{oLpSOF1B(35 zEK{T%#?JIdKaOhmWwIgl<#S@m3#3pDhCCoQd-t40hmd|5J&@8PA4O4gQ*YSpww4jf zLklR$uD%ALj2K zF7&o&NdY27l$=LbZ@7d*ql(!3jUY9VZm;~gg^Ce(*R4)%Zv`|a;z!K>A!U0B7M;L# zOCNAmmFcJR%#L4)@xTZtxWy=KGtiZczNFy>H2+*TF1MgmFhldmhlI3UK80s`m;oCA z1U-7b#?&YRebIyltHLW9fMbcZ+0PLQtOVN 
zawP2G(fdY0@yjb${Jw0y>hs$&JF4j4UvmCyhDvdX-Pm`c(Q^)&Dx|?x7(+sC&*kv5 zJkYo3H$IOL+XgeVj+{{4H(5}7t{++(s0&&xef0J9-J7!?zw*3!@}rBW=OeL%Mr*oY zv1s>RV{2E+T!s=|_=CN*iE_`?Bcw_I8d3mRQFlk%ZSOXmU5gvNY_#mm*`SM{u9hA* zai_au_jZZ1iiDTcQ0d%zpU77p;0G_rF$B?9fk6^=wOFvgy4ofSR7*;_!X4GziPX~V zf7nuj*Y;p?zWuvC@LKv>&=lSPsItNzpeH^>ZRC+eZr5q>NxrLdQ_N4Y{?V=bmrht{ z_TV4X9h6$TWE`(1ML4)eI3Dwb8HA1D(paoc)`+ zJtt6`E%%hn5~YI2p~zcQz)6~=OuM7{H|HQ_K%QAP*i-_49<&Wn0Z|V#2_5gT#|Aak z-7W4-$)k2@s#?O=bSKPGNtNhs=#YjDT49IS*3^L2%Kc z0+w!=PuDjA&xqrdQ=EZtZgisZ`HusWFydGF=HnEi&Iu|)bwc$h#!n#z*<6NpX0o*j z!DTEZy{|tHdFu_A758|`ZP@&Cefpc9nGk}WKCyGIU9BCM53tuh-jA0hkyJn0pRCTh zOF(rse%K^~oc5#hto)75hSOJe!{bdWDI$(99`v8F@zotX$7~B_bpOEMyAyj?Q4)0iDJ=t?zmeEF zy3?6s(iF&Z8mbb&`T@HMD7>IvGhFXBw6AtA8`@)AfB0&X(1qlQYf$f9+>#>9Ap|g= z#JIo{g7Uc9)J02&A&9;TXjDde_)Y50cDLt8C(l9{yyg&Z3p3cW6@Gr4BKdd^mq;_VYr4X1WvH_uAoO;*Lv?V-kEf)%?ZuY=)znIX#p>VeD}sSU zZSeGIh7pZl&o&9u6>GosPcxxs6?7<``Z|*nL%^0REk@BZOjkuR9moqzRa$n(V*O;`4yuPgAzs>i@Mn4{FGt(6EZwNb4m*n z4_h7c7njT}QW-T#5{g?C&9UwYHCL~4)4n*^(6tkC^nPqrX#{NtATTe%L^|2t0`BkB`;Xm$ zz{gK4tlJr`H3a}pz?Uz|6?-FgnLyGK4I1+vxPISjyCNT38BpfN>I~LQ6#0v+j%C;x zC-NCl8A$uTsi<3B-JafB!A{;!9F_P z-U@r??dBrY-Mpc_YL3m*{YhCcHR1cUZG|v8O_aDYbRX~Iyi!8IK;L{#_hr||z|@bz za>|d#Rred!Mg7redrN%%i^uXNp)U2W3mrdLjJfaZl7RK3)8E%$mj~Zv0)@p9)d$?f zhd_t#wD;Ybn^XPjOW0zjZ0J-g{o{sE<~kx)r5A$S(I}UL%}_$U8-#gUZy$zq#)0h^ zehdy3cuTL2GIq*jQk`MB`2D6Q>=z|ZuU=An3>)dsHSE)uu`deFlA>M<&Ro$q!KXpWAH^01>VjLZ zx~Yhj!E(&z*_F2N8P)Li?)0bGGVJu|_H&#oR>{cGLqb?8TWBQTd#EQDp@%Y^jOha-{Un~Qa|uYAiEo1(ocoon`GeqI%t#Z zDV)6EdAj?c09pk89=0it^77d6d?v>4F>L7rFl03$@iInn_ous(w!jN0mmxHoF`(=5 z$F7RO4&6XoOYY?1q2u|x9SyP>Dxsy-BC=kyC&4QrDbDn|;dw`qd!Y;gG5w<*6aW~18T4*NE4 z*4pxq?rR1%V--dFA>YlK@bA|T5E#nk(XP1? zd3JJ(+^TzZ6RE%Nt!+!cuk_UJiIsn{Z;WU;yr=L;S)~8K_vSR?0wo)yGUV@>8$yUU z^)vfkmj-ViabI1&`>pAl$dPxvOh8>-<8$ZH-eKugdSBn>)8xOcfy(i^aHGR3M_7bY z`v()d@v742@H-?3XGQQ%uX?Pn($eF4!`mkJ83!L|&weX1-(3BuNz`y77rG+pGKUNN zk?(sR&WE+qWtBJAcooXn+8*MPC)LS@8b)z$7v3~5u>Dl)NnL|&BN5zGy|GW>nR*r- znhKBkp>(9y%IBP_xqJ0dsM87^YAGSFIgnYJaw~8QOX|lI)3-X0;SJU~m1oMAC{#W1 zYXbEc2C1<;6%+W1z0Kl%XTu`vXr|w0)qP%mso@}=erKuj(q7b$=iXM&j)lIy+G)8^ z&S+w>`-wv>U|&w&B$A??bJMZ@G#Vyl60kq{OPWKs;GUMLv$v0;BL3X`yv%5sNHdUZ znMLkIq*g^}&a$hf+P=pU`>r?p=)FuFU(nv#sNA=EK-yDO*~d_XcNe@Ag=M{^`aVRb zmbtl(gN11G7w&xWQWdg^i5Qi2HjgXN=a#-OgNcXmSe+yi-~W-U8cQ#vksEry+<0)r zilJWh`ECx-DsHhn-cIhcXJeJ#cQa8zI=(|zu%yKu$b*-Ho$(f|4y7S z1&hQx?RQ+yEi6vp<5lB$AFb6yhCFR>N6Rc5i9}pQ+{66aX@1@_`22veN?golq*J#|UPyKb@~g>Cz4UfRD`9!YHTotBtRbA}e_M*_UYNPbM;zNm|h z!h70wM^Xc6k}Db8OU<%MfBpc1;A$2-UAX0pldn|1@gK99;`v^0I8Ao!^EOKwjb;}( zGJ68cML*P%&oFh&l=87Y_B_w))^!l|J*RXCW!@^mD(^m%tCA}Gt6QA8VKyd4r2hqn z2He=wR04QD2U~qx9+ak?+SyRAbh&lS+qLtBJ+>$#4v(R+ZL7u&Qz$P>=Yw-Igqjs2 z4zLoU4-Xf^LTw*M2I;u5h-~~+;$&w*C?#;<{m}UpWO9ptjcr8_he(eTHs6 z=k9s}HgP~cRFdmMQ=qevcB zT!=4uWVeIMv|m@0ZDY=K89DmGYmm0~)rtdkSSSoen&iu1;3a$zSzReR2(=6c9bSJ(LK?Mn#Wx#`Pu#4qDF*M&WkeMhi9T;ie-(Bw4Ac`-~FMu;eNz7-J25Aj?KGX|^O zpEx{G%n-$z_4))?@!Puv@HwUtY8c|N@lWbSuio7msqsq;nfj+iB4JJ|Y`3fOVd24Z zq^KtCGG;=Az2$TRL^(yh47KQd0e6%`57|pgfyq%8@xEfM;sOuq%4!PoiF+)_d%SW4 z{CMSQ=NH0qv+l#>tsy$pH-};~Xj#7We%}EW;=z*0*$XU_lj05r*fJ)PxGsx+^@d9W zh+)dm$SW&J4vqJs>bar2=8$47gO_S))-!Pd9T$@=g3^hzS!&yValZf6dH?e>y#Ssm zr9sshH!CDI*Riht_92}_l||JBK4pmO&4FfaXs{VXcUt+{AOTdI%i8&jk}l6Drm1af z8Qb^pnLL6O1dt=}cvJo)p#XHk&lT*}yk2@nETx zbuVf-+}k=vCcwks$TVy;nmNzgJPtHwqy6;2UMA75{n&2bCeBk^_6ZuWyH)$K4H^*V z@ht)?YJJITbl6ZW%E~vX$2pfnIyj~XT1;}sK>0_cNWQ&Jmx+vi0ow?MA!2?jy-oMj z>$GTZ^BAe7vV)K@mtV)e&-k0>rnLt5gqitnj#6pMF&)XVbnX2d6ZcEP1tt1oq7 z+!{8d6($5K-U_sjMxC!{a>UXgLoQcmE7Kyyo^#gRCPXy%OBr{!!x5n-=I$9`}(pdwu01wgN28{ 
z>1HN)dBZRu8p^*{0Dq;M$BS7E{%^v=h|T9|Fh)Rzbod@eRL|JK92NFbAqDR5uq%yt zOU8QZkn9xI@C^Rl!j}`ZYYHVV_BSq#bAZ~ZO~ojVhX_E6m}?w8|Kpu?5w`dt_-g}z zJZ1`;oM{Sp8XOG$SV*h&34EMw;uO_5NN{Ap^ST+Foq|RHQbBx%;(ijiA(0WQ`L57Wz z$;*GCO9N3TRlNFTp`nhAIy@VBJO@4O=pv&gL~IN7p2nfU$c6tg7zsfi8&XztaBzZX zkZ|>>r|J0Sm>i5oik;{O=PJh?Ja$xux%(WP{3mzmhiv#{^I5@Ziv;p2{Sf z-`r^wPxJJf*GEUQ74ik6_2DKhX}^gv_Zd419YSoBDJQeE2{vj=2ww;rqep!{=B3v& zXlI(Wf2uP|`sq(mTA26z4eFbTx(eY3G6k>>NN|B=c99K3T|vP43Nr|Kw#+RmJ%Yz< zGB97|Zd8MUvm}3)eH#X;uRvqn)*U^PFD~^90Z*lpr^;rtEAW!7pkx$Mip?5Y&aN;{ zvVxY;RWL3YvfH2(*DC)6q4f#>{4_Y`8{;=mJeD70cCqxR$nm1LIXy|6sWO_?;olxo z_*6;FOwrRDmDB^y_$SKOM?Z0UKJ8i%EQ3gaz7;w5CoywMyrl^72ZRU&u*%ae4(fD9 z_-4P>eO3gr>s|ZZnu@htJ1Q-F@Yp!>(@8MY0S}=9u%vorf&c*lGI~W6GkNeKE?M*? ze#6cFH~ePPkFrk4kbeL+o)zI>EZZsoY!8 zJ7RM%A|W`rnIFoWl@Mr>o6Sk?Cwk20!z{-Q+qdhD?`26Hjb`%uz>Rg>s!A!@1j2MP zNv~p~J$tgibm(=6XcDJau>lfH7D#?_9UA;ZD%^rgfHczpQJzRsEGF=EDIMehFL6E9 z#ex<$gTE%Pgsgj7gsBjXVYR?r2@&HD6g6A=5?hrP9{FVsjXnmSq|hG#gd=c*e0_sh zXUnqxd5um2^k}>-A0G)eIWaytGK)*IDl$;@hY8Ej_`N!3Hy9+g+v&AXmB~z!1phId zOMQtgP*_5df)8e?ln`A(y>gifMS$^pR2g9p|5DxX<8gPsqxs=@C-@L+Imr{F;7M!< z?RH!~ZbhRjfKc|6y&uGmMkm^O+eP@!=BFDQ%=&dH3DuUj*teZyG-%-bN@(D*ie%|$ zXh8FZq!5)bc-QTwOOo5Dc{H?mA~lQB9=9Uh{bhQL?aVVw7F9tzds75b%;d)C@oB5v zwI?ip>>UB>)r9^>=HP==zDlRHnDk`J`4LiKvVHHa8j;KBmX@koRFR`ZLcgZ@SfLl! zn;)$eYo5Gst+8p5-smf7klws+%h9WMZxV;bAoZ~#UzNQ=t83kFy{TTk;I%e3RCDq^ z21yj*5s>K2KVB|t5~@oH-HqSG~|mF!~2pqYXuw8&#fEUVafeD@5)XWRD$v$ zL=f0EUFt?3-5&;6jB&o?B{SXI=IT@T-tn8cjN-|brY=#)Uj@NJecUU3%sthP z(p4E874CHjX>3$?DLyM32o@F|@o(v&@DSG2rfoTh<8{X9@q=9Tsu~_Br+=ocgdRFI z9E-g3MfL?tBq_@E)ocBWiR|F7IN!fl zqg2yIoDS!yWoKK7%$L2F2RQZWPV5d&wk?rAcnt30gO}}KQScd$DgU?Pl*?GvDdOq!1H#Em_XM;j=MV}boJ_w^K(7+w0wem+Z)XlA6W=7BX0TRMAwiZt}t-PA#rhWF`fJT_#!JS7Mn^kvAFx{lk$mV7~s_? z)CG+h3}>&wX_2iL*yjvoAa%UvMG6mtHQj1ye4+ll@9>^4srfxCQN5R|D3MH3FL3DH z?Wy*&qM(+Vtq%^*uXH}yfB#^~M!AMhp`zpM2~7xKuzVO-rOcP?s3&>BsZHZzSkQv` zWxD5SZEk7#$Q~Pgr^Ydv$%rZDp8s0$i+d$+Oe5dp%PNc|oGMU1DkA}e78HsTGPcw>fUP8km*ndCt;qRM>b zXC+7>NuK?<&e6<$1%x&R?*b%*>a%48lZWsTmUIu~hGNbxv^@q4*G;T?^Cd(6GT)~= zL4(qJML{I<)XP&sy6v$hx!MEx!Uf@uHuc=cGxwDYOI&9CR2==^l`z)Y-34A3pvr#p zFHp7jV$Y?DtyZSZ=}^ZAjg&qi-0Ptv{-2@bW|U$=hk%~XY^}riIuegO9l5jf@$&k# z)=`1caf1SLR}hordN{51Z(JwM-?+|dER<`!MGbQfS;#F6R4wlz!I)p~t~1Q0vr5HO zCS&WhE%=J}A+7}6UnxheJPj5QT^(~=B_=vF?`!!qLxOtJ0vxzHj)P-bBhh*G(HTkb z)s#;=+t-Em?fmjmD(oG^!|WClv9M8ooGBIKmvIm=%+kE_w(iRVjSTH!0_hT7WDdWP zqBmhscu$E^;>6~}V@C=o30J|fI-fa`exGJ%L;8wUYI|H8^#{hU!GwrePKpU%M#x4D z1G!fML+$3}G}krbp!&Z|J8dqSR-CL7OpS}PC7Ee_4haaxLhA79!uq(isdtqD%v%P@ zVu7WT9mWL;Lv}>i%}&J@5da$9$b$m!@9K1^j{;LQ%Rd_Gaa4s8UOywk zARvHp8u$TYo9k%ftNi1J=rk#cSZ;%p3$>jur7>3%CXG zqPXBaey1S~D}`zicnF1PA5p7RJ!t21>`9tI{!9PU-QiZ=T{o1&c-4@h4`VUh0k-%; z^84m!rf=5H*6W50Q8zNb?vBW$8P#`;_L#YpNV842U3w!z|{ z9kOWqn5UI^0|UQ7WvT3F($VBx6-XbR`Qt;Lor_A*`&}Rwrud zmxl`-BUkfhD^_GH#$PcQrB57?>~Sja4@Mp>?Y$6ncg7#ueFHM?=<mj1n;Q?j1Ox=0IFfZ=ypN&9ZNQvh zc3WlawQhT-%DvU$O~Cg?d7r(x1IU+pf$1Np8TygL&vl|FbQBY00K%3#YL7%u2yy!4 zm`^UQXs!;R#N|z?H~8r60wyT0caK$`6RC3EUd{)b zre1VgwIJx4&*r!?!RRu>nW!BUka;hd6t-`TV%~{{jLMva(1ZZLBuI@H75-MOKWU?b z*@}qWj=ucqlj{=>Mwo4_LjRQHgT!dLCMPCXIM+93E$(s_(H!Qn*X4XKJu)S-8pL@{ zKYU`rKT#i~PvNS|^bK2$sO7{mEYd_C3yEv*;AuV(KZ>uiiGKn}g^bESkqQdu+3^N^ z2Tiwi{ByL8CDu5hwlIv#c`zxJcM zh)k^fm~d6}CfEMyLY`=nJDy8*FBx<<8aXM9RnNbH5i_$f559Fb2>xrjAh~;};F~O= zYvA~QO71v_j~_kgs%s~SBdEVpW?ysG_4Eeq7W{BZZtRyY=}jAunVY=|+n)vw>_>|u zHBjSGvFUs=+48%$o=4G($o}ZAy}!4*)KlRs>qHj1Fh7?H%Oy`}3d_cT?Dfkz2_JsP zlCS&4Fco7Y;fjf3et!r*syj^LV2(Cm>?yuJ@Xo;S9{bTHQ|6J(20n0-IZ9Bw8ohUH zXM}4NjU8rw5|q*p1}epY7%Ljw)On{xq=e5DTCV>J3Rja^IT!T@;$d^m4+bgkPo9IP 
z_2Iy~BSunV?qf?-a$KN3%w6S;0oj7|;e?A)My@S*vljfGQ==CgIa9EH-AY+_7~81! zh#e#RjFC&XfI~jS;d?O=n+n}Iu{!dItSxeN3J|v#u{XoBFwS|Cio-zZm;I9(`1pvW zfdhU8T z)ys!Y25wm2M9%r5<5E<-dm>3Wqem51Y&gv30#|isHlT?;1kzN4S3=h(GwK6WY;E)K z6x7-vaFbhO+`1vb7h=;B1o-DXLT8kal)_b?0z@z=o3Qk|*vf8pc@z|1z$kXIoR0i8 zYnlc(c{f9=p6|oztmb$7X~^(S+eaauG$ytF9srnf^0w+ZO907l!WTVV3mDV2+SkVV zS|C^|*)Xqb>wi;y1t-M6yxX&;n6tL1v)5$I1Ui>Y3xDcdqLI^|$$>yj6d*TbN0)-| zp9Vk_F(5mAdF%k?j=z?puA`9Q44@bkpFO&GD<-QlVqmS24RNtcSXphv`h5(iJF?x%8{ktvxXIR&V zg*;2HHi}$~SJZ&QrIC+ouNjPm3&98Q5hTo=K}6NUN^$AY(5>uF?D{M$DB+ndyZY_S z;1m}P#2Q=ek}I>?LGZ{qqrf-7^mH)}k8-l74r~xL;!9SQCM;$)o_T%lf>-@AdC5ST zx&PNPvl-pic>4SrXfVo{!qpiL!?LBE1zFEN=itD5I#*Ij>Q^V&0_Q*Xx(Z6U zQBL_q%YA?vij9Cd9!Hhjhbmkab=Tg8=G#^6t@HAk1b<-8!tC_Foi({C-rWA z$_}3RrvP+Ke(~;8jhWK43lTbVA9c`M%fjT|9M+Bov_9-ZF^tjAW#1OG`t3;|e3c%n z2p-*WYS0kKZPjM4pz#692Bd)c!@8yRvUb3zIfsLGaG@tMEtJEXB_{tF=kM7DW zGmAh%eaX_g;8@2L80&+*QW(E85gqP0SI?*i*rti&CfnF!p?8%MZ)(I z+r;5SW-Jcc+5V#4V?X38;PvUX9DS70d`y}IacIoK%YAKmu&Lf8iJA|H!;y8hN9Hs& zQxE!$6Rn1uUa9`#FC@Q_36-2y=9|iwhXz-A@?lXBFAClj=yd@s*xZAel3YYhSu(y{&% zAOzL&hP&5*C$jB)WD_Q;>#Hs1M;*WWFT9kiVLe&hSTiH|2&U$gD?;hx;t3$R%(gR{x6U4~w1FK7sM^j!8RF$Vqy!Qzf+%B`vN~d5QNHb9n zIS?}MrEql5)>5bgM`EqpRNrvw)p6KL-$l-pGV03Th-JsmJqX!zCPYZaO1~yuA2#bC zKT=63Blk9q(1W4*g4Dm16#o!!@sM@jw=-JVZauj9UJs&p^Qt^=6ipru98r1Zsz@KK zZAyLnY3L5?<%Dvi^R8u4BKHJ@jhaL~Ka-+Jd_-xDjkgMLn@Jd2mMI~|t+PjLQ#=o$ z63?v}C%-7~*AA+xo|vHA5LLa3br39uPKlaZDhuoRPW&4mIQTKtf1S3X8vLWk+huar zt~@7sqpged%`VVa=LF3O#RndTPNU9m3@^L|FetOQT7m{y)z9-m3()9s1~;-ne3@Xu zxv%N)qLM(ONp=OdXCt&02AbUAVxQnB&8W1QLc0O{v$QRt9pCM1AG=*bJ;!L76g&+` zs+4Ldk;0H59)DZ?+ z55$cZI?HCb8|uOeHT155JFnzredsdk{92Qy2(*PVt3FDAwopH3i5?+BHcrZ+RR%-d z;9^w)dsC5RkVzN~%s@DYaLP(PG-kfhV{|APqPR3Na~3x>n*S>TLfIU@du$x++iIg( z9TX_Yjf#_guMJxi+3ybAQ5;+=*2I4#4v%kw8A#`R@qPMmkoqG^>B6UuzGR&@-n}l{ z1VY-f^=O{>D|@jVx^zxmeUrm@wr31m9|r87QUti|oj%`nd_W)vEeUSyeLGXmjjJXO z;@z(JEL0dh{8}C_*fG?t#e%6KU^X^=ZC#?uP?g20E-C%5sqP=H_!7}%Eg1d~-(+p56cnrxjJ=Z19_hEW0&o}$ML*w=zOXd z^XT0#8Drb+F1~@0il(xQTO-l-mx&4GV4z%)Qr+thpeTN7)pVLX5v}_`+V`k)*k9={K5c5_GC0?iu|u(>vJq|H3a(`R*3`;>ga| z%MZt_3uWBQOBOS;6{h01m{rpXbxX@Ctg&2whL$oyY2&lb6=jW<_s22uhz?E@1?k-5 zsrR&3=%Q=eyNmu_XBSrib57)1+H#s)klxOh2uhx6w%|fnv=*r2-;_L9SO{mj;}D}1 zmM7{kR0Y8%x?PuPu5j_XwcQg&?|>9;l|@l4%<+?D+@iOn##6Z+<&UK;BZ>1_*7ad& zC#F%5Yol6*e57y`7J@Wy=2sY#D<;Q?3qHdCTV12Doj&tu3JlX=x8-UYyhrGJeG{l` zSrj>89OF}5t74$l_avgE8l=3nduCLik|0E=e23qd_T#zm(#Sfi&;6ct6P^)Zk(^l# zFNT4$BWE@I5=1SYv<*C3)ldUE2X5n5m%rdIUQq_Ei^~jW$B%ynVoCei zz9{F1_Y`yHs6h5Q0}G#yQtwmXA>0Blo!qpam52&F2sU7`)(g*?p-&sB$)OAW`TSU@ zb{y2c|24(>=i0Xl>iMm}Vz%Ipr_`}pa0m`~)F%IF&9za84I$Ta1F+tkIYpfkl1vVx z%-ChimjmAmvqCMLLmR&~2-;W>qe2c?*ByPISY`V}_ml$0Zqr;RX72=1Z$v zJnW)B$rX?WBrl_*i4&wgwh}ETgFS=BhM*2oEx&jd9bRoNFx^PQ*|&RB+Owq$ppD*H z(>*o-Z6@ab4YUEdSSvxVKRf>eS${9%+*Z{OR{sDXgKE%3^gLiZ4V$L1z88AQlWT|~ zfu46-$e}wZW#_)JQdHMVpcEkvY9Y15Kbg1AAlaDM;-eiLEH{Mch^WjiHH8GZOEtUP zc^3A=9mjU7bKM@xq21Nh)#jVq9Q88Xp^pnFj!In=Yzf`FG4qi3o_r;Ae`#<9MsDsn3e zDp6_fBzjtzg`}vETI{PfxiF2pc;R`+gCCB2C+zpMH!in96!z0kySDQ(Fn%$KmLiO~ zVD8Y)EKGCigl^aDOk0rd>Zh9Kkf2_OV|{_i&f^Fa+tBb~2@@_Nf8}$Ck%C`lnfIMS zI}lD#yQ35Sv^^0@TGfRWkipyXkMt8;HgSt|{aye!gq3^Jl9vwxZ{QtZ_wcuuvEV?jZ^V z)rL?HUPiq`lUB(aK#&RtBzqBs*lVCgQ6O z2Q-o~dyyBR(ve7EMyfYG%u?y>UxwZpq$Ka}r2gKsY(KlHG76Xa6}$u(+`&r*DSXN! 
zPmHKRGjI>FdZIm|`cTPd*baOPWA5cNNszev`$&|=+|LQUbBsm84dlEIqc5-?@2v&{ zdZ%sBqyIGz5X>Gu^#mG7WCWiBwveqB_4FInw7~h!`jD?cg5Wuk)so}XC8Uc$%OLx1 z)PkB^>+RGmD+YM$?TKuHQ}boQ#8Az1eG>ojh&ZOQom6JEGBH}@zOnB+M0MV?7fbUe za!qq%_9=JEzb!k=uxpNgLTGM%xYtU&y_184BQR5OqM5L(V&n8_XXSJuMHqHqRNNcY zn+8ii7pnZE*FHaqV^{0Td-@0yLGk4N#nM~kJ)VAnEPk1<^Hw}mN2C36B22PS>9Yr{ zTT+10&n501so7_}T15r2`RM+CM{qBIse$ga$1;h$B280`r{>;xrVAJ(64<478CY)1 zQ=fA+FQ4^hx1lUMJ5g3$%WzL(S{LTIxw)p#vHJ+1DpgJvWOQHshUZu-grWH2D(!WW zY5;53w0c>t(n7gzYYJKgjkIp}n+v*b$}vb*`OGdqO&cwI>yPJ?TvUwwoJd<;R&TvJ z63q_)K+Dn=C`xHLpq7(O|D(G|7JAm5K}MusdL@4P!GUF_|EN3<%D$OuCKXOtnJvzE z%H8}v+8nageo{Egi5OL_sSulZGxb&N8MFMjwvVhE>QsSdo)_OsfTL%$^WWbHGUPgt zFuqnzGexvxIjvU`WxB0&noc#;U8{Y<@~+B#4dbJgN|4KZ$4IL$Xaat)>AjmIU>SI0 zMl-NqK8g;pG4V#B>pR^CJkXqB31^dO$48LjKKveM|2`^_O;{#*cMXRWas|kZ-@E8^ zeO1(KJ4na`F)6})z4n>a!)GOp-!9@c=DHE7GprptrBtJ~-?5nP?w$1Z8^=KQXN99* zmJakd#34l)W(rL!#%YR&hEXBJJ_z~1Y{n4;@*Dj#{_Qb zU!7?H9y&iUB-zBlLTZ4%-+TY>>LQg`Ow{-9;?pUPI*r=8MzCu4`3B(DZ~FNe4BUzS zInXA=1d`S7sr9AX9WL;}pdg;3LyNX|1NX`#SY2aKt0xwbF5ZT%uq}taP~gJwCUa zZJAkqeP1iGB^|8k_U7eS>gDFPpZAaJ zjL-$OHiv7<*cFa^_V&2b;AE*b({gJyhMSQ9{Vrd~?;`K_Nk+lLcI@!1y4o2zP<`$U zNwXYGTn%+w>dG-H&h-mLD5jLzvRUv?vN^PznZ%UB^&E<~t#B~HzG{5pd=Cn@^H+WO zQ{bNa_gnnd-S;p8sX~bv3qVV?>Si$h11HdwHRl5FO5Lax!AGb*$u1usB4JZwBGJy% zdi}jk5{NQ&vwAKAt@RS!3q4az6F{&=R4L(Fu|Hv?`54HVieK#~MJ4gmf%0ig&4Y#vCeI1odYr#3e0^I-(wPA9kz_9UNPnAzA(Ox2n!qk?*-{?8CAf1v5(%lV*5RmR}M7pH=P|_eGU55sxrCT~g zKvFuSyWvoGEZ62sPt=|C1B6 zYsjGW_4R`95rc*8J@@9hC!;vRSgzDjIKV-2fs*nFf{(foATvjENP(GtDLVpxE27QQZ#}tuC#k-s){^y!wjjd`wkP?%b;4z#UcdwcnShK0#3wh z8qgb7TzPfSHWKvL!95Qkk?Z=qiHrGBHH!qA{(-Owf6n7`^{Id)sjU1uP>wueTaM59 zalv(FxVRa{rnql5l)J`>&`> zZKv4&nIFthiupJSVHn_KQq7maG{N}f>Q&hkiTz%at?)UykdO3Vy=q18k~IAhzDGd1 z(7D<62lt9pM_72x=d9g+Tj)pvce>G*!mF?w(Ru4urE_lahKzP`x`6fPjdx_@;~1eo z%&?S;V|QYJSJK3qpZr&aCgMldEbF4gG!Z{b6R)#Cg^^?B*vepJJ8|L$&c$9{vcZy} zW!(-tV=D55!#q+Lri;6J4PvDiBSLTYU0;b68+1nq@S`j8o0XVFvtHH_RGr0njvkte+5czqt zx?nA!4l7$?YI5l)%F{k}X*xivY?>ek(yTE6{>pJw15Qw#7X3meRlqg4=KJ?t zn~5UTHXnBrjF3*phHA$`5rELZY$T$l)Ed@`LEpzAa`@ArL5IH_`DN&F>T`;auhN9orcRX&K2s%9Wc$a<22s=}P2 z(0GUNSE$3S=Z`^#MO_wA8m`lwwF#wfivGBcaW@v?+{N%DJE^6}{34ZTvOQe>lFw|l zz3?O!HH0{_{E1tVhi*jhcB0NQ=50)!`VMY+oF@IAtKTjaRfuXGXBtYuACZaPI6G!veKmv{PR#wRBMhbnG68|C= zpteFOE#06&>OH3rTCi123ncvgTm8wlvcmgc5)Y%I&i;%!>qYQ_P+gj`uMF?WL#MaJ zD;le4oQ9NY{F^5@%yq$ZdtuidqYMmRQ0iX&>O(BGmDV?x+j7RQi1#c8TWI6H=KE;4 zj8!HkfWp3XT>k>6>!XwV5LLhHrq#j=$x;k@wDf#Hlh%`gQ>dtr&cwSmqcF^w{0^*1 z!`ux)Wfg}jz;!Dy6=pzL09jbQwDXW3{*tvYQ;w=U?p&`*jGj7ImMT)QG*1B}{ViO! zm!;%#tNW? 
zRtbvdM1BCY8lOIYM)3Cb9;-hXIo`~uB+w>0ED}BO=@n~Kl zeNw?9lLtdipVYa>uXH@Z4K46#(IKOy`ecAMh4AVH6k!&u`i%Vm#IU$QQQQ>p`K0by z8Gtv2&G?F7(W_*KTD7MlQdQ{DxdogghXW#(K&a2h%X63=JDc!$(5Zu?&~lK^eYpQ;T-^mJ(Q%3aV%l`p(CnNFwf4l zd+PL%Cb&uGYHA>;hNj)Kb#StQ${nE%`}ZwxP66*~PoJn4XWelj{1bN$6IAd zY{r+J&E?e5UJfESlW2_Dkn)!&WPiblpgE1YZuTdo;&BLo8R=v{c?7t>-X9f?kT`ap ze;8|a{Eje+;ICaa8Y#CuE=$cmDS~ibi*H^RJ5fD3YCW%}av*}7EPiyYmH;cO&UT12 zw+(z5lZ@{l*X=kuKD5ne2pq1<%=SW!Q=_A9Kp7N}9PO~h43t5--%SDJ0qI+VyyHS; zX$hg9p`-8*yF(sX6qs@nx|neSDh!yh6%MF{q~a-!XeMPrLhpZSn7^Zso9nSuS@zyS z^twAadEbiH=i|%X%0bE2^jDJG{$e-hxij3e#IPxkM6}s*m=7^Cj+;ooZ89eRLl;Q% zO}Xhh5fLZ~qTZ#cIfe=9O~?hXZ0opx6us+==MEuK3e~wUR{PRj;O>xx_x zA`&RP(0aX`VzXy>j zWH}!Jh6h)yLoj(MBlfeCVHnag*n|k?u}p6f*L99R`As(m)U4WVa|#Ed`f}5(_dEk- z)!8Np5}MG1;Fr7cX2-$BHts!wz>7=(8hrU2o z&qq3j9bM;wZuLyGHgEXziKJ#|5R2!?|{lmRuXPXCkM1hsM@_k2QP|U zWQ7?fy9BhW-n- zI>DVroI%X`zAM$Gm#_IhvH&zpo@ZFGa6>ijcV6w;g(*L+KxmQ5xd^ZVb3|pMQh6Kl z_2&ULxc|0I*{C^nJ8ZO?iUtzMo%~n8R&lvM-^VsdI_hht_7f_cw4uZxL4B_;`NGv) z7QLjFLKa2vvlXz5L=u0y0_VRsTWK*$(}5RLad97@)&9?qtEZ>RQXQgh1em3KA*3AL zDiw!?5-E=ZwC9LyKp;7AJIzBt^yGCc3J_$MltF@dHnhHgx1L0x$%2{}QaxhxW%jhb zVFIlK!iX&&&YtHJMUForQSjy-XuRCTV6}& z1XrF@X}cFTFXiJO7Sa)G!Nz8$iRRuWYw$Yn9QBr|~i&fVDbPunr#?P?qBRlJ^S(IMF{F6@U|I zPa60Fl>F3$f*pVpX-*gz06(TCT!AOilu7OoGqCENgh{bF=B&SUWeJ9IWDyyn9&_W8 zF8$d+^4GH-2;P}< z6e`Tt_Jas2Z&cE*Yisz3BM~9MZ^|RN^NtcMpEHw(<$i|*yBKv~$WFuSRgh*$e2E|m zhqa$S879j8jq`eoO~(QC<51D5MR@Y}l7=L&W*?Uvi6CMoxRq4l!L<&0y?rt8f&G6q zV&Fr++V4sPB3ba3iO;$SZ6z<)jF7JR+mSXK7cBxEmOUy2U`6rf`0QWV5Bo8~q5*Ox z+#0ERS}IuV`J?q>%Ghmec)8k3O5i9t4ME7)EyT?NTYuNbG&4RbP6Cyb>9q8fo6av~ znKL3bm3}4^Qd?oUr=9S<$pY%ku=j5$cCVj7qI2|&8#C+q*>k|PZ~SfiiSP%b#rm!L z_pH(`>t(ev!z487lrXeHeb^&{cHm z#|`t+1%44rze>N90_#MFJbpMNgjV5W!)dY{1715mJV*pRU*v3S<;1&O8NVw%2TUWV zHoZfTJ((MgKQ2RFGQWt-R*!V9pnsMFbWIcK(oh|zC-1xV{#vdq0|#}b$d=^wTFQ~M zo*30RwMhFp?e&L=c=I_i$#;f`M0MSmvBr0{5wR{x-dUPGODLG45TMG6DJ9 zKr_S2cxmGjMxOl9=ow2z6yMqdpHk#@ReD5F(kSx*{+JV&A$$y2vbn3ddKTB(e``i{ zb$Sz_U7tspE!UwvS%;DLz~n$5aq3K24BYXV(~=F`hcj+xa__tw52SZNaW|XfDKz^!weG1gU zBmL5dNMbjG&pF{gH~Ci3W&mJ({!sg+89+*?<%?hk@+kd%G|gy70r~%7iJl~_yA?Sz zAUXZzb(Ssime((pbJxh4`=GL@g9Iw?{`o2+ZmGzDew=g_18^vBGXbc4g_2r(Hw@x* z1oZX)XO6)CE|(&d0Wry{k0(eq+Kio7Zf0oo<%7p6M&>%_zonpuzh$6rqd!=y(*QM} zG_cHiUPC#m0Xn|P<#$*eS$Ft7U6eC`6B@E{^BFT`sH04iYHVFm2r{hcitqCw=ecyD zz*GWG(G~C39<38rqC4829~3<*#WkKE zTf|C`Am#Z|<$U-}%74SsgHOE1*nciIZ^K$DdiF)_bRsl7cEU^}o{ z5OKnj{{4W=ow8c<($Rjn)?;~6EFM&+QDXg(_ID+rQg3iM?>}ApV61=slUfx3bDqTd zfMpFRhy74sKWxg=@ex(EdJ*DAK|H2E2y2*`0GfW{_k~FssAd&FfbRhWxKx?g5D_Eb zPBW7x_0Dlm--RtV^UL>|YFA(2T3-7w*jx#lA>9A9f$YdAx}l40KITZUN1i+R zUEx0W_Y4+D@uq$$ZGF4!6mubw@-UG}#7_N!s`e*WOh7m$gzDP4M)0$C^4_hP6A4`0 zPT0X@n(Wo668Fa+D#BcrTxY%>@%^iFC@F+OAX{XU_hEtJxlmSXRZG@rsu{qL!qQ+e zr}o$w^aW2^KL_yz8?UE@K~Y_R!3^D{0-0U=ss2Vk))fB|6-I&ogGVJO1mS89IYGVf z07;GV*Lp4BSeVe?_UOA$cchGD!EVTs6Lo7g2YpTCrylMCq*&Mo6(msGExhlLK{UvY zrkfsaa~urm*)-KJ0_c$2wNi1`dY_Or2KKX!p0E>%KtGyHYhnLSjf5Cb`3MQwKzUzb zN6IYxy=BwDlHn-K`V z0Lkmp7ggntgKBABvCfhRV?Z~Gx`6bS@>EtR?70Vs;K$BeL`k5eCDU#5ZNB8qZIg#@{}yd6w!jGl)R3q`-!J$b-7JE{e79?I2H=S;Ws+ntwKAiRzD-N7*Kdv@_#1PjIxH+ns1@)UFq24_Y?$RL2HK;Ep`2RsRzj`D%XiT`At z{O@NKEx;On>On^@XvQNMxFvwDq0wi?vE>#jhAd+@95@Lxozxv#P|LhgmxF*Ax|)Jm zTTnK+r7eX1u-lR+O=q zJmhS+etzFRI;1e~^p#Nt@+nHVYCXtw^LU(QjVy>PSUV|1QeH8fOo?@yn-T}AeDk5A z=`XZ4l1AI9eMRMXYLm+Be65sg_D;s6)?rf8P-Fh-9Pt8NrbQ#t%Bn3#Gxx6*d7Rt= zdR*6f*rtrdDm``>r2B8rU(`nD#F+ipI->q(9fS5q3y6$urOw{mN{H(Kiz(CknAzmL zIJ`*7^)TyJ_$9Q%Ull45gq`PO4SdL6sjL|vU_dz(B=_nKpuu>(O7VW!p+8skm9zzj zG{dy1XYA7v&kGn&(uu?V>-R^|QJh9&7p3<$r5ZyLglXd);YW%9Wbd 
zTYlodN~a*{b@ow}gVsNYxWgL#|NPnNUZ2^>v4E1dMP}T+MCx7db{}3%8?}l))1?C5 z4k=8b15PgX=UxhI%HXh4ap5(PwPz`S4Euxn;f-??XK&qyKj>WOs*so;IA@&Y<1 z{z}s|2KU^x5GX5wqz&wQbW7Ehl$xbkEgRvbzXRHt059V5WWxg#E5@!#m9S1S0-*De#wp(GrGiVe5JI)PW0kzx7p|pxSc?Ja`m2cgryQ{=3uGAH zn0#l%#4zq|?4x~hM10jzCX>~(?WzfABG8TyXq4d2n$OjK`LNbvKlr=quBFnfH|V)5 zlBV>JSPXL9x`mQ>K#M1|Zh!4J7ERrZ6kKEY8%5HvNsR8}cvcOgS9cxQ1*v>U;#Fa2 za|u;4EfMJ8cA5l2u^qHyFy(*(oEqI?yW)emAa}$MHiFxN(-yav2h=4$sCLsV^Igod zG(eUY7)rA1W?o~ksN~*CSN;BXsIada#evpUVyHEX@{3vy!)_kSny#%>|6{$fQ13AS zduvYp?9v9+>yu>~er`OVI|<}98}jk)C&y~8OdSn$YuvQHz99sA>yBRz-Y zGv>b)KvY#CH`9p8d-4@#V2+0ZTAh(13 zr&nMUV~t~`18Uu##nh9|Ittvo17SaiAx%t=#f6G^9}>)jgbZ=yZnF&mL@*KHdc;y~ zC(0Tg8Zm7I;}oO(!2LxU{1)I+#t3I9>neJqRIiK9D%o)2ETfPiCl8VqUmpwmz65&L zrd{f|eyN-!J$l5N{*Ids!L%YoPE*_ zsl;Tjcu3sMG)rq3s2&~?f!nfO#<|5xuelOl*GdkBpQCx9BM5LL1Uu*Y%crnSnhrnzr<*V2Nm)2qcc zmn)kWGm8QMI1^!WH9g;JMJ`?fBb!S$TfN1r<3kfVg63Lt#-67mS1!Yn0f0^0hZ?$` zGkhFG^bF3K@g-L@zDz!V=jq1v&tcXYc|hZir9qy6=#(SFxT!gnP9>LM>67Ib?_npw zs7dsbJOBWVE>pl7P^%vIh=}JyAQkx#Cb+=(O2AZ9;~zEvZy$8pR+a z(LfkM1t27T)X$bldnxLW@cO(1Jaz|zqZTkMHJ&`nK&%&9BYT$1QgIepr4wgdSkD=& z0g7_jVK8p>a2-$+A%OnIEXbTKfX|7V{QfwpV@wmF%&3Pxl){A1d0?`n(&zLwj0qB> zW#X4x1gNfq{c1(k0p1rx&OHMv5wx6@t;g#G!a9hnH@52X0|Hw z)U(p>RZVH4P$j1Ih#ixYf0zSM3wG299RaY23=xFRnt;5D$dFUmQ8+3XnFQ!v|D|z& zG}WqLPc{X8m#rH@T2K@WIY_;dUq%)!^uN$B+;lJep!qKuvH-0G9YTk=BvLgs;USFu z&{;qSB*ji*q0XyoR*u9P9WJhU^z+zUXAN;>wZYGjVBUmL-BnlFoD&0yt2$Rm3!)S= zM0IhtNiLgLFHlSS((_I~fmAnXer&*vGFAqv^Xay~T&0vfGEs?XmtuZ!E2#}zCK!7= zPon&a)(BA(&@o3Ckb~j(_3lL<8(Z!@4QBEfA&;t5s`FmU=#UJRZLDoOk+OV;wp5h1 zS61BQV(C^D;3yi178oxqpw=;OtRrwGVkPJMDZhT+s#gX2T-lC$bCJI8ycG3D53wx@ z>=J$GlJ)4TSO-_u{8LX!05uQ18gTb)8R4TgV!0;At&T9_U^wnyZG1)ju$n-5zN|<} zxo22-&{NZW)e4j`>rd{ZzX2o6wD;A3C>}_os~Q4LVi4X}{{T%=8Hs)ep12uB`M#YE6Zp7fI{M^gznmCe?grE>Om0DgccgGmvqNP@?4 zrX&G6BO@s32H-!)C89~qZ{o?tNeTa>$N#BLP5`Cm5!oB}I3-ft;X>biajZ>Q8^fiz zks&#_Pa(Y0GnfbkX%foGQ*b!#AleCTb33&ar|0mtDbFa^I%^pF<-P~mR+1yCf||aB zY06#3o;H^Ra082bHhq!FNzDI9$q77&8|zvwd*$Yd^8t!MS)Ixa@dIKLY!&xLRCxmCxVOC$V1{6grf;mk29V41yGiq{I()Q1 zj%fZ)pMM!B74Jv^giW|iYK*jToHc?6Fz>1TncL)eT2(+e7iin1oe$`8Ba0NTSAn3* zqYz!%m3sUqEFLHhbslw_xIwXnaVS?n&$klF@P;7@a8bc=zzoV4GM^Z*r*L6h6ah13 zs)WXE?lEXxyu3R0aNLir+{T8ZjTg9$r`RLCXtJSUjaVe2W#U6uj*G|_KwQ*Ap)b-{ zPh;bu6($Wf`JV_o6|sw>G+d1pT0Gvd(wJ5 z-LlHV>VZyO054^(9-y@{9Z2hOE}~H(r4$KiYp540gCX>$XG`{+2t1@cG4HeIB9oK* zs^{L;Qfm7uSK@wgT22v<#m}uoT2h{v@6Er6DGD=c_Aa)#7Dh1&TS)ocK|0p~>N{czC-Py!c4p7&%t+qf%P&d$GoGImC1* z#dRb$Yr9BVHz4#&;~*X~qnq?MLg-MP%nr5$OCl1?i!I*iFbi2$g`7JDr_&iOH4dOb z*VH6G;Jt`2;2023auSNI?iyFG@;2eq{Hpy1YCrjVN22)Uyr_wF_@-r@!bR26*;zV# z^c&Zhl~_8F<8CPXByc|}buP?CCx3*iYvF$_t~;!C7;KHE1~~w@*XCTOF@owY|K!Zq zj>&~@PB%x`a~l52txO^K!q3q&OA3g>9fcwS?XNAb_{=&-xcgxcrQ4Sl-<^PbGgkJ) znaOi>A-;wz7@=o26{70Dv=*@SSQPVS|I-Z!%16b7;Cig6&CL!r1KmlS9VrHZ057$} z&&QKxTpBvsvH}jQnG?V5(fd%F7AnT;^*@fX-g-nf_<{Qan4fIPI-Lu_SLPH1D64Wm zTg%#1^NpLEazFF3A|zl>`+@zJ{eHHF(NI6t<@d`0s=A(`Ki>ygk49GzZ^+zM18#~B19w^-8w`4eb1yem7-#DPi3_yK zU;mU_1Zt3rXl=R|sQ01y{o4{3u+@N^37Z?^q#&`gao}`jX9m3?V-ow2UVM(A&?nN6q|M7#nsT1f*#3|uyV@7y&V5Oo=5XF<1)j+ri<^V?NO@zOE=Y>8H&M5f zGxGYhp8`c15vF}@jtn~>ZNT#$q>A`0fUE$h$|cD~7(j(~6>FmK5PaE<75!pMh2)R= zgrlAFoY8<>n4EE!Wxd*0K>_Ner2zyw2YrT>tD7oDfIu;-Tv78^y*4NS(z<{+#9=~prEth z?Sxr>T-Ncl0XGboU4;uS^#2{xY*Og+t^k$tQ=2W|KSs$U1AEug99} z5SZm*mW8e{RO4fR`T&b{s+zh}tZumlVY z@g~I2+&1EytU&}L>c=k~2KV0?6ELlcq9?cW%M`U+u!wU2d0EtY#TXSq0GJdZATV9M@3>GMN8Ag$Dhbz<2 z#Ox?eT1kE$|3V<0Y(}vU@ewEZU?e_7lAS%rwQ_hDiNGt%%B)^ic4T-fy54uHzWBEX z`u7TjTVRrSRCKzizdT3K{x65P_z8o1($xB19fp)@SdpxJ0B8C-N?9+$ zKadDiIVrNY-5yfw)>=O$vi~4IiBvrgLZu9m>62k@^YuifcQJo&?wSZLiDgJfdV=pD 
z3<*^6r^w;K{Dvv`#UDm*965j9z0W<4!3Q-0M`gVi69t-qjB$!@U`aI;$?`YZHaU04_dcou-3?ku)u{oEftsp>fa%PYCg zgj77SV!(=l=ASWb4cyN5;S5~wFBF(QV#FU7KoPY8gK12#=&?KO^BXBLV^b3*f!_kd`r%(=f_OAG+7C?hLg zCa^Q}_XzQ4$J-Gf)jVAn@u)672ZEJ)3mW;Dmv{W3SjlR7>TDnN<^i_Z!E&6sN)BOc ze?RMZg#X*h*KZY2qm~b*t6jZ)+rPkS6v43X|4Ew{4g6XX)9UNV=5{cLDh9WvJl}TF z`6-}~{F9jGjio;Q@^eU534wCvJ20#y&QCBG2GQcRu~@mQrrt>_)k1pN&DfXLWUv}$ z>4b<-H_v8|8v9GYpr?9RB^cuwA}6(@$T`5z$(`w6arW`t9BdMta~_~T9*Kbc^QK3D z>RNpT30fcUpe*3U6PV7^rK7j$-op1ofqCi4{)8i_)M&~Z=DYiG2Cuq6K?)x+U15gP$@>>ZeR_wX_mryog=+*QhzeyRp{JJS{+I!3?py}tj z-JpS@Y|1BAX!jFF5&9DHB~L`=UJZvHBgmevZO?tw`faw4c?Hpz7vy9WTS7Cj5vBwc z=6~c^4mvx{(edkZ#%I|vu~D%No+%;h^7fA7SojvQ8N!#fO~me0%G~#Tn%Q_x_lovk zgpA_#ox4}vZT?z3g7I$9&BRuOSaJ0b-I1CoqiSD@!NR@aB|VTTaB7dl>|g&*@Lk5I znSj7=ceB;N;cFoq{hb|(GPT>tMmGi8YxiGf<3yiS;dq_ei68eqs#`1p48q%N<8dl+#-G zQ0=f+dJ#__E-=)5HjUrh`%7+uo(USu?vqn_roiAV{iy~J1&!3nyF>}bMSaPq-`Q4S zQDFf^U7hqVh#tk@dmHH&KQr{xws=ZLS!Hz@2#f%Xj!&4|a4d>*BArNC6=bg0SKSvq zhm-urwD`j$>c@6rF#8DN>NW=dGX0?ijNC=a#3QQ{?FCg%c#^>bs?^dT74Lk#8U{&# zmI|$xESSNu@xr6B_~+?bWEo9%$H~1c$NJvYu_nX=1tuBQNcAtRQ?6FQ@6x3SiTnk{ z9xNb^nD}7e+Dkb{qczS`RouexhFl-aV6^#44Qe-nf2KqSf&;oZbEJ|<^x;Y$Rm`fx z+c0A!);E#{{OMsYac|!+mfz*Sntm}8TNL;*aWrJ{_6Y)L%9vdBiIrm3 zmGULCsTIhu{<{%}xXhe~wH#JuI47l>dcZT|s&kDxzYrm9qy68>b;Uwbty zE$g86GTLQZd_ft^fXq|V4%qGLCjckc?HSgZP0x6lwiL;XSp-@lJI1*sKV=Mrq7a+o z{-BS?Hxtf+Jy~}}OY!6oe5d-;2_3>-wn!qj&(BFMHdS(DU;!#tTMIhIkR?`l9FkVY zZ!OaBu{5%5mF}<+vmVCKm7XuCGmiYCgD~cj(?*x=8JY&9NYF8XdM){|s6@@-e{A%& zT?@W8XsO(gubs*8pLNv$F0`RUD$8jRHH*1+yzvDa{U-$oyTt7WBNIVD5|qtDnUx%q zK5^l{K_zGH$Np=_HTW`kdclSw6N*q=ALS#`JecM#owk3aDNFIKHY=|paO}cr8L7PS z>~8JC?wkUJYRK1uy1`CRWQoC2Ue00Usor_4MRIQ&wNfMh$bcpZ9l!F+NOo{#7dn z3(_lj3ky0Q0Adnk!e3vVY}<=-kR5XznVg6?1W4(Ot<6uk&!$03w0!nB_%gftpKL_ka?jsd@Z z@yOIZFn5J&skFZ3Egb#sC*%D3w;1B{Vgh!?X>9kFdcqOZvkQc^o_QSfGZ$AthnK^d zDZjLHXJA(~Jt~7`)up{!TEZ2vURYFBrAdZ;lYaO;05W6$;1?@? 
zUoF2f z-}wQ1`IP-&u^jp>uxw%d!tQCauC)>O4aFaoGIl!1DzU?F-?VRi({rZG0GGIy2geP9 zCSQRre37<4Vv?CIxBDaCB`)5?E=0#j>=8vDVkOrO%yp0#)$eS}}^kng>MtagbjmSsNEJ*V@uEazR-b^UH+kUG9d z@zZ{>zOf|f(-O57OHGEqnM#g%98A{YIGlgPPvOs=AVuM8Q(KbXzTLX8gPo@lnx5-a zraBPjOvvh=SJx@22t$+ad0aqIAK&XmvPQr!EU9oa5Np z_3aU?pU+qhWT1B3f(IhqEzc3Zk^bOp=3tQ8gCQ<7Fc(98%gg<;@7aL@$J~WGJO6_d zcxp&HRK8Hdq97(N2ZyuOh``9+;&&yfoCmjG)X`&Ug!K@4 z*x9ii8&~;K`Hy(zlvt_MGX5WO^V09*n{jl+s=8{;z zuWBkqJcjFf+N@kiYTl;MT7SEjo<)&~#a`TIAtd5xvxGvo#6$hq)mwb`jUjGD3JBRZ z%V&Jrpez2F)ah1GBSniWHb;Iq{zU7^qAc?CSOE=$6N_Roa^|$tT_t16p5eh(d0ls6 zV*HCndnU${$R`!1@nZ*#wajYj10mScuD`pYeOrng9F>7Q$gB6}-EerOd`#e<;y{Vm zi=Al5Vmg<}>nD%=79h?$d6;Co5@t%Qi#boPXMK?kq)A;jMVI`P4Hynk zGVX^L`9JFea;5OSFW=1!Cm#a{JC*!f_mf}JJU|-=z*Wo(KnVB zK%5vAHSM?;ng8I5y@5T~8HhiWJoHHO&A@k`tYPRTht0S{WJg((;LaUfKtfl1Z|9j1`=^tZ{KK@EhS<676@uNV8_THQsG+hKbKjsatd30=2fFCQh&lEcs_d6D@L(n`C+5p3_v?k3BwQ`V9 zu<-MKL8<~^o1>h|nJ0bk=9{_{ z=iUCw3d2yav@!x88){zAdr!}(RNXMVfCZ&nlWh@M3)8!;(>=)ts}?_0&lLrl!Y(dd zDOBOIRC<)@4bLZ5g((@va$^QnKYBs3#KJxHVr0MN12XsbA>C($u*Yog>etA(GRySL z#vOFaE!LQS(=T~qfC<#l`8Zc2n~t#3g?0?VkpyswMqzM#KZpk%H1cx+4H4yeSiD#| z{q+4pJ?k(D^4ucH&J7-qt)_{W?SFe$Qbm1*cpMjSGJDkAIiGZT!O;NI{$YYd)A+$6 z_;Fw*{);9ewVtf3-XplnO&dh&@P)R|-A5M)WGqNv-}kVTxV6-fml~hA1Wf&J;oipq%T_dbYAg6I-Hn}#UmtH$evEBQC2R`i5*&_SUw7SX|0Wg%f>l1I zlW$mGAZ|wHvosvdTS9v7)OImqg~m&js)6hXzxYw`!|GNpc5!D)#;L?sFq?{`l7(#m z{Ez#}Hzn~dldXIFnpi6o_ouik{V9a4Y5>)z1(LkpOKKt6QB@4)a8Y4H*W~nA*eS1B zs6+#PwkDeqWJg6{iIx4%HT$U6A}wdxu9RFeQ(COz4o=FTZN$KKd=wLxZ$_Nt;cpjhf?;Ol z2Q$t*-NqLD6E%#MF!?{|n{rNi?EoNl+W_>11J-dJJJ?40PR+_z)T4Dq5@iP{>jM73 zXuO*jMA!t^X`W^^tg>JF=l;q)_*R^nYk!DP;XbE{VZ#CqC`!)oz4e* z`Hx1Q+?{WoViZT0fu!{MbwlPos%ou=d*|%ooz&+L6}#Mdz5mT>;}-%V@O01Nw5Vx1 z8Gjldr260EgSei?2XWZ^7O?(vs?J@WDDeIrDv*Ot5ka8-1rU`S7=JjHJ^ponlaUG| z%e=FYG|4R7D_g5G#>_4}N+*U~)Cek3sN((47Qjg z%Vm1sd)1z;5ccDW+)(*W{+$sI^jxfDTX@~Ei=fbgF)hO3wf=#BsMk2yebIxy1(VoF z=^d$v0vPr;{?5tUUjpxQqxSjE(Y>35%6mc3q)gx1txDzI7%50SZdb>`d}+&u+2%yI zYVfpN9mE-1zYl_hKL|}{acvKo^7@kn{`pgh47#HYywO1oe3trr!|zd8h2E4=y~{8? z36-H9WK{MGa{fKLzReO0lQRqCQVaIH2upVN+&3)g*5eueGcP6$!~-$36#2~sjMTed z*zh?Ho9w$IWna~LFb|q9BxJ_LI!w6uA%uS`pK&pS$IWA2*2PJ=@LuX+F<7(Q2TFSN z@$KRjed%f24PgAlvzg)`Ts^<;QR%?M+vwQe$q5aoEUps7#3KInbvQNde zoJDn87e}e>KJ$Ynw&}$T(i&H}?jX%fNB_}45s<@sE_=#W1Qc4b`tBWC4b)yC0>Ook)jK2tei1Dz zgTX=|O|&u**VTF_km_B+a@M_?*H0A6yVj);m>wMpB!MJJnynVzeg~l@d~2Af!q+6c zeb{BGnFNCI1rYr?M$`Yg0Dpg_Z;9bKhjL4+ zM{AOD+syD*6H;$<$9Y4$b(nfR=o1NUR=*G}!foIW3lqo4Y4SeC!nxfZAYSy%iN%C2 z4HzSua}oM~!o0y&7V165VFax0M{^^B;XT-4UJ0&a(W6)_KnXZ|AhSImK zdaV8v#xmrBPXj1bPV4+wYl)z!?+Y4kC$bqgKNRvBw0H78KqQ)4lRuzl%HUCPf16P; zWEEHF9VcC288DWQH|6F3F<(DHdaCb0I*F|%{85vv64Pqla)DSk(0iFasb(-^xg)XT z!+m;I;;i3Pp0Tda#g#c0M|&PZ!IDSRjry*FfAJm9g<&2RG-{&!NDmcaV3I~qSOUEJ z>U=H#N$}Vl7-rG&!lXc4!&JrsT_l@eHb^Dfw*^Me=1XqqVgMp|HWEv0V5IzJbfyo} zlgwDFF_I#jB4wEaO!oJr{3L%ts{12B5UjtlCEVBQ4mEE5=k(8wAN9H6%jV@#OSkQ( z5Nh`sUC`w2u>vBH%5fobwKm(Z_=`U$RSqG$^+C*$jUC+D``M1LFdCa^@5ukstO`|b zX^v{o>W9!e&qNDu|H8q3sQ%~I+P|lR(>YH_KnERrO%j##4+26>d#v=Rs!2}UBJic? 
z)dw{BHDfhwoExXODlsLmWH^IX=`(N}di40YVAHpv{`;x5DGLP# zH2E{^T~KhE`6=TEBkdrh5@d+4lS`=ZeZrpl6hRe^*v%^Q=luQ8T(#bIpCTiPI3YT3 zTN;QV63S*4pmTn9f4>oot5A~Rx5O%n+M81T13{iS2Zco#7q>fzl_&j!!tXwo)`c9* ze4$*EGqC^Vf9msQoF++u;@7DO)?^h-!>~QH&@pe6SanvO3EyO2{55rdyiy`)tZ!@> z*K*3(zC)WgAqGnH&f2kbJ?1rUWL>$1&C53Gx_eXo#T4<|T8I9*$Hk?oLjwRwh=vQ4 zei>OVXWMw|)D*yanDLrGE!ro6-v3ckJSO%hzAQYk%&6Jzrnu&nXS;)FUC`I83|&$^ z;5guk=4N#=0k%f_bNM0|7Bu;tO0>|!1V}ZtJhFjg6iGIbV*gZeU;^b8uwaM}1fES+ zBDwW8Gf*pHwXtqvXy&%->pRTKbsWZCbyL(@H)dY51TniVEq|H!t*$Kcr#u~Czb2+s-gELMmEHiu0OoaH-PdN7cq;Wl=> zh5uH)*OC^Q5g#PvD`IdWB|PGHJB;nvV4#ADz~o{pF_ob$Lb`y@OSiMSaU2W8)uCkfBXN) zrkcdctXUFk0#$B;AWi4;zk9#pzcpv}s=lB0(V_el%2LPp+3V|SG@Q3(5Ol+X!+-)g z>8I=DIv{#R-1bV;JhnXF=De(>yk!jZN$ftsZh_pv$|dBgiN#Q)kM;wX&G}#LT}J;; zlZ$*BiW}|eUT1FPfsCXh8}D!H^!si0mJ6P16ju*)D>ft zT@__2OoCO1ccw3lfxV#P%_FBo0dna4tZku&3bCUH!>3f1n;dQ_@rR~&&+HV<`tfa9 zNnxen(O?sOXG_e_q0Dt-!P=CdFEp7%5HQH7$As;yrz9mBI$le2ULeu8*^gl0VV38| z6f;t}|F?e^T(^V&Df-sxVIcP0j_D58#?FcW0c!}R4^v;JhzGd0Q8xBem-Ncj1!bv5 zFE8K2EhNdt&^H<8gJxHRT8|7dqS(((x%j>4jJcKo1e@H+r75_cpeZwSZI z&sxY$L_VArHPE+GwF{yLI9^HFqCoYZ(I?Tv-j8nZ$s^awg@Rr!W#EBCcaq5PjA|?4 z(H#s@;7Fz%sXNT`hBmQ?g4Z(rWm=VZ;a^fnK>ntq(d5Le_jk9ItJXU5pL&=S5ZoFo zi|q`Xb&h2h7d`9B7vMUSBz(kUMp0N3wlPHB`K`@y_c6_J$q0}n$_-jfQ$<7w7#Iit zpqYZAKif`IUOAEAJHPx=G+<3Ch5iCMf($xqG%EbR*!rrdw*TPE;7|&*cyWqr3dOyp z6qn*oDHD7SfoeYR@8BispFi+V3{HnXL9;!ZU}_Oa`RHeg@dz;ru6 z|C(c@Dl0*k`q^uzj6_y|M~WIyk?f*OTZs=55RaiGkw(S=@k2VFBl0^QV3099<9qXK ziplBkCN*2;{1|UkOZfF5$(UlIbT(fcis&~zC#5j&0!h6*FB0Oq*_BO7Nbw8|13oz? zQUP3Xs(^99?HDu}lz;|rDX<7SgdIK;6^RO22(PsN27@EBQxl4Vc(-lr%Pm&%ew7mV z*4;l?rAthVukdX+aB)0nwv4fh0S3McRcTbb#_m!yAV`ip(h|C+LJqpR88ZQ-=n0?A znAk0fci+Sq9|rF0T)ts^PrL9CU*;;&@pOiLlUzk9*2>)!c_?K2@HRH#KKv4Xr@BNu zk%B=!?~VKyZEC4_>*xG;Wt%fxaB3ViTu8^U7LS^wDut_z(1sGZtM7|BXbK}g@^0Up zNhJp0=NC#mS~khVDDK7{y@{$4vALSnGx{F@HEx%9e2u_UHbzDL((HcqF)j?qLPx}p z;wxnkfYm=PC`6FQGhZM&hfj^6QWmXUBt}{s8z0*S&w^fm3dE>V-cD`oM40XW8DOpN z52m5e*P(cGR@HzsCjN%J(U$nya)lY2e=COJ!nl!d7zrNW8G9+Otz!#2mbbwB zzM6!RUf=BJs5U<{?brHG6ZqafS64a$y#FeykV>QZ9&^ zU4X&XPQ0m6e7CrS5>(sNi*sQ0}2_SpYd+qeJV@&a&a@ukDb9q^hl9p=447tYt9Tf2b zh52cY;cn&$nr+q0FT!nA;oNSr8xwwW4(sDNhiL(Dj(`_7N>q z-W+n(;kZszswAXb3TfNok_ezx{yR zT?g+}1DoSuv^#Y$9zhK|`hJu$GDAqZ75MpvCk!i=dO+STu(I=@|JpEt7iiAux0ISh z1U~TjBdJK??ugNd(=sXN45j7b$CtVb?t>eFEidVkECEubqr2ksvw6uuExwM8aH@!P zg~%jqMitGEYM80@h+pGSKVuErf%->%eSHcut3`h<080@@>59xw=Bie~tS+C;Rs9u# z-;z_hc3ZX8-r^K&ptIexTvLC_UE%Idv;-0k^@6SjhXfLs>O0k5yB+|n;a>V0Karf}*U@jb8WDiL?hk!&2QMVEq?Zi@ zWX=zS98nnPp8e35ZR5f=-)y)>UEJVH)OVF0Qb z^vJOjO6WUQQB~&;5bzzLescN>bWHGw9CqmbH5W73uW-*s{laz<8xJK{>Fvsmh)Tfy zRSNpwKUonm_dWxvJ{+E6dC7bOImZF^8I&>Zt4|2&)0q0~NI;H|fAb=XY-x%^r*8F6 z6g4RsMhXkgPIIfP+<1R`@!cjt%YwevX4^)#C0*dNS2oXA*jpNq?7nHC7=|@!Y&LZL z$v#kTfg}z}s4mp|mCBuxyCr?|wli`t@uUsc5-|3d)AnwPtn1F89bdQM!pL59-jCPG z=}(`cmA!9lM&7V1jx~OOhrT~U=#J`?;(FqS7u;gt_N1yk2()(bO5W{CC$%|`i2gZ_W!ssAl%E936%AtMKS~R zeBgjE7K51IgM>@5T~AxBqeh)NnCJWUaIX;2KPKtH$L4PkRM7rtJSnc`VsUWhR4A0j zPDh+mNAi>E_OFvB)kHt8Y^P3b-1K}?FH^y`E#qO042BAf!m(7+S@H9bGQ6wqgH1l> zwQQ@`<O2Ca;OC zXCcVu^DkufBFHI^E<$Fn;TC2BT`Va|)LbuSQTMf9v8lRzedY|KKNk9J$&9oY2DA&E z0efq&M;~MY*0B-U91hu3yCis`Jjq$swYhR=G}}ObOD?H7ujl<6lc1a1Fx!{8>jNrB zngSv_i%!?q_9-MSSkxKukO&mHK5PUKA*6V zsdwW>PIUgeTf`ENf3GCo?o&0p++7i$Ltc$0BrRD~D7JaNTjH1EMauTGhx^uP?xgZP z5g@L1BcD5R3iY&-cbnR5sP*gQDH)(1_K1;0%;Bp0Z@CjDGr)6iYtk~ie82F=9*v-$ z<~~5QX<*qr5K=+rRU~Bw!I$rT)Lv#E<7i9^vY$Wn)i<92jG?vIZ{lBo(D~}VItjnx z!|T;Fd0h_Ct@%TE45>80+Q;ot=IJBG2`jIkJ}>oG&cMv4zG*Kk`U9dI5cWbIWSO<| ziC^@rsSM8mT(LAP=Whzpw<4d z-vYlYOq}L7pd;H4!3?GOF^qz60~q;5V);U}HNq&z{b+PDM^HurOksoaSC!oYtqLr; 
zpuVj!#0&f{2DRa8R|VRBT3ia{}*tbO1=h6^H}pF4@5IMS4WU>pIha)(`~S#k^%Y> zqV$z0E|f~ZZD*gj;N)%09s-iyjdwlV!&O=29GM6dYXJM+FSu^4($m!xdcujAFU%zK z^ATs&Xe=3gLw3R)Ei7LSTUX`ELNM6d&e%1 z%Ci_>wR2TZ6E9dWrBEbjm+Cym6tQszCxgNsd>k)7<f;WGN1r>|2}a>bFC4m$@Yp^0+xX_ z#V{yG_MvaTW>rpvrhSHcrfkDQfMac(PXo#(vhcYRI>oEl+9=6p$jRJeQ+)qxiA=)9 z#gAF_K4~bYmBV**uN#qqf0!{A^Fhl%$Bt6;GW%j#^y%9>$jECLu>hL=S?;ar$_jX6>o#|?E`GHFkZEdF~su(k~40_27vu-|_zyj<>_Wi3@84sv_U+j4dzh?>Cw2oSJ zKWYTm3s2fiszuz-Eew6W8&1#>RcX|`o2unbJe;+oEx$Wj&B8iEbh5#sYz7CTu~4N9 zq$Q1&jfp>-0l=77f*RiP-_A90&4MLM3RZpGOpw)H0mR#?38Hs3su{qeNyLJDUe6${ zJ%9++jeG9MvGK9?aLC|$G4qzt*1G7^2H!U;udE&Jh4`P-Jm8*hO`6zfryxASpxKOq zxf!l(HG_TH=<`b7aqe*RH|i#UxLq-S=`U1rwRsYdOWZJNGAeJ(iesTcAxtTZsw<#s0JX^Jy}t#>CEbpZ2RWxh>)T6`qJuj9Ve-WAv3#YrN!K~%H`w2Txa?nMN)A^ zG)cKKk95Bd&XcIxn1_bXuOm8@4mM@yp{7lcko6-+CZI@>Q{r(xke#2!9 z1u7JX5-!>RB|WG0WNc@{=r)HPvY}=>TZetp4ob}HUd*;2z+TAH?U~yXIWHzauS>o9 zRB}(B$jH$yh{JA~4qn)PKGvrT%GpGnJT`i~c$N?nPmMChBy-;JzWz74({|O?m zv0Hw0ioJMM51*QB0R|vu9*+ejMi#fm^6SUl;g}d*xR%-hqQbBdz=*8Bgg_!gO1Nnm z#s{#TG%WdzR5v1v%-O8-UJ;JwztN(4SBUa21?&|mt=#+Mkwu~@@=CjmIktB4bHDuI z3EaWqWXp?}+Bn`U>)QEU;7F5!7E#NVVlMFk2mW4wc78s6$6Dkm=yL=q2fXd@XNJ&y zznBy{AAG~miI0TU^nst_gZYpusN@o~Xt2_j@{<8`;#^K`fu}fGto|{8bKbT{(k;L1 z0IkS>f^-~YU|+Me@Bym6%_L|*P+cywObBlq6~b#MiK%F})4aLZ((+%oD&=_x>@--& zfzM7%e0$K{)}ai)tzu$j$^B=q-w%qnAvhW;oe2MPAPTND9+FT!(|o8Gd)_G+T~dPY zVi!*Q0F)uOx%`<&%-t0oXqZiY8=9O5wXfKaQ#7LnC~qnpR+l65V*wSf22vk;uQ$1Y z|NIGgKGcR4fQipxL<;Pgd@;b?#+O$Aoz~Qbr*!mv9`uZz%PIXr#d1^T!$NKuL#P10 zyxx5MnsIcBfYCOBy+bHU{R5pxb`5O?foOR3#LpjSx5%32in^yUQm92$D>rZbOuV|Ny;fxeBG#|%qT|hu3jI?IR9$m?4aUBg0Z^sxI?jVzm$Rg4 z<1~b5R-I<9*F{wqYXN&A#_ZR>s8u*n<8`rfcckd3?rWee>m@${YJii5`=f+H9{&%u zx0%PGj3Vl0H*QsPovac|ahAwPKevbZ!>~?T!j!;$$uSro%ocph4YxiouilKjj~D85 z7}tE1!K&1f=?dl|^Ohguq|>>CzdyDDr;S$)FV{%d%A=4+*E6lfTn>DUaL>eq;4d9+ z(3RR11!K?{WTOf=85#ph7%x?*eUp54zRyZKbHz-JV8bwU$+>({c-Lb9{wR9g8sW58 zg4zKbBA!*NPg`B1>JeY#q8_x4lDO|mrzfv%6-4X7cUqJw@RYj#G6idx8z+Hs1H;Au zTI!B+41CdUKK3)m@0Z!D!BA3#kH*2Egi7FOs;2CD49djZXDLfG{xw*ldZKJAbP=3#P6;_6}8rJ=uEr zk&1KaS^=;3Yo-?P*<_F!fB#iGF}6*SLtbBy^`CNl{H_f`7vL86+@<4h>BS81!BWC^ z?33QI^ln(f97a4;7 z$Vz%Tp$?+$X|RAo$)$=~5ntQ_%Ps{CUqQF8Fs2D0l_j~c^uzVz77LGVPAQC*8|P5i zF^L4cR03sx{HD#dB2r3wh^)D9G&QA3hl7N8GP)Do`Kx}DMODO1|Ev+7vgu9rFnm$* z)O$Z|nb}Zx;{LC6J5G&*t6W674eD2xQ@=kyO&e@g@KV~(@1Uz(kz3*79}ef^%&q+) z%04o4mht5{QJhc7mJ)xI^`LMZTNvt}d2|+Q^sn1NOFD03dPIstT-28}cDxM1%O0T` ze{j$cpku@Gj-Hv;-@Mgb9H0^b`=mU9xjYiZVzXKG{%BiCzw-tI)8_32hAq7wK$N^~ z^YtS6f;-wvk@`20KaewsVg`AXNqH5DdbXjlVUe^u-@P>Vk&@3#OepH12w>sGi#$z2 zBwDvb1NB$4TBeY}6jm}Oak zM&~rbI%%WvI2qkP4!K8+X4H3Y`- zh8?|q>Ju0o@*21JF-Le;`Lt-cN(*UwZtb2oTk4F69Tc3?6if*Fa}&cOMhVla=)S+C zv$X5k7Z&Ej<E?F|7~74p`9U33+aOC~lCM9GUYK3pp*ydZDjxoe0Jzi-%I9Hnp- z^oI`taz!yYa6&cD<9F?ec8@q(?D_VlWfjup!LrikVqMJ?tWY;?#UBNb|r{^Xp12Y$JT-8h;C*YmbT|KViQ zY}e!hheZ?pP9tvLqip7NO?#>uGyF@NIN%;OYmJ$tU++({>t5kZZqNC~oH;c?f##jD z!l;_Nz@MR<305kxB+!PrMQ^EeyVWs6P~>E6vBbi4Z1N5ghC+{vi&oE9?!n;3&_&lB zzec3ENq(=JRhwsGu^D;RYj2mhsYhXC3nS;-nB+0#?3RcyUg%dzHc^!H*LkDf828z3 z(1{^hbhMFr_6c!`HH*BYov(c0?B|s&v;~WXM`19?OiC2}F>&WX9{eUe*BF?**4a=@ zH)7%|rPZ_jtXPZ>(YLR!yg^2A%V5C=8)O8dCD&6#AgNPXaP^D8@L=W_b0Eh!pP7Vi zEC!5i_jjZ3(?;@b;s2Y(>w$oRPCA+HQhk+Ffb<<>93aP8Mp8egvBARG))z#zU@~p< z%n1HKZ5uQABiJKwQ=Sxena9Lm-Bqy#!>ztNgH&=`{RN<*s}PSC&0%6Fl{paf=WK*$ zIjYTn6fCY5a!LF7%(3t`4cVE7EC=JPh2yslwI_14fp@$66cIA~sD8R?xdm~&0~4L> zNOhgyj0f~7=wMj&^Lbh-ryB3sn#lw}ZJRcro+Qw}Od3S^nim#N~+%ME}KhqAggo$Lf#5sZhRjrTJJ3$mto3(fR_oZ5;Fs-h_@3zfuxM|C-1f za2b?+dAc3=$+%Qj42Fxkh`B$@oa1sYbpg%$zQhk)B3e0Fy?ERB@u9CrR8mf<(tm(v zeGy7>pXCtnL=Pwwa&Ddt5Le5vMCv>yTlMp=!}TLwa+-Kb>v(c0Zvtu(i?qCA$pA%} 
z;50`Zd=RWU+%#sdxQMp3DaK(i!vx%EIrOfgYE;1?x!vf8<@Dz7>Y8?HSf0${&N&Wa zsK_W!z)lgpyCKAKNfmasbq9U?X{4sVuL?#j8h^N59Q74neLufflVMQvEanm)gq0|iuVCvLv}S=AZ1a`W%6e8RR({RX2&zW z>i+-&ZKUfia=PSIzBqCiJSuSbH}9CP!(`g_?0xvER>&5L^m9bG;O=(|T!b}^$`q>9 z{w)7ZNfEKNSjd5<99J!P_c0$vWkV6*mBc7jx-QOhwC{qXf zjUuD-ez1^L21)Y6s4EUObFh&YlheshM|ykwbwuTGAP`G4GLNyYtu(dWH*@e$1yi5c zjZ>|2xe9-s+HNYkRQ*Uvy49lC;Ill_ub*@`?jyj37}vpZ>^J)qfRIGYY!H!+XlXUB zDMInv80tIHse$+>Mour|e%jHAKEAp6%|le?nnt9V$LTxpn!NgIb^N<~{L6-GK(65n zx8@BN0rKiSHKq|JPv8_lc6Or1O`XZ;4i+qINUH+{!2lK_>ZCr0i- zh!T|^>)$!vcxu(l`nRPoXeX39VVF&Sa#}cr_OZoi%gClEaqnx^K48s70VUNw9pAd$ zIAXakL;ptO0L#!--nd#gFSY#r{@mnSZhUMSz&5q)AVHG}2-H)d!y7I^Wj)wg09m^pH)XH(v2HvxRk6tOPtJtJO8li*CKafPx>|8O64o4 zvEO3|IAQe=vZDZWi+eJ7bZI7{L#3P(^Yy%Su|JgvuIc;L*pLzh9^LjFu}sqp)D&fb zwt@CN{)_M6`>)95z12=$lhMyk8mc^6*c0RubIo}hv%2LD*`!Kn0CNp24bZdgCd2Zi z3cs7PnWx$LYeJ0z+F=kK3n0-$RAtI0vX0L-DVz695^ssj#1uOEp+-otfO{y37ZUpi@ z{#7rPCcFkWNgh^=@paX)i7}wBMpAXo!7sIIj867Z2Swu4eOKOb5z^Y*l z_K0A4z2~=W4j^nGei0PL^F6dtwD}gecf%HP~>_n11Bs}7}VbW9bb2)lmXJA zAW8UgG{%bqh4jn}FP8zfmP2_FvUoD`{<}sxpKa!`XW&obe#_ZMQ9Hul|5*!HmY-}3 zK&RZ^56jzHDxL$@2NPjt$L_x6$E($kZ1#^Mugh!eXw`%v?>eitgd_h|9T-yygkn$2OHvwSlyoCs-KzWBOai=rW?2!7K#sED0C$Uu1C8km*9tnde=_Gcx&aubSOFt zN0F+i*yXc3wwbD#fa1iITghte{`eBH=uU9_g3^WUpT3i_s=0E+RE{9@Bxi+v5wtlJ>Ovn$yqA%S~$y3zH9V_7N@Lvy@bTuC&?6fRkux~c9CCn}l)6 z_4_ssKW$#^iBeC%<^<)k&jX7VHAQr33 ziksTc27HcdtxmAlDwCQl=v|S1Pz;=?1e%VwI3pGxXB|9#`h1%jqIvf;K`%1|zN)7_ z5S)O*#Ksm)>IDRF&~$w!IS4T2u{e`Fyl$5d4;SE24OD;;KA=I0$?bS|H9(ovSzvW4 z&;ChB7*0llQAIriL^&jJXfw_84&+ut3Ejhow|lJ<(g#Q6TsI{z9a#>VjY@7?=2T^g zg2qU2=r{tz*7?Om9?mEUW7D8-!x;y08RX@ze6&N$ljJ@t+`a_rz1Rb;@!<1dQt7k~~O}8f#iv^h82; z&(a7^1-;B~=Vc0N{+Ml7O|5o+v*_)7R#iKE_mtO-B#{E~`68j}bi2Bff&V(N@=sCE z%wUwOJ|OWrsL(UH`+2*=aIEH^r8z8O4} z`+s7c8*EH=>4QfOQRW_{1j4(m_3^4ZT<1u@__skmk%-gZvu_XLhvM95qRD8`5s77` z0dk$=)Y_tX^;y;j0RXn2!6_njA8eTx~*V;-`R>`UPG&O3L$z@JGFD z_3ZS7*d4uJS@d_%^|JjFKBO*bR|$%&G1FT}AMUU4FpxA)>Dv_nu;Vv$SRo6Cp9Rv3 zkMtk(Mr&W4sUpLgCP@914N)W9`^4X*CZw;Vqp#*JHBP6+7Bv@ulv6Nf%amI;8oa6j zX-i%U;h)3;{}|=soPdt!hIBi2v)1PMJ{D6LFN(2pZBMWV{P$ueqsw%&>GE#wVGO8m zx)jSV)ao~_GOeT*pC!CBP+9w*u8dv)`eK+B7r>1c_LDC=$EHgP=8>KmNx8&1hS{Gf zn?D{Zp;}{EBxtF(A4C%SKIstrl^@ikqXX4WuseA|s`1F+6bI|zDgtMOVsXB>KswTp zgi}6Ij#J9KHHT)1k6fTakSe}IMijg_xNR~zoNV~VsAk$pCIYWHjzIAwT9C#`M?9c0 zGa_5N=q_%7VhUq;NmL}?I|206TeG3m+Qta!tBk@dM8N*^dw`+q8DUkC ztEv9|_42jt2Jm>J`O09w74A`Yo00Ns0+Sk$9vUjX3)tnV>ZKg72LnX8s3>ty=e*H! z+&i`lKi$_E&a;D|o#LxEaH_igYE$6?2N#Xo;i#ltg9*Q@yA=CucX*IFEBN)F*FL2) z*rZ&S1^%suMm31HrQ3+j(0ALKGHbx9JnmY0bfY(g8+{t{`*$NhZoa4E-Oj$|DWri%WJoEMOO zjNA{n4+<<4`4(8jcp=W^*J)PBBgPd1aD|+rq$DiNO_R37H1(aqogg|GTcpwr!}^1h$V!WJlzh% z=T4f?5gIK=@9}bL66}!1V-X4#Ivi2bI?ln)$NegU28SHun{X3!svKM1}aRz zW$g2!gj`&Y?eatvWeS?O%UXjeDl83#zTJitp90T!Xda? z=O^c;+z3-61*B_Vr}OEbe!48#yz!*F9Nc{%J4?ryFtt4*KL{|Ch^MJ{a@sl#|5DW? 
zjVMwwS4d~N!Kl{C^KR^@r=#Z0H`rKH$axX)7+>c43(0JIS`1R^5C^_{qMSN zn5GynPqSw_s#Eg*9m$w#GX=NDvU7;%f42$Y$T>VA`v;d6Pg^-8mCmR5UjGipF)sgr zbi0Ij2Z>+dOydEDlE|-Y>l!mA@%&GM3#%4<2lx}t>n`_B`U4(h_GWc6El}is)VR$h zrxm5_E<*+H$B?rH1P26RJ|0!F?(<$Ep$J%R2bl7wRr^SJldX>1f6oxZVh7^t}923+Q-;)cKN5W8sjA@#nq#E!V)Ud<1t zdgXG-U?;(k#Q162`Hk;X%t{^39|k;fT2<;s$=XUy488_psDCd)$@Mh|8#0SOaR$0`nnxCOjH@K{Og{RG5|jnaTuq}J$XQ7_`lO}96?MCex;6Ggazc=@7!TkYI6|!$4d;2h_qcFG8v6fVa}zEn zG$FAqSJP0)J(j%@5cbE?Z#NCOZ?qg)uo{UNaYF{gM-$|SQiwd7cGH}q<3$nafDbwe z_0CT3*boCIzutEPuX!d_gx#7NS`38_X;ecThbTMu%#};H2-pb9vBLT&%dl-wAo0eY z<9}$ie0&fO^m;CLeLor5Yvc@GF1AUd6&M}V-%CH%5(y&8{mT&mUbxd4gLrO343^2K zn=TZq*4H`ya$}CdW5|~FW#C(_+oe6hAiv+=wXQ@kr79S|V($ffTlRwF-r&E<^hRG% z`0&cuYkm0|&DX4#$g?Z%DDXt$aAfgPjX;U}fHbwp=(M0dgy5MW-94TS#J2~)W>|)N z2uSEsC6!43fhqf3;Qc0r5`t3CsFjN&1bPj4fhUD_8U41~^>`B_2_T|fb&(Uo`1*zyKRf#f);8CMEs7nmR&UOlBI$P37n z#2^U41+Cx;N@6#SUw8wwpN(* zmP_z);X(K)jr<(n?)zt^c#D-Rx`T`o6uWXZ0b7SF$iyH&R z@TeW3%q7#8Y*t5x3}BV7LtR0Bg(T^_=|ed*^XLs8(*;K_NYAB zjpQyV!YnzT;q<5K!Qd?v5%057ydBzOJyLk0kX`?Jc%2itud{OL5=*iGJO+Td=J5Q6 z!?kG7dqI=<1TzP9s;Lh}!G7nE@{hD>JC^Wo>79>vKCg7URK;k~0a>69;qM9|za^F@ zDmw*fv5>OD#aT;PD)Lv$&Nf*JDXDc?w#ITdh7GZkD>pe-nL7?4YvO;swZ?_|MLsFj zU!_>0%8>9Xo&?Gw!IMe+rxzL^L=p4UAlVfFk{tdnBuk6?wiE+UHb6;7nX;2xK6xPi zNx^Qa=7!a0U{a-ipsroHm6+f!nlDm>P~|`6HYU{ujdv1C;)Qiiz>9WPBs7P6nf;ihD&BNg8oB9O$0Rc*R2`LfB+4`<@E?< zPNb_}6sivbalquGB^f4b@$Hb)UG5hzN+^LlDn6L;Cf2vUD}1`UOrCfD;jA$yoY}== zHK+-O*EW^cNoZE|=6hc+ZH86g?9`Bi!SxF|tlUR752KAr-@~U@A6pE-?Y)6wMu$w5 zV{S4=5>-CyVPy6j;TRA$g6QYEE-4xGTvA`Ln{dj6pQIscBo5O zpL%=Mv#0nk;}kxUlnwlAY>8%>3$k3M213&r;o9(BVh8lTH*x-Z!vW zX3xNnuf`5i{VCSWhA7hYo2j8~VBp{gOZKF~T0W+vNNV+?a>IJXogl~cd66cx zR`4d&!2OXHBpL(a7%K^>1#6>UPr)nhLxbW ze@uqYiu-!j_2YMk&MKd;dfSP+f5lxvvjO)|Rvh6y!I?SdFSMzj6C?)hCk&4tLV_RX zteLpMLj8d{Fg#psJ5nuFbUhL8;}T@WzK?@RT!%~(DVBED07wEjBQszdvyrAAAf zWFWAY;p;oDktW}TVWA8k`0D)qmr3ibnn^L7PIs|kB4{RUR0`@aEQ-sHZyQBwa$#)Q zSn%ZajdAMK>jU;j{zv!pJn_2qd}VR!b<3xlRTkX)#&B5|JJFk5JLuu!P)%IehnA{Q z?RNIEo2`IqNk6B%*w(gQ8(?GPDqZg!j|S)kKG&)AU6sc+iu}J*LY$*C#4qdhd4a98 zvq*Q7GFAC7^R_Mmfh6XOfO5`i&Gnwq&vqpx{;#tRxVIbn++>@CADKY^Jal7_Wv~{y z{o70Fb3Bo)fu(f+%d4-5r0yu5oq;b^q&VX5W<|^j(1yxB)$Hqt*oh_2R<%F1WPBUh zABsMa4>|-6_Xi?DsNL^M?*CXE^=di;(*Dck*qr`>jhj}qyZ*cEQC0lOe_cg~Qh6}b znLvt(D2lyG(UJS9jxj&Zm`8IlB&Z&z;eaT?;qoQV8&WwY!M$CK-{3bKMgp9^=s(PZ zip=@|^vDS=rIm!@A`Cq4;c=cDXA6KQe>bfMhNt;!n~QG_qO%)n+vSR`sl8Q)F(L^n z8Tf=LeE~jvTwJQ|6YhvVeoGNg0*?QvCP$7W9!ddX@88J8yy-{9^EHrIK&&NOY*0Jj zzO7DqM|zaVSJ+--;1vqwv`6=hPcCi$iO!uH|Jnq=h2HVa%Hs%3^C{!>dah*;g&AM; zzEX|WLgxU)l%Df^s_cOrHo`iYuzC}!g^+VP-f*KoAKutTc825!{USIBTpawr#6lXR z${>u2L|M&Crya9{87wm!;arM?=<;04FIC^rQR9+(59am+p`dzk0~OvXc#XgI$vn>X zT#76hvDp;(+c&y&pc1gP@RdX{tE^HFB7lqD0ATgCIvYNFqDOjE3XgQs9l{;$X8VUL z`pLxITwdSeK04nU?9V=(=H3%bB3>7I>~rwgObXzatDrqNZ)Rg4qqDZi=)eg4)4-!R z%O-t`^*!##XkbC*NA6LXNN}1FMA1XL$h6_sLe8aPf6kgZ*Aykx&XzEqPZRr_0 z$m(op=5Q`xrtx0!Htb6$T@u;cyf6wl;^`r%_?`*iMd2weWXp&Vk|i+l+o$0qnVWA) z#mu{K5jA%vt3_E96R=MG_-pcx*K#vZgSO<`SLt$nq}*jy-##mr%MRch$2im?$XyBM z2@bf&&!)Y5o|{wVZ=SD>Gb&P@2=({wCqA&Ncqul^WK^xgM&3^>ymAY{CA3G}&lb<7*j5gpVP1N*jj@pS_%D>Aq3tWED?{#j3#U1NbBm-|2nBP4OK<`K)2-oo(!1i%cICQi~_4 z`|X8>v;pjFdoa~Zql}?8@^zgSb>|XFY|P@@ZIcFhf&LFn^;x-`TFBXMHQJgM$>#Ld zcib>$J5fN+YDl>8={eyN*LlsLOZ(SmLnF6gAzBiEEY~eT+^^LTQa(8rBFC;DGCQdz zTZ$5CtSCndz6uL55LCsZGo~dU?#3_9XYpB=M9dM-?xsPSw6gsbHEn6mKiPVeD|v47 zE?F~g`x>KWlcNy7#I=%Vh9~MR%!=}L9jnP}8kZ(@u=mu2a2Z+s1ajuOeLTZzx*D#q z#334&(LY)Au6lNu{B>5IF0H1a&1ZYn)o2jFR(KTi3{+wbi><0bq=LL_*{yO8h;A^B zrhWrA_C;%{(C`eB%<+9_&Tu-|mnQe`C0Ze&Jy9A%PNR?+M-&2G1BtFntb}}{vOFsW zbpd?L0D#9g|tZOA!-fv7)IF}j3PsJWzJbSEyJpoID%D&zu0 
z32LGPoD2qa{=n=%$d@8q$OaGfhhOA$IDswGE9FF3?4^Hbi8E2l(W%dlED1g6I&lHi z76a(n?k=&anC{0o|3+?|k{CObG5*_ZW0#bxWQgM0mn5w#Vr=~;;+qgq7{NGfP zzZzn6JM9iTo5hr0~CZ^Y2T5D<$`wR*s1rKP-6pWA}^MNmInOnF@47=9{CpeysB zYPK2~y(KB{dMh4jwH3oNK#_qZ8c^@Y_%u&Pd43Rad{G*3{%GdYs2aVW*+@k5hSG0!J`!|-BC=93(MZWWzFKD3|LUAxGryByL&~-Ks@qKRlo`9~xTL|G zpu^-NuK)-Nkk)$JVbG7PQK;Of0TRnhQTdmCwiD(6O!x`?9&`Q0Q@d!S(Fwc4%u zdD1UIHK~``{r(rFl9oO{gF=hD@b&bcugP^YjRBocXacni){Hu8{Dd;d>%+YwD9Cg=2D&xhq4{O~?JPjnNBY6M&^| zF8x4I&DYGmcg>-l4>yt=QwH;gpqP!ELkDgdL2J7s)WEiHnyOxnZZc9iU0;`Ywx}hX z(O&~%`{;043IppHb6(IZJ|ky?N#gr(WFN)_PM)CxZThKwVyOU}=@)mHJd5qPXrL&jqS*)`BaGmfW zpYYX}Uz+#G&7j0Wf*_>QzpAjj3ZF?xD9Gpet3+XPI`OCEM=oML^q-%P;hy$UwP$SD zhBI$DZ@&7gOK~87-MSVQ#{HB7H%uT;%$25!WZB}8RKd*ZucCz6`(+Tq5z*X7`J+A>+mqyV=gMc@y z)Fgaj7)``_0vq3|1Mghak&x|dai4bhJ9tA-z=sM zH917&-lqRUgU7*}P1?SKVZQ*9aG!kY z^b#((+r%;+Y(!Sfrp9)Ex0fz5Q$KxAv`*AyqYbQLKKV@!X8YgtwFC=edq?I4et6*; zC%mh=+}lh2v_ZD!rvrQpA0*u{{atWe)9v5WHPj=viQGWqa^f$r@ZXr$KnM%pyF}59 zxqQ=THe!SK2_=# zV61VKX2*TS%fw^qOK1~`w(blb?Nhes`j5%wKC+xmZ3A$TrX`v_jmHv*hB0e)SkT1h z1ZGQey)$bP;8I@M5^p$4Nk34=JR;d=D{+ODzATG)!&X_xH3p-B&yz^3EqFE-`Sswm<%Ld<$wyMdHwm@xe7xX65)H>ZSHN z%b7M_Dw*O10v6$}XtsCSxRj9-?JOac%s03E)+EbM1EzQA!nQ4=s%(f6oQBTW|Yo0cQs zoD2b!kY%@j--R=y{OwpVd0~kM6EC9ZSy=Q~Y-AXS_6`gD^FpKm3vLwqTSGK)s<=yi zuY^|r=%@m9o==McWMMX`cD#JOe2)9P;0apeLnB9x>G#3$TELL?Cd&k8Pnne^?~bAV z*uSb;Hl$c1dT4O^W4mu%2p&rr)6udi0g6ECg;RsGpNUwi_-Cz|f#A%FR%DTpd+VMIB#irh2Z zobhHU-}R2P*?ofudClo6Cb=G3w7#}*zr0ZsBKtT(({xz9O`Vbt7&r92?sGlr$iNdC za|`Eak?Cg37iwGr5dkfg{eV5so>RSVRnWC>FWXvb9OnM0>KYHI;BhEiv>J%f5DgSfY#&>#PnEt`>0?Xb0) z1mT3zUDOv>PiHB#j>V|w+A_cg1VeO$PeBE!gU?*{`n9eEo)nCN$It1dyOjTKK~~DY zm5elc??2g8wr0mr8)I&WgD(+zSDwljI;s4y0qma~+hT zC_usjtM*u!mpv^Y*2=FP8MlPPJP_eN6zIlc=FM=c;;-u3YHe4nj zYnd)Z;S4{zUUx|7W2|Dn6ia!2A2|&1QE$R7oD9ygqXK&Hk^<2(S=mox{PW{^+0r;X z56mw&LvgBRTB@&bg{9|S`$X~DiO57w6BH&S#EFzfuBgTpHx<49wvv6R3Lsmx@PkD2 z+Wk$N1;q?6n*6JOIj&GD2#|;^^svZvQPjEHIw@W=Varp!)x z@y%!T!WoPm+Zig7&w{_PWKn-ZUt{ zmrj>hW4Q4rB+tkZB8Iz9?tHM{<^Q6}W$*_}HkQm~yJI!Frfiw{4?ubGjo|l+X{J@q zdt7mzt=f*{ySzthYk<0Np8fS`^lM=&_f7pcN7+sv)xzQ{E%VII+2|$<|FpW)8DVhq zTCP481=dl`-J%i-NBw_m0sLHd@1C&G7ObYbRq%st-61Q!PY8YONX}?<26W`KHra1j zAzPVau76ry5xXDhDmA+bK@O5qY~EZxq)hyEoq3x(cwcoxyZ2ddtvmCdqtngg zf;Wu-TGXdIIOa`Fm>8f}#yO4%5%bOwBS$+h`gr4QhyYwFolZO78mthDv=?kJS&fby zxIYqPeG27|jFKhz^><@gmwJyZ!`YwMZ;{9~s)50$aF*zP3-95ln5pq)p9W+X^?EDX zzz`#i$j(<0!G6pqsw@V^YbJj~Xt*$k{HMOiPIvd4diiZ;V48a|G*bh4&McOfnMnx`78rOdhweZdz%yuEs z(%Oz+sT~7a5vAd1{YIx$TYZ_-a73f`q?-G2yuFo}7wK|-3HJ(4|#EA#C zR#7Nr>&kgbaN)60=Nj(dp)i{}v#ncKb2li0Oikq23jbFd)%-#fM9xQpPXaZ)eihp2 zTR+gJ2qoQGrUWP#NGSo7UHqNu41S-QR!RDv*MYI!blwf=D_z&3uqPAObGo5Qx})GU z@@rE=fH4JYBw)TSPpk&|mZqnAkK@dR@aV##_ty(huvwBbL4trRPIR#(m}mD+06BdD+=h)=lm5`tZ*_JrCWk*L64_=Hf&0EX-&Ka;avV1kr?7(Q&ILZ zsf4#KE;9IL(6`Z2T$8DFZ*0n2Fl`|2Q5^Q22e^lQL@ToS2D0o?CIC{w-m!JA=p4~1 zK>ZCB#&due26;^cl4=ur0n|8+8~N}v|5MCk+gO{A0;Nc+$D2#7Phxh8%1;5Z?Nes_ zy%@zlQFH#Og-g$Ad}u7TzG!;@)K|yROhLqV0=8Ic_&ZxR9P24Bw7vV zdlp7btkdKR0!|^60%k~m3*Ht8IQS!Z=c(4E9|1eLc(-a?(FJBpp9o;l>>^md zkx;HxifngR*(>kU_djIxLyNs(624t1T7$>8A)YTUv+|E*hz4pG-kAdFe?UoiSx_{N zV$vBxX2;+Esi5I*+B$9AiVNW^-!<*ekT~?}Mij1F{WOX!vJ9q_K5tDSi-BE=QE|?> zIIbTy?yRVuiGR2keR=Nj?4IA%hrKga_CX`f@0&ZKMo1GIoJ46cIcYRB7xJDRoP=Vr zEzRE7RL#ikq3F~4mcDQkk|#-(n2xlmVi4PwB#{nH?Wxjov3pwMKi}cGtfn;sWLLDL%SFrKxRz*`|l7$X(%2=38U*5hbUS%sV9<+7I?7J$UTC6wmEWhc`s zZOQ9MQud}9HF*XruN|GFe^4buUxEE^amg!tycew>OY>bQu3TCT*%QJ^+)bIL6Z0Rl zb{naSRev*W0hFT#0bL*e)(8#EOxM>1S5NkT2@PWs2gG~MNWf~v71-&e7!=}^K?Rvy zR}7ujk_`&0jN>v7=-e$0^3?I|TiXgPQjn9w%GMSf>Utr0_B$8@L9evw5}dZpET)$=R4xWH 
zd94H!5!B+_o8`kSO~7UdMe{zvI;u2cj^85_1)jq-rs+RdQbVMzt%XNGdezFve(azhPGJJ zw8bMnd#DFTyt8O*uIAy-yP21qZS<+bPp%A+{f&yHE@*Zz-VNm-PTfF(!zBaKL*Zx+QgHeH5 zas2hZI|3S3%z|VeyMy5OS?LS-WW^hJ`V#Ens(rN0;fawL++Uf^c)A(WBlN4EjhDY( zY0EfCWcv$tNWii4=uM@V2@hZhWkPTuGLF? z#mB^q;~vPsk#c8!vu_qh$C+!%Y~AL>bB%4`0p4=V0Z4u7CHlC#gkSYfGKm|&O80kW zp8|l;NL=f+y9Tbp)QN&?qEhh3tgjg*L z*NZj3$7IY`<7!C$HqZr0bZ@V0XrGyi19th{yPR4?H@xd~i$5LQx5@S`>B)A^USb5; zimXafP1Kg^=M|Lox83ymqCUb|kVRR?$bBQVOq4IYC2)ZXhKg{~(IM2%JiiJaLv7Yy zfKH!(%jp|(%qM~d{$i9X+tr1y+=5%WUheY?6=J~7s~*>w$9Ih^`&}7Yn?{u|iHGg8 zpSi_?5T$3cOvMk)c?V5saK$cYzhJZ_5FLI%q@06_wR^;VV!#2aLuO`BygmgW?u5Dq zf2Y^$Z>C@@zV*1 zu+M!B0UrOzKgcjT0CMmCV|4PS+WaXx6fTP$5K(p)qEnJuzOm?wba8iK&#d+LcE zx|+M5XrUuT+_yRX)X5RaOxcOU+i zB}m~CV0l+v*V1vTk9wynXFgW9g$&h(S)l(bEnYC)wOmBc3)vO(&NpnKsT0)YZ#>)D z*%G4k=V@FtW5|5$=SYWo(fx?rB)Jx?FsgD#ED@~H(* z-}rX4>w1g8EwGrk(IFJd5z#PTkkx1dS586SH{*+_ig%SdC&`}rO>uc($9-{xZcsOE z$TRR1Ui?F^=>0@0vC1dY!!dL(4Y!ONL)`8iKi3kaJy&;eiG{?uA1Ci){}!*}EaS@W z-S=%t=(qad1A}Fz=(+gj8@30tn!A(VEqUjmF8i$TxLu&wk`vf+N;&ERKPzL+!c1S- z-v$!iTyq8_7p@eBfwdaCAhDd-6^MG=I(fW>UBr6aj(I?vQCDyQX1f$AxrNZ=4X8?j z2u!D92vqk}@di#;9SYCobTOgOe@;&I|K0O<$@f+#vL|)q3C2Vi&cROXc7c7q{}0Yv z=bQ6JRdR{{+hWT9>X#_G{!QiljtOSdCJgFROq$CIz0|beG;Hfy`Cq{SoYL_Gqu=c{ zYfP8Iwqb~9;I~(Wow_6L>@d;s>^kA?;I#h2ai+q3;_{Tf{fZ6i5rYlWY`0${8E27? z-hgFS1vh=Q=sn6nUGC_Y71r%s`+0aA+(neLHiWXMmd*t((_1nxfUfpQAacWu=TA_$_C+JEGwE(%l`> zW#D!lR2Ix2^wIcrv3%__o6t4B|B<-9qq-y^Ifm<=Gl{thJSc#HMf?DC3~7s(U!^bc zwzHfRP8~Cqgxj;A{-ha!h~h=mF2~P^;0X7vqpCSeCq+_=K6WNR4xIyvm_i?+WnZVX z+F9)hdBiY$wbDsj>VmjV&A`Kku~D0m2E3AB&yK@kOZfFicr0`@xqBhONpygmxA<>C z9E7{%7~>ik&K%?5^TFT1QmOGu6TD%u&ct*_^DTE7{z)rv(nb9;3^T9Pa2(OsH#hIrV^g>L%L;Wpu;? zYuK@JtliZwTxY-jJc^`In`XF`JsL+>NHjD)9P-jw$b)J4Ix&qN)RFgtFr&%>(F`6V zvC~R?57NET18zomv&DZ2mvmKKXoLmTH?l#6x}n%$7GGJQeYMhYTQ9Sj{&D~5{j{oK zXztnDxEB9Th(7j7d_pPI0rl`dO48fU?)-m8NU}_`pTv&lIPOqL!*+q`jD}5x0F$8% z!iP01LG)RyF!qh`vSHfq-6v@q71vD(ARCV~ASJ}IJ~^S%{@Ys&KpfCvLcf+=LTSnP z>IPOsjw7-E54_a|H|9`ri9P#}dD3@`KaT;PJOK)1Lk~TCa3C1+kNdlTd}YLdrsK68 z2ymiO=$KcyvH0@s3>oi8df-;yViY}vn#-dkt`|<&Ot&iE>XS4ogEu1_{^XVyLOaM( z@zQPXruDhE%dm5@d>du^bVJOK7^@U$`Fte1vieDi^s1EE6?j!DXPZ44EvRH7{YM`u z0M$=so;XNHQ29)QYQ*?A3!|IWl)Gk`16)3;8j6L}7xG^#1r+ekFNi2fB&|6vOWh_Fiz?=c#ilZLP!8s2%Vqmo=ex(Qdt2{82$W2uBi9X(Vk54@w|155YdG+0qSnE{A{<@MuTB8K9w=^AbO_^Z5?!k%uTg0KF*NQk1H7Uc> z+}lkLG&pNV1MS<-zCSlMk1Qw_F;}fKH*e{msbiFP#MG=9>q{(G7PURsQeo6pS^Mx` z7IAp2WEB6Y%JF}G`=(*yZps-wr+ps>y-Qp~kwIncmK1>%l4vN-vwz!}#tBIG!ZYN> zWynOy!A{Q*+G zH-!9&lk)EblmM2(vGR=hT-zxE?qBOzDWC#*`U%hNLkiLBF5R}P4Y9`6`$zI9*Yyev zhqhZGKu4m3;V2M$DCi>hSXC&-xjO6{e!-ld4YmB&_}G0Jem;!#m!=LrWl5 zMSwqZ=mB$JzcPJrHK={Z+iI?1n?P9{`4cMDL6*9JhE|W=+}BT2OIve?xB-=W!24+_ zXg-%bAG$%GoZ0XJPiztY`Y#%tK>zz?u|-?|n?`QDM%;reULpxQQG|v$0>Q_pEVYkQ zzJEkM3W(VN>2Hrm-+8yqBof;=kUWiGomWdF>`%kPg~2V;=)SlOSuRmh>OG~-AR8dy62d$m08anzb$h8+pw+D&QkdGoj%X6 zTHg$ch86b}3-qFua@?uZQQ8*v9E#*i>k0y7nHcO3Cy7~K+noAS(tTg_#dK6$&&(}% zDE)5@VnJf3mHvmEy!1pz5oZ`CbBZSV-ONFgN4sLup6bUK=@=7H~)l_X-Albk=7EH56 z?#eElC*}K2NjG=$Cg79z@*%B7=%muBI#CZ5A-gF{K<?NhQ?)46ipCF}4pO8P z{f}F#o$brC4Ti6tn5!0lHlk=4Jb;F4-j?%H+u-Rl0Lw=3jYx}Vomxx~Xy z%u(4UKsvhZi`a`J!~{#0KwudPHm3RHeH$2hRN!hkUE~1{k$&#TI{6v`58-PWONp+i z{T9p+pM)r#@4|DI7gFX{ZtG!IBw@=FwA4CGT~ldWo;?*11GdwLT0>oT9IQAwNp`LC zk_WR26hr^{K8e58Ey<7napQoDD(v@f;p7R-R`SuHMg8Q0;U4E^1eZlOWldXJHi z4@o}Udlf>FS7f}t7Ri+ih&C*fg6Tt!h2zmOhtwl zJwPBw?kD#TB3uvHra<}&03j{*Q2E#xPK)nPVDq+&Z6c*begi83bE(E(#L$ZX(LCH* z2duSrYi?k6o4Hf)F$?<(i_1~1nX>P2E@&r!=~It5T@oJNcJ>uP`*hxMsQ*LixSNF9 z6eIb26^b4^csehgH3hI&U%hCD{nha3#9om(uhTBn3FvkeHe0uV8?`j*f2fa1*#`z1 
zKgNwk0nNnrzc+JaWu1YvO1yzvKHH+LDS5dFUihBmfgdUdtz)w!Vl+haovlcBVaKi3 z6^6a^glltrYHK2;b9BD#P<~7LiBz&U?*Ez}yO;%T`p6tye0xUPPPsJsfu0mw+8~kg z<&sga(^kZ0T~$;=T-tiHS7$q#-~O;o6b<;5*i#ak7z$wn|9tHko&kK()K8i z%G=kip+nFFD|GS_cpd#4O9WlqIBUn6Y_3C@oQjh6d5E-&vGRyu6aS?RRq)EMyA{T|c_sm#5VS?vh{8!a%-B&%#EHO0GJ526vn^ z?ODU3w>uV3gXW%m)%1=F*KNCanf<}l#UUh%U!%#s|K4S5M|c~|zs)*$|L<7`^*;q$ zYF1x-_vO#1-!y8=Z_V%&(W35xsL@cM<|`NJkVS={kj#}^e@eFFyZ8y9BG(7=d<}OM zB)-NJ6aM(D1>xr26DSBW&r05ay5?OIKrUBqfsj`$`=Hi+m^b!$BH-hdl^}>NV1PS* z6}Hs1TQ=OqTvv6@JQ%A>+wbgCEEspm7Or%Ag7wUhNGE8)aV{{jS3N)$)B5cbF+Lx_ z(qw!NY}-v|Id`?dkHO@Y`PQMuSW&+x73NzQ8n4RqK(tEnt0Rg`y zMS0cRviic;Tr=IVuF=lHXA6HKLjSh8{uX8+pwfEbT-T>9!7gDDsL@k&g7LxbUI>i| zEl<$#Bmig{7N|zn-N~*&uKz0EiGzTeH79ELY56|6>l&!s_~A;}pz@irNZGe^GL zD0QX+OV;UFH!@a~n_P|o#hxe3h4nsZn|??U zp@~?$xsiD>CtG*sRu4?Vn$joFaO5Pl1ycL@GPG{g-u4L~G?|>Jy zTa9H#yPWO7I?jF;^gy(7Nz#1|lFyheu3FFjT`moroiSaBJzR~mnOS`q^O8-ut6gV% zD`1?Dw287W7QckKxPShq)ZM+Q_QvT4=^%fE_K(HBQ`AsfhwuE(OaOwP78|w?0J{nh zBahX6t=MflXVdATfg)Cn#S>C?Km^ipGj2?%R3FgID`6o@^TXFKmTKKmTxZxNh)KUP zGwa}RH}A|A&UO%uXkqZW|I#|v$Y1S~VMDf2Cx0B`D{8X9HY-Y7DtYrZ)(GwhQ=QR~T) zgUmne1!%0IsK|QFT?I8ve)mIuukj6{Hg_B}v7aBu?f{*4-7rgJmgE*J;HPMODJe|$ zj0PK1U#i}Fy8E^ln9HSwyQl~>e0&%r85}H(UkIjOkIczcepl8fKcjFzE`=u2_vgfd)R z{_ThxGsXTn7v1eD>5H5~ukhyF3CA<_obN)TNYgmQK3g>RAhMm9S$1qA9wGhQzn<;) zT36|B-q#e`^bbM-$CHTv;fostt1R_TwF>h>+gnb3?z09t-=o@Ye)S#yK%B86U|XJg zvvf9H=+A?KM&%9|Osp?X(8I>It6Z6f*g;i&&_Ag&P zjR`R%03E3xHjh#6lnUz;RRishAG3*DSFICE&#!ISjqb31`3pqRuLkBF43=HMj`C&O z4x}om*G-4kXY-??ZOju%k+Z?-kwVMYpRTU%ki6YzV-uV+=CGZBQiWH|G%PAS3f};g zh)LQJBS-KtoRrHw{`~aLYP0w;AR5lmnUT31ZLKu!mn7qb5A`g8kKcwTt_INfrC)q_ z2tp%}nc28FI^McGYU@|=bZ==l+KT@h^)nwOHm7z+I-yII8H|I(TU?=YH$~@`h7T&jX%}x*|f?;TD8iXQCyz8PNpInlvqr1j$zD`T6xR?D##s&Y!%LXcUBq zs;}2Mzwz+dTYTe1n0Hj`XT=Hi@utc~SQPZZ&L zLtMX6<9@nrf9z&Ve`ZW56I_ zT2#%aW>3$9FM%ZTrD|`9(~mpn%3bR8_iE|N%k#^hczeDYd+_ESDe2c9IcbN`bK=f* z?16G;|LFl}MODib+KYU8U*fY~AW?0HvAe&Wj|LSp7JeA26ha=XYG6q>Wr;XE#6KtC zab(jn%PoQ2b!#28)xC6MiNw)B>gQOGZpNav50I+)we@$6$c=(H<2U1(kab~cUdP_+ zQy~ztjMT@jKbX*xayk@VTlxpAQalB{i)(EwhP)Ye6&-StQqt0N;ar!A}sb($iA21lu{fW-E}Ku$?x)o{Qk>AK1$lM zmsIyikG~`(jOIugcOIuCUQ>Gp6YL_X3qIp2deJgHpn2{`Aq7w#;%caW*?Kb;1`KWZ z?V#^hRh(;@HJ;0%ZS<0B9C8>vfuHIN<`r!XGN9v{jB4G}2MG7a0vhtf1PE*9V!Im- z76Fivf%na_dgULMQ!j!Z3eIot6RG>N49!mIoQ=i5SSKfX8B92j+>eT$nTt6V-T@(0 zCi?mx#iEt>$uIISNgmLcQBiEvpyB-IA|GLI{reS3kt2bj{+ z%(BvLwlg%*l41^iQZ*|Y8>MYf7wEM|DIAE{Qgf9s8coUBiq#9ASNp1yuL%{G^C2S_ zru24^p^K>63(6hCL(oV^WcI>_JfQzEd&eW##XAdy{s*_w_2-0FcE(JC&vW%J?m?D~ z{u{rvR{bgkB>FuEe|xr)HI;~K01&7bV|xDwK*itiTCjzYAG#+UEo@V^FeD?{erUi zI8_g|PBU1Y1~#YCUSiB8vgzhH5?p*T471y>4UTL&BKAl5?3f|7R-FDnU#*c*zi)rWCI4xocl=Cu{e?=yJ z-e#GMxM91QyN84G-MdGMGpUVKw?YW7I){rRTSqn%CA%uc2Kcm(tZwj!l&90_= zbs8hv=dv=c_aQaf-Alt;WWV$gH4|%o*n#T4wsl@3hIY2(!$9`l8-C*ErOQdHx)&IP z{Y=q#{0L})dy~&__Hb0tC;0;yU>IdhDc-k}=Ah=iATA;Xc+wWB=)ma#Taaqd-_lD2 zO%(k)f=sHP&hVM-7u*(N#lFVA?H6uJ6N;4Sscy_lNB%|8L+Xx+<&F#B+4^C{O3pkjU*BCUtHL zBtI&ZAZHt8J=FEvx89Lgv8}ha_Tcj>k;&1xjaHnzenHy}p(4?+|`Sw_PWNTc`Jb@21=u_XYFbcSQ@41omm>q%KwhZiq5t z^|+D2z17%%5dJ?US45jC=vZXIKHJOLxnG-99QF(;?k89GnGNWn=hzLh&0=Tu7=T|a z!)8d{glDvbhGAsWD@EPJaR_SiSyOVXVO@MLl}m8%<%Qy*pGjfSGZd5zn3T?|NfqNZ z!L%o9Iu;5&AF=o#Z?XfxYR(w4fTodT&5`=YsaB*gwu9D;3Un1rbQjVjUXuoK69TGAxeIvfgMd9)fKZwponV`t zC#!ahfn%IV^Y@Pd#d8=+E=wUJVVYbGaN^w2yB^`rUP3~E7hM)S_~CGSscK<>LDYwg z(M5NE&y~}P`;o|iq#vgADN|y?Tj|JFquQijXlo8ux9Tvcyqf8Sa9qkciB&hXxAz2| z_l$8ts2>I5E;_`Ma*}R7i>pi+c58|!alcp4$3dyPkY6f%i%%#jp9!%=Ac!jvK9~mU zfwX^Ln?n!uUmhhveq+uDI5`zZLh$N`01LK+Uvb7liNq+UxYB;8zfqj?3#V22XhyP- z3|Nw@K_$FABXj(>-wEQkvW>MfT}#ecOO7Iy%Yqx7$_}Fhsh$#n>YsvM@4q|t>McNN 
zYTz<2TQ&F8L%|WN7-@%zpWx>uGoSykmsDna3Ng>~SDrL-C%Y)8$*Ms(tA1H8l}Qn_ zw`bNaZgqZ^cDNKSY|6wU{f2duSiCS3?^tulErnVi$ix^`yyR!+RkE`aGE$&HHq)8wl}73{kgPuY z1!8&JHzNd`&$R~hlC7C$XnwT(vjIMH8GvgOwnhNBFhekUSan|y4l7ZA*~92HfQsz$ zmTh(o9HZF;*2KMiF0~Gik72Tg^u572Ng1Q{D%(RPzQ-Aa{Ox#rr2s<&J-t8{*uo%I z*v2t7aS#BKK2@EUsGuGX<;~HWnr&3WeoSD>F!WmdBDmSUv)Se9zk=Dde~UNVX=3jU zV2!TRe2DdTPOkL#iB#I)%Yvwm4Qh7;(bkHe_$ z_U?#6NPHwH0qkqO;Dk*(QqkS=avCkF1EEa(PyMlEXXSf0v?O0lXSfJYFC7dn%poyQ zyFQu)|BM&g+E31D9tDRCz;ptl>4Ax12pebfu2-#Gaj;|IwKsC<7pn&S?L7@Dg{jm! z9IT9spKJV!2x{^SD%)6!n0Mcg1gch^eQo6#d~+v=!C6O!A<>FGS&u~%o~=8BV^HsG zk%!5SKen^+%z$mlB*#7PP>8r4Tg)$$4XSul*M!DEddddtX~|<8m2%|;oi@DkUp{Rj z?M{j0v(pmU+m+v63|L6A&ikE`Xd3s<=pi3qRqpPx9hh zcTaJ$VM89?(aj#WMy{4-f(MF*F<16~b<7%Fj5~013_goibnke> zKrZ1H=ZU6~C&f-(26Qs5e53PCB>Lb9yCDpnSGR$bzQKJ^>mv6gnbXc#p$jz%{AY9% z72D}=3!hQ%=X7;+R9cgbed`AX%d;k-$S(udmnclz#o_oDHx4D-C+A|M1!$VlCXnq2 zN>!MD1)Cr1zv%LRFXi4uNvf z7*;6dG;7kBlw2n}f4xMfy=;q{oFuti(B2+iGyf+f!dzsx9ZU6iGQIte_Ix0zlFncw z^hXgaE5>s8yK~0_Av{RcRrC)^}5FG3a9**1GJJuUlUtm>NPhUd2oYo+) zcbMODaN~9|pikk^(J=xBY-WTBJz9l*o#dp+;pa|kE~sqf^Yc>{Kq-UKK`+#9w(gHS zV?n7>PVO#l;Jd9K+W{ShBAHvs*eg9n2qKrdp-YCxK;91mz@Mc~)J>N+y@&qK5SH;w z*YXs7Ug%e$M>j!n&CuqRFk4!c3MfE@yn;7|r&QHJ3Wp`ygdkRh()Y*MIU`kMZqKi; ze?IY2!RlWrO+P#Hgy%j+;LV?&bgH$EbP(!(GJAJ=g(slV%)7)hVe~NmYDKXAsNebe zLI3uwI6}{c zuXBI-Pj0^=@IoJ14u8385Y1dwJ}mK_x{SM`YNQyd-pvD;nv2e7o_0dT@@`d5{fS^j zMTwN6@G2##bjHxT78nD6P@N7yf1KT#F8rEPUx4nKxf}B}i1&q%{oBjKT#)A|*%?gPKUcnl2xpX8CG_6^- zj4!&S{8lY72nN~PXn}<-DRM@qxJ7VoX<@UNj-(ZC9W7;!fG)$`PL9u7~P#X+<1ZhTXEetR$=bUN)*Lx-0I&uB< z3|#&%0o>rEs-TG*Sn4rM;RC6zn>JC=#~yc>H2m*|OMd@)@MUFW$(p~Ig;Sqp^tgRX=hj26FArvPh?(0tKSv3b z1&TZM(6Ny*ZO~9UqauDbz5GL+o@PG4-g%M&)pYeff?*Z5I)!7Q%)7sf8l@I*)VLA? zVs%%Iz~(!#4zW3IVXqjVp^>AuA23yi^fIh}dE48!S>U)rUkT9OP<2AI?4w#xtK3snlVC1 zoBvr;VG1mN5Vu8&6!hjjrzB+^#CUt|njh6gFuE;R(~uj>#U|Rt0(AamR^tV!rQAF=@b zn+W^&3X#O7*h>!9HBRzV48kftE9nE%^30;$yio^nz{ZJ@CWWZ(X4=7z-3F%;r=!(J z5~Q)m*1^ST{>etS3OS>jlS@|~`^V-ta?$}7+@WakZs#Z<7Ah zbr|#Fh8)88BFYV69;!4(C-CRHcGue-iLs5+RvNj3BBPHFc$W|M>b%QWPZW7TlNq?1l^D}TOZ7;lm%DAU-wDHeUMzH>}< zuSM6CU6a(uwG)ERwF7>l2%^=CniSCskJ7rTdisUnLEfiIcuY8U+K3QJc{=f26^rC> z_kKdn6L2;F^qrN?X8-9oz;wG@r+805AAWLuy!n_eioXYDhOiLW!7oY*h}I*#$ugVuQaTh1^T$m zR<%FAydhb?{(}92SKoI<+n|X=aJZQCon2j7O47Frf@OlEd98j-k%FnbUmhyB?^j^M ziZ@~O=*Gi`ir9G$I08PCae5_~h3r}8IFqPk_F+h61Up|OcLW@1z4>ZOu^QUpP&yAN zspe#3y-@o?;SBu7WjU@aGbpp~y7l)C?G;lO^i7Lm1+DNV5qaJ;kL*@sT=^n_KuRvDFgk0onkhJ(Kpjz(t78fV)# zbzxUo@>)Pvffb^`^$sh9g6#8fmW;XO!$+3aM<{TwmUPabth7}D31doP!DZ)`A5}bq z(-Q19|lHSDpD-Zd{*@J_N>( zOw?<4?j9<$(o>w~quS{?70TMpI2lQHh#fdY1K89KNd4cO77!YqQzIXQig*AnC(tbu z=<<^TEd%TZ$@iDkTqiM~QK~3nT_m2SGd=@c?zYRR#aUM%^Jcg;_X`QltRO*O7?6<3 zPYOZhAN6?m|DqoGELHBO8a+dXq~cu3BPI)R^ndtIwMYf*KG;Yd4f7V@JD3AuvJfEH zY_q?H8_+}^=);&CbN5VOdN=+S;4{boXmttXDs<|kq>gTcV9KOiH@Cq-Kl>*4qS(6p zBQvl5boGMaClv%kS=s<$RPv1cQZUZ28p)L9`GT5ZH2u#!e1Sr-aPKg|LHG4`$HC=f z@0cI3peUq7OJ6e$MKBy}0`_BH4+UWzL0VkA4c33BKVuXGt}NNDUMz09PZ@Kl!q?A zQ9eOADDK7FCQqHtQwd*;Nj;8aD5o&q!o*7=Ef@-NZMJ>OW9diiyA`4xc>C=RDb-b? zUkKqB=uA2@?j@9Ig#uNsn152D~zj zHn=a9LvaaXU2<>FW5;d{z5*#~(F?R*FWGHAA0a|WP?o2}27bQ!YGgdA5lN194Ss#? 
zLu8SCUYQfMdmt_9w(Y1_vZ~xoA0(ORYPlDQw#6?-YGlIY6@fH5)>00}zR`{DqsT_f zrN@npctiAA?f>ls;Kca<3)TOJdEdwdB7<(_K)$EBxk|P=i;mOG-Sb@Wt(u;!aE|!N znZ3+3XhRB?feGRf@KmEfVtxW8b@S}CA6ZN=vLBZ)OkBba)MqpI^7H&hbpjY2V#Dav zw6WSNJ8(aQO;RkG{2V;3HT^2ItmYZCp3+G$q*LpRJps&`flV0Ukh+Gs08mhflpzD{ z7;(e}bW0cyRIm#-SRy_X@J;bZK1E$Br|B`8u(TP<7+0U8%<+yKRxf+QwhCV;N0=0bMKS9s`H~ zrybhvb={c-3MP=CkBp+Q;YY`rG)JAjLz`v+_Ae0=azn$D!pu?gO7coeK;!YA2s)P5 z3iH#}`Koo}_Z6JHR83Xh#twEMzovJ(9``Nm4N-!A ztp?RY+Rzz}->%eO<@NHHJ5VDpL)WBpfRX19?>_oOjBkFDQa-&|(o~FP zp4*3)x>KKlkogsH?~bXqTNio}g$zlTa%P-a%zgNZm8wFgmB*j9q6A3^yPhOUy;;R} zTeVJ*^ToJr?@N%0Dn}x_hzsFnjYf}H_AtUgx78~$nv9$N&-M{EpL~;-c((^JKbijj zk`te+{q02}WAaj-0(Cob8Av!#hwo4g4Tx&zO+A5M$iA70YcnBk~I>Q!T2w z{9u@sIvJ}qoc97)$hC_>Ocy#kM9hkr2z43Q;bh;HZxPO~jw)(Nj}aW6ISAVSwq%Bh zKV?(8H

    V<+|;Txr4z#h~3SR?hOZ}AMTu{x#L__=gt?QACH6N?hTDiyW#0oOY^`( zK#w$J$r$vgy{ZWQ0UfR3|N4_xyLn+|wTC($0V2RJb>&vlUeC7sQ7bem z2G3-=FOaw2O7QoF1_4~q7d^6unCF4TyEs|-+wn9-Xyc;GS6I%j-iR5xD!3wI7M=r@ zJl`N_*w7vJlj$eP?#P!7ak{(n$(YKw8$*Ti^`m^Ke14j5gCsUhPc>?C<}Zy zwHWP)f%D z(#^#a|NYOo5BGV$?r+ChYp?B5g>CN{DvB-gCbWF;GuNM?5M)q=qJ+GqzQNIxTb~G@ z<|wq5nbv|?6u#s(M%`IM$=N#P#iR@5GdW8%LnFJU5Q8Uf?G2blzrJ%r<=vsErpUK~ z=vcjbVaDDEK~RSfiD&d{D-@YKlfHtd+WR@VG;5i1g1+32DZ`@Y7M?FoO!UQT=5X}5 z7VZ>P55I=sJQ*cSGoCw@#I6!39(}u#dZ>Ebj;w|Mc9KyZw3cxr82;qxAsS^eelKF% zBez0?qeWZ(Ch&kJS)l`Zepq_kF&ncn(2?JT#D7mldn1nQ{hwvc*o z`zPwkaAV&Ode3^5HmRfBa1EhxixExwK$0b$5Hqal$)By-G4_sl_!o@tlgf9F&9k2k zg3(5;^vcUp|G(L%<6@I&>gQLCz=?b7+8)dZ>EW+E0j`LgK$0XX7Q7Yfg!!OiyUG`n zh$K6;CGVURNiP~xK7qaZ88bPE;89)A+(AI-25#UBBqWsMT;~=Jh33pO+K_+D2@IkCOMezO- z9?)vP=BfrK{o*uOP`-~hJUJi!Ccs0~>3))-}nTm8<`ebdwFgtnOr(gypiKK$8sP>)n)8W+X2Ssr9wLv%Asd?7rd!5C3Z~;^X7da)!7Qx$={_6Oe7C!^j9%Uz48lZihznobuQ}Quz9^S3 zqb_d0e>*v;!0Dxo40|&vdS3gxC42F(Pv%B@x4nTM)uKHm5mtnSIff8Ppj3#2?u%@%cOnxZaR*EU*3Nm6 z`+rcgk<>s^YCOq_O%ZRx=VkQ{lf)X3)E4SOsj+eFvjxw*X<3h;A^uWoq{k(K^O_@8 z3&Y<0#)9pu)(_-O1eXeYl#HAlA{9Y|w^|e*j$W#~9Qv3%B(1yj4Skhe->m?2t+1em zQ`h-;f$B1c>w}S zL%|B0r^!CO#m2^73JgB8wY42xiV93`J!OGV;tP7+;-irx^gjQjAG*FqHq&g2Or+zU z$9)Ac%;lKnK(VAI=Uhwr0dHNHQt2iA%S)>5Q5pzTIZUOMjgqn&bW82}1pLfsZ?^@&ZgA9N*s2ej>Mpx2 z=Ao$1s?Xi#fpQJW?S1qXOna7))2?4$nWj8GI-_wrbh%$fFl9!&jF2PSi8AxW;~w_~ zL4<*uw${WKA^4#^e|hu5)dJ{gtrC2ubWF>!D?6kA#F{tgCAEXTch%hc?8fZ-x>7AV zKi-^b`%O~+(vcW^UdQsV?d-f*@MgLC0sL=PXuY)ZIhU2=9y7_8_PVqpXi+Du@^Z{T z0j}TH!T$F6lPvxUvGpZsQOc2C=WiUVI!=s~j}fjF-Mhd<#@MswoHl3yU}S*$C7ql< z7#1i#W6GE-tBe5N=0p=eWXU;dSh0Vi=hF>Gk1`<=Vqu=+KEeiWFK9P=QR7B#x zd#vwP2-${!mi=~szmgZ;S-wKA4As|)X}5j7{A|;AE{dqlm=xr)S0INZVqlW@G44KB z%uFtqN>V&nC@J2s@Ku!F_@s<(WZiIe9-K`ZHi~kRB8;eCqZfA-Nk~EnI=M^--sP8b zD2RcMZX1W47QnuJN!s}(oqZN4YAB|zl++r2$0||E^`&u@$wS;X;-;;6ABsohS|1l6 zOZ=UFxrIVulLmF81`pEBt%K zwnVwS-*`^bEClorHi)dP*i=6(E4$A*+tV)J&-pdj9^j-^qB8og%K7%2pDP{GtWkuN zwf4V~+a>zo2TUVdM-(jG=_z(${G6Vm8wWJ(6#uMwuWl#2u zmB^Lu@X}DZuqlHG{V6wHZtwiYMeY8@xYt1b@{sl{lxI=@i3cvy{>1}Xrbo7FyY)Hn z;|Xfx9|B3XgLHWDir&&aLQNPw97vZx7O>`^G-UDb02ozB%3n+me*cJ(3`-%j+YuA6B_g$BNb&}I-b%^~C-4fNs&~*oNdGI*q*1w!kAuMPGsRk_? 
zT*a#aXNsccMIkfj9+1@|9dITJ6aR}#!dwy4M)lS0(c#4|ba8Dg)6X+un5!GFYUssl zxR`)jl^Y?9nV7e*&2D1T)L)-oX(b1SYBb!bTFyAR|pzRVLPbz!f zSaZv!UWEp8Vp)dna_oz#nx`O_RQlTOZs0l|f$|UJpMH9`7TSJE8AqHATuSSpYp>g$ zn%w#_6*es4x<$+6@v$RqeX_Oafb-$G(4jxGJYh!#xUTcnn3r7@M3XLe+$bsXT()TyVgu}MzqZM>Gl&8 z@2Po375IRr2&tFD(_G=GAyXH6oJNaSxafN+#V%ys}~zUyV|j|djIUP zF%0FC@T#M>?@)ZtJhm%)D7+1 zZTxr_h6E|a_uq*3{G#q2RS!LRn)W`d%^HOLtY+fKkaZ?H;VDL(LWI4<&}5Kj1*Sis zCT`1k@xGDcn*QeFT5+|^Z?$lU3KvmZnR0!oEP)wjUlh*_9uSHBQ@80`^q20?2^rXy z{$~gNH$Zl;O8i{jVpw;c{8HhlFmchG)LyHyd|9jRZUP6b+EfHxt4>N^4D^Vm#zFI5 zEA5=Gh^BR74kg$wig+6Atylp(5Gj>6R6+$W&La=g3q}u5IUl3+V$~(5uji;aL@`o- zQUq{ilpHWDG##6&*IkE<8B>{J=)*LZ%2?ACd;VqX|!iF=V_HcPhSsQ9MD$#R<4 zo&`-%J$wJ_dmK=KOKbIw;tFoq{p+6t{Kt6d|xL@T>jBwBJ^*RP`q;&4JkJ*Ac?Y4m09Y)@{k4*d^REP4Z66Ykm zqtYl|I!{M)Dy%pve5iJ-gO=_m`b4%Uo7deFU)tarL~2xgfZcSbE{nk zDxaxfD`W)Pbe`2Vf=$MHleg2Llc^KXwpR-9$g@Y6$xDl!_JL#?5VE4GU-?{B3$W$M zdO;n;Fj-mqYb`J5m)8@KW0fz?iS9A}asJ}vo0UX)O~wLx{iYe82n~6{v;_LA2PFw7 zdIzA_TAFf+a-!04Y%-otADZ7UtKTyJ8siSBpRuS_p_cwH`F};j|4ubYluO*_fN%J@ zvtvPaGN1&x+CTObUP+)>egg7)V)v`m zw6%?HEP50?yyTVG)D~c6AH1Dq6Pm+O=aBPOXDnI(9`$&5eCOCLa11CZui~X7r)dO_ z_qo9}-1SIo!U_B@Dz5I4_-N`wZ!^1TObPCY>)}FZ@NGT&MrY9zt*#!PF+?iNtPwqJ zs_a3_#n&f^eW--@h*!fGd6jGL?8L77CT!0xE9dKX#r3?8doMY&?*q+*yy3~-)pfic za(<^A*(ZHO7Bv&G%pMA?CWMqn4X5g25nQYk3x};KEB1^gQa@!E-wjY(#Mo}gVgcss(boBC0$Fiyrhb?^or zlwQFx6QH3W)Ui#d+0&ar<>O;@Dm|g0v!u56UWKUnPErAl(;tO5Ra+Z$jCtT^o9P`u zTVomq+8lt0Hea`UT=sH4M2Nrzif5{=x$V)ZyY$RP{CxR9_zjvCdsPf`yhHh6=ohKN z4b+y58F%sCIJtH7W$Bmua{a-l^I}o-a6MkWFAkU58@p)+pQTLiND-QjO#;d(_`B~! zBhfQEPkZrg6>9&rPP7+xd9u^o1T=YUuEz6kQ~qDQ`rp($A^NMxDV9Y``F@8Giujej zC8f(;JSk5?s_EkTJOKD0p>-JdO|Z~q4#K2#w9sUD=fsU(mSSsEcq3;+w7`H)Qi*e8BPyO5 zP%@oq`hB`d_s8{Sz}_dv;iIMvio1T?&Lb>*CZ`q6F1EYA38As%E!?^UC7uf&PtY%6Ta8sv_Ta1AN_u6!x=nY4N&=_V~IT}mI1|9nd-S6hNwNFCmD5s*|)VL^|-! 
z(WYMpNs{+#aVPS#lx<6%;Gbg~9`T~WU&(pOx2UE6YOjZ+&w&S`z3u5qe;H4~O=ORu z0SMwJJ87E{3tc01@fm*cA4ptiYdvjUZ}Bu-n18QqNzqIzGZKXN2Kc>8Ag9qF}I)#;UJ45s^W`VO(#4h3LU zwmEmr1IZBqzYAWdJl>~pHv9^}S1`-OXEBR&O@)U)ZP}{7euyR(xETvZ7 zs?^x8_Lf3HeIOPxHxHx&Kiwb0mQC(LdvWs(1YqK`HC#8(1+Ks+5w~s1_d4HDH`hK$ z2L zZcv|_x#6XrnixBOd=FZlPtBcgP~G@;$v5!1gqDIt;qrW^U7H*ZonQ9mzI=Czy@M~x zZep^vE@;;HXZtL{jG@;jHpJ;Bo*9dR_izg zH|}VB`+B_gyh4$8mbG{nDhco}FfkpsWu9r@4x zobWdeti_pL*ZFDs}n z8_A5ilb;2VS57#X*mvaQ>k`ozcEsUN~_*3q{#4gbew!7??cfmd+KI4lG#s-{R8~k6Nk+u@j01t=}Kn6 zEoygYeb&Vbg@lPuz#dd!;`BxS^rBCo{6J!5C-%N}^wZHHn=AkS03<1YcT-Lb%KHBX zNI3twn~wE80+mX1PohZ@iPlN{Nqz>O2+R_f$6M5j7t|z$CSU31nLJD9`G6An^ z+}f0&W~iTg7dYR1)VHdY_QRo#Bb4I~*Oi6#`9qmH$!O(u7A4W4gI4Bfk?&+RU-$HO zfz*$RoYnK%Bfc5l8SCR%LOhCf@G}7O`g&VBvtchCFgB&KqX%HlxToq|z4y@8d|4zH zSfgQS@|hvN)Cq4NUdg_>VwM)EvPZ1GQ{mQ1 z8}(UvhI;22(BT*;X9-6q@S>HZk=aL%+faM<2J~Z9wRr*(Ryjrl9#5ig3TUkRvst$nAAUC9`=$h zA)Hz0S$L!Ao*pe&G2a%Dx2Eyzu0iefeKvB;yR+NEnL#W=KO@F?3~u|^P>HLetRJ(L zC2OdEhGzd`O)xrsjn{p-I>oz9@t-4Hjo;sZT0wTi189{1;bTC(k&ge$cSbIMK7Rqp zpq_F`%O$FsxXUo#fp0cW5;-I-ePSP^fjFYg#Qw!x>*njuU$8dpZ>Z;#;i$HawaJ=5oTw!GkU zh!XUH?b!WkzuIS~6`({Sh-?XNbgx5uQJ-U`8bOU|mr^8v<~2Nw*tpEV7+32fLrOxX zU%-ig)LJ2ojcnT8<72@*DZDXJtR@gLTF0ZhV!cTpyT7_3?)*Fqe~ZJ0=oN9H;|^({RWtZoIPxb1g0XZdsG8(R};oIrEEU(yXFV&8X)v*RfSTt_4 zKR?oIN!u67GfzID=@|pd!uRk%J6Ry1&%T9QFzcjy^Umsehpd#8_~-C zvV3A;Xko6*WBC`O##>oJK7$tME~u^M#FOfoLbi0;k8^ zfxDIqC+)-OPRbGk$y%b_zL?+D>$7A9F*u0oR*ALFN(r<5{Ehb$pr!G3sja_hd;HfQ z#pQ>o>ZQ4APoo77_kW_mCL*hF?Y#`I>0jWQ;u)w)EgQ^S;C|bphJLExBO+V2prF=8 z)>#W?yfPAaU}llT(U6ea_&4kM4taLubJ@a;`T_b2uw}D3ywQAVA;lK|;6wCtTjoE( zki$~MXE8;o)7_$TUnU4gocI?@D4DkLHPN!03P+A~d%B9EA|BCf<2K2)$;KXPB$3gb zAK$B~pgPn)>6=I%>H@DAOcNGm+HN(f&vzF;>Bcq5-3>)S9>tdK?cXe4CmbV2M$oeW zWmTjV@ziq?OjM}E5YyCIso5#(B=1T5r;hJ-Ei4&T=K#Njj$4+%7eKJ**&isXQ{8DU zF+x^&pj#&}r-8v+V}VDsZ;#3n@A#-R)pxl1v)y#v4Gl@!D3Ue`HMg)Hwn|%biVVM} z;2~-W8$}xmo?#Z$D!-f)?bB}*C53t0MPs;v;jU4wd}?q{_Y0r5g1imyOu)_Ru znLEuF6VPtOM#umTy5duzskQ^sJL$>n+Z@xC_7vTg(N5vQk*5;?c2UHxobDL-I2+#- zzSx}$=r}B8Pq-N0bqo0L+KP5|V}SA$jUKK1F4`Qb|J)3orUPce&+Q`Hr89dNSxZtg zkfc}-;)jc(TLElxfv7~XEJvt5u(zaY1`X7dz=s7inU_!nKkA#BA4|FZV8eVHdT( z{sZgCL-a#_MmruELz_6w|K$6L5WjsTZ|T;(B~(EutGA`CNtEYhRe@2&)UWfR7VSp_ z8sFQN&&XZ~VlzvLNGph|liUf{(vYnRYMbc2Fgc+@i!f@gC-J+RTVg&bx~d#k1pj2D z%OVd=pIS-ZCH1rt^FzT3)#QS%%P8~61NBt z3XqhV8XjpmykyXb=~tSrdI>CiG-kmSuLeopj%J0P?Vo?c7W1l{!>o(4s@3W^9=Kfcdm@W%M;x@P{q!cH5mj|6 zu;o0iL7nH4C&$~#wueb))XAyLXn@@Y%~u%@$Dl63@Kou0_H^K-+`-gr;-^CNK@DU( z?H@JLvvs}tRcu~{_!DIoKQ>iv-)iFZxVlE*A%Fe}0oV`-MVO!0FyWmOKGrT2eV@i) z&OE($SiSzzeoxX*{g(CsPpT2k-hDXXdf0#LAm?&lJ!<}4NL2tq3H5zRjOj?8Af^|a zqI10T(p3E?4;4RXiNC2x{^MNd^UWnvqaK*k@3Czr@h02U`$}ll+M?r#*C3hUq_Gr~ z@1A9$LozV>ToS&&-6mt6mIgDb>y-+B)ql|KKiwv&kl~wewH=%1<>PFfywzHND!mo^LbC`@gP9pEK6z>90z-*iy{!JZy)r%P8XE zw2H!^VVwkrni@VY_TJKYXSDYDf6oY@qP3-F7a+|>;EwrYB|zpFJ%Y--7N3RfH(Au? 
zlpdn`M4HG5(*QG74=We!7dYamS-Cti`I_)x~A;dbVKetXco$vx+{JkP_0qnZSq0`YnWd zQUj;jSJPHi_*NVM4y-t)KgeA2fJQCR(QpbkS{phC8!Xl31}jU`uFp4-uBO{MGVawf zDp=pybfGhRMzy`aq)n^TG>kTQj6_F}KVYeVzK*b_FS?Y2w#DolcRKb>N0_&b+_})Q zP3JV;s~M#;BQ&Lj;>{98awJ&*F4g9Y@Kv_7{Pm3V@Ye-N95e%1CG?C&&e_edI#|CN=FC;T^FN{w%svR;FU+DqnpSWX z+e_RDzU2`;Zh3UFInLPe^b#u3C6rd+Zqy_M)OF%d0t4M3f{ysI;^g#ZgX#bn=V__S zQ5MpV;+8KtB8Y3k`8&e5=^xW}4Co+<=ik;!*itT`ZkK2iby^P>Rt+)9Di(?#EfwtU z5ui?o(`xoEntdR|;a}~ueeo!j9bRLY2W*Y7dowI}_UeP^-6^Ew>s4hFtREZ&dv~+M z2X-D+5^*Yh#jE=94UqjZ)uuN{I6{VhOXNfna3z?3H=2kwRsO_SItYW1=PbDdhIg7@q+E-I)^!-R9N9=!>_+>5)g5 ze3VoabvI`_OYfg6$NDRPE~d(Is>0%*kT+nrE*+U7bA5qRXWRoTh99-QfLIK&&ZCG< zC}X3Rz8}lm#?g9z<&bJuI{Puvr<=2YKx?(B{4nZ4R)%!Hz%fsTiQ=NLrhbzqf335W z&Qs5WAMNkEB%ojN?Ua}It<{|g|40)`DYN|$kP{*dgnW(ldHq?>NIPA;&{rbS6_6}1 z!|S$^!0(#0drTqtGC}857OEwBrYZD`y!fli}sf z9cDr+|JP>8GEG{LaUF%j8pfE5TZi0k+a+&mq!*;ZZlTysiNLUZc3OwMT`X3oRpe#s zlxXVMR>KykLhGO>|GFLfw5(?u@%pDl4UUT)CBKyfP&=MDOJckEll@DP}@M|RrbD8#>$n~=6tOgImhoRtt-NTs(@hz#KfM=SE+ zIQ~Nh{e2^*N~`ghdFw_1Hn;==>nF~Qk~yZA{0gZ7thF7vrO(7|rUnMte#}8w(x7{b zCGwHfOzQK?JRKvwsi~(EL;k}!UAC&WN%}$=5-jx7{P*(*b_P~9T4M7jO;v<(X$14< zmUCb9TBA&K;tR%z=^E%BbdG(SIlQ544>y{^AvPK}Zdd_5(v6JP+LLN}>(E5R*H`Ek zS*Hz$n+qJT_f-2^hPlUf+-TWos|fK&6uwKL`fE%eCSkFYq`sQI*SBaJHvpm1{Wu8U zBr%%hF8quTisM;G5S~U-3Pf>>C(Owd4u6=RZytny-F~BeD8BfM=c3HgQ4Tp}0nBc$ zp7p#FBK$Fvp?`aX^_tJ$E!goFn8@{zbyrOaqIIHE|S6s1}wqJs=80=jnrCE_N&A;LTm_n4wGT!K)u|Rp@lk>HggMS=(Jp!6dHtf))iD zC=a99OIaNVXbVQwlAB0LhZvt-SjbxL*gWCd3CJ@367 zPU+OfKD!*!l)^G*qUrqc=jWm#_qb(2+xUU&jZp}?ry+VG>n$?ijw9&Wi1YoBsLyzD zO}}b(G7#qxI^%!LbA0KHEA(^K>6QzU(`>QD=e-^PpYMaPIDG=Rf(vVjdq}?9B>+`u z1}p!tPIyUO7Kbet>Ah9}b7~l7oE~@hz_X=uwB`RnZ!IblzYpnS6eA;zSyuKI;}iPN z_PUO%@!Vd;)Vo3FH&22GrjU7+hJ8ic!qA>)4q*P>0qP<3Z0n%(Em+S6-1khxz37-F z1iS9IrHi;nn*oeYy+p5L&1hCcR>i&&YCD?NvXrP72V)_mX^;qIfT??tG+#34PAW!d znoqNcTb9c>ji8!(wODe`_%El1E1CqH1Ru~WGB(<^o|kuNrmEsq9(Jh#v{h#_FB|Q@ zn6BvSKdVQ+p6!g)8s`lWb5!l_2S<>=0Lia(*_CsSMpLLvnLF)tI@O z-_$UFXYCkpT-@WfMJz!a=$=P>{|Qq-F80*~dL7os_$&()RS{h3v!)$sUCcamY6n^0 zt)4{CF139x#j(9$oi1)@KnK!jBosw)78&v@9` zMjT2EAe|bO6QQu^z&`5~=(^}a16wYS7mc>p{+-`8+{#zjX@Nr$5lKz1TJVk`_S}Tp z&k4aYQ|M6Bw8i&E^;@${nVQ;Md2{KI;DY^RsvR<_ zBWQ1Yiav4rYbZ_M=Rsbt*($=-)$Lhm?iolODs~y~ssBkZDmQ~kV$`n9DhNCJt!KB8 zwuKX8P_OLwFy_7FzJ=C^p;F;HhPByAZ#sBfxZtlFMfEGnPPdipk6GH%|4c>uXYSs+ z6MnmR@?S8pSm1Mce8QJpCau3778AoddHv1G4aO)5MSK-=oL2X{tDv#2=Jz{hYLj%n z%OzO>t76fE3I8Pw#wv2deTH>@W4+s308Bfo!3-E4Ok$i)2WK^yO6VHnd0g)G=ldXd zh*xBqog2Kb8%DtNnNpgcSHvVCI4_E1m`^=8v9QkVNN1veudWJ@Vm>aC5&1jPYwX&t zS@NzuF&cqzG1qTyQIfWk%Kw=8^P|Zq=#}ZUcwt zLl)D}XrEsSn$ddo{z=xmMNIG9wB&5$P0j5H|4gD?Kw#Uwp>3oL#3D^s?avZFPd}h9 zBYwWBEq_&jdy@D=uL0db-yN|y>e8o^?N&mjsOmXr=g zp}V`Ifp~^w%BD-S2%7AkMs!%22!Wy6PfCIH3T=#w@#vRoZtm{s7vt~(#naPdAq)WZ z+A;g3kp2B_)R_6tzo5)tvztSdzmfs0yngoomJCq-Lo(oYQM~5U7R1&)|Ist6eZASw zSYg{8syy*jh%MUGZ%*x9qxk7r2Vdi2bk|h5!XvwB4%Iyw6TKpR`bz4H8u4w_&E^2n z^sBK-D*NK6Qggq@Q4@Gu#8KV-Aq|9L?=TIhhcFVtp>J(7Uj>m!W^aH$JbM)`e&@eR zlzGCzc~QIjs{r({G4TP8TI0OpTA^5iOY{2su8a?0e?KsOvZM4IU(yGO28Em+N-Ra| ztpDOqHyWSL)1|Ky9ucsXM|FdcP>qlF_@rmEL1N{(Pn|HvG%P%7tfKaFxoxso;K4KL z{QwO9GxYi5xzVNm4%yb5Zup6eLU`R~oRh~Gqewh*1ulrW7##frXNYBoJ+-}t!?D$T zny+z&HSC8!`({5}huAI|hj{lIR|_*TVj^s$ailF^`ENG6;NT1;A|Y(cK~*=PWLZ0| z4A#VR?M>hewC>#Lh~Y$esl;{v{W8&4H*Wl7Y+sU{&Xs=B?!@m$>-DLN{3J)g-~k4p&ThI$m1*`jYde+%KhP@Pi>Cnd66;_z z2jEVzGuJFKCK~8)ef!Fa=y)4Vl`1zb=MQ_@NY&IA&ZN4B?-~{Y+Ay{5KGw2MHL4>7 zPXUi!9m7>AHDqqRi{4tnX#HmSgk3sU+y^+4KiW{Kgbwo?`Som%!@=X@?DHSbgGkrD z9$(`SGCM~6OPAOFS5M`w2>s#suLS=0+U@&Sgs(iM)Ul<&Sx;iih};qF-H+9t)MI33 z^m}%&;Nd!O1h@f84?RJi{7}0&mO;1A)BDkxxnrF?*-}~0UAZ_)DH1x}O9G=08AWig 
zpxRGDhyzI^TsBv)A={b<^p~(a};40j;~D zsp8@<1CU*JDRWekX`nqy4xBminT8H~);0&bwTN-sEC5IHK@Nq;oz8XSF4y!GuMz4* zN}ocrR4$1@4;gQg)|fgO8qz-zM_pCFf-Gq4aaSRjLh&)$J74BdsL+1^1L^cw8AK$kxDDEirgyLR034<{$qu+YRbB*tQ-;l!4R((!)&D5^>Qq&Mh?*momR*BKEC=0eNlE1x^``3d1zCa2$_v-Z2RdUgoz%DOIc-*jNix?g_6nL{L7^4U>(IlLjX0lQWq7SGj^03V>f0Vn3wkKF6<6n z+bBIcOrx4p(kRRqDgL}z5bO3>%2=(CG=!ySWa;#(J1<{%Q#D<}O}RV_5bso}o$(tL zD-T?76DXO!vt=Sji0&muhBd6vaWLKa{@vHBWsge5Ix;gW+aK<&{U6J94(00FCcGxB zFO}CUe!u)5_U`|qEQJp7gZ8$%#CWUhp3OAW(fk{wzm-=VEsL6I`8)|y!UO|c2OC^p z)->&@#t|o$oC%z{n61eixuNaG0VOaYaEW~q_!`Skx};2=#-OCju6(rVWD96 zwzsS_f|aNha2mb+;X1-yGyZ2`L&hVG95n}3Eh|jS#q0{o@WTV03>Qffp4=-3G23})>R z?PAu8J`Z!NM^nosb`GDGrb%x)5!z`%41V9Rgn5nS)=}g!^Kk#Z$geV~ywwjF~5TjA;a;)kGqu{bqE5 z|8z^I?sJ_RQ*4@E31nP7Oj^q0<3`>1&kvpyl=MC2!P^Ovhbb0JKo`@lQ=tC*DmN~6 z!1^_mnHy+OMS@ZJ7RToN`C+@%?gCjWbi`Ria)f({OTQ*lk8g4Kz}|F9dzG9?{WLPS zs;UiiU6)j*@;|T=C(DT^NzA><7hxyP(*K8w_9$cf%M&()*xyWO0d#9Sb3g-G=TwHX zsvr6h(;6k`PpQw)4-f0ejkfvLU5tHZ?K(HV2%RqZ8~B8BQ=f6b90Sc-mCW%6S!|H< ztlxc9*k(@|(lb7Bq+Ir>E)3=SjsoId%QRHz%cu7LU?5TZjvNgVS{u|VYaVxsd3duANNUM*iMBG03|G9!;j@<0+UP+ct*ag^RTwmTM{(EtNMSuB3>Cb- z5(RC@bKe3Y`X#EUk#$YJ%_bIS_F&lj+Y7*gxA~VxrcIg7hW1G+ZSIwW{;QFhHe14^ z$dwz9;`S|UdzCkjiJZdn53t(D$s$1v7w;9znFRi<-$itd`-!qOWXu?*v@`rCucyw) z+a{dF_g-8Ieadm@?1!Mq2SPtt&mz}NX_+)^)2ux5kXue{d%dki02sP1R0DU?8XrYX zO9oIz_(OR9aQ_P-xD5Y`SrO})6#W-bbQks<5R($}nya=C?8CTh)IPQayb&F`5%wL! zWjFTkgic=oMv=L}GZc!Fh_Tmqd;Rctr8*AMX`*;*)3-cm)uf1q8w}u>^ zvimnmVmQ`lhF#;h3uLbGE&T~M#urX9Fvik15m7z~A~*l<;p}$O!Syhawt{44zZQ2++mmsRKsiU!;#qHMi5wVj{Rz4XsGz zk6^rCK+Kru?JEkYr8r_yi$Yl0z}T;N(RHv{Y7;JP53rsqtX2Ff<#as1ba~t9&7mc$ zoSG)7Dq?zrPeWp4FWgJEK(^$%Iw%u2FW>VThva$J#y*6^{i+l$(6gw}`Ck!75nyd*vMwFsGirR9mt zU%{+cOToL||AG}B{(5O-=Vi9lYS|ynwNyqO7*)X7!`~Ackaq?-Zv=0 z9{bcHI|)Xt;*L}DZ(+qvSGKq{Vxy)z86+xlp7&hpz^~R+5ivMuri7GsHOW1Amoym! zzjUi!!85R`>fk~Hz!&~+?2r9?#^@7i#oYo2f6bD0tV)iU%ZeqBnh<=cTUtrPc2`if z)5os^3bfjLOqzRSnW0fr4RiH!a9AciQrDdudH=EEtT*(zUpp+Z5FN|cQt)4Psain| z38C8N(B{cTcV+)zX+b;*v8w6Iy3B*<@F_n~ep7bEqM7`fVx&m7#e;REsHmuIT(DLo z9YPnMFmK$e%?nrw)<)P1f2nEf!-Zn0PW0|^jlPU5ld6FAH`U;aK^OF~+6s-;i)KoA z8SE(T+`9pEEH=?CL+yYaOZtb%Lu~Iy5DS_|xJDljMKL%0(^p|TMi>n^GhxMT`P~S6 zSQ2VuEl&b^-MiAn4|V>N?1Z#NzIu)i)xgL5AmG@(H*hK5%zv$p<9$`HlGl*fv8ssQ zf`rYH-Uof&B`(xSI~fmp^I3}u!N{Vlw3kLM(+k%c+_^R0FOYy?;zylll;_Bg%aN2E zhh%xz0`n(1_Eh4|3=zZ^9{@1_r+EO_waRy@717|zsm=D*FhbJn6L_rvP&h5-dy$bv zz|DW-OgL288!K*gvvpJI1WmTjvao-D_M(%S^HLC0IFvTtw#(&eEomk6mU*paVe?ym z8y)0O*;F-X3+GrNEG#eecLQvyz{5~umItdLTKQCT1{I_yk#!@gntv7ijyc9zan)lM zoYy$SAL$w8t=l71gn?wW2JsA9aeruZ`=w7=Vz_+y9nt;qw~VN|$JXL(C&i7eOH_Yp zf4s6%4Q_sdXwjOpP6uyq^&@!qxH1NRnuqx#pY35i_u8ZHlk~U4{2mXB(8)Egoo5t? 
ze`$}?o8D}XdZx7~o~)B1xDbw@-$8!b0Hu+0j z(Mt6F`1hH^zgND;vVW|6S)I>*?DN<)5t5$qCE6db*297%0yzEYbIasx<($~bv|eF) zw!F7$KO?~tfElwq+t0Lo_aClrv7p4EO&)?E;Sd8^>H0Z|ib6q>NpB6Cg%`9#x!$>S z77llr`s(WZ#8EK6+iXsOg1ZAjd+h^yEK@+W(hb{{mo&2}_dUs+LeMPU+ZxwRUE8I5 z8WPtGAQHQ1sws9G1OB9LU5&7PGIf&&3E#_)k?P1f3CQPW{zAcfDLM;a!Z|OVJ&f$;zX3aJ}2ZSd&&DMP260Vb|Vd4)E?bt))X0nfCzF zYL;KL25G5ZyGK4ic~PY(8oxMU+Hbteu}tMch{WFA^@eVDOwI*HSYe*@gM#{zh6z6DC8`gPd;|Zk&9UzQWk<1O}+{H zKV-dSRMh7gc>mFu_{|vpO!NQ3t*t^X6Ps zdW`pFg}sIG?{wqyrqC(M{R79=?Vhb&pC1m7&A+|xfy8y)M=oA(I9pzk{6e#fXgrOk zglnSx1-CuzMbOS=$8cTElSPqz(O0Eq~tATiHiwi!HLS52t;FhF2%v|x(l(~y>egEh0guB&o2=4 z{J1WyYYtZ3xf!wo@i6PTG`@^ap9DE^razWN>jHY6`Gld3?>vkuNZk?v`+7M|rBPSea4CaJ(gjgZT$1 z`IpZNl8(9n&IcBl1{Qb46^}dSGuEY(IJm(|u7nEXj*vnbr(IotEGnc=EWlagOhjEM zrQ)V@p|s~CsW#R#-T7PRrPt0`we8_Qq+jh>aJ@?YA-mR;BsH{r+Z}qkK8Zq_yGPH) zW>hCCl@1`r>lT{mRk~eFjD6JdE5K|>5c+_Ncfj$)m0H$%azg$OV51=Q9}9>Lcwt)K zwfLNt_vaRRJpPq6zU6C_C=*UviKSU0dMET}<@?JsDkwRCa!-%#A)JxHcDA>A6>jk; zwUNL-qy+r9pSLidqkfa!0$7?5TCDv&M$d%%^Z5Tz3gi3q{#PpcFz+Wb6|jf{@e;9f zBCzG4ORxp?(IrpwASM>OwpjRsl)A|rEFbF?&rqAAQlURD84NW*Po&&aK^ff-BPb+z zD!;}1(q(@K(L$KpvQT|f_+g%wR_~3^ri=|dE(^AXO97YCmr&QBy%h)ap#+i9Q_Z(| zx|-tsYtv%hx@`cZIn%&7JOIk5X)MmlKzZjcxN;b`R!?$m+`&f=br#OT<5wwXd#LIQ zDo+!R3r&gZ@7GCceHUh;7HV%dumqTm*!4GBuEaAI+NLS{H<8nASf`9s1Bp|VxQbKz#XjV`WnFr@(QA3@{+0P$@1lM$ zM2EJ4zkJ*FYOyyvc@eMWS>Yx6q?4~pp!9;gD14Qw#tO| zrqH8PpdD6I$7#PabsOr~N7`*(SL0Z}r6m%};Lns1SZEUB;0jD5L<9_!^+(g#s|Muz zUG`#E{TZ|!9_7^ED{IC1J_mXzA22WDC>`TY+VC+Nl8r#|jTNO10_z;6fDavpII7cB z8!hYot5NKY7>P0K9)A(rnMcn>FJKby%atRlNNo$V63EpxA9+G!8H z4J%hK&$AWctuwR{ybyzx-Awz63+Rl zsUfxq0Vi}UCifx1nl8m-qA>g-_j!4Yx*<-P&E7Sxj^msek=$&{zr!!~r8Ztlc!=3W z(+5W6hmJcsmDN3N1DVQdFmu_^x=hCW*p8j){uF7vKMygdD+7k&jn~63*ApUnr@j`Jv_YpFT|W zY+Tc$Yi8t{hG+QG4Om>pe8CvmOBqG0eBg)8#eDGjT8fTLQld~}#&&~gE4;cZl(Z+J zyhzCu3@NSEy$edZJ+}&})jZ>I63G=Gp{xxe(K&2pXZEp9zCf9v#8=oCgrzvfVwrDr z)Z+XRfSI5Rp*3Kly4BDC7_Ufz%}C%ShxaehB8E(4O(O{-+$Y5~;d8e{_bQgAGlrNL z*PwRMI*uWV>g-b&EHMlyO_8#1v)}|t>z#T(mM@X9oommi#mlfc-%0Qc*YK~{&-F^n z0VP5Xcx)EI@S?8MFYNZkorB5waW4u(VI3@dCAp6}L=0tYgk5X9^NTpnp2XTj;b-q{ z3049KCM#PH;4ztceJaB*K05?gZ;RTZdA&(IkF5qj=?X|+f-#-*wi;oxW8vZ^jy4aRRzgmWK zP}eJ7k9z_46P~qJoZQnk^aA3x&tgMQMa>V7DW1@ocPIbS%5Nv?cmF{g z`tAK=xu!KE=HB1)KtDqjMm5DwthI5O3G0{8v?a8xUD{~nY5<*I)A-b%KN>W+?M`ZeBLesxlF;JIy{$wd7LYIK@08e9Sq-b7llx-ZBa#fZibYAL4JMjv|AG|qP)FYV1xY9 z4DsoxD0lC1m_pV{bTTj%+6!?G4eq{E{4#2Aa=n7onji(~a(ega;OXm0PiKWMWPTVd zozqD;oCx@A#g2O_vxV*Dr8ehy%mZsIY!D920O)C_#j@SL2qAp>{EO86?s!9fWaKni zMbxHhf%0sIhy&Brg5#cHv-f3f_MCBU=Oc$vj~=27-U>4e@k{ge zZh@a!__dhbomtEa?Y=N?pDzZfT}PM&Eot0Bu+9ArsZcGOntkat%J^7sX+Sb$)4=Q{ zMDUwTp_lxb(Q+K$GgcgRiKct26Y)H7n`37JyxNQC;;9{K88Z$H!Sc7{lUzk*_(2+s z?Qa|$VgVX@8x0{OTscOz(Tta>EuekOKq=B(Y1Ui}q;d4J&{t=b{v*k({bRQ*8`V-v z#jpdv6cwG=moIFi?`gZQ$_WNc!wm0J-}F})3Z^``&#J0H=Yv}z77U=q1{s;0*1`O# z+2vj+G0DkNZLkrzSpat%Bgiy4+i9@4&vN1-!88b^xT`5LJf3T~--G1lGRE|rW65Hr zVQ|^)Vy&(gE>-0F>DIoN>nd{&pB=}{r&fgU7MG4Fm)X8Wc=k`b@wX5rGF z7D>eCc;0UY0t&kYf6E6gkGR6z0vzM)1koeTW5Iy?(noXu8OPh@YpPe4mEjck>;C=F z1OWC6xBB!(lF8tiH+mP*%a<+^TVGjsu_Na5)MXsbagh{{zXhJq@P3JNi!92UYe5Dl zCS#g`e6F-n>`z3!dH$!E>9VjUi2V+mdkV1_sQ%LYber2aXpFr+RBW9 zKF&?>y7uWOzegYtW3T(*-U7_7M(N-Dt&XIGPZvv$O3nxSicwB;sR90d1N;aXc5O=( z%0*1DN5gr>1G*TVygJRdg)Z2*b0?Bmbk`MZ4J&nBhBFNEXx5B=)wQ3 zY<EJ?@)OfP&$2H7hEmK|)8~KLz5GH)ND#>JJU2k#OC#qWt9exbjfy`T-uLVjgZg_SOZmYF41a@y8 z&x-Ljo{-Q~Ca@$UCdkA9MVrj}PGi0=xlxL%vrA}^2_h_q}mOx=ztiz7;}1$ zI90R!+iwcV)7Ql_Rrs2ICjDml4BRz%{#*m6(tZsuST4X5u>0-hWR>7$NRkDL3C!%i zVdSPadsE?3P+7oVFxy5&Syv9N&zmG3TG%;8F{x@PCaaw?eo$;i29-mSC4tJu&Ohd4 
z`sC+8g8pjXjwW2fGOp8W3PpP*Sd$@|XKexYA$A?xg3B8o_L9ID!dGMmvr;drzk%~l zQVknqc$1>h3@whkI2UeYlUv(h1uOcHCXpEzlg?|hZo2X{O6UGGp-%K4z^S05_q;H!KH+4z{KFE zF+5Ip_2wGSC}_ZhC8;*56KWF-ishl`l4!ANV(=b{civ6?MD%`9{2NiDP5pDu+PQrj zLC`5~zX+*ukZRqYjb-F@BZe}trVRNDF{a}TWVR2B!fKk!Gz|~ii4?TQTAxHbtP-aVu7aH&bwjw8= zRMaocqfh1s8kt@wN9eFcwYqzyE=iIDJ<&J%D|2E!ArVfc#Y96_SXoV4Kgm(1#U&zh zJyg9C>$1A{m?C@zGZ}L|4@`?ay(w;mEOqoO){8IrCc7OCOUpa0e;1+B5V{R;+@4xI z<}HoRzVjP!R6MZ8aQfwhESIBvr61y6(Gt{Rv+y@fwQ77QJ}0E2+lYq8BsG@GMwF#z zpERw|N-Xh@>EA}^2+tMN^pw-A zn*7?c;3F6<7M@$rH>k=(pn*3i`ryrRe;MIaN_#@>%THbybqtd4h+dXvSwt^A4Nq{| zef9YH2vMrMXn{1qIV}LOaa*!mb8=^ExQ8M7^A`}egpABq(VHhz3#vJOoa*~pXok#n zBCTAl)}KW|A)R7KNvM?sj1Hnx5Gdyw-24)^xRil6P{QWms6!@+DujIG?3oD1}>i>ghmhgq{BMrmnj8I5_*GUGr z@-eufyc<%mrV68q(@Q8aN~qY7f{Ls%lBwaj=vBMt3Wk!9Dc~(CrWDwq5;Mm_i%^Q8 z5SCU}HSAs&W*oPT6gBkjgfBpkmAzR?^J45~$3H4^y9CKydA}Jk=hecCg`nbbFKS64 zaM4FM)0QU%CyCjZEvd1|3C?`1M&TqE$U#XPIL2Tca=-g*>Q5*Wt{-KZrTy}?eEJ4c z-BaXJs*&I6M%AH~$0wKOuzYGYKVUA$uw3fqv3fjiJe%N-vy^h##Jl|=Q=sxX*LiK> z2n#0IXba|xbIZ7F`e|ed*i7f}%}}AUV`?ACIi}-(HMzds)6-wqhIk&-p?~;lmU1d7 zeJ^Rb`&Lr!qXXyrvYa>N)KL>)^l>+fl21oe4S&gmP44HHb*H^SR+BFtYkTqez|T*D zlvPZ`GSV_brZ2Cgd&Nb9T&KtPIB-JJzDL@(&kbE0$w%k$F z)mKD^8R(B8u68FRVT!mt$x?2~ zc*{+VUYb^`UMzsEmMKtyG^Klz$d%Z&@P=8I!L~{`YN$96CecM|%O9Ewa4d+veIpf2 zD4TcQxh=!e+R!FL@qMp+h?PB=_GnT+pXzb)!KC~L`q?uxA1(g^S79t01DWHAhjf+_3UWVHp5w|ru8aCn zr-DDUQA5~IMg0F1?HB)I;8MShcLcpzti}XA)y2J|-ASL8#xwcD#zH5q~b_>5kZ( zI#ay~CmP45Kx*e2#uN_o>JMPHL3E*blu-rKXg(P)8ZuqS(_g%|n6FXS{Cc=Sh7_rv zqB@gI#YAUdxSV`etbaZTDu|w@N}{* z4XbWxf}A=~XXdf5eFqSPWF%VHE_s_93FjbhDct)DJq)#st)J<7BydSBm4gRo`edBVpfgZ;|Z+`$o<4{W$9&y64HRsm zfwy>09Ci@9hVr^%MSl(|Qfy6YjR#da$ZH&OO60$m%D*sl4JsWq6I$s$|KvLHs4F5a zGPay%3AE@!T!e&8EpkA2{3q<~fRg$ZbXRQFVV*`zq;yVp_uM$>6nyZ;`%4 zEb5RKMD6+`Aa2{h+G$uEvwm-^l5*SOXf+ z*vQJJ1j0r0S*W+_>(IL~%oky(mH_72m}#H+w~XC5$-8ed`Ap}2%EYo>bdnOSXOK;m5HQr6#RZ?2W|EpDNg5X6SeN;mW0zW+ITaSJ4tx ztE2m$5B5y45T-|=jZ)u%vo~5&Q;E`hpA@7&y1qHL+R&hPfKwC1=pY>vDqK*?hZ^mW zbCs>DA1BR`zALSS9hdy`=AMVS4%m1+OZ(^qL|hBYjbsg@A&ClMrspG%m3+lOL`-5e zVqbb@`7?4|M>in7V=N6b`t(l=69~9wCDaC68MhbeVKSVWyS%~l%%ZoUsUdGROyg2z zRn&26kZb3T>rr5OHZh~rCHOWpP1J{cfwuLN)WI*2XqOGOV(vYyA+7LXbD7hLzU<}~ zrm#v6k%g=}6Q?%C@yOBDVJmKz?V9`L(lML-l~T z>qPb`_q#t5QY;d^3f1j}Lj*a^1NGrT_|wIo2IgO7XpoTH4khI?Q5^bqAALU&A9|+P zBYI$fale^FKhyTDbT;U{;!G@)v&_i8z9=Np5g>LLEhQ>U@>7~#hgQ7I^SW1Q6`o|T zbUt2x!XHw2zWA*1zJdAHcfW#Nt&<^_I&Tsh!>BG?(1tdx?CLP8QDpb>US*t&@U1#} zHHV#D?Wb*9ncrC#BM`o;$D=VVL4RO1#NgvBW*B1*eXB!(Kv&mNnVoIyEvp3k=7XATKT3;$71~K#W>&;B$|7L;+wA;FWLaA}DvpzKv82jTgh3!Sj5Qh#Lcf z*@6)dxxmq}1hqh7Ja2ONd!171ZeED6T9#GFwH8m%?))WLX4L02qRi*JGWzXOP|)p# zjHH`=-Jt%|u}$VyrN`rva*fUBxM>^#}*0>~Gc!(@e6{e%oO9=c7%sQoIW&I5d zQDecvv1`70eY640~5ZZlFTaP-aJKxN^4Fml&RP25GQZz1#!s`oV zuM$+C#|iBiisb6-^o$fm zWp*)4XD6-CQZeZkB^?-4oK?TF{Q)xk36trSk!@M_u?Lih|GHmuD**R3sQ540y-0c1 z)4CXP5Aikw;Q)oTSUpTt+xc3cqW5O3Sf>TPDs-x6KjBg75Cb)T_^n8f!gZSgaPRj* zy&{iH5^!le!OBFm02@YR!1iKp{S*n()jXDK!Wi#iL+Ji3#toPnOkcqUqd9C5#@pU> z!q_m6XYr{P-YmoQFY75GX@I2ZMn686Uu=eishis8YX46&&}M)lf%}>eEfe+V6wwGEj4cXe7^eHYG#xFQ^Ky#@0b9 z%0isSdV0)Zi1Lrs(c=G6I|uy##(Wm4(!GD$uJ@`L^zdWoJqdZxfhTHzArL?Hjum;{?Oa{S*GFL8ir}> znlv`xu)O9hqY%`!pncv_e&nxsK|_u)F>fu=Dr5QY`s`FM{Y8;t*w_7x_@YFXcbJ)& z@|d=r(qY+K)ED@EGlz>1T-OaSpJ`AYr1er_`UPqNw;zuhKB}QvwRlC%4Vn7iEhtq- z*tRpQzh&{`uWua(wM^X;E?wK^*v0x4RtZ8++amzQ$G2`q7pO_oDddA3y(g8s0x0^? 
zKfkR>^w=F_oM*Hzkp>28cknpOj1lMFG6&`L8&r-zQ%<$}a>6 zagpYDUZS}KsIrn;80zrUcK-PDA~8P>_Kl^gENY2^6;>{a;niKu4K!ohB`>YCpTJa0 zY|wo?5L9(9%z}4c&Xs7){yiBgB&>?s3i@CJ3XwMmJTnZPx`r91%!#7YD!zal95(Sk zY}mE;;{ABA)nm?QA%Bt|LIIYXIiz|2PV zh2R#628PKyoDLtv$B1QvyW8@h)EO`CwKRqPuiAzUojo0M9`ie1&8={p$z?OMB?T+= z`q=(VOugK?{CVK-IwNZi`tC2vt_76@#~D@g@@xJUAD+4I>Hw8NxfOK;ks;$(Q;#8% z)_4wBm<8u}L8&o_%jg&cN3cXR9@hU#dr9ekJ)okvj ze>7Whc_zDjq!jFSwI8X#GIUtHV?J$8v|6zmBiWi8%lQSxiAA+=8nff5`|y3>$^MGX zNhZ++2dBhD68OjrW8+Q=g(PU@#pi&fz`BJw2Kl)j-ux1+DCY%*G;j1c5By^OX|(d%z6 zDl*hWKuw-o11bKA{IcB`H<)=$FS>6gS;)C>oLe_4wqsycsv)EeYKgu1y5VvTzBWq9 zd+3)Ta$Y&B9uMpb!3;eoU0#GZhHv#+m{=AL^;cSC#M}X7i?Z049Y3j3J-IvNWXpop zdnQUM!5i6whKNiJ|Bjt9i36je<=4MoG`;3Ck`WSo2JI>dEc%>s@kyp9X~zUhvx2M= z9S~~|{D~JGqBP!QjStOL;jM1MRP$+u(e<3u#XbZM1Lx}AP$6w@6qiDtWEi?IZBa^y z4o0BW?OF(v!+ydpI(H!OgZ;eyGrWw2M3%Fe7)K?=bfpP6TrWQ?)MW7@!s99i`!@Zd zsqisk$MS0V2(R-HbNs_t7^x?bJ&r=AD%1whso$}Goo+i85#H@1VLkcLw9~o~>D^*_ z(0f}O$T&I2wTy!zwjDc^sjrMDLBAmW3uQx61MxtZ3*09HHN|c)f`}6nwTzYQ%D2ny zvH%~DDy+L5_}f0l*yDp2UQK)jbFOB5?gf+ng|5)r@SSzcEs12l=3zli6J?8c z^$9l?wOk*5KkSNLbi94uTLjgW``56+tEJzge(G}mUvq$CKdvX)zFvBOs(A_Kzsptv ziO+|eWyPo`6lMMd4sBUdg@L}BD9$>P$WD=>x)yO*PJPRD=0a(7F@B!Y%feZ<9;ZFq zauomK5}_1=o9tlR<6o@AnHk0kCaxrBeY93A55N%FBw@b9MU9Q}=C>)bw%NZHIX=%? z4G*fxLJt_ZR`?6Xd0Rz9w7<7m{|5XiF1-w1*TDC{!Y3q=6UPcI|EOe>1k#t1bE|mS z27>U2V*}k98e%7(@9!KoQSc-+=EFtMgo+1m`tCDYP?9V`fE}_$N^$Ztqnls=~r{{oBZr@IoJev$_RE}>&t8kKK-3UO5h%_Pn@JLTPuS{7|jX4{h# z3g3L64Q4mYb#FLJfI>Uub$U4zSBGrhT=1gJ<)WGm74rXimrF*&U-6daG6Fb8hU4;9 zzCNkt^|yD-U_D@@lad|U$|<$ZuZ?dp_jt1y9hd4xf%ue+OX;Oe!?y8hB38zGT3u#L zat7fp`gx_@=57Oc5@!jI6bSh5qtVM16&hjf)vAEMI?@RvpbH(-Ey4BWve*`LSK|C~ z>AG%~`4_=nNjQ7MPF+#H?#~5nf%nnORQ?TGzVtJj6A>O^KW+>%|a547IQ2Q1W5;_I_;G;*R=qP%lhS5I5RPrqV=T~Kl zmDD@)E|I=!3ZS*#JvaoDrfs@}>^x_wt2WqZn@OC}OgS6)9LpknBs=OcFT5GH z3O45^#jJ}B^H0B)O|)zQu<;2dvlapuFr{w&qua1(YyLESCQ%67qU&k1@WOqXdlfkT zd(-utPPAA4Q@Gw&{*&&yqa4~MRH1kDt68fCGh+c~qV(2JPrFVtq_dS(c`If_u9#N} zLuEsX@6owe`f`f$E!oBl1k{U!L?XI+yd5yG^V=2ePHLU5QDj2=cNL*&G|)lb$*?ID zouC?~IL#)gb2iViH(-p@zVXRRkHo%k>?=d^=ct(n3ewP%2O#cSc7PKbb^ zMg1{OoMzA7e$MGo zJpCnAwD4z>d#-PzdC7l8yw!ljsQrGn+jF5zpLs1!zP8G;iSDWol`0TV8oFCn=fq(jrMV{g5AL$gb!Mr7<@ z*SVW?MiaU^mfQK@68Yv6!?QmH?=*oWSnq;os7|v#)L7n_8DY1f&C}~ycef@#X1Y9G zlK&adIs#{oVUlOvs%RVU?y^v7h?|sYg&yfXe)zSETT}=jnPHaA*nyXL3+!(EmET^l z2t|s^Jo@&yTp`)b&3k+8Pjz~oOEp4^I|&RLvbe@jVs%~C~Nj|Z&d+YqlehhcF3tLpl2<_5gVNSt0nMK`C(q43Ok zgZnGi-uy!H{LS2ux=6v_4wH~EsIqKIpvhhjBuSl`|;c4;JYVh zT=f^OeCh_qs3mrO2#ZYSoQ=~&iKE8eUb(jj%s9*e;f@kbwtbsvmg(j>u|@>qc* zFxPR&xD40A6N|f(WI`RrD|NSfvDkAJq*oSv7qU7gTvAC?Ltad2&?RI4(?qJua73g02Kf3QD>D@CogWTkLI9aql58 z7w9%DZ!kOtD}(qN6AUvb_Fatbf5>=wbZ06j1dR}iC2-a!z{qzp;8s90H2xi8d zSL)vG{A;5s#+ww0R_`~~RQt&;>~Io{C2+Nq5T=KAJd%epeo#($syIBR$15u7sAcM2 z;Z52rB`geV`ef_u=_n%738&;U{pd+-;iyCpo={aV+m3q;Y8sXdEvr?q&@s%hJZ7&9 zfkF*3?2>LCH~I+38n=$&FB%F_Ulv#d0G==@SGE%4xi~fiJNVsO#{FGsJfaJM4{HPg zvF$d~zjJcl`TVf>9E14%EjNm-5ba+!A-CVq{`H@1@>}CcXEBL75Ig^nc|1N_Y7)nO zPquLyqYbhhW?yBz#mLJ4fYPvm;4WOlTC9o-kPwcSrSbd4nu3{wITY3XF3d3e8Cu=7 zVWViUEoABXXODY)3a6^cE_cApF$-2=ax^@tK^~>PuNr}7uc{HU!h*20Ht(Rq(Qkf9 zNmNqET-bI-^xUJ-GObY_;lR;7Fg!u7l_uhL=W9LdD(4j4o~~b1wnws_Zi15YK0QfZ z7F8B%`%^98Xv04)Kwtbl5A)8m_a7S%-k-fT_50gF|DfQY%=&9v6-Tl&ToqGR$K5tB zM&+)6%E0HNx2Dp{ih zZfe51{*j1YO&&h?y$;(=_cNH2uLolWk3eDCBwpcFQ~Z+ntFYe}jr9-YrnlWS9rsu- z3>-<)^}WECe7$BXqg0ru2HV3NR(;aHV*G2@T8qL{#Z2(2$2B#KMMP~pu_#efru?Y4 zzlQN|rYwN1D?BvY2B2tj+ph$dLx%|dq$9Dyi&_s^pKM505QdNaWxhwv4`Y@ohfmE7^q||yb1zF@z5xa*lBtKIFy{jrto*n9fOKl%? 
zhM*gci8baGcq27@^6Ij|g<9$<3_QI;?G`f1d+gjoa{hj=Q6G}z=)Q5adzZ4~zvoM7 zd0Qg-R1HU)iaKLrKDc2%q&=;v*7_rvLgd5*pH_(er(}5AD5`$aOo631bfGn9q9N=2 zl79>zmI8v!v?n>b$woehr(;Z@ax$unn&d64TZM-uzQJU;#;Ix;p1XOX-~B$z310mC z+0$p9#G-0Lq8eAYGRzkH5o`7e(a^ zYHul@4b8E}d{Y(HtK_*xq$4C zU(;4h^hI#@n8?PT1rB-Kws3WvFOx_dMv*z+p2q;qI%e8i5Dh;A#B1p8w~NeD_ir0x zzxDY!I2A>yY=v363p09C*xFWUw~C>926{Ob$YsnH1`Mz>`F54aip+Dqq37}RifeNi5648y;C=Jqjo&rc@RSFl8l^D5 z*I2|)Q?oyvEW`8|@0x>X)UXj`J3e$|qy`p=r-u6J79;&9wuLk>hsV2&LPxu&?=+CY zZ;Hp<`se%?0)i0;Z^~>w0jZ5GSn_tdTr6Sy^w&~L@*Fe3*R#I$!)R1tH);y@$#CI9 z`_j$Ljr0E42Q15Dzd2bmkmG$9VpgF3_q+Qg0&56LPnv>_ol1v2Sq5dUgKg30kb1dX z+ruoD`!-Cvv{%wMw9nMm@MT`L|30!nUb3J&WCs=OxU#ew?-Sh~DitWvwAK3=*vrQ`?aUYZRYW9BYWzrZa!zGPZ;B9Gsb~55ao?r^z zjn&~Hi8bZ^V&ea_-BF~~zaf+B4Nv=IcNcGVlHc?E4b7L=##kZ{Q(LcDM5E9q4N>Z{*dEOjzjayl<&?NwdKRo$G(GJVbVHg0ILaPDZx?SvONUWXq zR$(&2Lk!SvD@ceHD2DxurT(1rhiMvc==zTD(UXScuX@1MMeF>|{CBgUYsJi^dBXuHuqgGOiCC^RZV{iG!t)nWORyq#FcbLH)$lQkFOJ=?< zZ4Brkr+LU}4X!W;H{jtZ;B{`i=D3y3#ma&0qwDc4&iOCTPbgc`$+p**XNVfy)WqX5 z4tIR^o854}y*G8XeI;KH@|_|^=Ea^+elE~Q$G@qybEx;e@D_-7fCX&7XtasuYhD;} zxjnJjcecip)nL-}WNINp)ZV%WIgflpg0AsnH^#2+C*poz;N0RH@r>q5K#??(EyzI_ z?{??WOf>A2mGYS2ue8icucw{j{T$Oy-+A-DRM~ z!;nNwmHK>XwJUflCG~!A4O)EdSzF%bVR+ReM}^<>gt zb_%({Od8`|m${ETdc@LRic31gm@8$Nw?j2xFw0l?;MvBD7E?3LmJ>HrkY8ZC+#)v1 zf;+5XcMyN{T17JJlx&a)ngkVzc;?^cG#;~GL{^f*T*vAH5~?i8(lF83O!HZ9!G$Mt z!DmFxouYY--uS=O1`lGjYMPKfHyp8W=9doXCfRl{71cDxo`mNkxYRyG{~6&_h+9$F zt0`IE;Fzz*Ibop`;zKB8r&+X@+TtgP|40n@^;_yF_}(!LI3rx8CG`d#%2Oo-J8CFQ z6%uqWIPk~&Wi0DU=$GZmaC>vhm%ttcnzyq(=D>2>ZVc1b3(b>5H$G6XG==>kCos8E zNF>10YlQ1CAWbs?0h_WZi|u7YY?bxbxx@!f$RFNX?=TU8yB`h6 zaX#l^BJ4+PAsbn&8Z})(MveP$_g{HTVE)<9YarPAN=(y*jRrGE> z6X_oqcsaKQNn%LFDxn-HJc3I64hC(2uWD^pSg}?+rJIq!KH4kI2f>VCm5=D?Jl}7QHN{l2eUBNS|PQpIS?Wx zqGL^3r(VSD#3A=M#c7^a$UaTX&!%ZQ*q|mWBQy^D{oXWaeo~}m)3*9=IG-Q68aryE zs+I^F)V){BLoxH=|MvozPNQo3UO`<6Vzr95_*i(oYv;V%$OD|AFr2@ENHpYd&g14w zU8JrPK^Q^I5+`YgEFV3p(tkTiu-cJe>uv|#Od>c#PV$q`or%))f9<$QgSq{6Q^q24*?6byA}L$vGFh4)XU>I_5%47)d33-Z2qBl{cP z&!rUkbzkjOx%{At#I6#pqc=$KHcm;P6G?(UzW1Ur!1d3C!>yzk5G^H7G@k3f8Shm} zUWEnRwitJOKw|MKf4w3KMU45#(vzv+9h7BbG(+?axVTw<62$N^WG4}I&V@(d1b zhb(Q4cv6&)!LjIZUl=RPq(^LHU^L>|Kl(I6)m> z3n7L*2n)T21`Ny#?w9R1JVsXv2C4z_@*$VBl7A#yXvHQGT!jJ174S{{!+hIy_=gTB zq=`#W$x39}{+#iLaiRNWYc--m*Cq|Y*&Yw5rue#E0ikDGPAiG3q=d4r4@;+urD!b! zWZz}Ua;j;b?_?`3sM&^W1{OCby3GkP_2e8-tuh)((}#V#b$($trJHs!+L~we=y}+B zK;bGCefyb1SNDhJEJ5=`MMk50muA0~I>I0xw`Gt^u z;4zIUv+W%PU<=2Y_)s}orsC9t0E+23sHzQY%w=I@t60=}L%S3Y-|zo!0b3&1WlX(4 z@o8SpNACM9=qNbV8%d8FtU8$Tk=JGo3>Lh;T0`@ofS6jXICY;7E6rSt@&_}%YGf>NWW4rjy>};TV!R2%`r9;R`(RzB?MBD@THY7WSqVR*j4{Qv9duc|hqsFFTn(nhap*r) zO!KTFPRAD%`R*jRlk^IGeO+U6<0}P>r!Z|4XAb3uaID??NQNMx*X5vgC#WRhcT1xY ze<)CzqNoL+k+CFSYqBwldSL(c%ju@;&2O1z@wLwK7&KO=?vY?B!t@=Y=~aF8hliVD z)7|B11g(Z6-O>c?cYZZI3!b-)mxj6$jL7%7MDEx4_BM|ir?kvI7*q?mP3bvVCGQ&L zHXUah@vPy>#8X`!;6%RebbyvCub6j)41Dfa^LeD?{YnjV+)RiE01lDhWMjc#sj*zj z97~_^w#{0-%f?#@a8J3y^AqM`2L*SvA_x*(y^ zpr)y?s2uq*CrPh*cNynTj8$w)ULVs>zxrcKD5*~w(259k!e-y}68+7;(<7dB~m=zcAk40O& zOE_A#%=gR$5jPG7kt1t7$x9FL%ZKb>Q?lhA-IkS!j6KUhag=uW(;0`U=%UlUOXnm; zGzDKA#w?!D;gM5Q-J!vlE7k+rIf|{R-QSDBLUXdxo}`t_#h}Q!%H@XXeA0IY3$<6$ zbD#n-q{nxSC5);RGHSC1NpwK_Tav0cA=e*io!Q}NGiK=Mb=bI$%!90#`tn9@)D&6YCd44!m=@?Qpih~~ z392R3+7hZi7!HZdZ1UR9JbK~hs&r@i;P=0^)X>RGf;-=s+!ME)`%o&>{@L5x> z-}@D>b@5!mmzJ#Sx(fGOHJ|Q?jKN_lLP}qaz)QBrp#|*X_pdqfKdQlB&Aycy^Bcyd z*HdJK%W7{bw>2*}T6Jt6g-x|W!QLMM_*ov6vJ5d*SbKm}zVz%*>j{HoDg&y1XCeZ! 
z4R&tDqZZ&l#gjHY-#Bf0XYqUh5q(3RAN^XT{BCOvzEU?H?6@2#JI;plsc5-MXlKW` zRm`K43EZBtC)KHS#XR;@2fAi*w1-ImkL=LVjh%=E8YYqFsMtvF`5I+CLh`V-UHtd# zL*JL~cgreeZNuP2A?GFy0kvhyK|}88z*uLnyu8t(s!1mu9OzL2H?j z`4%p&WVcVvlN2&@bR#To@u6)`G$AH1si8d75djx3WEaosV{lVK9^YZi-xrOYE?${I z)*nwL5jexB2A1Oq7RTLd4DyJdXVv{2nP0F%H_$E0_#KZ+VG3-%HB;aja#zY`K{vU_ zQa(?D7YPZZpAzarP1SzbavW}sCgvfLGNZ5n!{b?{IBT-bU4da)V`5jGfp;ZKs)DA@ z9@|m|gNY|!-i2-w;z|EZ6ZE+zqytLMIZLzGTEy)@kgV~vTzOwmi zzM5ljvFX3ec=rEM_7+ZYI6$-D;%>o`puwF44el1)T|#hJ+}#q~-Q8K-9fG?qEFL82 z;;u)&s;j#9dv{guYW{_(nV#;R8MkqK?Ee|dzx>Zwp3J28|4khsWx(?JUi;=()J9r( z{9+b7`S~*H4SyD#So^DovQ3X^X?%wcw;BV2<>y-anYwy4#b$3(r2S~U(H1{Wp_x|+ zYyvF9<08&Xgfn8*nYSBYa(yS44(33kvWZv`k3(4uU&@ai+$w)R|HjiF6?bQQPrx`MY{K^FO?zKQ1gf@4R?ZB;G zpq6igiY+oBDOd*9d8lp)Bqy(nq(Q|lcYHTpOd?#H-gzM8NA-l7h(Eq4C7dwq0@%aG zS?NOVuhTu_3?DUP#p4Rhy|_m_qYSL7G-iw+vgTb2lnmpF!LCYl+UFS+kI3?L+nhH#LBkj8=t5aF8eb)Xd~ps>-#xB9tG ztj|hN+4lBO?P>=kbd3VN43lVt?v=ezOw2f_Yh*m^Zt#*l!xK_Yxw_qW9Y>QU?KjNZ-KgPJ8kH{sk73I(sVoHcZ5~t> z)%jZ0fvdH2Ua5-*?5(pATuOJ9cH{1OouNhx3kWAUyhwEMd-Pkxxx z2P1|d56Yh}{lP`y(HdUK?#c4(!fGs*eX1ReVOzG~%1TEFSeG7sw*N;5B%o7~9nN|} z+>aJlTy0L#3Vqzg^>&VLo)F>pB^W#-@3#rzY0WZU$;GE_;$d23m-g|c2oe|>Byl~j zKCL-`og&_J`M{XR7R#400U0v%=_s1*Sm--7WAe~RbG|5T_B#ZWf2hMmCGL2m?tRXh z^yxhQv(plw`8J@#Z6M~szsBIXZ@f(bgH#Qv52cB-c1tbcAUw zaw5xk_H>fLuB5`*6FVR>_jxq;Wwp#Y1iANVG+SQW8g%cSuu;NtI7n9z@>9OcP7))e z^6~Md76J!-y>~HUbmmxncN)drGdlkyv3`2MIYPcz?{Zq2Yam{FVuZoJ4 zUnX%|wVe{YbY@f>Uw&BAO-=nQ<$N0|^)n?P{q>>|4WQAN7hX62EFy23}aGH^ro!S z-n_-pN?I)!qBw@GG{V!tT;Cj#!?ctH{BLAGi6*g)R@`qP- z&;BIL51*o`Xf?oqYrQr-bl<|BVFHk*(=})$Q8r2**X%cMcG!+UW5w zG&M=0>;c^QZ0cs8-3?n$mkmCzg27WBGr5><6n6e5@8`EXArQLR925mw zE`8Gjnv1IL`+Q$@n3_{k*5#CLmogw3aU*>8eY(*td$r)8<5g`7#So`(KPPR+;}Me1 zY^lp$OhGR4H=EFyOID5G#}t8$~p5;Au0Yn_||3apsp? z2XZPsj!UKVarOyIsLFzwIUDJ9>UY>ts_IL=UOeKTI@XRoc>YrM^uG+94q+U@ait{<_)EVYfho&1r)>Ku+8OHvK|wzQt*=#^>(o66EHaGXXtr zo^u2?MRg1oH$lc?cow*9H@>&CBY53cOb(j2TiqWh=Q_;@-cP9CKv=VmJdh8#Hd9@Z zEl!A6@poCV%VP>SB-|w9`tD0K-$>e*iABq;cI=a_t{y`sJ8W#`)}4YovE zlncug(}x8yh&LxH8mQ~B+v}9I;kywIiU*~j+3MZ3BLb9MDsqtBBQA;E<)PEY=Jn%U z>zy)_kJolf({D747ti0y#i`O)a5OS1uJ1?A`kNN@u+bB#0;eM(yhae*%iQmZR->`o zPGZ44s)K{nZ}0Atxb0oxQv|-)!HQFt^QCXbqj@X+OfqW9neTz@3{YiV9BI_?mTxPQ z7Umf$4-j+k!fN7t*vvJ*_m3)(YAfl04*B}t@z$V5+H`H?@U$8yi+qybFZKF%t1wUK zGF9qBbX#)ENKwn^eozv>6j-RU`b~y(bz%wI(ny|wuZ@h}rKNES8FBi_D$u1{FMoPcAr7#5(jkNtPpKxElIZ|Ogrq9XD?R!5XWjIRF`4FRn8 zG=cmQMi7S1RRSg2;GKG|n>$FH7>BJkj%Sk)~r$`xt)6^;(f25TsQgwIFC zByX>+yF11SSW>Nt2F-zH_Ok|l^Yl$#xeMX$THQOD-0xU2OXPSM_sndL^N3oTj1|33 zgzOv1>*jQ4slP+NcUoJdI+kwrsAxn1K?2FDkARB_@6R^1#50B4+mynuG&V@fAUmU~ zqSDuXs_ZU{B&5IAq6*`FM{&#HzmSR;DAJ$sW-xf1mMTPo7;vZ51Kdo>TR2*u>tum* zo5A|td!DRwUA_`Vx|(S4sLN%O3{q;OT=Z}&zLEPt5+vB)NG_qz;kIUQBbl@Xzc_Hn zor$my$%?cWWe5+xA&msi&g4qDj5SlrpPz$OQa;KPbwBs*6O*_tB&Fm`YLW<8g)Dxu zqHHW&E-99*u#;MpJSa2p9b9+5I{>rsw#SuEh9;|Sxcd4qR$>;ydI44r3-A%+zOCYZ zWUAQz{agTpuANZ2aD^ztK1C?;y}6;mBPa4cxb6#C?3__zezi}$Ok4Ux`K(G#9JLI) zGUQNOSx9*?yy-e}8XzCnO#2OiCZTYnmbT7*^t7&TiHkAH#weFXmM7mjyr>*XoZe)M zxGA0XSiTeFRc4Zr^%6ivEX^afE)o!=@y4iNEf=F9VEsS+f7p2Luy0Dq4Q?*ZO(U}5 z^AeH`M1^86GlM=op@)Y&ghs3qpobt!>U& z3~zWCAy%WB?(8Jb1CwrtQCiThxE@Z1|NX?G&QYK$J*5$(x7dhVaL)*JwPH%$<(o4^_YPx0e0Cqan^|A47hKozV{B)(e!jqUowQ46 zPzCxkG}=ftz_cKumxEm7;#N$xS-u{F9Yk7@dyYQ--qxvU`DRl7QW_9rgM5MJ&|c!g z-lM#eq39&{@hCf@rvY3PwV{D?__$OZhbdF7{H|z{N9UvU2AHQyaZ_oiB`ql=-vm-U z=7LqxNT=kZ=(BquvB+8VCzLGlWz^7`fUpI6R z(N(ju$) M=g%dD_lbQgE#gv5H?2npNXQt=+D*vKG)*C&9ykj#5Ngz=d|W4{pbMr z)K832AT70VIe9HzMvad>%Nz+_NwtyFM+Z=MA@g%}=+q#PL%D&tx_ru{4jciMeD}GW zgQnR0xb$~2tiV|&qFQo5MV&YuoCZtlpT9x!*&B?fQ(gCD!W2DCPr2GDFuXzSsIIqA 
zH!V%K45*po01q6TxTQI$z2Y}eWG8M7ya4Ty#-%XhjH41({;VlWni0LePD+&X9!WY0 z6eB}9R7?x{iEO_+lWf&kdyayLlMXFeF7^^y+DaL)SGrFto3u36IASGX3D$EccKe)V&Ftc=sB?8KCge)W?+u= z2sjQO?em2Zz<59=kjkxwX-*1+j{r1q;J>$p^+zM-au7N|e=UeEuOKiV^G0#s1LJyB z!vsEjvHXK8GKwj_H_v&2cC>qal!SS(I4lcDh}!qgl5Ta0UL1Y-i76S?82;sO`G{-rwMs<@4gCnk7)E>Iudc<;!6900W3&O zQLqrQQ?$0cAhY6FeStpf^M3Ch3P*AMLUau*3OW_C(p3C#B@ zGEPgQZ|20oj`{BKuHcohM2d;s5leY><->kPLwj}G%HJy&ZC!IBpPa`ik=hiWr`iA0 zuKc?Te=B9Kv)ICZG*fEhUW0b`YtJ za?Ok{bka+5(VMT;uT`J-Iag{iJgBj`feprZMxc0a{NS?mgiOEIY6JBizU@%o&YMSY zj;|J*UY8J;w`(&q0Y=E@W}EB<9eYi*{|gs|97U_Od!og@hnB-msLj+;KJ}Q*`4)AP z(~trY2_&c)7%sD62Ex|`h$sdWQq8QY;wJbTBI4e4)-zc7%rYOym7pxK2f6;*GU9NSRoScS*}1$9S)c*FOeg3MA03bb#V?lB47DL z0)Sj@ddxeQB^||f1w)8pF;yAAfT=IvXWRzWS5<*~WGhC`$LvS$Wv|!g0)t;Z&u@$8 zQ9F6r6~_hpHqSSXb=844>fTan9FW|aUC&lCJuo1^xKEkEg1nU3{WvL<;`_C38%K`| zd_}=!;_Z@;;rWBCYFV-0A=#IAIK(z8CY?9*qsa}fAO=aSxi!EHAuE*Gu6&f!!CeVGDMlLIUtCU6W3l2OQ8tITo zK94>Uq$Ldpp}W&K#Blg?kx&O7&2)mg3Ok&55QV(8DTUuWVKbS1HqQQnyXXC%ZO2(nB(Fg5<<6c+7#fG79Srs4qbTs^^%fysFvH5WW_j^vz~v#il~3h9-( z>^fVT7`-peYnWvF@RBg4tz3M35?R1pOnZ2$SAui-=GZym6Xs`J1`@4ip;5aM58Cc#tyaW1|e=V`$Yqm9K4Ak=t zG)jKw>N(a*3PQ)gzXPKLg1qmdeG_WLHanI3d(<#JeP0Gz6;wvBYFJivUNwcng5l@j zPw;+*Lq$QE&Frr`Q;92rtZr3Z(QL7VMoLGek+7@E2AJ1?71&?C#HECgMDe(~^5#lO zZ`dJE`j5HVo7ygFcwTs|%mQ~}?605}%d6Sko=s2=txh(_h~^2UiZV7IOUuM_>97ur ztwhVvx&5>cVFea3hq$XS)KsZU#h@X9i}A8utC7QZ{X%o{yja+kNp>Srdn)sO^>l(u zo{G6Ml(9oSshze06YXByT%17(f;%Jj4Mx$e*-YmhL&yfNkWyM$T= z_W`$$CY*$>>&9R9HuW>n z_}sU5nfI!Aay@^I2f-p$z>-yGTanoq_|y;xCG!2R(hE7Az3@Lq+kfQ<=QVaB zZ4M(e)PLfbpD_G-g zalPUdfAyY%nFD6poh)w%#@eHSY%^_Z@`x5a!DIYeqaO9;MvyeE3mfSWv*r!>eh;(iZ6R zl&}l?m)^DS(y$o!)}wX1O&t@ER3Zih!!8EiV>Y(Jjru!pfWKVDbB0as5n?L`!K_ll zJ{$+>)Po-8LE?D82}3v=YmmT<-8H5Dtcfnzd-)XqTVA109L?d63(;se>_&p|Xhy3H zYxs_&Xazfemly)VT@!N@#!n~G8EcuQu_zl}56mb!<`w=$komh*(>gh;@Y4rk05_#7 z*8=}{ZAMQ|uih5IP!*>Mh@YQ`TFaS_Fn;PlxVY>O3eDEF#1>jSv(v8SN*_^amwYKE z4;s&L{F0i?!`qkgE{3Cb_q`R>10FAmrZ;EBll)AvI{P#;Ch_a90T9oMJO@^~YD5mr zl*=wT&u_9poOLZ*Aws$w!YQ#++y|%2?jgVy)5a()+dAc^SKO-+zt*#8sYEuLfAX|n z>6^9ZHSgQc%m3T~354yge$xfJt4BoY_pHetX^##povcHw1XQl#Wkb{9jCWxQo;!WG z(E1f<(%jLH;v^ynA<4`cAW13*gzz}33suq?oqxtTM{i;y6=Sv$XS@%{fO*DK2_{yS zj+4z8nup1hjWCW2VVcoa%r8GUd#FU3-=3w0nY3SDGQ5sQL*2F9mga51Rh>R0$}{q| zV|9)Y{;C19!k94JuoKd(_l^8Y7Vl7Buy&>NTT)3PzeRliO%Q;p(_8GX=;n`uS&2F(St4SQnb9Qr4xYRfD%5d1b?Poq)gD} z!P~w4JI$o-4#zy-$ZuL`I(Dp!QK*Ibl z=k#R^l{`Up0yf&S{@&M)%4V#!xm?fvMR@)Mp&IujcOxNsGlw`=5xH&&K%L7E)tYJi z&RK|wYKGLMM?Ue20HZ)>&Y9vMAkazfhVn3k1!HfF#@P9}(3Hl^KfyAmvZ1qNj`zdP z^mY1@omF@MEsYQG$HutLuii9f*B$ogP2U*ZB6@Sxg%6C}?6o-| zhFT=j2&+Fb5H(FYX>R9rDbtVZhcCssLlBLw^ptXKZ1n!3D!H(8YhB|-p*tMz$JSqR zJ}>%Q&-uydhgmLE+%E*um0(<5=Ze*5T?VE=;8qZ!53esxSw4TAU_{Mir1z`J=wl}J zJ@?sGx?Ip)spq(U@5J&UhMh<{6{YlR^2Ou;N#E;JJvlzw4{lu&w4J1H0*xI}S7H;h z+z#wW)g!*yB1)^-=b~aEP)R5?Ac}`ma8nXRSy9Ibd7=U~jAi7pM@PcJuBGx>4f7Vd zOV@VHm<5n}pCe2pod-}D15sRtAcW#fQ+PR`iLmdy{?&C^LZ9-mC<%~y%(t|6k9u+F z*&Uk+XnUy2p(U>rdIN&iOyP109O}oZ?31>~%O%&$ng)NAt2ui>h&E~$G)T+g@w?Onwj%=&UdC}s1=rGGoXkqi zkN!ym!KrMIJ6PTDXC*~9?p$I%l#4L-;oO%ZZ!0~+EsSVboDP}Y93$B2zmZA)IkUN8 z4;^=8tXGadtKlkouClYP6fqjU^!#?B_y`F-p=?Y_W`ojo12IZ1gp0LC%iP#o^7gdi ze8+jjzh4Grqmb{ZV_?KnZFe}#u8-v+gC#NxAN7MwskU-;7w@2Go5ijHXLqJa(9O^XDO<_q0jwO7Xa>*K`f_YN9cOxfzTyY}g< zECU&_>k}*{*>E7xJCmucjn&1UBq%k!kmR~nIJDyzm!rz&_7FN@n7QEVCXSVeXk9Cy zxX0iZlx(Vx9t3o{BS}!!ZZBvr&D3d<+hzIGg{LnXp5#m%4*#!-C8#x7P4%jMlt96( z72{)c1CKt+*gL@$KdowJD73kDjYrq-)stw%Y`Sh_(zlnM)aMb3X4e>3_7PXcEI@{y z$-t~KEo`msE&(lf)Vvk7CK9eI;x66LaYDXZlLks>K1kAlItae!IFnY+oMtpHAZJ9? 
zgUJ>Xg^{_oP(YgD6~RWQV|dRifUEzTCs2XyqXpASvu%_S{~|QXj9^E-;p>?!)5M?Z z(NBj16!w4N$Ne~aIP7J~n=rf8V0&4Zy%w*5NZ+F_zYY|{tJ4>aF-W}n3|g8W%s#i- zbnW|~a{!vc%r;uA2Nk@}g^U+xcH2B})Z|4$11t{{`2OMm>WKV3o=s2bj|aO6(vEuAdT^v4 zl^nW(v#$CV!u!{&-6Zs~ZbsZ<5Z)C&fzuE9uO*hRikV(_pv&9bGbe60`xEU z?K(l&owelB3ZmCBZRLoD@GYFf??8>L-%`A_DZ1}VD{n#fjY0&8U8L`Fo%pts?5;2S zG~dL+vn9N{_oORisH7_~@(uQ`|Eqo|td#rj2n>>g=G~Q-fvG;+8jd%1BsLO)q*|Js zjm=S|saf$--ag+ZN_Ea@=7>&GX(vO!i>+G;=&N&@%l6Ppd8klF^lk-caZ|Rg-anQ~ zSCy;HyzgY=tA(L%{U34-28M@hA|WIystX+Sg3`giG!&pq?RKwi%#2K)pf~zxv(MlbAdkHvL@|y=zgug_PY_;5!l~ zI)<@?^kY^n_jN5#-1eu2rg+8;e|^S^QQK342`V zk->c1Jbkm_qaVZz>u$8_!r*bCY*u+uI$dy^jfH*t7upHe5@Lvz911hy_Vhay@;{=s zj&Q-T!Sk}PFrpTDLA{8Pz*UFSn+j!8rH4AF?L18j>Z8g@>O&^2 zQw1wAYU$RAcb3nH1dueu+NXS)DLzb-6doQ+ZkZ1ff!0fr>W17NCweKhbGPx=q&e4# zY>iw*6dlvI*yLZ%LNs*n46`sJ;`CNYKN8m>ovc~O5)oDYl#z?kod!bvPr=cyIjfC0 zQBXD6ghoKUtXv0uS>7rF4AIRjMm#F8vWKcqI01;BeBG*yRMJ$+rTLo|IhMD)O}VwYWT!wXkdT2z zk*Wr?ov7*Cv6d_6Za%rn?2ObHVvwUg*&0HYK#a1YCv>})lzhi63+?wdMCxp6TCzVCAR= zk3&m__$t*>Xq-}j>n#g|L$t@^PLoaNf4&p3n$AdPNfw(d|3ah%kh&lBt+Bbk$dI)K zCI*o!A`dO*!}`vQH>7i;J&clVU7@0m_UjKa-%2no_te?hZ~xJLerWb--@W8jM{ z<$~ko^YcwlItgZ|CaGKIHFVXq;0GmFH7E!(FwwN67+Owr`gAID@m$W)BvR0q1FLkg zbcL`*|Gr3~XLRZh=>BTUKU@xpf8e9PQl+f>uSZcl%vSiuJLUVORv_aMT{mZ~F-bVf z2V4;rl3NtQFx5qgT^WZ2QjNBdl7Yo#%_0AnuMBj>oPL!GL#NC zaU?zt6{j873h&s)4i_)!-d;Ri;O>y@{p`H1cvAjjKY{{Yj?1DB+M;z<(RDgU4s?{h zeS4O!00OK}Ao5W5a_@v=o$aV=Gck|Ye`hCH#ZRFWvxF)rphL0b* zpOASmLuwGD_Ih`2zf*J=eW4xCV^weEE~omgWxCq4jkxTkZ5)MCpab1~km3D(H86>L z$~j3ODW){CLQiJQVp4VGs51JXgW+dY(fIN5UCUy?N8oq-)(X@tNAs>jH-5c4WCCP@ zB_Xu8W7vK-sChvMf&YnY%QwO-$1+OH=8<*Xua=~#bi%-4rWK6r=sn`7i>t1n zuDuqUAXhl)t>_q~RJ{1&{b2hkUw!B+&s#%vnJBAk~|97srzs0XG&U%O?zyrM+B6&4T6bB5eNavdKIojVO~><8 z9E;fUu$;4W?Iw7;V^tI>z(tJi;Han$032U0bOA9%H8uV^Jg}`cdXx$<@%= z4t{WYCLMAFj5$L0j{FD9gHgmFxRp?{SektVyokQdd-^GbE$k(54#3kn%Rc?2`h8dr z#wRavzqm!NDaNm;PqXJ=Z?gERHv`Rx0dGHge znpof7T?e6(WJL;>a-@ctpZF!kpbi3LnZT2fL63h}|pipz;uQpB`ZkxokjimG9L& zR7;|0%G9*OM|0q8S!U?aIScE*EN5pC9UzBG3aA~-m#Lw-9!l6znP~k>nVbJ9X4M0t zcSZsKjZOk&Bd=;^%vR9K>k*~v)t^p(j9f$&#GoIzuCL3?ZzmEOKJmLX1J^9k()#W1 zWlHj}1Z*ym4sXC?>!{cx+YsHlryktoslDT3FYcABm>XT1Ib+a1vZ8z*t6n+4@dr4P z_IBQRvZ{Og<*nd-JB!+Qj1LmsF#~!#&o^z$=o`~UsRgwtknXegFl_46B~8;A#z@{2 zcHU3e)tvesed7pM>rX(-Bn4 zv@KhtaR2FKyyHmC5S=xFIxX9~Kd*ti&fX8n!S>^XU8wOi#Jkn-GfNb3@Z|5S1d+eD z{vwxE259{V1GTn&sWN2wjF5fP(*GNw6}Pj(XM;8)xEQ6hb+AF0o68|ro`3df0cb|W znM;J6KGF9%fEmiYiGn&9&EF7E;~SYsWfq{-Q0@3(Kmvf&L7KA#0LvN|)l=G5OnMYW z6JLzoRBSLdYi6^jV=Limsu)qM4D+S0O|%bwo~Zxc?VqPmv*YPqSZE_n9!puo!!XmYNQAtmG8O^`xWcrZokrCdx9q+0?3( zwcLDO)#Fr(U~Rl4_qnP+i{~JaA-uW7RFcVPX(O2G#do z8~80y)IcTTiA(g{r9%4ZvQn8-$`=jH%=jufEJPkZg2)@>(YXX$f-q~@ppy6p3{h9~ zpHYr0cyN>dWz%!x8;X3iuEiGABL7bqRlgZN)~zGNY>y`Go?Ag$>pxRI#}Z7_*|qb2 zZnLO1{vzbJvb{f|AET8K)Ny)kUUrJX$6 zM$L_l_=-}nun{W42W)Ii)WY&yhIKa@!kj}X<0dwVD4b62bWKAafpbdT6FD0QP#9c? 
z^c8<~jD&+Up3iM6>k%4&KYivjSanSlAEHP&I+qL_M)ioq|5((T9P^E!!LRtCfl2i{SW^MM)f~gZuFRFoaw)_oXUT)oYa1G zG9#12n-x7|@1yIb3SC<9;5t3EAP<-H*VY_}A^mmcgqnp4;GGl*LEw$y(Ilv&AzLxa zl>h-2QDu#Y4d$9GU_f7Ux|(u2>_W=GU8jzK>vX60q&!Mw!ihbdhS!TKAL|Gup|P{3 zxBUk1Q}eP4rR7@cTy@5TNB-!AhA7zc^d)*FX*ZC{(8dqU*cLe>c8qgJO*Np%YAg2h17za) z`&*>oOz_~_o%%jD z&j<%syTZ@9@h?LSu*Ejjyd{{6NpvUj8$a-{mmA=_fsZ68Og1eVaq7MNk?`Kst7r!y z1L3asFc9Ew_qFZgR`Y`kOue^vZu3%x6mG_Np|IorCBE-sqwribvF{bnJozqvKkE{I zgsFm~5DA|kA+@o2Doqe05ezqjj~<9+D^es>f0W{m!1p>c`l0PVLu#R`b^FaHr^FYN z!+g^}hYriw~%68r8Ns2tF-`3?*$a)bs?wPbr16J;vu9x|Ftpe3`gjL`q zog~FZ9UHIT7R@`dl-MhQm_&S_n;bI8+V=@4PxFM@FXh1DHEe4AwCYcHJYXU8t8jau z+|qHU^KpDuR~@&@>FIjS@}=>aG|l$X)M}CA@BA9%Id_BEl^%txs$l=!3Ge$l=X&GD zul}#n{vlOO34xI(19CgI2a2R-$F8VatdYY(o34V6XBd%PZOlV3n~^}z@s~~2WF}nW zc+_8>FASYp!_{yz&lG8_OBs!#N3y??S*S}+NAYyq+(oLy)@(lT;akNyu2;>O>6Mup zp0OnyI$z+`(4IKUe{GqGPFB9>e%OsVWjd;V{a4e-{U4Fq$_M7erpR}Eh+gLdi*z4P zHUPanO})2tB02H__^K18hC7CWUsy1AJ;XCv_rR2I%dZ?5h$;(|n|7-7=(2 z@d4-IoErJAP|u&1V%7YnYvnGjxz}dEE|Grse|DK9zD*WZ;2p;k#AxAO{mfmXtjoUJ zX$c1;<2N!O?mh~IvLy@ zwL_4pmDG#|T0%DX!{_mvqA1FpC|JXNc+P~WWoenF=9^XGite-pf@~4n>76s=Z)7f9 zs!>a)m2V^Vyq)sJggb-3D|O{Sq6#++70%* z_M?>#1v&+V0PSa%`o<>nT5pYHGyN4DOW`j`NUat6c_MP8o)y+A?~)Kvn`#?<`{o!Z zp&EJOFS3glqz|h>gIj}-hu0`V?KnvfvsZ&l{l*?f zxN+Ol53E?h`Y@JP={7>Mk<_<%X#Xl(f8M!q{k~E2nz%^n+l|-r-x>a^|6$pGy4?Ha z^LPuN?i6~cZ}Nbt-(wkb*7ok(Y2_A8GfXyQ^$pa~^g$cjZbv8l?Ovt4RW%F^roSarj zt-dDiYmZTdFZvKUwjwY2b9{;uUM_Y6$?Px9a~TM=JL3MuCL4BQv(cf7>x-;K^KD-c ze!Fv3rrZ0=vFpW@#bw&53KTo8EccKrTsO{X!MDhOX*fUymO!D+v45|6*#L3@?~F;H zx3E^C8f)OWL!n);gad7g8m*46I@2-q_BK#{ZR=_^$}85Cx9OCZ*}1ANDTo9T^vtKw zdIrj>D=hSI_yBvkhFc=wOfRZP!sOpb!TCopRVUEF(?l7WkWlE7MnkR*?@HSjX#NPP z_>V=uBn^6>`9?2Wh};*WBiHQvBxkCq<;KJ;DW8A0DbA7wMNq|C(Ln>HkD+}4bI@Pk z09Mp5lS)g;J5r#-utMa!1 zG-sDeujl5rj#-4fYfrXB>MbuM+m?Jw5n?L&BJP2HWbPR{&PrrHZ=m~>ZFB@IoJCDs zt=E4emav6FMd=q;L7ds)>)=p9Mo)2r9G>kuJWJpB5Pi8+H(`K0#giNOGAPhax4}Q*y|RE69yy&t zSkOrfKdC7ansBuP{oeUyK6Iv-1P#v@FIMX!)gDN9uA$IZr=|!wQsfXkO_1cB2lu}n zm$shdxMc8x)}UJ-fl0OFn^he+MEJz|)P^X&xwbcz#Z($$PZo9OFu|6pZxJlN(4@}AZ1&lqGC zW`t&NGsz_-N@Jz&Yt9b+Gsz%NtqA45&l=AQHs{mECtUxc5PXew`6eR*pMQj@a4i<* zH{b85voq`=H!%n;q({{ApV6MLTSSSH$S4~bs&Dw8>mQlr3DRjtQwMM;eh!;08-#us)Zt|fg->oua2Bu1(77UHESMSyBHd|S z(|4TH;P>KWYSxpgghoxks7dFm=5?wtv!dE;Dj}WcpUR-J`?eItY)J&t! 
zK``K}aGdN^%+j9fx{@a-hyQSWpB5HK8Tt(KplD_=PmYYMstHs-dR2=3DJD*?<$K;0 ze6=WX_EJligl-HxNr)^_T-0U>GKq!2a%kwxg9Q)1gPEZ;9i_inVFjpk22t z#OZVEU1mF%6SCN*J?8o}V$dm?dB^2C8odT0#$%ToWvt|K6s>U+1`(O8v=YH~kQ&)w zCY4RQD-YlQQoP`=Dl0OEM|bb01h}vX8a2mKmYd*d#mX*m#3k~YNc6n2>t-{}(@GLq zmx`U#Onse)!Z>`~AY%5^t=8H;nF{Qb7iwkq9qGV5ZQC8siV?#j#*eB?3g-C=qHR~p zRXtDodhRa~1uD^Z8^dkz^7r$Z`|kZH+MM*imUG>$;WXaw&87sS+)|Ricupw(5$#AK zTEAhNWf}W3TxIF;Qq{gWgTkzlVp)9cR@5M-oril{^edWkxVdqyq@$vkW0*J%?42}2 zq8(@XOhh*#w+?fy^vq0T^xpGfS$_|L4xan3t?EMXf1E1EBc10NUiN|)1Q)cvVenNVguE0#n4S~6R_=SNQ&62EDZV_LP_eEYTk*~#Yg2qE7fy6qtzxpy)Edv1pURbV41_5+QryaFf(!_NG--L^?yp0Y_LQfnq+Xf!U6Jw#-yQ ziaiQ_)xz(!N)LBgmH%lhgo((Vhfn}YMDPFf(=c2|!5}Qe zdl072#ZifHuTG}qp+qKkV;W{DUxDw@cRgxSK)qP8K1F^{&)2#>@tCu3jc(1fV+K*21PJfSxJe-g} z7&^d;zP=pWzNH5r5av&P7UWeR)UOu?dCGXD<-aJnPo(g&WUFP$kt8agshku9J!u{E zPqPq_s^bm4btkJj+x7JSGOf&Gq1^$c(_tAT@h!&<6DNjEc{A~sNU`zAMd?0giV)Gr za*i|&zXvz$-nHC&?XvSjs*maly1dsxth>;=d}a~D(E6tKTB`YAtW~BsopsOmSA#%3 zXTt+mtYGut&V^jJ{9$_5i7^h94$Ch-)wZ3aJmUmob}Al)Zj6Cz?GT7yMJ1aptD?#T z-cpjK{;EX>N|NAd1r)H==>M#^|IcD91;Ygkc>8G@f~~#)Y&Q%1Nw-WUWo2 z>r_NYnzJcbd1_Y_YU6JUCAK>0zFiRxs>=wuS~*%^%YtRpK-lUNY;0=exGwZv2Z`JG-ccSOPb^QXve!9d^L`>p_Dcq2-ksyxzZaS)?&>63 ze-)=qNiQEQd#enuJ6aSOn_yZFN}AKQ){h+N_ph;}TUQ^)?ltGaW4W{9^FFq%26Q|6 zHNW_lJp79S+%p`_sLN_P3(aH6^$aP{uHaC7w|p)y`W8?dGF8Jwq^JAkPV)TXR8;>1 z81ou&QWP*wsM}!4fsrL+kQ*3bS>p7a;gGUR+=9LODac;mIL*yjD=HWf6IK_t_(^zSKYe z+ji{PB^1!h^=t`PhDSExJ?Ip88ro2lrtJ!CCTZ?eHNq@>C9`jb#ga-R`RCZmjiPVO z;Bwx3q~G*4Rr(~`O#bytyrj5Ybv$Qr--IlZ#`7}YQf0_l!z#6Q+}hs9{f-pZrvmcO z-c=>hs+ZZmn}BQuz{as>IFsj`0Wl*%`s%6E2 zJj#QY|FvaYveGi#`9I|L{FZ0<2R`YQdElZI%KljO+ZmgUrS0ZA5}|I(Pqb$&Aq)LA z35&N|!%U&8tD}@VU(?P@YE<(7(kMTEc4ZZQ^f11u2F8a)+Wu=9y25gTSraXhU;P<6 zJca6|rKSBAT#~2)bX4Y`Q)P2YBfilW`OC#isC_M{McGmQy}o!6)2GG8H*3qY>ed=^ zURX(a1jdcyOJY&lVc>>c->}T6p~R9f53wDO`&Yr6SiiaE+Mg^4a8m*@)oyLu8A7ke z>i_FW%Fl3I+NVSBo_{@oQTq)lH=BFpb$h(*A}Q?-(`l={T%PaZ#W`f+-$`^-EM|3m3~Py#|K6&>6?gWA@sobx!WD`i{{<&9Mx4qV?fwBWM4? 
zY_22)?eK3bH=a|(9azFsQh#;G-~0_GsaZWZD`Zp+FP43I^oE07c;y>^RR`D=RUa{4l<)oYk01XwK{M49{+E1b$!3td#twFT zl{&N60SydCvlI;ahuzE?L2mzWY2RVcaTRuNq%DOLygwJY0+L-Bww;sv+!Qo8pb4ul#+?Ncn90l* zOU=N?LTdjQNAU?2+T^_;E|V($H481xV zS{hpa##v0@i9Zu%&!Rh1H`G(L#8d#2SXsbTNBTUkJK!7X-peI?(4e_HwR5iq*X>*E zFCL@qh*lZmuj$U|?e{|MN)8H*bmJqb<2{QHzx$=1I;2I;6zAR4qm)G&w zEG)%_j5b`azw)$h+ML(ctU(;%m8hsIy2%C6Qv(y zjZsm;^Q~nASFLx8?*nToT|?^XkMGWIF6x?^w`&=~lwv(}R1r;o!UM9i^AH(A!=nUq z(FDR$6oB`$->zF(`hQAAfeEvkKO{eSa2gWcv9s@qRr; zBkUZ1A6MD93R(y4XSSxVf4NVeAIFp5E^3BD88v-7Zy5Ub!8MV_1LlBJ~ z`p6&7$c`%7&tSM&>ON@H5H5ooHfZtTE$y=(GE`#yyT*QG0c1&zm)*1ogR?krz~k07 zAZ-A!dhj8{)qMXYU;>hJK*YnG| z5gh6jEnFSwp-$8E0temSo1 zdiBR*w;Pdb4S$&Lvn=$u>(a#rTamw>?N7&|d>_-LOQjS_`%e3+%l7DW+lYPIcAFm= zK_XIf#mK+E9tgtnw%X8O9dy_+28$uym41)>Ij*(>kiFsFg2yWT0HA>%ZZ9aJUMUjr z(HIAw*(+u?1YQd?a@3e~thOYz%C8+=k0i;F=%>!hbGsA`g|qH)l}a-E=wCuaZ}8pa z?yc`_4DsAQUnILrq_wh7t&ME@u~gFt?xO1Qpm*l}wPWDZw#xp=s>nKu)L&8N7L}+w zGv9Dw`Fx1~KGpyv{q{v2oW!elM&oGOKO6ujJz8h_U1)T9dD`_Tt+EoodEyyyam`{{ zl8_qt8%`(#h6+Wl*f;-BXiPXNsODk6S)_6b zO<)UQ!l(Dihf86THC2`j6Yb9<6T90QA|()k7F*|gzg9%d&wD=J+HCCPmB>u$uzz$v z5G7il8~J5eI2r?bnLk&=kh(TPd%QTIgc?-8xF-$KcNqcP>Ij)N9GU^yYroA(q4yXE zGa|+jH}DOfLX{R=7TWQd`%6(jp)JJ~y##{qSpl1rw3Wo4)_d6kt`(M&hLBaT8UwjK z@$q}1O4Vy&FNHaU))615@hKylcp0~#3)DtyZaNo&Tpwvzp|W>9CP+G>h{a-}_#X)q zOkDR}vIcBnCvwFSONx*t5jq-fRHpXV&wU)%7y?!Csa zgU9{sUn)h#qUhvIAC|`*GaIqTQ?$RnzL@4^b*4fypCjTyhBr2H`6WKllOYN2i(bZw z74#Fd4P;$Y+%v1vw!T&p0}bxQruGt7I|ftPz#mv z)Opi0(UY8uh=4iyS&-KeQlCT1URoZ6DUmsoUhmG-w{kk@t$eUFjZ{#&1mUDLO>RC( z4w7D=RAbS3lt-z0BUr|85yIZtyTs@a5FL=-$~5};4-_FBXtKe18Sqx}p&Ia~Z?iy8 zge5PgH*Sz>2k@zHZcoJ(q{bx`*vc&hsB+nS=hpV%w5M(jL3|MknWJYoJ}5ftJSNj^ z{1vH)>EMauV`FEWh-_DrdDjyU+}sioJdGQ~9V$1eJust277E8{x)wF;BSGwAAw@6c z$%?2VJgfFPk_mA6EL!?USD>>SEsNOm>u=O2cH2KiB5$%t zmWDt<8|GIb?R?ek{u{Ah%E##I;?3LXm#4SCyRFYf0({T<17B)KHH@Sb@rr&{Fyy2w z;zcETA>A`}bI$AMBTmxAq|()R7B} zx*n8NU2@vLY!+c$K%qh*#cQ?M^pqC4j0z4f%3?b5zG=cP`jnD^qve5s2SH=lI?AD= zIh3dQHs^1Y4Gp8?aowbfU+2m4e}jO~4^K0>e3x`Gi-&&i#0 z2lG5GSqib04~9tAG7gL0PYHTpZobVn0g#R{DR*%)ljU~yhw5Uu^&Bjb#G;H3TVbhL zKS5di<=E|(=Zc(F^cDrpkTqj4fG)i?cegtI?ZxseWU#L{S@1^+M$8D$~!w7E5#+5 zcZ#zixj(VjhZ)MX*X8R5@47ZN{k4MK*2UEOIun~Aoc5OW7M&jNtPShGqfgnTMH-IP zNlrh)*PPK|-z^|_4fWT7CTIK?lcXKsNWU>L5fOO2Qsa+%s%>|G!SV5->r(f#7{gZC zk@c%V`#W|HcZcN~&4%`C#}4AH2PehfE-lGzzlOg7LUqG5QT~5cK(UIXjG{+Ca>7V; z*9ZJKhN?XflWLi9)@3l_BA#L{>;KMr=6G;aMj-RKfv@kpkcG-SBsEOsGOfYZYNmU* z#9w3^6z1``EWtHvb}WrrHm2blF%c_Nwcm&o`*+wV3n{_?^3V-1T{I4@-NCNs^=lp| zr;Vab=}ScSq{=qEoG39-9{MmpxH-WvtF^tKDn-@cjgWOk>LP}yXN3J&J_}XWaerCe zF3`JZ__#~biYn^Mj<+T`tpR*Z`U=`YVXNCyYwc9RW+TK&m)mIax@kGa>Z|(SEC40# z%tJOAYueeu6p>3HAb}nK8gL!&x4W%XrCDJ!E|f;HV%ZlvmsHELaD}%`49g3@(aSOg zN?`RkVs7ijU3g7HIc3tEn7Oh!jrJagJZRv+HMa@39ig@E?u*v9`nptjZ#@bARcO?< z6EY?AOCqj(O2}|H*lgIC+?`Lc{+H|8gPhpLDtb=d_Htn1`om~WAG(2dB9tq@OQLQ= z2vNu<1LZb53Jnb@y)g2N7%KVPC0$1A=if#JiWC{wO$kzn+MY29KjN}rwXC;416#%m zoimeM8M+mc##vOW#U`pR57$Gav4l&^u@aiD^l(lg4DQr zc5=<6M`Ie-8y6m;(S;`PMy)98enTe7gX5gpCRJP*95@g=OLg~&=fF?KN-QecB?*~h zN^1Lh*ja~d9giG`uT+P|IWGjJV!^oF6k9iZMAP6 ztM8N3DLCsP+EW(6>pi4O?;u{Jei; z%a6ETKvCY^*>*sUA%1F!Rjntd0>*ECQ29Jgm(c2>BgTli0Fim%hw4m^knu$0#$L*I zP$Bku6Mbc&NrUU;^V%fAq@P61<$ve_9Y62Hjp{=t-w&So&Da4&N zUThZfe_~M^HpdlvJB=jqh6B`U$M9IvZ8BC@=hrgDWjXyg*^sJHt9wfsq|~-x|7h8? 
zpc?7E5;?QwIoS6q5^*pyDMCjOo!-cQ1Or~R#!WaT+y~_P?BPz5P1@3DyAs=j`Ml_0 zqCdsSf}UfRLyv{i_uxb(X9IX!^X*DOKj_dqUrId^=IDNI9fsegFYQSZwZ<+~%#cyC z1bW4$?MDxPO&}J*ZEvdf*CWlr@<=l!GJ8J>atY_iTA??;eq7i!p1Bn|$*^jYU>812xeQe7*4=R=`ST_!b!X{-%4=_&i5chpP z`z2oN1TUZMwG>hNNs)5at`TDQ5KvSyZ1_3ZpZtPxj7F(@!vTN#w=y$|-6X7D{3i#G zfET3Nx=+zU(6MkYrK8{EktYjG_FuPgLDqJwO6#~{aDbi+!3Ibhwt;zjMD$%)A&VZ* zt zZ#uY*W?Ir%cM&|7dd6f&MvuFICYR&HVs`B3sho#R1yCHi0;X@S1d@`ZktuDrtk@-X zXL&`7SbPJ>3V+!Moh$v4*BAa4kbHKna8{;pswA+xq?h&uQmJ^{9_^q^Y^;pT)R-Vn zs9AW5q%aGLK&Po@8ay=~AmeiqF(O~vDLWUA*oZ}9`qmaXJIV(8fO5+ug0ygyy=|!D zdYm&%d^J=%f4n%NVk9%y3Xm&MNy(+$|8gLxLt8113}-<2nbS;=&-UN0;;_KpO>8BQ z_`}ob_IfDNCNy>4mj>*P&on!K8^7^y?tGtq-1PQPpR0P?p|k$=EAEN?8NYs`k}9Ql z71uO*Sv`^;Y_BS)s*BlKH|M7vscY@DN#+o=jC`0c6Ze7Lzn=b5*gbz(#O)dIK5dz> zo;3hP{X64vp8T+qOqc%uftuquu24%DD(xgGqiYi5l67ytUA>n@f=|Tgun$E)Q#mHYcdwdBIkR zB~FSLUJRGqx5q?Ta|28^1oCYQvk%?X*qQ%?6Z7^Rvrmv8Dt;+>{Ci`*e%YK-`aj(x z4!<8xsNX4!%xH1(Yf)Fj2m2r<4nUQaVCBIP&+K1&LYi$ZPS?2*AkIC3ZAo0F9nA|R z#2l%Fb zg1x5zKe|C^iRxlo(b1&zE9;56D?Yla$J;i zC-%obt2ZGwtU>QrTCw(-e}7uNXbAdr|AIE(JcHF_vSL|wkowx;D)^$8v_nne#4&@j4da%f*+i}v=C5VO5793Db$=M9>hr7NwK(up? zyPIAxd@BB7f2;3yP(dPw-RX32esW&Ix1F2K2PV`nYow)3yp8FYc%0#KinTp=7O2`+ z(bmD-42o6qYe2COpG_S1eY~7mPD$4NxKYyX!?!Fdhx2d*%Cfiy)g%R1frHwQ82zGr zV#(EO+ykQi2;aX1oH3DWyy`9JLshoy$bM1HDlk>|U<_0?jG4IoOIxH9nl>q08E$p~ zVe=OxvzU|50b;|RDuNqz!=JJ9@*c2PO^7vwEafx3!rO0k7Y=h~YF8i5@qbi4?Gk-v z1&I6wJL;RFvr3TXqTinHJ?Ce`dEd%^myp5(BSj+c_~~ksbnS^P!2X;?zO)E>CK8>U zSjgPmEmQMzz{$R?$WdXY<2KhgEpL3S_wndI_kF10vf;*Jh0d;O2iUFuqajmBRk|~K ztc=ChS0@MNS{X4_!AOY{FLGN3IW|jKgI)Vs)fO1?v6Cb)YhC9A@Si>)~+eM(gjCPR{r&Y>z?_m;N17 z&oWQFo=d|6+6;vnMA%}?xiPHC0XARy)~8l94sRx&H3&&3ky;g`IxN~9-?x#K73wAb zuYv<3=a0B3lD+c>8DfZhQ)PG@|M?y}#G%S6QE8qZ%Mydmj)eN;<3ol&V$Hh!CNBoT zj-k|Yj!y)p-rKgHmidj->UP>P&ucPCn^Au=tswp9B1U4Qht01jFyK}>FtPV5^B+#t zunV1cVhJY&pBNXMfYvD3iQT&wcL-*|dzr=C(%o&C^w5_eLd5HID=MFMN)|AZ;Wx#? z!>*v^g-p>(h>@%CD8h(4_%9i{(x!*-1@qb!8p}*BrxEVkoQt+0b9LdGs?T5=Kp&hS z%r;URW!7*ryWshR@OI4nwpwdGk%+krLECOeX|TOz2P_f;hc*W;^i{R6_HCc%S9w3_ zCpD0wIYjqS;@J;n8mvVB1hQ}qah`HREeljfjV^^%fy$D7Kfko?rPrPK-(H}!@p7aT z_ynBq{7s-xZu|po?@AUrLoDPE9bsoP(uhBTH?Zv?l_VT7(C%Q-p0p^L%JqJ}(MEQ* zsd=~yQ304eU6ah>7!{<01FZO5j8DQQgjjJ1Uq!5Onzm}WfSc(wFA5o@NyUYX=YhLt zU%ksKOQUYX7n^@HmO9OqTq8fHW`0NNzVA2?HherOrKf7z&tKS<1ZY{5CTy5FA z*LDNpcUSEhfk_)}b$OrV!p6tZx|8<1`a^!POOX$UJe9I| z(=9l+e;>={#&*dlrYgTqJUYlhXjtwFXRRbLsCM@U8{Mw2#Wpz0qnH|(jr9p@LrKb_ zEm(w)sZe$V4@b3vg(W%ca%neG7|e|<$*(u>VSTJ|>9+~bpIR6?40U0F=lGAgqaS4} zD%{(PqK3e{AX~dj#ruMgfv-0XG~`-k^p!k{s$h2yQMJ@Fx6JJvTAs9~*T*7fDo#h@?R-aYtt;tDwFs8)#*g`R*GkkY5) z{MnuF8@Wmk3^LeVB6`*&3Xt~BwrT%4cs+~IzllBhwo7emsbz#^`N^j02A8yN+uL~^ zpE#ra*!u!jm=gZ(>|KAcVcq1zqpa>cAIqxXgzKk2%dRhR_JkykyYlKjD4Q1koE|I>x>~ptlS&<9 zRgbslraTmXM_Nke_i4EXb12GzP^v5rk_&V0a%KI!{lss?AqqSOo8sp(Pr$1=)(@#u!$Vag6zoJ<{~bet-kY zhZ!0u=d30?pZ8sybQ;$6E=`8#{`Ro1S16tkb$K@tXsQU{+1^C@e{VTwluT^r?bKzC zpp$~AE@euI!9}t$L^~J!L7#6Eu6K%0X|%6bfAo zRi)glZreJCyxhj&-tR7(%mJs~ zDIM*Y?C&}beLP_ifC`fB80V&=(q1G3(z>GdSLSEM*$5=E$MZf>$8>iJg*Xp20dQr%M}Cw52n38!&?wz? z6c?+MMvsi@x)70#wrJn;zhJ;*(g!}AGpkb_gTs zOZOnVLixcf8)^vk%S1(Gtwgj;n=wt}z1x+&K# z{e2GpU*Q!$meIH8gcuD+2T?>dPYqMWiyz_@^$h=hLchV8G%FYR^n4vrHt~8^N38ukg=w&hk34M#b*6DM`a)c_j zkP~?A$r@SvfLBcQR^{E%NVto-;hvpqca%ni3~1hA&T-HKz0-|AkCWZcZri@q)&Z|! 
zIvaF-uZiVdicYHsvMe8me2z09@EF8NaW;D1-Gb)m)!w^iN~;nmk+*0jf`3`FpJPRG zV{y3z@XcT-cBIqiEC*B)DaLlED2cT~;(lHeuytfx5c(bfAha4X;0de7Z-(p?(FcNN zRta=l&HN&lX@~$KdvkTP@M{8}W_3@h;55!mZKK^tz$L-Ca>3}#+P(1E**YE+lDL0b=hKfT{wJ3wjPr9F)8B^I9Ae(79a8s%x`(C#(21Ag$b6{G#@R?&dgoUO z$zeYZ&}|xp9{l!x{+OB?$yu?<46p0K&@{kqx}mb`i(Gi?S$hJLFX6Af=?Lye+Si_Y zT^X{jhQ1^LYmvQI0;uV0<8OASk6pMrylFK|J^Cxq^jL-bFHHP)>@_>54%6Kg6GLQqmL{bZ!L z{KDtePUm3|>{nk}aOuKGVDvVf$CE}~g~hDoo*B3dJ*gHKxQbLD8S z1)J%WaNB)JYNwY?V;>a9(1GFaz2JCzQ}TVM$jM$(&AMOR@K0Rn_uO0N5(aC>j_2cX0y~vmKI5Eid|Eg0`ZTtt>>j|vvo<+`;7rWUa&BT(w`Fw;t{veQF1TBp6MVl{YAVi>}1 z`T(6r&)!#=PUP_|eb;vQ`6X#Yx!TzP0xf9bA63YURpbqDQg zgXN_-$YIwn$Fp9;@6#NH?eHV1*&)Adj<7p64-LSfLIFjv+!r*+(PvvNKN(Aym)akpuVU$ncZBv!?|#i-{AkzzH7WmJjn{^s3I%inIsqekWq@7Bks%Zo zAeCqvKvLUdTJpJPpQgLx>Ln4e7-40jBZQu7o7Wf9i1W~2JO&YlL0#hCpbiYu&!nLj zg)V8ldwjO=D4Fs%|`wCIDQ2eb|Q0L3e@5I_x%uPX7Q4XVuL%TF+3hQ58slwT^?RNR;4A=+1 z0>t7xyaM)rH8GUnwHb)M-mcFfz5HD_blxD(u+K;!Q2?(PnU zweMZ$?7#4hF~|4Lnom{fSoc^+6k1fFwH_^&un2w%f)U66MC*P;^|oKFjHlB6*2iP= zpq8`x(y&4dM~zrE69lg_)XsYn=cW!s`u#i1n8#4ep5@-vn6gG6;iI`+rNsEJb1j1E zRwaRXf*{t0R?QaLGRU|P}Yl1|ui=#D>;r|I%2-Rda5@D8j< z{*6A}?;-a~N8nzSaohC)r~pH(c!f-o>t@>t{jZ9DxV^AKdcRN5;Upf(^{umME?Axu zWgV-6Z-&~sTi02Hiqd^Vva9}~Kx^vW{uBCmd0)M^l3hnm;DdM+dX@n1L4#}cUVCh|XU8-~ zup)KVF@E#hbn$l&r-O* zWVWd?Yb(GQn<5dChv`DC;t{tk3f_fz)ey+9U3+Ob14(blRRry36kL>@Gl8a-fou4A zv)^gqt>2CkoZT40>CaWp9O$dY`kk>rjKqEep`;Jm`18OR&xidOt6$I73uUkjv}p3rhnD z@zKcYFZ}Bg2XNZIUb&?b5>~H{ChrgiHYbkN?eww3p3fd8NPCUyTdFK0Ofdf6Tl@NY zmKdBJ1?i19wEa@s_zAKDxWjdHgdk7ek}3W^7k{YL$N>NIxvG2|+3Ey*#X6c6Dremt z7R}?en4(ci4U+$+uDD)Yy$&lo1*c>J-=Gtq%puamU_w;M62jhiPa&**ebu0`{$%Vy z^1UZVC2=8_$h$^)anpiczv$!F7QU69$wQcq{sOxVZ136v90nYnc}x@d*SuoDI*VSS zK=N=j|7U&5$)AVUmtzz{Ad*zSCuBsmFTV(tk@2|EqM{u|Jl=z0+!Xct0x+y*;3zv? zcU(~SZgApSmJvd$u0jI!KWqfSx%b1x1T{gw=5rxL_cD{~~oEGt)~(_FqPnl$htKEo*o#}Nx_ zeR_k-xvXeD5N%=>GdL>_LUMkdnoSm2&ba@L`RH`b<%4^_DJn8D2pD&!#fG^L?*ldf ztMfm^jhX3A31pq@Wxr|~7c*9HrmLQ)tI9eOjI&Ca!P747KN`-q!z&!i9d|eCcEE?@ zGM+TvS@#CC1hs_Wn)yR|;84cCVBcZs%<{rI_8~no*DILl&JNe$9p6s-dD0}G`4Ln$l`g{7>L9F20fO^%R#(d{u1?aq4=(^fL4u ziIe3iaT=}}4~_MI6S%5UmXfpX6x|sg*=xI5|8sRuFC&VW2GicQ>Ft63fWS7}Ds8t1 zn|wEGD&S!>Je0Z2FJLTyXrqYyl4vBq(4BPi?=G?y)5-SR9?c7lG}8`Uz;*#g{r*Rb zo_g?7FKHVTB|iS<`ex!x%c*MG?r65Wrkwv@qP@di6WAYcfY!^~Xi-OtneH%h0>0m* z8?O;9v$Je-ebX*Fmzco+f6N&M!~2Qv3G(}Do$)x5!hLH z_(18^(ew8G{KFl)kL^;h^}%qObvPED!K{OmxP^<_Ru`0cchcr{VUGsGl|X?hnE~7jQK+Dz(27{*0h3%|AV6 zE=7aZl4Zj}@!AM}5ntFYEjw`!oEwHY1WE8Cj>tyV)i(q#$1coE6R;r2e7bFoEM$kS z7)uTaRI>WZx|VusKgIF<0Qh~!fQALg&5CK=9^X&h5L2$JFT6c*1F>y37EwKrKua(! 
zT}|aPZ}Cepw0j*+uN7`CF?Z{Y-n3~=V)PP~DEJwI8ZTre@?`9Wl6z(B1{ zmu>fvg1pSRooFI_@}U4^rPy6nY-TFM6F2Yx{@`STf=wT5XMTD1ca;6SO<;>t1E;RD zX#XD=P*o8P+sEM$I=LkYllHdS9uj@lA@xIi5E62p9hH?Wj_fY3n2tX2+H$Ep*4eE| zmCvqMTYjk0&~2B>v+&N~p#hZ8Ksht(wq{S?S_U2c(f4EqBVOK%Y{GsY=lg;<%!EvY z;pps2+bDT6IIzw&A3X6jveZ(!;pS$f&W+`8dKdwDIaNO2X-+~E_5^-0A@4Jr@;JO) z$GF|jml?W}EuCt?s`pMOueoDj{oKp>jB=rLn+gh`&ZS?AK9hNw_$D1PRlPkIQH9-l zH?#(>PS-ze$i>h%`C}*uye)CNy;82UG0lj=ZDBeix^K1HyebO-|e7~u!CuRC!E&ol% zIj@7Ax#tN8g*7WS-ZZd0Ol>T$9la*CHJG@G<^9PnF!wpPDaF31rS(bsPVbxh>Y|sx zL1A-gZf;anUQ)8uNrs9tiOGr*QtaX z`RCrsKJ{2DclA7oOiKU1_q}13cR=)ZxHv}lig)dGTF?%+Jz>t^&G_~(@`L96+)rq_ zIaQu@(lc+8lh?|v*|)?f@cj<;ek_Ngs>3iNyg{5xbjP~+@PVUBLUoiJQv!dh877s9@V%$)6OLqy-I9-J=q;Y$2~Vc4Ch}z= ziLY{8?nKcGa_MaY5Xmts)s~IpzyS0crUuy1v>OYzXH!-ut?&v^huo+|NALWG`K1VM zMewNJ;y8~9gfy?u(KHr)9y|Dcs~(?Z`MNLZP{}pQzc9JN`Wo`6Myar3FHdHp(!72t zJeM1IkPS^*b4qb_N8qEr@fFmuEGaLtoT<9C7gr*iV`;Hkzg=tXiUAhN3L=}kusP}Q)n`TWz znG)OOx?&aD=Zp z%8|6c@L@@}t(LBKmr8;ScD@Y#9*T#MMZ;>-;%T!>0@+<7t=sT~i`~TYk8^2ky(uNkVrIV4he_riHUL3%s_8z>z#Tt2|lZa$?;M(9M3yH~GPJ6p8nl=870$%KEe z&RCajBbJ8_IO}uxQ!fG${)7=>XhZ3W$wS9)@S;#yONZN0`qb`eau^TW!@R!H*AhPa2D}o!#W-b=)kpVh{WqvR36XYX2UQ#eG6uHDgGJ75-I$R* zY;Ig5`@qI5m1SzfNhOc%>|)}Ger5Ck7f}~BJ_lT?s;#N%ZO5H~Y5U`a19+$$NHRiT zx3pfqAelyVTcB-tVHA({gRTJ#h|FQvklLzpfE&oh?U)suMX`9_xj#Rw%kMup1h|-R}+>q*udB1{;bcxoko3a za${XoB7paO9B{EaFo1|3YMiup+N3p`oZg77v6lF183cOd7DeRzd?n4#>l4j#9%6vh zBlTyHRuxQ~@CxZcd!#4uNnOyGJrjYXSkow?s~B)|9xCfU;C)jAhX=?B(+}GuT>wInm9|Wp&iOO1_Oh@^y^99 zKP`viO3AIiS;Vmw<^QT&m)0VDQ~~dRx=q+K!$V-U-ueigt|H3{H75b2KSX zX?idxvccGQ)1Ec)R+z+TF18}Sl{Qx%v5~F&eU1XH>>qS-TTjRs;jVRe%-*$+#!cuS zH5^NLRviK*x9Xq&;{4?@4f< zaLH{H``Ux_@`&iyFwKhyoN8R^rC^hJYOEp^;Z4ctuCnn6gY6lJTia^~rDcl{}btd<`n1vA(F%4($uf^2Q>FG~|)~-0)b%2j6bAx9Lfu zcIc?Y{y`@b1Ys^YDQVD`Db_YDL?nw=Pzz<@RaCvFV&+JwDG7az7Xe3zD0#09qGaX4 zNPKeJail0}vis>)a3N2v{l}^9PePbFr8ZG$^|MmX(CxCjL4Kdi|NU2g!A(~C4~s*j zMOL$CA#ypJv}9OPKVJYA1a=kclnfTzL}V$3F8sQxHE5p5xY>?7p7U$QapVU3wK?#_ zQJ2Cd8(H&Lz{!jZ*5-ZgaQ@612}^V#N4Ak}SDNhlkqh;MNCS_xXwysd|2Nlb&-YSo z=KqKy+rQIy%+GVDCuXW)ewO?P)c*PdO{ZI}vi}yDB zSXVb83Z&xbmwbEYk)#TO>pz|<-po_a7L>DeYv?naS?me$7h6|FnDMQy0P0H4;+j2= zAQQ?gLpss$o#l8u2KqQ?57t)|t#b0&SUpn}uLl`RAu`J+)YtP|_R5{eC5d$Pjeu}X zWQ_TUm48Y|$I_G`doQH1pxM`1NTyZflFyw(C$r_hhqKt7oN@ zSTr~2$AT$_?R`M{8+(oA!acNr2f_^*BsbCXhA;af_OcJR`e6*YHc{Qg)R~_7>tc`# zjAw%ed-dlpSoG&1ECCQ=fEh~DPs_2N==$1gR4CqGI@Wo%c9?xmbkh`m_7`_C~YW-Vw$2aS>H=_~1L1lo*dPj~BIt zx>{oG))1jTR|o^G6&uYd0lZP-M!)L3>*Ucb9s;XkrBF55Dyt(nZB4yK6U^k6xw{2D zSp)s0MXyFDUV%%^yW*vfPVTUnv;um^E)6mW_{!V+mYIx>H|;`6BJjR=nGt?7wlh%k z8{1Fk$cALFdg(>BDz;yK5qVoPJD7N6TB=LKR|<$4)RCn=Nrn8|Wk|$MC-o?)M?)|IZNr(IBWfR1$ekOj=U$buHPchIkEE=mDyYT{dctWfS*$ zw-jeSu=FK=u$D7mhOrb{Fm<`m&=JdO-0FC55?ca&HJn3B&fl3tB0^d>M7lM*pJn?> z76*k74@lw0hHn4u!mNq$c6nqu|Ax15w_(wKIg#`WRRIkWGkY5hiLGwy^~-p+bYXcL zgQ4=_MfV_Nkpm$8os;$t1@K&Q74IybfKL*MbYm*!_}w;^L~Ae|xCCp^PUerP8c-)< z<+*<+Six7yZ3f4`7k{nm!q5H$$|pp0!055bFFWvLg_-|Rcbil<{O2}Z)#sJ*=lQD3 z&ldwEi1hXKH!#d9fWlNKmybUT5jye%XI9)d+7XUASf%(R2ovwID$mXa{%6-FuHTsx zm~Ub1BGS!-;=iPiFEKeBG3 z#3pB7}PG-i~uWEF}cJRtzLz)MaFFx-ddlU^qaw#n70dr02$x9 zivk&=ZBFm&t2Z12&c7ndE5Mr#@e7Ha%bo01_{$=rnZ!;IZ2iAi9+e4UM#||{39rcs zTolY&qFU-*4jUVzBBlh9psh*SnfC;pf|H&obYyIXqp2d^L+7L&!V*E`oIT&@qCMMp zM7o#tLVo?vSQ$}CyJBq&L!}DteJ^4LG9CID*J60XO`#TbIinNAWT}% z+$x%Zu7+RfVmQDm*}%)Ky?Ga;P?cgGq4U{ixG05)q#3zwIcOr*Ai(wJX569rQ{3&x zvlY&59v;k>1YoT`{~bR;AMudI0PBx(2@}IlIUZWi$q0fw)fKbGREskI57Hx&{LhJ& zQ;W03X#2&%vuLI5{XVf+J40`P`gG3oqj#&am3fOD&vE02N==n(Dv(vLiko?9AE`q| ztnpF1mLoiFRIq& z>-gsBncdbVi8PmUzw*fmHTvMh`9aVr!UC}}ZNQp!B%~IAfUZMG3x+qI&)k3qeO~ZK> 
zAx;etKmp|se(#OT4+M|P>40km`lZVFOt{Y@`sMt#=w%AWx)slj{KI)=kYHYYd2sDVDjD@es0#&YY*k$oyGNemONo3gakP-wQ9Zh?-I3&- zE+TV@vG)RAhOdCH$V+13{=w*$JwMBb00#$C7Mx^!5c~Pbb!G=PW@VILj84Om;I(ce zF%fAcE9ObHU5LXNvQW<@Mg#L<#U7in9S-dy;i4onGlAZ|?fI8YkZW040*YB}k^#+%l#O;M8Earf6x_rfFrObHg^sNq9^VGCu z@ms3N0*+EJ-3bYGp8C3#3P48NBb3CD@I+n~{F-ZBm=0Zk4Q6c$YK3E{wzHvxC)Cz| zfn1g-SPlus;0sU zn?Z@HRU3IDNxoC!Jk7jVbvQfPlX>{>3Qd^^*NVPsh5&Tvyvn^1^xvL_hz2Ymf^Tv>RK+`wY`7^>BONDa1q}T;~3#&@+5&XLM6}I13#GC{lflg6uPL>j-2c;5j;P_2h?54(Q@0nYiH7#MF55JQKlR5oLC6o7 z$Np_s%CF|5ol;y!)Ni2;54WMpdtN(?pLNtC{NU_bXo~<|B1&u_0XMvNABLAvRA7?K z+2X14t2jy4cug^vTW3P*qjWu$L+eHy8n%@6MzQCTT`Sck2TKt4X|z#c*ghPXO9Y(< zC$Px}G8%V$bez`VP2B`dIP-pexNW*Uo+A}vb9xPBdfy1Ihiu!dWk{P0>dtPCI$mfT zQI|nrwX?;;bxjej{TFG{%uEjW<7lZrs)ge;jdVN2L`Y~lDTFnoJ}R~;fI1qa^}H-c zcGa-lXm1xW#YQ}fnGR0irUpaoV68$F^Fw%<+og-K;g~b@F0*x8PDJ?te#ch<&{{fZ zQO6o9c47bxgoTG;mJP+KgC$^PdBFc1JA|e))o;J_kJ+pxR0qAQq;PTTc26trUi_po z;}j&ALK*f?xD0}>rT=#R_hdCAzrxz26)qm!`zh6_%HKg7I{NML_FG0ioAiqY_8lz> zDdSNys_4rQta9K1ljBnJQ@Z^jUk~{r%-_HqJb^f1X4%IiE8IqBej+GzbL4`1UB zk@u&Lyx23w)wL7EdK=8N6}^$z$NX1>`Eq4OR`hc33Y;s}UnKDJ$JEphSZJNSPY^DO zmy-Vwf75(c^06^bsbs{&@9^)0+lHUVh@n2q%B+n~=y1*GE$^ZZJBnt&$%O*|UW3d3 zdQmN{KI=M}QtUg-k!I7zZ`ZbXDk+w0Hq5leWyxj{-ba!7Dwq`vn91Y2iJc)!7nXSQ1slyP;MyDxEZHEH6jxc`B0O9`2PwBqq1lLaU? zu1s?(&o*{>6XA1s$NUY;HAma0RL#U{3BQC=HeSBtu$CtC5~5rE*XhUt4vSU0f--HW zn#39q3lGo%pg=41w(p0pj*ThuFck&AI99d&kGbSJJX z=LvQI!vnrN%#_XmA@R;#u?1tX06s>kq(_u-ln3FmbGC&<@~4(LU&r1lr~3Bwii#J1 zR?$r)xfu*rWKgjKOtNiGy|MH7uVzNGY+~c)!X|=AQ&NL=N!6 zc>Sw$LJUrUtaofv{&vn!Mu~OL1394CW<7~AK%q^lmf_G@y=9t@+{+Ed{cC+i`Wa<$ zd>}@x1gb2hcT|)cCASi**h#P-4C^%*C4isvSKY-0aOn1|_TpD-l;uugo86V_HBEEN zdrp8wJOqYNbiqCy@gMQ>??&^Wno2>3HRz9?-`s)t{#Z=JkM)2-S}V~a{9aX%-~hV1 zOQZo*^Z3TNX$LZ8(u}g)`bge}{3t%wT>WJX8^DB#LsbhaoMx*+f1DLrahgT)JK<{x z9@*dJ^>D^ziKySf$z%(@29&Iov-pWSLrlNC%Re}^ArHzobca|fK$7i$ojlW$2bvw6 zm6Yv)e07-@eMt~IActVIR=Ew*h!$5JgzNhGQL1kG3^ z2--zKd@eRDDzhdHxXn<{n@cX?WT+KH?B_4pRh^HyLW>||jb8X7DxT_sywv&?O02JM zhx{(M-9Mb!Dd{@sr+fGB2i87UccDm9^YZdq zP;b!&Flo@8eneI3wlgWmvt?RRrTwaq$ZAms!&dRhVyV$BUOM2+W0Db2)k3)CTCiOo zbU{186bR4KVjYP0JTbHb;ySoi-t*gS@O1v)rWPWp|GbygW;nf!c5h4A8&+#oN|1I6 z2pvXr>NV|p0?lTp>*qrd5vo$;7Z~@9%wP1$>sc|^%wjSTp#+xLz2@OXM4)ygR3!Gx zNSh0zr-#9Ox5IE`1r=}=#q+s9%T3#fJzjpklW5CD!Ml>RuSDU3`=OH6S^`vfDR&Ao z0MK8yAL19BbH$>n1HmvGP#?NMVF-i(dDyQ??(d%#s|jO+^dE&wer{E;GMD^J5DeWl z_)>v*##r^C0eFdlO3>`G*@6%D;(W4+rUZW;dt8Lh+I9|?)k3S>CY*5 zBSM|Ry{r2&YY!=8?#o+meHG3eOtOo^qB@cl=YS@%)6|&JPt?LW9Dn0zW__Z(H&Ts{8 zCs>4=d+&#j8Q>1)^Qep#i~sR}=O6uk{QAqz0%))3e{#n3F|ZL`t$Y-_DgvGM{S!!y-yXtC%GMd$1wP| z(*P~AL1CMwJ^TnK`OU|8Pnw3P1}8Ho-yZ#8SEiSQveXotleswY1q@}=r$1@h9wFFas}^AdK6`X@ zG+0lZ8U)Ww=-}HxX9(j`J=SBVmme%?U`w?-14Zu z+09vJmX*Ce-hVWcfVL)R&sq~>*#N4TV{Ihr#AJ1?QNzAdl5qU4-WfBJXaIfaMq+t< z_PhTumYH!f>HuR`A~oHnUnY5CbfV`#y2wZG9U9Wou@ZNix-EG` zJ2bPH2$RItZ`!5PZ-22bt&4-pIqe$JUBxi;sJ#K4#-p%|@s%M-hDYagu)gMJnQMiW zKKHbJAMkqQV>cetijr__VxRB|N`RS0e@VP#WH>L{Yt?h8`JXM*@2+bMU5h!VW@brk zKC~G`k#!-y5`NgjoVHCY`Y&T0>x3uB3Kb>)-;rYkB>RW{(_UR8iJWdh{R;h)<5aDL*c~5xD1f{Iuae;qb-Z zClBiL_TY?r=a^BTaQ$*Tl5;@JcI8I54#tk)Qz$GO$x=Ooa#k6u_{k-t;TC8 zFp+{BHjhm&B*9TwA!+_=WQ#5^uaUOwS{tzCsgj`WAUIsqsx3Jlio$zv<2Wsr*9Z}8J`#s+{0Y-Ihah+jMjEK}w$ zdgce;zkM&H)gS{6nncyxS^!u99CV+g-V@+3=g&?X-Dk(?JSY0d;Su;8&;qzK>BPFq z^k{-8fA!mtj6s*z3(SEuxbTY?VnWqoJHLbQx{>dm4&Z+^It+X?NMBfjqMxll!5L9Q z27Zs`)3gO!Y&&;Rp}G-KB#_NxWSJ8s;Yb-U+AV%ZZKFW6Q$7oaAVspy%))=@xtt#W z+A@6nB2L)!>+chYj3}jAbfv1NH&mvK=txeNh)3O5!8WZuc_ecVu?p}RYa9iFrFVyQ zc3jJ~c9s4|sk;xlv~saz%QC|`2fzJx%RikR=yzV<6*XNUW^G&r81u*yDLpOGA!vCs zz0Z^Qr)=JLevp};&W&~KB!WNGaaRlH>3-1FsM?U0u#*QEpz@8jFl1)d)!i!=viH!} 
zB#B#RD`lCDt*K#45n+B^YJXsA!`DWZpu4w$U&!YPst5El1%oR+C9RjHcT74?{a1Kp z0Xu@H#+LNtP2;<)`_-px5T4R6B*>Mez2jk0*7?XuEctSmHH*}%0iv1#Q^hM6Wu9q( z54wj%A3+71^{!J$<^QC?bG=IS;4aBVRVW1$V~||l`AcTE_MxCJ#4AR(8f@^R;VyF% z7Mz`lw-oFmDsf;*&>_0cEx)X#WyQ3I2}~Uh^H;@&<{jeA$9uy#45XQ-j))mX8lAw*VnWiZy|K^Ocs z4oTO2ntE?L!APR^b2lKcUwtoHNvx$8dLUGAV>O$>F8N`=PYe>6z?-z>JZ5a$^aCNi8-b# zMJoW%7sL*~#~pb4kGhPHv0hc$&0MyG(7`E{p8q~E1Q6;jUGJUAdy8|!L&f?%Zb>S< zS>m`J>xq>8vz3l^7K!|mW)J~n*p;+aNb}>>qbBHElXd3!_ar7HVRAz07-6j5*N1NmPhq^pDfg`Boh_`}@#LN}Aoe5)ON#(dEjaqCtuIwDy&!lnqWg1L>H@t zuH3p@dSunHkEofpI-8%79dI(RPoA4vhM1@8Ga)d>|1DXJnZ5i*(m&%ER&(*(*|R|JN?9kx&&Kzg(~wG4LO^n-C2`kUgN{qf;Liec<2Qx45+xOop7Wx4tlr%P*^> ze(0C>VZx6@9Li;zF1%lkW!f5^@(^~CjtYtDt$6kViT#kT_}VV|i>LvenQy7Tm(v!W zlx2G-Fw+eNmc^jHf6~QE=VMqZ4T@b4yARAAwUU!?3lS_T2)Bc{V~?*S7*I347XndCon}qRhA~% z<`;#xOjho*T=g7gtm%y8t!f&-*D5QwU zXHl^by58%`hTZR}{#-mFjtS8}fo;@oktFtqo+bkQ4TVl&;K33mX`|bN=A)BcTEjnA z2XX$Kch+`2x;G~@o3s1lwCxd|dpduQ>;V?1n)?i@>-&G{kVje@C|zU#&>)SYK13#x zELsel(n1h)`uSa9mB_lc<+0YE3blPF>q~w5^^j*T5*RpJhwxPKyIpafZMi|I|levn&8lJ99+F z^NmuD3jDv3*LUH@{ke6rh7wdTG8`l)B-I_<>o%?B#`VZT>plItn)bK-02Dcafve-r z=6h}}d%GlHlEG~DwXguIM7#IN;>+x7m~?pLlLTBOO$Gjg%~?hz@ghS-qwl9?1arE;0R{8OMqU z8pfXfLlPY4tAvCU8}2KU6g3>s9kP+4?qgg@p>qJA3w$5;5+9BBVq*0yI^dp*>>Um9 zQmfIN3{hT)jwo2FB#`_%?or-5$ZS58-d(Vp9z0y>s0FOKR@427=|zSv2DLvjJyQ$I zrZm}b;V-FwYrNq-b3ZNgd+iU*&3#um#r(Cj+eclHyFu-_iR+~Ab~LjT7KZx@=vH%i z+$FA)6q>Ic!@W+!>xM7M2#g+=8@a1hDDz99Y_3*?dNs%~9amD#0Blz`JRe(E?HMW! zZ@-fBO(OiKFv_aD;1vEUpeL)BbU_V)R9~ z82_=#0LV;{?o&VmAzGJVL9PTK@3Bv+m-1QNl_S+Us|gJ&-P^W<${up>FIB4BFrynz z3IFAFx3(`PDuw`TC>a15S+ZkNHVdW*X%SR5?n_b?t5{n+K%G~VUo+#^s5AH#7;pJt z7m@X4Lwzb8gUueA&~p0AyCD{opmed7#5#+r(?lvj~ zIFMC&%na1OQTjx&dC4GtN-HnlvP`=k(YEh5Jg8mjt$iPkQ@PN8y1IHiWW25GO}#A)(~Z01db^L;ds53+ zLp;WE$1v2%-J}p)NSBo)eLFq`>Qw6tMUWQxh-^|B73#xf}Io@e@U6Afq zC#ASzz@>!(vOeVOham|wj~=nY9RJlKIE*r`r9MVC&23)d7~zf@4SI5;(*B!D#|f61 zVsqPThrn-ZEb^T{J!Xan8NBFu=rB*K14fXOewmg&S}SiXPOPYJG^5fMj+M2JMQ*VB zqP_Nl@t8BF5n922F-@fg<4F)4iE?ugdO=YwHU#9|f6DDl?GU9U8DfaI^wfoCrvyX2 zR#QA;IHGO7nC3{e&=d|POlp-xdZ$f1mkzdqJL??`9Z0lS9USguYXjVjk3);G=D3eg3~WU#)JzFh8RvfSKDP}AsMA@Xi)Wc#xZ$kxCMoTq?ubQ!P8EFf?3*osxV4Kg$zdAaNZ8AQ=;r6RwuSn{mTyvmgvLzJ1lmwAA5jKQ?8o-%f>D*O|jIpe-| z`G(uVM|KFUr^o&{znqTJcZl=sEzed@iT4r&_{lUo8@x;-;)DFyGwEdeuO|IpIm26lRxKJAy(PE!nhO`gz$`(@YH@p4K*E*NU|8A13Xk?ydW-nWe>V zo2s%uS^HJ@q>a91pKayzzKcl=0H=E}l+x&ty_XeLXZd_~W?xG78ScbY{K2+o1S!X- z3*D0%C?AG(=tYvEcyr-W$d{vKt8zaBt7}6D-0v#7z)M0UfDl`$$W!ZJ90Nrs^|)e= zNAKO_#+v4dYoIY8d13E@J$9=Tb) zcxZ&mA}>P1tp8hs`1wB?#NWeYVfLEM0^`MJTAB0C%Qq0*bw4Z!3rSt1-bw#Iwew*r zS}Z0h$Pn70RL#;6y(wW4IhZ&{fAiPQ9^TKgU+lf9PA?1xwyjO=_@JMGbXd>KIvK(@ zwR^+ln_nC}$JN#!nb)MoHJ2?aTXBB%1mC!^L`)s0IcyxEuq=cCpGK2W2Q81J%t z%y^2-e2yTJX_z>{40YdZAvG{9R65aBc!eK4salj)&T-Pu{sPr0b)fxQlrBi4iIW9q zL{K>&jUxsUQ>#S!Z9+$>pG68P+-8S075tzF7{_lkggu_FKpG!bieBn+$FVBsFiA2X z(w+E`uedcP$$Sb0BXQztW_y9L1If;D?u)5`VZJP{_+#gVZ<}}?y}Dl>X!SMr1ja=l zB=GMU{6#rZd=tV%^CvSz_x?aNn@I+zT#*m32l>gD{tYM#-??Tu2>60lFZ#|~|Dnt^ z911RPra8>)dbCZ!K*`$h@0lLae}b!Szw?3N?t~ef_`v!~&>?*NMK2mQ@5xIOiouT+ z6-EJc&Q&?Ql&hXK$8g~vZ+sYVcD7S3ULz%XGrP92KTqY# zc3mloNt>N1S10~8=ZJj|McBS=9gRHf`mCY4C=6}5k2BBt;p4Kd$tWfHYED-Ak6G_9 zymroIzzuNO>v)c2SZ6q_Xm@|YwxBk)GA#vID1axj>A#iHAGnl~^8QTgabtE_aDV$gBQoO<&zK{Y4zq-O z$-Fx2k^SmER*Xw)f`)3JY*W-zQ_fI;K90#0=A5wJ32)$CSHC$~#VD@dDUtd;sy52b zcbbk~x~wc0F~54*NXy`L@=C%FZxB6;_iWuPa=R|2Q}cywhv9ESK#tc>R7@Ho5X?a{ z2tWOH^MJ(I)|mX{e{mygitqoM*Hk%tix2RpEe8mx4IccaxzN8TIIPH>;*O+$NF}mj zdw`(DIyv>UoiyDX*E?5=!_FW&S-iw7dOe)kNU4!7+qruh1$)_X*n+^~#oGa_cnQAMH##s|G znY4U!E*p~7>Q!;bqXK^$t148SY(V}!bI&BBJgzcGDkiAz*8p%ys1Zes4KR1`r6C{Z 
z-z>QdrN$cNMzkASB;|uC{$JJ(CD0FJcBp#EBw9U=P(55ed ztHi$OAmB6dv+i+^_Nd{_-s~+?u90RgY|=NgK$4T`E29$qsp_)(E5BfeoNG_PELTDB zy{zB2M)EdYVpbp_O8N3n1-RkavamC-p-T@2_Xr+WZ5-WEcY8g^iJ|=owg6qWL!&^6 zF)x)}_*mUMTQ|bjOcgYh z#esECB=+&*&+_!_gTxrN+_GuvKhQ7|J!Y-?d#_yA+r0Jvq3bQX;@YBZ(V}oCXmEE) z@DMx@+}+(N0tp_pAb4R^MA=LoxJm&;Inp8D2br6qQM;$`NS8{c#-suA#{_PJ$7?o5e7KnP(Pqvnan zI-}sgQt(~a6bv-JKrdtow$x>D!U!Sk^m|1v$>xF%GL_3+S0>MET_$0E7{i&ciqWwD z?ay}Qc19uBTj|wysST7SmxLl@?&+jY#WRK0kZ}ynAUw&ii z#1!6ASoq|UaI+0y+PR9mhyp`2LPFx`%!KYuI10=Uu$3f(>U9m*RCFI2axDqKIrV}? z3?Ca`G+$aV)MUjG8`$E`=8~VOXT1|*m!B7dr|ly?7B82=js&7C?f2gnu!{j+IAf!4gNBiXgg<*k9pi@y3M+7(gGGZI^_6$=?52VcBVV7Yewz8;9Cy(oA z|86p$-{^v|`*C%=b&E@{SFO|EGb6%AG0RyJix1ikypp8eL9~p)D3R)8Ly3z-e3J{- zm9bpq=c=E9ZcMa!UDHYCL;|#OghDFVFW&KoFzjSor>EsK63Q%shXOt?Y8dvteK(eo zSj0=Wo;R~OKZ?Y5LbqoSCZ^587Cq;5@G@0hXEN1hXBOtg2RRn^l~YF^-eWJ#H$VQB zh`HIyEU$7L(*ij{QdcJP)xoMWAKW~0)|L`v__rS~iiIb8s=Xb8FMp#bo(5QQv5^ud z*S5WUUxH-MAw00tAY>&KV7}h5S}jQsFrMKuK+H#<&yw#jU;6gT9``eS?dnV;9XI9W zcdgYy;WxSKKVw1?C!d6f$xiBVpQTg_P;p$}H|CBKlkC?&EF##yl2GiCvS~)QD5gyD zTGjb=911+KI%-w~(}?I^B-1JA`|E}xPpz}v@@*f&+XtIg^y4YEgyRPPTS+ju!EZ^4 z48JT*@X79aBe5v(DN%oE`^?Q-d@K;vAp@9IbM;-w^F()hn}GAP)vb1Cc%ZVNJ;HTd z|H4qfm>2<@UMfJeFA+XhKh}ssGrbH7&PcyWb7XU;i(%ZoN6HpC?dCP%p+{n7gUIC{ zWeQ)i+S=JdnAsW^T~_aznP<#FIhK*Fo1=^K-$)*VJdUF?%3ZEDF@Bxeyh+=d2qnwg z>^gBn;7R}hDFAiqvMknR)3A2OFRrbUJL}8&R>4{9T&BsT{CLMD1}>9pl}fJ_bA`_HLraJN|tr{ibzC<<6q)-ec^%hT?% zkwXeEW8Z#VQEcL61l$eb6f>ah8OueJjO9;Zu3n@1R37-ZHU6mB)}|<(!yDsp&B7xw zLe~gzPv@WWUgJ6XIiT{ucHpVH#lru%UCNR@+A~2!D#>h6JL`(<8ISr*<%|lP836f% zUJqpPf-@=XSjh>JHlCThIqE4yluF4A?jD5CoSYhVf-(^Xvf4xsOWB%7`T`NlK7N)X zQok8JUF$!HsV&!vGj4S6tOhLawQYPT*b*Wo&?Pf~C;w?d@>tbHaxbxhdPH*y zq->l@sVvBCnre&&BLVD_q1r*7@i#r|b!S;4vA+iK8XP=(PVNRVkMzkOBMa5W*ni}86a<~5f728X zhh0h;Lsz4W-Mahgok91us(8tvADj9Af z`dr>t!RfmW9GV4TOM3Z@wBgOy)0Dx*Xg~ocEtPZ^Is+apt@-52UP$qNEmJ2`{9lc9 z;qmnIME66S?Kq5gsdtY&$RS5wg&6YMh?CsZnZT+CVm&^Ri=4@OWT7g@vhyFF{@>8^_Rg0U;gVD*JtG?8ZKKY8pF~fA`p4q5qrJ5WE27 z#toEh*G#!coNk_pLnn6uwW~>4y4T;ftRs7uX_9zlNY{!)HyGQH_*KnTC zbzL@h+;ff2SQu+U@((w$US6Iyvz5n(gB!JrXYd({Uau(sB zRd)RI+O!)}(Ax7Oh41kM*GesVP@N^#=6R# zV0|Edo}F=SvdDcAmU84Kci&~=S^loJ?Mg>>!J6tv&SQj!5-sOW%38%yhLME ztmS~}yIA^oWUATZ>9N)xu^zF!j@W!(-(wjMb?L1R7isSllK4v;=P%E}_0LzVDLpe5 zJta(wJ~aG1eLafYk4-aOrN+x~mRkdpla#jKQUX8$+ETb-a3-2JZ0GWgFpa&_csiW3ZcH=lu=e}*>FO#}k*){UA% za^=_Vq#;hmB2@6yY%jJDJf=d?G|c$-xYmHe%wL`a%P@6%cl2qno7gVF#3IL`^VWar z%&H3UFr$H-(ZB_H;V-7dovEGeyLPL$la;x3W$LK3k6XO@w)-!}x(#NGshW5!yrBzZ zBGCn77(6N@wM5A@8*Rs5cxF?KDYSGA-XnLxp24z4tFKse zF(*2U4%b17=dgR|;1!fRosX@*yMX17fzktw>0V?SzZk*uQw&TZ{9qS&U%PttGITo0 z`US_#*9a_jGE8E49wSrT-5kFxW&nP61J#*I4BlwpQ1X`o3hP@&wR`o&=TN|h*9k>9 zz?=tCPB7vZ#T-Rs5=sNU3H2!~SizI%TW(X>rQ@l@w&MS9*zBPBksie^CjdvrYzvoxIV4==S7`pY|vB!s}lFS&Ys0sZRK2y8A=0~A# zYgljycpx*LLpJQBF%`w!yFs9sm%8iiJrC`o$m@qMⓈFto4SMLo@{R9yaIXe0;va zO`|VM^Pi^jdcCEHhwK#7f2fxe<78$^(UNC3Zq#qDR`kBsJ zB?BXl! 
zoJC4~4^Kt8jce3>rgJQ&JXe*Oc?_;6(nwv66+&N2I?>i~CG+nOHY*5oIgLZ=K6;AQ z7r)U1YI2WOfb9h;*9UHA{U_X_@O8!FGvBOcC4~C7C;$4+)_hU2kgYupaQj+H8dsp# zIP=^DTy}StrB=5i-Uv(4yOz$$nWJ4G5_$|x{F&w!jO$j7JKu0XW?$o}r*^WOb}I$> zaq}{nS&Y$9L!|p=rSJA;or11T*Z11cHCmdBW;~jGKK2q5(`Sy&SASbjY?I1wpKUJr z@lrxKDsNoLYm$j~1<_HXC5k|uPFWvgEB$A>=9fxDV7r(X(ykPsG5VIxV}E0*`-^=f zn+nDM)cT+j1#n3*(1tx?<#y|IKJC80A#0sjG@%pKOmVL5rb_FpXB2LBXN<^4uus}b zgq3#eXn+KA(#wZ%INC1O!qCIdF48+38PxC=B=CLEr=BUt@v0Ox|Gjb`RG`|}|9P_c z70noCHK^0)m33~xvdtlP)VfrxqsU~?D!lb#YES2M_1I-4(+kx1rZm_^-zNQQqys?J zzIq7H^f=Umq31G&kD^G0;!Q}Y8;U5I-#4Oa$WRDi6+|+xIdZeJ8#4kfP|%$dG?W1p zwZ+O>Ji2C%c_G=OE(dt_YQRZS2^0aR$MLey;Vv{EW0tv!2Fkfw;Ro?cQ&!wD%EiGU&^&V#6`xT*^Jm1ZdrcDv z?*PEvMYK1r?}=(hm!KD<#;|PQ&aXRb?tI(2IBEyOCq52!tU90O9v&acTn&YVHJE=x zC}}8sh1!_1EBR9fKvIVa?g!`^-%7n(+5>rgDp3q!93ZzyV${OCSubc zRwIE$_SHTs^HV0y;Ys=;{>{G#V@O2YoQnfL=9b~xz+kB@l#G=;0TchWUEpLkFye9< zwEC#5z;LC_$WR_knSu^YQ0KZlXi(hsV;skbn; zX{Kg&Ek_jiTpSHTyl*RyE1ZRW6&G;8La=uWyA}#i)I>w#k>A^Ausv@Ea(QNxy7Q7n z{y`&dxw`>zz3`EXBKbVl8C~XU(dUGN{LMp6ra(N$p)lX$FawBB^$aKRTMPNGIDP-= zOfUgwawA>W(KW*0tRtP-`F6h-Tp=%KPy z3V>fQcg*9u@{5&8U0~4GJ9Q%MP@HR`!66?YVHfq^9KaLKLMq0+Y{#W*fQx9Be+oW4 znjiidwQrx+nuPAp^U^Ddrfw;4UHbz#UIjE%@MB{QUJvZuw6jhutK8Okqy~WZPewa` zlQ0iv`VMyR?u?+AJ1I=GJ|J3BoYMOtXrJ9yyE^IprV>6^gvY`Z*lcS7nXTbBKjA5o z_&#i)XTo_Z00ZUc2mWS_W^gZgU&SsRFRi^s&jN)ajZf=Q0F=E`Cw?8qputCe7@BRXs%6}@4wsMt! zHLSw{Qqs|B-HSe3+uBtrceDx^>EbQsNse3B@wVSmPt>3VM-kG~TTbdJT<@k0S7Pvz z^asHJCk504NIKh~wPz|;kKFa+-zQ9mm_HJgfXFn~TaWco|L=sPDg3uGn$#iw+>C9{ zDXE97j83t~l6#^Q*wdiFyLONYQ{uJZkw1HkK^2VV*Wv~0a%5Lr7fx?f*NgV0*QacDu2blw{^Rr00(UA6o6L%i#kJtYpNNF^C2 z)XI5touI?@er~6PWO!cTt(~rUr4h1FiRj_PJi3pOKVwW{8bt)vwADNKX)Czmo9c~; znlGmF;wynujV;!tN3*u^QBSgM$p)PUPrJgGPrd)%?>c_MEN#xoP|JVWly2W+(IotQ z@7x$jqt&^^tglY6KK)(eacAHE;;gAZV0>Ech=wafOq~02^ZXZ`I{fd%X$}4JQ767q zujy9)MUA>b4#ylf8W|t9g$Q!Xax9(hO zPAjGEMEZFh+1VY7{Ouz+J*$>6sXu=z*fvwF}sJTCYRl>?YlSgdW7mmc!_ z7KEj#y5e8YbCGR6RveB@U$#r7mm35<6n&4c6G!HmG_u%s~2lyV4t-tSr=FuD&LXJCsv6 zh^}XjkZ3QO=0F0&KC|2oDU&lTODvG=&#fcU`P1+)h;y+X$LKL6l`45|QNpY2yYJ^g zk5r;*=+QRLN7fjq^C8c4e_WZ5QSb{jYuaev-53`$n}u_&cAHZ5!gaVtBGclNOY(c7 z00X(JtYdwB-7t-txNu`zZ?r6pwZFstlPNez?8s8b+64MY1htXHUjP-rdd`-1d?TuE z8m%UaA&LH)KXDb+vST_n>6WcUdVhB!HD|ljtx69$KQw-!clBSZ|E0zZq}Uw4*RI?IPsZcaNbU!2LTmL|MggfF;w`g;x3;wrw@a-;!ct~{Zf!)=Gz z&j9XSgIQ!Gaq%}W9pYX3#($!<`s+KIh#~6iagbgsy-vWtuE%Kr0uv11gO6bceohQ? z-#$+bEA3=+VB78V)TA&J>3OUKt$u!t{4vU&T&xbixghADYG-+>V{fY#NJzJeKPnha zE1Zp9^2JeiUo(!;=}+l)VWwh~N_(hPu8kg}h$7JMj7S|ti8#XowIubHe2k)AaUnk(3Cb20lv;5c=?V2 z>kFZwQj%H%~jg@&H- z!jVD$ovXJ_^6$yYn+kTXDjnB}(9jv%9ZBh}KE%*%xntnpP0F40lWhpswEKyjFt9WE zt+cKnP@>OA6qeHJQmP1^)og?=Dm6M8m7QokJOaXkCn}i8JRi1tF(!2^1s0P4H@7{y z3h_<5OkILcY|1?Bd3hYkt3UYcH@78Im1InRY3{K_!-5!-bcKirfEidk+U^s`$*Z48 zN`5`m+dGR?0}&ed-oeOW2uc4XpvbQOzOEZpkJSJdMVkdXfklxAV7dw@O2CE!1Ypo; zQj!0vHT}SzMfwZOghs_z3d4$5{wj`Py$Ed|`$c}BZCR*y#c>-M)GY~P4I(H>8D2L5 zlrrl1WQ)+_fdIZVfJ+>oEOZthm}NE=X0#USz0|(%H}X>OdX0r1f7d;+?od`Ijv_rt zldvc9Z3T?55lf%weD_dztL;vxjyGiXPDMm_2R87vL25qZnK=0A^{+Ss-z-L zMy3gFrWnS`j^%aEbxYNkAF8J2A?An=tTm?o=|6AWHB7#6M|V~*-V>U_HtS^%Js?2r zS(OB$zXM#{oR+;`I*5n|@z`5&xZ19jxwo^6$sQL;<)KzC^otAbhgNdNel%s_Nj)Q9 zjDdg%PZzxlja4NWaSAl2#7mBI<~P}TYqh7j%Q&wPs8COIt&mE!oVo#kKdYGzfAZr8 zy;3e24yIzEhtn+V#>&ntpa~g=$_!PwpMd!(zfHdbbuW~s{xaP^8YY;MLF9KNTpHrF zU1;uE6CJejeKN~se!Fb61MoJ#!vI|#EQ&Yz?Cj!bUBRwr{^92(&R$*A#LJeh3_(tn z*OGUn<@v1G?Q)3)48h;BQrLU-TbSR0!s>goLQ4Jl?ySU}&;A~S#A7#~w^R8`%9F8? z^dVl<%^(NJc!CovZ3|rl0Jxvn10%%lM_5XA9D_>@XE1EgrjdMSMk26blrP$=O! 
zfU_>mcR$`@bKlHQkn!?Ob(o^fAfn&cIA=z@E|Qrur`s%Ax%^(d2;GP5e6nFWzkFHQ zl3S>DWGkk@8~^W`_l6k+{&Ue||N4&eZO73=?rA;ilIy~xB&n(Vs525X(i~`&R{k?9 z@~G*dl0QU1HkR;Zp=@k{XuNUyBS=T&0-S_-`95A6HQT7$AFwnUj>H~&vso_=lP?E# z`wWNcb_cDb6i@P6-{M`o%zWiYxW`<;{PqMjL(@PXKnv*rA&+b-BQs)~O+boeU^0+Q zQmS>o-EUA13}(ycU>fO)OqjwGi&vw#(TEH%lhGff9pu>PPl!y_dEV{EULRv;isO(n zVJv>?DE6R_5|IHrll6|v3?j3D$Ml1Rt1bQ0J-^S)v*GMO(*HcG5G|D(2-x_*3M(O) zi_Ry0bY#{2%k(h-d9kL+0*41IdnZVDvgKI1o#v``vWdJh{M+}G16p9&=fOr@VO$4eWH`_W^RXw9OB#Mv9$-ZD3V3sHkBp_B|3zR8L^xqqduN3G6V;4uViyiT?5jIBzpTNZmw#Si$2<{( z+-*|PGRg8n+x2Vb3dqSl`9-hqUE8vOFI_A`eEzNKgy!j_cy7W|SgRjCzZy_0Ccyz6 zA&=o(!Rkj^_;6I5+?#6Qdk==YndN9WP-W920a$DLTL1dJPU4bFXjWIBg3o>FNm!wn zc5a=#;BdI25`EXUP3SC|Ouk5RB*&oIFhVTHN>q`wM?EF0YJB@a4oYuDdLGLS8@r!u=x>0!%>MfX^tntdXQm=kT#rD}lR`Gj^`HBFz@2wBznO z`I4D)R+#22y6_=`3jV*cz<{TM2s#c??ZTlvoz}NYs5Gv$+oZSt4bW^Hoqhntt5wYFEdei*L`FN6J3%cjpJaAye zxay_19_yEh_DLBX1{fd*e=6S?xz3l}htsZhfWEI>fPgR0y7I>*K+)y}7!LWD6^xUP z3`)oFrgBGB%*w?jIqAR6DX;1n`0n~ZL}`lKp)~@T1PK^zTeq*%^?u_P77t05gZ!O? zWS6RDV`T$Yf|@u67?p#ZCWsKRF;r`Z7m+OZW3_8}y5}G>##Q)S5Ey9deXm&Q=pzOZpKftW6 z5hde*939G;6!KbAUHCj0UpgB*Nc}ibRkNV$n7}5I94XYjpUgbDPp8y8U}Aov==k^q z$^vNgJ-*>PhMxWH{WB>vd}&BBu6^4kDCcW!ItZ7lfuUt@AK8zSo*$nmlFJ`!j19sT zQ8a1>WnR3fuARN?KPVSF8gNbHNY2jBd#`el-jubk{d7Hr^g-GFKwPI$!V{)3d9=bj zbLnEg{-I&otVwA9@%dYe8_Pf17(>wMs%R>I{V2JWiX@$d3ZMkfSLS--cj@a{)jOZ= zmDI`km*sakRmUArrqU!d&5b1oO(7fXupv^O!x?R7-@*4UWLmDoJeB?BrCuR(UUsV~ z#icb0piTS3Rg-+rL85~tuZwOwq|2O-?{bjQADBN=wJy>#5BWC2?j2;gQJ~L-@*&ud z58=?O&NQNCk{@=P4l%9@7^9RmY7#AVHsaT^wXzkd9Fg-5>e@AaMm}#Hg9+X(V^5%B z@62ttrhh>f-B&IAu)hBhh6WF%(Sb(#fqSR3Umx!FHV)^~$w?3$#B8@SP6g2~P!{-# zBK~jvvugP*lsvxwU1sW>-rJ7sWhm)rDmK6c-|qM)f@IQLv5V-G%#{eFZ9{2`)vZ9@lSYK7UV-VS1O7Nl{8;HSu2o28avEI(H zQ}RoMNE=E6D<=++6JFzhA5OyNOL5Ll?!*sZ<8UHdk|@3_b{-s>j4B55?@Y2^pA=~9 zHg`)_1YDyO#)GNDwN?v{qqjNXAgxz1NIW8;-3Nb6Oq=5l&~{`OknYZ_gV@b*nz)L| zeVLyqE&)HY;0z%R zNNznFLBY5%k4KJlCcls7qh?xNP#}}aiXxr~(QJQ3RZJqY_d36{Gcy~wS4%Z#)q-@8 ztu*1?#1<5Z93vd*MlOB7rqx~UqKjPcL%+t*9Zq9gI(u9Ej~=mbPry>Q`{$=^+KP>r zK=bWb9gpP(iCom#%KS##(IRuzFzdSNV%t|bE5!LH+ZlU#c_8nJHheU%mFxQ z^eh){i?7&vijYv_BKykgy_-e08?6Cl*G1hX(0+`gorzkhSlp+ALnW)I>3Jms(ZjaB zc-k4^L&*+~0nNax;Ibx~!ZrnwhD__i8Hsd{9n@!S0dW)oZ4@a}T?NnG_8*U=eZt^d zrepU{iFMFoOas$`LwR=rT+jHd%f_jjrmsff@wmD;g9fYup!NGGXihxXF2miM=@zi~ z5SyuoM1GFbZu_ma=v(dl^JvrEWoVhM*{zo&b#B)D^?KN& zOewYsoNcVzZW`%NJ;~p7rJh=MDln(X_1L1f6GTizQu`Hk&$KDa%rS9X zAeIQ09~x_CgWVPGzi3W565!P)ISwcO5aWG955ujFNqXg!Emfxk)T5{b2wHCxV<0R_ zsj7*j0YCxo=0|sG&om@Ae=AJ4-H>alTrI{0PK*tQJj06JDD znz0t&Ic$5M9ITqPzsJHYt1Rix#QwJnfUdR*!j-l+!Y6~)jrlIa=>ZYp7~aS28HcW~ zw4w#+l3aTn{Oehcl^8wOi$77wGRA(Tu6apkqJu3WVo}-|hi9+o{!epm|BYbv^vyAG zZOz!HPK$9}qc>uKr_Uz*1e_C|8{V2#^Cv=AV5tfJ!}@Ruh>PF2TkM&KV=;2&!bZ+_ z+s;p2>gQgY7qwNeVov(dHuycsk35JfRmlK`6O7052<1Fv)9C&&o&5QeVv($8`?P9@ z(0QJ5#owGQcuh^7B+o9Ikaymw@ZQ(Yuf&UtRT)L6@K{uEi-$jl##(u7K;%+=h$l~t zYZ?H~NhvmfrSIqt)UeHuH0ctS7@DhjN*{M1!Ci*J7c7o5PW?%>wr*wx&W6@O69J65s0mkYxuj zl2ICFl1O)d+&xoT{iQIy{!&0muia&7i7e&gBXO1t?lmP@LJD@l_nB^N0u4=1!Pv{D z4Nvln#`e!P<~c`SlA`)m$6Fzrr2jtyL(AC|Dgf&yXOteP|9wxklos&iuXZrt$C#n292Z6dhnu+5$kUXH$eP%b)Ki6<*2iD z^VyZDow8=E{w00p#Npck_(X&2w!Zs;Qfl}u*_yc+hW$&20k%>}Ao`YBSX~91`Ek;x z32YzS4#*_RUqlDZ*dkZsg5zyw3~2)lb1IInR8d69g)DIK0iI0BbkTlL=*TPFC=bC9 z(uZR`a8~;$JpZ!ja?Auxb8*GjTQ_5wZ71OosL>ve)qD6!JEL10{_7zm&@0d9e#>@Z zcmt2w{I*Vrg_6q5ByMa8us?5}(c=$Ci28V7)U%Z~J$*k&n|@zmW}(7a_aiX}$n=Vx z6m_vY1f9IJ)TZi2&$Qo#=J*$$rOs;x*>lt3SR=NqG3D&b`vf9E42vP^)Cj8%lZn}hTulzG!csQ)~YYlZL@unlP`rEEq{AYC;itB zB51;o<6YlUFW2#_{oN8*0^1drfy@?)YEh#<8j znFWTcLOz@!0>)-^t}$Q#r;kUAiJiOGO3mg&OQ%)=4q36cE){7aLBEEg9N 
zQmv7mF+d*GEheK&(0&dMciBZ<%?Y0TiXuRZ(sFTu>J<=0Cy6-Oq(Ean-P9qjCi8i_ z?L6(y{w3!tw9-B^wHu}$nlhOdetWCsT{PabI|dE>W3g1-xHPp-cW6+vieJrPDtRhI zJZNLv9V2*qw8h#>7ynh{I68!BDZLyDBNufi{!XBBme_O){%~oo@}B1KuT@<|gv(M@ z&71i{I+yCE>9WH4wsG8xNcaHzTHHf!;GX84Wqsig%~GSL!Pd*UgB2rWcP}%r&wL^% z@2ji*+9v{{L=z2*`2#whh5b1aJ<~_|`T3WpcP6H~9Z)M;`k%#^SQK7avDJUU3LDp{ z7o1+cwN=jLBJqxV$oqMrkAf|q30I0gC~JRb^{w12c;vp=xSSMF%D}DPBw4>qvS4Ow zsQ&lgqZkS#)>!2gWW#>FXJSB)Y;cLC+*Ew&*4nSCOhRZh9!H6^>FugzzAT?Zzpu#* zz@By+6=SsC^O-ID!Gf8S@S8~Ge97=DQXY?jvrRky@l)9CT2JZ!Vv2-c{z2=b{DK#d zaNVxe6SXmkzEAD;NP+b|R%FXey9J@tE5?qK7W@|XA<#>L+a-P91v`GSDgmAOWBqWR z!9G3O1g}TBH$(Sn_o_8sRtiEWhb2|Go!x<&|CynQYEyg5`H9rUKM(X*GGgvXv&Z`D z%;@_KyTOc2l;_ku!XzeyAvu-7C`gMKgqNg*Ew#s!2-Cdsr+P#KV1Vmh@L8LxEY02q zPD;O*vlC;=1#@_av{EFDIGlPB_`<3TAIi{66Cmn{X+2zm@_h5;>U~FH_>No>g(MAF z*}VJX4owOBas5I5EOlwt+6$K2*FY|-S|^JREa>T07cqKp;k$~8&j!)gt=Fc0siD8o z@yZ!P_!$IRTQxZFz1k_UBMG;)V{*G@HZ&%ldtPjcNH!;aq$~FU)*#Q&E})YcUDQE& z%pU@keXk@TBSo21!9m46o#lj>F2=PuHEc5od@m&gD%s z>r^x8VHKfwTq`=jXoosAx^TYHe7$b3O}$vooD z9hxK#_=*Q@D&$NbCcUgq&ynLh=^wit1*hXNYQ<+lVD)KFz}{pnBdZw|2QXvcbB-a> z7OsUF;a2lK{q2oS%Kt}<@ULxkiL;o?#Na5SvMrMKPx!pDxkIm_OIZ8!w4KX+eB#n# zkcKwftWDiDVTUg}v0*b&hY?V|Nm}>EGL^0E@O+uOUE5$JPxR9=>+#L*9BX|s`1&Tk z(t8CeuxN)KF8>G9?Tq{_7)^!$?WyY&xDBWLnMQcOK;)+6dR+-zK2csMz28lMRm!({ zy{s^-ndGjWa!sPNM=Xg2^SDaiCx}Wo5`tt85yii3Do5YNq~@u`btqJ8D9gdP_-}0( zI&oqR>4fJo!7+jVE~;@TL-R}U8fX{Pc3!S{y=6pm|L<+}VHuC-^JkOv^7213i-E0& zWOq1Z-Fixz^t)fRj#YaR;;+#O4*(>B(PF7hV+#DwG^w*`&`Cj$qfT8Oo!1I74dMX_ zp2b-meXeCod1sK-Z@%bDQAuu~AN;nkVp;AT-JGD& zoaaYvg8@$Wj3GDW?f(9<)5)OuWv3}N z_mxbtIJ37wruCqLe_g$N`(1_}%vskE*@~t&RZ0asuxmwBSNHpj{Y37kJBjoeu87o) z;=9f*yr{9Q(r*o_IuC=^Kc%Lz^zQk)Jf0_E5PMxewU&MH?(0n?KjSiZ({y}CDMOss zXr|mVg4W;Z*~SweH!1d5IrMnbE^210koQdg$X2+^zig~%VY4!#i@@mYs`-&7qi6Sv zB<Qj6c1Q9QPODt(oQq_1Zn6D zC^9O@OHb=gs@1@BYqwHrQEBVi7q$rjmlWdM-o_)hx#5c1UewQaFU$D0b`^+4EDeM# z;GSf)I_Fwm4tJf%bl0Hnb<s;0jRyoe@A;IBwwi)k(POmH;#I%-4f=Qef>lfDkj@qJhP@OO+DCnK^Jt3vN7Ezj@dvO7N8k!kPJpEul+ABdf?m0!#c?S(c)d3T6rXDB+Zl z%1~2WYLSz8i(%%4j?v%E3rVr}^|!;qpS+A?E>@xHuCw!aviip_7Ag4pU*;ybbho$t z6wzbJCf_i`5k?+MS$-#y^-?NAo;M56(LP<%y7cXJXuPb_V?NNa8+vr_$TFP_>6PWz z-chQpT=`@Z392x!#=q1CY=)yd8%UNkj+r*mkJH4AJPy#$VrF&--Im)NDk*1Dxtq^O+x8CA*%k`Y*)&hTY^S&v~X7x{I89#h3{e#gG6? 
zYEhL7gx|nz#i)*JeTU>71p}d{_xaOej&T?^s+LX|7KAhQ ze{bE46mjXEU?dSMi+{n)l-_uYMlw6A)0H#r|D3 zP79+8P8+{&(X*fVAPpm0K)^kw^1hN_)xLOZ>isa^YCE+YNBkj}Y#n#}5dV9ZMcj*t z&)x;zsNpe?;a~P!Kh((C<*Ik5O7k!Tpl)a$H4Y~LEO`82K_y=f-%E>s>B413r_mc| z0n3N7)7Bcr612se8z8gjQM5R0Ds{c)pWY4s&{15o9-Ui7Hu4t)0R-#o^y4>osuM@w zO(VqAb4B+AH55cXs&PSuK(g02k$keTrS zeKPD0`JGEM94xG_?OW3){|y_1Pl6bv`9OZ&=Ei&01&w$YiRR82BE8zVGQ~p=~>~S{opo^Cn2|M<7B2`PO zKDsM1pA?E+Srs3}1?FF0~{1lF>>m%}j~LxH(m;2cpJ?9Zk- zHQ)wbmR1*}L4>~(RVe`T$~H;x6ym+Z`$EWH-=dx6#4Q5lNpge%e3Ub!u+qHc-q!X5 zfBtxzXjHQv{~RXk73W7z z1a`|wDU+)|1C1y~n~YQ}Wr$o#rq#?D=Zn#?KZ#=YqWCG4S_`2-C`@tWj^u{)Fn1Y$ z-kj96ts2-C_s@*YATa+ix-c6I$O|uyk}qZ9Ua;rBGDlw3?KK;%ST%lpn)5b!?C4lp zN(DJM^sHrCsH^CDB-Ya}K?Q8qUN#Y}3kf;!Sed_ zkZD2jCujbLGWRhOkMT!gCmMoF%Ey~{;g?!PBP#()HYM#n_0^97OG5#oHq&ol740eC zUw%wPNb604k24vb|ATn;`#*#!M;)%6lWQHK!10LYv;BQT3^*vWSM3-ENKAH)jCQ)| z14r>sigkVVulY}$m~Wgw81j0uN7`Q88xb<31x{2EKgtlKPmdb>nnw7l8Asc_1SFm>#z^plIxT z`u$Vr68EZFFH$Pvp>hKOF=ClcwnGX13}s1xq2gsrAys9^?MBr@mqX>%Skn=QA{E(x zWF{571@=F5W~>bLZh-kxBvqcKaj6r@=usJZb%=%?D+P}~3wkZbb$Ehxk)%WC5z-bn z7$HPlFm4(U3C>1vwi+6ghe1%lyQ~nT)$Q1VU1H&@9SgE7Y`LoxloJ?1Lows^oSmxq zK*JeFco2jQl?A#pDj-8DqKgWDmlwkQM8x}gtMnc@OLRKM*ehCu$e)_Q>THe6^2PXh zpt=2#T%$x3pn)GJyc)RUItNW)Bt@)t-_?^|Ers`#DwkW6zI(G>@`Y-$3g7OP*<)+5 zr9iA#@;`VS8b3!Qx#e|4%Yj$wUvl$ds9Ia!%@-omG395K1DS)cixEIgyr0ilSQ`Y6 z!)R0NJWE0z47&45ua4%=zB^4kAU=5Jl!m;NK&voRwRH4*;P7Ee>Mj|t1(QwiRMe+b z_J7Y-Pno)%>d4xt&Tdldy1Ti`FS;BP*N`te6&@cD)&!9+92KzIOx_5c+-)6ChJ!+% z%3rTs#7pU+!fu%>!jSGUffuLJVJ$vUXz_Yz4MBG0bxDf(v!Bj<)F)N*H>3> zI^WkB%SUJU5uAo{XLW67GM`$Tf0R4! zG}4#8Ea=ClJKo3d9R3fF?s%Gh))a{#2)14&Fuf?9fenAfWfGWkn8$7@LwfK{|oCeg(ndlE>tbZM_cip0XuH)=8>{Q##F4Ptg{mP^hAN2G!>+%4T90c zd)m2}Lo()%?bXiOT|J^;E;`g|=Ffe;zriFvIMl5POAo5ecgK^SopL7iN7S#@v!nDn zRMUhAduWZ_B4m?Gl519?!!$vdn>%}AE+@{#Fs+7nMg~(&a`Sf#>5-rm&RdB0f!*2z z%PC?#apEM-FA+keiK5dV?5k1~{6%_1 zLg$>4qEp5bDGGwzK<{qBHcAOUk{IotN2fJEe#)b6+xXXU!hNp z!^g)yXBex*as-12UW&I0fMXY~xBzP~~kg4*=L#;l(Y-+A&27cT_`QH7FcoyJ85vaZpa?=+?Rd^azWci@* z$4%L}D)ij;BcIzM`Rbu}Pm+|9<04`7N}o zRAU+`ORfD^RWrgA(V3$}7MdShE(qZdk&kT)8@=*uT{Dfd3!r5!!AZ|^(gJ7K zu(GY~#`MtaUkHx~G7~Bro`> zitf1~{}qos4}u(RG$U44s>?^E;cq#>gT$dPsReynyzWYE0}V`}NY(RhZq;^f zl<5iM%awQSlI10fJXtAf365@2n;N+Of_(QX*0_?#NdZQhK+DZ#9Qek?bwDzgud_v! 
z$4f!P9jMREhlorsh&C*yAHZ@jfb6HGo<$dH(9Jl=H!FExHu8D2QmO9TG8}h4iefme ziOIAV;L0JHl6YC2t6o1EXHXM40NyqhvnvTho8-{PvJg zRdfBWK>mjQn(_zZbJ9>n85>YyRQfxLo~4$gh9nC14l1*MgKNxE1AeQuoK@}lO2=}I zcYD$qM=uoHrWIE9Esyj(d3mi#R>5iMIcKGN_y3KtbTqyLroFYrm44=yb`bf;X)844 z(>dn@#-&-sh`V_5!bU6S`_jxfS6u$4Wqv0zg`6^DU}MEPcDS7zL@=Zzc!gpmseR+o zX63oXiH%bn=C#Hw4fTr2Yzh@%*c8lutAVEGBVUt_6yecn_OCv-{AP-T#%|o`THA1f zYNQ)C1omk&nqLSkT!tpmxrtUvM%6$P;-e=yVTb#BDd_wm;U4juhPTdN9T&}!=*+Ks zmBoH=kUS?u)yExe?6H)g?nGRmzy{Jue$Uu&zvl{2yp2q3WmD2<#%<>*c^lD;rIgoa$R-hdtrH_tUxxrYUTd|pfpWXxf^_`W|*>>I;PfT+*S z4>cJ(Db%n?8@7xa^*iP6-;V37e1}cMu_L+EcW(FPWxnG$i_H5bZTzg&R$muo32ai0 zcm~}NV84j|yCXD0F+88Yh_~ap%7_v!sueQPi(JS($BnCAW}EeWJ2Y2fNT;;fyFFr# z3rwXI`|#Ez%PXz#?^R##3yf*>VSkpW#sBF@{-2Zi_ka45RAeZ(+vrQx2E08o>Z)ph zwWNlqpa?G;pdd6tiBr#bT*5#E%mZgS>m&B2toF8cw4gZx5H7$>!_Hm+Y+K~8`vSDU zXj?t5Tg9N{1@&c7vu{=g!SIX!Q>rR6CzouS*?bN5A6ViaOsvt+R^L0;)0B0K<_|K zQBFolg_#xGvjH`57fES4B!zklv+Qy$_n1uQ;Ze}2d_`kR#cWyBn~QDB{D8-+wZ8c9 z#W2}9*<|Y!<^+EdYzAk_r2F}Qf2lG#T?3xJa_fq(_&%ETB;^Nxx&^JWA+KawFVdd8->?NIk4aK#R*lT9?YE)RtAG$(@IPPqFwT0Cs&r1 z7zz~``bGKfZf4wKWbev$WnS4mJLc=ME${K+-NbYYT%;H{?|tQdbP4-*YEsS&vHYnt zYtnj;YFz!N*2lpGwQ zdBITny>8L>xpQBPAOw7gfKX(1b3WDp)=e0V92^{c0Y=YfCwNj5N|(!G8{6tzfuJ<=bzeX#(BX(D%XQPK%;U zm+*2A^ZNU0i{b+mJR()zyoowC_N7ui=S({8Yl!su4XGAtB-91&zC070Q|)J!x_U^r z()S5bUNZ%SQ+XgotYeY!dIo3$~$*tz9_TYwY}bK0V`khfrvb{ z&`MV;pp*gib7{?}o*hVnf`H_IwWR;23^PUlPYL!noW#_0u@C`)CMH--P*vjI{$PezR)Gdi zyYVs=0R_PaH>4QBn`;;T2#g)~ALCjDFiG<@NimFna)B6A#7 zGwW{~7lT1^=f47F{Q|Q)tc%CByy{H0-r2v5qX>Gm2lKzGSaQgPwFkO1om7{9DD>8U z3p@aWc&2PG)(;ua#%Tk74XXONQ$;3+dL9yd`M$4W@~wW}N}K8a^FpF*CK(CdN-OUE zD=gw1&Xg(!WC{WEKm1lD=X}tX=H!3^(ln}R8#P7ee!7sf!k!C)+&5?LMS3Xu$wUZ) zd86+sJ?43;@9BDIriO+hnF!|)ud?|}h<=BT?+co1AZ{1y#$9?3_@*3k;BZ=94`=Bt zUU%I<-%_j!LM0bG6O@mt{=DQ)>Xzg%w=P^=jEK z{)GcWxyixd*AqE%Q5w@kGsx*Dnt35V*nVC$`S+94Q-D7%-kM95GKhNH<@#w+X?0Z+y;^eTh>i`3xL|2Q;2 z-r(;IH@P8a6Rnr^>#eO|Gw?|vSzyLUUG=uKvUfOsXIwK9i%DmDWI8VECer6^j#&}Q z&0?*IVMv@Hyo-IY4w?g5C)MR=>1Xot*r`WKd9WqS`KyrRp&b6*g=ot)Nq0xVT$)Z} zZXQQ!LC`=jPALb4!p07GInHii_u6;l1QM#W5wRGzJQ1&NxsB96=XogEp^5WV;q-6y z1iS)N2s+rIE%criEn4AocjnIse3bm@yIvtYgnvXVx=Hfcu%-eDzGIBaW* z@Hppie|u1#U19BL`_;oM_oE^Z1}67Q%tRLHz-qN^R3DGtu*1fC1pq<_!FJU_pbRsj zf=H8Pi0kBjC?rfNq{ZpLen{6;j?LILOS0U%TrTO8fYW}8!c4bL@L-{#<~#Nr7rCd9 z>|bR)`T|xDG^~II^!ruzS;OEJM96>LFSlY~;FeKkpxSO_C&eG6xz$P2RGX<#Mxq_c z{G(C*ZkyY@_q;#F-cUxJ2uxOMWnDI4*Bh$JgA{xc*IzQzK``7kK=y;lG~p(DHqX%& zX+DQ9($%vLyN~~+A2jl(;=0SmpEktwD;mwuiC56*#QDN(cH?ZeCpj9K1J@W zt#qJFYSnf(m>Y%dEf%dG(NYj0ZewC+TQ64{)>78GR(Wyr{KoC^B*DUEXqY(M7zErJ zrL1C|;hr#>n!&1O+={7oN>#zY8zoqZVcsQ+%y`Bxz^pdZ?Jh`i= zu-;={epswYIeOv|)#b}J3uF|!kt3HvDYQTVxC>x8yUsPc6hJm-??40r9Ia;ugWgJ@ zW@~*N64(~v7OUzH^T#`UkC+!;i?A06dQkXHZkSdon_|aYF1lkcV1dM1cUj-LIZ9RP zX9MgQTmEbm+f3uAr8M_*v?ckXM6#>=pF`BiMALwV0;A_rUirzr-d;U#e4N%YpIq&i z7wBW$9NcvgWk+6|ZjjB6s9&C4-;e!#Aj&!Mdb1EX=;HoWdFp;r{x% z&AGB|8AqASiu>gk_x4*ULHrz2=N;7mpTFtJI`!pbb?#0UuP12tpj_~a<6(1j2r(yQ zBH!DIw=-(C`&Y}lH&`5GXP01W091R<{Vbg}2AcKDDqQaURC;wpLer(H49a5ED7*Y4 z<`dpSU#2qx(wI<5T3Wb`n zERbNm-8^-n`MuXuvFXWT7B9jLCM&wA%>C6+zqZ%B@-Hb$v;zKqd`e|`p4|)Oc9U~L z_vWDd*LtOHQ0N`t%SkuBXEx~A&}x?L>|Dn%9e17cLLeXVsCVi7!$-T1q=hQODCEO~T( zeMIEog-o|qR@@Y4FUq6^D0r(1V0(P4i<2N1%WuyF-&-IpVwj~){r-cWN`@I` zt|IOkZYAJPHJ|lKjJl2`o3bwFN);)M>&cK+B0}W72O(3bJBq#|ph1}MZ!%aL+^@l7 zTHnQw`NF!9NCCFDnlBl-!%Zr|g#>rm>m<9d;RxJFv=JZaV17}C-;n}h%J1A5e@%`T zoXgMonA0Cm!L|HX3IuUl22*@C3SJm(++c}-`xru%McfGm(^ob*ug1+HDt@9OfM`2Ez@{;&7zkx|0s z6>kTeTpK)6jKOUyQQDvFi^Rs2H6yvY_@A)52~u%iSRc;-gvittuJf6b&gD;`_!$h@7j);pY>Ku~vKDpy zvrnaNg_PV;$;Qyy&USHr{=abEiwb0J2(81MgaodZjt}nAN~@V{6F`ttF2cb^c(7t$%dd@ 
zfJD;_g6oU@af7sT6yQfW4Hn+WBqBNsDQlC&HqZiD%Jdym-}4>(@lQ*VH#1A{I6l=cVG&pQBHH=sQ zaQ++%Xy3Z-)CjMk$Rng53Q`jCON72hP<8oJE@Ga4`afYZU;*p-@BM@w5Om{mr|XTA zHZ9kKs%lF{&wD>!pIt8pvRno>9vd%3VDN-bIn5%H#|p*(;b1ry&9-!1P(Wv5E8C}C zU;mGg$@Q=Sm*UU|BBgqkAEl(E@gW_VH04I!Or^a8{Xc?07hH_)Cv9s zd`AYYICnPgv-Io(Fc=7Pve0CqA>U2$kK-$E5gAS;{a}SHP^Ir*9oKm{D_PcR&L1EM zffb|+uPJ|-RC2}6Sco?qT-p0ds4 zuI@-M_0Lc6O-j3!^g_3gOS zVI@Bo`Lh)2zV=54}eu{`Omdws$;=aY3J3%`GE*g?#*d6AyJY@Hh6(VK*FaNtGIX~dSs_Xgkdg^) z6_T9#_BYk2gzzSqohsFH7Mw>#&=9>*+J5C{xE}w99Z}W0FW(JHOE9$-L(%vOcdPXm z5f|cd?1`Q9Xz}iZz3p8mM*e{DvA3Of11mhcF08Q20yIPkx~rbvo_gF$1|vORZ|PRm zzV8|Av#kUfV+XN0DxtA=#KN@RnlsG~795#P>UJcAEh?T~e7_6(t;q7vv00KlgCP+d z!6AjB+bv^1l!V$<|0Z8dfY+t@o)vA5jL0f|)&Cirid>-?Qo+ih=Q*7r?P;DgI`B22 z7a~1?osA`y-2VC}6Icd%P6hvfHr#3-$IdUAI9B$v9o}vE_kSBy`rK5?W(kt@R+pu^ zC2y&;yX|l!nyuuVYBi~ql92N{&B3a5h3+BH`N+TOJY4&qNyz>Q8gR0fbKMaEhu?oh z;=U{WbRtkfS4xx8v~e4`Xx-Iiw`cbp{$XXEZREL7Sw~x{TE}2FU9-=|)-;OwGtEe? z{gvB8@&93({imTL&hu5fdiT|Kjs57_kj3Q@RP5R26O&PVKnE?8)GtB!k(vKfB|tj;V5ev7+8OG@~$6-Q-(NTrt(n_t92 zmRvQA<-%9s&a*UYwAxrkia5(QLj;1?%zFiYBlH zU~!T$)m$FCb@J@D(u?VW9Y?{8fb{R(eP~(i81b>2Eq_Q`)V6ATdvf9S6&;D@eSy$w z^%>G!IYDu#eQ1ZC9uf+bQ`y(v)hJLEVs7#9%66W^DEl)mh6cMKpDV2nvaqQ>7|_sd z8&7$RMb;?;DhRE%YQCvHlMoIc7||+vCS-c{bM9w+9?VvDyt;4vsrwMDTV)W9y(635 zm9b&TEed--tM;jD23w;D2IFUt@8UMuROuRoNRN<&2jThYGx1-HZrx^x()rVg0&aT} z*mBDnVm?NbeI23lZ1cNnLxGn*gO|%nXQqoa-#QCAR+!#bmjNyvQL;HEB`U~)y>-3u zh+Ha)*%cm(n|N`RgcqCO$mOscvI6IeRljy}Cl#4{n3vZIL29dd0jE>)S9I@AuqHaZagcXZ4kdU zHcV5-AmR4MEEmpfncFTx*2gZ=SN{9D@V-QdBv+@7!L|y$$ZzZzebF-(MJ`ix_NN_x zfuVXpuK7*nzy9r4mvcYyo~M3y@xGm1)~sQZ+A^+K&ibn;BdyC?X5E+9m@wao8MNck z{O{$GK3x_^L<-tl*Fjbihxp$na;AAp3#R4HG!5~sWm@W8i=tdE8_?v{tq(Js$6iIg z^)Eg8demaX*MAw+!F{!QzagB;`aMKNc~Ll<#kJ)F3Mf*iFk*E8Z4+~*ZN4&7uBDX| zhL*GlOWH9H&YsHc^>gzp853eMK`cyAIdFytGNsh{Q6B*@KCg_MxBTmC50|!knSZIB zd`cEGXykk*pd%TJU2@O*mEovwe@qDrj^O<}>=>DRKIpod(oGNALKn}#8f&AR&O@*r z)&%7)Tb^Q1GJs97GuRAj-=V&anDHBcr?a6(grSnRS!vDK@%}UxU4k{EtA6wnlEVJl zQ}`c|%735w+&5ic)(SC5JFHKG#CQ^`vSzTx^hxtW3BQez{g>nDNsVFr4w^4U%3q*U)*VYW>2%~eURR+vJ1Peb*$BRx{aO^$bdy8tYf2EHX_az>{4*cDhT-ZX80r~Cyn7J6?FpU1735VCU$*0;WT0Sk(jm;X+y-c96tidC z>naDkYjaYivwL4lELP_qptrtH!Rl9}VPFGO_7DAt-KUHbSmaD>h+~GKEcA=wOJ%0?v7z*sH@$YC)K`J(y3{kdr zf=IuD=Q{pmB+Ijh7|ysJ2(+?BGLY6yPrfjvO@KI}GsMy&cdfH%AfGpx<;|pAu94Qg zSmxFav6|#xedg$YH4YZGiSwX%U1E|~@|#@6IVmeAHyfSgn@vkI{X4E>^mi(!pmS04 zVC(_b`1+B} zRzMFb%CxIvHDyo?kUb~GnL0->_k|}gWepi{vEjz!3KN4o`AZ(Pc~`vZLou^xg^eOB|*XSE&-@&#LUja|V;A`#sJD8>i9^C8M^0;0` z%2XU0q`JrxL>)xabz;ee`3=V!2Ig(u;6`Vh?-W%oibnv3DIuw{>-WW0txG=wBj64^hvK{-y~GUZ_2WMJ#{(Gh@s^WU(qtY zqW1h+El|sT)q+28{Cq>YGLH5#a)9_aPw^~cbtULWwRw^Q(&j84ZRxIlnWf_5Z48p9 z-FMjM3$)^pTjrNT-QMUn1EU)Hd0^xeAr+;2P};fq9fV^h$de2^_$ z8PY1u{qqi|s?}04zX~yb=wV?~9Wv;rpp4o3- zMjsYJjT@5o;uz*FZlGy}T!|e2o)>7n-g}$N^KaZdy;Z?KnxpX~n0?ALqy|Q_=7+x+ zc#U|u&$EDC98~a+le0?~Gorm7ug%a)Kyk$Iz`YYU`qm zTTG3+ne?GG^siTFv2)WgV}xj|^E9>g5u1;NQLOCI z{75#sFGI33o?(t6se^_bM`nTB>aezPAtMicO7Rr(boec=n;T|E`~{<3VNdJR1G)~Z z0VbA4E5Ori{e8vntkLL~aGK?5IiTI5;^HdTinZCK%}E5KD)f=-va$Rl4wzXJ=30r#{bIsx&#-1=qN$OP=waOhL3 z)3VA&q?5eqXw~<%et7it7tcox_HSIvgvrC7xr=mh{;fR(Q;S(VpD3vt8HjM|(CYnGZi{sCH&IVk{pVJ+fyhaAQzVeFidDHQs-u6si)6{}H_p=+zObiseeu`{HPRJctN@r) zsDB)X0m9&2cz58Y5`H1RcSwSiK`y8;fkZI)-AT%z+p{KSU9hN>GqcX7QNG*fOD#vO+Hq5of1Kqi4d%M#>x~;_rM#d(n3;EBk_R>0)}B!v z7db1+pRxilO_(u}kw*Qkg?_vnECK@E)~mX>7sqDJwwcZoxY3U>Z@4N zBqI)plTFEt$+_t79NOPjB7A5>KhH^g4S$CT)*ZV+^C;@OdldR$c4;S*&D^7V4+ld% zF1_4ap=_9GEv_}E2wU`ha7fQ@|2)th$qBDwv4mi%O`?G}w~vkBBBGmeZF{WrvEtkL z;!K)>f9ZoZ$BYpRy#Ui`nwqJfbL9RTSIQLT@hU$7fhv_Xemc@}+Fyex&pKCmcknhp 
z4QZNW)4tPO4{FYOBTK_MvtiDmasi)SW{+^x_h`Q%KoV18@Vg0FJ}+?_{#_SX8I58i zg$CF!VGk*_keg&V`3*Rt)layM-yP+(w8Tu9E-ISAqy7nvQWOcZ5qkCtw_Ty2H zd0)q`>^!gxFP6?CwN!}_OC5n6;Uh_mC5M&V9x=7%0WB!k@_$$W8akhJnZn2R=a6RZ zywXaaEX|rRNiJGi=~2KYONX5AcFq=B^(L>Mgr0|BL>d|e=dKIB5R8D* zynrT3@__I>Lsf0Y?Jv*8iqHrY@L*p^Q5}o~A~=Z&BD9nYkHXd>$BUARrO(?eEnbL_ z?A~<{`!}uya`c zC3#QkuJH)q+nkvZQQCJ{*ufMEnn$g%io6wOlR0ybf-a8=h#v&hzP>_P2fY?tQb7@^ z2@S(+!Ug`ISJ;rH>hYjzHVya2e_c~~^{>+>O%h_}-18^Rpk_yTquUEn0L7VWg~y(o|(%g&?^d zg2?wde82BkWuOnMs;a7qqMLv8gv(AGng?W7zvoYr?`JFe(@B=*y2cdVJ0(TG2CwpB zfu7)y1kb&kLFj#G0oCNqQQZ`UY0to245}oKB-5-RDcxOH6e+IZ;P8UW5kpMqZ0IIi zspvHunBMy|LgTg{vS}5IC{Z3dG(IBC5?6Sex17QQ>U`ODI&xxikV-?4rwNs!FIVMV zNp0ui&+k_>`2rIDu|Q%-mk=OvRXg`D_{(J@xVEnh5Sc!rthi$uk!Me!D?%up9hXK8 z0#l=t46I=rb8Wr8(>0xrZwt9`C*BSlhA4(EAO!{t((v6mJq6Y0mYt8!Mg?OZhR@=) zpRl5SQ^zq=ueLVE)24G)@KGRbSi`}5YJjk1zGX|&WASIdQt3v?sa3i2fZ?diqt@tJ za|0PhEi7#Sdzpq!FZk-)HM7~J5j24ht$-P-fz4R7_kF4B!wS|~v&o`1Y0~6(>EFmX zDr>S$0MbU}QfaE61D*>@c7sr(b$BMedezJHpX=o?F7p{7lD{aivB#8lEZ8O{Rx;8o zwQ9pY7&*aA87PHq$*2);v!ar1UJ~w4HjDLDPIQfOzmr*=6PG6{~ z{!#@!R_d9ovOTsGm}U4Ze+yk-W5P;4|65*H7T}>M@ey&EBs2ha{tcZPB1n<K+_8T35*!^}$O(-$DpzqBs4F+u~{m?o(S_CkO6hr1zq=?Seh1GPE4$I^L?VRly_ z4e5~@^mXEDR0DaTAqubQq_sQR-ni)d&13PDCXp6?Vg;wdyv=`fytNiIfv-WKY+BJZ zT)r-UkCiNqIPfl~1tLR|LV-wDJZFo~IcR(Ytiq~b6&1(-8m(k4MQfXSYk8D;Ys6rH zo3pdAt9$cW`H*pH?%(tnUYpQ{r6?CuKm!chnU$S=`zr{sulTXf1bDGMkr9zYi%ZE zMd56q+?Lix8c{=E9M3*>v8b?N4VALW!EcEqu(yLvK8NVai!8DertA8M1MUpWL79~0 z08&Bn4;VvaebW;+l@Er82(c!Ux^NL-EPjo9;-0_RwpaPbBq$r){}@oQ1>#(!J=Wpfgiz2l?*l%J_qc|m9U+{aA)`EZhN16 z0ZOzCA~P-cl=-ZDtPS-^mg7@bLMgMrvrd+vz);`zqqp%8Y5DVT3tT$TF9=hjcK)c zMzfBE!0MM_Jyq}Q!8~(qdVluyHOEye9~f_1F{MwDYYMY?2v=(#S9S1)U0~K2;X#qt zWEb!fxoBC#>u#Y5uNQ#W-gEwZ?#*m}fONI#zfTTr%m?+~fl&(}L0hCcKuCyI7wUdq zfiJAi8zgUVgxg7tSws@URhd%*wp0vD@#-RG3MmMw)N~9}ymEcguJ3Wg?{i;K~k)ZEnGI0+y=K*Nco8m%+-3@K_CHx}G~;jj~rPID6uWA$4KU zkJLNUbmg;?RWO7MQRSm0`P; z&m6vh6D3~GD1*|?=}W@@Mw<-Bc#l$UbjC=?$uQ0sWzi2*)A5oucy~aK($fAu+In|Z z%)fqln(A_s?gPK&V37XGN+EMIwsOo|M890xarsP@rI1FF8<94Gh2sZb#OdU$trUUu zNw|;c1e%K8VMqU)k=;?OV@|MxeLF@9l5=f+r{-x4r!MO(n#80}GmkZ4oi56OMf@RWo zZg#7>>p{Xd`;mGQK2KEs(IEyPyXh9)VA=Ix=IBY1D?tqtMI2M>zfD|PS?=mV^^e}^qJlA9h5WO zYwfZ&*_@DdKOu4@tgO8LFo|%5V-bh#5#IC8YatDR?HAf?Id4fXbN2WZX({4=lwh$vTzzQoY_=b$Zr~|U*of6& zhT7V8$i0BWSTl`)TLySwO~|0QwG)Tj*g$@{W@j`CL{hng<;ejLekC+=> zzi49+sf5y&-cty-%ku@+3?$9x4gQYBh*Fd$7@o0tD{aNLREf(Z-QrR=G@YOuO)ZS7 z{pH(HOgq0!iwa~H2Cy9+Ej)}`bl?<8P9x)WpI#S>rY^cW@*U)y>V0oS!6~4q3#=_! 
[GIT binary patch data omitted]
zGIndXb?+RTrGE%d@{E^*6`U=6skgz61<~$i&+{fQ5F#Yn{$P&M`R)UBNkUw4_fd1o2&4-iUgTrP&+LG^z35e%425qLn4;Vy*(Od|d=M`GKwCxy- z;di!U}QV-vmW`n-6>njbX?J|LayZS zw^wxdJdL#IU)bN0pDXBCEWyOyjhhY6;Th{0$B(|h1{)iJ`H0^W$Vwc_r6U~`boo}l z$V4*X8hFjD3S8xxUxf2cFnsjtFk2~lkc^DVCRWV__v{^B^WazAtbNIGy31Y)%Boa_ zv%dT~;yQ+0#H6?PDoVbAUYp50Cbgtjqks`uB)t$7&;pwsX1h{0O{&SY3uRtMs}To4 z7H=r8(%N)VDmH^KTf9et^n29SBav>`S`F1YO#W$;c15kKgp)dalpEWhk3b&4E?cv< zevcXnHyc;s&f{lpgd`kw+S>u-05 zppSnjr+!vx$hzw{wowK9SVurxed)e{!g-QPnAgIGRuwrA)c;M5dXSes+23??&!mtBhPrvfu251326f{WZchy)yGNk#=gE59Y_X8$wgk;H}J`g#7wc z+zKDmzDGJ(e}>ICt{2&Xe`Sl34uv25X;Xjq-)4|Gf zSol=i%Oj&Druv$D?WF5&OlMR*ne;V>DWgd(%eR}FlZDg)vz*GIwnWE~?IR$A|Lsks{jk2|KL%JP1zXu5 z%OwL6$ryt`&-M*=ev$ufJ{Fs*0(^mRMGk>{MNn3D{{PG~-E*0~|0FpVxyswAT>H{S)w1(^+xE`!2JG#PkrZd#IJ>*9ViZ?^6 z7kcKtUisK=#*EX-cV{4yrYUa&c7tCeGQs3+cb<>Uj`_|y+zV^#eqfLOxeGUg?+BC` z;y3-SYuPN{&jDrY(+2BQHsO#+Ma8GAUqSZ6@0@m<+l9O%j#;?nw0%{@Pf-oLc$HWPo@52IW4xxsFU=+Mm}4r#P2f z+hDc8%W;13JC7*?&`tERB;T}-K?Enb$&U53o!kY085LPBUV}n1rJ<#d4LLSf5i&!f zVN93SgE=_K91oiK&9;n_`8tzYpa^a}PZ0U7uc;4c`nGAI=TJs}fQ*Z=tO1wlfT>R; zVSLNs=9t+s1YEb)7=3B)yHEd+FsID{kC_v1yDZ)9G4U}QxvXj{UmZNX+HWT8H@4JF zp=|n=PGs=UT*{)4GQb9}kkk5ZnVxnQW&YH<$2}Ei`(iaMEFv%NDOujql2NTHecDUm zvp|kGqA0#p_9|D#vz5h%O>$2-K2T%3 znMfoH{eS@(Zku#^rw3pv9j7PLhn+PIpgrj(|@fS2NG1**Wq3kJQH_J5S9Dgwey z|J_!fl1j09x)1S4HtM7}id>KE`$$ve%o=cOyrGBlm;IcSxTy)*>$?3I#;L)i`(l8X z=V}4!cho;FlI*hD+1TTfBo9h3R4;u$OX}jO;&*9w$VFr*@Z(!>YxcO1{yZf{(Lw8S zO0Xh>vKw~(!S4Y}?cZsyZVW>5Y_(4{=bqHdeLcXVUwGlDi?8BDhtCn*=mV+u$Kn$d z*-Hs?$v->@=QK^;0u@^f>a(^r{=Ypo{XZ)A&lx`Uf*h;)^kk|o*YF&sPw;D+g+qOB zxY*hW=RAmXfi0uax=t&&AA)(b(QJD3vHjEu9zOEt zN=)Y!ta0jD?PZq^7+HiYDFE6?9J2NV$D9>a<_hvoqBJLozp0Ug8B+BJ)I;|h0V_AD zfo3!Pcr=Cx0offZ^I?E$^4j4#DR?yUr2X1Xu+SOi5CDGA zxDubEC#J?83L~9{|4AT-R8tsCPkW(WD2|^_c~tv4u-n_V521!31Vd$8)Zk}r7H z=~zf69Q_N2=E{v zbf^D5MN6;hQeZcu{10%o-ptz6_)W{oDso;f({k2)(jGc~g@x-(Dg~ntX*HzqVoUem z=1a9ia_>(xvTD>*o2G?aH{YAFP5gq{GO+fq$7JSaRH3p-k{7%bJJ4d2mO0F2by3Fm z!F_e%j}^}aq)5u)$HA4e8~qJYM`np|#zV?!Qakt#$~d+aVHG()SgrY4IJ)1a`mKB4 z+mZ4br)qFJT4h4qIt=Nn_Zlx%+{96`uLBR_P7y$pRU0BSNrp?^y{#)#^uK1t^{CC%~E^bsdt?* zpDG}+3htnb@ZMb8EJ7CJ3hq>vsOE&EXCxg1qqhEKY zbw4`{I!NDoIrTb6ebhaiCLdi&oE8Y!8XE0&dA^xKAel_z@`mUj?TSWV2~A$GNdM0s7?Y#(+> z`|r9#&GkX~Ig9QKFDS)I`~S>B2C+rmq@91Ky|sz^DZ7EWHmHu>!x^E9}qlq}a zE}1(VBojp?W34#CJ}d(sU&eHI~akFMr1z68o;D`dGfdSf@83c`5?UV}t!VOogHjo(T7;1V|L_RqXG)_+f zrWVQbtRu1JVr?v10@PO0z_{o8+h|ttL|NNi=V@1@xCfGO{%oO!8f8CV(a1Z7goqqx zOXM4*mu8C($508wc$ARXz&B=yCubTcGBNFQqM)>kN%dNZ$0fv9xj}5ucaycDHb-`SEJKKmbwF*d)I%;w_ckKADOu4$!AJuiEBsU zZHDya7NPbaLL3n>ZcdB_a}B$u9G%dWyIiA;~u zEkyz`h={8|@GAt6^i#`wWf+h0Z?3nqbm%c{t0XbeLFl>G=BUQj7{Hne$ z?1WZe2q;~){Q9X3DSVl$B>3S9R*pBgZk4Y{GpOWsUEom+BRkO?5CP39Zw!5{rZ6>* z9)Jv>P5&Zgr;jnnX0c1FP@CsiPQl8s-Wa`fV~P27a%3P7nA=p`j6`s+IX2oBJnNCs z>R|r9AFW8CX|64A3jOu?re94OYU>mD%=XCT z3U`;^TpShZVEUN>k;S0d>b!tBr#dv!(xC-$+nPPimRU>3=lte5hYLS<8Ai__Nxu(u zz6)+r*7q`Y2dk8?W2mdLt;1-ipO>72{)mQT)w;J9{A#NyJZ*RxL}`29s7QfM<4*_% zQ6fhxxoI__`ELq%^oTGp_@is+!&Yxz2F0E`zx5JsEMaMahwqugq;s8w3T}G0bsW}< z!YzxHZ)in!pk$DAO;psAdOFe@hToi>#lF(MY1y_%6y!9TlXeDsV-01fKcb89r?Z4qpJ$$F%N z`0Sq65TrAkxFDl(BmL7?IGJXl`-=0ivOaG$TcjxRya`LT;g9W#eyaWNxo%_QyPizT zf+T4IZfcU8|`|djDN*&|V6waM*D%LY9CI}+2bm|W+A7F+` zW8#3tnYW$D29IJ6pSH}t+kO7Mlvzy`*}{~8##?El_^sd2d`=vvzxM!scb&Gq8G9VC z%B?1Z2jq%SC3f!#dO6zsA!64a7#7&zeHd-D?Y(D`!I*yKEMY%=2ihSW4-O6zb0r-z zO#2E8lo0=R!}_BeHgr$! 
zj-4+JS;5!WygoD`$@4*&w&y7ZV7nn*_rC(Bsr;zm+tTx(zXwM5qHB_DXi0EBq7!hs z5R^_ku$|je?q{f74!Lf4r@O$le*VF4qap6*71+^~__VpAR5iW$v>Gp`#NTHFJelOA zT?PJd%Y^MmxsoQDGX9gOW{vQ~i@tHdo&>b0u}3Ll2=VZ$8*mgUI2?>7W7g~7Oj+`- z2Wp;71S$0(kjPTgLqb6f=Lv1|v71ICX0hqIDSD%3@+deg?=|7hf>Le?1GRc z$no_PfHn~6qox|&5n9+LL0Gq-qfzq4Fj%P=Y|%1nr|7L&?dZTK=l(+Ciq9cOD2$V%FRj_4|A> zV7Hl5B;rH=!-;ozu{ON0(9iPrO+TQ>FcRR3R8?mgh12<7tPvLQ(T+Qgv&t4=<8#fFs<3iMj#nECTVgg$V9a zOjZ-F6?+#KK>Vz3sVGRXweqw*+kOrobbDD?Wk6Dkac|f07@@?2QfUQldDbyT4jmeP#Z_>_Af7 zQMXRe^lf=h8uPp*xy;&$V8=uUaF_-&&%Z{5J;03PM{hm|BdcOcyl%W;b% z63}$gBD;jzBcOVXN5HLKkL{64$`tZ0VFm?WY73*T_aDYZ7NWim}fFKJ7k*P?A-$BAfj}RuueyKL$KVCz0^WY^UX^Dt1*hkXLK&Kud z-U>IcK~2u(u|$T&lk?mWt;CdoBP+UvIN-F=v?C@yS63*GL0D6hm5Ioe>n|jAz4;WG zTviF9p66-i>@Ug8h9R3GXMa!Jrl^0h{qXeu><0zYFWNJ*cz71c%6YIr?xHM|o4~Dc2o$;y^H@ znS$Tlw2*y88Z~@+wh7v3SRt;yjEJ0Up*|&SnrI9#Mq$kU=H3`eY5%H3ukzpuDVPhNlT* zmoSpCLy|f$F}QJPI-hwU*h5_hrW?6?1NZVT%V_cxqkg4NkOaNJO<<4guKzEtiltDOOO{d=yiqG6_#X2Q`5o^4>GH=Facq$9Mpglo#e`n-ns z(sMFfPuEV7ziIIJI+irtrD?fC(fadlbmcwj(L|Mj-!&{uJtL;%swB-H7*WG=iev+^ z-_Vj5U^UR3@?-v~ChP6{A@OBxq@WiG!f&6po0E+8&5<{dLzi90TN7X4G;g@=iy@1; zCFMfxfo*o`zMt0?$_QpbP_BY#6z|)f%T;3sIL`p@ij^+{yK`%9>dgAS_jGd$OVefZ zo4~?100&1N=e1wctQtUGx%E@OJTK5Sy0CUl=(%cPuBk7P31uJHxVV#wDt=QHCwlD& zi{*qnu$`#LXNt=Ig5)B-Y5_Y@o9?h^?%D$Ye3O?J6aMjSFAUC6l8#7i;?L0YUrxR| z&x2fHKn-0ljST;b&o>fct?{B(H!<7JzI0zDFSaf<()vS?b(Q;3Q`EeHdOU!oAPcLc z(p~PJa{6zODH^I#N9$Gd<2wVDukPhrPg584N5CTX?bR!{n);^5cj_BzB=phashs!3 zEc@jVs?N#!@O`%T8=H?=W-r}ST!FKX!tPN)eUzhdmvUMJ?t@oMujB>oYg!c&i)i%^ ztjjNmr#Wv=Uo!Y?zQ8YQE8QF}OBWHF?kC37u3ntiiau7=2IAmo-X55j7gY>YHeaJB zbkoq$o;cXZj*eM_AI|ASYexcS`dX8N9f_zprmV<+q%HVr*&S|ePp_8ge7*VUcj(td z2W@hBDZ8Ao&+u^>60Gq**0xG-KaU$rx?cp@GSnAG#sEK!B4i8f8HYHtsze7=>@u&h z^SQrh!h_L))WGsmKgKH?1Pk zKW}S0E>!BRvWXf5G+fdIb$!fjlaa_hmM+CRRoO@U-#3tb^L*g9VLULk3<4(*k{k9f|mF`Dxi@GvW5qtz0jnbZ^|LNTc z0QIam765a4$dg3@tLThWUaaFF3{jvz89f%}RVX!@`Z<}lZS50v7ko{0#WcQI0!s}|7wCoMer;EwL^ty9OI#fQHl`kuUR(pJv^KyB zo`EgO2r6Yza5zO2e4JwZ83(MapLIgS&%^~ib!g*^w7)`$iYr|8@3y)^+dr~4S^Y8< z0eDy#AMy^%CLNWo$cFe$1rOYph%ff1MxA57NLA>pE7AYr@dfMpKthRP!k4Vv5$XOl zj|5s@jMX@G&X{PtT~RQMUDP|CUFSjD#dV?phkNADnj???blNSJ30XdYfchsX)#0U zg7#EyB0_-RKMqnv%?zz1HTm7~; zl<`w`9AyL16rRndPWCX@gd(!_L=69(XuTh()y{)CS{eGjosWf5J31Y_|1D8Z^OfP< z`!QN@=%szl$9}-h1GQHB5Ye8Hrr>}5|cL@RQZiZkvRPg5GY=^^AiCwM$!x;w8 zS9pnXTEPX)4p~hihsnyZ!&LYVUtK~f-CGf1_c0pdkmG!S@4Lc4Drl$V;YD-&?07L4UrCMDK4Vz`?+jS*xMNNVC zkK6%DD%!pkcq=94IIAL3-=qot6ZfjV6Y5xyx#QA8g_f;(W_9;3PbIn???j3P-WvqOf%Ci)E=2QtAq>LVGWB%K;W@Nkhk{_v8BC%PUuBgX}3L zPUkJw(!=n6Pix5Hk3vPffA+J2^UBW#U*g?-s!QSc9QWP+k$76SnC`1D`Ge&k@XRYm zx=nKwGDYwi?CQSqO>G&hA#){&YPs$bE5^Z68G$ld;m849LJ1b%aZsfbAgA*8;&*0# zd3tIsZXs)$fzns65z24)2zG4tlE2%$n1I(wFV~+#LfrntqpMcctq2yPvAsXRpxKB6 zY!lQ2bdPIyIE(`%Ez35w27qn1p+=>{x$0QsN*{x=Blga+Kl%9Kl-5t~bzc|MiOCd! zvLJbdA@;L5mNmt;!KbY*E(f_v0^Xe)v?;tlC_8^7KL_*%C}nx{F9*qTTXE2ID4dgb zILT_YVYC{WqtdGck_U6*D-|%7$s_zZq4qU)MCL}4)f7Prb&p|+iCie7Wh_yaqALuB zB$066n_|NZF~)mQ^S<}fVe!9rD%F2SH0luyk~LkdrzNj%DyMN~*M<88(`4F~?FaxFvM3BpW)gb4%U zOaYxEBvoAah(Y7~)X=umSB}oD{FZmzO*b^H>)Am?Zai^Fwp1Wqh@txnunu&hzSQnf zS5vR*B}vU9rU@w>bc5o2vJQ9U8qL>r8!lgB<>V;kMTKa>7y}hIx+6vA4CTfZ*#ui9 zUZoxtmoNE_xO6|htcfrzZ}$_c8H*Ad`iBC z(1q}tr>b;!-bR-PSWPgQ$$o8pKeOaVmRNo+la}TjGWw&s8a`FjfX8MJ1gN_oo;!#H zzfngd+B&Ew5_71vpJv8s@JytE!ZRJ0Tj?ps=A}16AIaHh0{9z>$vE`^?7tdfrSc#-38MH+F z44r}cRYvgH8EjL1?%~ISL6iORAu;BAt>JZk?)qVjtam8-NZ)J3OM2EkTpMv%3g=r? 
zs!aunShK-{1=%c4t6Vzm_99!x7_1grshVN&O??kj|J6^U`EejN+uX&5{(TeYR$@^?r6qWX@R}9o>8MuD6%6oK)|1jf(0{t|!x}}@*D_9Yp#j{Ms;5l&7 z(}-%{E-qC<6~u*!1hDk}S2&e!L^=Lh=KCkxoh#z|nzeG}B)`ofoJJYRN95aayf!RX zTPv?u%#3@eG~yt~ZCH>*h0EsiMF#Gj*U84|@53bY09J)^N)%knchk?c;2WjvLhZN* zU^O)JriV*z@q8ds`cxmIps(>Thw^OP{nAR@G}+Z~|M_9oWwUNOTf6Pe|NcVvrbRZ; zWea(>yGcmA3DVjMX>?1ZJ@%K0BNZtLoPggNK|i9AP2S<)l_dk-R3?Ua2{IW}*GjCU z1PckMqaz4mc(}o_!*{T$YPb9Y|5(7v&V2)>HT~5+vz@>pY1_(jx3RNxz9)~9?-6FJ zDv7iZ>ayeDz^n;ving%{=TRXF73x>P)r*qj*l2`f+i8#F2811rwb5rpp#ZecOUqxa z(<~>~&yZugB==3S*D%HgXbf1KRTQi`oWZul#$PUp^LFQrOib`Q;6rWRK@k4ch?7-< z>r=i6p+?mHSA~+xPPy)fO|E9dO%L?y2`AI`zkKpHljp#35;<3pG36ijP@Z`1TASDA zk#SMzGgQrl=@5@S-qFZpHr$8jSL}&N7Z1_>UrSi9ji!Glc8lv2U=XYtQUkSjCHILd z5)-be5JL}*0-^1x7fD|u|z zSxH;f0T+B}znEVp+%!5GlaDb#{sTb%wKUvQR~f7BnAUgF!r}grIryE5--N}OYD{KO z9y*F<9M;dmw|$^p97LEMe7TzW!qezge+6-+(j^`3+V1gk=tjd@(2 zCKdcR4koQ>(ArXxkdxT9*v~-5+}D;#bm>B*?vt3wbM+*R7_fl1|Dpf)RsHO?5gd8J z?56_ki$dA>4GI4vQMk11=6-1Tb5+4FZ00aRjDEi9P|1{c3_a5kKw5_5uZ9j#wD&{2 zOn?2-(g4hTRGUBa^Ko2Py)&$hLH#JdlkG4mm>L}hU?TT`N~u^z;Tlj&SysEW=plg- zbP`!n-KHI^NO9^-HIoa>`o$mlOVxUh>%fKYf}^ zWAoe*2B+6e9sYocX{~LY+?Yoz*xQmEygtB^BoGkCqVa@Ra|}Oh6G%*GeX--_I3;MO zTV_`S&RJ8i2pQn4IlDblWNmIKxGjd1R{Wz_{xegYW3{5|U72y-L!&)M-{+8x_qxB& z`sLM|pT=G>yt@Y?A-FOu3sUf>^8MF3 zWnykQpV?<@e|&~1yLB42#&2EWM{K)}H99v;#|V;IL|o<4IgIGuLcZBv-zbfH4wQKf zd62#q@$h}jVUPxN$xL`B?p_ zTAwrXZ9V0V#DH-yeaxMQbjIy1t2YWuibx{k8meghZidSuCh)oqsn^AZriEcdMhWE@ zu(|tH;ElM9X!#IICUuoN^HQ8hkGU^31TIRX1EOgyFS+m*ag1!sH$@LLJWPCC~tv zbuos-2ZGUY8g14s$27|8)ymYDxi5KU7b>di4oSRMegPy6k;{En7uNqx@?ZbqdV6*# z4gLb45l@*#Pm6sw+|iW0u{rhLq#=Sv!thh%VOU(4Z(2%{PAqR|z%Kjay13vAF6u(U zW5Z*5LNoWtx50g45)Tqm(_;1rLx8Hmf~Rd6u63+ML#aWK*v)|gOo>Zj*|UjR|2%Xz z2ns->Y&Qo^)N0bS5BH7v*s0!_W7rXBtmhdegr@+6+yFA@A^B*LhVVgtcfTIUT$C8K z2ih07o>(*LR2w$~jDtpA-orwxa+7riwNx~KoNqzFvc7%S7pt4hT4E}c0ss&yz6bdYGHHvp!bWj7+Tj<_4_1y5CyKA^+Xf);yOB8e?%I*CjrwaF@JL;iJA=KXjiY-*2v07};Dmk* zB@5GlMMH!hmO^uFD!Zwv>5x>AC*@=1!m|_`!DO|!1f8LnqOu6m*6%R)wibbKJVZis z!aN_44ga^+W7VC3MGoY{Gqgh3w5!|W#70IcV!;Z|%6S@+Y|ndZm8`6D$q2h=0{FRA zrR5nphr&yg($&t2NYPrc-zuJ= z@AnLHqRR}{I7rpbo$dtZ4F`--9jt#y?Ge0THV;fZ)Ro!V1Di9UoGt}fq!f5B;|4x{ zNVpp;D(ANSz5g)2l*>IUE&bYlI?oqdLY4eOq&F@O03Q?=#EOY#q9jz%V~g$bZCzG; z#kSf)ml4gBo9qi`SsQV6jkp_e>m$8D=wQK4xJgR}MyGc6KD$2sQeAg8M1aERqc^h<8mKI- zAP@Ay2AKQ6YGTK66KFh5wC!fu zQY14c9+T=d1x*{y7uco$|} zmz;W}%JlpHK)H>7H3a|mJRV5+3rV_U_uP|W5)aFAYD{YbV(4sJ#?Od79lORnU7pc` zxr7KiP;4tx>r7{95qAtDRPYcf6gZYkkY^9|yD)I8;UUfMg7%rUGXd-Qo&O%n#OTDx zqC^nG3Ztu*L=fwKAQ{1+j=_PRu_RqMgSu%!etpTJ~ zkqQFy3fymum#VX~i)~~Sk<|Ivus+oJNRUT`XwE^`2fIG~!T7$E_F3dL@_-sI&{!$x zaYHENJl20Ta=Gpd@lKVNBAh$raA(v~Rhli6ss95J5|;n%_`FA4g7ijktpAmj zjXBgLn6tFkE}RT%tn4^#5D&L|JliZZcMQDX&(?BcS67I3w+_<@u23YNIJf*JyoQ*$ zYmV*YWz`*!7}AGUTYy(F$;>36N_9B{661w%Yst#CN#@x$9-W;$aw55ll2}-_26g$Y_qv zAiPCC%G9pW$wjMJ>>_?Uwy4(x!dLF~l=K&1^ccwJE4C>lm{8Of>nOhSYO6b3WJKy4 z>wn5EP>(9Y#E#%e5 zSi6R>KK(9wp3TqZc0TSj<8X{Ha%p~z0RMGv-K=c|RTLeM7}R9(G+Y&~M=E|=BTRjD z%=w2=pPR`OFKvJ-H)9o9w2u5uug}Sb-)6Y;gv9*W;s1yd=ODd0y)4GBLyyfml=i~hqe3WT%Y z5a`t%=B;8Sf5rS04@gZ3xj1vZG*-Ebh-~W^A{kNl1$i@=(kwAKEKnfYu|CpEEv-)Q ztCLTgR&Lbn^017E`=F2+Ev0GnPosypd1%dUwhQf&Rp=^z(hU*DBa zj+e?@mlZq3?1j2h&dB2opRJ)x4ssg%I$E%0PyRVFrpV5;Q%{nuHRDt!d(7~d%1%D9 zE}9`L>V@7g|B(!UpF-1vl7lshqa|-3n=Z~YWQ3raDVBQV4nRvB>4o>F(d=+5h^2l1 zf%5{#@2C-QcleCmJk4;(oJB>jO%Vy%Li?NNK5k2` zL74aolfTGYxFcIE=ZTm{6KYz{yEaJ)K>rM@NK-v;l=@!GPysER@^9Xe5@yL6(nMe zqV+VwuXam2+sN1z9E{nhjv*$ySTi5Ot4qdt}^t4P7%b!H_8lUzSjVHxAUiQQVq3{>B&ZF(hg@*>h!k* ze@Qw>l0Xkr+bHNy&yQ;dD;!-FE6X5X(xqv{$Nn`)z-0Rw%xLFJ>T}p?l@;5M=c#p- 
zq^R%s$NRPr%$Pmdq&Uvdm{5vQnYhqvWTK${Poeqyjc`|2{hQO?sj>2pE+DbRK~bD+ zq`q(YUEhPgjoWuJdfe~S&&}B-={JYPoL_c*h5hfa51&?RF4qptpL^W=1GgEgB@I+j zoJ?ZY!fG;PL6>hC`@Hktu`aK;)4W#X(^3_urQ|!kz&(n>=0SKhup{i-mx0$A!BKMq z3g|5yPO?%sg*#cAc8?99U&RAGOz=F?CdF-+`rAD;i`$WH4GoY&{6oU!5GDzpBwpjI zB;NmxRMtlN>K77xW3sMZ3Mi7X`?R)eKVf$v(|@(`Hv2cjE{V0rDIkHY^~B_-gUecT zlID?v$@HzZET=90-VoKkGb~_#qv7j=gP_B9vR-F%9J3esBufD${YcIcy<1 zX}LIUDE95SwF0hL-VPby{ok*=q54-#XAq9>^8yL~2= zy_Bn~f7UO8ly<(=+RO^uKQ{+1Pd{{PWrP4CM-J7} zxm7`J)iMYaBhYzih*nqwbxBYZ2#c=YiNCa6>@ob5$o&o@pOOn}FHY*U$^uBKj6@VN znWr)R_d(`VIAQ6j6e4tW_R4p9v@WrF1OKWi{R}a=U+bp}5lsD_k*HSVXXAfP?`Lz4 zHJuX>01CyT?I`;jHpRpQ_n+R;tAg7d#5yYp>}zne={L&G+IN; zbY4SO7q@RUEboOh{YpyuChqz{+@!@~A=&}yx~w*nwx++*x%fOOlVc=n9NV=xFSg$W ziID$sOcc3}23Xm)veMJMXctZi=?sgAWPCoDf}Wd%^-DQLkE0_6?~o=?Cutb7_GD&7 z@?xAX*$W3wp@}&PU`|dF86uPqV#BL&GD7V#?4g|Zn;awQ z`K1K$k^85@lN->yE?^|5!tnAm(*vR~9JEY9HBoUn4)ICEU^kMWGKu!suC_Ibc{b2p_MiiM$ zlb$1Lk73U=AZHZ2We88o{cx+#w`Gnm1pEFc(7bh#B7ikrH8t~ewUmn)9MV<96^zb+ zmrpvaBUIO&`E?%=%f9(s4bx97#r5yM37hDvz z6cigbRkwT-1gwRI?EPV$2s7@j3k;1o$#5A|=$3~@Glr@!qBC+pirq2(b&D#yUy~tIw3DKG!Ziajjf4r7qn`qkAmct0|T?_w-nEM21TJE1Wa2@ze0 zOrT&GYK*^jyikXRuIKp% z;_6VMFtn|U__v=K?(W^O?G5eH+gEhYtt^#Bh73IKRL~R#V8w)^4@S`!SxpO$JEKi+ z-r@=vYu(#Yu;ijn29{KxqVBC7%57GNJrAv^XgJAdDTk4FI49teqWp53aEBqmBaD)r zU4093y8)*sP**2P2QR_ZLE0ydkpY($V(44bqjAqUUyx&ivHU;P#e?LO6Ya>TKV&ON zj^85ONHuE)ZM#0iL0!?B)HCSHWO?0bMcwJJm4R8TZe|=CH99GJgkg7%J)zcFk-k2Y z;sv{!eCEP>ZQ_P}?2Iyro6Au;q|(LyEH#43S;lV*^D94hizfTc!m^T{9v8_sD3!RM zSoDHkcSD3*QL;TH_V0bO=M$+`VI_0Tii^B&P3$=j502Rhp8QrDuxwEjV=ja=OuVPIIlyj)pB;)=j}rj_(+xp zO#+iE`T*`R`7LCQJc0!FSh1^{TVbd#1AQJaqvoljvnIVYs~xyqNNRy3!hj%S1t+0- zCRk{F=mR;V*q=PM<1oWpN++dw+$jMj#>9C;ZIgqk+R~hq3oG2|w})V}lS%=(wD#DP zLW(y|INajG_1;5Q1*T4XJ{$lv0-T;f_87x7)Xm8ov@v`*iD31WAVfAY$c|AX#nDrZ zM~%+M9%`)vMoFFvQ28{XYH8u2eU~&ZX6>ITJ}*Zwg;{(Q*b^68Z zJnmUW9`CA&fX^qU&HD>`XWm>uJ6UYGh8IHr;2MIDyMQG-xKCS=?ckHD{Pra+t>(37 zSB;){C{)}EtWOojGc~~bbmXsPEq-(nG zu^*krc=@!mmN7A5`NO#WqI8pT39fo#rJRt}TDvF8@f`+z7Son6Kr?|LM)1BA8xc3r za%*b73~-2R$YHG9`d;mOQa8pYEWN3WqUKDWWzALCdN%RhVHyW0o46`rER0xwiyF&_ z3M2fW0n1PuUZ|yj&E;Pm39M}09N|0~UW!(WwEo@y%9@iy@Cx^t8?wpw1{FCUIMm#s zQ9w9~EJ8?YwxvMaV@87VM=>`}IMEV&;U!(4?R3*FORi#2E~xEGMR2J9P0iNUU$TFl zYn@!+5<>=f`9XD@=yO1>`LGaTuc^EA`C4Cm%mb=a^YqyGa2Uy zNf?@n$|i6f>|rMa`kFZ1$xDPG8s#UF=~>qHvE}vjo@B!3{-q}5mM=#Pn;pn`T5;++ zgX++SKFb+kn{PbRnAAKDU8UwC@{4)8WpgB*!CqX>Qm>A|naqLCiaF@8A*-u4VFcbM z@(Kmx|7%y}17TOTvM80tsFk-%fUS^1iCU^t0BZXByl&Ep9Krc!b~{Hppij^@c=99) z8<7!-4Av~r(swWMB5^e(O8tWz?SPzE8DgvUl2 zJG~+cOv@r(^`SOtE2*;4^nl>O+)xyd7oPwHEgcd{k8X5YGJR7#W+ZEZmgR=co~&J8 zJ+>2%;IF6VPymxHanZRxwvPbCJ73eA0I-O78NjB$eC_bd^yZZjTssr!7 z$=MVSz2m~Y%N4| z7f*gXHN8SQRlG{MmH|Jxc>S`{&%e|8^?kjRUU}Cq;$cdd}9q1dD^dQoiI_}4+$4GR=ns(|_OkJ#+5l;}5V+=uVrXe9 zhg3>=xp&>pkIj$Q;HGBxwO3=DwL4&!Cwm89@rk^hJ~#-R0ySX6=!>BYASNWy|6^9! 
zg2CFf8wV2Osc~v3qOG%1>>54Sy^O%?(P#Q&tw29@|DZn#vX(iZpj!9)-kD;B2?fP5 zD)DYuFNm0D`2J;AgT^**ERTe46k?*-&h(Edf?j27|1)?~r1dbk{P-d?qr!?A*LNax z2m{|KUaFpR#$SeVzol-C^C*^y zZ)2Snu^Afw!W@31>=AkhmE@!79px2KTbB-+9omENlc)IkRVU#f6^zpM!n^#T1BT=Ak zSJnIWx5Ud@5tT+I@pQsl*V5{|b&sz_a+NLComjANu3Ur(oXn6&Hg=Jq_pA>X$g0GV zg!mDy`zEJ$CaX`asP* z#Ef%)=lYyK|Fc-#MtKs^`b5Wmo@#0+3vC;3+iUXi>QfrBM+s{d1YsM2PZKsDGPV)P zteq+d7$d~;0{nFTRO33P8IEVvI@k}r4i$c!0J{x+A>KGk+YqzYTf64ly`JQM8)N!- zSm`zi_#CiyJcrZi`tdv$&c7R3u%#2o`Crk?1!2FuzrAcm#ek3mk0{f|eOvG#7-5xT znAXXKRgm8wulV-Q2goBy2duzbOG`6Di?mX!$c-Tb3pr9@z}%RZ=Mm30zs=9n=sdhD z$Sr)G>Sy0uKXMu+?*1i)*I884k|E9l&}d*T&YiwiU2pS*;$su+1inSGO8gUryf9a8 z*Jx|_*)3+HhuepJJU}=;QaN&Yd+mBDjt^h{!hi!61#wTv(FeOl+@(nhsb&PMtctmT zS4V$|?1cAxBs-6)gpQe}ax!6PTFhMDNwlNB;_5PVN;;!>VA#6ty)(K=8jY z+3h52ZBkvxVdHZ`BW$|2b-+&g~p`L?5Sv?`OS949Em-yHx*}qK%CxFnN>l4Pa`a6?HD;BFCdtz zySDdFb>?q%`wQRFAIF6Cnf6%L;Wc4RqySa}v5#DiC*T(m@4)VN-U=F~rYD3$EpZmU zysZs`t0}@)&@$_bS)$s6PF`qd2!X@y(?xmA!@O+l>ov4?w-x@pTA=QAS>(GHo(a%oETr)qv%a+09_=1PZ@RM+iH*>9K2-y3wycFDSaQ}`(bn^O!Q+i3J!?*tm>?WR zS`kIF;-$&Mgl*Wm@r$lDH@%uF0l~(I@m{A9Y>QD9^o0NF(JYc_Eli% zb**4xLDU8508s0A@T}Kyc-SFXQDI!YWis9%g_~PSEjr4rj6x}d?#C(gQV3d^H5Ss_ zGQq+sYA$G)U$U_fRRpyDQPAFu0j0S1vS+Xvr!(++b$4}N$>Uf+26R~DvjG>G-%DNp z@e$_#My?5zm>tb8MY+2itR#W`rKK_sdmYN)@!k9AeQ5J7!5v<{GG{#Fyw|oun zw2373G-zqo1k<6owAnoLe1np!9M}g>O-n-e7v+*Y6!PE?dcK`buw4woWizY%k-!$H($ znB(Sbbkzvi;oC&W@mw-%KX1Rey>_nN2j{#)=eP}WgsE8u#3M{=+Dun(Se)W(X- zzQRM7l$`2aMCy9*&_=TQa*_6ATzZ`TUdE(k+Zt8qen&Q8$DW}gsJ92ck}*l({D%$G zeh!UmN6$PG^0cN1;?r9l|GuUAn#rQ?a%ahr&s^pRk?!ZBzpP+D zy|$>uwUZk#og2n|;rN?e$$ z`GVrYO2S@Lx8!kzbK`|_tiIN zdRXYEub7V+q7{2*NbHUD_1S5Vja*aEV<&-iV8U44$BKG6pW+S;2LQpMA zhjd0qaxf?P^~Ca6Q?cp25|d^v1Dwec^o0 zlor2MBfxcaKBpP_q=#kE0L86&g()Sb-kyjpmY(K&rp^puCkV5ZPU(ZkPaW02f0*Ds zn^?aQ=RnyL<(gn$6Ga@NsfhUFbz9NL@1P@p>#ATv9>H^M`NJKu&YM^vuy+gjW=3%b zp$z~MuyzMNamg+z)Z@bREOFXtO1>9x`d?X%<3wVQBCRA8(VfYS5Lv0eP3O>O>~ts( z$_!Paw`h=Bjf(YWdz@u|Fx~mTdLB&sNwXllmh}8cqF>Kx;BI7$o4Ue)3F24g8FGm= zg}1DzCNT7zQ|_QnhMy5Ll~;6|Z8V?=W%wmw_VznUbr_=&yrKBnD#K+X?41Q-CovB2 z5rnC6c{zE1iceG!J0lu#dj;?f6*Rr%^g@{s^9_p;=|Rik`IrcfvXlG>KQ4>4KN zY_V@p9m=Te5rQCMv9=_!S~d=yEa9`cCXqZ=^7yx>^BIT@ zMS~hx_p2e~A6kO|vN>K8POIRjr>FOUssmc${Wf5^^mwwGq^fWZ>V|M&3W4TGNJxa$ ztg1waet1q#H9LzL_XaoT7oetya$2yJ$~(qa1SV(XUxb$_H6ZMREdtirI^-g`&uc(5 z-QV=W2w|B3Qvh>A$dW3psRt1*q;W$EMA`xPt=P6mnYQ1$|LT$+B_*m;ejPau98Zvw zus!Moq#JZUyrYVcIJjUrTR&Ih_r{Y$-1~i?eVxmZM1_n*RXr`Zl+4$Cn#55pQ_~GR z{8I+DnF~@S%EX2-v5j)NiX^#RNTaeCE&Ak}=Dgbl%3J0BbY$=wEU;cL&gT%OXAX&H zz-8#R9z;)Pb`4k%pK&tA`6c$fPF=pRWt(baC@bqz{F5<)2jr(?(tXA9V9ga63$pr5 zrsM10Oz$C7U0w4EetsOn9-#*?U`q`4o{OSrypjlR7L%+MR$s@DZVN>K%3XSl$>OdE zR);d(5s9P3iX(LVZDj=+RmZjNFTNh|pI#l*m*X|QLE%|ZwxC#TTivcIn@yWdU(1Ug zt3=P<{D-bR+pV4uVFLf>40lzvY@CbEun!skWfPpX0CQ;)mtGt}>h2j?!o;a*neQ`{ zO5c~Y(8U}1(dZ}?=6{+1pF~CFA~J$NzPFy!vgW0JK3DbQ#8fI;buuf$kb?Z%iH6MT z<~T3k@l7R(SQk%DttuVoSVWU2Hz_wK74$D$@e#pTPXDxMs5n(1ncxPe7i+UFTiNG4 zy1opn2-SDkb@j=6Z(5i64(of^h<_EV;1ttFx4;YD!a(+Wvr}o_6d((Y1aCb4Sy%DS zBix}gHFbB|8I2ONAJuiTN8us26=;hh2}J#vy?wbsakUsF#uJWDhBgJef_$4-Z^r@K z2eLVyyDxZ}psrxY&L}~LjXj8%gxs`pn8-3JayPUe8p2}}{@e9BJH*uBv`={e*}jT* z_9eqaLx2Mvf=OH&sjtOY<6SfjSKCKypRS)c{T489(=nli7rzu>&cvS5V^xR?GLh1m z#!E+V)_Z0u{NVwTOxc1pkHi-Cad(4$Bc}Z-`KqRVq&wBqno770<0I=6D|A4YA1(mC zx~uz+=xc+za^EUNmLaM?deEiAiXLIqhPlUt*GFu2$=B_sDGJ17)#W^hGlrQ*VEXr_ zA()RnG^l{}4=&yQ5>c_hCfUOR>Jo-%sB_`=Y3KOLL7CLRPIPNs@HH&eCd72~H{4rj z@IEo|+r$scQ+prlDFRIvLw1=O|!Bd73W%>~US35^w&uTizf*>czBHHw zEPQR-^@bA?1I}q!tKKDR@IU~hA9r;D0er|`6xG!YuXtC@=q><$Pg~<8284z&qZpds 
z5LO6WKbb1LVZ^J)dr)Nu>ciJQIqqcH5FHmtD_WxKjXm;&Qh<_~CVw1zSPx7%&Gerqfep3^K=o*}ma_iZ+0*dAymat0#r~qvxv2`(TEtD<@y>#B6A8s?bWiMA=S_2}osMxWH2UkvLmV0#^{<$N(|g}hu-7H+ z5b(DCS6)BzKi!Ew^nbwRAmKuBR_g@S=iIFx5LWs@Nm&?FR99W`s}?I`Lv=<02HQ+o z3`(Fq-*|p8JY`=BelhaX9!!vcXioY5U_4n4gsQrOtUc9qOCKhd!tT`VYf}c&I;aZgFw&d@+iX-IPPQZdWfGb>>Blf?EJpMo=K8+ zx(K%iF0XEXvxy60yS|eWKkW!e5<48z9@Bv(1>0lg)YBg<_6WPJxjf7i zT&pD;Q2;7n3H^=YW1*xe#f08IQ#6X6Jj>#zJD;os(O`N{c)Esab`ip5)QFNbI0{B- zH!$E(@ww`^ic#W+?(0KkM^$r6Q#SHtI?=PibFhFxNNoufb-cf|5&h{_KBamikQ&$3 z-&C0t|F+AFqpfWp9*%d7lX?sPwsUBFuC~?U@7dt05I7h_>9-;;Lu>k)-M%DuKq+Q$ z=++gZB6;6MNLhBo7S{?jExG1_;vA3bp0CkvW`XcTUcp0tZl_aGPTD%4Q?=AEq15PC zm^ZdK2=>GIoHFA#zwdvC zvUJ}FbRiA2jUB#jsv9`XQVqr}Vo@uq0&3H*G&=jDZK=JWB);t`+0hMl0K5`!jd7de zWKd%oGZoOJo@0XytTzqXAtv@oaIQ3?*LdhRPcQ5i8+ z_|YK=y~?!17w9-@Pq?imEq}%8M4A3??eqZum)V4XO6kMTj=56ngBy%X?BXxPC|#cd zUMXs8W?>vKSaWnTId1!!Hk$H>sxqMMScUGQIq|*+j2c2Uh()PkTnm2jmw%lcK9`oz z(yCXy`DKg&Y3jIT*>kRud?pMbF74Jq`c|VGLtz4Oz7Qn-?AP#btWxXq4V8D{FdE;Y zO_{n4)A7Pqv3;7WtkzBTD5L8}?0N{qok_RuFlx=6QO4453(Ka*WrHyPLm4;9rd38H5fL&^R4Yh;;_boN;^>X=Np3pL=3I3qN9nzW*eecQ7=cK0G> z>^gqrwczRC=h{pRlilmPJP!+GbBiSKaBEdrYg8S;b-eSgM|!k&SxG!}qt_%zB(LKW5(C@RVR5%LXJbZoqW52;=TX zsgq??*gn@?_=MA(Wo}0vf~(1bV<{Q7voK&mZ)3;K3(<6YR&n+f4K!MhNz^IJVce+12%f8ckJe4S>xThu$B+1RTMOHYcbiU@2aG&w z5ZuDS_^>mo=<~8m$Z{$YT_&!e)kMHTXw4Q~I<4Msh+YTAu$Jm}ry8IKi0WWTeCW!f z^;}}UcA?YOIx%WlkbTWm41xiqsL>*4cy*2tWdGxnD_u0TM(^_`4yY4{)xty%(sWmd z7Z~bwU*j8BO!MUDbb+zp=N@0QVlnq9N8bl_J=nZmji{b6MMPe=V~B( zWlOFbM}t>1Mgu*v51ZHCN3wUHU={v{Rg(W;Rf*C^kQ{TR(<-ziL;NWbojPE$N>>qb z4nkK=RrB;*3K~oh#(^qNj{H<(ILlnR1$ximp2r^P(O^mWqFD}KkyerY3v&_X-ya5T zYW3+JV0;h!e>p^zr#4w;ku;u$iyW}7!0`u^ani*LG;wtHTb{N~KMbyPQsv4=S3wYx z>%}g~LoeF4Cxh`|W0Pt+Hz9Y3jq8>~Zz)fHIgB1qV>wB6+4pJ{^j05HWD@9rCXC zeNk2q7p>89YtLbPh}cZi8FU-tiawO^`9KErq%YAwMKv|v75JWZFYN0%BEf7>^RqQu z|E&M)Xo)8-qlch?#guHMJuT)>z2tbrr&EH9P}YCTDP-Gqdmw)PN})cjN-3RT<@HRM zU{DaY$nh=fwTQGI98>zwJRgc_^S!!HOnyYb>g-~u&=$P4on4__<5?WDaxTRcTnNnI z6iL!pm5h?--sTff%MAEFi&ZZ|1=D8|P><6}!?czmFPP3{7J1PgvEZ4|+zV6(+EJw}is2@BSZdQd_EVaOjZ*2|}A z_3a;#f>ml5bVC<86QiKisM;=+JnDymYHB|?77<(arncTG^ZEEbM6rOw92pk+A{q1W zq(u50uZ{%=H=dO3FmY&9z7N6KLr*xq>tIdzKnJ0&h+TW&#)mE<%VC>pKVt|e3@wCB~1m5gxRsxLtaR0 zXDA-MXjP>w+Bgjr0uyrZsR&C-`~PE;w3!K@J8aky=-P7C^9X!!7d8!CG2L$SQB}%> zU@5i$e9vvpIP8@zY~k+Z|0B7fDBYRl$U71Ix#yQ1GE!tNaT1h zrV;FiT@(VP%C_;3^s^HKdY)h<{dCo~+x$=t7i!=sf7fV&B)fU!Lrg8V+dO>}}Y1CYl$Jq>O z%PU5S4_ofAGHu?Jt3CVBC0(z9(6VDDEQo0l1{6MO86ga<9}*qLB3r{TX%?W0Bh6;t z$mlNub9jy0jQ7h;&sl_$+9i8Ymzy|W25)MB} z-Lsg)OFPK{M}uTgiYc~*(z8p;pj0v6#xo@ST|8nA5HmcM37j*#dJ{Z0$#^=_ z*I-?=8&G*!gI066-AnX%jtaJY>TkRJ*?8yfw!SWScbE6)utIQ>!`ZH$ow&lI(I+UC z7rL?se;aZ6Gg5tc7a5+;ZX@`>L9}W4blPh%)!iH?x27PA<=aR@guj7piHO)MpON`; z_iFX&dqbEO{{ z!rzo+e$jHY+$z=6S;PFn&hKZ7Cccm>trOHs9;*K4qXDV5<t|1Aw~*c>T~e^Jsn z?N7?-vDXoOCmFM)l?5`O);q4A=x6PQrD5L3gbf6d)C4g@Oc(X0izDcKVL~|rJVJhk z8s^^oZgjD()L36dYVSXB49k-{Z~P&BP6BCzK)tX{S0oJ4QE9h%2YW@$D3C}>5@$b< ze&F^ud}pqh^^I*>hlybQct55Q&B+ARJ^9=olFW{eu6s9`qgiiE2CuPZH9TEgFx9nS zG!(+I>{4r}w8nX_kPjUM2Zd9e+qApLxg5;c9ZC2}=PMx;CgtA1V zI#FRZ=)M3t4Td_U1Eq}2l zwECP(LvCdSRd@=_~&0arj`Yz4!7Y7;ID z$Tlp~Nw8;`M^d?MTrdo3sxA4;vnYK0}o+23r88orcRdG1Wcd z?Ou)Em>xXmEv{qI7tl{2bKjSyr9}i^p>TtQ?7p!3iZ>)epGP>p#{l26CaN69x%Y9R zO*%p&orN*V;gvo|6>rxcAItt;tMmHL3KdcmHqzEuKan-HyIw85Pd#zYl-nC?bV0_| zRNuNR$d|ql>QFGj?k{n8oO5NepQC(}=;KBce>(6VS^ij{0Pq<%xuwliE%X}A`m#8Nz|v{!Z1={{Z*8N z&?XY&KsfMClV{Vs$Iz*+cB#1Y-u6I!hvUDlg`)DGrapw%w6JWKI`H)VpZ*aJIQ+@_ zjBrnQE(dEuuNI+%4$CJ!q?>7`7H+2-xu&>>9dHqPu&$ zI%^`A{uU-hUq59uIjLzf}(*9{^1 ztH8(L-QEtmMR*s`5x%SR3khFHvo)Yl=G 
z#;$Zd@Zr1hslr}&l^j{)5Uak*zGrzXd5-VAJhSJ(S*$a`S$t`@E?G-?4kcntoFPX~ zzC{Y1=L+_G5R~9w@Fz_5{vGHy&dQQ=4!V=}N=MA~L#k*+Pub)JlCHWb$R<)%Ui8Bj zqBN(P4ykR#bU8e%X&cmr>`?pz&qpj^{HU(~jHgrlf%ohFlJn*O{@C=mI!a+8pjUK; z{Wbm;>PYA^v>th%7j_4;A}F3 zSPkmO=DY&Wuek}p@FCr0{gnY0>Z_2?9T&X5V_bkfg1BM^T43Xh@Xi5GA4}Rc*PoWP zI-QWg-^??`iMhuis@`gve{`V&7p>YL0p#{wAn0QU4{Zn7*2yFGrp4m?cq(py$pMp+Xrlyd`4RD)B zvcp=33}nYW(8KwpI0DAg2IRR1_TWA{{+~erP^&>4sa2flDO+D> zlhq-)>-?BlX&K9X=L;59vD1L(nswY3Nn;BMN{r*j$kFLKvwSqzkmtF{Bq^F+8}XMs z9KsqR`aHjl4#!HIHP_mG_ZmuE;YnZYvTEs8>W*zwWdKs4AT4H4vZD4ia zq{k3&=b?$oh-->tSEV@zB-&u$@_I&y*+vZn-;5DajYzBWpRMEr|&mZ_Z=1f z#$;dS5wMy#jdY@;?X4Q2#?@DUrSNJucaJl^PvErR9|^tG`+{imXEdrf`}@9}Jy`KJ zKCyz`>0DC5Y6zx+mu$BfDe>Jb8eEotBeB-h={^8!Xc~z!$#&sILOCt?OZA1LIXn9Eb?> zd*|JC;I!71J ztqQK1K(17%nkoH1%pn_W+K4oJEQ>gn7qunV%_{7za^H=>R33kR{o5Sj(X_b0-l~{7 z(9V=NOu8P3iOJq;qRCaUwNEH6skx2u>nuD@2-Otll~!euK0^|BU(@>3=tB=b(hSVh zKITup{ z+TH_eMIQhBgZekO2cqsi9wY0PdK%3;2m0QGYDJub=}dkL`ya&7an-Wd-edteCN8Bh-h8|^`?tP9 zFgrYjy~I#nYwKC=7B^&flOeCm-v09@(O@OSxk=5WiE2o4mC$gp{<*yc&W+*KhszrA z-+)U(Rf=f#?tYEOj^kq|y1ogclj!G#K_s}RGd6g1Pnh%8SPSAyF&+a@%~q>2D0GA+ za9na=V<#?Pf;BN!;bRsz^0TJJOGY2$|8)-xCUS_j$ z3wqzsNS<{{-m3iCj+-cBS$IIZg7a#_=tIY+vyr=%jahsrNl#0pXok-k3DrPh1ZwmS zwLmO2yYYBO+I4Eg0 z!J^{cxss!#)yPv8Qvyj%WMSJYEZlX(Yn(6Ndt+|34IUeObb#Cl4Hj@B_w&;70y-JhY)2EmGVSJZ zTbQAi?s0xyI57UQ8iGQ^xOVH5NFDATpy8W{7>ZgtCWJCMq#7k+xZPH6jgWB`A}JiW z7b~>Ha1jyuE1D>)-ZCH@YgW(u=ta;jRrlI*{ZFwCP46^3*pl;>R|G>^4|FQl#9D)= zhA0CgBdroI8CLT$c!^htyzdFgm_)pmQbUF8_+9@E(>4cQn-%j28?Tisx{3QS1MQM~aYMMdsah0~~g zxZ!cJzJ1Y+dcF?B3W^i!Ds~^zMw&4XJT7XmA*q_IRvpwgsJLnU+6lt8V|z7)^cylV zP{)O=CRKOi$HQ=t?^!=W<-w)#Xz;4QvL6C-Wb_a4!Udu0$JrsUd0$LxIF~?qHG@3? z@UQ5_2QtL5%GsuKWDtLS->Ljg{z2D%G&<#r->*a|7nKiHPXhixY%k77^b=t=DO`=> zZNj3Kk)H~`o5@B)4Bw-vXWGzuE*fQ9+zM+f5EV1vp0RrtM;B8pw0v7>XMfh$+#A0O zEYGR_y}6r@@}wlff%?_t;IDgkxOTvD<8zf zMI0t_duJS%k&u$&itf9X_NXj5d$Kr$PRk4kVY9fsK%$-fS*W=rFt;t+Z>xXJZ z%4a3?oG#*YKE!BtlFMi*>3UcNa#jNkGC1vW8NWc_DlmBh;kJ-REL~Su$lNsvXm(>3 z=>+smOC^mLId6YGfTvZNfBy^n$IOO7TmNB?mm%!?9B>)VqKbt}6b`I7yt2a=1LKX% zGAycRi1U6xnA7lPm(FZ2eYSZbDWsT%-Z-K06TU8=+48 zWuGGs=#hI;{lDW5pW6Y)^9lo^*-+IK(@7#{!i2~kQRm|qK?3f7U%wvICIo(N^%hGX z#q;l9<4CG#P(eTVnTcfEAtGgXnX{=X7ZJr&VS-@K~} zQZ)b;SB0X)TFkX)fd66DjPP#`Q^U8~W!H7#Gk3h&yCN|5V{urx*Cf`wIOPpW5Z7MY z=!Z72=e!W3vaGv5aJ%baeOg}dUD5gr%beiH>bULuYp4bU7Y~5~x7E){&s(^P?*#!I zhz-qClg$rNU6q!Dy}>wT7`>Y#n%Se#s-5!=mI}?tCrN}x__82h{jxy z^F1kBv>jfyC)HCT*aYZRzB}18s>bF2QvS;*eF@S6Y@)Ru$Ua;;f`SNgZ77KC(N4d? 
z^n;I3v_XM=VfYzxtMH^q=rpLWtsC6mx%K}1uzt3qdGp%Vuez78e^OC>$-Ymf4gAV$ zmM>mG%aBlByCPPZ%VXDbT_w^Pk+Dfh!*cL>(&jZr$eybQVdFJgH!XstAzWijEG1Qv zX4DU#P*Osc(|37Bs@96}k5FxYd*m|@=XS(Oq`6l7jGiL+W1ehhs254`3O1gGGvPhw z_go_Rr>v5`;5+}|ndnH*aPjvn8wBj>h612>+0j#sbvJS!cuj6ghf{pAk3SVVc}}ax zFJloVah}wX9xC=&ipvH$IJ0S=mGrvij~T9y?f>FlztZT@5P20rSh>c9&UKec`7e^` z8;KP^C=l_urvx|>GFDn9iVy&j!X03)p@&+U=p61N;~;LE{_=<7N><0xbzoSt%pNL{ z8Erh}vTbs>S(FE20Vz386deL%1oOm{bO zKKA1;vZ?JkXclohp6XU#Jy47ZZcv*ZB$dACu!ED3clsVga9517mI?Ddc#?|Ah*>MfFkI>F z`x&2*!SBnfwr**y3$%jJiWc=30tJIJUH=N(UT6zIN;KrB@UYE>lW!`3-J5_9SWs=< z<9*$JqaUlkEH@;pbbO}ipbz^;3eu5k# z0?fW$d%fjOZ?@9KWi8UoMa~rQ>)yadtQ;==7C?p!gHmM1nV5X%PYL)&1i<8T79OhI zXxXdkc6VBb@B^#8n~9b|r1}D!rE4)wX7SkR&C$*L2U&%0DN#D#SOe!G^Pz^RBix6{C$S#-gxB6*5L_( zWb6n2K1i{83?&7Jnnh{V8RA3|zY|agZOPm3EuWstcI$6+QqLK;(v>8aLgSc@o4HAC zBvW5R(i2ynt7Miz)+SoZtM=?M_@H>skamQRgXeb*8pKL^<=%bkcO(jen1usX!sb4% zrdzdk8g*&V=ra$mCB7Tx){fsS!6HlyTOvu~xXea__p5PCC8N&cnPGB5l9p=Rx z#>AHp*;0Kk>Mt3jkjn{?I^|sYCUpBfbSnY-tq)06lvWWQkxpajUZ=P?P+LHgC$#E zQgu}Eep~^nIZvPEggxz?%95n`q81ltWl`Nfs#1R}7|Q-7?>{N%7#lFxJr4SLz!Bi zE!8mnXzCs%h%9bQ2M>h{_|I5xII&gC?K|)dT33bJs&$(BC!I&A-sgNu3rmGj+fv@Oav(92p(a5hIEjIycO+g{$#~>qFl#|zYW+- zj8^3?gY&(!c<&Hm@LxBx_iR4YyNX2J^YZeQ)S?rR$9{||!U@~tBW5zwkfkx!6%Wer zxW7aHo&d0yu9pZI(!?s4pl|`*G2sNk^rIY4_y$T#dFzrPB{a@abrw;aPEG55hhJqN z|HvMn=ck{3)f=oRWL01#~bLEL+0C2+s$ z<0Lz2({i&bVu>ZOPUv6q9Zxp-BJO2M?|D$l=W2}2ZyL#Jt$rZ#vn8@HPdv$Xl>fb$ zh`juwhdm6t&9V99Y%C0@^2IKd$+t0CdH+Oasbf$3-~xO$U9%6*TwB4~rzw`H;m11- z`=);ExfCcWZnUX1!7uspT~6byKjfEo43Xv!td0n=I@v|0>xveE$DlJ8a%AS_m=|Hm zOCL5?%nzWmvd0PTUzKFb*x|(lLgRW9w>%d{e990EpLNO%CkG~rNptIb`&CZ+W%J=S z4lYyxrYZt?0r2Z-9o5=HIzdF;e9LgUZuuyg$0gyT>-23p`((Oib&vqY@TiZ@u#YoT z^J%_ePvd4<-+3@hoae6Nh=De~xO1)|P;OK8eU}e{6!X;6qsO`$1Mw_`T;J{Pp3rZp zkRZ%atwr9owqeFsuQ4Tu5>BRSPE70eD3|4q_EN?c7KW4Z zAVjUJ)s91rlFRWrY{lwA*)5laMy>1Lv)Q7TjfMeFza!IO$BIy_tS=Scm$QWat7>RM z_@}wuE;>oQp0}}SLx|OwVLkyKHDe~M_3-Dw1+Y$qODT{ z&hIBxkGdnADG7Tz-)%6kePlEgYIt#B8W?sv|CNc0C>ohe$pZH!+DD3TMxG$WjRM6C z$)J*C+LPcAcT`9vU*y9w@XvY+;FO=j3Jap8x&p@~giAq$us^dKbLMgPBmTYMj%;QL zAn)B78_yo9=MbSSNIO!4s%XpQTlG{wP~rbx`4X#uUP<( zlN-ZN$x>xx0TkRCa**#z6!!(Z1vMn5;@l`QU-o*7PYg_KK6#^W`D z7-EDpI$lpP)UK9}do#QOnp3WqCV2;#Yq%uk5vcB~R3-E*P2N@)#3fR@|>E z?Y3jeB?6{Af}@~8E-?)!aV%bSPYU`k!=C14*L=wpcLHNXEpR@{A#LUYulIYchw^Q& zH$$my{XOH`0Rgi5t`bt|jW?bFP(i@TzTxVau{fvCU^JIPKH#{-NoYD{j^8wCwWM}z zAmLbHQd6MPOD7?Mbi!Q68>ZH2#{*ZZSc7T0&&Zl$sV6Rk9ob`AB<7?gqyAWYONW2Y zU}pf0Ds2i4sx;dqxO2=ReEtvNj(2P?X$w7uK5ts5&*k#`K{f+dn1zX|<#a zgA6#E6}qT`iS!IkM_rg$TRHWTBVn5tr3gsDU6wR|_FGX=kUwZ;)e<-};Co38bI}kC zMxIZ1ew7X1(aga)hw?+D)_IvN!NhY*-O-*JsCr{OM0G8Zy!LtVZFZ*vf)u?kcjO{q znpoYZ{HO>f)mnB2l8VA)%XCXlAXStwX)mJ`3E{F2MLOW#*!tB7=9kIVu@DdD^vcSB zF4@TH80QAdkdxn+WX#z*qAm&Y=g~CH(hrsYRi7lt%=$toiN9a?#(p#X)i+TqH)Jp0 zj3>h{){d6T9e>R0Li4YeE|tWbL?bxhf7F=uhIa;$4LWGG*B%*w>XGt1zaZGdPw=v# zIfw&x*GR7v19!`!TLGh9=T(ua5hmoCjAae5`TuBK>@Ls z0hyoumg=bovfjMP6ST1!%=K+{3wtE=@y>TS24OYftK6?hCs9*FxEyQh$E@&byKxjY(K2@>c2LN^i0 z+y0j*Kg2p(@{&QuOv9Jp&mY^r11+Ia_yG<$jRy0uVbpyKICal>NB7Kvom<F2MNE@q&Vl1E!@wT+LA zhs2Fv6I~X&=j++~c7Ku9I#|r!zT+I$oGYe!@xQsf11#s1q`1~}NzNY-c<+dRN7M_( z{gC2#cCy-;1-w&x5m` zVv9q#FkPYNlf{XupvF;!s6nv zZFM7Sxxr`u_>w31O1>14_v~;A`f#XOf!OWLKfm&x^J3x1bvXJgTW<&8E3Ckrw*xvh_;?>()j$9ZZ8>W8fwZgr)1x zZ!Kz0w)+g+6@i5obu)=>I+EqWx3kO(Hab8gIW#h3jTXD@cbW4XcQZ z7pG5|Iq?IJ8c(C=bF1RGV+X1F6X+P$$1>E*Br4vs>@-;X+MfgIp<5RpzKyf3aN)OM5UV^r`28Wv{ZW~5uISMShXzTMT zDZkc1Dw2qu4bUbb!PA#sCF#F)h?OBjG-Zs66cLF1l26TO`%qB&Sl|G2_>0?nLzv-5 zLOacdjOw(ahPC7CfxHiFX1)I&*3)v|n(2+K>~`Itvtfu0-iY zXYS`!BjybJ)$0aG_YM~glS4Eqh4#reID!`ES*C}%-QX#094ybagQA^$-SfxRt5tt3 
zf9hH2{fP-^Gj%GeXHpyBI^EcqNKsHy(c6ft@4aYg#aMyMvW`q))s<|?JizF85#06! zw}q}3h)b`e&(&V*2)kIqoMf`I(19F$_Hy4=(a$oDv7w`Vlr&7VI{nqxU+wZ(OAT5~ z(j(-(@_f%m>c#qTAfdRlZ_F3tEwQnFgMxMi;5wt{*~HRh6Bis4_P1qh-d^8xCD*0y zUR8p-7F^M`R`=7P=Qvdf!=a9xIRbO4VuIzy)Y)@kTOilA?%UeSZB(kM{jEIa>&z_d+eMc1=^FUtQFe3>dw|jHcdmv^J?^5u;-jz1%!j>|!Z7dP zc_Fj2S(b2cGG=yv3BNkLdqNQR`xh7)e2l0G5sNDMr6R=cu`V>a9iFfYg z#^;^eo?WOabnuJ(4#;(SVmLLmiwQ(%CE(U5`hu z)6Y%z9K_9dXAvU1qhO^wsr1{vX zO2|V1DZIml2S$`()_TE6`1U(K^D^1`#nOa1Uv6yp{lY_Y`Xx|-tL?{*4Vq)t`ua)y zX<8OrIZcv;xDWmMHHx4h}cZKFtzJ8Q9zKW;1|cmY~xu^HmR zJ?;|1J@<(xWyjm#TrpK)j<(UO>ckfioSLn(0W9Z^(woEoXpQ16E-7qZb0P6O_n&#A z)H3#@B@mp$y-qJGU-m|9*{P7aqVQ*0w0k4IlhoOwe0kl5TzRQ)f zu7JT<#z2|P9!b851^52w{Mjqbr$6=)@-;SEXyV(3XCv=%ewE-A=won{C^El{pS;(( zcfEMwcbuBVT!DiUB;tMTuTMd~0Q0@X{g|RB*pwmsbvjVj^`Xq#!)jIZbBwD%irSQn zU5^Mk9D!+N`BdRykcOvg3Uu?a$7S`1PSeqOIes|FXhQIjKkOa44?>I#pB?}CoL+^2 zbzEOUp;c}A$PshefoZ1{GP4H*-NQz4hqIWHfG4W5Y>mLW(Gp+lT?=^^hoV@QK+8Fz`Uw89i5if zvpAO$Lr-qLm9E7#A4$Wp&z3^=MQ z(owOFIY?ywhgcz7Kvzq7oRc-Q=W0eK4P z?YZ|qB=~=8_RY2<{-0Bj*!+jZUm(5|P}HhxnO2*4d80GAnJ}HA1_sO`@;To4I}2Nr zc+wBm7S%fn%MO+8pn3fH_imbk#Wn7Fuy$BAGjak)!{CMHlPICO+fmuuxHFxwFZ3Cs z^#mJC>sqne5Jdz5DJ8)YxZhVfMd1PX9nBIU-N9EgZQD)jzh<)cWZ8y-H#YIFl6^-b9wmwYM#Vc5l(dF1Z#`PC=&TZdulld@o? zJ^Ny&0es%JOSQ^cxG?ZUvi}J;m%&Y3Rqz6j-NI%<_<;biVHji)MU!Z?~?od_NIXu&4<8OFWl*sC*9g70u$(Pi>; zLW{!m#?>#-6209*pHOF@e2MotbZ;_p3*CB1Rz#<8R+GGqG!nnAZA8=X}prgz0cgwC-O&{ zw#IIAqkO=@ztg<$YHH`O%6#liaje^* zcS|3gQjaibdgIP0F}gn$qRtrZ~=PWaR!?BfeU{Cwyw$h)F*^&-+SI)s`S&X-2@uei*| zrVU%yd!7t4iNngFHjPNu|2oM2k2-k^cKAnn)a>7R{Nru7Sl{qGL->Z5kK%-ksn~f) z4@j#h_1?weqLEoT>~So1ypbRd62A-=#i-h@52<~>5GaFUhy@uY@H!MOXg{qQ^>*2c z3LE9^;Bt007@#lGWc~~6=1{jqGp+|=5`@|d_Cetw5x?|%@G0gYHDZObK%+!apBoNx z8ipLoOtFU@U7-g5IES6>Gq}hr|6spYo=wk|Rl ztjPCqW8jAPf$R%netj#|;z|VDhQ7%(9o4WK7ucd{Ceu<%0zMxn!Ams4GKLg@ajax) zG@E_w)7ta4F7f;%_~;3__U^oX#I4JImOp%?zTA*pP*wGchS>f%LHAPy%Bl-d3@yYK zPkmG}Mm3z^_?+st?#1sn=N9(nY7==@^EAsRdjh`g9|!OUJb2}5^($!LgNVip|3aET-QUJXKmgQgU4N5zj4Jt zs%RouZlA|U3E0E4gA4p*px1gx){9dj{B@o%uWi(X9u~EiRQB0SKLp9s;a0m76g1^4 z^w_|LDz(}r_ODCifAkJr-nXmfQ_d9gGi7%BFNO7Jty24O`OG-sX!=aL-Kp7y2^$%* zXA~c;Cu_I*h1rycJB4I6$pwk^6D-8a2h*W=uR3NXLY`l*OTqk?}tuO>I{Rmt}ercsQuIW=g$dEcsr^973wkacMH`pkv9mSnwp_{ zbcAcDoY3&%EK7B7wC5MCI8?V(cB=rHyB~d~CLW7(Q>L5|ZvYFF+QXz^772Iu%b}q* zz&yfy?{_81@+7Kih^cSuZdAP0((u$@@4FYg7avJ^EwYDrEw{eHK0xxFVjWDqrFpq^ z^ zep}9~M~9$eJ&}=;KSt;qbB`qQacQ>e2Q2E6DNmE{=!+?gR_@EnN~|k!alG*gaIfE$TNjUpMs$r>YQBS+RNdDymTY~1@|8Rr zh_R)C%DVb)Nhg|gAA{L0!iZnIvTitfQOl0AA|~K1L@kL`tn;Nga!A?9CT4F1Lj`Db zdlh+(#P&W`5TE)G>YX*q!#6G+7!K4?Q&+l8yeaAz zC$u{TebIocHkxI#zL=Z^Xvg=`h1qTNICy{IR~5s7R~|9-@D{CZ=E{u4z-Y%m2p%Y; zT{aD`?A#T9hUecR0cJ7-{AppWVaQOb@cFZTqCf;eZlwG6^JwDA^02dj%YF~_QrYVW zE;c0}ScSa>9%)t;!{#?(tA6EvN1YF5D=bgi8Nx7#-1_~ztFYS=Bv0CypS~mJ_3ucq;W8QShIZ^&kh0XQjkZjx@WPpY)LR} zFxBT;@VvCw&G+Y87U>)A%**(c8rt5u9nLaYX9|I}fCIi4ZBgofD*dX(=qF=yY8~KD zcI5V^$a#AY=7d-J&Fs*M0J-VqMkz;NZk$(mHk33Ja~Dfb-IV~kpT%5%WrcvUoDCDkDP>CiJW%NGy89LU|2$zd7gl0u~pQ{!mD6oKwtlpGRm20d6l- zUd-{lGW^)g&W~0Q>gNK6@EBJ#CFE;0xy--@HR3F%zMGp-oX)D|N$}*l)>#@6Sl?b4 zNm;k zl4468!;$+eHn5lfg+vOk6^Am{$I^Bk`7uUX%Rb7yX?49m2QH&{FU9huQw{3`?c1KT z^s*0~|6h^ni2j3M=X63dLSrbOVtCsTb>a6H5E4WEqrMY$U4Dk6PX;h+Tn>>h~%0Mze2-QQi!hQ=@zh@&ZX}PJa(%mOn+b-MccB&k2*{o*{xsKZN zoIv^#hc*Kp5`au*UCBBn99(tyo+ZKd*$CdBJR(k37^z&aNC zIb!!luLw?(9CGSUgARI%xFrhR>v78VaDY6+vOQOJtg{19T*=2l>Sqz)oj!SMh(o9_ zOdYz~Fs^3rc;&8YcA58U8Mw!Pfxd{a9a^%D2#9m|9L~6t&hv=i0o4Gh0Xc6X04r$S zI5m;As#Z2h<}0LHfRx~cpaxgZW=6m9c}k=m6e%m~{5qAe(`smxpdO8K1?ey=%p%AH zwtuLQHyAF@XQ(A8KpA<|R%2v~)BkTF=sY_YUq|?XZ=w1)*=`!WiGj1RrMaL#!|w 
zsWHWqEUVX`m`pe}qy83f_z_|>g$zHdJ94w!SBeZr)6V5Ays$c zu<-LeJM0YvYqZVaGkYxd%BAX$WJ#!tt{{e4IeeV3xG7t3EI%Lr76C^j%X=J$P&>y` zAB8m6ZmAvYW@ar%lWL4j2E6~uTK;z@eM=an{%1VR zpw_>CXd{3lYq;e*ZThtP@*Bvb2wJ+jvGGoY0Zji^^^0xf;tKf0CxHpcZNav5wm^CYvkeVN$+|k}&Pax}3M-cwbSAZ$sb?*p^buZ_)_0TAWIu$8+4)GFi}zqWS6% zRz+$h2p{56u!V6iX)?KyzLJTcs6JM5;_B0u!?^_=!>IQvEf=MVAK7ZrPsi>F_p85a z+R6Sk(Qzp=fLqTjyvS0iScxZ3hB=pjVH91m4stcAVfoS^p4Hm*g(t`)YbwH1LyFP& zFAOh}g|tS3xavUU%YKG6bg&1K59kC`Kp{1ZQHQhHLKJd3qLtA$bt1bXbgtdK9VU%r`62Fn*-Q2Ut4zt>{4{jcZ!;u zm-t3npnU+x@ffbGk@qTFmgKr?!kF`wmko1;eWdb$OQkX+$r<&hT)7mVL7Js7K`-vF zew*>`t88gm`FbK({N9_6deAr;b7F*DZgt5Ww<6dN%bsK2Cz)GO)zwBS8}dhg z$}nepY=^XZbkEi6*PxkYVkja$zHx6m_B73eEJ>@(G;HrY+{D{x^o0wOEma*pBqbq? z2k0r!%yL*D)TddVqtO;wU0g+_#EaFadW*fK$68Qf2+dwj;1~V0t|=i-;2BHy=zd@e zqkSEggckq8!oUmj#|6HxzZj677$E<6GfM(mVhbKEGF^k0H30u`^)fVz@Yb#{!84)XS1)9X~zbg6pwON~aYbYTQ`{R;X)SGkE4jllBuewBtsHC&f^rK;^ zZ=nwus2p@0SJI3hPJB3Pr@m&~i2s`y1!_hI^n<*z8Oyte=9A7^_T8tOPxBHCb$)tc z0bpYNtgqHe%#i}#!pp*e~Nu~h8oGO z7D{x)o6asCZc~I-UYbAY?)Q1my^fZ?F3H~BYx{|tzCH|Gfy*pn03-lXV-*Ah z4QFr(r#z28ZzoY2Qut|YPCbJ~ZKD{0Gh2!sB=54rc0xS<*g@?(CIBswRaoa#0)+Wy z*pPx8d4cXeMi}Fqhw=`Bi1r?Ec;XSPM`3;;Ih~_%GQf~G5##z9Iq0oNDeUaLtRK5V z5vrr+_YER$1DIOTBM`b^tPk;Fz8;GCNET#xC6GESrwm}MdGIIqLvU?aqV`yeJ_ zKxly1$@Kl3n%PlYJX?uM@LkR4I~PZ7h69K2pr5p$-LHXx$e=nC+J44BL>#RzIQWLA$(h8W-`(+eKvepF&KkX;4MPwU?=@URT0gG*26Im+Qk$@%Z1>1$X z_0Ds)Bw$Uf80Ob?*xF;R3Kv?{y}8%u3knI0wGWewMcj%X%V6TcnX7^g#hjE-nZvs0 z@qSCd3`X@1BG?XBb(n2W{kl6Ko=#3y%;K+{w+`{l?z}nodCr&^(FMx#CMKfW;F%f7G7D9?xoveJtS9TQGUF`> zsnbSZ2|;-oroq(8ub#MhwUMdedk!iF=a;HlL{<2 zBpqs_DsxTcI;};s^<9XG30D>R=?8zg{i>ezT1=P97g6m9gUkP?x=C~Y@sWKf=!Cq7 z8#et$Vx)IJoU`viv()Tf6hX=jpQ-Tj7w#5#_?OhA{G{ehhk)9fOkna6S>a3XKpS%>K)>GzD6@a3^XRMTS|O& z`xc=}Wr7G$)*wIuylu>{<9oWxkFLS&{O`;k{2RW(EAvofC--L#n*yqLf~ekvHpSyy z*19&%aS_vE-G?d3VDBdn;u)Ukzw014JFRwTwqnH6B`ch|pq}_VWeoP^Sc}bxr2t7b zCjl}e(;nbF4^+PWO||>PmD^_~HzLg-D@eY!(SysmsJL9ED80hqn{+Ioig9xWooHxK zd)<}HWAH>WT`C5gdFzNsk@Bg**!~v@X4y};yPk|^+SymAbx>USneS22{Wkm#HA;hD zs+P6K&ID`DIYDKbOShEm-J|2scfUD^bSL{Vi@)Huf~QK!rNcQAKJ?m;#6CQ^F*q1W zUR2)Cs}EaPRh3X-&Q-AXr%U@`V7iSKh3VwHT*%;6cL#c8GbtKUHa!Eyqi@kF}MxJxxi3FWDquW z7G!&v8lHA<`SaeP^vc=O-2qu59pi_`<8l|xCq1iI&&_OZp^GvRWcbWA zJ12qjg{H0FzXG z?5&3SLi)bJ+1)}qTHP9*sin=A6!{(#jqVU`p7q#r>?eXgQV&pR^`;qgVwt(x%oJR+ zWv|>Yv1^KSQD8$DOd)HU%NEw_oj6tzO#2cNwueH+<-cZsA{RKF7~ z>-9{-m@54e8`nI>>C?oOOA?dB!xUpETU2a0I+SJP-t*(6ZC$240m25<1GU0>A>Cu2 z)0`*Ev?J8&IYvJMUtXF0eoWDJ4&56dxLGZuf`XgqC0#frR+glxLXk-tV z&Ehk3ByE?|-(s@lPtX@s0e(*TvJRUos*3R=?)`@dl<^|-l4Ku8P3238MjD%SZ-8p;4 zNG$+rc?^Hy+u6ohp&DI^d_D?(K@++A%gaS_R<1PqOb_5R_^k)rM>=IFL)NgkrMI{j z2?WW)p)=lDY)i?^$k08%ilYe6qAz8+FR`23V7r_dS%GiTTi%)Nr+DmZsVAQu9)0*E zRh-}NFYyLZc)i%bP`9x6Qz7+SHb~hcp!3Qs8FBwp$a*w(1g_j)N*dcyG*brDM`=uVS%OI*qm!bQJDXp!Ulb)!Py!+e<8jZGMa6Bcce_6O;F zG>MJH)8l0m&)bqZM2QGH>~;3Me)tWHF=UXNXw@f|-3m7_!RH5luZ6l|beoOgXD?#z znqEUXU2_?_2_cu0-7>t;Ang?BEH~M_&vTO5f$jdEd2W52B2=*kxNA#C)3I7Yyq@@; z!u0p5a65A8P)6yab=`#qlMLb`tooyXt$;<`*w){J(_Ip2X<$$AOJ|+>NuZi;f-X`y zbofeZ0hdUWhN;Yd`sn>2QBeULA=C9(#N8xsX49Zl)Iz6`+-k;!O4N#!M}b1PtR|tT z>arV-;qy|9W7J4G4}nJ!uqg4nY>p_K{&4@xbi&G$sPA(ec)5ParYVE{?<;-xlO9v- zt{kxuUFtGk3CIuNj_!9N);@%c;oJwG*1gADrD_n*4{zY3j% z4Ex;gq$?}+S(%Iyo98JO$9gy1Viu+Crc|dXY5Ysyhke<2v6;-zS?yB|9JGs)LYME4 z4)^Ud4P>;MI5|HpK8|P=R}hj(rsI_}+ICRuX9Lcopi2ubS0{H%t-JX9>>kAhMOP5* zHMfO?#4t4A_b_4=-Fh+F@_5^{AofZUOa?n;U1@9EubQww0fs<-wzoXl&`ZbT<(TaP zq-`n%<;U2LEghv0t<#a8WxU z(}^M~$or=BnDet7HY9;gq<G(~RMbft7XUiQ-v%RsNhZD`C}qH3wQFMch?Mov!EC z>{or9iM3_1*K9NbfvogHj)kmr&`}X|Vs$wbc4fUSIpc+rhNg`1(2V>A2P_sV57`@~ 
zNUD6Etf=*dA|p8M0r|~L1B(eP14mU=w=vJ2Ubp^2?Mqsp=t|Mhj1*!fzUqWR4guco z5)T*rd96W$sXjFD`70~+!6tM8*Ces;$?^yn8p7cinoB%cRvYIQYfEa=BRy0Hg-yPH ze#<@GRW}uh5Z_Q_D@+EzhaBIoXdQ3pdEroLVR&@`Q$AD;jzTw7T&4o8IoweQ#30eS8Q9-Zi|sM z=Q~N2_u9zuuO@%JS5wv2F%<29J+LM?JQV0_vWPvHK%uRq6A6(Ol&*9)isBvog!ahn zA^#K?9sWP|JJkX#Zs>oMU zBi2tuT!JKG$l_VG1r=XzAtRRV^f@BuM|Xp+9P9qxgwb_#{i=$WB5Dhi1$tZSD96cn zhjZGp&DoY!XtGGtR^Q?A_^1BQnx4nBa=6J#t(T4f@iY91Bmvo@vZ4T(WgEe}XJ%^R zW3miY8eno00Xt6ai}xtYwI5gw_>A#?TKqqsf_4K6Ao;u?D)NB*<{v7*10&1xUX-eC zA=3rBkNDd=>eQdj37((mjysubt97|-zHQR z_sgZ~9j8F9Z<7!i_c2*_Ak{)3X1kL9?5%*?P0j7R*%+%KCCX2v_5#_b`V#>*O*Gf! zU3yxw<-tjJ8#001sZhtC0Z7ch;4DlxhhcqT2jIjm0lHx1MA}jf(`P>TbL`fo#%DD`3n0cfSZ z0%hR8{eDR3bw`0Dhe^7++faMB3VN(;MeKNogxOU~Wp}Qg^^{?1^X`1i<+;{MH(qVv z3U`t~_J+Yn3M806X}1MlQmwy*tt9=b4<@kC5+%K1Ic@HOV@0cEh(~3x-^YH0hgT>4 zU`+mUAi#e{Zkr2RTc#^_Eo?d+0l48odoeaUUQb>OF=upg1EazOJ-zSx$^S{^4wzrl z#2|^vHu(uIyAj#M-?Cx7<9#RTY}IqTnHkY)7wGHG&f3aUXYQYo@%ZW~-wVWzLOo!l zv59zh@3&+#nKgHGZuAqQyTcR`C}dG;negm`uF3ig355NN4!&$bSzgTTFE9sfM9r{$ z9*J_0s-T-dJ2(R-Js61SH!YCvvuGZ!!e;Vg;hLjbcF?xxQRlWjM(ZSMPPa;EG|m;O z-qWEjtTA!>qoKg_Yw(efa%pviBD2~gLsa_XNql^WVrA2c|8Y$w6!puy&q^1-unI*} z?oT-2BbN%wA1^?RNH>UNWyV~!VTDSgMP`jYZ|(v zZ_f>8rUA~_1Jmyse96&vA!p>!I}@89;eo&YMiKp$L!lq`0tH;OWxmn+J6KEHh< zy&_OTCCD%Nb9I_JWO`*$M;`?o*t8!(vi*)xuRj`Lb%;`gJ)WHPVh<&0ijK&XLxir0X!Z>Sd7vZmi2me~9C@5Jd^;^C3-mUZX?DlaOaE(nA zeKc3MhvA%xNf^our|p+widx1j!n3&k@jFeGVTSpk{e2p_ko|ve0snhiVEg*dHt_3* z@29j+;}V$|H}Z>z20Vx=qjTOeo~jKHa$_ajvxqOV(n!KVSPpY>&S)fNIT zjQGd3GW~2U{Uz&D0o2(VN=hgY^GO%>f|7j3atozij`#5WG$ZIsDuc1M2q2D%S)o%J zksh%EN(dLe?BavkiHidv8A+|*dwQjAE!jnmE<&z9ynU-rwGWVI0|YN)$;#uC*F8Af zUKsXQY@u7h7rsrbwmt0Kf_J#EYXeFVImgLGOcuYZ5b&*}hq_4qT8?k4=(hnruu(N; zX}byC>Dz&nFS zlDH6N=9Lzx-8>hJ;q7Ke8j#Mj)El~(Cy^ys+*;QBC{Y<}gH?F4fRcaYwoUl~_K1+a zIW+X#!m;CTPwi23+rn)gd1Z}O?S*xE1OrQX!xer(FV-_ID7NAWMq z0)_DUe-9fN{nX^eeE%0)@7P#p*llgE*l3dmjg!We0_v`fo<~7eb#yG~FLq{o}{N%Ck(@t!lnwd%?I^AVS@#xWR7yStuy`ovp!t{)B zIV}?Kp`n=a+n-l^i($mWl{+Z%Wq;D}W(RYE)r@8gY`{{S7dg@3GYS;!dr5??r5z(c z_pKD{FpSo0h8vkmIENT6Z(F9UVfX~)o_&G=%)G!sNUWpB>-qrV?)#%%#TVb>V^r@f z+G$^hKU;n7F$&0|B&+F%W-Kh_Zao^V^{j6OiKEXSR7VJ{plZ{XoeE#wU81Vd$NiwO zd5f|vAYE$Krf(v)37CBBFV2-bh3P}tnol3yKv?&N0Ae`d{e*XbD~u8C4ORb#f5{JV*R-j93$W`(=pcUzq@m$&9l zuV|hw1KC$MKCdU+lw%%prG5SwfRH#K;I!I+QKAgQrJ0V(8bi$ef0Z@0ji2k8^={tP zJRDZh)Z+B!G4r)Ul40t@ryXR~=Ez}~oXXvbrkHA_%iSl%R)I{3Mj#7>D$!^z5w z&kR#L^;0nCsGqz@yIlF-JxwuQHDbuVU*3gE(|1PRA!`z zc~0KUC8**8(`a!BcDqYk)SNjV%G5E9_BnfJdmJ*-Hk)p%>{hMnu57-b9F*mwevE`X-45PPI>2g4wv&BVosHR`&a`m z{nCM+F5%io908MCDqAn5#>O3KGK0oyC6D!U^@lFV%|9hz<}uurq5otOeK&dFNLiBZVT^7r=Y@gjVxIP7N;ie5MG=YQTpUD-tcvjDM-bj$S{5c^rM&a0}% z*ktm$vw<|>M^K;mj9;$zh_;38)z9PnRC|ejKS^EhfYC*Cf5ZsEr}L!6{+Zbm;M1MrecTD}a2fGs11}dmkV-=5?fGWE{~>9B+t5(p zDiXYNs5^xNgD)#a-*UF|)4h)}{9;XXD_;kD3wZ6+L!{>=a~bKsl|vSLSk9JK=J}+F z!))q5zqqL7WQDiZn!d2Hgj7Eca_Q12b(ET&aWYoXPB2m9sUJspVt8$PD@mn5xappG zdZdG|c32G+y2uhJXUM9^8B?0J-mGx;ym{Hw@cgW5-(Tv-Fqq}BVOpA9?w0bWzzM#n z^WU)yC(RY>1*iF0vtun#Rz^cj1ptgw2Nq)W3i9h#0&@$7t?;AQ(6{A2m-q{AY&d%8jo9Q~ zQ**UCvzg1e+U1@zKaclwC~6z{@0^W>WDH<=NEz*@3{k;)Ta}Ng zK-+0ncws>OdHwR^@Q51bzWt(m^Qr?8j>%)HW6rmdg=i)L>f%0f_>Uv9~z4VxqP=xwL7- zY8{x~2U`65XPY;{MmmaXAu2=Lc-OM9MokpvT`EPC?Ddr)=;&R*CZtcNkztF!ETpjdk+$vZCGAQnCJA3yP0aS-wrv$pJI`6ty&wsr4RY=7@! 
zD`-j`Zb#{EXT+6K|HN}n=eUn{1G?-@&5pS}@E1O+Prpu=iW6LQdjIFJTkt=J-OW4V zD>Qo!N&Shy>jebV_svTke>dfVeHBa`ZjWUE+FHvC2B_NE(^@=UW-L_ed{0w4OJC&g zU*WjzgM)W_+rxuHO_=ywpxziI4)(t%6ma6lN73zn!Q`oMyy_9@zO#NAb)}bWLpDzh zwuAU)4&t+5kkP2YFy@j`GjcOv&}#=#=FHO>rWJ7L*fqO_Kbl`XCa%Z8S3=!Xt?sU+ zq4s8w*w6PxsK$=ES!eciA6N*8`;UvL2xTFiP+Q zfR^aUDed2Hsx4_SzsPq2TsBgzyXq}Yk&{)E9cM_cq#gqRc-TM^DsS>kNB`SUrIq`` zBPkA-4MY&PO(al>3w5UdU?vJy_h!n}o}Q2AYOh}nk}XCe1MTnr2+A`=ebZ7q%aOZd znHS|aXeG)J%H(lay3-Uc0-6!Df%UA_fN94~VcW8;lt`}^IL`E%)e-w>Bx91H<9q3M zf7X?N+tTTCy~;ma*=Hl<@dDn372>-+9D3O?=jb=ff=*uh)d*SDBhkUie*Psru&o}v z$gYZLjbSO-TE@qd(^Xx5T*25Q@Y}e392kxoDlP5Id>SA0gH0(5Df2%D_5DG6BmLe5bm<#nLpSb-@6d#CJh_;<%5Z)B> zmTch%*#X>akW1L;>1Aail-+Q3P_|*cNC2$Rn3i^GAue_7f`zrx?6K*fcGUR@yAOiu zCv*hxIy#OIkqrL2XrySp-IT$5Hy#^dZ<&Xvet9=S1U0xcM2>vg;_*DVGF2JDjIAkc z>6Kb;w1ov3y}u{5MyAGEx^#|MyxIm$>`nfF=gJPD5Y2)DuN2s@L0>=$!59JjTzxm7 zM%Qt`>}&#NVJZ1z_w@qLo5y8Uv=7A{=J8mJ;`_@5;2y*7OyXK{BFem1!5{8>27CT* zKnb!i>33rlu@aN;eAnuK`se`>C!)3;`DzR5C0=B6i=r%1OfTOf7$f9v@F&OUNh|Lx zuoz~8jT)X~SIc}(NnAI~I;AX&Nb`E%cfUen%e|-GH*mgk!nM2qQp+mp7j0YZ+IQC% zrLs<{nQ8C>6o}cz_|VrE+N=u4Q+TE=sY_`EwXAI`a*^1Uurqxv^jk~@El0r!G0uPd zakwMlZ{EQmmz#eh)rERrJ;QXr*|2jd# zj9||2uKFJ^eNahZ1v?dS#&-8iGW*AOroVJt*SxQrp3UTyeMa2#wIM%XDLvnoY32Py z&Z^1P42mQgn@I(!N9YsaWup|B`FgG$K&UdZG0h<5@G+m_Nz>tgE5Mdh z2Izz;muvvNAGEu(P(+kn4t?V;BmhqYs&2qcNThIn@Vtb$bN{LF) zO1v}Z20-J802p;Fr=Z6%A%dE_=3b*@DlIUQL(dGe2m_9lV z?lHC!qT!plmT-LdYI|SnL~TzyXAYocEDA&lpA3YO_aIE&A>^f_%9!T;NMLu9*G(67lx>%x&&97S}1Y)HZ=A{(k~+l9&_ zz54BKKCeoj)q8epmaHf9El|6;)7!N`We>s-0$h686&jJpaoeV<;2x>r>|*fD?;NN6 zsV}~~#grzvNN}~mrW^@iKiCRmJ?voi)sFcpsn$sUHuSHJ`~`!@AjTcK^v6hy+E4tV z_dkLZuIdJUgW`p zlwO<2B*;M z4l5fQ*6n!Z3t%#VFDPK!niVHxzr6@NXI`(sFRMAlGo7cNz1ktvKZoXr=z4wLYo%>) z(WQ*2AyidWY5ow44=1%S&J?6;&sQ~(NkxOHE<=3SP5qYzFvKxGi=>YjLyr8m6mV z@rq+luUYJ6sHrlP_6hZi%QuRe5g*vnXKWk1cue5YJKCl1UwF;5uvLUL!25M`f{yzu8|pSg>Bu}w^rlP-!9v`+6C1R z8{?$|ui~p3J3f^>owp4dJEP8d zm&fMEnb^a*6(Mfhk6&8A0dZv5Q-b%U_qW`i-Dzd}3bd3%>f|HIW4~27%p*u_={$YC zWVm*Ue9_l7buM*%nHSsmG=o|;N%Y>yw?_!))~8LZ2sSLxl7CnG*=I0Qq0CMqISDXVgwdWz!$#W%x%9zRRt~zZUa-1;Dy3R$NvGe|5S;Z1Omu#FY;w8|x~OS*%0;(7 zz8}`OTj#I8HNr4xzIiX_%kp*)6JK>1`I4jQJas?P*E}Et8)N+*c2UPu9x6~Jh)Jj* zuxuce5PVB!J2CBA|4)AF5|q~OcgYkp>*Dmg*4T|n@GdN2AhyVI0yN^}vIHZfCEaYH z<94{~!g$v^ML3MWPja>o&Z;`gg*cmxldR28!$EcC(*P&XHk=fHtO19b$j#vCk=GHH z90)!kHb1ui-v|``pAl%D5Re(xD+1oxxkX|v4_nEcWl&__Ci;T5z%dmNVpgFNl@6UR zk-LiPSY%TfSxV`sZ;sV*_GNE7md8j_1St~9*HSAOOrFy<>_!dM17Bw!?!!OE_80-5 z&J8ZR`(B&zqsTEMKSAdTDBbnkuHaihG#OgK(XqZ&w0_Z&U?q^eVov%!K+XGz0f57q z&sM1j|6L+^j*yZK!~sJg=#lKX5yuKs=@2Ehn?OjZ`gWW)T&C2tHXA0Ff{LaKR-;&Q zg@^^^RnWH24=~vB#QVLJLVii+A%M?2E)EVw$&o{GFX*PIt*tLT`OfF2w^4mMy%`t8 zf{;9B_{VuWdDiCpRAe9x=UrDxNZ%V4OySAk(giDu z;~_PZ`whOv`-y-Jk1H>0KBLpV8)QaG9#mxaUX1I^UX*t&!{QNd7TUW`g51-5GJCj^ z5X5XF>XdwL^Wz>?0M&#UD^iPY03zty;I29gzR_UPaw7XjB6Y~-^{k#=BoXY?x4VZU z2clGY(o0RSz_|Dk*y4B%zdpUzZG5X*-Bf0S9NNF`Z7xPvLvU&b( z{s4|g?MLntDU!A!(Yk#$L=hceiIka^gQ zh7E}F51b-^?vr7y>)UBiRE`yeIfS3JYP`TM^B3~nCiT1^{m!+qIH~0Yy9@ZeXV-UO zjl_E7Y`3hkb$DQsXD93S*#lVh3iVP z?R7=x(QPqK$0)5U3}O1t)`Ez#`Z?-8GwzT>YDbBW)$OoBTQkf~8U9dlUoOrr)WFEx z!#$hZ3HYhyis}8RUG360vn6|7PJ>|tZ7_H5)SJ?AAx3XTWCx86Eh$$1V6DvhLA8sV zi9>Z=!Hh)H-gP9M;k#=i#uO%5gFbBj)VrdOV;!(m+;eMqY=W~!D?2>{y^ozioBpu* zhppU63VUu#g>vt^+RXzNI2rNA`}_MPVU)54ywr^PM%r`(Cpo@LQN^dhLM_!bxwgIZ zhqD*O4j7o+C``p5mGQm||_%q+p% zBFaslug|Ta>bgi-TMN9tXP4{6(&<345XVbtSm{}NA&osW9X5~F>d!LQ;hFdC0$I+D zbQpXuuh05quHx02k@5$792%vA6D7sD@u&)2&#P*cg_#Mvm5_K||JFA*e)nSiH>>|U z`oWV{ou$Yzx`JCH%IV$;Rek*XkF#mR z81AX-AypgTa~F~f>AH5S1MTG=oZ{k2ZteCYq1Eb@Xy3{MDA@LMFf_D4!wK25Hx9DAw#wLk3lW*#|qrxd(5NmM)iH{rh1HX 
zkTe;Xb*;#nVP06|4q!it7g%|&_5OF>t)QE7H4t3 ztPnH$85Kv}=V@?pX++ES9LzIMx<nM9% zbvl2{%V%_4%$lFto(Mr*VwQyOWjYJ(xW5- z3*XHrkk0+sDbCEhfIJ#91~`dvQ?(!jdBoyunl7$CB;d?;O<=Pl3PV$ahZ?;!QE73Q zwT)-c=%;#maa5YhdMf4vTD;{QMUFfn9zt*~?OoWACp!nSAJg2^A_>$a+2A!Im5ebz zj+ja*hHD$s8Tz9N-yVu-}2H9(tr!nV()9-$((jR(=(wrBKYqmQYD6&{2!*T`@4szp|5t#Nj^8E zy;J>_pNpauoo-NKYqn^{Me=nHRRXSbOF0(1XX}L-M?Dkitb;*`T+cuRp1UW0#K(Si z*GfHE28S6~H)^AdSp#$FjK?euVVzu+`Gne;5|mt@j>z=ttNzN@!~dI!RHRpZDw+C( zF@p`x$dRZSQQ2F0m(g6KO+Ug~4xkymBaVpg-Y#g_%};+f<~;+Q;Kx~6DkbekgeoJn zFvO+-%HC>5Ub^k!<9)9y=b^{nxKEeJJE9AT*0QXjIXo4c_Z@l3AcQ!Qk;da5CMvb} z(0Sz~Sx#5#mu0;sTWvT;Pf177HzMIcVPgW{6 zz0KnTPxgqGrD~Ax4v^lz5A6@*3MdB$Cmk0LqV3oP3^fS`kkb$ zPDO@jkA-PZ&8nd6@HtTndPurgMXt9MyZaPXy(ITN-3Yo{;gl8=gW_0`Ug^$$!YYUuXZ+?lZ z(k+rvWSS#g%8EfZ4(xS(!~8tduYC?~|D3X$xDL}Rk8ao&uw1EC1D4?8j<5tBsi4&> zBC9ocH8t$7&zjWJSEiSYJ7f~Jv=}w`%<7hwjWyQf@QnIdX}mY*D7WJFJZ(euigN8S zf4sXwxpZCcjbj2bPB^=R=u}L>W#xU zavU2q15xBFkds3(lAV5oi7!Mp_9dEmjDmG6*BtG9xEsBJy@t@E60FOYmup)yRP<|r zJH8y#*y^*h#)$?+wx0tSLO{r@H%86^G{_}7V-?{59hceJ22yzvkdhAGnlcTR)X zqh`BW{~W5<>&eP}6*2UrRIj}<(T1bCXo@UfcWs1CZzH9`a~tzol7mUph|zBQ(E$xW z)9vB7i*7GGq(ke?m#K~KK)EPT_keL~{$gZQW`&egmD1UTrFA@F-fqPFS6JomqqGzD z${P2=HWu@D#~jbQj#k=s-|&`S)>vhbT}Qt_T7#YR@3lHFSDwY1OzMBfdGDDoURc@J zLYpY1M|9H`uzLduhjvBpcP;P&^#3>`RHMvcNSOOX&|CA1^M*te zfZ!~zm}0(vqnP_hz~sP@^P8;D8hI((PofBU8mSG z%P<|T{b2s?55WYe$jQNfU=nijOH(-%tha`PvHEAOW{@G5M7~LP4icmj>U@*#hFIgD zvJpK#l}PT2`?fEU1!rf;*^+=RcR5YI=qxG#ovN&X5PxY8kewFP>NL%rk0`H6VKhmK zhu4&)ORY7b_)3`4XnppoENXeM7?69v{6c_mrTMsG4ZMHBwYvPU1ecBCRI1QNhkBP5Sz)s z3ic?mO^inQb=^u6yKuC0wBQ(UtFY0+Y_`8KZ|Onf=EyJbIAeCZcd`5|grM7_J%Puo z68CBcFEIWWgL%P_CHZqJxXU_UT6>pk4~~K!$>!xyz^URig=akk-~?$6Rt+91Uzq^W zUgj0^Ri+Z&@A;F803!1>OUGkhX^viiib|iJXQr8=q8*f$qdpWOMM5}p-i`b^sx(P` z(tELV)i_G?ec=SIjeuDwDM-A2m(>+X-k!!p21Di0*ZoOF64Ap#vJ7W zP^P!m;X1VPpYHi6b!4QG>+ZO4j-W^!0cQ#7_35lU634tY@=Gv znRkb#1@g$gYXZ)fSfm29Q!{}>E9`cS;78wtg3u^ti%eAI4%^1q&X;U>0UHQ|@pCi7 zuAB9ZT6Vi);R3=+5bb?`PZ*QHfGJ=l%addw(>~Zs!DTSH%5rvXwFaMcuU6mWmhJWSt3Ki!Xa~>z!1CRWDPAfoq!zqW#795aM9w9Hiv5!w- z$N+Qx&i;XD%DR%}!j+c7%esmNd>r;uSezp}BjE)h%nWuobixVgK=*^^#;Cx@o<-(H zUBE-xJVu7vS&SAKaTv+{C}0VzMA1S$^DoF!R9+jM5)yZ?^slBJL@8Lq>CdvQ_C3s8 z3f_=?!D*tXt6$o4U?b`*Dm#am9CY?8^Jeb_a>^Y-e{5U7=+YI)6efi<|H2M^{Yp3D zq)BxbhK+GR^f+$^y^EKKUcC#`2u&!Hw`m$qHfj*5cf_rTD+b^xxyEgob;#-f{IiHM z!vSgQ=8DgsHECtqTEG`~df4sEH1A6*J!mM|#^7Mrj;ukyOFjq&4v2q$phX1iV>Ttb z)*0@dV|YHVW|Hkd$7cBfQVWT7rN{}G24S9j!QJGN{Iu9X%X6GRqBZveEk1wMf4-?c zrr=O=uT;J0?48V`B#Mtr{JVu|HMkGaSw68{&OvGJO4@WIm78f)3T85gw1^M-Q6`^* zn!bqVBDDA`9Fj{i)XaEnL2M3f@-vC5;fu$)lSw|-dHTOswy(fUNCYJQ^SOB1&T%nu z#znS!cg%s^3#X%moV}RFi#rU^Ga!t6kwn&R9_}%|EJ7xnG1E_siaU@9A0}F!!x*XT zV$9EnrVqZFVZa>Q!Sm&X@y0TW(YmwUDrjC+na9Na%)HoQ_g^(7 z#2?a2|5cs>iXSN?5|XXbr^lUh#7J{?8w*FAQ+|=i{5SDF6uBG?*;9&r$ln+;BRDo! zBa-27S3Sj)vfrk~Ozhtegp9zpmfK(9r0*{bauDDFSWK@vo4zvpH;sB3=K_DSKyYM> zieXP8{MT&?K`Gpp-$MYB0e;EN?50*1Uv%J+0Spp%ZJRDi{-c5GA577BUjni8<& zoC{qgtIT!#Ne}55Q+K@5J{XuSeQ3gw9t3v%ch^OdQuJ8^w>jEQd_IpoK5%W~UuuQ< z5hT!NkTK37+lg|MXoQJ2o1aIk<%Tbes=V%Hq1lC1_Xnh%i=j0SJG%Ngd*Z-}G4Vf2+Ir2G(7O4LT;b8(0_ zu+mmi)v6o_U0NAX8*6TOPE&k3TwK|(ox5n1%^L)2Mv1${T5k$VZAKC-**f}We{5`N zx9L(-SyDVZTM_h8Cyracs(bu|*s?XM7lsc@Df`XQv^^jB;H-sZUz1a_*5T6I3lrbZ zez*SWgIpf)!uqb%imrbihDPLg-XAYOZ=kDjs-1s1tg|W4b4QBHD#0)!1rx>tcJxn0 z-dby+y!J)sm8WA{q*F$d1Epg(lFjvlBbC#JCfhtQctnn<;EVL>fwKvNFSmY!w?g<1 zA5{840LYr#WYC?xnIOQ*;+LLufTo0LB7^=aZ=NO^0x~sOC*~(tPwxdf@X(Xs7{M6D zvVHpjB~kX(AjvaUB4>e`VoT?z}&N*;+? 
zHhQEtil2ASNM_tVD~c9cjWtCp4YOX{5Q9O6VVr^fLQqhyuGFc!up^<^H4>FHwPSB$ zw4IZ{!0!;ey_^m5iDagmZGJ4UpRJtQnUd^~ar^vcd{B+(!S2PPzb%XBYAde}mmw|Q zoI29J+T?$S$$o#|-*7!@jtQqGo+*usixqzrGLImDTuV|=a2y?yXe{}|{J|HK?4ECQ z`>u@N zSsivwR!~?;dBPHLAki5SJKKrxl*<0>g?bGbr^NhjQZVH^_Fo}(!y1m~=@fY+?U&g_o z+AK{}`UwAzPapCyANK(tK3j<#E0;Ah*7m#TA)z6eTiF)E&Ril~kqkN=-9_e2v(=;c zVdj4VNL%AcAQbD)WzwxdUv)|S!xt4GXO9f^hWWr_87@bJrzTj(3_$XhHZBpu{!b$& z+F6_-+qDoUtk8Q`9oD)KfGJ+Vk>*MY$gENc*EG}H@3G73 z&i%B%kh@D5Ul+_A2KSIA>^l|wv0?&%W(0k7zTQC+tLm;p>p>lm5}UD9wU3UWd7dW~ zo`pJ}ln5~-P6JC{%>(5$vh0jdg7O^YS%(9912+Reye?@o2b1ey5K2a|(c>z#i6wq@ z$X&FJ?`fp_!C0c`YwFo`u3nWlWep7@IJh70>EtI`@cM$w1I$L$$wC;3;7ugbNL>RU z+7~tg)V?4b?$gw~OjMy^LeibWd84mo zJkd9)m*jD1R~`%WZ%KiszS)?~8B32##it?e*J5nt)RBJ6NLnc}$X-#brE5l8sonnb zNtUD{5Y&XajbA7 ziC^q?K2`Jfuu^zk0+V3&!;g)eHkhjrQef{z>w>VR_j0NB3>svE@}|;}o=m+@)GQh7 zZC!^cCK+P0B((ekwqWeyV_xm-Zt^|tbtg~etCWT?%cRTCc>9eCGuSG3Xg=}Vc9E>~ zFlJ2o_w58OLic7pRqlqTte{4#{dHATOSfyQTDAV-7pSp=o&_od1w_cA(wjA+_=Sb4 zLDvfR5ko-bqpkGPgg00!Z^w)y!k%|8G|0jLM-QRX0h#A+D&BgvbuqX$yv(vkSyf-; z%4fWxVZhYNb|sP~OI8t2`5?5tsb+v`q);FkRg7|U zVXoV(s4&qflEo-3n2s-6bnE9KjiDNf^s8d8b4ZsCSVapTJl82)zWNvOB>yF#iFR2g z<0T*|di&eo*=Z-(v+>+^w+1%=EM|+OJ`CUJ=3X(#_SCcg9+{4(Ap z$LGWJ?E7Z)<{R&eWRB_@dVR-k+oE(kZlJEvobtaZBScd3M$qx}hAmHt5Y|anKH{+v5a}6QjOopDCSpIqn#dT4!mDcX0tYL>~t=dkocZEiDcux8~+`0$lu`J&TCMBfQE>k2-%bE zbdvAuod*`0k|$!c0|54oXfI1=hZp4Cr{!7*dB=nc%x+D6kq=>i ze(QXQOy6_irfz3T2^gu&sGA_JOSRoUy+S3xIL>#sAAX*+35Fa~2iQ1WK}BLovv>N+ z1buv~T(I3s6ikyq1WADA{GXj{=2MVdhu!PravX;X_l}R_qtMh+3UaZ)w84j_YOWKM zcqGxFA+bdh?ELGm1EFLb!Emy*Af8y*;j3k~ zFa~;Fg7<(=GR4Q!JLT5CFm9xjhXCb<{kCbB{8E{C-8Wxh08dR)qQUqW8&UVk)$;3i z(w?27u5Z8e;mDdgez3pXcU-Tktqz&&!8ndJ0t3>#N72ASnbSCG+>4C>^ARROLt>(X z-Et$I30Y}e*3f(P)PFr*^)Z(maYEhF1UiNd8$}>t_s6FF?tw#Lpa~G`1_b||fCMmQ zGftnK7v*N2X37JIE>iSVgJ zOGz)|I)1<{%s%nBqjaqz+Was|dHlW$PH(r_Hhyy2Vm;PJrCPeR{Nra0w%Hc;*mTpI z>I_yRuf2hEou!;XL^CC5)p;eiCwt%<`8UTQ3Fb!=@KxS(mj=sou~Dj$di=I}%!z*j zpW#``5+u8k!1j=TAaQW(lC~W_t+y!pCZO(w_^2gM&U}MJW&~^wwzSj(RspMDa<;N} z<4K6Qs%iVa6FvD-ve&`H>4$)l6w?57S$_GV>&3?TLm_J8JH)1*t#~^B==J*H^g-Wu zr|xK9OReN}F?WM56%*po93#mpu3`eYH{HfT(UGXuJx9#V3Ig9=GFrY{(%lF8m;V>= z?%F2uYo+(5-;FLh#^QdS4O7QvWX+uV`{iL+I0^5JC35Y&m|mQ->1E_xX9&I) zfxhK81x*E1+l7EED=^W2w9aR&lp1u|yp0`fa}b;4;DnF-b`s$vV|-%3uuwrYU}&6L zW5o|!o}t&<-+AQ!4>iJ`;(u~tC_#!>v-V38S@}MNNJ!ue3}6`cKSS7+se;N@uz}-$ z5N7@U0lL5i4ei&1Nr3HOEO}sLa7*YaK|I}k0aO6tv_VR;`FGx{&>cz2nA$@3_;mY) zK|SVXuiaPvn#sw{$haLaL)!qxZLU0CJig=Aj~3h&fAq9=8us2(+FY*^Q6_&?okSb& z0AXuQ(T}UOh-)uH28-0jM-4mA<~ux&gxDMQZX)p54%Bh7?$xOYiwVUtdqjBZN1qo!J1o#NH z-VGb1P1CcDjdP&sjSyf*tL*{bL&vEBcYq1#zQ04#?>d5DnCw}_$zteB0NBggD3{_Y zyVMouARdbmMWWZCH?Q>#}RuZ&ci2GVv7K7D3BnKlfS%d&~_M_%XJP z@9R*-INq1IJZ#ZEs@rgwLa{C{q=NXlD_1(ami4Uf10I)nlRszpZthXk72w3!g^I1B zS1%g0t^5d@bMyPTG*8Vd0q&M+f!K!g!%xGt)&pm`|JKasq2m7mlV#Fe153kMOvr?o zazHhfWc}PM3NsL!^e$?}v{aZ3F9&#y+QiW|*s%#lcv6X){Fxicqv729{xB^>}u#zFtU&*$UGZv3r}7%OcPncFuJuV>fQ#PcdnKnC(*J zGyU8x^_Knae;aE9?pkOY?UW~WQ{SPWfKi8NoXWr9IPRV?slmLHzWs`4+wL;K-r3uw z4Jdfci|!5;^p#g&?Z%kQfDb)A$05RYFV(Ii9ym2a<3uZ~Mf$i}`3X1>J6*aiqvbn< z0=OT}`tITR_4Y+gS7p{!thPlXS;uznN&j7M4_H{6w&Q>8r~lDE%9Zk9UHCb;);ALV ziv@o>4IB~n&LL0bK2C-Pkk$2JmQy8eX2q0W4FmK z!8k_Gw;k4phwvzB^Zo;XpIS|BAferh!@tJ!1U^M%J1Mw`XLHo%G zRbjP(iIt$GBjlMhk$P+~m_FY?BCDmGnE=)d<_9t2<|%{xv!h$z^1pBY&*~f!_&>+F z7z$|qZz183nh$4?mjHberG7zZZ#&H}aS}jr5j|6FwQ(*c$K;wsip^^E0>&86`n1c? 

    x1}zq&qXVLmBMdbJ?xG#9Db zuzt225*|=4F8ZtyyLg7zMG`0 zH|;lUIrEn4sNC&q^ZC+$0(&!aTD#a1p)Lvz#zEK^*L)#h)zQ6_%>GzJ%W=&VKQqU6 zaf7|w*BL;d8Zj@fKjN=3fT&n1n-I@RwwaX3Zn1rdWVlICf;GK53A9yC?Fp`?ShwmSR+tolWd}cQ^1@S9dF-)y>#DvQp{pm9%`d% zSchk<2X}fnY>FP(0 z=7Ds+4e5}0I9!LAjOR&6=XNNpwB+rcNCtPz%@WxeL>hK9?)Ed9`A{1Bd|=#g|3NxF zSSt&F?eaJzzzGWU12EImGRerh(ak;2)(uJ2+U7nZ9i-CI`f-ik&N1nu5=VFj%m5ixH|Fm}t|}b_8l^2CFMAiG zjloyke0WzudzNA`|IRFwGLJ1V9eQCH9ciuI40MVdeI3i&+u^%+wciKE1Nb#2QGgyAguZU_a2F6J(t%6P z@k@1wF}+@^=zo{Qa#w&+5B+a6`aGQXbo|c4X=lHb_V&o@;g=+<1qp8ih5n}n=sS2l zC-iXsXuHvIx85_&oSTl3Jp*)w0kgAUrs4FeW;~p9Jkz^J^@qq6l4Z%q_);s-;A zL)=eh-uW;#x7i+X`sYs%{U@B)H^%0#6m;vzh*gMTmZ8PTa-FyVoBL!0v1*hZb-qS- z`u12w3rG38fA!Mz{7$M&Ee8 z?X6P6-B_<&eQqt5hHp{~o9;Ja zP`g`yIIKK%;@4#REl*1UAxXvzlk4R9@3ls%9(h3$I65<3oe*H6qe&8fKxR4onCf|$6yIYB2ZjEeOJu{jB7`dG%m1WOkju&9BBuI$0?sg;K)p|BU+gs8vFn=2XfFU6P3DNVaZ zm>Yd0|0bKLf8*26*PwbEyJwWgnD!rBT;HwYZmP{xQT7t3gNJ_yLtZC77lu@{6XkA- zeLAbfI?;aMOf%PX=E&QYuZDDxpPtQ0E$x&sW1onhoF3i`NIxlQyn~?acx~iil|PkT6I%oAwDzj@&~x1hrRjR|#2#r-TW3YV{rr+rW+jolKS9hCOPaYyDxou2MDOV%HqQ97kI`=bB;UR_Pd znO|7jU6@D}P_rA;er}m+8y$Hz-0`rz#n$IzEuV4;p>xCWxMTi{@NMvwb>df(VQ0OA zz*Y(JquUmGBbH^M&1)C}h&Fmb5Ex)}eawdDF?PXcC+)0?ZJ!zyHifRQfX5na8SG$r#uKnIG&Kf%~OpuSDscPI{+XO^sU6%E1yArQ3r zE73s-5TxT5V=5M4>8C5B823eqHw+AS(1e1@vlw4yiZS!J+hL(H>@kFSEUW7&nUbz z{hU;n{`!mTH{Il^HnzR6j^xT`U#D)2e=;C{7&!I3C= z5$&2L-AzYr1h(dB4&%((H{HC(BNu31j6Yi2;Nk+BG#0A~*2l$H(Hn2<;e-p1jdIFD z-K&q~QkRYQzzBxtPJRFg(n;-RG^T*Si(og&>r~R*qFcgQ=(XASTp`Wy`J_jQ`vGU>o*PKKxbrWa6PxgV#Zacq!=Dk{qXmx+C9mQKglqr$C`m z?D{MVAxG>|zV|^`YYQeiI`5~E;9Pf3PI7Mjyta=cU!}%6S@ZTwa#*X4sqJy=cKGXh z3%<8SUi56~oB-fYxEu(Ln8jFmvJY@VJ|G)lDk%xtyrbL_F9vFcdSW0o#cH*6A)oOE zp(E~Bl-t^=z1~@)oDzg~Y<-$MG<5T_5hh#qj=xH3>&^|`CALO&)L2s|Z&9S|&iT7L z$8|NKgPc3%4RN=7Yl|q;r%w%NfvJuQ$Ln>woY)wZ35Gi*%wh>3S_WzRAm(^uz%Bke zk6!|j2p3WK=n7qo;5OUdgQEh5BvycQ<-~rf>9Os=wi^ivQfZs(3yYg+ zW3AI0UibT6oP+Swzgy?0rz?LW|MbYsmXN?l zY5I+2S=nJaO;nfWo3{TZ)i3r4RELfz@>srkL5>jnwI=WU+qP^bpIHHQhBiD5UCQU$ ze?WTJAe-hC`_r?nuS32=+_8zJ;~|dbA-zu1wnXnC?L>ljwa>n*xJ3wu zW&b#$SJZZj+HVJA;6-ghrsWR+@H$g+dtN2y7En^tCbJIE>K*ep%udjyVg1qL-%zw6 zGQ&|*XL`ARB78~>5Pwg2P!z(z8z!fesn4E(GRED@r$q8yAD^ns5GPt=KI+J8!73 zSzIXSx1!P>@j4f~A;!u8Ej?M{#>`lsdf(!CHg4lyWInC2M4tWX=G@&l96wySj7-)F z4ZgAjf{T{*;j#ib1T{kbNG z+ei?Bl;)Cj$Wb?F{@*AE1o74{q>3?~9xDf`gRx>HWopRuqHm6(JDYAC2B6*F002&| zgVh4XOo?rtl}vs_8GcF>!^QK@+)tK8ypjN*TSP@dN!grAz-Vm z7~zCIT_vTZT_EG&{$q6h@V?5kWtL&YWn@$Wi z%(m?K&evD-6q82VQ~ySc1o{CxBNgxr*u~wP8dH`@#4DpuyM~GNpYA*8ctU8%F0772tN*l#^kZZ%oN@ETWHoHViKGi6unSJGnH`N zrDefgi$Q$>>c=nGyG==RSrl+9=M5sSY#m2uhzN;pY5^?0Z+i{}ts^jlC31OeAKn1v z%#?D=aQ^Hc-^euqg@hyfRLX|)vc@O4v_To>zL{}dIv;C77F8a8)8NtZx?z|MFya(S z;9-P;==F;;pSF5iUeQx#V`nHn37cioy4xFrWz^#|Hj02T!ce!7pGiWIn?QB3_^~)9KOV!O522V)*4HJ!v72cukog(NX2=IR{B0uM6iUe#6or z1l7W^)6?_m$fZ`bklY({1!D^eS9%b5oM@~IW%du`^^>DzVu(%Q+zlPWAe)(~w^L1= zbjj9w*@4Ohi?RL&ALFbSRaz@X>a$eHUDG-~AS;9WOl6QEN!CWIUI1JIH_`aKbBxQqMf(y*R8!|A4-k{@jPo#m11`-rhFMrso>!FEKF$3#g8^ET%8_g zNv?RS^|+4dCD!4-8cDh^Fh3ju^NX64!K;qb2f}zn1?lqbSvl{2DOEJ`G0kd)*|L7%fm(8m27HcNZfb0f@g)NwoBjHHA|hKLC@TTd}b%tTBXdxTShdq{M*QPLM=k% zLe)clxfQtjx~U`RFPRx2gTN_oRpo+=HFbP)?V6n>?+m-sCY_S#+m8m3V{)!HI2Gd@ z*;yDQzH&qUiilhf#d5m>8N`AfO9G2|WnvOTv={&tJ`HNQo;L5^@3%%9nA00!MA203!wPL!=4SRX_7ss z$~P9Bq3imXhO`)TusdRKd)n^qq^3apXa@v59N7x|BpL+XCh>tY>s}n(P(qojQFW4s zVrx1v-_7I^^ zjek1G8Y#OO4l~9ilmrseb_g&P${yjqH0+$}5ZyjMeHulAPN@Xu0k(o2*0ORR+c+dv z@kJj;Dtrr&=PT_4A%>N6T83#>5_9QBMa6|hA)9no#%>R!oBa$i#ONK&ztKlBPk`?A z9ZPbFg>EElFEF1mpYo)0jF8?7=WZ4s~b0=&hLO2d2aygFLQI zbQ|1b^k`8!37^KPUck=HU*ml@bkt{|FOTBy9uqXAt>agLp zIDtR|s1trP=0j{%o6# 
zRA@vSukPTa&soFpZQ}~+Xpa!Sj9yRtKTPnmz7|7XlT~{cd1|_at#{fMEjHS)8dkt6 zCm4-*B_`;YTaWb$65+QpU@qzMFE$IT(j}2sPUmVnX=u9hW1`RflBTTn0HL&vL#5OQ z5R4M-O0<9KNL}%0B!ip|rl(#J)NCpCcVeIWgA!jgPKdh$V8sB`o=}&b>|YH2nqSBs zN))0+n@?Td_mZoo(aAz?b*NKE45w6j2Enn|%ygIxSfd23gYXaDlQA5TGBo?@WOZpu z?8J7+iK`sem9ZX{fc6hO0Gc2jYGtp{p;x{RGlLwIvc1((Rl(sYF68Nx){aD{IcNEh zG&P;3J3_4Zyz|<^bBm!*ThCD@lWbmZq458}2Kd6U2=Wd2#5iTAH>O-SR9Jq1=$6j! zNamVv{@_zLIPok79KH4FV=17x{*6knpaTIwoEbE=S*hEjuPRzs@jPpp3~srqgu_E( z(+0KeP;?TTXZF+cDiJ>u<~-yha{Bh-~(3- zIo+XIKxBE}rno&dm1ANv$Sq@qKZ@+x!86&!08role7f1D!5EGb6|J|`RSns5Vw=Xr zB6)iM3}Jhc877~cJ-Hx{bD@#%vfdB|!ZLH!zZrVW9<=y?P_15ydv6R`wY9vfPUYln zKO6CIsvfr1`h4-{>!qXbbBlA@ILzWLK|o*giY1kNgGPPbRX^`9WB|q?gxk~thma%0 zPpH+c&7gwpkdu?+q>2)%UubQ95r>CSKP2k)3^BY#Drdt*z4U|bnYyu%v&7J__?l;d zRmQ_MFUk0%w#tNKg2Pu2Yoc%k=w^6EF&B2%aXzo-8>{?{2 z7||}NxhAUlmAirAEk98@yIT{1HuvwZoztj8P*(^uJS;EMR8vdj#T9M$l|%Iau1dRJO@V zf&K-c{MbLp5kJkbRLul}dDJTK5iJabGcwV}kmVjmjb2V|TrL7c2OCzIpIvBJRtdjgA5h|k-sNtHYB z20*;O*y=}rE_!-}lrHonD&Aj*r{qp!XvD>ty*KuRyYT+`ewm2!wvSom)aeD3{Qmdde z%d|9%tqu@^n)*OFE&q!x3w<~(a&X66wXt?XSwxuEbI^Q{9ztg@0MX1+#p-EI*&97C z!)~AjGvg6Dx)A;(g3~9%;;w2dr>kykXdOmpslKeQDr?A4J5iM$NF5D*z_QD#@e#$1 zrWF(rnh>ddS}g6}(3tu~6o*JszV@P6K%(ATUBB`0X0c*C*D|PS`E3ZdR{iqt>W^st zb-6&gdxNK8XDS;BSBer?m*0|Flv$V~4SU8CbQxQD0Rv?s_t&p#5j{<-b?4J4l+%S% zH0L_cz$#B-mbRoWS|r<=ZlZgx+_z(RhQlTSm z4J3(eFKEvB=EvS- ze7OR@aV9ud^NuC=FP|k(=Q4j30}khIPZEox2P%~Xzb@@dc_W@V+;FdaIE4>Px|YtU zNosx7!hi<7-~(p-%KqjxbMkA?r=Ya($$46ZlvhbF?zDBZ>2_Xe*5mwLdLXg&-kKTW zT2olRSXW}>^z2H2i2bwS06MajJiAh&)*H@|yu{;Tl$X*P74j{BLnqCi5UJ-{5;6$|k|X z@qOWP+2bVL&hLTt4z*;9W?OG3YjS;d4pEb<`nex%{&IRYH%F~(12+FJ(C~I9Jm~$F zhnvA00=RikE^L2TKaPLM%VHXaCEt!TYWf()j zU*58GS$r2zYMfzMj9wDW2+GLI8;sAsop=4&gNLPzkL+hMmB+pPdW#2k*A58xiLzy8 z_3_V(8j*ot6TJ_e8N`c=h z4(rIK2b}+9xW;G-fwymrgbm$IS+1TO=THgYe-r)Z3vLNn#vTAky*X}u%UV(HR;pT} zd3s5*exLy^CI*WWF@~w<{NAQp0&Kbi(#07P4-0G%`a?YfY)>D8vY)1#8jHuYww?}> zn1E$FbT#f=IvyK~&%wM#n~teH_yB3o?Ig}Bx;sO`_E*3+%u8}NkWA_t?cA$-zrWB{ zErX4|_P1ii{*q_zy5C7~x)x+?IOF;A)sA@jH97ys#sf=n#(>6cUw%3<=4R<8&elNh;_+{|;@0a>%^AXTTQvWfgo=2#iC%97Ysfq9av{ z++tY&gpyF_`=`a}ni%6)*=3Az%O6mo|7SQQo|1M!?Pmq%Q8!Zi0C*nuK}c_FT4||< zq#0;scz{;f8`q4Szsl*nq*KI_S@2L!U%r9+vaiNKR0>d+O~Nqn#1s_E6C~LKf%2m@ zq%pV@jpfQG!}oEb8^bXVbb29Z4J=pplG2v}^Gw7>m`}ljl=q}z+LQ6Wjm3xGA(d|S zc2D{)Bz})%yXn`v8yCqSqQ-ORl%IYpR-W#|=~9|8eC zIp$a{TZC}l`<{yV$2$LcWP1N%(PZJFx+>?&J~q9vzUozk&+ix`)$wc^u*^HOTq5^p z7rQcsk%?bmo5;ySb0L$oYFcUx~WoH*7so8S(qq4dPSOn6462GoN;3SFlr2IwE&ub3$9MeR|$Uj z2$Q?9$M5B?OVcw)gj$B|F=m;@!Gw#3p%^}3snaqz7QNJPt+BD6a1AV~xwnRABsEYh zVe~1Wz|pLdgX}z;x)3I;g&DLgyfa>a7Y6>9oe0JC-TTtx(XLqzZBvqfEy05fV)$FV z!`45f5YJ8spYrys9}i4U2A6hCpG8KWtmSk{&24?Cd1sn8lv|IW|4FtI*332knF(P6 z{J5H%_o#&K0Hy)Fjf-fAq5^!a+KpCHlmKP2bNKjnYN~E$$Sx0SC>y7G4t124?k6h9JAgXBB$N9Q249~Og)RQLCbs6Es z7eKlJLowL9NTHYA%f8bS^WTN-BN*QF2AfseV6XDXyr@HHNE9#R%N^EypG!CY z$+tX#PjEBT$BaPJbgNEnn`BGU3ZnNQu?c%Xqt>4tfv#uy}n&2f_wNT&D zDTV`?nv7t&f~@_`|DOeDRwN$;v@gJSRQqKdhr_NSx%In&)`-xCgAdhKCToo8KSH_5 z)AS4p@m<|^e%pkt2y%5JGewvm#L8L}lb@Fm4+emc0$1?sMThnZAg2KvGn-TQFVNWS z?NmwSw|sEA_`2EFtCgSW5?Q04Skq->fJtH>{dpMWW{+v6y~M!Y@3vR_5cD}t|6MKr%|0|-6GMCk;?J9`jcex|5z{9PG`)a4bMV@>#aXJ&o&&q(i%Ol$|8=B_Ra7eXD9B0Zf_8@)uC)k1>~o%q0$9OsW=`y6az5oqAqIaj!tt!HM zWCT1I-bUpIK{;O>tfz~F$@01j-X!&Jjm#2hH1H3;zTfrG* zkY~I?8}NNLWnx~}!!SA9sF@7Jd>)yT_joU7bIB;4@6N#q&n$%@W**QIH^@;GiWne+d-g4j8 z8tiq1gE4^FIa?9Zd;+43=kQ@^be&U!cPL!w%fFN5{odR34s^Co8=1MdVdUM zqqA11+psOW0)Oe9UDLqqnC*qOJZ2B>9DqhJU$A4~-pb>{%P%j_3xm@E@u0MiGcw@w z!)`4vHhM=)pBa`#WhBbGI@D55A;gIF3$0qYK(Y;Wchh8f=8Itx6?11E9Kf>&8)8OU zA59WirhG^yy_kWKHYuhmH$aleeqW3Ce;{p>4nvod| 
zJWK~B0_4}~jQ8);tM=UEeyphf6G5__WDafsta5?Ak!Lw$l*+(L^w$92ige=rEk^{B zvLeWH_q7lK0%6cE_o~}M6wm-2fuKS8=2q$QyPqf#JQb=OFt9fE5cWmPZ;f>M7I*fw z2j3Rn=t_$rDyN}~|3Rh1@qvRFx00}ePqgV+yk`T*Fvd}6x=LC^f6R?O-8d54a0s=2~S~gmNWWrY0oHlw~$&aNfNyBF*GTMB=+^8=N*?!<=R2n#eI`gO65R4=7fs{j zT<|T{EX;~^sb`iUx`fmi(YyzK6a@msC73tID#8twZG@EA-JQT12!&fxDV?P1G>DdF z@niMI%>RKhBktAuT|0DqhLg<+z~45W-jX-EE^^=UEU?1Hj70EX+ch4}Gg(O=TH6m6 z+^~C+tH2+otol+)e8n@rBjdfF_g%5#0I9xY&^IHJst~$YW!_*A=jsZwJ=MYL(b)@# ziU**;f@3asZ`27q&{wk8nTn!tEUTm}Z7vMQEc zkhG-0ZB^~tR%r$hMf3p17tE0xhRl(k|1uNpJ=tUw{cz8KmRuL=%9D)I50n{exA_3a(FO9!xGNE ze|OKgtitdIsa5UU$ytEI;mrZqsyj|&dwxDgd>$d=GCmP6EK_W7(>&kAMc_l@@s>}c z;FR5-`!?O1-^t<++?lgs5-?5YcnGrqGg8WZTT_9fM~G`uKoe9fJcq4GZ7;=_U_q|V zejzOjrGLK}=j3c44Ihm@``S^VyLb|4w-6Jzx&PR|no|SbGiCQNYEh<~Z&HV7X6NU- zZR}O;8XgvYvSLzreqL5qCR#JIMx3EIEcy~#otgk%K0XoEb2a=m>w-0;5sV(!(ok;D zXd6P4Wz)TAy#@`?!@6$9biR{q&*^=f6eyMfmH4zGn-V;L67J&O8t%O4e4=gK4$k*x zH*a9uu31o)u3uy z{WSj}6@)!?^0P(3T9l z;sgx6b?B9-SMern-vHP5Gj<(n+{5TZ8~W2BZ}GtBmQik7wxOB|-bR4Jv7G5@>Fjx? zE;v5cb8|uCFL`Wnbd6P|?8bAAIYoPyD(2u%kqlyS9J7@qK}NCqh?Jb&<=%G)AcOg) zPTFbIBDLvy(~e%3Nl^C;G3K4mSpbDfwsrIXFc)COg3150xIR*Sn_KQy_8!>%Zvh)09aIAcc6S1#GCFc?!cD*6zbERBY?Pks@SHJ?=k@v8L$&gO=rei-=h|wy*~TK9ckjY87|^ zat4(szq0XDGBHQS6QBl-|AV#O#2`eSZ>#{}tG!OcJ%)cCrRS4Ke6J#&qXzNj(4<&! zqZY!^5cz!DirPW-`2tCt)QtcYlkwpqo0vMh@mq~% z!4>(Ed8)ska2jAJSqp3Cp^LVxYUDwL!g`8Q^mn(aD{baNe5Q~pT<5`>Fp2+k2D5e@ zGw#6N4G$+XF`O)r7`x)~G4A~1)qh(*wA}u3s zN4|>kkx|iB@wE<}^S(`1Mm~{?2bHfpz0-$MQwF}-KJ))qU6P>H%pi-2>2w%LJrxm! zo@%L-+@%#!eqjIuLSD-NUEBWw2CjF0s?g5VDBm}d`DU1xmtbxHcl{37k^K*aK{+v| zb<6+-A^jgPA8kV|G+mrAu06D@&PQ!K>a2y`-56MG4R~GDkhUvt@~^$Oday>%-!!%u z910gNBL0Hoi~p|77HJ4+R6ri!n<>tHN?}<++Gk~ zxc=ky0U=KJF@PR+D?g!e!LI7J{=IUe%9F^jA$;za1s^Hp#aKx{@2U3f|4BczqD(m3 zIoV&s<$)RJz|O-6&9DJ_WO?a_bfy2;67d6Kfuqp-2H+*yofFE-O+DRRBX;~@C)cPp z2ne$2vuM8w5E5{kJ9~2@c%stN}IEF{1$c7QrK7y#56!smyK2BWTI_ zMHU8ox5Ou?z#$prz`Z8-bb0!BQxo~y8ky>sMt)BJ<=&^HM3Hxpe=ty(Y^m7YKmP2Z zAO^Fmk$|0UDNygQsMO%!3vNajJz6L*{0zl7KvO@He9zYPdit0(5%8cP&!u|r(6^|X zO2-K=H)ifJ#EOzO3Jx_b@Ml-xP6NUXlEE*PdBMpa>15u-Dr~uzYlwT1D}c;bpk7<{ za~*f#$7?fVWxrZh12?-ZE?Lf5%|zL4lV^fQe%&n^9;CVV~91JA*z$(xhnCaElwuM_vJ zNzVclL~cB-tUPVa{OJ-VMKgvxV&h-7BCkwy_^22$xMczuhkmWe)0ww7nM2EhC)WMa zF24*83)7d~x=sDOc!t3kTG-h^x#1z+G}-(}kKN3|%S@pY*-}2en9+G!$`tA(Vk+4@!Gwew)ZoQ;g;tpi19XdsSY- z_(VQ*lnt(ydlem$w0*GP$CTFaqGS_(kE&_>}28olPQ)n7_?!0+= zwz1;!!PN-GJ+8jQR6YO+mREJyQT=S@h(u8@pOMuT@Si)J-JX>bli;I$a#w|u-a?VL zo>uOW{i{?c;AwZ*@yN28{1gLXu*E7f!pzp?vyo-;*=AF`}l0exa);esysy?_wmhN^fK5Kdm&{9kQ zGJSW<@lBS)iitU+GH~gpARUNuQ!9fg9Yd@+W4w1W?TFzfE%046-M(cB&Tvzb;?=}z zjE$+kZ>(&|5Avb;IJ<^9?qK}W!FK?n7Oi1eW~DGw0HZeG1xUjACm#<72}Ejz9zp3I za$TOe$Ad;cHFFg-DI?f12Jf}t6ZYLSgHpgcR~n?I0m2**KJsyYnp z5|-WAemaoyz4{iPWAJ2Fxq_B^^)w0K3E9AJ&J=dm25yanC!EejwUG%;PI{%y)%Ior zl5LZ%lv@^lYQ5u4ghQ5?=CJoqwPGZ$Tr{m}b@dRP!YpJykYS-tYZ$<$$fr4LI z8PTBeSJNO^k~`q{s433}&;zG^Aon9va{?;V`3=7WUyp#O|Bpk_v|Q8!r_Won9oN$e zFLu|W?TpLHCXeJxB2OPxMc7lfKKR(TeMp^TETAZ^eK(M9x3zEDZnT@5ZJqY{k$<RE~ItR~je3io0fgSiGRhLd5r{YAk(#9c@J^i{x|mB5`55>p=Q*KQT=9t&n?1v%#% zFmE7mos<5iBhpjlQlExXziEQoz@F)l;H{hMcZ{#ozNnjSRAe43$|^IoZm@cf!)yxG z@OSt;|2)hIuq4g;j(ieO(unCO?54{a*7_jxAdad~86G3bbXaGa08oJ!hywm~afAm` zT4zGqb*)J`+rk0~<`O(NEtVK^L}#A1Yu;rKh7Tu@B;jU1%@VVC;{cdYG!ymcRSa<6 zD1*0^0g_u3q32AY{1(P|7;)0UZIY~>3*9dm%OMU5Y^PE3eyMP)JU@u^^^S**XuYiz z(~v1UAmlojXZAand0s`vHac`ho*o_$NFg^zG=E6VgI===pd(NCTif!c;qn5|C!6j@3Nk+!(CWZ%3=0%j6YWTa*G+17rVkhD#=~d^bIOyOh5fTLO>;F=AE~lE`6LZH z?|4B$6fCjvxm%$)<992t|wfvgC|yWH~q?ys+2{W6Sv`_@q-4$qSq zu{ANl0Q)Eg!^W9AI%~_3+Z%`A12q`D9%8(8ray{12`E-y^v0D<3NOn*@xJ)lk);yr zThrtG7))cd=anW_g${6SKrJ 
z7XN9+mqf0y)hIHL^MvfBLj>r*35El}GCG?9Rj~65lSHU7BON8qOc#YitH8VI&K_qa zyg(FX9UZS7%Z_39|DDwP%#9UZoNR^L&s?T27$U0}nV)_i`3$TD-{g${-B=w( zUyFinUr1F^F3kd=GOR73S=Y`GHaRUq-4=vY zWV!QiW~L+4Gh_|mzybN`)5FGvj+MAvLk-5g6rfN3yMdkHXkncb8mfqT-R^M-ytTiB4pkI{tHksT+ zUf;Apab~*JJ%1bOvqVW6(5v7O3toxYbT?)(wo3=mCv=OVz(9td@|>7&joT55DWcqZ z%fQW2U&ehkd^@>cs0&<7J#6;&cvds#Rh=N#jI6)Co*87Y5E60Pstb|POT`(l>hW)b zAEA5b=Sh~!uy+7}T$LAvG@{Q|{{kCv6d^so*vxNjAdtx+o_E|qHP1KRn?#xT(Jn$C zq`*bl#ox{*L7~VCLiEKhr!tj|zE+ruB?BfsxEBlJk>K&wTO^Q#P8t)`0jjh7p2$B!ycEvUg++fkSjijD(aeygnpGd z5fd*?NuO_vW56^lR4aadeNW?bXMm5dOsSA)()q+b`&|<>wQ)N0D2cn!*yVV|q*Jcy z$;o6PIqT-5TD{hN;3`HMXpwy(t8$>H^g?)EKE7T4Qn?>38LYWS97hu{6# zTFAdF2zYfX!&{xjAG<`XcaiUcV0K6hS*UBZ3!=zH;&qKS%<+7xQR@*It^!3vHCFyC5`VRn6 zP;>vu_ur){60B_hEeql3^b@-QpER4}8LV0AO=~T|Z)(~eDI@}YM{rsgRCy(Il=J4l zAUy;hvy=VQw_#H`qEjFJt@G&DU1nEtj^P8sLgRFQc0fNPo){*Tfflu*mb*p(0YbOF z=FC(yB|@&h^a+GgC_IQGxlkSM}-HhsRx$L?UsI-1ubX zvmpFMb4xrOcHj1fWUI};gq`f?10F}HyKVcY@WyP^`W6$;7TJc8VwEAsnsec z$0-`W_h!;uO?{)Yt>f|hlcOUci8<$%Rbo(TvwAjXBg={DuBT9fe!zpSN^kGe={A=* z)asj*dy+3P9(lCsPL=V>N3B1h<{`yLQi`C*=cc7LJpD?f z#1#x;t4ZPTa`%PKthIJ_i(LE)6l7z6f6boqzu59v0agi&t6?7dGnwX9+2?M8x9Jy7 z2`byvrhaSp79d=h68gw^=r!V9mJ+)eraauEw#JJYINV1IIsB?@tKE`0@YGwA=g?Zm zKLb|W!OCe3rg$({yH_Y0qgyGKb1W9RWxTe1g)L?^z%8wl&IX-j&kAALbBOq4y_ zc+y@&UYI2@lp?1}!_YneM|UsRBnDfBVK^Q)Nq1G|<8%Z%W#G?4U;b>`;mVI|;R*`q z(M(*6;%9%%BL<(@2#2O9hb{8P81Ze?`Moup20qpgKGZ zv@dEr-*b4P{JH?!FPa7)z#AZaydjVU`g#0`@AQe4w}DpXgwTs&5%nxCqsCON^Iv6) zx)2>P)Q-O#mboXFC2vaxps%W;gZ+iq-fvy)xMMvlNhc&BCA9URF_wWu) z?#LjB4YAS1B(pN5h|NFeUj9J%KE067#A~LMzRH){y$VM(@G z{2@(r1dGA`HU@`*fUaFx7_%WQq?Tw#{9fB#CWpCm{~+!EiC11??1q%e+8k>V6Y#pvF& z-*!g<7#1i7u|S;f;WpB##07%E1pCtimM+$woE5$>ts=b)Z<>2V_U%(yy_d-2ZX23n zI@7Lz7D}pEb^1YYvuAj-8n9}XQ!~YyfZ=Ehye?KouN+{k&vQ4!3IqTMkLqJIEXojQ zy3HO`N??8<6sMf4;8sARQ-f_7auF{B@;8XTQex-Yqb_!tyQEkJ7>wFqg>Jp~rbIwur-SawYiWs;lw3Lps{-Gk&C)Ni&W-|Jmr<#oSm9##zY7gn zw?|!+7W{~Wnb}uyuM235Nck)d-RIhszHQnp&ia^fmlr&H%7xc^C)X?j>q}d&P+Wyk z^}4@HEvcj%ANlF9DyzZyWwo8r4itjgzq#5r-xNbkOtEqs5Uai4e4n;pnO?kD){22( z85TOW$X`#O06tKvsVwgu`?;C1MfHo5gG1{Y8uzwiL4+9i$DGaPxokpmh_VVDarBl6>-KRllxb%Hv?}+;3=q!nQEcr)wC#l1y{SH6V z+@WPVNu^{b-YrxVX?E; zJDEiM_3KyUpDP6p1!kj8=ezqnAcyGV-Q(*HZ<0f2`LzW&klhIZ|DxL$wx0>OU*(he zFWD5SwVaX8dP|e@u3`-vmpms!?g3$@Bee5^r2grACy70oGNiifp*XI&$J%2dd#(jiWO&>BNLKIuYq~4dcS}>*4dv$e|9;SB9vJ&Xm-Z&)hyUA%M-u1NB(tu5Ql}1<|Vao?4eTKAh(fyJnK{4JRcDVRzseF z_4z}8UWJFgBze~COhCw7PP>&QH-I}R&=AGvB!K`e7p>yWrERP+N-v$DCM&2b`Z$Ch z4#K?5_Y}hOv}lvR+B1}sRX>+ES%`!Pl1}Pr8~&IDR#x6n;ZXjdr~K&kFozyx=L5nl zQ|$2@J?1Nx{k_WT-`eP5%&YjJ2~Bq1zZoM~J^RJGH?SBzQd;)}>FnZY@j@{yOxcIL zdywNd+e+y8LX*Z`-&mVWN?SoiM}}39w*uOVcYcM@gO~2D?*@l`pe)YJS&dyj20#t7 z#6`lcV7pmqJ7PNH0i#pG-N_D$4@vbwSbo2hMaZSqFd<%jk{xN*O^re_hi(MG;q2sv zHl#BfWA-;YpNPHh=9&T@y|3@FR?WIsho+&k?&&Cax5taYHFA~FAkmz@?E)e;_nR37 z0(GC(Z*#7lJ#_mOaU@%)<8-;K*O+s z_m*kd;v8Z)4W7uOqh0yeJUb4@GZ#lsqB*blVa#&F>?z)hl*^G92I`+W?whQgD3EHJ zF7~fKIsdm>LIWsTx006jaR#nNVsSiq* zbQ+W~**K0AK$CiBQxFQq8I3s!T;)t}0#3IqsvSo+LLDFsL;F@Br&k+XD$i!wUG z2ZN8^sB{U_*S(^;S0AEtdYH2lSeErhVG|j6fau_IfBz0Dfrd>eff*QjOcMk%AIAhN z64cLrnI!xZIp3jGV|q<`l~me48I~Pyokyvq;pE3&lzbw+Y6HK^I`1qVEqRcQxJ`T@ zD;JuE^J}KMySrK4wc-|^U{41-WQTlM?0XcI-xE zt%}l#y{UQals%Ye9#7Xy;4<{C#tzQ^^7cCDdjQDAwCZh@d%>ZNKH@gb_mS1BEF=T$ znhqh`s|x93ceu;_&Ydh`%wDqWUjAB2BPOY;;wq;I+)K!-;_oiwQ5oMm2`II<6p5Bu4<@$WP)!v(cL}VWojL|p%h){@{bq`Vl zLNxa>AAp=POv0(m$m!ae1uUi9&5Msf;?Zq_ zO@x54Be@ZAFAk%={S-6;ToTwXgxdbq!P{`f2m;>q2&wY_gYAC@AF zhr{7=dHMYGvRRa}J;`-hmXfA#-h8WCub=tzv>ymR=cHCYz)^| z7JIl$X^N>p$;1V>O>d)<6zh;M`CxRPI0po5R(0E!E9V9b=RZ8*G=WIB4OkQmgLI5C 
z%l-Y^Uw-$lzOMi0cR!7n!vT-y%cskwJwM-W*OsS4I1&qUj^Mz^nKLtzVW0tUDyA`N z^)bRR48VXOMPLNxGy#}pg3-RZ<3NXZH8q2}^}vYe1j@S%nPg#$7l#;VpJp>QgMDZ0WSKwx4gaPZ8WGC5kfb1A1X1GF#i zKmVR&T=90RpFThLipNJyvK&tj?gk9QwDpFbM9`ha*mcC@7NZX}MetOF1pv&$zGlbJ z2?c_LQkIz`Ll7Y{W1goZSxsvjeW=-3mWOS zF$f~uA`}p8ITYZ!uw0|}F4QxAoNESw_1y7#VT)>pmJ#!^z6&4Ve0 z!|l5H0K`n56J*(ofP{T$55<8IG}MBF+$BjOVsI&C>!bHk)lbLM>2PFX=5)K=)U>53M z3j}JNh7nXi2dB*&U@##=FegFsl%{+r5P%&E0Y_*bb?X|4DaSGi4+7aCP((!N2m}GO z)-i5bsy8Uiz2fcCFE75{z&hF>B1GVnCL|ypIrHhwF)<`Y_qz4kYj+=#h*C;ZTJHFC zT2c`=s<&FNy_ct%{YQR{JRt;!V&7nN3Qd`dn%DYe z4|!OlRr^qPhr|hqfgw>!NzfG>V;F`oN6C}I7~KNQ%#=)bkXisZy1T0*YTKfzSG^5Y zHLpX{#HA4NzJcLs%4ME{sgE(b+i0C_m@gCBi1pTbyKFZ-9@oriF86H%8?=}BkqL!@ zNnM-jumAvc5A-t2;c%ShS<>~(r|achwd%wS2;h8}PRreNyw7UAOcY?F_ZSXawLiUFTgAs_m@J$}AJ@z4%gZ?` zOtLTvBQs&pu96N6+6%v_V7Pl|gaZbG01yBZQORXW3juqx*wu~fYF(|>y&nJp`(cKf z1DJ;pF%oeCAlC>Zim(Aph>oG^fDRQROqF&lf|`rD8l?DIZO#m`m#ok=v^xtB(5-F( zo-&^95)dq>gXDxsjwQK`fVQ3It!}9tNlv}%^UIgN`4|7{U;fR%{`If^{BQr`fBN@- z`ybxF|8Si1R1PF*Dko3!`F19~o^MwJXua3gSn_CtLKv5N+nzo>Z&&jUf)k;P9w?DZ z`Nc23{mpN_sqNCYan9AaC(c4V&GMJO{(%&3PoL@VN-l|NG(b-Q(e|zS{X^XiGu|4D7X^pVo4crQFM7uIpw)0f;am($E3O z-~@mG0T{$+U{wuFG{VS%IWW>(e)#cwvGo4;UvB3!1i}CeP9h{B&Y3wEPD}`lNdMoT z{*QEAhhZ^<(sala)`7(x-5r@oIME(yMcTQjq_JB^AP_7-LNf{D={OUYRCt~Yr#BCri!dTMxOoqVNQlgwB^{2L=`?f@Clqq9 z?wi55?bb|-z;L5(76z>i-z5}k9*I-RrAT2j@Sr|4dOK?PO!Kou6iEySd37 z!w~>s7xe<}%i(^avs+ESa-+T`X+SQQOGZwTa$ah)tzL);0jv)uaO5e=d@3-xy0vCz z)^uGf1Nu>qOFmFehzQP#?gSoDP5ZUQs>D=EI!w#)Fwav_uuO1B+yh42w&-Xn9cRQ; zO6k2Dwtzb4hg5P_K}VGkk;1;duGKoG5aeP^nj|M+FmD4wNJL5mnxkrH5ZPFk>GARH zOsMN7z=Z9#_S@wgM8aAqB4SS~L4>)dlnttN&xq^_DO&u~%Lj*W#e|WPFtebr69ged z633K@LZ%7ATW#8G+xA4VA)yU3i%`%p9K2Nv)oQn4+WMfxIUkRwSrRe_3(QNIr)h-c zPBuwxWClP9C?SOh)y=%uz6GJYetFMyV47Qvp#vR;j&-#*w)6R{4g`K(Zzv#nC&P1D z4kEp7BLWF>D#9rr?x*{=hcc5#C9zs>+q!*uIrkc_M3fbF@y-55i~uzR2QW|)A_mTq zGe6$n-yKeOrL^1a=YROQH#d`)=caL?Z0cie+vR#)H=9wus(&1HpZ>Fd^Kbv%_usxN^YZ;KfBpP?%lRZZ4>tpMZ=b$= z{O#GNW1+%%nUSMr%Na>W=3%XB(Ua0DdYV5w0-{a>BApiUOr!S zi%Y*=o5E59|Z&=;qKH1fB{2$P&gZdrYutM?l>=Z^L$uxN-0kzPflPQVBXt0>gM6K zVeM5lYI8J=dVRfgE|WI)Eumu(ficWsuOrTRn&)B>fTW?r8`u~YBSu%VYE`?$pdcH& z5dkw218VQvtx;Mjr$9wyNu(W;OqmK2nW{l(000}H5KUz+3m=v_WfH+sa$XK^(_GCB z#cH3W9Oq-zs-t&ZCGof4|6)1J>$W|=y!T$g!v-0J6X5VMIv7&0JDBau^ge;m9$7*p zL~`>TQx$u)0{@gC*?FXHTlMHV#;Cpbd73bSiKrAITqYs(NPJ(8hF)vi+TKXA%k@?_ zounSqohpb^0BX1+Ad#VYC^|$l5lqH@)81}>0J_t(R6GtFBS3O?c29$D!>2m z2@q47QcBY_5yn>QRPx<&xaR9Ky}|$rIi7Os-QAlF>jY@b!cshN>osLW&Mba9OmE-3 zN$#&*AMVqSzk2-e`RV`u_V?PgVS}9Z$sK_V+|^&!l~aHQR3Eo~@oJ61sCBVOj2vu) zq^9o183EI>+yh8&9!??!V6y>cgHm=u18{)W$9C)2r+9k5Zr2E>{Yeo7K_H@l5I`OJ z{N)y22wYrRwe#zyEx-ssA}E6TY8bu6;Na0XAz)CgI%Nh(NpSQ4LdFD+phV08q=o?j zArKy_C`gG!IOnC-PK09^iwEfL_Hs8wbaPV&QVeiYz##IRVVTo%oLC?rQszA6S=h|R z81pjKwY#btSO8NhdC4+iNN?tzT!SV-fzvdl%#YvPN#TpRYl5MPn97DHl!^q5;F-62qR|qn6LkS`e?}y|3@NncDnAn3= z?RLJs+&-V5&-ENgxm7drz%ic~B_}x@7RfS_Nv4vd2h{TWq8GsNy!hL|7JD~#{ro-Ww-@U!BueIBT$YIJ1L>V!`{u=-T_mDUP5QLf& zI7gVfMSv?a5HRg32B8=MJJB>O2DpvpZmOf{uz2k=9!|^gcz<_#T>JWZzLKcS`m}z^ zLVP-c5K~%jolrDrvuz!>Y6odt+x70@biS-3Kr#{oPi2XdQ=5-7V+eBs28iqR+Nv_l zQCqudD1|u!8dx~^a26h${{E+**UM>ZK5oHArEnCfvtcYpUk?hoHMP99jbcORVX z+h3f1__H6s|FiENPV>vl>2|(p)9Z7;^#=H$gIZr>7{`P_V+@jx2neOEwr$%qt0}A( z^VS6>p(zooBX|&tfklM5r9|_xrkQ{osSIuUi}Im0AS4KrcD7*SQ4`vAdrK7^$-w21Xx&@f&#`(TPbq5 z&r>-)-rr4!nUMZeK#YAaO3cEn9ten-7;+XOO>On{b|$F|llL_7X_~URN7&wy4R;8j z0D~~qu1!O2C)T5UP5J~|b2p4Y3}T@y5?~%(0W6dV%9K5k7hn|x1|*<;>D(1FoN_sy zj`LCGITHpF&WAEDr)fDoCLzj~r)LBd;i$20+vxiE?(T;_`|j<#H@EHf;b*^{Kera% zSvTq%1@S$(chSwy?Pe13JuxjdlH7#pzO-yIKgaUa02 
zZnvNQ*B@*kGpl0+0E)Rvni93lxz};IT+c6;Z{EFoeDfyf>HUY#pUy3@CUHOn6!UQN z0e))(td$UsNgG2o97KVEi75ksAgK-?3JK8}oNhNq>}W6b6#^5WJ~62gFp*m+qA14$@wneOV4x;07K+u4S<)m;R?-I)c3w>5}ri7_P%bbYXHh}+}VCcw8zvjw3!Eo%*!$z z)8RM=93&@BKyfT3Pi2C@-t2bW*7N9k1?g1P<0av}UzwLY*cr zIRI#1pa@V{gqXyGj3tK+Btl?DMr5EIMqlspkr0U^2SNl=H~?oUr*eAt_M4~pd~O2` zB{<`Ph!AKV0E9pu2pB;e5N4>_-30?M5D>tWnTRD3gSojypb!&KXjcsFibx)WoHZ!r ze0O|AC|^F_uIu&V%NLrY9OiU%=2WHx&Ks?WkP{!K6N5j$yr1H>UE6>8&;K<`%oDlW z(A&Js58s?*xz96~ELM9MjM_AC3?0L^wvLNKl|mE5Aa->?qAc0nwd-rWtTz?1(X^zP z3eAV9J36*{ZWp|wI3~tCVT7YD^K|!S9`<^^+)|Fa*P3mci?esF@01U$`&a^Zp?uC18|I9jvy;=NjQ6wLd9 zo4a;3!dOnHL`Ao8yRJ5bZ~{aOA!H)5a5L3##7HS)K*A+L1+zl$ebsZD?^0$WH$gCP zx88coDQ8X@vvoH!Yiz;3$X277>$+N405Wk&Wughm>sT-C+P4}WWH`}M9C0e=m(`W0 z)3M}bkYBf#l!~RHHk`_Js{wGiwRv6u<5thjYO^YcQ<GsWEjAD8kV-+%l#MIe}*pW5$#dfo@)BtEnw5hVd;E`^0v u715X(JoauL5)sSr9pZ+BI~Upk;Qs-gLfhiPPtrpG0000` is a numeric value, and literals are -described using regular expression syntax. - -NOTE: Whitespace is allowed between ops. - -### Naming - -Each op gets a unique name by default, based on its spec string plus its -character position in the overall specification. All the Ops take an optional -name argument in braces after the mnemonic code, but before any numeric -arguments. - -### Functional ops - -``` -C(s|t|r|l|m)[{name}],, Convolves using a y,x window, with no shrinkage, - SAME infill, d outputs, with s|t|r|l|m non-linear layer. -F(s|t|r|l|m)[{name}] Fully-connected with s|t|r|l|m non-linearity and d - outputs. Reduces height, width to 1. Input height and width must be constant. -L(f|r|b)(x|y)[s][{name}] LSTM cell with n outputs. - The LSTM must have one of: - f runs the LSTM forward only. - r runs the LSTM reversed only. - b runs the LSTM bidirectionally. - It will operate on either the x- or y-dimension, treating the other dimension - independently (as if part of the batch). - (Full 2-d and grid are not yet supported). - s (optional) summarizes the output in the requested dimension, - outputting only the final step, collapsing the dimension to a - single element. -Do[{name}] Insert a dropout layer. -``` - -In the above, `(s|t|r|l|m)` specifies the type of the non-linearity: - -```python -s = sigmoid -t = tanh -r = relu -l = linear (i.e., None) -m = softmax -``` - -Examples: - -`Cr5,5,32` Runs a 5x5 Relu convolution with 32 depth/number of filters. - -`Lfx{MyLSTM}128` runs a forward-only LSTM, named 'MyLSTM' in the x-dimension -with 128 outputs, treating the y dimension independently. - -`Lfys64` runs a forward-only LSTM in the y-dimension with 64 outputs, treating -the x-dimension independently and collapses the y-dimension to 1 element. - -### Plumbing ops - -The plumbing ops allow the construction of arbitrarily complex graphs. Something -currently missing is the ability to define macros for generating say an -inception unit in multiple places. - -``` -[...] Execute ... networks in series (layers). -(...) Execute ... networks in parallel, with their output concatenated in depth. -S[{name}](x), Splits one dimension, moves one part to another - dimension. -Mp[{name}], Maxpool the input, reducing the (y,x) rectangle to a single - value. -``` - -In the `S` op, `, , , , ` are numbers. - -`S` is a generalized reshape. It splits input dimension `d` into `a` x `b`, -sending the high/most significant part `a` to the high/most significant side of -dimension `e`, and the low part `b` to the high side of dimension `f`. -Exception: if `d=e=f`, then then dimension `d` is internally transposed to -`bxa`. 
-### Full Examples
-
-Example 1: A graph capable of high quality OCR.
-
-`1,0,0,1[Ct5,5,16 Mp3,3 Lfys64 Lfx128 Lrx128 Lfx256]O1c105`
-
-As layer descriptions: (Input layer is at the bottom, output at the top.)
-
-```
-O1c105: Output layer produces 1-d (sequence) output, trained with CTC,
-    outputting 105 classes.
-Lfx256: Forward-only LSTM in x with 256 outputs
-Lrx128: Reverse-only LSTM in x with 128 outputs
-Lfx128: Forward-only LSTM in x with 128 outputs
-Lfys64: Dimension-summarizing LSTM, summarizing the y-dimension with 64 outputs
-Mp3,3: 3 x 3 Maxpool
-Ct5,5,16: 5 x 5 Convolution with 16 outputs and tanh non-linearity
-[]: The body of the graph is always expressed as a series of layers.
-1,0,0,1: Input is a batch of 1 image of variable size in greyscale
-```
-
-Example 2: The STREET network for reading French street name signs end-to-end.
-For a detailed description see the [FSNS dataset
-paper](http://link.springer.com/chapter/10.1007%2F978-3-319-46604-0_30)
-
-```
-1,150,600,3[S2(4x150)0,2 Ct5,5,16 Mp2,2 Ct5,5,64 Mp3,3
-    ([Lrys64 Lbx128][Lbys64 Lbx128][Lfys64 Lbx128]) S3(3x0)2,3
-    Lfx128 Lrx128 S0(1x4)0,3 Lfx256]O1c134
-```
-
-Since networks are usually illustrated with the input at the bottom, the input
-layer is at the bottom, output at the top, with 'headings' *below* the section
-they introduce.
-
-```
-O1c134: Output is a 1-d sequence, trained with CTC and 134 output softmax.
-Lfx256: Forward-only LSTM with 256 outputs
-S0(1x4)0,3: Reshape transferring the batch of 4 tiles to the depth dimension.
-Lrx128: Reverse-only LSTM with 128 outputs
-Lfx128: Forward-only LSTM with 128 outputs
-(Final section above)
-S3(3x0)2,3: Split the outputs of the 3 parallel summarizers and spread over the
-  x-dimension
-  [Lfys64 Lbx128]: Summarizing LSTM downwards on the y-dimension with 64
-    outputs, followed by a bi-directional LSTM in the x-dimension with 128
-    outputs
-  [Lbys64 Lbx128]: Summarizing bi-directional LSTM on the y-dimension with
-    64 outputs, followed by a bi-directional LSTM in the x-dimension with 128
-    outputs
-  [Lrys64 Lbx128]: Summarizing LSTM upwards on the y-dimension with 64 outputs,
-    followed by a bi-directional LSTM in the x-dimension with 128 outputs
-(): In parallel (re-using the inputs and concatenating the outputs):
-(Summarizing section above)
-Mp3,3: 3 x 3 Maxpool
-Ct5,5,64: 5 x 5 Convolution with 64 outputs and tanh non-linearity
-Mp2,2: 2 x 2 Maxpool
-Ct5,5,16: 5 x 5 Convolution with 16 outputs and tanh non-linearity
-S2(4x150)0,2: Split the x-dimension into 4x150, converting each tiled 600x150
-image into a batch of 4 150x150 images
-(Convolutional input section above)
-[]: The body of the graph is always expressed as a series of layers.
-1,150,600,3: Input is a batch of 1, 600x150 image in 24 bit color
-```
-
-## Variable size Tensors Under the Hood
-
-Here are some notes about handling variable-sized images since they require some
-consideration and a little bit of knowledge about what goes on inside.
-
-A variable-sized image is an input for which the width and/or height are not
-known at graph-building time, so the tensor shape contains unknown/None/-1
-sizes.
-
-Many standard NN layers, such as convolutions, are designed to cope naturally
-with variable-sized images in TF and produce a variable sized image as the
-output. For other layers, such as 'Fully connected', variable size is
-fundamentally difficult, if not impossible, to deal with, since by definition
-*all* its inputs are connected via a weight to an output. The number of inputs
-therefore must be fixed.
-
-It is possible to handle variable sized images by using sparse tensors. Some
-implementations make a single variable dimension a list instead of part of the
-tensor. Both these solutions suffer from completely segregating the world of
-variable size from the world of fixed size, making models and their descriptions
-completely non-interchangeable.
-
-In VGSL, we use a standard 4-d Tensor, `[batch, height, width, depth]` and
-either use a batch size of 1 or put up with padding of the input images to the
-largest size of any element of the batch. The other price paid for this
-standardization is that the user must supply a pair of tensors of shape [batch]
-specifying the width and height of each input in a batch. This allows the LSTMs
-in the graph to know how many iterations to execute and how to correctly
-back-propagate the gradients.
-
-The standard TF implementation of CTC also requires a tensor giving the sequence
-lengths of its inputs. If the output of VGSL is going into CTC, the lengths can
-be obtained using:
-
-```python
-import vgslspecs
-...
-spec = '[Ct5,5,16 Mp3,3 Lfys64 Lfx128 Lrx128 Lfx256]'
-vgsl = vgslspecs.VGSLSpecs(widths, heights, is_training=True)
-last_layer = vgsl.Build(images, spec)
-seq_lengths = vgsl.GetLengths()
-```
-
-The above will provide the widths that were given in the constructor, scaled
-down by the max-pool operator. The heights may be obtained using
-`vgsl.GetLengths(1)`, specifying the index of the y-dimension.
-
-NOTE that currently the only way of collapsing a dimension of unknown size to
-known size (1) is through the use of a summarizing LSTM. A single summarizing
-LSTM will collapse one dimension (x or y), leaving a 1-d sequence. The 1-d
-sequence can then be collapsed in the other dimension to make a 0-d categorical
-(softmax) or embedding (logistic) output.
-
-Using the (parallel) op it is entirely possible to run multiple [series] of ops
-that collapse x first in one and y first in the other, reducing both eventually
-to a single categorical value! For example, the following description may do
-something useful with ImageNet-like problems:
-
-```python
-[Cr5,5,16 Mp2,2 Cr5,5,64 Mp3,3 ([Lfxs64 Lfys256] [Lfys64 Lfxs256]) Fr512 Fr512]
-```
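Tying the CTC note above together: a minimal sketch of feeding those lengths
into the stock TF1 CTC loss. Here `last_layer` and `seq_lengths` come from the
`vgslspecs` snippet above, `sparse_labels` is an assumed `tf.SparseTensor` of
ground-truth label sequences, and the VGSL output is assumed to be batch-major
`[batch, width, num_classes]`:

```python
import tensorflow as tf

# tf.nn.ctc_loss defaults to time-major inputs [max_time, batch, num_classes].
logits = tf.transpose(last_layer, [1, 0, 2])
losses = tf.nn.ctc_loss(labels=sparse_labels, inputs=logits,
                        sequence_length=seq_lengths)
total_loss = tf.reduce_mean(losses)
```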
diff --git a/research/street/python/decoder.py b/research/street/python/decoder.py
deleted file mode 100644
index 715146194..000000000
--- a/research/street/python/decoder.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Basic CTC+recoder decoder.
-
-Decodes a sequence of class-ids into UTF-8 text.
-For basic information on CTC see:
-Alex Graves et al. Connectionist Temporal Classification: Labelling Unsegmented
-Sequence Data with Recurrent Neural Networks.
-http://www.cs.toronto.edu/~graves/icml_2006.pdf
-"""
-import collections
-import re
-
-import errorcounter as ec
-from six.moves import xrange
-import tensorflow as tf
-
-# Named tuple Part describes a part of a multi (1 or more) part code that
-# represents a utf-8 string. For example, Chinese character 'x' might be
-# represented by 3 codes of which (utf8='x', index=1, num_codes=3) would be the
-# middle part. (The actual code is not stored in the tuple).
-Part = collections.namedtuple('Part', 'utf8, index, num_codes')
-
-
-# Class that decodes a sequence of class-ids into UTF-8 text.
-class Decoder(object):
-  """Basic CTC+recoder decoder."""
-
-  def __init__(self, filename):
-    r"""Constructs a Decoder.
-
-    Reads the text file describing the encoding and builds the encoder.
-    The text file contains lines of the form:
-    <code>[,<code>]*\t<string>
-    Each line defines a mapping from a sequence of one or more integer codes to
-    a corresponding utf-8 string.
-    Args:
-      filename: Name of file defining the decoding sequences.
-    """
-    # self.decoder is a list of lists of Part(utf8, index, num_codes).
-    # The index to the top-level list is a code. The list given by the code
-    # index is a list of the parts represented by that code. E.g. if the code 42
-    # represents the 2nd (index 1) out of 3 parts of Chinese character 'x', then
-    # self.decoder[42] = [..., (utf8='x', index=1, num_codes=3), ...] where ...
-    # means all other uses of the code 42.
-    self.decoder = []
-    if filename:
-      self._InitializeDecoder(filename)
-
-  def SoftmaxEval(self, sess, model, num_steps):
-    """Evaluate a model in softmax mode.
-
-    Adds char, word recall and sequence error rate events to the summary
-    writer, and returns them as well.
-    TODO(rays) Add LogisticEval.
-    Args:
-      sess: A TensorFlow Session.
-      model: The model to run in the session. Requires a VGSLImageModel or any
-        other class that has a using_ctc attribute and a RunAStep(sess) method
-        that returns a softmax result with corresponding labels.
-      num_steps: Number of steps to evaluate for.
-    Returns:
-      ErrorRates named tuple.
-    Raises:
-      ValueError: If an unsupported number of dimensions is used.
-    """
-    coord = tf.train.Coordinator()
-    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
-    # Run the requested number of evaluation steps, gathering the outputs of the
-    # softmax and the true labels of the evaluation examples.
-    total_label_counts = ec.ErrorCounts(0, 0, 0, 0)
-    total_word_counts = ec.ErrorCounts(0, 0, 0, 0)
-    sequence_errors = 0
-    for _ in xrange(num_steps):
-      softmax_result, labels = model.RunAStep(sess)
-      # Collapse softmax to same shape as labels.
-      predictions = softmax_result.argmax(axis=-1)
-      # Exclude batch from num_dims.
-      num_dims = len(predictions.shape) - 1
-      batch_size = predictions.shape[0]
-      null_label = softmax_result.shape[-1] - 1
-      for b in xrange(batch_size):
-        if num_dims == 2:
-          # TODO(rays) Support 2-d data.
-          raise ValueError('2-d label data not supported yet!')
-        else:
-          if num_dims == 1:
-            pred_batch = predictions[b, :]
-            labels_batch = labels[b, :]
-          else:
-            pred_batch = [predictions[b]]
-            labels_batch = [labels[b]]
-          text = self.StringFromCTC(pred_batch, model.using_ctc, null_label)
-          truth = self.StringFromCTC(labels_batch, False, null_label)
-          # Note that recall_errs is false negatives (fn) aka drops/deletions.
-          # Actual recall would be 1-fn/truth_words.
-          # Likewise precision_errs is false positives (fp) aka adds/insertions.
-          # Actual precision would be 1-fp/ocr_words.
-          total_word_counts = ec.AddErrors(total_word_counts,
-                                           ec.CountWordErrors(text, truth))
-          total_label_counts = ec.AddErrors(total_label_counts,
-                                            ec.CountErrors(text, truth))
-          if text != truth:
-            sequence_errors += 1
-
-    coord.request_stop()
-    coord.join(threads)
-    return ec.ComputeErrorRates(total_label_counts, total_word_counts,
-                                sequence_errors, num_steps * batch_size)
-
-  def StringFromCTC(self, ctc_labels, merge_dups, null_label):
-    """Decodes CTC output to a string.
-
-    Extracts only sequences of codes that are allowed by self.decoder.
-    Labels that make illegal code sequences are dropped.
-    Note that, by its nature of taking only top choices, this is much weaker
-    than a full-blown beam search that considers all the softmax outputs.
-    For languages without many multi-code sequences, this doesn't make much
-    difference, but for complex scripts the accuracy will be much lower.
-    Args:
-      ctc_labels: List of class labels including null characters to remove.
-      merge_dups: If True, duplicate labels will be merged.
-      null_label: Label value to ignore.
-
-    Returns:
-      Labels decoded to a string.
-    """
-    # Run regular ctc on the labels, extracting a list of codes.
-    codes = self._CodesFromCTC(ctc_labels, merge_dups, null_label)
-    length = len(codes)
-    if length == 0:
-      return ''
-    # strings and partials are both indexed by the same index as codes.
-    # strings[i] is the best completed string up to position i, and
-    # partials[i] is a list of partial code sequences at position i.
-    # Warning: memory is squared-order in length.
-    strings = []
-    partials = []
-    for pos in xrange(length):
-      code = codes[pos]
-      parts = self.decoder[code]
-      partials.append([])
-      strings.append('')
-      # Iterate over the parts that this code can represent.
-      for utf8, index, num_codes in parts:
-        if index > pos:
-          continue
-        # We can use code if it is an initial code (index==0) or continues a
-        # sequence in the partials list at the previous position.
-        if index == 0 or partials[pos - 1].count(
-            Part(utf8, index - 1, num_codes)) > 0:
-          if index < num_codes - 1:
-            # Save the partial sequence.
-            partials[-1].append(Part(utf8, index, num_codes))
-          elif not strings[-1]:
-            # A code sequence is completed. Append to the best string that we
-            # had where it started.
-            if pos >= num_codes:
-              strings[-1] = strings[pos - num_codes] + utf8
-            else:
-              strings[-1] = utf8
-      if not strings[-1] and pos > 0:
-        # We didn't get anything here so copy the previous best string, skipping
-        # the current code, but it may just be a partial anyway.
-        strings[-1] = strings[-2]
-    return strings[-1]
-
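  # Worked example for StringFromCTC above (an added illustration, not part of
  # the original file): suppose self.decoder maps code 1 to [Part('a', 0, 1)]
  # and code 5 to [Part('m', 0, 2), Part('m', 1, 2)], i.e. 'm' is the two-code
  # sequence 5,5. Once _CodesFromCTC (below) has produced codes [5, 5, 1]
  # (e.g. from raw labels [5, null, 5, 1]), pos 0 stores the partial
  # ('m', 0, 2), pos 1 completes 'm', and pos 2 appends 'a', giving 'ma'.
  # A lone 5, which completes no sequence, is dropped: [5, 1] decodes to 'a'.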
-  def _InitializeDecoder(self, filename):
-    """Reads the decoder file and initializes self.decoder from it.
-
-    Args:
-      filename: Name of text file mapping codes to utf8 strings.
-    Raises:
-      ValueError: if the input file is not parsed correctly.
-    """
-    line_re = re.compile(r'(?P<codes>\d+(,\d+)*)\t(?P<utf8>.+)')
-    with tf.gfile.GFile(filename) as f:
-      for line in f:
-        m = line_re.match(line)
-        if m is None:
-          raise ValueError('Unmatched line:', line)
-        # codes is the sequence that maps to the string.
-        str_codes = m.groupdict()['codes'].split(',')
-        codes = []
-        for code in str_codes:
-          codes.append(int(code))
-        utf8 = m.groupdict()['utf8']
-        num_codes = len(codes)
-        for index, code in enumerate(codes):
-          while code >= len(self.decoder):
-            self.decoder.append([])
-          self.decoder[code].append(Part(utf8, index, num_codes))
-
-  def _CodesFromCTC(self, ctc_labels, merge_dups, null_label):
-    """Collapses CTC output to regular output.
-
-    Args:
-      ctc_labels: List of class labels including null characters to remove.
-      merge_dups: If True, duplicate labels will be merged.
-      null_label: Label value to ignore.
-
-    All trailing zeros are removed!!
-    TODO(rays) This may become a problem with non-CTC models.
-    If using charset, this should not be a problem as zero is always space.
-    tf.pad can only append zero, so we have to be able to drop them, as a
-    non-ctc will have learned to output trailing zeros instead of trailing
-    nulls. This is awkward, as the stock ctc loss function requires that the
-    null character be num_classes-1.
-    Returns:
-      (List of) Labels with null characters removed.
-    """
-    out_labels = []
-    prev_label = -1
-    zeros_needed = 0
-    for label in ctc_labels:
-      if label == null_label:
-        prev_label = -1
-      elif label != prev_label or not merge_dups:
-        if label == 0:
-          # Count zeros and only emit them when it is clear there is a non-zero
-          # after, so as to truncate away all trailing zeros.
-          zeros_needed += 1
-        else:
-          if merge_dups and zeros_needed > 0:
-            out_labels.append(0)
-          else:
-            out_labels += [0] * zeros_needed
-          zeros_needed = 0
-          out_labels.append(label)
-          prev_label = label
-    return out_labels
diff --git a/research/street/python/decoder_test.py b/research/street/python/decoder_test.py
deleted file mode 100644
index dc61f8b2a..000000000
--- a/research/street/python/decoder_test.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for decoder."""
-import os
-
-import tensorflow as tf
-import decoder
-
-
-def _testdata(filename):
-  return os.path.join('../testdata/', filename)
-
-
-class DecoderTest(tf.test.TestCase):
-
-  def testCodesFromCTC(self):
-    """Tests that the simple CTC decoder drops nulls and duplicates.
-    """
-    ctc_labels = [9, 9, 9, 1, 9, 2, 2, 3, 9, 9, 0, 0, 1, 9, 1, 9, 9, 9]
-    decode = decoder.Decoder(filename=None)
-    non_null_labels = decode._CodesFromCTC(
-        ctc_labels, merge_dups=False, null_label=9)
-    self.assertEqual(non_null_labels, [1, 2, 2, 3, 0, 0, 1, 1])
-    idempotent_labels = decode._CodesFromCTC(
-        non_null_labels, merge_dups=False, null_label=9)
-    self.assertEqual(idempotent_labels, non_null_labels)
-    collapsed_labels = decode._CodesFromCTC(
-        ctc_labels, merge_dups=True, null_label=9)
-    self.assertEqual(collapsed_labels, [1, 2, 3, 0, 1, 1])
-    non_idempotent_labels = decode._CodesFromCTC(
-        collapsed_labels, merge_dups=True, null_label=9)
-    self.assertEqual(non_idempotent_labels, [1, 2, 3, 0, 1])
-
-  def testStringFromCTC(self):
-    """Tests that the decoder can decode sequences including multi-codes.
-    """
-    # - f - a r - m(1/2)m -junk sp b a r - n -
-    ctc_labels = [9, 6, 9, 1, 3, 9, 4, 9, 5, 5, 9, 5, 0, 2, 1, 3, 9, 4, 9]
-    decode = decoder.Decoder(filename=_testdata('charset_size_10.txt'))
-    text = decode.StringFromCTC(ctc_labels, merge_dups=True, null_label=9)
-    self.assertEqual(text, 'farm barn')
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/street/python/errorcounter.py b/research/street/python/errorcounter.py
deleted file mode 100644
index affbf9695..000000000
--- a/research/street/python/errorcounter.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Some simple tools for error counting.
-
-"""
-import collections
-
-# Named tuple ErrorCounts describes the counts needed to accumulate errors
-# over multiple trials:
-# fn: false negatives (aka drops or deletions),
-# fp: false positives (aka adds or insertions),
-# truth_count: number of elements in ground truth = denominator for fn,
-# test_count: number of elements in test string = denominator for fp.
-# Note that recall = 1 - fn/truth_count, precision = 1 - fp/test_count,
-# accuracy = 1 - (fn + fp) / (truth_count + test_count).
-ErrorCounts = collections.namedtuple('ErrorCounts', ['fn', 'fp', 'truth_count',
-                                                     'test_count'])
-
-# Named tuple for error rates, as a percentage. Accuracies are just 100-error.
-ErrorRates = collections.namedtuple('ErrorRates',
-                                    ['label_error', 'word_recall_error',
-                                     'word_precision_error', 'sequence_error'])
-
-
-def CountWordErrors(ocr_text, truth_text):
-  """Counts the word drop and add errors as a bag of words.
-
-  Args:
-    ocr_text: OCR text string.
-    truth_text: Truth text string.
-
-  Returns:
-    ErrorCounts named tuple.
-  """
-  # Convert to lists of words.
-  return CountErrors(ocr_text.split(), truth_text.split())
-
-
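# Illustration (an added example, not part of the original file): CountErrors
# below diffs two bags with collections.Counter. For truth 'farm barn' vs OCR
# text 'farm bar':
#   counts = collections.Counter('farm barn')
#   counts.subtract('farm bar')
#   # Positive residues are drops, negative residues are adds:
#   drops = sum(c for c in counts.values() if c > 0)  # 1 (the missing 'n')
#   adds = sum(-c for c in counts.values() if c < 0)  # 0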
-def CountErrors(ocr_text, truth_text):
-  """Counts the drops and adds between 2 bags of iterables.
-
-  Simple bag of objects count returns the number of dropped and added
-  elements, regardless of order, from anything that is iterable, eg
-  a pair of strings gives character errors, and a pair of word lists gives
-  word errors.
-  Args:
-    ocr_text: OCR text iterable (eg string for chars, word list for words).
-    truth_text: Truth text iterable.
-
-  Returns:
-    ErrorCounts named tuple.
-  """
-  counts = collections.Counter(truth_text)
-  counts.subtract(ocr_text)
-  drops = sum(c for c in counts.values() if c > 0)
-  adds = sum(-c for c in counts.values() if c < 0)
-  return ErrorCounts(drops, adds, len(truth_text), len(ocr_text))
-
-
-def AddErrors(counts1, counts2):
-  """Adds the counts and returns a new sum tuple.
-
-  Args:
-    counts1: ErrorCounts named tuple to sum.
-    counts2: ErrorCounts named tuple to sum.
-  Returns:
-    Sum of counts1, counts2.
-  """
-  return ErrorCounts(counts1.fn + counts2.fn, counts1.fp + counts2.fp,
-                     counts1.truth_count + counts2.truth_count,
-                     counts1.test_count + counts2.test_count)
-
-
-def ComputeErrorRates(label_counts, word_counts, seq_errors, num_seqs):
-  """Returns an ErrorRates corresponding to the given counts.
-
-  Args:
-    label_counts: ErrorCounts for the character labels.
-    word_counts: ErrorCounts for the words.
-    seq_errors: Number of sequence errors.
-    num_seqs: Total sequences.
-  Returns:
-    ErrorRates corresponding to the given counts.
-  """
-  label_errors = label_counts.fn + label_counts.fp
-  num_labels = label_counts.truth_count + label_counts.test_count
-  return ErrorRates(
-      ComputeErrorRate(label_errors, num_labels),
-      ComputeErrorRate(word_counts.fn, word_counts.truth_count),
-      ComputeErrorRate(word_counts.fp, word_counts.test_count),
-      ComputeErrorRate(seq_errors, num_seqs))
-
-
-def ComputeErrorRate(error_count, truth_count):
-  """Returns a sanitized percent error rate from the raw counts.
-
-  Prevents div by 0 and clips return to 100%.
-  Args:
-    error_count: Number of errors.
-    truth_count: Number to divide by.
-
-  Returns:
-    100.0 * error_count / truth_count clipped to 100.
-  """
-  if truth_count == 0:
-    truth_count = 1
-    error_count = 1
-  elif error_count > truth_count:
-    error_count = truth_count
-  return error_count * 100.0 / truth_count
diff --git a/research/street/python/errorcounter_test.py b/research/street/python/errorcounter_test.py
deleted file mode 100644
index aeaa36092..000000000
--- a/research/street/python/errorcounter_test.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for errorcounter."""
-import tensorflow as tf
-import errorcounter as ec
-
-
-class ErrorcounterTest(tf.test.TestCase):
-
-  def testComputeErrorRate(self):
-    """Tests that the percent calculation works as expected.
- """ - rate = ec.ComputeErrorRate(error_count=0, truth_count=0) - self.assertEqual(rate, 100.0) - rate = ec.ComputeErrorRate(error_count=1, truth_count=0) - self.assertEqual(rate, 100.0) - rate = ec.ComputeErrorRate(error_count=10, truth_count=1) - self.assertEqual(rate, 100.0) - rate = ec.ComputeErrorRate(error_count=0, truth_count=1) - self.assertEqual(rate, 0.0) - rate = ec.ComputeErrorRate(error_count=3, truth_count=12) - self.assertEqual(rate, 25.0) - - def testCountErrors(self): - """Tests that the error counter works as expected. - """ - truth_str = 'farm barn' - counts = ec.CountErrors(ocr_text=truth_str, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=0, truth_count=9, test_count=9)) - # With a period on the end, we get a char error. - dot_str = 'farm barn.' - counts = ec.CountErrors(ocr_text=dot_str, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=1, truth_count=9, test_count=10)) - counts = ec.CountErrors(ocr_text=truth_str, truth_text=dot_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=1, fp=0, truth_count=10, test_count=9)) - # Space is just another char. - no_space = 'farmbarn' - counts = ec.CountErrors(ocr_text=no_space, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=1, fp=0, truth_count=9, test_count=8)) - counts = ec.CountErrors(ocr_text=truth_str, truth_text=no_space) - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=1, truth_count=8, test_count=9)) - # Lose them all. - counts = ec.CountErrors(ocr_text='', truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=9, fp=0, truth_count=9, test_count=0)) - counts = ec.CountErrors(ocr_text=truth_str, truth_text='') - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=9, truth_count=0, test_count=9)) - - def testCountWordErrors(self): - """Tests that the error counter works as expected. - """ - truth_str = 'farm barn' - counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=0, truth_count=2, test_count=2)) - # With a period on the end, we get a word error. - dot_str = 'farm barn.' - counts = ec.CountWordErrors(ocr_text=dot_str, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=1, fp=1, truth_count=2, test_count=2)) - counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=dot_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=1, fp=1, truth_count=2, test_count=2)) - # Space is special. - no_space = 'farmbarn' - counts = ec.CountWordErrors(ocr_text=no_space, truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=2, fp=1, truth_count=2, test_count=1)) - counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=no_space) - self.assertEqual( - counts, ec.ErrorCounts( - fn=1, fp=2, truth_count=1, test_count=2)) - # Lose them all. - counts = ec.CountWordErrors(ocr_text='', truth_text=truth_str) - self.assertEqual( - counts, ec.ErrorCounts( - fn=2, fp=0, truth_count=2, test_count=0)) - counts = ec.CountWordErrors(ocr_text=truth_str, truth_text='') - self.assertEqual( - counts, ec.ErrorCounts( - fn=0, fp=2, truth_count=0, test_count=2)) - # With a space in ba rn, there is an extra add. 
-    sp_str = 'farm ba rn'
-    counts = ec.CountWordErrors(ocr_text=sp_str, truth_text=truth_str)
-    self.assertEqual(
-        counts, ec.ErrorCounts(
-            fn=1, fp=2, truth_count=2, test_count=3))
-    counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=sp_str)
-    self.assertEqual(
-        counts, ec.ErrorCounts(
-            fn=2, fp=1, truth_count=3, test_count=2))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/street/python/fsns_urls.py b/research/street/python/fsns_urls.py
deleted file mode 100644
index bea547b9d..000000000
--- a/research/street/python/fsns_urls.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Creates a text file with URLs to download the FSNS dataset using aria2c.
-
-The FSNS dataset has 640 files and takes 158 GB of disk space, so it is
-highly recommended to use some kind of a download manager to download it.
-
-Aria2c is a powerful download manager which can download multiple files in
-parallel, retry if it encounters an error, and continue previously unfinished
-downloads.
-"""
-
-import os
-
-_FSNS_BASE_URL = 'http://download.tensorflow.org/data/fsns-20160927/'
-_SHARDS = {'test': 64, 'train': 512, 'validation': 64}
-_OUTPUT_FILE = "fsns_urls.txt"
-_OUTPUT_DIR = "data/fsns"
-
-def fsns_paths():
-  paths = ['charset_size=134.txt']
-  for name, shards in _SHARDS.items():
-    for i in range(shards):
-      paths.append('%s/%s-%05d-of-%05d' % (name, name, i, shards))
-  return paths
-
-
-if __name__ == "__main__":
-  with open(_OUTPUT_FILE, "w") as f:
-    for path in fsns_paths():
-      url = _FSNS_BASE_URL + path
-      dst_path = os.path.join(_OUTPUT_DIR, path)
-      f.write("%s\n  out=%s\n" % (url, dst_path))
-  print("To download FSNS dataset execute:")
-  print("aria2c -c -j 20 -i %s" % _OUTPUT_FILE)
-  print("The downloaded FSNS dataset will be stored under %s" % _OUTPUT_DIR)
diff --git a/research/street/python/fsns_urls.txt b/research/street/python/fsns_urls.txt
deleted file mode 100644
index 959ffbd5d..000000000
--- a/research/street/python/fsns_urls.txt
+++ /dev/null
@@ -1,1282 +0,0 @@
-http://download.tensorflow.org/data/fsns-20160927/charset_size=134.txt
-  out=data/fsns/charset_size=134.txt
-http://download.tensorflow.org/data/fsns-20160927/test/test-00000-of-00064
-  out=data/fsns/test/test-00000-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00001-of-00064
-  out=data/fsns/test/test-00001-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00002-of-00064
-  out=data/fsns/test/test-00002-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00003-of-00064
-  out=data/fsns/test/test-00003-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00004-of-00064
-  out=data/fsns/test/test-00004-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00005-of-00064
-  out=data/fsns/test/test-00005-of-00064
-http://download.tensorflow.org/data/fsns-20160927/test/test-00006-of-00064 - out=data/fsns/test/test-00006-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00007-of-00064 - out=data/fsns/test/test-00007-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00008-of-00064 - out=data/fsns/test/test-00008-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00009-of-00064 - out=data/fsns/test/test-00009-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00010-of-00064 - out=data/fsns/test/test-00010-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00011-of-00064 - out=data/fsns/test/test-00011-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00012-of-00064 - out=data/fsns/test/test-00012-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00013-of-00064 - out=data/fsns/test/test-00013-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00014-of-00064 - out=data/fsns/test/test-00014-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00015-of-00064 - out=data/fsns/test/test-00015-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00016-of-00064 - out=data/fsns/test/test-00016-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00017-of-00064 - out=data/fsns/test/test-00017-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00018-of-00064 - out=data/fsns/test/test-00018-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00019-of-00064 - out=data/fsns/test/test-00019-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00020-of-00064 - out=data/fsns/test/test-00020-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00021-of-00064 - out=data/fsns/test/test-00021-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00022-of-00064 - out=data/fsns/test/test-00022-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00023-of-00064 - out=data/fsns/test/test-00023-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00024-of-00064 - out=data/fsns/test/test-00024-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00025-of-00064 - out=data/fsns/test/test-00025-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00026-of-00064 - out=data/fsns/test/test-00026-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00027-of-00064 - out=data/fsns/test/test-00027-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00028-of-00064 - out=data/fsns/test/test-00028-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00029-of-00064 - out=data/fsns/test/test-00029-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00030-of-00064 - out=data/fsns/test/test-00030-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00031-of-00064 - out=data/fsns/test/test-00031-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00032-of-00064 - out=data/fsns/test/test-00032-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00033-of-00064 - out=data/fsns/test/test-00033-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00034-of-00064 - out=data/fsns/test/test-00034-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00035-of-00064 - out=data/fsns/test/test-00035-of-00064 
-http://download.tensorflow.org/data/fsns-20160927/test/test-00036-of-00064 - out=data/fsns/test/test-00036-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00037-of-00064 - out=data/fsns/test/test-00037-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00038-of-00064 - out=data/fsns/test/test-00038-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00039-of-00064 - out=data/fsns/test/test-00039-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00040-of-00064 - out=data/fsns/test/test-00040-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00041-of-00064 - out=data/fsns/test/test-00041-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00042-of-00064 - out=data/fsns/test/test-00042-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00043-of-00064 - out=data/fsns/test/test-00043-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00044-of-00064 - out=data/fsns/test/test-00044-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00045-of-00064 - out=data/fsns/test/test-00045-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00046-of-00064 - out=data/fsns/test/test-00046-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00047-of-00064 - out=data/fsns/test/test-00047-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00048-of-00064 - out=data/fsns/test/test-00048-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00049-of-00064 - out=data/fsns/test/test-00049-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00050-of-00064 - out=data/fsns/test/test-00050-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00051-of-00064 - out=data/fsns/test/test-00051-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00052-of-00064 - out=data/fsns/test/test-00052-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00053-of-00064 - out=data/fsns/test/test-00053-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00054-of-00064 - out=data/fsns/test/test-00054-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00055-of-00064 - out=data/fsns/test/test-00055-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00056-of-00064 - out=data/fsns/test/test-00056-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00057-of-00064 - out=data/fsns/test/test-00057-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00058-of-00064 - out=data/fsns/test/test-00058-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00059-of-00064 - out=data/fsns/test/test-00059-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00060-of-00064 - out=data/fsns/test/test-00060-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00061-of-00064 - out=data/fsns/test/test-00061-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00062-of-00064 - out=data/fsns/test/test-00062-of-00064 -http://download.tensorflow.org/data/fsns-20160927/test/test-00063-of-00064 - out=data/fsns/test/test-00063-of-00064 -http://download.tensorflow.org/data/fsns-20160927/train/train-00000-of-00512 - out=data/fsns/train/train-00000-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00001-of-00512 - out=data/fsns/train/train-00001-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00002-of-00512 - out=data/fsns/train/train-00002-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00003-of-00512 - out=data/fsns/train/train-00003-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00004-of-00512 - out=data/fsns/train/train-00004-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00005-of-00512 - out=data/fsns/train/train-00005-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00006-of-00512 - out=data/fsns/train/train-00006-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00007-of-00512 - out=data/fsns/train/train-00007-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00008-of-00512 - out=data/fsns/train/train-00008-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00009-of-00512 - out=data/fsns/train/train-00009-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00010-of-00512 - out=data/fsns/train/train-00010-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00011-of-00512 - out=data/fsns/train/train-00011-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00012-of-00512 - out=data/fsns/train/train-00012-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00013-of-00512 - out=data/fsns/train/train-00013-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00014-of-00512 - out=data/fsns/train/train-00014-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00015-of-00512 - out=data/fsns/train/train-00015-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00016-of-00512 - out=data/fsns/train/train-00016-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00017-of-00512 - out=data/fsns/train/train-00017-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00018-of-00512 - out=data/fsns/train/train-00018-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00019-of-00512 - out=data/fsns/train/train-00019-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00020-of-00512 - out=data/fsns/train/train-00020-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00021-of-00512 - out=data/fsns/train/train-00021-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00022-of-00512 - out=data/fsns/train/train-00022-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00023-of-00512 - out=data/fsns/train/train-00023-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00024-of-00512 - out=data/fsns/train/train-00024-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00025-of-00512 - out=data/fsns/train/train-00025-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00026-of-00512 - out=data/fsns/train/train-00026-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00027-of-00512 - out=data/fsns/train/train-00027-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00028-of-00512 - out=data/fsns/train/train-00028-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00029-of-00512 - out=data/fsns/train/train-00029-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00030-of-00512 - out=data/fsns/train/train-00030-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00031-of-00512 - out=data/fsns/train/train-00031-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00032-of-00512 - out=data/fsns/train/train-00032-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00033-of-00512 - out=data/fsns/train/train-00033-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00034-of-00512 - out=data/fsns/train/train-00034-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00035-of-00512 - out=data/fsns/train/train-00035-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00036-of-00512 - out=data/fsns/train/train-00036-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00037-of-00512 - out=data/fsns/train/train-00037-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00038-of-00512 - out=data/fsns/train/train-00038-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00039-of-00512 - out=data/fsns/train/train-00039-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00040-of-00512 - out=data/fsns/train/train-00040-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00041-of-00512 - out=data/fsns/train/train-00041-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00042-of-00512 - out=data/fsns/train/train-00042-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00043-of-00512 - out=data/fsns/train/train-00043-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00044-of-00512 - out=data/fsns/train/train-00044-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00045-of-00512 - out=data/fsns/train/train-00045-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00046-of-00512 - out=data/fsns/train/train-00046-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00047-of-00512 - out=data/fsns/train/train-00047-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00048-of-00512 - out=data/fsns/train/train-00048-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00049-of-00512 - out=data/fsns/train/train-00049-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00050-of-00512 - out=data/fsns/train/train-00050-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00051-of-00512 - out=data/fsns/train/train-00051-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00052-of-00512 - out=data/fsns/train/train-00052-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00053-of-00512 - out=data/fsns/train/train-00053-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00054-of-00512 - out=data/fsns/train/train-00054-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00055-of-00512 - out=data/fsns/train/train-00055-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00056-of-00512 - out=data/fsns/train/train-00056-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00057-of-00512 - out=data/fsns/train/train-00057-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00058-of-00512 - out=data/fsns/train/train-00058-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00059-of-00512 - out=data/fsns/train/train-00059-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00060-of-00512 - out=data/fsns/train/train-00060-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00061-of-00512 - out=data/fsns/train/train-00061-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00062-of-00512 - out=data/fsns/train/train-00062-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00063-of-00512 - out=data/fsns/train/train-00063-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00064-of-00512 - out=data/fsns/train/train-00064-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00065-of-00512 - out=data/fsns/train/train-00065-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00066-of-00512 - out=data/fsns/train/train-00066-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00067-of-00512 - out=data/fsns/train/train-00067-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00068-of-00512 - out=data/fsns/train/train-00068-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00069-of-00512 - out=data/fsns/train/train-00069-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00070-of-00512 - out=data/fsns/train/train-00070-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00071-of-00512 - out=data/fsns/train/train-00071-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00072-of-00512 - out=data/fsns/train/train-00072-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00073-of-00512 - out=data/fsns/train/train-00073-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00074-of-00512 - out=data/fsns/train/train-00074-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00075-of-00512 - out=data/fsns/train/train-00075-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00076-of-00512 - out=data/fsns/train/train-00076-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00077-of-00512 - out=data/fsns/train/train-00077-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00078-of-00512 - out=data/fsns/train/train-00078-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00079-of-00512 - out=data/fsns/train/train-00079-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00080-of-00512 - out=data/fsns/train/train-00080-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00081-of-00512 - out=data/fsns/train/train-00081-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00082-of-00512 - out=data/fsns/train/train-00082-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00083-of-00512 - out=data/fsns/train/train-00083-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00084-of-00512 - out=data/fsns/train/train-00084-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00085-of-00512 - out=data/fsns/train/train-00085-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00086-of-00512 - out=data/fsns/train/train-00086-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00087-of-00512 - out=data/fsns/train/train-00087-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00088-of-00512 - out=data/fsns/train/train-00088-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00089-of-00512 - out=data/fsns/train/train-00089-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00090-of-00512 - out=data/fsns/train/train-00090-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00091-of-00512 - out=data/fsns/train/train-00091-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00092-of-00512 - out=data/fsns/train/train-00092-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00093-of-00512 - out=data/fsns/train/train-00093-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00094-of-00512 - out=data/fsns/train/train-00094-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00095-of-00512 - out=data/fsns/train/train-00095-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00096-of-00512 - out=data/fsns/train/train-00096-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00097-of-00512 - out=data/fsns/train/train-00097-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00098-of-00512 - out=data/fsns/train/train-00098-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00099-of-00512 - out=data/fsns/train/train-00099-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00100-of-00512 - out=data/fsns/train/train-00100-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00101-of-00512 - out=data/fsns/train/train-00101-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00102-of-00512 - out=data/fsns/train/train-00102-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00103-of-00512 - out=data/fsns/train/train-00103-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00104-of-00512 - out=data/fsns/train/train-00104-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00105-of-00512 - out=data/fsns/train/train-00105-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00106-of-00512 - out=data/fsns/train/train-00106-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00107-of-00512 - out=data/fsns/train/train-00107-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00108-of-00512 - out=data/fsns/train/train-00108-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00109-of-00512 - out=data/fsns/train/train-00109-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00110-of-00512 - out=data/fsns/train/train-00110-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00111-of-00512 - out=data/fsns/train/train-00111-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00112-of-00512 - out=data/fsns/train/train-00112-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00113-of-00512 - out=data/fsns/train/train-00113-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00114-of-00512 - out=data/fsns/train/train-00114-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00115-of-00512 - out=data/fsns/train/train-00115-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00116-of-00512 - out=data/fsns/train/train-00116-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00117-of-00512 - out=data/fsns/train/train-00117-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00118-of-00512 - out=data/fsns/train/train-00118-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00119-of-00512 - out=data/fsns/train/train-00119-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00120-of-00512 - out=data/fsns/train/train-00120-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00121-of-00512 - out=data/fsns/train/train-00121-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00122-of-00512 - out=data/fsns/train/train-00122-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00123-of-00512 - out=data/fsns/train/train-00123-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00124-of-00512 - out=data/fsns/train/train-00124-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00125-of-00512 - out=data/fsns/train/train-00125-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00126-of-00512 - out=data/fsns/train/train-00126-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00127-of-00512 - out=data/fsns/train/train-00127-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00128-of-00512 - out=data/fsns/train/train-00128-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00129-of-00512 - out=data/fsns/train/train-00129-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00130-of-00512 - out=data/fsns/train/train-00130-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00131-of-00512 - out=data/fsns/train/train-00131-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00132-of-00512 - out=data/fsns/train/train-00132-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00133-of-00512 - out=data/fsns/train/train-00133-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00134-of-00512 - out=data/fsns/train/train-00134-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00135-of-00512 - out=data/fsns/train/train-00135-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00136-of-00512 - out=data/fsns/train/train-00136-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00137-of-00512 - out=data/fsns/train/train-00137-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00138-of-00512 - out=data/fsns/train/train-00138-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00139-of-00512 - out=data/fsns/train/train-00139-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00140-of-00512 - out=data/fsns/train/train-00140-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00141-of-00512 - out=data/fsns/train/train-00141-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00142-of-00512 - out=data/fsns/train/train-00142-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00143-of-00512 - out=data/fsns/train/train-00143-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00144-of-00512 - out=data/fsns/train/train-00144-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00145-of-00512 - out=data/fsns/train/train-00145-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00146-of-00512 - out=data/fsns/train/train-00146-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00147-of-00512 - out=data/fsns/train/train-00147-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00148-of-00512 - out=data/fsns/train/train-00148-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00149-of-00512 - out=data/fsns/train/train-00149-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00150-of-00512 - out=data/fsns/train/train-00150-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00151-of-00512 - out=data/fsns/train/train-00151-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00152-of-00512 - out=data/fsns/train/train-00152-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00153-of-00512 - out=data/fsns/train/train-00153-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00154-of-00512 - out=data/fsns/train/train-00154-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00155-of-00512 - out=data/fsns/train/train-00155-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00156-of-00512 - out=data/fsns/train/train-00156-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00157-of-00512 - out=data/fsns/train/train-00157-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00158-of-00512 - out=data/fsns/train/train-00158-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00159-of-00512 - out=data/fsns/train/train-00159-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00160-of-00512 - out=data/fsns/train/train-00160-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00161-of-00512 - out=data/fsns/train/train-00161-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00162-of-00512 - out=data/fsns/train/train-00162-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00163-of-00512 - out=data/fsns/train/train-00163-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00164-of-00512 - out=data/fsns/train/train-00164-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00165-of-00512 - out=data/fsns/train/train-00165-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00166-of-00512 - out=data/fsns/train/train-00166-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00167-of-00512 - out=data/fsns/train/train-00167-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00168-of-00512 - out=data/fsns/train/train-00168-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00169-of-00512 - out=data/fsns/train/train-00169-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00170-of-00512 - out=data/fsns/train/train-00170-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00171-of-00512 - out=data/fsns/train/train-00171-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00172-of-00512 - out=data/fsns/train/train-00172-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00173-of-00512 - out=data/fsns/train/train-00173-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00174-of-00512 - out=data/fsns/train/train-00174-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00175-of-00512 - out=data/fsns/train/train-00175-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00176-of-00512 - out=data/fsns/train/train-00176-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00177-of-00512 - out=data/fsns/train/train-00177-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00178-of-00512 - out=data/fsns/train/train-00178-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00179-of-00512 - out=data/fsns/train/train-00179-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00180-of-00512 - out=data/fsns/train/train-00180-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00181-of-00512 - out=data/fsns/train/train-00181-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00182-of-00512 - out=data/fsns/train/train-00182-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00183-of-00512 - out=data/fsns/train/train-00183-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00184-of-00512 - out=data/fsns/train/train-00184-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00185-of-00512 - out=data/fsns/train/train-00185-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00186-of-00512 - out=data/fsns/train/train-00186-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00187-of-00512 - out=data/fsns/train/train-00187-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00188-of-00512 - out=data/fsns/train/train-00188-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00189-of-00512 - out=data/fsns/train/train-00189-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00190-of-00512 - out=data/fsns/train/train-00190-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00191-of-00512 - out=data/fsns/train/train-00191-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00192-of-00512 - out=data/fsns/train/train-00192-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00193-of-00512 - out=data/fsns/train/train-00193-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00194-of-00512 - out=data/fsns/train/train-00194-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00195-of-00512 - out=data/fsns/train/train-00195-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00196-of-00512 - out=data/fsns/train/train-00196-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00197-of-00512 - out=data/fsns/train/train-00197-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00198-of-00512 - out=data/fsns/train/train-00198-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00199-of-00512 - out=data/fsns/train/train-00199-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00200-of-00512 - out=data/fsns/train/train-00200-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00201-of-00512 - out=data/fsns/train/train-00201-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00202-of-00512 - out=data/fsns/train/train-00202-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00203-of-00512 - out=data/fsns/train/train-00203-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00204-of-00512 - out=data/fsns/train/train-00204-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00205-of-00512 - out=data/fsns/train/train-00205-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00206-of-00512 - out=data/fsns/train/train-00206-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00207-of-00512 - out=data/fsns/train/train-00207-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00208-of-00512 - out=data/fsns/train/train-00208-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00209-of-00512 - out=data/fsns/train/train-00209-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00210-of-00512 - out=data/fsns/train/train-00210-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00211-of-00512 - out=data/fsns/train/train-00211-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00212-of-00512 - out=data/fsns/train/train-00212-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00213-of-00512 - out=data/fsns/train/train-00213-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00214-of-00512 - out=data/fsns/train/train-00214-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00215-of-00512 - out=data/fsns/train/train-00215-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00216-of-00512 - out=data/fsns/train/train-00216-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00217-of-00512 - out=data/fsns/train/train-00217-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00218-of-00512 - out=data/fsns/train/train-00218-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00219-of-00512 - out=data/fsns/train/train-00219-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00220-of-00512 - out=data/fsns/train/train-00220-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00221-of-00512 - out=data/fsns/train/train-00221-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00222-of-00512 - out=data/fsns/train/train-00222-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00223-of-00512 - out=data/fsns/train/train-00223-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00224-of-00512 - out=data/fsns/train/train-00224-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00225-of-00512 - out=data/fsns/train/train-00225-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00226-of-00512 - out=data/fsns/train/train-00226-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00227-of-00512 - out=data/fsns/train/train-00227-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00228-of-00512 - out=data/fsns/train/train-00228-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00229-of-00512 - out=data/fsns/train/train-00229-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00230-of-00512 - out=data/fsns/train/train-00230-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00231-of-00512 - out=data/fsns/train/train-00231-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00232-of-00512 - out=data/fsns/train/train-00232-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00233-of-00512 - out=data/fsns/train/train-00233-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00234-of-00512 - out=data/fsns/train/train-00234-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00235-of-00512 - out=data/fsns/train/train-00235-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00236-of-00512 - out=data/fsns/train/train-00236-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00237-of-00512 - out=data/fsns/train/train-00237-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00238-of-00512 - out=data/fsns/train/train-00238-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00239-of-00512 - out=data/fsns/train/train-00239-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00240-of-00512 - out=data/fsns/train/train-00240-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00241-of-00512 - out=data/fsns/train/train-00241-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00242-of-00512 - out=data/fsns/train/train-00242-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00243-of-00512 - out=data/fsns/train/train-00243-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00244-of-00512 - out=data/fsns/train/train-00244-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00245-of-00512 - out=data/fsns/train/train-00245-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00246-of-00512 - out=data/fsns/train/train-00246-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00247-of-00512 - out=data/fsns/train/train-00247-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00248-of-00512 - out=data/fsns/train/train-00248-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00249-of-00512 - out=data/fsns/train/train-00249-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00250-of-00512 - out=data/fsns/train/train-00250-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00251-of-00512 - out=data/fsns/train/train-00251-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00252-of-00512 - out=data/fsns/train/train-00252-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00253-of-00512 - out=data/fsns/train/train-00253-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00254-of-00512 - out=data/fsns/train/train-00254-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00255-of-00512 - out=data/fsns/train/train-00255-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00256-of-00512 - out=data/fsns/train/train-00256-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00257-of-00512 - out=data/fsns/train/train-00257-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00258-of-00512 - out=data/fsns/train/train-00258-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00259-of-00512 - out=data/fsns/train/train-00259-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00260-of-00512 - out=data/fsns/train/train-00260-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00261-of-00512 - out=data/fsns/train/train-00261-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00262-of-00512 - out=data/fsns/train/train-00262-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00263-of-00512 - out=data/fsns/train/train-00263-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00264-of-00512 - out=data/fsns/train/train-00264-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00265-of-00512 - out=data/fsns/train/train-00265-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00266-of-00512 - out=data/fsns/train/train-00266-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00267-of-00512 - out=data/fsns/train/train-00267-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00268-of-00512 - out=data/fsns/train/train-00268-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00269-of-00512 - out=data/fsns/train/train-00269-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00270-of-00512 - out=data/fsns/train/train-00270-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00271-of-00512 - out=data/fsns/train/train-00271-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00272-of-00512 - out=data/fsns/train/train-00272-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00273-of-00512 - out=data/fsns/train/train-00273-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00274-of-00512 - out=data/fsns/train/train-00274-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00275-of-00512 - out=data/fsns/train/train-00275-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00276-of-00512 - out=data/fsns/train/train-00276-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00277-of-00512 - out=data/fsns/train/train-00277-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00278-of-00512 - out=data/fsns/train/train-00278-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00279-of-00512 - out=data/fsns/train/train-00279-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00280-of-00512 - out=data/fsns/train/train-00280-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00281-of-00512 - out=data/fsns/train/train-00281-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00282-of-00512 - out=data/fsns/train/train-00282-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00283-of-00512 - out=data/fsns/train/train-00283-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00284-of-00512 - out=data/fsns/train/train-00284-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00285-of-00512 - out=data/fsns/train/train-00285-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00286-of-00512 - out=data/fsns/train/train-00286-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00287-of-00512 - out=data/fsns/train/train-00287-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00288-of-00512 - out=data/fsns/train/train-00288-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00289-of-00512 - out=data/fsns/train/train-00289-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00290-of-00512 - out=data/fsns/train/train-00290-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00291-of-00512 - out=data/fsns/train/train-00291-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00292-of-00512 - out=data/fsns/train/train-00292-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00293-of-00512 - out=data/fsns/train/train-00293-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00294-of-00512 - out=data/fsns/train/train-00294-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00295-of-00512 - out=data/fsns/train/train-00295-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00296-of-00512 - out=data/fsns/train/train-00296-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00297-of-00512 - out=data/fsns/train/train-00297-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00298-of-00512 - out=data/fsns/train/train-00298-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00299-of-00512 - out=data/fsns/train/train-00299-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00300-of-00512 - out=data/fsns/train/train-00300-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00301-of-00512 - out=data/fsns/train/train-00301-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00302-of-00512 - out=data/fsns/train/train-00302-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00303-of-00512 - out=data/fsns/train/train-00303-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00304-of-00512 - out=data/fsns/train/train-00304-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00305-of-00512 - out=data/fsns/train/train-00305-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00306-of-00512 - out=data/fsns/train/train-00306-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00307-of-00512 - out=data/fsns/train/train-00307-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00308-of-00512 - out=data/fsns/train/train-00308-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00309-of-00512 - out=data/fsns/train/train-00309-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00310-of-00512 - out=data/fsns/train/train-00310-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00311-of-00512 - out=data/fsns/train/train-00311-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00312-of-00512 - out=data/fsns/train/train-00312-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00313-of-00512 - out=data/fsns/train/train-00313-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00314-of-00512 - out=data/fsns/train/train-00314-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00315-of-00512 - out=data/fsns/train/train-00315-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00316-of-00512 - out=data/fsns/train/train-00316-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00317-of-00512 - out=data/fsns/train/train-00317-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00318-of-00512 - out=data/fsns/train/train-00318-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00319-of-00512 - out=data/fsns/train/train-00319-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00320-of-00512 - out=data/fsns/train/train-00320-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00321-of-00512 - out=data/fsns/train/train-00321-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00322-of-00512 - out=data/fsns/train/train-00322-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00323-of-00512 - out=data/fsns/train/train-00323-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00324-of-00512 - out=data/fsns/train/train-00324-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00325-of-00512 - out=data/fsns/train/train-00325-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00326-of-00512 - out=data/fsns/train/train-00326-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00327-of-00512 - out=data/fsns/train/train-00327-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00328-of-00512 - out=data/fsns/train/train-00328-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00329-of-00512 - out=data/fsns/train/train-00329-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00330-of-00512 - out=data/fsns/train/train-00330-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00331-of-00512 - out=data/fsns/train/train-00331-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00332-of-00512 - out=data/fsns/train/train-00332-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00333-of-00512 - out=data/fsns/train/train-00333-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00334-of-00512 - out=data/fsns/train/train-00334-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00335-of-00512 - out=data/fsns/train/train-00335-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00336-of-00512 - out=data/fsns/train/train-00336-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00337-of-00512 - out=data/fsns/train/train-00337-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00338-of-00512 - out=data/fsns/train/train-00338-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00339-of-00512 - out=data/fsns/train/train-00339-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00340-of-00512 - out=data/fsns/train/train-00340-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00341-of-00512 - out=data/fsns/train/train-00341-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00342-of-00512 - out=data/fsns/train/train-00342-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00343-of-00512 - out=data/fsns/train/train-00343-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00344-of-00512 - out=data/fsns/train/train-00344-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00345-of-00512 - out=data/fsns/train/train-00345-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00346-of-00512 - out=data/fsns/train/train-00346-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00347-of-00512 - out=data/fsns/train/train-00347-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00348-of-00512 - out=data/fsns/train/train-00348-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00349-of-00512 - out=data/fsns/train/train-00349-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00350-of-00512 - out=data/fsns/train/train-00350-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00351-of-00512 - out=data/fsns/train/train-00351-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00352-of-00512 - out=data/fsns/train/train-00352-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00353-of-00512 - out=data/fsns/train/train-00353-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00354-of-00512 - out=data/fsns/train/train-00354-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00355-of-00512 - out=data/fsns/train/train-00355-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00356-of-00512 - out=data/fsns/train/train-00356-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00357-of-00512 - out=data/fsns/train/train-00357-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00358-of-00512 - out=data/fsns/train/train-00358-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00359-of-00512 - out=data/fsns/train/train-00359-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00360-of-00512 - out=data/fsns/train/train-00360-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00361-of-00512 - out=data/fsns/train/train-00361-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00362-of-00512 - out=data/fsns/train/train-00362-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00363-of-00512 - out=data/fsns/train/train-00363-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00364-of-00512 - out=data/fsns/train/train-00364-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00365-of-00512 - out=data/fsns/train/train-00365-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00366-of-00512 - out=data/fsns/train/train-00366-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00367-of-00512 - out=data/fsns/train/train-00367-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00368-of-00512 - out=data/fsns/train/train-00368-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00369-of-00512 - out=data/fsns/train/train-00369-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00370-of-00512 - out=data/fsns/train/train-00370-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00371-of-00512 - out=data/fsns/train/train-00371-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00372-of-00512 - out=data/fsns/train/train-00372-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00373-of-00512 - out=data/fsns/train/train-00373-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00374-of-00512 - out=data/fsns/train/train-00374-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00375-of-00512 - out=data/fsns/train/train-00375-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00376-of-00512 - out=data/fsns/train/train-00376-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00377-of-00512 - out=data/fsns/train/train-00377-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00378-of-00512 - out=data/fsns/train/train-00378-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00379-of-00512 - out=data/fsns/train/train-00379-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00380-of-00512 - out=data/fsns/train/train-00380-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00381-of-00512 - out=data/fsns/train/train-00381-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00382-of-00512 - out=data/fsns/train/train-00382-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00383-of-00512 - out=data/fsns/train/train-00383-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00384-of-00512 - out=data/fsns/train/train-00384-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00385-of-00512 - out=data/fsns/train/train-00385-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00386-of-00512 - out=data/fsns/train/train-00386-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00387-of-00512 - out=data/fsns/train/train-00387-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00388-of-00512 - out=data/fsns/train/train-00388-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00389-of-00512 - out=data/fsns/train/train-00389-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00390-of-00512 - out=data/fsns/train/train-00390-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00391-of-00512 - out=data/fsns/train/train-00391-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00392-of-00512 - out=data/fsns/train/train-00392-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00393-of-00512 - out=data/fsns/train/train-00393-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00394-of-00512 - out=data/fsns/train/train-00394-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00395-of-00512 - out=data/fsns/train/train-00395-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00396-of-00512 - out=data/fsns/train/train-00396-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00397-of-00512 - out=data/fsns/train/train-00397-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00398-of-00512 - out=data/fsns/train/train-00398-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00399-of-00512 - out=data/fsns/train/train-00399-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00400-of-00512 - out=data/fsns/train/train-00400-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00401-of-00512 - out=data/fsns/train/train-00401-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00402-of-00512 - out=data/fsns/train/train-00402-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00403-of-00512 - out=data/fsns/train/train-00403-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00404-of-00512 - out=data/fsns/train/train-00404-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00405-of-00512 - out=data/fsns/train/train-00405-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00406-of-00512 - out=data/fsns/train/train-00406-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00407-of-00512 - out=data/fsns/train/train-00407-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00408-of-00512 - out=data/fsns/train/train-00408-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00409-of-00512 - out=data/fsns/train/train-00409-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00410-of-00512 - out=data/fsns/train/train-00410-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00411-of-00512 - out=data/fsns/train/train-00411-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00412-of-00512 - out=data/fsns/train/train-00412-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00413-of-00512 - out=data/fsns/train/train-00413-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00414-of-00512 - out=data/fsns/train/train-00414-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00415-of-00512 - out=data/fsns/train/train-00415-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00416-of-00512 - out=data/fsns/train/train-00416-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00417-of-00512 - out=data/fsns/train/train-00417-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00418-of-00512 - out=data/fsns/train/train-00418-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00419-of-00512 - out=data/fsns/train/train-00419-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00420-of-00512 - out=data/fsns/train/train-00420-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00421-of-00512 - out=data/fsns/train/train-00421-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00422-of-00512 - out=data/fsns/train/train-00422-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00423-of-00512 - out=data/fsns/train/train-00423-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00424-of-00512 - out=data/fsns/train/train-00424-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00425-of-00512 - out=data/fsns/train/train-00425-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00426-of-00512 - out=data/fsns/train/train-00426-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00427-of-00512 - out=data/fsns/train/train-00427-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00428-of-00512 - out=data/fsns/train/train-00428-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00429-of-00512 - out=data/fsns/train/train-00429-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00430-of-00512 - out=data/fsns/train/train-00430-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00431-of-00512 - out=data/fsns/train/train-00431-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00432-of-00512 - out=data/fsns/train/train-00432-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00433-of-00512 - out=data/fsns/train/train-00433-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00434-of-00512 - out=data/fsns/train/train-00434-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00435-of-00512 - out=data/fsns/train/train-00435-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00436-of-00512 - out=data/fsns/train/train-00436-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00437-of-00512 - out=data/fsns/train/train-00437-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00438-of-00512 - out=data/fsns/train/train-00438-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00439-of-00512 - out=data/fsns/train/train-00439-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00440-of-00512 - out=data/fsns/train/train-00440-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00441-of-00512 - out=data/fsns/train/train-00441-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00442-of-00512 - out=data/fsns/train/train-00442-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00443-of-00512 - out=data/fsns/train/train-00443-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00444-of-00512 - out=data/fsns/train/train-00444-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00445-of-00512 - out=data/fsns/train/train-00445-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00446-of-00512 - out=data/fsns/train/train-00446-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00447-of-00512 - out=data/fsns/train/train-00447-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00448-of-00512 - out=data/fsns/train/train-00448-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00449-of-00512 - out=data/fsns/train/train-00449-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00450-of-00512 - out=data/fsns/train/train-00450-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00451-of-00512 - out=data/fsns/train/train-00451-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00452-of-00512 - out=data/fsns/train/train-00452-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00453-of-00512 - out=data/fsns/train/train-00453-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00454-of-00512 - out=data/fsns/train/train-00454-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00455-of-00512 - out=data/fsns/train/train-00455-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00456-of-00512 - out=data/fsns/train/train-00456-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00457-of-00512 - out=data/fsns/train/train-00457-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00458-of-00512 - out=data/fsns/train/train-00458-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00459-of-00512 - out=data/fsns/train/train-00459-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00460-of-00512 - out=data/fsns/train/train-00460-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00461-of-00512 - out=data/fsns/train/train-00461-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00462-of-00512 - out=data/fsns/train/train-00462-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00463-of-00512 - out=data/fsns/train/train-00463-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00464-of-00512 - out=data/fsns/train/train-00464-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00465-of-00512 - out=data/fsns/train/train-00465-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00466-of-00512 - out=data/fsns/train/train-00466-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00467-of-00512 - out=data/fsns/train/train-00467-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00468-of-00512 - out=data/fsns/train/train-00468-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00469-of-00512 - out=data/fsns/train/train-00469-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00470-of-00512 - out=data/fsns/train/train-00470-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00471-of-00512 - out=data/fsns/train/train-00471-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00472-of-00512 - out=data/fsns/train/train-00472-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00473-of-00512 - out=data/fsns/train/train-00473-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00474-of-00512 - out=data/fsns/train/train-00474-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00475-of-00512 - out=data/fsns/train/train-00475-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00476-of-00512 - out=data/fsns/train/train-00476-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00477-of-00512 - out=data/fsns/train/train-00477-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00478-of-00512 - out=data/fsns/train/train-00478-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00479-of-00512 - out=data/fsns/train/train-00479-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00480-of-00512 - out=data/fsns/train/train-00480-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00481-of-00512 - out=data/fsns/train/train-00481-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00482-of-00512 - out=data/fsns/train/train-00482-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00483-of-00512 - out=data/fsns/train/train-00483-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00484-of-00512 - out=data/fsns/train/train-00484-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00485-of-00512 - out=data/fsns/train/train-00485-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00486-of-00512 - out=data/fsns/train/train-00486-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00487-of-00512 - out=data/fsns/train/train-00487-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00488-of-00512 - out=data/fsns/train/train-00488-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00489-of-00512 - out=data/fsns/train/train-00489-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00490-of-00512 - out=data/fsns/train/train-00490-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00491-of-00512 - out=data/fsns/train/train-00491-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00492-of-00512 - out=data/fsns/train/train-00492-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00493-of-00512 - out=data/fsns/train/train-00493-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00494-of-00512 - out=data/fsns/train/train-00494-of-00512 
-http://download.tensorflow.org/data/fsns-20160927/train/train-00495-of-00512 - out=data/fsns/train/train-00495-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00496-of-00512 - out=data/fsns/train/train-00496-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00497-of-00512 - out=data/fsns/train/train-00497-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00498-of-00512 - out=data/fsns/train/train-00498-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00499-of-00512 - out=data/fsns/train/train-00499-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00500-of-00512 - out=data/fsns/train/train-00500-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00501-of-00512 - out=data/fsns/train/train-00501-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00502-of-00512 - out=data/fsns/train/train-00502-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00503-of-00512 - out=data/fsns/train/train-00503-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00504-of-00512 - out=data/fsns/train/train-00504-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00505-of-00512 - out=data/fsns/train/train-00505-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00506-of-00512 - out=data/fsns/train/train-00506-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00507-of-00512 - out=data/fsns/train/train-00507-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00508-of-00512 - out=data/fsns/train/train-00508-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00509-of-00512 - out=data/fsns/train/train-00509-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00510-of-00512 - out=data/fsns/train/train-00510-of-00512 -http://download.tensorflow.org/data/fsns-20160927/train/train-00511-of-00512 - out=data/fsns/train/train-00511-of-00512 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00000-of-00064 - out=data/fsns/validation/validation-00000-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00001-of-00064 - out=data/fsns/validation/validation-00001-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00002-of-00064 - out=data/fsns/validation/validation-00002-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00003-of-00064 - out=data/fsns/validation/validation-00003-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00004-of-00064 - out=data/fsns/validation/validation-00004-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00005-of-00064 - out=data/fsns/validation/validation-00005-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00006-of-00064 - out=data/fsns/validation/validation-00006-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00007-of-00064 - out=data/fsns/validation/validation-00007-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00008-of-00064 - out=data/fsns/validation/validation-00008-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00009-of-00064 - out=data/fsns/validation/validation-00009-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00010-of-00064 
- out=data/fsns/validation/validation-00010-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00011-of-00064 - out=data/fsns/validation/validation-00011-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00012-of-00064 - out=data/fsns/validation/validation-00012-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00013-of-00064 - out=data/fsns/validation/validation-00013-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00014-of-00064 - out=data/fsns/validation/validation-00014-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00015-of-00064 - out=data/fsns/validation/validation-00015-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00016-of-00064 - out=data/fsns/validation/validation-00016-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00017-of-00064 - out=data/fsns/validation/validation-00017-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00018-of-00064 - out=data/fsns/validation/validation-00018-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00019-of-00064 - out=data/fsns/validation/validation-00019-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00020-of-00064 - out=data/fsns/validation/validation-00020-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00021-of-00064 - out=data/fsns/validation/validation-00021-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00022-of-00064 - out=data/fsns/validation/validation-00022-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00023-of-00064 - out=data/fsns/validation/validation-00023-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00024-of-00064 - out=data/fsns/validation/validation-00024-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00025-of-00064 - out=data/fsns/validation/validation-00025-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00026-of-00064 - out=data/fsns/validation/validation-00026-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00027-of-00064 - out=data/fsns/validation/validation-00027-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00028-of-00064 - out=data/fsns/validation/validation-00028-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00029-of-00064 - out=data/fsns/validation/validation-00029-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00030-of-00064 - out=data/fsns/validation/validation-00030-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00031-of-00064 - out=data/fsns/validation/validation-00031-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00032-of-00064 - out=data/fsns/validation/validation-00032-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00033-of-00064 - out=data/fsns/validation/validation-00033-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00034-of-00064 - out=data/fsns/validation/validation-00034-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00035-of-00064 - 
out=data/fsns/validation/validation-00035-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00036-of-00064 - out=data/fsns/validation/validation-00036-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00037-of-00064 - out=data/fsns/validation/validation-00037-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00038-of-00064 - out=data/fsns/validation/validation-00038-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00039-of-00064 - out=data/fsns/validation/validation-00039-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00040-of-00064 - out=data/fsns/validation/validation-00040-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00041-of-00064 - out=data/fsns/validation/validation-00041-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00042-of-00064 - out=data/fsns/validation/validation-00042-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00043-of-00064 - out=data/fsns/validation/validation-00043-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00044-of-00064 - out=data/fsns/validation/validation-00044-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00045-of-00064 - out=data/fsns/validation/validation-00045-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00046-of-00064 - out=data/fsns/validation/validation-00046-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00047-of-00064 - out=data/fsns/validation/validation-00047-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00048-of-00064 - out=data/fsns/validation/validation-00048-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00049-of-00064 - out=data/fsns/validation/validation-00049-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00050-of-00064 - out=data/fsns/validation/validation-00050-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00051-of-00064 - out=data/fsns/validation/validation-00051-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00052-of-00064 - out=data/fsns/validation/validation-00052-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00053-of-00064 - out=data/fsns/validation/validation-00053-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00054-of-00064 - out=data/fsns/validation/validation-00054-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00055-of-00064 - out=data/fsns/validation/validation-00055-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00056-of-00064 - out=data/fsns/validation/validation-00056-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00057-of-00064 - out=data/fsns/validation/validation-00057-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00058-of-00064 - out=data/fsns/validation/validation-00058-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00059-of-00064 - out=data/fsns/validation/validation-00059-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00060-of-00064 - 
out=data/fsns/validation/validation-00060-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00061-of-00064 - out=data/fsns/validation/validation-00061-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00062-of-00064 - out=data/fsns/validation/validation-00062-of-00064 -http://download.tensorflow.org/data/fsns-20160927/validation/validation-00063-of-00064 - out=data/fsns/validation/validation-00063-of-00064 diff --git a/research/street/python/nn_ops.py b/research/street/python/nn_ops.py deleted file mode 100644 index 20c3b5028..000000000 --- a/research/street/python/nn_ops.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Ops and utilities for neural networks. - -For now, just an LSTM layer. -""" -import shapes -import tensorflow as tf -rnn = tf.load_op_library("../cc/rnn_ops.so") - - -def rnn_helper(inp, - length, - cell_type=None, - direction="forward", - name=None, - *args, - **kwargs): - """Adds ops for a recurrent neural network layer. - - This function calls an actual implementation of a recurrent neural network - based on `cell_type`. - - There are three modes depending on the value of `direction`: - - forward: Adds a forward RNN. - backward: Adds a backward RNN. - bidirectional: Adds both forward and backward RNNs and creates a - bidirectional RNN. - - Args: - inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`]. - length: A 1-D tensor of shape [`batch_size`] and type int64. Each element - represents the length of the corresponding sequence in `inp`. - cell_type: Cell type of RNN. Currently can only be "lstm". - direction: One of "forward", "backward", "bidirectional". - name: Name of the op. - *args: Other arguments to the layer. - **kwargs: Keyword arugments to the layer. - - Returns: - A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`]. - """ - - assert cell_type is not None - rnn_func = None - if cell_type == "lstm": - rnn_func = lstm_layer - assert rnn_func is not None - assert direction in ["forward", "backward", "bidirectional"] - - with tf.variable_scope(name): - if direction in ["forward", "bidirectional"]: - forward = rnn_func( - inp=inp, - length=length, - backward=False, - name="forward", - *args, - **kwargs) - if isinstance(forward, tuple): - # lstm_layer returns a tuple (output, memory). We only need the first - # element. - forward = forward[0] - if direction in ["backward", "bidirectional"]: - backward = rnn_func( - inp=inp, - length=length, - backward=True, - name="backward", - *args, - **kwargs) - if isinstance(backward, tuple): - # lstm_layer returns a tuple (output, memory). We only need the first - # element. 
- backward = backward[0] - if direction == "forward": - out = forward - elif direction == "backward": - out = backward - else: - out = tf.concat(axis=2, values=[forward, backward]) - return out - - -@tf.RegisterShape("VariableLSTM") -def _variable_lstm_shape(op): - """Shape function for the VariableLSTM op.""" - input_shape = op.inputs[0].get_shape().with_rank(4) - state_shape = op.inputs[1].get_shape().with_rank(2) - memory_shape = op.inputs[2].get_shape().with_rank(2) - w_m_m_shape = op.inputs[3].get_shape().with_rank(3) - batch_size = input_shape[0].merge_with(state_shape[0]) - batch_size = input_shape[0].merge_with(memory_shape[0]) - seq_len = input_shape[1] - gate_num = input_shape[2].merge_with(w_m_m_shape[1]) - output_dim = input_shape[3].merge_with(state_shape[1]) - output_dim = output_dim.merge_with(memory_shape[1]) - output_dim = output_dim.merge_with(w_m_m_shape[0]) - output_dim = output_dim.merge_with(w_m_m_shape[2]) - return [[batch_size, seq_len, output_dim], - [batch_size, seq_len, gate_num, output_dim], - [batch_size, seq_len, output_dim]] - - -@tf.RegisterGradient("VariableLSTM") -def _variable_lstm_grad(op, act_grad, gate_grad, mem_grad): - """Gradient function for the VariableLSTM op.""" - initial_state = op.inputs[1] - initial_memory = op.inputs[2] - w_m_m = op.inputs[3] - act = op.outputs[0] - gate_raw_act = op.outputs[1] - memory = op.outputs[2] - return rnn.variable_lstm_grad(initial_state, initial_memory, w_m_m, act, - gate_raw_act, memory, act_grad, gate_grad, - mem_grad) - - -def lstm_layer(inp, - length=None, - state=None, - memory=None, - num_nodes=None, - backward=False, - clip=50.0, - reg_func=tf.nn.l2_loss, - weight_reg=False, - weight_collection="LSTMWeights", - bias_reg=False, - stddev=None, - seed=None, - decode=False, - use_native_weights=False, - name=None): - """Adds ops for an LSTM layer. - - This adds ops for the following operations: - - input => (forward-LSTM|backward-LSTM) => output - - The direction of the LSTM is determined by `backward`. If it is false, the - forward LSTM is used, the backward one otherwise. - - Args: - inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`]. - length: A 1-D tensor of shape [`batch_size`] and type int64. Each element - represents the length of the corresponding sequence in `inp`. - state: If specified, uses it as the initial state. - memory: If specified, uses it as the initial memory. - num_nodes: The number of LSTM cells. - backward: If true, reverses the `inp` before adding the ops. The output is - also reversed so that the direction is the same as `inp`. - clip: Value used to clip the cell values. - reg_func: Function used for the weight regularization such as - `tf.nn.l2_loss`. - weight_reg: If true, regularize the filter weights with `reg_func`. - weight_collection: Collection to add the weights to for regularization. - bias_reg: If true, regularize the bias vector with `reg_func`. - stddev: Standard deviation used to initialize the variables. - seed: Seed used to initialize the variables. - decode: If true, does not add ops which are not used for inference. - use_native_weights: If true, uses weights in the same format as the native - implementations. - name: Name of the op. - - Returns: - A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`]. 
- """ - with tf.variable_scope(name): - if backward: - if length is None: - inp = tf.reverse(inp, [1]) - else: - inp = tf.reverse_sequence(inp, length, 1, 0) - - num_prev = inp.get_shape()[2] - if stddev: - initializer = tf.truncated_normal_initializer(stddev=stddev, seed=seed) - else: - initializer = tf.uniform_unit_scaling_initializer(seed=seed) - - if use_native_weights: - with tf.variable_scope("LSTMCell"): - w = tf.get_variable( - "W_0", - shape=[num_prev + num_nodes, 4 * num_nodes], - initializer=initializer, - dtype=tf.float32) - w_i_m = tf.slice(w, [0, 0], [num_prev, 4 * num_nodes], name="w_i_m") - w_m_m = tf.reshape( - tf.slice(w, [num_prev, 0], [num_nodes, 4 * num_nodes]), - [num_nodes, 4, num_nodes], - name="w_m_m") - else: - w_i_m = tf.get_variable("w_i_m", [num_prev, 4 * num_nodes], - initializer=initializer) - w_m_m = tf.get_variable("w_m_m", [num_nodes, 4, num_nodes], - initializer=initializer) - - if not decode and weight_reg: - tf.add_to_collection(weight_collection, reg_func(w_i_m, name="w_i_m_reg")) - tf.add_to_collection(weight_collection, reg_func(w_m_m, name="w_m_m_reg")) - - batch_size = shapes.tensor_dim(inp, dim=0) - num_frames = shapes.tensor_dim(inp, dim=1) - prev = tf.reshape(inp, tf.stack([batch_size * num_frames, num_prev])) - - if use_native_weights: - with tf.variable_scope("LSTMCell"): - b = tf.get_variable( - "B", - shape=[4 * num_nodes], - initializer=tf.zeros_initializer(), - dtype=tf.float32) - biases = tf.identity(b, name="biases") - else: - biases = tf.get_variable( - "biases", [4 * num_nodes], initializer=tf.constant_initializer(0.0)) - if not decode and bias_reg: - tf.add_to_collection( - weight_collection, reg_func( - biases, name="biases_reg")) - prev = tf.nn.xw_plus_b(prev, w_i_m, biases) - - prev = tf.reshape(prev, tf.stack([batch_size, num_frames, 4, num_nodes])) - if state is None: - state = tf.fill(tf.stack([batch_size, num_nodes]), 0.0) - if memory is None: - memory = tf.fill(tf.stack([batch_size, num_nodes]), 0.0) - - out, _, mem = rnn.variable_lstm(prev, state, memory, w_m_m, clip=clip) - - if backward: - if length is None: - out = tf.reverse(out, [1]) - else: - out = tf.reverse_sequence(out, length, 1, 0) - - return out, mem diff --git a/research/street/python/shapes.py b/research/street/python/shapes.py deleted file mode 100644 index 1f56ef05d..000000000 --- a/research/street/python/shapes.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Shape manipulation functions. - -rotate_dimensions: prepares for a rotating transpose by returning a rotated - list of dimension indices. -transposing_reshape: allows a dimension to be factorized, with one of the pieces - transferred to another dimension, or to transpose factors within a single - dimension. -tensor_dim: gets a shape dimension as a constant integer if known otherwise a - runtime usable tensor value. 
-tensor_shape: returns the full shape of a tensor as the tensor_dim. -""" -from six.moves import xrange -import tensorflow as tf - - -def rotate_dimensions(num_dims, src_dim, dest_dim): - """Returns a list of dimension indices that will rotate src_dim to dest_dim. - - src_dim is moved to dest_dim, with all intervening dimensions shifted towards - the hole left by src_dim. Eg: - num_dims = 4, src_dim=3, dest_dim=1 - Returned list=[0, 3, 1, 2] - For a tensor with dims=[5, 4, 3, 2] a transpose would yield [5, 2, 4, 3]. - Args: - num_dims: The number of dimensions to handle. - src_dim: The dimension to move. - dest_dim: The dimension to move src_dim to. - - Returns: - A list of rotated dimension indices. - """ - # List of dimensions for transpose. - dim_list = range(num_dims) - # Shuffle src_dim to dest_dim by swapping to shuffle up the other dims. - step = 1 if dest_dim > src_dim else -1 - for x in xrange(src_dim, dest_dim, step): - dim_list[x], dim_list[x + step] = dim_list[x + step], dim_list[x] - return dim_list - - -def transposing_reshape(tensor, - src_dim, - part_a, - part_b, - dest_dim_a, - dest_dim_b, - name=None): - """Splits src_dim and sends one of the pieces to another dim. - - Terminology: - A matrix is often described as 'row-major' or 'column-major', which doesn't - help if you can't remember which is the row index and which is the column, - even if you know what 'major' means, so here is a simpler explanation of it: - When TF stores a tensor of size [d0, d1, d2, d3] indexed by [i0, i1, i2, i3], - the memory address of an element is calculated using: - ((i0 * d1 + i1) * d2 + i2) * d3 + i3, so, d0 is the MOST SIGNIFICANT dimension - and d3 the LEAST SIGNIFICANT, just like in the decimal number 1234, 1 is the - most significant digit and 4 the least significant. In both cases the most - significant is multiplied by the largest number to determine its 'value'. - Furthermore, if we reshape the tensor to [d0'=d0, d1'=d1 x d2, d2'=d3], then - the MOST SIGNIFICANT part of d1' is d1 and the LEAST SIGNIFICANT part of d1' - is d2. - - Action: - transposing_reshape splits src_dim into factors [part_a, part_b], and sends - the most significant part (of size part_a) to be the most significant part of - dest_dim_a*(Exception: see NOTE 2), and the least significant part (of size - part_b) to be the most significant part of dest_dim_b. - This is basically a combination of reshape, rotating transpose, reshape. - NOTE1: At least one of dest_dim_a and dest_dim_b must equal src_dim, ie one of - the parts always stays put, so src_dim is never totally destroyed and the - output number of dimensions is always the same as the input. - NOTE2: If dest_dim_a == dest_dim_b == src_dim, then parts a and b are simply - transposed within src_dim to become part_b x part_a, so the most significant - part becomes the least significant part and vice versa. Thus if you really - wanted to make one of the parts the least significant side of the destiantion, - the destination dimension can be internally transposed with a second call to - transposing_reshape. - NOTE3: One of part_a and part_b may be -1 to allow src_dim to be of unknown - size with one known-size factor. Otherwise part_a * part_b must equal the size - of src_dim. - NOTE4: The reshape preserves as many known-at-graph-build-time dimension sizes - as are available. - - Example: - Input dims=[5, 2, 6, 2] - tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - [[[24, 25]... 
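
A quick check of rotate_dimensions against its docstring example (note the function swaps elements of range(num_dims) in place, which only works under Python 2, where range returns a list; a Python 3 port would need dim_list = list(range(num_dims))):

    perm = rotate_dimensions(num_dims=4, src_dim=3, dest_dim=1)
    print(perm)  # -> [0, 3, 1, 2]
    # tf.transpose(t, perm) on a [5, 4, 3, 2] tensor then yields [5, 2, 4, 3].
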
- src_dim=2, part_a=2, part_b=3, dest_dim_a=3, dest_dim_b=2 - output dims =[5, 2, 3, 4] - output tensor=[[[[0, 1, 6, 7][2, 3, 8, 9][4, 5, 10, 11]] - [[12, 13, 18, 19][14, 15, 20, 21][16, 17, 22, 23]]] - [[[24, 26, 28]... - Example2: - Input dims=[phrases, words, letters]=[2, 6, x] - tensor=[[[the][cat][sat][on][the][mat]] - [[a][stitch][in][time][saves][nine]]] - We can factorize the 6 words into 3x2 = [[the][cat]][[sat][on]][[the][mat]] - or 2x3=[[the][cat][sat]][[on][the][mat]] and - src_dim=1, part_a=3, part_b=2, dest_dim_a=1, dest_dim_b=1 - would yield: - [[[the][sat][the][cat][on][mat]] - [[a][in][saves][stitch][time][nine]]], but - src_dim=1, part_a=2, part_b=3, dest_dim_a=1, dest_dim_b=1 - would yield: - [[[the][on][cat][the][sat][mat]] - [[a][time][stitch][saves][in][nine]]], and - src_dim=1, part_a=2, part_b=3, dest_dim_a=0, dest_dim_b=1 - would yield: - [[[the][cat][sat]] - [[a][stitch][in]] - [[on][the][mat]] - [[time][saves][nine]]] - Now remember that the words above represent any least-significant subset of - the input dimensions. - - Args: - tensor: A tensor to reshape. - src_dim: The dimension to split. - part_a: The first factor of the split. - part_b: The second factor of the split. - dest_dim_a: The dimension to move part_a of src_dim to. - dest_dim_b: The dimension to move part_b of src_dim to. - name: Optional base name for all the ops. - - Returns: - Reshaped tensor. - - Raises: - ValueError: If the args are invalid. - """ - if dest_dim_a != src_dim and dest_dim_b != src_dim: - raise ValueError( - 'At least one of dest_dim_a, dest_dim_b must equal src_dim!') - if part_a == 0 or part_b == 0: - raise ValueError('Zero not allowed for part_a or part_b!') - if part_a < 0 and part_b < 0: - raise ValueError('At least one of part_a and part_b must be positive!') - if not name: - name = 'transposing_reshape' - prev_shape = tensor_shape(tensor) - expanded = tf.reshape( - tensor, - prev_shape[:src_dim] + [part_a, part_b] + prev_shape[src_dim + 1:], - name=name + '_reshape_in') - dest = dest_dim_b - if dest_dim_a != src_dim: - # We are just moving part_a to dest_dim_a. - dest = dest_dim_a - else: - # We are moving part_b to dest_dim_b. - src_dim += 1 - dim_list = rotate_dimensions(len(expanded.get_shape()), src_dim, dest) - expanded = tf.transpose(expanded, dim_list, name=name + '_rot_transpose') - # Reshape identity except dest,dest+1, which get merged. - ex_shape = tensor_shape(expanded) - combined = ex_shape[dest] * ex_shape[dest + 1] - return tf.reshape( - expanded, - ex_shape[:dest] + [combined] + ex_shape[dest + 2:], - name=name + '_reshape_out') - - -def tensor_dim(tensor, dim): - """Returns int dimension if known at a graph build time else a tensor. - - If the size of the dim of tensor is known at graph building time, then that - known value is returned, otherwise (instead of None), a Tensor that will give - the size of the dimension when the graph is run. The return value will be - accepted by tf.reshape in multiple (or even all) dimensions, even when the - sizes are not known at graph building time, unlike -1, which can only be used - in one dimension. It is a bad idea to use tf.shape all the time, as some ops - demand a known (at graph build time) size. This function therefore returns - the best available, most useful dimension size. - Args: - tensor: Input tensor. - dim: Dimension to find the size of. - - Returns: - An integer if shape is known at build time, otherwise a tensor of int32. 
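
The worked example in the docstring above can be reproduced with plain NumPy, which makes the reshape-transpose-reshape pipeline concrete (a sketch for orientation, not part of the patch):

    import numpy as np
    x = np.arange(120).reshape(5, 2, 6, 2)
    # Split src_dim=2 (size 6) into part_a=2 (most significant) x part_b=3.
    y = x.reshape(5, 2, 2, 3, 2)
    # Rotate the part_a axis (axis 2) to dest_dim_a=3, past part_b.
    y = y.transpose(0, 1, 3, 2, 4)
    # Merge part_a into the destination dimension.
    y = y.reshape(5, 2, 3, 4)
    print(y[0, 0])  # [[0 1 6 7] [2 3 8 9] [4 5 10 11]], as documented
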
- """ - result = tensor.get_shape().as_list()[dim] - if result is None: - result = tf.shape(tensor)[dim] - return result - - -def tensor_shape(tensor): - """Returns a heterogeneous list of tensor_dim for the tensor. - - See tensor_dim for a more detailed explanation. - Args: - tensor: Input tensor. - - Returns: - A heterogeneous list of integers and int32 tensors. - """ - result = [] - for d in xrange(len(tensor.get_shape())): - result.append(tensor_dim(tensor, d)) - return result diff --git a/research/street/python/shapes_test.py b/research/street/python/shapes_test.py deleted file mode 100644 index 87b3c737f..000000000 --- a/research/street/python/shapes_test.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for shapes.""" - -import numpy as np -import tensorflow as tf -import shapes - - -def _rand(*size): - return np.random.uniform(size=size).astype('f') - - -class ShapesTest(tf.test.TestCase): - """Tests just the shapes from a call to transposing_reshape.""" - - def __init__(self, other): - super(ShapesTest, self).__init__(other) - self.batch_size = 4 - self.im_height = 24 - self.im_width = 36 - self.depth = 20 - - def testReshapeTile(self): - """Tests that a tiled input can be reshaped to the batch dimension.""" - fake = tf.placeholder( - tf.float32, shape=(None, None, None, self.depth), name='inputs') - real = _rand(self.batch_size, self.im_height, self.im_width, self.depth) - with self.test_session() as sess: - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=3, part_b=-1, dest_dim_a=0, dest_dim_b=2) - res_image = sess.run([outputs], feed_dict={fake: real}) - self.assertEqual( - tuple(res_image[0].shape), - (self.batch_size * 3, self.im_height, self.im_width / 3, self.depth)) - - def testReshapeDepth(self): - """Tests that depth can be reshaped to the x dimension.""" - fake = tf.placeholder( - tf.float32, shape=(None, None, None, self.depth), name='inputs') - real = _rand(self.batch_size, self.im_height, self.im_width, self.depth) - with self.test_session() as sess: - outputs = shapes.transposing_reshape( - fake, src_dim=3, part_a=4, part_b=-1, dest_dim_a=2, dest_dim_b=3) - res_image = sess.run([outputs], feed_dict={fake: real}) - self.assertEqual( - tuple(res_image[0].shape), - (self.batch_size, self.im_height, self.im_width * 4, self.depth / 4)) - - -class DataTest(tf.test.TestCase): - """Tests that the data is moved correctly in a call to transposing_reshape. - - """ - - def testTransposingReshape_2_2_3_2_1(self): - """Case: dest_a == src, dest_b < src: Split with Least sig part going left. - """ - with self.test_session() as sess: - fake = tf.placeholder( - tf.float32, shape=(None, None, None, 2), name='inputs') - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=1) - # Make real inputs. 
The tensor looks like this: - # tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - # [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - # [[[24, 25]... - real = np.arange(120).reshape((5, 2, 6, 2)) - np_array = sess.run([outputs], feed_dict={fake: real})[0] - self.assertEqual(tuple(np_array.shape), (5, 6, 2, 2)) - self.assertAllEqual(np_array[0, :, :, :], - [[[0, 1], [6, 7]], [[12, 13], [18, 19]], - [[2, 3], [8, 9]], [[14, 15], [20, 21]], - [[4, 5], [10, 11]], [[16, 17], [22, 23]]]) - - def testTransposingReshape_2_2_3_2_3(self): - """Case: dest_a == src, dest_b > src: Split with Least sig part going right. - """ - with self.test_session() as sess: - fake = tf.placeholder( - tf.float32, shape=(None, None, None, 2), name='inputs') - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=3) - # Make real inputs. The tensor looks like this: - # tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - # [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - # [[[24, 25]... - real = np.arange(120).reshape((5, 2, 6, 2)) - np_array = sess.run([outputs], feed_dict={fake: real})[0] - self.assertEqual(tuple(np_array.shape), (5, 2, 2, 6)) - self.assertAllEqual( - np_array[0, :, :, :], - [[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], - [[12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]]]) - - def testTransposingReshape_2_2_3_2_2(self): - """Case: dest_a == src, dest_b == src. Transpose within dimension 2. - """ - with self.test_session() as sess: - fake = tf.placeholder( - tf.float32, shape=(None, None, None, 2), name='inputs') - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=2, dest_dim_b=2) - # Make real inputs. The tensor looks like this: - # tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - # [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - # [[[24, 25]... - real = np.arange(120).reshape((5, 2, 6, 2)) - np_array = sess.run([outputs], feed_dict={fake: real})[0] - self.assertEqual(tuple(np_array.shape), (5, 2, 6, 2)) - self.assertAllEqual( - np_array[0, :, :, :], - [[[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]], - [[12, 13], [18, 19], [14, 15], [20, 21], [16, 17], [22, 23]]]) - - def testTransposingReshape_2_2_3_1_2(self): - """Case: dest_a < src, dest_b == src. Split with Most sig part going left. - """ - with self.test_session() as sess: - fake = tf.placeholder( - tf.float32, shape=(None, None, None, 2), name='inputs') - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=1, dest_dim_b=2) - # Make real inputs. The tensor looks like this: - # tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - # [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - # [[[24, 25]... - real = np.arange(120).reshape((5, 2, 6, 2)) - np_array = sess.run([outputs], feed_dict={fake: real})[0] - self.assertEqual(tuple(np_array.shape), (5, 4, 3, 2)) - self.assertAllEqual(np_array[0, :, :, :], - [[[0, 1], [2, 3], [4, 5]], - [[12, 13], [14, 15], [16, 17]], - [[6, 7], [8, 9], [10, 11]], - [[18, 19], [20, 21], [22, 23]]]) - - def testTransposingReshape_2_2_3_3_2(self): - """Case: dest_a < src, dest_b == src. Split with Most sig part going right. - """ - with self.test_session() as sess: - fake = tf.placeholder( - tf.float32, shape=(None, None, None, 2), name='inputs') - outputs = shapes.transposing_reshape( - fake, src_dim=2, part_a=2, part_b=3, dest_dim_a=3, dest_dim_b=2) - # Make real inputs. 
The tensor looks like this: - # tensor=[[[[0, 1][2, 3][4, 5][6, 7][8, 9][10, 11]] - # [[12, 13][14, 15][16, 17][18, 19][20, 21][22, 23]] - # [[[24, 25]... - real = np.arange(120).reshape((5, 2, 6, 2)) - np_array = sess.run([outputs], feed_dict={fake: real})[0] - self.assertEqual(tuple(np_array.shape), (5, 2, 3, 4)) - self.assertAllEqual( - np_array[0, :, :, :], - [[[0, 1, 6, 7], [2, 3, 8, 9], [4, 5, 10, 11]], - [[12, 13, 18, 19], [14, 15, 20, 21], [16, 17, 22, 23]]]) - - -if __name__ == '__main__': - tf.test.main() diff --git a/research/street/python/vgsl_eval.py b/research/street/python/vgsl_eval.py deleted file mode 100644 index 7db00d6f0..000000000 --- a/research/street/python/vgsl_eval.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Model eval separate from training.""" -from tensorflow import app -from tensorflow.python.platform import flags - -import vgsl_model - -flags.DEFINE_string('eval_dir', '/tmp/mdir/eval', - 'Directory where to write event logs.') -flags.DEFINE_string('graph_def_file', None, - 'Output eval graph definition file.') -flags.DEFINE_string('train_dir', '/tmp/mdir', - 'Directory where to find training checkpoints.') -flags.DEFINE_string('model_str', - '1,150,600,3[S2(4x150)0,2 Ct5,5,16 Mp2,2 Ct5,5,64 Mp3,3' - '([Lrys64 Lbx128][Lbys64 Lbx128][Lfys64 Lbx128])S3(3x0)2,3' - 'Lfx128 Lrx128 S0(1x4)0,3 Do Lfx256]O1c134', - 'Network description.') -flags.DEFINE_integer('num_steps', 1000, 'Number of steps to run evaluation.') -flags.DEFINE_integer('eval_interval_secs', 60, - 'Time interval between eval runs.') -flags.DEFINE_string('eval_data', None, 'Evaluation data filepattern') -flags.DEFINE_string('decoder', None, 'Charset decoder') - -FLAGS = flags.FLAGS - - -def main(argv): - del argv - vgsl_model.Eval(FLAGS.train_dir, FLAGS.eval_dir, FLAGS.model_str, - FLAGS.eval_data, FLAGS.decoder, FLAGS.num_steps, - FLAGS.graph_def_file, FLAGS.eval_interval_secs) - - -if __name__ == '__main__': - app.run() diff --git a/research/street/python/vgsl_input.py b/research/street/python/vgsl_input.py deleted file mode 100644 index e4495c680..000000000 --- a/research/street/python/vgsl_input.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
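
The model_str flag above packs three specifications into one string; InitNetwork in vgsl_model.py below splits them at the outermost brackets. A sketch of that decomposition on the default flag value (abbreviated here with '...'):

    model_str = '1,150,600,3[S2(4x150)0,2 Ct5,5,16 ... Do Lfx256]O1c134'
    left, right = model_str.find('['), model_str.rfind(']')
    input_spec = model_str[:left]           # '1,150,600,3': batch,height,width,depth
    layer_spec = model_str[left:right + 1]  # the bracketed VGSL layer pipeline
    output_spec = model_str[right + 1:]     # 'O1c134': 1-d CTC softmax, 134 classes
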
-# ============================================================================== - -"""String network description language to define network layouts.""" -import collections -import tensorflow as tf -from tensorflow.python.ops import parsing_ops - -# Named tuple for the standard tf image tensor Shape. -# batch_size: Number of images to batch-up for training. -# height: Fixed height of image or None for variable. -# width: Fixed width of image or None for variable. -# depth: Desired depth in bytes per pixel of input images. -ImageShape = collections.namedtuple('ImageTensorDims', - ['batch_size', 'height', 'width', 'depth']) - - -def ImageInput(input_pattern, num_threads, shape, using_ctc, reader=None): - """Creates an input image tensor from the input_pattern filenames. - - TODO(rays) Expand for 2-d labels, 0-d labels, and logistic targets. - Args: - input_pattern: Filenames of the dataset(s) to read. - num_threads: Number of preprocessing threads. - shape: ImageShape with the desired shape of the input. - using_ctc: Take the unpadded_class labels instead of padded. - reader: Function that returns an actual reader to read Examples from - input files. If None, uses tf.TFRecordReader(). - Returns: - images: Float Tensor containing the input image scaled to [-1.28, 1.27]. - heights: Tensor int64 containing the heights of the images. - widths: Tensor int64 containing the widths of the images. - labels: Serialized SparseTensor containing the int64 labels. - sparse_labels: Serialized SparseTensor containing the int64 labels. - truths: Tensor string of the utf8 truth texts. - Raises: - ValueError: if the optimizer type is unrecognized. - """ - data_files = tf.gfile.Glob(input_pattern) - assert data_files, 'no files found for dataset ' + input_pattern - queue_capacity = shape.batch_size * num_threads * 2 - filename_queue = tf.train.string_input_producer( - data_files, capacity=queue_capacity) - - # Create a subgraph with its own reader (but sharing the - # filename_queue) for each preprocessing thread. - images_and_label_lists = [] - for _ in range(num_threads): - image, height, width, labels, text = _ReadExamples(filename_queue, shape, - using_ctc, reader) - images_and_label_lists.append([image, height, width, labels, text]) - # Create a queue that produces the examples in batches. - images, heights, widths, labels, truths = tf.train.batch_join( - images_and_label_lists, - batch_size=shape.batch_size, - capacity=16 * shape.batch_size, - dynamic_pad=True) - # Deserialize back to sparse, because the batcher doesn't do sparse. - labels = tf.deserialize_many_sparse(labels, tf.int64) - sparse_labels = tf.cast(labels, tf.int32) - labels = tf.sparse_tensor_to_dense(labels) - labels = tf.reshape(labels, [shape.batch_size, -1], name='Labels') - # Crush the other shapes to just the batch dimension. - heights = tf.reshape(heights, [-1], name='Heights') - widths = tf.reshape(widths, [-1], name='Widths') - truths = tf.reshape(truths, [-1], name='Truths') - # Give the images a nice name as well. - images = tf.identity(images, name='Images') - - tf.summary.image('Images', images) - return images, heights, widths, labels, sparse_labels, truths - - -def _ReadExamples(filename_queue, shape, using_ctc, reader=None): - """Builds network input tensor ops for TF Example. - - Args: - filename_queue: Queue of filenames, from tf.train.string_input_producer - shape: ImageShape with the desired shape of the input. - using_ctc: Take the unpadded_class labels instead of padded. 
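
A hypothetical call to ImageInput above (the file pattern and sizes are illustrative only; ImageShape is the named tuple defined at the top of the file):

    shape = ImageShape(batch_size=32, height=150, width=600, depth=3)
    images, heights, widths, labels, sparse_labels, truths = ImageInput(
        input_pattern='data/fsns/train/train-*', num_threads=4,
        shape=shape, using_ctc=True)
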
- reader: Function that returns an actual reader to read Examples from - input files. If None, uses tf.TFRecordReader(). - Returns: - image: Float Tensor containing the input image scaled to [-1.28, 1.27]. - height: Tensor int64 containing the height of the image. - width: Tensor int64 containing the width of the image. - labels: Serialized SparseTensor containing the int64 labels. - text: Tensor string of the utf8 truth text. - """ - if reader: - reader = reader() - else: - reader = tf.TFRecordReader() - _, example_serialized = reader.read(filename_queue) - example_serialized = tf.reshape(example_serialized, shape=[]) - features = tf.parse_single_example( - example_serialized, - {'image/encoded': parsing_ops.FixedLenFeature( - [1], dtype=tf.string, default_value=''), - 'image/text': parsing_ops.FixedLenFeature( - [1], dtype=tf.string, default_value=''), - 'image/class': parsing_ops.VarLenFeature(dtype=tf.int64), - 'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64), - 'image/height': parsing_ops.FixedLenFeature( - [1], dtype=tf.int64, default_value=1), - 'image/width': parsing_ops.FixedLenFeature( - [1], dtype=tf.int64, default_value=1)}) - if using_ctc: - labels = features['image/unpadded_class'] - else: - labels = features['image/class'] - labels = tf.serialize_sparse(labels) - image = tf.reshape(features['image/encoded'], shape=[], name='encoded') - image = _ImageProcessing(image, shape) - height = tf.reshape(features['image/height'], [-1]) - width = tf.reshape(features['image/width'], [-1]) - text = tf.reshape(features['image/text'], shape=[]) - - return image, height, width, labels, text - - -def _ImageProcessing(image_buffer, shape): - """Convert a PNG string into an input tensor. - - We allow for fixed and variable sizes. - Does fixed conversion to floats in the range [-1.28, 1.27]. - Args: - image_buffer: Tensor containing a PNG encoded image. - shape: ImageShape with the desired shape of the input. - Returns: - image: Decoded, normalized image in the range [-1.28, 1.27]. - """ - image = tf.image.decode_png(image_buffer, channels=shape.depth) - image.set_shape([shape.height, shape.width, shape.depth]) - image = tf.cast(image, tf.float32) - image = tf.subtract(image, 128.0) - image = tf.multiply(image, 1 / 100.0) - return image diff --git a/research/street/python/vgsl_model.py b/research/street/python/vgsl_model.py deleted file mode 100644 index 7533cd8d5..000000000 --- a/research/street/python/vgsl_model.py +++ /dev/null @@ -1,601 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
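
The "[-1.28, 1.27]" range quoted in the docstrings follows directly from the subtract-128, divide-by-100 normalization in _ImageProcessing above; a one-line check:

    import numpy as np
    print((np.array([0., 128., 255.]) - 128.0) / 100.0)  # [-1.28  0.    1.27]
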
-# ============================================================================== - -"""String network description language to define network layouts.""" -from __future__ import print_function - -import re -import time - -import decoder -import errorcounter as ec -import shapes -import tensorflow as tf -import vgsl_input -import vgslspecs -import tensorflow.contrib.slim as slim -from tensorflow.core.framework import summary_pb2 -from tensorflow.python.platform import tf_logging as logging - - -# Parameters for rate decay. -# We divide the learning_rate_halflife by DECAY_STEPS_FACTOR and use DECAY_RATE -# as the decay factor for the learning rate, ie we use the DECAY_STEPS_FACTORth -# root of 2 as the decay rate every halflife/DECAY_STEPS_FACTOR to achieve the -# desired halflife. -DECAY_STEPS_FACTOR = 16 -DECAY_RATE = pow(0.5, 1.0 / DECAY_STEPS_FACTOR) - - -def Train(train_dir, - model_str, - train_data, - max_steps, - master='', - task=0, - ps_tasks=0, - initial_learning_rate=0.001, - final_learning_rate=0.001, - learning_rate_halflife=160000, - optimizer_type='Adam', - num_preprocess_threads=1, - reader=None): - """Testable trainer with no dependence on FLAGS. - - Args: - train_dir: Directory to write checkpoints. - model_str: Network specification string. - train_data: Training data file pattern. - max_steps: Number of training steps to run. - master: Name of the TensorFlow master to use. - task: Task id of this replica running the training. (0 will be master). - ps_tasks: Number of tasks in ps job, or 0 if no ps job. - initial_learning_rate: Learing rate at start of training. - final_learning_rate: Asymptotic minimum learning rate. - learning_rate_halflife: Number of steps over which to halve the difference - between initial and final learning rate. - optimizer_type: One of 'GradientDescent', 'AdaGrad', 'Momentum', 'Adam'. - num_preprocess_threads: Number of input threads. - reader: Function that returns an actual reader to read Examples from input - files. If None, uses tf.TFRecordReader(). - """ - if master.startswith('local'): - device = tf.ReplicaDeviceSetter(ps_tasks) - else: - device = '/cpu:0' - with tf.Graph().as_default(): - with tf.device(device): - model = InitNetwork(train_data, model_str, 'train', initial_learning_rate, - final_learning_rate, learning_rate_halflife, - optimizer_type, num_preprocess_threads, reader) - - # Create a Supervisor. It will take care of initialization, summaries, - # checkpoints, and recovery. - # - # When multiple replicas of this program are running, the first one, - # identified by --task=0 is the 'chief' supervisor. It is the only one - # that takes case of initialization, etc. - sv = tf.train.Supervisor( - logdir=train_dir, - is_chief=(task == 0), - saver=model.saver, - save_summaries_secs=10, - save_model_secs=30, - recovery_wait_secs=5) - - step = 0 - while step < max_steps: - try: - # Get an initialized, and possibly recovered session. Launch the - # services: Checkpointing, Summaries, step counting. - with sv.managed_session(master) as sess: - while step < max_steps: - _, step = model.TrainAStep(sess) - if sv.coord.should_stop(): - break - except tf.errors.AbortedError as e: - logging.error('Received error:%s', e) - continue - - -def Eval(train_dir, - eval_dir, - model_str, - eval_data, - decoder_file, - num_steps, - graph_def_file=None, - eval_interval_secs=0, - reader=None): - """Restores a model from a checkpoint and evaluates it. - - Args: - train_dir: Directory to find checkpoints. - eval_dir: Directory to write summary events. 
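
The DECAY_* constants above implement a smooth halflife schedule: applying the per-step factor decay_steps times over one halflife multiplies the initial-to-final gap by exactly 0.5. A sketch of the resulting curve with illustrative rates (mirroring _AddOptimizer further below, which adds the asymptotic final rate back on):

    DECAY_STEPS_FACTOR = 16
    DECAY_RATE = pow(0.5, 1.0 / DECAY_STEPS_FACTOR)

    def decayed_rate(step, initial=0.001, final=0.0001, halflife=160000):
        # Equivalent to (initial - final) * 0.5 ** (step / halflife) + final.
        decay_steps = halflife / DECAY_STEPS_FACTOR
        return (initial - final) * DECAY_RATE ** (step / decay_steps) + final

    print(decayed_rate(0))       # 0.001
    print(decayed_rate(160000))  # 0.00055: the gap has halved after one halflife
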
- model_str: Network specification string. - eval_data: Evaluation data file pattern. - decoder_file: File to read to decode the labels. - num_steps: Number of eval steps to run. - graph_def_file: File to write graph definition to for freezing. - eval_interval_secs: How often to run evaluations, or once if 0. - reader: Function that returns an actual reader to read Examples from input - files. If None, uses tf.TFRecordReader(). - Returns: - (char error rate, word recall error rate, sequence error rate) as percent. - Raises: - ValueError: If unimplemented feature is used. - """ - decode = None - if decoder_file: - decode = decoder.Decoder(decoder_file) - - # Run eval. - rates = ec.ErrorRates( - label_error=None, - word_recall_error=None, - word_precision_error=None, - sequence_error=None) - with tf.Graph().as_default(): - model = InitNetwork(eval_data, model_str, 'eval', reader=reader) - sw = tf.summary.FileWriter(eval_dir) - - while True: - sess = tf.Session('') - if graph_def_file is not None: - # Write the eval version of the graph to a file for freezing. - if not tf.gfile.Exists(graph_def_file): - with tf.gfile.FastGFile(graph_def_file, 'w') as f: - f.write( - sess.graph.as_graph_def(add_shapes=True).SerializeToString()) - ckpt = tf.train.get_checkpoint_state(train_dir) - if ckpt and ckpt.model_checkpoint_path: - step = model.Restore(ckpt.model_checkpoint_path, sess) - if decode: - rates = decode.SoftmaxEval(sess, model, num_steps) - _AddRateToSummary('Label error rate', rates.label_error, step, sw) - _AddRateToSummary('Word recall error rate', rates.word_recall_error, - step, sw) - _AddRateToSummary('Word precision error rate', - rates.word_precision_error, step, sw) - _AddRateToSummary('Sequence error rate', rates.sequence_error, step, - sw) - sw.flush() - print('Error rates=', rates) - else: - raise ValueError('Non-softmax decoder evaluation not implemented!') - if eval_interval_secs: - time.sleep(eval_interval_secs) - else: - break - return rates - - -def InitNetwork(input_pattern, - model_spec, - mode='eval', - initial_learning_rate=0.00005, - final_learning_rate=0.00005, - halflife=1600000, - optimizer_type='Adam', - num_preprocess_threads=1, - reader=None): - """Constructs a python tensor flow model defined by model_spec. - - Args: - input_pattern: File pattern of the data in tfrecords of Example. - model_spec: Concatenation of input spec, model spec and output spec. - See Build below for input/output spec. For model spec, see vgslspecs.py - mode: One of 'train', 'eval' - initial_learning_rate: Initial learning rate for the network. - final_learning_rate: Final learning rate for the network. - halflife: Number of steps over which to halve the difference between - initial and final learning rate for the network. - optimizer_type: One of 'GradientDescent', 'AdaGrad', 'Momentum', 'Adam'. - num_preprocess_threads: Number of threads to use for image processing. - reader: Function that returns an actual reader to read Examples from input - files. If None, uses tf.TFRecordReader(). - Eval tasks need only specify input_pattern and model_spec. - - Returns: - A VGSLImageModel class. - - Raises: - ValueError: if the model spec syntax is incorrect. - """ - model = VGSLImageModel(mode, model_spec, initial_learning_rate, - final_learning_rate, halflife) - left_bracket = model_spec.find('[') - right_bracket = model_spec.rfind(']') - if left_bracket < 0 or right_bracket < 0: - raise ValueError('Failed to find [] in model spec! 
', model_spec) - input_spec = model_spec[:left_bracket] - layer_spec = model_spec[left_bracket:right_bracket + 1] - output_spec = model_spec[right_bracket + 1:] - model.Build(input_pattern, input_spec, layer_spec, output_spec, - optimizer_type, num_preprocess_threads, reader) - return model - - -class VGSLImageModel(object): - """Class that builds a tensor flow model for training or evaluation. - """ - - def __init__(self, mode, model_spec, initial_learning_rate, - final_learning_rate, halflife): - """Constructs a VGSLImageModel. - - Args: - mode: One of "train", "eval" - model_spec: Full model specification string, for reference only. - initial_learning_rate: Initial learning rate for the network. - final_learning_rate: Final learning rate for the network. - halflife: Number of steps over which to halve the difference between - initial and final learning rate for the network. - """ - # The string that was used to build this model. - self.model_spec = model_spec - # The layers between input and output. - self.layers = None - # The train/eval mode. - self.mode = mode - # The initial learning rate. - self.initial_learning_rate = initial_learning_rate - self.final_learning_rate = final_learning_rate - self.decay_steps = halflife / DECAY_STEPS_FACTOR - self.decay_rate = DECAY_RATE - # Tensor for the labels. - self.labels = None - self.sparse_labels = None - # Debug data containing the truth text. - self.truths = None - # Tensor for loss - self.loss = None - # Train operation - self.train_op = None - # Tensor for the global step counter - self.global_step = None - # Tensor for the output predictions (usually softmax) - self.output = None - # True if we are using CTC training mode. - self.using_ctc = False - # Saver object to load or restore the variables. - self.saver = None - - def Build(self, input_pattern, input_spec, model_spec, output_spec, - optimizer_type, num_preprocess_threads, reader): - """Builds the model from the separate input/layers/output spec strings. - - Args: - input_pattern: File pattern of the data in tfrecords of TF Example format. - input_spec: Specification of the input layer: - batchsize,height,width,depth (4 comma-separated integers) - Training will run with batches of batchsize images, but runtime can - use any batch size. - height and/or width can be 0 or -1, indicating variable size, - otherwise all images must be the given size. - depth must be 1 or 3 to indicate greyscale or color. - NOTE 1-d image input, treating the y image dimension as depth, can - be achieved using S1(1x0)1,3 as the first op in the model_spec, but - the y-size of the input must then be fixed. - model_spec: Model definition. See vgslspecs.py - output_spec: Output layer definition: - O(2|1|0)(l|s|c)n output layer with n classes. - 2 (heatmap) Output is a 2-d vector map of the input (possibly at - different scale). - 1 (sequence) Output is a 1-d sequence of vector values. - 0 (value) Output is a 0-d single vector value. - l uses a logistic non-linearity on the output, allowing multiple - hot elements in any output vector value. - s uses a softmax non-linearity, with one-hot output in each value. - c uses a softmax with CTC. Can only be used with s (sequence). - NOTE Only O1s and O1c are currently supported. - optimizer_type: One of 'GradientDescent', 'AdaGrad', 'Momentum', 'Adam'. - num_preprocess_threads: Number of threads to use for image processing. - reader: Function that returns an actual reader to read Examples from input - files. If None, uses tf.TFRecordReader(). 
- """ - self.global_step = tf.Variable(0, name='global_step', trainable=False) - shape = _ParseInputSpec(input_spec) - out_dims, out_func, num_classes = _ParseOutputSpec(output_spec) - self.using_ctc = out_func == 'c' - images, heights, widths, labels, sparse, _ = vgsl_input.ImageInput( - input_pattern, num_preprocess_threads, shape, self.using_ctc, reader) - self.labels = labels - self.sparse_labels = sparse - self.layers = vgslspecs.VGSLSpecs(widths, heights, self.mode == 'train') - last_layer = self.layers.Build(images, model_spec) - self._AddOutputs(last_layer, out_dims, out_func, num_classes) - if self.mode == 'train': - self._AddOptimizer(optimizer_type) - - # For saving the model across training and evaluation - self.saver = tf.train.Saver() - - def TrainAStep(self, sess): - """Runs a training step in the session. - - Args: - sess: Session in which to train the model. - Returns: - loss, global_step. - """ - _, loss, step = sess.run([self.train_op, self.loss, self.global_step]) - return loss, step - - def Restore(self, checkpoint_path, sess): - """Restores the model from the given checkpoint path into the session. - - Args: - checkpoint_path: File pathname of the checkpoint. - sess: Session in which to restore the model. - Returns: - global_step of the model. - """ - self.saver.restore(sess, checkpoint_path) - return tf.train.global_step(sess, self.global_step) - - def RunAStep(self, sess): - """Runs a step for eval in the session. - - Args: - sess: Session in which to run the model. - Returns: - output tensor result, labels tensor result. - """ - return sess.run([self.output, self.labels]) - - def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes): - """Adds the output layer and loss function. - - Args: - prev_layer: Output of last layer of main network. - out_dims: Number of output dimensions, 0, 1 or 2. - out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic. - num_classes: Number of outputs/size of last output dimension. - """ - height_in = shapes.tensor_dim(prev_layer, dim=1) - logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func, - num_classes) - if self.mode == 'train': - # Setup loss for training. - self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func) - tf.summary.scalar('loss', self.loss) - elif out_dims == 0: - # Be sure the labels match the output, even in eval mode. - self.labels = tf.slice(self.labels, [0, 0], [-1, 1]) - self.labels = tf.reshape(self.labels, [-1]) - - logging.info('Final output=%s', outputs) - logging.info('Labels tensor=%s', self.labels) - self.output = outputs - - def _AddOutputLayer(self, prev_layer, out_dims, out_func, num_classes): - """Add the fully-connected logits and SoftMax/Logistic output Layer. - - Args: - prev_layer: Output of last layer of main network. - out_dims: Number of output dimensions, 0, 1 or 2. - out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic. - num_classes: Number of outputs/size of last output dimension. - - Returns: - logits: Pre-softmax/logistic fully-connected output shaped to out_dims. - outputs: Post-softmax/logistic shaped to out_dims. - - Raises: - ValueError: if syntax is incorrect. - """ - # Reduce dimensionality appropriate to the output dimensions. - batch_in = shapes.tensor_dim(prev_layer, dim=0) - height_in = shapes.tensor_dim(prev_layer, dim=1) - width_in = shapes.tensor_dim(prev_layer, dim=2) - depth_in = shapes.tensor_dim(prev_layer, dim=3) - if out_dims: - # Combine any remaining height and width with batch and unpack after. 
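-      # (Illustrative shapes, added for clarity: a [4, 8, 10, 16] input with
-      # out_dims=1 becomes [320, 16] here, and is reshaped back to
-      # [4, 80, num_classes] after the fully-connected layer below.)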
-      shaped = tf.reshape(prev_layer, [-1, depth_in])
-    else:
-      # Everything except batch goes to depth, and therefore has to be known.
-      shaped = tf.reshape(prev_layer, [-1, height_in * width_in * depth_in])
-    logits = slim.fully_connected(shaped, num_classes, activation_fn=None)
-    if out_func == 'l':
-      raise ValueError('Logistic not yet supported!')
-    else:
-      output = tf.nn.softmax(logits)
-    # Reshape to the desired output.
-    if out_dims == 2:
-      output_shape = [batch_in, height_in, width_in, num_classes]
-    elif out_dims == 1:
-      output_shape = [batch_in, height_in * width_in, num_classes]
-    else:
-      output_shape = [batch_in, num_classes]
-    output = tf.reshape(output, output_shape, name='Output')
-    logits = tf.reshape(logits, output_shape)
-    return logits, output
-
-  def _AddLossFunction(self, logits, height_in, out_dims, out_func):
-    """Add the appropriate loss function.
-
-    Args:
-      logits: Pre-softmax/logistic fully-connected output shaped to out_dims.
-      height_in: Height of logits before going into the softmax layer.
-      out_dims: Number of output dimensions, 0, 1 or 2.
-      out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
-
-    Returns:
-      loss: That which is to be minimized.
-
-    Raises:
-      ValueError: if logistic is used.
-    """
-    if out_func == 'c':
-      # Transpose batch to the middle.
-      ctc_input = tf.transpose(logits, [1, 0, 2])
-      # Compute the widths of each batch element from the input widths.
-      widths = self.layers.GetLengths(dim=2, factor=height_in)
-      cross_entropy = tf.nn.ctc_loss(ctc_input, self.sparse_labels, widths)
-    elif out_func == 's':
-      if out_dims == 2:
-        self.labels = _PadLabels3d(logits, self.labels)
-      elif out_dims == 1:
-        self.labels = _PadLabels2d(
-            shapes.tensor_dim(logits, dim=1), self.labels)
-      else:
-        self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
-        self.labels = tf.reshape(self.labels, [-1])
-      cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-          logits=logits, labels=self.labels, name='xent')
-    else:
-      # TODO(rays) Labels need an extra dimension for logistic, so different
-      # padding functions are needed, as well as a different loss function.
-      raise ValueError('Logistic not yet supported!')
-    return tf.reduce_sum(cross_entropy)
-
-  def _AddOptimizer(self, optimizer_type):
-    """Adds an optimizer with learning rate decay to minimize self.loss.
-
-    Args:
-      optimizer_type: One of 'GradientDescent', 'AdaGrad', 'Momentum', 'Adam'.
-    Raises:
-      ValueError: if the optimizer type is unrecognized.
-    """
-    learn_rate_delta = self.initial_learning_rate - self.final_learning_rate
-    learn_rate_dec = tf.add(
-        tf.train.exponential_decay(learn_rate_delta, self.global_step,
-                                   self.decay_steps, self.decay_rate),
-        self.final_learning_rate)
-    if optimizer_type == 'GradientDescent':
-      opt = tf.train.GradientDescentOptimizer(learn_rate_dec)
-    elif optimizer_type == 'AdaGrad':
-      opt = tf.train.AdagradOptimizer(learn_rate_dec)
-    elif optimizer_type == 'Momentum':
-      opt = tf.train.MomentumOptimizer(learn_rate_dec, momentum=0.9)
-    elif optimizer_type == 'Adam':
-      opt = tf.train.AdamOptimizer(learning_rate=learn_rate_dec)
-    else:
-      raise ValueError('Invalid optimizer type: ' + optimizer_type)
-    tf.summary.scalar('learn_rate', learn_rate_dec)
-
-    self.train_op = opt.minimize(
-        self.loss, global_step=self.global_step, name='train')
-
-
-def _PadLabels3d(logits, labels):
-  """Pads or slices 3-d labels to match logits.
- - Covers the case of 2-d softmax output, when labels is [batch, height, width] - and logits is [batch, height, width, onehot] - Args: - logits: 4-d Pre-softmax fully-connected output. - labels: 3-d, but not necessarily matching in size. - - Returns: - labels: Resized by padding or clipping to match logits. - """ - logits_shape = shapes.tensor_shape(logits) - labels_shape = shapes.tensor_shape(labels) - labels = tf.reshape(labels, [-1, labels_shape[2]]) - labels = _PadLabels2d(logits_shape[2], labels) - labels = tf.reshape(labels, [labels_shape[0], -1]) - labels = _PadLabels2d(logits_shape[1] * logits_shape[2], labels) - return tf.reshape(labels, [labels_shape[0], logits_shape[1], logits_shape[2]]) - - -def _PadLabels2d(logits_size, labels): - """Pads or slices the 2nd dimension of 2-d labels to match logits_size. - - Covers the case of 1-d softmax output, when labels is [batch, seq] and - logits is [batch, seq, onehot] - Args: - logits_size: Tensor returned from tf.shape giving the target size. - labels: 2-d, but not necessarily matching in size. - - Returns: - labels: Resized by padding or clipping the last dimension to logits_size. - """ - pad = logits_size - tf.shape(labels)[1] - - def _PadFn(): - return tf.pad(labels, [[0, 0], [0, pad]]) - - def _SliceFn(): - return tf.slice(labels, [0, 0], [-1, logits_size]) - - return tf.cond(tf.greater(pad, 0), _PadFn, _SliceFn) - - -def _ParseInputSpec(input_spec): - """Parses input_spec and returns the numbers obtained therefrom. - - Args: - input_spec: Specification of the input layer. See Build. - - Returns: - shape: ImageShape with the desired shape of the input. - - Raises: - ValueError: if syntax is incorrect. - """ - pattern = re.compile(R'(\d+),(\d+),(\d+),(\d+)') - m = pattern.match(input_spec) - if m is None: - raise ValueError('Failed to parse input spec:' + input_spec) - batch_size = int(m.group(1)) - y_size = int(m.group(2)) if int(m.group(2)) > 0 else None - x_size = int(m.group(3)) if int(m.group(3)) > 0 else None - depth = int(m.group(4)) - if depth not in [1, 3]: - raise ValueError('Depth must be 1 or 3, had:', depth) - return vgsl_input.ImageShape(batch_size, y_size, x_size, depth) - - -def _ParseOutputSpec(output_spec): - """Parses the output spec. - - Args: - output_spec: Output layer definition. See Build. - - Returns: - out_dims: 2|1|0 for 2-d, 1-d, 0-d. - out_func: l|s|c for logistic, softmax, softmax+CTC - num_classes: Number of classes in output. - - Raises: - ValueError: if syntax is incorrect. - """ - pattern = re.compile(R'(O)(0|1|2)(l|s|c)(\d+)') - m = pattern.match(output_spec) - if m is None: - raise ValueError('Failed to parse output spec:' + output_spec) - out_dims = int(m.group(2)) - out_func = m.group(3) - if out_func == 'c' and out_dims != 1: - raise ValueError('CTC can only be used with a 1-D sequence!') - num_classes = int(m.group(4)) - return out_dims, out_func, num_classes - - -def _AddRateToSummary(tag, rate, step, sw): - """Adds the given rate to the summary with the given tag. - - Args: - tag: Name for this value. - rate: Value to add to the summary. Perhaps an error rate. - step: Global step of the graph for the x-coordinate of the summary. - sw: Summary writer to which to write the rate value. 
- """ - sw.add_summary( - summary_pb2.Summary(value=[summary_pb2.Summary.Value( - tag=tag, simple_value=rate)]), step) diff --git a/research/street/python/vgsl_model_test.py b/research/street/python/vgsl_model_test.py deleted file mode 100644 index fd2396159..000000000 --- a/research/street/python/vgsl_model_test.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for vgsl_model.""" -import os - -import numpy as np -import tensorflow as tf -import vgsl_input -import vgsl_model - - -def _testdata(filename): - return os.path.join('../testdata/', filename) - - -def _rand(*size): - return np.random.uniform(size=size).astype('f') - - -class VgslModelTest(tf.test.TestCase): - - def testParseInputSpec(self): - """The parser must return the numbers in the correct order. - """ - shape = vgsl_model._ParseInputSpec(input_spec='32,42,256,3') - self.assertEqual( - shape, - vgsl_input.ImageShape( - batch_size=32, height=42, width=256, depth=3)) - # Nones must be inserted for zero sizes. - shape = vgsl_model._ParseInputSpec(input_spec='1,0,0,3') - self.assertEqual( - shape, - vgsl_input.ImageShape( - batch_size=1, height=None, width=None, depth=3)) - - def testParseOutputSpec(self): - """The parser must return the correct args in the correct order. - """ - out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec( - output_spec='O1c142') - self.assertEqual(out_dims, 1) - self.assertEqual(out_func, 'c') - self.assertEqual(num_classes, 142) - out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec( - output_spec='O2s99') - self.assertEqual(out_dims, 2) - self.assertEqual(out_func, 's') - self.assertEqual(num_classes, 99) - out_dims, out_func, num_classes = vgsl_model._ParseOutputSpec( - output_spec='O0l12') - self.assertEqual(out_dims, 0) - self.assertEqual(out_func, 'l') - self.assertEqual(num_classes, 12) - - def testPadLabels2d(self): - """Must pad timesteps in labels to match logits. - """ - with self.test_session() as sess: - # Make placeholders for logits and labels. - ph_logits = tf.placeholder(tf.float32, shape=(None, None, 42)) - ph_labels = tf.placeholder(tf.int64, shape=(None, None)) - padded_labels = vgsl_model._PadLabels2d(tf.shape(ph_logits)[1], ph_labels) - # Make actual inputs. 
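-      # (Added note: labels narrower than the 97 logit timesteps should be
-      # zero-padded on the right, and wider ones clipped, as the three
-      # cases below check.)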
- real_logits = _rand(4, 97, 42) - real_labels = _rand(4, 85) - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (4, 97)) - real_labels = _rand(4, 97) - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (4, 97)) - real_labels = _rand(4, 100) - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (4, 97)) - - def testPadLabels3d(self): - """Must pad height and width in labels to match logits. - - The tricky thing with 3-d is that the rows and columns need to remain - intact, so we'll test it with small known data. - """ - with self.test_session() as sess: - # Make placeholders for logits and labels. - ph_logits = tf.placeholder(tf.float32, shape=(None, None, None, 42)) - ph_labels = tf.placeholder(tf.int64, shape=(None, None, None)) - padded_labels = vgsl_model._PadLabels3d(ph_logits, ph_labels) - # Make actual inputs. - real_logits = _rand(1, 3, 4, 42) - # Test all 9 combinations of height x width in [small, ok, big] - real_labels = np.arange(6).reshape((1, 2, 3)) # Height small, width small - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 0], [3, 4, 5, 0], [0, 0, 0, 0]]) - real_labels = np.arange(8).reshape((1, 2, 4)) # Height small, width ok - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [4, 5, 6, 7], [0, 0, 0, 0]]) - real_labels = np.arange(10).reshape((1, 2, 5)) # Height small, width big - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [5, 6, 7, 8], [0, 0, 0, 0]]) - real_labels = np.arange(9).reshape((1, 3, 3)) # Height ok, width small - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 0], [3, 4, 5, 0], [6, 7, 8, 0]]) - real_labels = np.arange(12).reshape((1, 3, 4)) # Height ok, width ok - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) - real_labels = np.arange(15).reshape((1, 3, 5)) # Height ok, width big - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [5, 6, 7, 8], [10, 11, 12, 13]]) - real_labels = np.arange(12).reshape((1, 4, 3)) # Height big, width small - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 0], [3, 4, 5, 0], [6, 7, 8, 0]]) - real_labels = np.arange(16).reshape((1, 4, 4)) # Height big, width ok - np_array = 
sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) - real_labels = np.arange(20).reshape((1, 4, 5)) # Height big, width big - np_array = sess.run([padded_labels], - feed_dict={ph_logits: real_logits, - ph_labels: real_labels})[0] - self.assertEqual(tuple(np_array.shape), (1, 3, 4)) - self.assertAllEqual(np_array[0, :, :], - [[0, 1, 2, 3], [5, 6, 7, 8], [10, 11, 12, 13]]) - - def testEndToEndSizes0d(self): - """Tests that the output sizes match when training/running real 0d data. - - Uses mnist with dual summarizing LSTMs to reduce to a single value. - """ - filename = _testdata('mnist-tiny') - with self.test_session() as sess: - model = vgsl_model.InitNetwork( - filename, - model_spec='4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12', - mode='train') - tf.global_variables_initializer().run(session=sess) - coord = tf.train.Coordinator() - tf.train.start_queue_runners(sess=sess, coord=coord) - _, step = model.TrainAStep(sess) - self.assertEqual(step, 1) - output, labels = model.RunAStep(sess) - self.assertEqual(len(output.shape), 2) - self.assertEqual(len(labels.shape), 1) - self.assertEqual(output.shape[0], labels.shape[0]) - self.assertEqual(output.shape[1], 12) - - # TODO(rays) Support logistic and test with Imagenet (as 0d, multi-object.) - - def testEndToEndSizes1dCTC(self): - """Tests that the output sizes match when training with CTC. - - Basic bidi LSTM on top of convolution and summarizing LSTM with CTC. - """ - filename = _testdata('arial-32-tiny') - with self.test_session() as sess: - model = vgsl_model.InitNetwork( - filename, - model_spec='2,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lbx100]O1c105', - mode='train') - tf.global_variables_initializer().run(session=sess) - coord = tf.train.Coordinator() - tf.train.start_queue_runners(sess=sess, coord=coord) - _, step = model.TrainAStep(sess) - self.assertEqual(step, 1) - output, labels = model.RunAStep(sess) - self.assertEqual(len(output.shape), 3) - self.assertEqual(len(labels.shape), 2) - self.assertEqual(output.shape[0], labels.shape[0]) - # This is ctc - the only cast-iron guarantee is labels <= output. - self.assertLessEqual(labels.shape[1], output.shape[1]) - self.assertEqual(output.shape[2], 105) - - def testEndToEndSizes1dFixed(self): - """Tests that the output sizes match when training/running 1 data. - - Convolution, summarizing LSTM with fwd rev fwd to allow no CTC. - """ - filename = _testdata('numbers-16-tiny') - with self.test_session() as sess: - model = vgsl_model.InitNetwork( - filename, - model_spec='8,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfx64 Lrx64 Lfx64]O1s12', - mode='train') - tf.global_variables_initializer().run(session=sess) - coord = tf.train.Coordinator() - tf.train.start_queue_runners(sess=sess, coord=coord) - _, step = model.TrainAStep(sess) - self.assertEqual(step, 1) - output, labels = model.RunAStep(sess) - self.assertEqual(len(output.shape), 3) - self.assertEqual(len(labels.shape), 2) - self.assertEqual(output.shape[0], labels.shape[0]) - # Not CTC, output lengths match. - self.assertEqual(output.shape[1], labels.shape[1]) - self.assertEqual(output.shape[2], 12) - - # TODO(rays) Get a 2-d dataset and support 2d (heat map) outputs. 
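-  # (Added note on the spec strings used above, e.g.
-  # '4,0,0,1[Cr5,5,16 Mp3,3 Lfys16 Lfxs16]O0s12': '4,0,0,1' is the input
-  # spec (batch=4, variable height/width, depth=1), the bracketed part is
-  # the layer spec parsed by vgslspecs.py, and 'O0s12' requests a 0-d
-  # softmax output with 12 classes.)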
- - -if __name__ == '__main__': - tf.test.main() diff --git a/research/street/python/vgsl_train.py b/research/street/python/vgsl_train.py deleted file mode 100644 index 8dd830897..000000000 --- a/research/street/python/vgsl_train.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Model trainer for single or multi-replica training.""" -from tensorflow import app -from tensorflow.python.platform import flags - -import vgsl_model - -flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') -flags.DEFINE_string('train_dir', '/tmp/mdir', - 'Directory where to write event logs.') -flags.DEFINE_string('model_str', - '1,150,600,3[S2(4x150)0,2 Ct5,5,16 Mp2,2 Ct5,5,64 Mp3,3' - '([Lrys64 Lbx128][Lbys64 Lbx128][Lfys64 Lbx128])S3(3x0)2,3' - 'Lfx128 Lrx128 S0(1x4)0,3 Do Lfx256]O1c134', - 'Network description.') -flags.DEFINE_integer('max_steps', 10000, 'Number of steps to train for.') -flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.') -flags.DEFINE_integer('ps_tasks', 0, 'Number of tasks in the ps job.' - 'If 0 no ps job is used.') -flags.DEFINE_string('train_data', None, 'Training data filepattern') -flags.DEFINE_float('initial_learning_rate', 0.00002, 'Initial learning rate') -flags.DEFINE_float('final_learning_rate', 0.00002, 'Final learning rate') -flags.DEFINE_integer('learning_rate_halflife', 1600000, - 'Halflife of learning rate') -flags.DEFINE_string('optimizer_type', 'Adam', - 'Optimizer from:GradientDescent, AdaGrad, Momentum, Adam') -flags.DEFINE_integer('num_preprocess_threads', 4, 'Number of input threads') - -FLAGS = flags.FLAGS - - -def main(argv): - del argv - vgsl_model.Train(FLAGS.train_dir, FLAGS.model_str, FLAGS.train_data, - FLAGS.max_steps, FLAGS.master, FLAGS.task, FLAGS.ps_tasks, - FLAGS.initial_learning_rate, FLAGS.final_learning_rate, - FLAGS.learning_rate_halflife, FLAGS.optimizer_type, - FLAGS.num_preprocess_threads) - - -if __name__ == '__main__': - app.run() diff --git a/research/street/python/vgslspecs.py b/research/street/python/vgslspecs.py deleted file mode 100644 index 36b5c668a..000000000 --- a/research/street/python/vgslspecs.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-
-"""String network description language mapping to TF-Slim calls where possible.
-
-See vgslspecs.md for detailed description.
-"""
-
-import re
-from string import maketrans
-
-import nn_ops
-import shapes
-from six.moves import xrange
-import tensorflow as tf
-import tensorflow.contrib.slim as slim
-
-
-# Class that builds a set of ops to manipulate variable-sized images.
-class VGSLSpecs(object):
-  """Layers that can be built from a string definition."""
-
-  def __init__(self, widths, heights, is_training):
-    """Constructs a VGSLSpecs.
-
-    Args:
-      widths: Tensor of size batch_size of the widths of the inputs.
-      heights: Tensor of size batch_size of the heights of the inputs.
-      is_training: True if the graph should be built for training.
-    """
-    # The string that was used to build this model.
-    self.model_str = None
-    # True if we are training.
-    self.is_training = is_training
-    # Tensors for the size of the images, each of size batch_size.
-    self.widths = widths
-    self.heights = heights
-    # Overall reduction factors of this model so far for each dimension.
-    # TODO(rays) consider building a graph from widths and heights instead of
-    # computing a scale factor.
-    self.reduction_factors = [1.0, 1.0, 1.0, 1.0]
-    # List of Op parsers.
-    # TODO(rays) add more Op types as needed.
-    self.valid_ops = [self.AddSeries, self.AddParallel, self.AddConvLayer,
-                      self.AddMaxPool, self.AddDropout, self.AddReShape,
-                      self.AddFCLayer, self.AddLSTMLayer]
-    # Translation table to convert unacceptable characters that may occur
-    # in op strings that cannot be used as names.
-    self.transtab = maketrans('(,)', '___')
-
-  def Build(self, prev_layer, model_str):
-    """Builds a network with input prev_layer from a VGSLSpecs description.
-
-    Args:
-      prev_layer: The input tensor.
-      model_str: Model definition similar to Tesseract as follows:
-        ============ FUNCTIONAL OPS ============
-        C(s|t|r|l|m)[{name}]<y>,<x>,<d> Convolves using a y,x window, with no
-          shrinkage, SAME infill, d outputs, with s|t|r|l|m non-linear layer.
-          (s|t|r|l|m) specifies the type of non-linearity:
-          s = sigmoid
-          t = tanh
-          r = relu
-          l = linear (i.e., None)
-          m = softmax
-        F(s|t|r|l|m)[{name}]<d> Fully-connected with s|t|r|l|m non-linearity
-          and d outputs. Reduces height, width to 1. Input height and width
-          must be constant.
-        L(f|r|b)(x|y)[s][{name}]<n> LSTM cell with n outputs.
-          f runs the LSTM forward only.
-          r runs the LSTM reversed only.
-          b runs the LSTM bidirectionally.
-          x runs the LSTM in the x-dimension (on data with or without the
-            y-dimension).
-          y runs the LSTM in the y-dimension (data must have a y dimension).
-          s (optional) summarizes the output in the requested dimension,
-            outputting only the final step, collapsing the dimension to a
-            single element.
-          Examples:
-          Lfx128 runs a forward-only LSTM in the x-dimension with 128
-            outputs, treating any y dimension independently.
-          Lfys64 runs a forward-only LSTM in the y-dimension with 64 outputs
-            and collapses the y-dimension to 1 element.
-          NOTE that Lbxsn is implemented as (LfxsnLrxsn) since the summaries
-          need to be taken from opposite ends of the output.
-        Do[{name}] Insert a dropout layer.
-        ============ PLUMBING OPS ============
-        [...] Execute ... networks in series (layers).
-        (...) Execute ... networks in parallel, with their output concatenated
-          in depth.
-        S[{name}]<d>(<a>x<b>)<e>,<f> Splits one dimension, moves one part to
-          another dimension.
-          Splits input dimension d into a x b, sending the high part (a) to
-          the high side of dimension e, and the low part (b) to the high side
-          of dimension f. Exception: if d=e=f, then dimension d is internally
-          transposed to bxa.
-          Either a or b can be zero, meaning whatever is left after taking out
-          the other, allowing dimensions to be of variable size.
-          Eg. S3(3x50)2,3 will split the 150-element depth into 3x50, with
-          the 3 going to the most significant part of the width, and the 50
-          part staying in depth.
-          This will rearrange a 3x50 output parallel operation to spread the
-          3 output sets over width.
-        Mp[{name}]<y>,<x> Maxpool the input, reducing each (y,x) rectangle to
-          a single vector value.
-
-    Returns:
-      Output tensor.
-    """
-    self.model_str = model_str
-    final_layer, _ = self.BuildFromString(prev_layer, 0)
-    return final_layer
-
-  def GetLengths(self, dim=2, factor=1):
-    """Returns the lengths of the batch of elements in the given dimension.
-
-    WARNING: The returned sizes may not exactly match TF's calculation.
-    Args:
-      dim: dimension to get the sizes of, in [1,2]. batch, depth not allowed.
-      factor: A scalar value to multiply by.
-
-    Returns:
-      The original heights/widths scaled by the current scaling of the model
-      and the given factor.
-
-    Raises:
-      ValueError: If the args are invalid.
-    """
-    if dim == 1:
-      lengths = self.heights
-    elif dim == 2:
-      lengths = self.widths
-    else:
-      raise ValueError('Invalid dimension given to GetLengths')
-    lengths = tf.cast(lengths, tf.float32)
-    if self.reduction_factors[dim] is not None:
-      lengths = tf.div(lengths, self.reduction_factors[dim])
-    else:
-      lengths = tf.ones_like(lengths)
-    if factor != 1:
-      lengths = tf.multiply(lengths, tf.cast(factor, tf.float32))
-    return tf.cast(lengths, tf.int32)
-
-  def BuildFromString(self, prev_layer, index):
-    """Adds the layers defined by model_str[index:] to the model.
-
-    Args:
-      prev_layer: Input tensor.
-      index: Position in model_str to start parsing.
-
-    Returns:
-      Output tensor, next model_str index.
-
-    Raises:
-      ValueError: If the model string is unrecognized.
-    """
-    index = self._SkipWhitespace(index)
-    for op in self.valid_ops:
-      output_layer, next_index = op(prev_layer, index)
-      if output_layer is not None:
-        return output_layer, next_index
-    raise ValueError('Unrecognized model string:' + self.model_str[index:])
-
-  def AddSeries(self, prev_layer, index):
-    """Builds a sequence of layers for a VGSLSpecs model.
-
-    Args:
-      prev_layer: Input tensor.
-      index: Position in model_str to start parsing.
-
-    Returns:
-      Output tensor of the series, end index in model_str.
-
-    Raises:
-      ValueError: If [] are unbalanced.
-    """
-    if self.model_str[index] != '[':
-      return None, None
-    index += 1
-    while index < len(self.model_str) and self.model_str[index] != ']':
-      prev_layer, index = self.BuildFromString(prev_layer, index)
-    if index == len(self.model_str):
-      raise ValueError('Missing ] at end of series!' + self.model_str)
-    return prev_layer, index + 1
-
-  def AddParallel(self, prev_layer, index):
-    """tf.concats outputs of layers that run on the same inputs.
-
-    Args:
-      prev_layer: Input tensor.
-      index: Position in model_str to start parsing.
-
-    Returns:
-      Output tensor of the parallel, end index in model_str.
-
-    Raises:
-      ValueError: If () are unbalanced or the elements don't match.
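-
-    Example (illustrative): '(Lfx32 Lrx32 Lbx16)' concatenates depths
-    32 + 32 + 2*16 (bidirectional LSTMs double their depth), giving 96
-    output channels.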
- """ - if self.model_str[index] != '(': - return None, None - index += 1 - layers = [] - num_dims = 0 - # Each parallel must output the same, including any reduction factor, in - # all dimensions except depth. - # We have to save the starting factors, so they don't get reduced by all - # the elements of the parallel, only once. - original_factors = self.reduction_factors - final_factors = None - while index < len(self.model_str) and self.model_str[index] != ')': - self.reduction_factors = original_factors - layer, index = self.BuildFromString(prev_layer, index) - if num_dims == 0: - num_dims = len(layer.get_shape()) - elif num_dims != len(layer.get_shape()): - raise ValueError('All elements of parallel must return same num dims') - layers.append(layer) - if final_factors: - if final_factors != self.reduction_factors: - raise ValueError('All elements of parallel must scale the same') - else: - final_factors = self.reduction_factors - if index == len(self.model_str): - raise ValueError('Missing ) at end of parallel!' + self.model_str) - return tf.concat(axis=num_dims - 1, values=layers), index + 1 - - def AddConvLayer(self, prev_layer, index): - """Add a single standard convolutional layer. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. - """ - pattern = re.compile(R'(C)(s|t|r|l|m)({\w+})?(\d+),(\d+),(\d+)') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - name = self._GetLayerName(m.group(0), index, m.group(3)) - width = int(m.group(4)) - height = int(m.group(5)) - depth = int(m.group(6)) - fn = self._NonLinearity(m.group(2)) - return slim.conv2d( - prev_layer, depth, [height, width], activation_fn=fn, - scope=name), m.end() - - def AddMaxPool(self, prev_layer, index): - """Add a maxpool layer. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. - """ - pattern = re.compile(R'(Mp)({\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - name = self._GetLayerName(m.group(0), index, m.group(2)) - height = int(m.group(3)) - width = int(m.group(4)) - y_stride = height if m.group(5) is None else m.group(5) - x_stride = width if m.group(6) is None else m.group(6) - self.reduction_factors[1] *= y_stride - self.reduction_factors[2] *= x_stride - return slim.max_pool2d( - prev_layer, [height, width], [y_stride, x_stride], - padding='SAME', - scope=name), m.end() - - def AddDropout(self, prev_layer, index): - """Adds a dropout layer. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. - """ - pattern = re.compile(R'(Do)({\w+})?') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - name = self._GetLayerName(m.group(0), index, m.group(2)) - layer = slim.dropout( - prev_layer, 0.5, is_training=self.is_training, scope=name) - return layer, m.end() - - def AddReShape(self, prev_layer, index): - """Reshapes the input tensor by moving each (x_scale,y_scale) rectangle to. - - the depth dimension. NOTE that the TF convention is that inputs are - [batch, y, x, depth]. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. 
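-
-    For example (illustrative, using the Eg. from the Build docstring):
-    S3(3x50)2,3 splits a 150-channel depth into 3x50 and moves the 3 to the
-    width dimension, so [batch, y, x, 150] becomes [batch, y, 3*x, 50].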
- """ - pattern = re.compile(R'(S)(?:{(\w)})?(\d+)\((\d+)x(\d+)\)(\d+),(\d+)') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - name = self._GetLayerName(m.group(0), index, m.group(2)) - src_dim = int(m.group(3)) - part_a = int(m.group(4)) - part_b = int(m.group(5)) - dest_dim_a = int(m.group(6)) - dest_dim_b = int(m.group(7)) - if part_a == 0: - part_a = -1 - if part_b == 0: - part_b = -1 - prev_shape = tf.shape(prev_layer) - layer = shapes.transposing_reshape( - prev_layer, src_dim, part_a, part_b, dest_dim_a, dest_dim_b, name=name) - # Compute scale factors. - result_shape = tf.shape(layer) - for i in xrange(len(self.reduction_factors)): - if self.reduction_factors[i] is not None: - factor1 = tf.cast(self.reduction_factors[i], tf.float32) - factor2 = tf.cast(prev_shape[i], tf.float32) - divisor = tf.cast(result_shape[i], tf.float32) - self.reduction_factors[i] = tf.div(tf.multiply(factor1, factor2), divisor) - return layer, m.end() - - def AddFCLayer(self, prev_layer, index): - """Parse expression and add Fully Connected Layer. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. - """ - pattern = re.compile(R'(F)(s|t|r|l|m)({\w+})?(\d+)') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - fn = self._NonLinearity(m.group(2)) - name = self._GetLayerName(m.group(0), index, m.group(3)) - depth = int(m.group(4)) - input_depth = shapes.tensor_dim(prev_layer, 1) * shapes.tensor_dim( - prev_layer, 2) * shapes.tensor_dim(prev_layer, 3) - # The slim fully connected is actually a 1x1 conv, so we have to crush the - # dimensions on input. - # Everything except batch goes to depth, and therefore has to be known. - shaped = tf.reshape( - prev_layer, [-1, input_depth], name=name + '_reshape_in') - output = slim.fully_connected(shaped, depth, activation_fn=fn, scope=name) - # Width and height are collapsed to 1. - self.reduction_factors[1] = None - self.reduction_factors[2] = None - return tf.reshape( - output, [shapes.tensor_dim(prev_layer, 0), 1, 1, depth], - name=name + '_reshape_out'), m.end() - - def AddLSTMLayer(self, prev_layer, index): - """Parse expression and add LSTM Layer. - - Args: - prev_layer: Input tensor. - index: Position in model_str to start parsing - - Returns: - Output tensor, end index in model_str. - """ - pattern = re.compile(R'(L)(f|r|b)(x|y)(s)?({\w+})?(\d+)') - m = pattern.match(self.model_str, index) - if m is None: - return None, None - direction = m.group(2) - dim = m.group(3) - summarize = m.group(4) == 's' - name = self._GetLayerName(m.group(0), index, m.group(5)) - depth = int(m.group(6)) - if direction == 'b' and summarize: - fwd = self._LSTMLayer(prev_layer, 'forward', dim, True, depth, - name + '_forward') - back = self._LSTMLayer(prev_layer, 'backward', dim, True, depth, - name + '_reverse') - return tf.concat(axis=3, values=[fwd, back], name=name + '_concat'), m.end() - if direction == 'f': - direction = 'forward' - elif direction == 'r': - direction = 'backward' - else: - direction = 'bidirectional' - outputs = self._LSTMLayer(prev_layer, direction, dim, summarize, depth, - name) - if summarize: - # The x or y dimension is getting collapsed. - if dim == 'x': - self.reduction_factors[2] = None - else: - self.reduction_factors[1] = None - return outputs, m.end() - - def _LSTMLayer(self, prev_layer, direction, dim, summarize, depth, name): - """Adds an LSTM layer with the given pre-parsed attributes. 
- - Always maps 4-D to 4-D regardless of summarize. - Args: - prev_layer: Input tensor. - direction: 'forward' 'backward' or 'bidirectional' - dim: 'x' or 'y', dimension to consider as time. - summarize: True if we are to return only the last timestep. - depth: Output depth. - name: Some string naming the op. - - Returns: - Output tensor. - """ - # If the target dimension is y, we need to transpose. - if dim == 'x': - lengths = self.GetLengths(2, 1) - inputs = prev_layer - else: - lengths = self.GetLengths(1, 1) - inputs = tf.transpose(prev_layer, [0, 2, 1, 3], name=name + '_ytrans_in') - input_batch = shapes.tensor_dim(inputs, 0) - num_slices = shapes.tensor_dim(inputs, 1) - num_steps = shapes.tensor_dim(inputs, 2) - input_depth = shapes.tensor_dim(inputs, 3) - # Reshape away the other dimension. - inputs = tf.reshape( - inputs, [-1, num_steps, input_depth], name=name + '_reshape_in') - # We need to replicate the lengths by the size of the other dimension, and - # any changes that have been made to the batch dimension. - tile_factor = tf.to_float(input_batch * - num_slices) / tf.to_float(tf.shape(lengths)[0]) - lengths = tf.tile(lengths, [tf.cast(tile_factor, tf.int32)]) - lengths = tf.cast(lengths, tf.int64) - outputs = nn_ops.rnn_helper( - inputs, - lengths, - cell_type='lstm', - num_nodes=depth, - direction=direction, - name=name, - stddev=0.1) - # Output depth is doubled if bi-directional. - if direction == 'bidirectional': - output_depth = depth * 2 - else: - output_depth = depth - # Restore the other dimension. - if summarize: - outputs = tf.slice( - outputs, [0, num_steps - 1, 0], [-1, 1, -1], name=name + '_sum_slice') - outputs = tf.reshape( - outputs, [input_batch, num_slices, 1, output_depth], - name=name + '_reshape_out') - else: - outputs = tf.reshape( - outputs, [input_batch, num_slices, num_steps, output_depth], - name=name + '_reshape_out') - if dim == 'y': - outputs = tf.transpose(outputs, [0, 2, 1, 3], name=name + '_ytrans_out') - return outputs - - def _NonLinearity(self, code): - """Returns the non-linearity function pointer for the given string code. - - For forwards compatibility, allows the full names for stand-alone - non-linearities, as well as the single-letter names used in ops like C,F. - Args: - code: String code representing a non-linearity function. - Returns: - non-linearity function represented by the code. - """ - if code in ['s', 'Sig']: - return tf.sigmoid - elif code in ['t', 'Tanh']: - return tf.tanh - elif code in ['r', 'Relu']: - return tf.nn.relu - elif code in ['m', 'Smax']: - return tf.nn.softmax - return None - - def _GetLayerName(self, op_str, index, name_str): - """Generates a name for the op, using a user-supplied name if possible. - - Args: - op_str: String representing the parsed op. - index: Position in model_str of the start of the op. - name_str: User-supplied {name} with {} that need removing or None. - - Returns: - Selected name. - """ - if name_str: - return name_str[1:-1] - else: - return op_str.translate(self.transtab) + '_' + str(index) - - def _SkipWhitespace(self, index): - """Skips any leading whitespace in the model description. - - Args: - index: Position in model_str to start parsing - - Returns: - end index in model_str of whitespace. 
- """ - pattern = re.compile(R'([ \t\n]+)') - m = pattern.match(self.model_str, index) - if m is None: - return index - return m.end() diff --git a/research/street/python/vgslspecs_test.py b/research/street/python/vgslspecs_test.py deleted file mode 100644 index 69ea28bf7..000000000 --- a/research/street/python/vgslspecs_test.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for vgslspecs.""" - -import numpy as np -import tensorflow as tf -import vgslspecs - - -def _rand(*size): - return np.random.uniform(size=size).astype('f') - - -class VgslspecsTest(tf.test.TestCase): - - def __init__(self, other): - super(VgslspecsTest, self).__init__(other) - self.max_width = 36 - self.max_height = 24 - self.batch_size = 4 - - def SetupInputs(self): - # Make placeholders for standard inputs. - # Everything is variable in the input, except the depth. - self.ph_image = tf.placeholder( - tf.float32, shape=(None, None, None, 3), name='inputs') - self.ph_widths = tf.placeholder(tf.int64, shape=(None,), name='w') - self.ph_heights = tf.placeholder(tf.int64, shape=(None,), name='h') - # Make actual inputs. - self.in_image = _rand(self.batch_size, self.max_height, self.max_width, 3) - self.in_widths = [24, 12, self.max_width, 30] - self.in_heights = [self.max_height, 18, 12, 6] - - def ExpectScaledSize(self, spec, target_shape, factor=1): - """Tests that the output of the graph of the given spec has target_shape.""" - with tf.Graph().as_default(): - with self.test_session() as sess: - self.SetupInputs() - # Only the placeholders are given at construction time. - vgsl = vgslspecs.VGSLSpecs(self.ph_widths, self.ph_heights, True) - outputs = vgsl.Build(self.ph_image, spec) - # Compute the expected output widths from the given scale factor. - target_widths = tf.div(self.in_widths, factor).eval() - target_heights = tf.div(self.in_heights, factor).eval() - # Run with the 'real' data. - tf.global_variables_initializer().run() - res_image, res_widths, res_heights = sess.run( - [outputs, vgsl.GetLengths(2), vgsl.GetLengths(1)], - feed_dict={self.ph_image: self.in_image, - self.ph_widths: self.in_widths, - self.ph_heights: self.in_heights}) - self.assertEqual(tuple(res_image.shape), target_shape) - if target_shape[1] > 1: - self.assertEqual(tuple(res_heights), tuple(target_heights)) - if target_shape[2] > 1: - self.assertEqual(tuple(res_widths), tuple(target_widths)) - - def testSameSizeConv(self): - """Test all types of Conv. There is no scaling.""" - self.ExpectScaledSize( - '[Cs{MyConv}5,5,16 Ct3,3,12 Cr4,4,24 Cl5,5,64]', - (self.batch_size, self.max_height, self.max_width, 64)) - - def testSameSizeLSTM(self): - """Test all non-reducing LSTMs. 
Output depth is doubled with BiDi."""
-    self.ExpectScaledSize('[Lfx16 Lrx8 Do Lbx24 Lfy12 Do{MyDo} Lry7 Lby32]',
-                          (self.batch_size, self.max_height, self.max_width,
-                           64))
-
-  def testSameSizeParallel(self):
-    """Parallel affects depth, but not scale."""
-    self.ExpectScaledSize('[Cs5,5,16 (Lfx{MyLSTM}32 Lrx32 Lbx16)]',
-                          (self.batch_size, self.max_height, self.max_width,
-                           96))
-
-  def testScalingOps(self):
-    """Test a heterogeneous series with scaling."""
-    self.ExpectScaledSize('[Cs5,5,16 Mp{MyPool}2,2 Ct3,3,32 Mp3,3 Lfx32 Lry64]',
-                          (self.batch_size, self.max_height / 6,
-                           self.max_width / 6, 64), 6)
-
-  def testXReduction(self):
-    """Test a heterogeneous series with reduction of x-dimension."""
-    self.ExpectScaledSize('[Cr5,5,16 Mp2,2 Ct3,3,32 Mp3,3 Lfxs32 Lry64]',
-                          (self.batch_size, self.max_height / 6, 1, 64), 6)
-
-  def testYReduction(self):
-    """Test a heterogeneous series with reduction of y-dimension."""
-    self.ExpectScaledSize('[Cl5,5,16 Mp2,2 Ct3,3,32 Mp3,3 Lfys32 Lfx64]',
-                          (self.batch_size, 1, self.max_width / 6, 64), 6)
-
-  def testXYReduction(self):
-    """Test a heterogeneous series with reduction to 0-d."""
-    self.ExpectScaledSize(
-        '[Cr5,5,16 Lfys32 Lfxs64 Fr{MyFC}16 Ft20 Fl12 Fs32 Fm40]',
-        (self.batch_size, 1, 1, 40))
-
-  def testReshapeTile(self):
-    """Tests that a tiled input can be reshaped to the batch dimension."""
-    self.ExpectScaledSize('[S2(3x0)0,2 Cr5,5,16 Lfys16]',
-                          (self.batch_size * 3, 1, self.max_width / 3, 16), 3)
-
-  def testReshapeDepth(self):
-    """Tests that depth can be reshaped to the x dimension."""
-    self.ExpectScaledSize('[Cl5,5,16 Mp3,3 (Lrys32 Lbys16 Lfys32) S3(3x0)2,3]',
-                          (self.batch_size, 1, self.max_width, 32))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/street/testdata/arial-32-tiny b/research/street/testdata/arial-32-tiny
deleted file mode 100644
index b551e2bde5fbfe10648af75bb90275d50fa6f201..0000000000000000000000000000000000000000
Binary files a/research/street/testdata/arial-32-tiny and /dev/null differ
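The charset files removed below use a simple `<index>[,<index>...] <text>` format: each line maps a class index to a string, the same index may appear on several lines (equivalent glyphs such as the different quote marks share a class), and a comma-separated index pair maps a ligature to a sequence of classes (e.g. `47,5 fl`). A minimal reader for this format might look like the following sketch (illustrative only; `read_charset` is not part of this repository):

```python
import collections


def read_charset(path):
  """Parses '<index>[,<index>...] <text>' charset lines (illustrative)."""
  charset = collections.defaultdict(list)
  with open(path, encoding='utf-8') as f:
    for line in f:
      line = line.rstrip('\n')
      if not line:
        continue
      key, _, text = line.partition(' ')
      ids = tuple(int(i) for i in key.split(','))
      # An id tuple can map to several equivalent strings, e.g. quote marks.
      charset[ids].append(text)
  return charset
```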
diff --git a/research/street/testdata/arial.charset_size=105.txt b/research/street/testdata/arial.charset_size=105.txt
deleted file mode 100644
index feec47e0a..000000000
--- a/research/street/testdata/arial.charset_size=105.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-0
-104
-1 G
-2 r
-3 a
-4 s
-5 l
-6 n
-7 d
-8 .
-9 B
-10 C
-11 O
-12 W
-13 Y
-14 ,
-15 (
-16 u
-17 z
-18 i
-19 e
-20 )
-21 1
-22 9
-23 2
-24 -
-25 6
-26 o
-27 L
-28 P
-29 '
-30 t
-31 m
-32 K
-33 c
-34 k
-35 V
-36 S
-37 D
-38 J
-39 h
-40 M
-41 x
-42 E
-43 q
-44 ;
-45 A
-46 y
-47 f
-48 5
-49 7
-50 b
-51 4
-52 0
-53 3
-54 N
-55 I
-56 T
-57 /
-58 p
-59 w
-60 g
-61 H
-62 “
-63 F
-62 ”
-62 "
-29 ’
-64 R
-24 —
-65 8
-66 v
-67 ?
-68 é
-69 %
-70 :
-71 j
-72 \
-73 {
-74 }
-75 |
-76 U
-77 $
-78 °
-79 *
-80 !
-81 ]
-82 Q
-29 ‘
-83 Z
-84 X
-85 [
-86 =
-87 +
-88 §
-89 _
-90 £
-91 &
-92 #
-93 >
-94 <
-95 ~
-96 €
-97 @
-98 ¢
-99 »
-100 «
-47,5 fl
-47,18 fi
-101 ®
-102 ©
-103 ¥
diff --git a/research/street/testdata/charset_size=134.txt b/research/street/testdata/charset_size=134.txt
deleted file mode 100644
index 5c7fcde2a..000000000
--- a/research/street/testdata/charset_size=134.txt
+++ /dev/null
@@ -1,139 +0,0 @@
-0
-133
-1 l
-2 ’
-3 é
-4 t
-5 e
-6 i
-7 n
-8 s
-9 x
-10 g
-11 u
-12 o
-13 1
-14 8
-15 7
-16 0
-17 -
-18 .
-19 p
-20 a
-21 r
-22 è
-23 d
-24 c
-25 V
-26 v
-27 b
-28 m
-29 )
-30 C
-31 z
-32 S
-33 y
-34 ,
-35 k
-36 É
-37 A
-38 h
-39 E
-40 »
-41 D
-42 /
-43 H
-44 M
-45 (
-46 G
-47 P
-48 ç
-2 '
-49 R
-50 f
-51 "
-52 2
-53 j
-54 |
-55 N
-56 6
-57 °
-58 5
-59 T
-60 O
-61 U
-62 3
-63 %
-64 9
-65 q
-66 Z
-67 B
-68 K
-69 w
-70 W
-71 :
-72 4
-73 L
-74 F
-75 ]
-76 ï
-2 ‘
-77 I
-78 J
-79 ä
-80 î
-81 ;
-82 à
-83 ê
-84 X
-85 ü
-86 Y
-87 ô
-88 =
-89 +
-90 \
-91 {
-92 }
-93 _
-94 Q
-95 œ
-96 ñ
-97 *
-98 !
-99 Ü
-51 “
-100 â
-101 Ç
-102 Œ
-103 û
-104 ?
-105 $
-106 ë
-107 «
-108 €
-109 &
-110 <
-51 ”
-111 æ
-112 #
-113 ®
-114 Â
-115 È
-116 >
-117 [
-17 —
-118 Æ
-119 ù
-120 Î
-121 Ô
-122 ÿ
-123 À
-124 Ê
-125 @
-126 Ï
-127 ©
-128 Ë
-129 Ù
-130 £
-131 Ÿ
-132 Û
diff --git a/research/street/testdata/charset_size_10.txt b/research/street/testdata/charset_size_10.txt
deleted file mode 100644
index 93fffbd0f..000000000
--- a/research/street/testdata/charset_size_10.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-0
-9
-1 a
-2 b
-3 r
-4 n
-4,5 m
-6 f
-7 .
-8 ,
diff --git a/research/street/testdata/mnist-tiny b/research/street/testdata/mnist-tiny
deleted file mode 100644
index 1470119aff9891557dcef02981c40ae958e6bd11..0000000000000000000000000000000000000000
Binary files a/research/street/testdata/mnist-tiny and /dev/null differ
diff --git a/research/street/testdata/numbers-16-tiny b/research/street/testdata/numbers-16-tiny
deleted file mode 100644
index bb0c11fcd034fad1cf650b386bf94476c9c1ccd1..0000000000000000000000000000000000000000
Binary files a/research/street/testdata/numbers-16-tiny and /dev/null differ
-1 9
-2 8
-3 7
-4 6
-5 1
-6 4
-7 0
-8 3
-9 5
-10 2
diff --git a/research/struct2depth/BUILD b/research/struct2depth/BUILD
deleted file mode 100644
index ffd0fb0cd..000000000
--- a/research/struct2depth/BUILD
+++ /dev/null
@@ -1 +0,0 @@
-package(default_visibility = ["//visibility:public"])
diff --git a/research/struct2depth/README.md b/research/struct2depth/README.md
deleted file mode 100644
index de1d7e7f2..000000000
--- a/research/struct2depth/README.md
+++ /dev/null
@@ -1,151 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# struct2depth
-
-This is a method for unsupervised learning of depth and egomotion from monocular video, achieving new state-of-the-art results on both tasks by explicitly modeling 3D object motion, performing on-line refinement, and improving quality for moving objects with novel loss formulations. It will appear in the following paper:
-
-**V. Casser, S. Pirk, R. Mahjourian, A. 
Angelova, Depth Prediction Without the Sensors: Leveraging Structure for Unsupervised Learning from Monocular Videos, AAAI Conference on Artificial Intelligence, 2019**
-https://arxiv.org/pdf/1811.06152.pdf
-
-This code is implemented and supported by Vincent Casser (git username: VincentCa) and Anelia Angelova (git username: AneliaAngelova). Please contact anelia@google.com for questions.
-
-Project website: https://sites.google.com/view/struct2depth.
-
-## Quick start: Running training
-
-Before running training, run the gen_data_* script for the respective dataset to generate the data in the appropriate format for KITTI or Cityscapes. It is assumed that motion masks are already generated and stored as images.
-Models are trained from an ImageNet-pretrained model.
-
-```shell
-
-ckpt_dir="your/checkpoint/folder"
-data_dir="KITTI_SEQ2_LR/" # Set for KITTI
-data_dir="CITYSCAPES_SEQ2_LR/" # Set for Cityscapes
-imagenet_ckpt="resnet_pretrained/model.ckpt"
-
-python train.py \
-  --logtostderr \
-  --checkpoint_dir $ckpt_dir \
-  --data_dir $data_dir \
-  --architecture resnet \
-  --imagenet_ckpt $imagenet_ckpt \
-  --imagenet_norm true \
-  --joint_encoder false
-```
-
-
-
-## Running depth/egomotion inference on an image folder
-
-The KITTI model is trained on raw image data resized to 416 x 128, with inputs standardized before being fed to the network; Cityscapes images are additionally cropped using the cropping parameters (192, 1856, 256, 768). If a different crop is used, additional training is likely necessary. Therefore, please follow the inference example shown below when using one of the models. The right choice of checkpoint depends on the use case: if a checkpoint is to be used for odometry, be aware that segmentation masks can improve odometry on motion models (set *use_masks=true* for inference). On the other hand, all models can be used for single-frame depth estimation without any additional information.
-
-
-```shell
-
-input_dir="your/image/folder"
-output_dir="your/output/folder"
-model_checkpoint="your/model/checkpoint"
-
-python inference.py \
-  --logtostderr \
-  --file_extension png \
-  --depth \
-  --egomotion true \
-  --input_dir $input_dir \
-  --output_dir $output_dir \
-  --model_ckpt $model_checkpoint
-```
-
-Note that the egomotion prediction expects the files in the input directory to form a consecutive sequence, and that sorting the filenames alphabetically must put them in the correct order.
-
-One can also run inference on KITTI by providing
-
-```shell
---input_list_file ~/kitti-raw-uncompressed/test_files_eigen.txt
-```
-
-and on Cityscapes by passing
-
-```shell
---input_list_file CITYSCAPES_FULL/test_files_cityscapes.txt
-```
-
-instead of *input_dir*.
-Alternatively, inference can also be run on pre-processed images.
-
-
-
-## Running on-line refinement
-
-On-line refinement is executed on top of an existing inference folder, so make sure to run regular inference first. Then you can run the on-line fusion procedure as follows:
-
-```shell
-
-prediction_dir="some/prediction/dir"
-model_ckpt="checkpoints/checkpoints_baseline/model-199160"
-handle_motion="false"
-size_constraint_weight="0" # This must be zero when not handling motion.
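-# (Note: a nonzero weight, e.g. 0.0005 — value illustrative — is only
-# meaningful together with handle_motion="true", which enables the
-# object-size constraint.)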
-
-# If running on KITTI, set as follows:
-data_dir="KITTI_SEQ2_LR_EIGEN/"
-triplet_list_file="$data_dir/test_files_eigen_triplets.txt"
-triplet_list_file_remains="$data_dir/test_files_eigen_triplets_remains.txt"
-ft_name="kitti"
-
-# If running on Cityscapes, set as follows:
-data_dir="CITYSCAPES_SEQ2_LR_TEST/"  # Set for Cityscapes
-triplet_list_file="$data_dir/test_files_cityscapes_triplets.txt"
-triplet_list_file_remains="$data_dir/test_files_cityscapes_triplets_remains.txt"
-ft_name="cityscapes"
-
-python optimize.py \
-  --logtostderr \
-  --output_dir $prediction_dir \
-  --data_dir $data_dir \
-  --triplet_list_file $triplet_list_file \
-  --triplet_list_file_remains $triplet_list_file_remains \
-  --ft_name $ft_name \
-  --model_ckpt $model_ckpt \
-  --file_extension png \
-  --handle_motion $handle_motion \
-  --size_constraint_weight $size_constraint_weight
-```
-
-
-
-## Running evaluation
-
-```shell
-
-prediction_dir="some/prediction/dir"
-
-# Use these settings for KITTI:
-eval_list_file="KITTI_FULL/kitti-raw-uncompressed/test_files_eigen.txt"
-eval_crop="garg"
-eval_mode="kitti"
-
-# Use these settings for Cityscapes:
-eval_list_file="CITYSCAPES_FULL/test_files_cityscapes.txt"
-eval_crop="none"
-eval_mode="cityscapes"
-
-python evaluate.py \
-  --logtostderr \
-  --prediction_dir $prediction_dir \
-  --eval_list_file $eval_list_file \
-  --eval_crop $eval_crop \
-  --eval_mode $eval_mode
-```
-
-
-
-## Credits
-
-This code is implemented and supported by Vincent Casser and Anelia Angelova and can be found at
-https://sites.google.com/view/struct2depth.
-The core implementation is derived from
-[vid2depth](https://github.com/tensorflow/models/tree/master/research/vid2depth)
-by [Reza Mahjourian](mailto:rezama@google.com), which in turn is based on
-[SfMLearner](https://github.com/tinghuiz/SfMLearner) by
-[Tinghui Zhou](https://github.com/tinghuiz).
diff --git a/research/struct2depth/alignment.py b/research/struct2depth/alignment.py
deleted file mode 100644
index 0e9417d48..000000000
--- a/research/struct2depth/alignment.py
+++ /dev/null
@@ -1,54 +0,0 @@
-
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Common utilities for data pre-processing, e.g. matching moving objects across frames."""
-
-import numpy as np
-
-def compute_overlap(mask1, mask2):
-  """Computes the intersection-over-union (IoU) of two boolean masks."""
-  return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)
-
-def align(seg_img1, seg_img2, seg_img3, threshold_same=0.3):
-  """Aligns object ids across three consecutive segmentation maps."""
-  res_img1 = np.zeros_like(seg_img1)
-  res_img2 = np.zeros_like(seg_img2)
-  res_img3 = np.zeros_like(seg_img3)
-  remaining_objects2 = list(np.unique(seg_img2.flatten()))
-  remaining_objects3 = list(np.unique(seg_img3.flatten()))
-  for seg_id in np.unique(seg_img1):
-    # See if we can find correspondences to seg_id in seg_img2.
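-    # Greedy matching: for every object id in frame 1, pick the
-    # still-unmatched id in frame 2 with the highest IoU, then chain that
-    # match into frame 3; candidates below threshold_same are discarded.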
- max_overlap2 = float('-inf') - max_segid2 = -1 - for seg_id2 in remaining_objects2: - overlap = compute_overlap(seg_img1==seg_id, seg_img2==seg_id2) - if overlap>max_overlap2: - max_overlap2 = overlap - max_segid2 = seg_id2 - if max_overlap2 > threshold_same: - max_overlap3 = float('-inf') - max_segid3 = -1 - for seg_id3 in remaining_objects3: - overlap = compute_overlap(seg_img2==max_segid2, seg_img3==seg_id3) - if overlap>max_overlap3: - max_overlap3 = overlap - max_segid3 = seg_id3 - if max_overlap3 > threshold_same: - res_img1[seg_img1==seg_id] = seg_id - res_img2[seg_img2==max_segid2] = seg_id - res_img3[seg_img3==max_segid3] = seg_id - remaining_objects2.remove(max_segid2) - remaining_objects3.remove(max_segid3) - return res_img1, res_img2, res_img3 diff --git a/research/struct2depth/gen_data_city.py b/research/struct2depth/gen_data_city.py deleted file mode 100644 index 7e18fe5ac..000000000 --- a/research/struct2depth/gen_data_city.py +++ /dev/null @@ -1,158 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -""" Offline data generation for the Cityscapes dataset.""" - -import os -from absl import app -from absl import flags -from absl import logging -import numpy as np -import cv2 -import os, glob - -import alignment -from alignment import compute_overlap -from alignment import align - - -SKIP = 2 -WIDTH = 416 -HEIGHT = 128 -SUB_FOLDER = 'train' -INPUT_DIR = '/usr/local/google/home/anelia/struct2depth/CITYSCAPES_FULL/' -OUTPUT_DIR = '/usr/local/google/home/anelia/struct2depth/CITYSCAPES_Processed/' - -def crop(img, segimg, fx, fy, cx, cy): - # Perform center cropping, preserving 50% vertically. - middle_perc = 0.50 - left = 1 - middle_perc - half = left / 2 - a = img[int(img.shape[0]*(half)):int(img.shape[0]*(1-half)), :] - aseg = segimg[int(segimg.shape[0]*(half)):int(segimg.shape[0]*(1-half)), :] - cy /= (1 / middle_perc) - - # Resize to match target height while preserving aspect ratio. - wdt = int((float(HEIGHT)*a.shape[1]/a.shape[0])) - x_scaling = float(wdt)/a.shape[1] - y_scaling = float(HEIGHT)/a.shape[0] - b = cv2.resize(a, (wdt, HEIGHT)) - bseg = cv2.resize(aseg, (wdt, HEIGHT)) - - # Adjust intrinsics. - fx*=x_scaling - fy*=y_scaling - cx*=x_scaling - cy*=y_scaling - - # Perform center cropping horizontally. 
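-  # Equal margins are cut from the left and right so that WIDTH columns
-  # remain, and the principal point cx is adjusted for the narrower image.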
-  remain = b.shape[1] - WIDTH
-  cx /= (b.shape[1] / WIDTH)
-  c = b[:, int(remain/2):b.shape[1]-int(remain/2)]
-  cseg = bseg[:, int(remain/2):b.shape[1]-int(remain/2)]
-
-  return c, cseg, fx, fy, cx, cy
-
-
-def run_all():
-  dir_name = INPUT_DIR + '/leftImg8bit_sequence/' + SUB_FOLDER + '/*'
-  print('Processing directory', dir_name)
-  for location in glob.glob(dir_name):
-    location_name = os.path.basename(location)
-    print('Processing location', location_name)
-    files = sorted(glob.glob(location + '/*.png'))
-    files = [file for file in files if '-seg.png' not in file]
-    # Break down into sequences.
-    sequences = {}
-    seq_nr = 0
-    last_seq = ''
-    last_imgnr = -1
-
-    for i in range(len(files)):
-      seq = os.path.basename(files[i]).split('_')[1]
-      nr = int(os.path.basename(files[i]).split('_')[2])
-      if seq != last_seq or last_imgnr + 1 != nr:
-        seq_nr += 1
-      last_imgnr = nr
-      last_seq = seq
-      if seq_nr not in sequences:
-        sequences[seq_nr] = []
-      sequences[seq_nr].append(files[i])
-
-    for (k, v) in sequences.items():
-      print('Processing sequence', k, 'with', len(v), 'elements...')
-      output_dir = OUTPUT_DIR + '/' + location_name + '_' + str(k)
-      if not os.path.isdir(output_dir):
-        os.mkdir(output_dir)
-      files = sorted(v)
-      triplet = []
-      seg_triplet = []
-      ct = 1
-
-      # Find applicable intrinsics.
-      for j in range(len(files)):
-        osegname = os.path.basename(files[j]).split('_')[1]
-        oimgnr = os.path.basename(files[j]).split('_')[2]
-        applicable_intrinsics = (INPUT_DIR + '/camera/' + SUB_FOLDER + '/' +
-                                 location_name + '/' + location_name + '_' +
-                                 osegname + '_' + oimgnr + '_camera.json')
-        # Get the intrinsics from one of the files of the sequence.
-        if os.path.isfile(applicable_intrinsics):
-          f = open(applicable_intrinsics, 'r')
-          lines = f.readlines()
-          f.close()
-          lines = [line.rstrip() for line in lines]
-
-          fx = float(lines[11].split(': ')[1].replace(',', ''))
-          fy = float(lines[12].split(': ')[1].replace(',', ''))
-          cx = float(lines[13].split(': ')[1].replace(',', ''))
-          cy = float(lines[14].split(': ')[1].replace(',', ''))
-
-      for j in range(0, len(files), SKIP):
-        img = cv2.imread(files[j])
-        segimg = cv2.imread(files[j].replace('.png', '-seg.png'))
-
-        smallimg, segimg, fx_this, fy_this, cx_this, cy_this = crop(img, segimg, fx, fy, cx, cy)
-        triplet.append(smallimg)
-        seg_triplet.append(segimg)
-        if len(triplet) == 3:
-          cmb = np.hstack(triplet)
-          align1, align2, align3 = align(seg_triplet[0], seg_triplet[1], seg_triplet[2])
-          cmb_seg = np.hstack([align1, align2, align3])
-          cv2.imwrite(os.path.join(output_dir, str(ct).zfill(10) + '.png'), cmb)
-          cv2.imwrite(os.path.join(output_dir, str(ct).zfill(10) + '-fseg.png'), cmb_seg)
-          f = open(os.path.join(output_dir, str(ct).zfill(10) + '_cam.txt'), 'w')
-          f.write(str(fx_this) + ',0.0,' + str(cx_this) + ',0.0,' + str(fy_this) + ',' + str(cy_this) + ',0.0,0.0,1.0')
-          f.close()
-          del triplet[0]
-          del seg_triplet[0]
-          ct += 1
-
-# Create file list for training. Be careful, as it collects and includes all files recursively.
-fn = open(OUTPUT_DIR + '/' + SUB_FOLDER + '.txt', 'w') -for f in glob.glob(OUTPUT_DIR + '/*/*.png'): - if '-seg.png' in f or '-fseg.png' in f: - continue - folder_name = f.split('/')[-2] - img_name = f.split('/')[-1].replace('.png', '') - fn.write(folder_name + ' ' + img_name + '\n') -fn.close() - - -def main(_): - run_all() - - -if __name__ == '__main__': - app.run(main) diff --git a/research/struct2depth/gen_data_kitti.py b/research/struct2depth/gen_data_kitti.py deleted file mode 100644 index 8577c4c67..000000000 --- a/research/struct2depth/gen_data_kitti.py +++ /dev/null @@ -1,149 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -""" Offline data generation for the KITTI dataset.""" - -import os -from absl import app -from absl import flags -from absl import logging -import numpy as np -import cv2 -import os, glob - -import alignment -from alignment import compute_overlap -from alignment import align - - -SEQ_LENGTH = 3 -WIDTH = 416 -HEIGHT = 128 -STEPSIZE = 1 -INPUT_DIR = '/usr/local/google/home/anelia/struct2depth/KITTI_FULL/kitti-raw-uncompressed' -OUTPUT_DIR = '/usr/local/google/home/anelia/struct2depth/KITTI_procesed/' - - -def get_line(file, start): - file = open(file, 'r') - lines = file.readlines() - lines = [line.rstrip() for line in lines] - ret = None - for line in lines: - nline = line.split(': ') - if nline[0]==start: - ret = nline[1].split(' ') - ret = np.array([float(r) for r in ret], dtype=float) - ret = ret.reshape((3,4))[0:3, 0:3] - break - file.close() - return ret - - -def crop(img, segimg, fx, fy, cx, cy): - # Perform center cropping, preserving 50% vertically. - middle_perc = 0.50 - left = 1-middle_perc - half = left/2 - a = img[int(img.shape[0]*(half)):int(img.shape[0]*(1-half)), :] - aseg = segimg[int(segimg.shape[0]*(half)):int(segimg.shape[0]*(1-half)), :] - cy /= (1/middle_perc) - - # Resize to match target height while preserving aspect ratio. - wdt = int((128*a.shape[1]/a.shape[0])) - x_scaling = float(wdt)/a.shape[1] - y_scaling = 128.0/a.shape[0] - b = cv2.resize(a, (wdt, 128)) - bseg = cv2.resize(aseg, (wdt, 128)) - - # Adjust intrinsics. - fx*=x_scaling - fy*=y_scaling - cx*=x_scaling - cy*=y_scaling - - # Perform center cropping horizontally. 
-  remain = b.shape[1] - 416
-  cx /= (b.shape[1] / 416)
-  c = b[:, int(remain/2):b.shape[1]-int(remain/2)]
-  cseg = bseg[:, int(remain/2):b.shape[1]-int(remain/2)]
-
-  return c, cseg, fx, fy, cx, cy
-
-
-def run_all():
-  global OUTPUT_DIR
-  if not OUTPUT_DIR.endswith('/'):
-    OUTPUT_DIR = OUTPUT_DIR + '/'
-
-  for d in glob.glob(INPUT_DIR + '/*/'):
-    date = d.split('/')[-2]
-    file_calibration = d + 'calib_cam_to_cam.txt'
-    calib_raw = [get_line(file_calibration, 'P_rect_02'),
-                 get_line(file_calibration, 'P_rect_03')]
-
-    for d2 in glob.glob(d + '*/'):
-      seqname = d2.split('/')[-2]
-      print('Processing sequence', seqname)
-      for subfolder in ['image_02/data', 'image_03/data']:
-        ct = 1
-        seqname = d2.split('/')[-2] + subfolder.replace('image', '').replace('/data', '')
-        if not os.path.exists(OUTPUT_DIR + seqname):
-          os.mkdir(OUTPUT_DIR + seqname)
-
-        calib_camera = calib_raw[0] if subfolder == 'image_02/data' else calib_raw[1]
-        folder = d2 + subfolder
-        files = glob.glob(folder + '/*.png')
-        files = [file for file in files
-                 if 'disp' not in file and 'flip' not in file and 'seg' not in file]
-        files = sorted(files)
-        for i in range(SEQ_LENGTH, len(files) + 1, STEPSIZE):
-          imgnum = str(ct).zfill(10)
-          if os.path.exists(OUTPUT_DIR + seqname + '/' + imgnum + '.png'):
-            ct += 1
-            continue
-          big_img = np.zeros(shape=(HEIGHT, WIDTH * SEQ_LENGTH, 3))
-          wct = 0
-
-          for j in range(i - SEQ_LENGTH, i):  # Collect frames for this sample.
-            img = cv2.imread(files[j])
-            ORIGINAL_HEIGHT, ORIGINAL_WIDTH, _ = img.shape
-
-            zoom_x = WIDTH / ORIGINAL_WIDTH
-            zoom_y = HEIGHT / ORIGINAL_HEIGHT
-
-            # Adjust intrinsics.
-            calib_current = calib_camera.copy()
-            calib_current[0, 0] *= zoom_x
-            calib_current[0, 2] *= zoom_x
-            calib_current[1, 1] *= zoom_y
-            calib_current[1, 2] *= zoom_y
-
-            calib_representation = ','.join([str(c) for c in calib_current.flatten()])
-
-            img = cv2.resize(img, (WIDTH, HEIGHT))
-            big_img[:, wct * WIDTH:(wct + 1) * WIDTH] = img
-            wct += 1
-          cv2.imwrite(OUTPUT_DIR + seqname + '/' + imgnum + '.png', big_img)
-          f = open(OUTPUT_DIR + seqname + '/' + imgnum + '_cam.txt', 'w')
-          f.write(calib_representation)
-          f.close()
-          ct += 1
-
-
-def main(_):
-  run_all()
-
-
-if __name__ == '__main__':
-  app.run(main)
diff --git a/research/struct2depth/inference.py b/research/struct2depth/inference.py
deleted file mode 100644
index 042e2be17..000000000
--- a/research/struct2depth/inference.py
+++ /dev/null
@@ -1,416 +0,0 @@
-
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Runs struct2depth at inference.
Produces depth estimates, ego-motion and object motion.""" - -# Example usage: -# -# python inference.py \ -# --input_dir ~/struct2depth/kitti-raw-uncompressed/ \ -# --output_dir ~/struct2depth/output \ -# --model_ckpt ~/struct2depth/model/model-199160 -# --file_extension png \ -# --depth \ -# --egomotion true \ - - - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -from absl import app -from absl import flags -from absl import logging -#import matplotlib.pyplot as plt -import model -import numpy as np -import fnmatch -import tensorflow as tf -import nets -import util - -gfile = tf.gfile - -# CMAP = 'plasma' - -INFERENCE_MODE_SINGLE = 'single' # Take plain single-frame input. -INFERENCE_MODE_TRIPLETS = 'triplets' # Take image triplets as input. -# For KITTI, we just resize input images and do not perform cropping. For -# Cityscapes, the car hood and more image content has been cropped in order -# to fit aspect ratio, and remove static content from the images. This has to be -# kept at inference time. -INFERENCE_CROP_NONE = 'none' -INFERENCE_CROP_CITYSCAPES = 'cityscapes' - - -flags.DEFINE_string('output_dir', None, 'Directory to store predictions.') -flags.DEFINE_string('file_extension', 'png', 'Image data file extension of ' - 'files provided with input_dir. Also determines the output ' - 'file format of depth prediction images.') -flags.DEFINE_bool('depth', True, 'Determines if the depth prediction network ' - 'should be executed and its predictions be saved.') -flags.DEFINE_bool('egomotion', False, 'Determines if the egomotion prediction ' - 'network should be executed and its predictions be saved. If ' - 'inference is run in single inference mode, it is assumed ' - 'that files in the same directory belong in the same ' - 'sequence, and sorting them alphabetically establishes the ' - 'right temporal order.') -flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to evaluate.') -flags.DEFINE_string('input_dir', None, 'Directory containing image files to ' - 'evaluate. This crawls recursively for images in the ' - 'directory, mirroring relative subdirectory structures ' - 'into the output directory.') -flags.DEFINE_string('input_list_file', None, 'Text file containing paths to ' - 'image files to process. Paths should be relative with ' - 'respect to the list file location. Relative path ' - 'structures will be mirrored in the output directory.') -flags.DEFINE_integer('batch_size', 1, 'The size of a sample batch') -flags.DEFINE_integer('img_height', 128, 'Input frame height.') -flags.DEFINE_integer('img_width', 416, 'Input frame width.') -flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.') -flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES, - 'Defines the architecture to use for the depth prediction ' - 'network. Defaults to ResNet-based encoder and accompanying ' - 'decoder.') -flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input ' - 'images channel-wise so that they match the distribution ' - 'most ImageNet-models were trained on.') -flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the ' - 'encoder-decoder architecture.') -flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters ' - 'between the depth and egomotion networks by using a joint ' - 'encoder architecture. 
The egomotion network is then ' - 'operating only on the hidden representation provided by the ' - 'joint encoder.') -flags.DEFINE_bool('shuffle', False, 'Whether to shuffle the order in which ' - 'images are processed.') -flags.DEFINE_bool('flip', False, 'Whether images should be flipped as well as ' - 'resulting predictions (for test-time augmentation). This ' - 'currently applies to the depth network only.') -flags.DEFINE_enum('inference_mode', INFERENCE_MODE_SINGLE, - [INFERENCE_MODE_SINGLE, - INFERENCE_MODE_TRIPLETS], - 'Whether to use triplet mode for inference, which accepts ' - 'triplets instead of single frames.') -flags.DEFINE_enum('inference_crop', INFERENCE_CROP_NONE, - [INFERENCE_CROP_NONE, - INFERENCE_CROP_CITYSCAPES], - 'Whether to apply a Cityscapes-specific crop on the input ' - 'images first before running inference.') -flags.DEFINE_bool('use_masks', False, 'Whether to mask out potentially ' - 'moving objects when feeding image input to the egomotion ' - 'network. This might improve odometry results when using ' - 'a motion model. For this, pre-computed segmentation ' - 'masks have to be available for every image, with the ' - 'background being zero.') - -FLAGS = flags.FLAGS - -flags.mark_flag_as_required('output_dir') -flags.mark_flag_as_required('model_ckpt') - - -def _run_inference(output_dir=None, - file_extension='png', - depth=True, - egomotion=False, - model_ckpt=None, - input_dir=None, - input_list_file=None, - batch_size=1, - img_height=128, - img_width=416, - seq_length=3, - architecture=nets.RESNET, - imagenet_norm=True, - use_skip=True, - joint_encoder=True, - shuffle=False, - flip_for_depth=False, - inference_mode=INFERENCE_MODE_SINGLE, - inference_crop=INFERENCE_CROP_NONE, - use_masks=False): - """Runs inference. Refer to flags in inference.py for details.""" - inference_model = model.Model(is_training=False, - batch_size=batch_size, - img_height=img_height, - img_width=img_width, - seq_length=seq_length, - architecture=architecture, - imagenet_norm=imagenet_norm, - use_skip=use_skip, - joint_encoder=joint_encoder) - vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt) - saver = tf.train.Saver(vars_to_restore) - sv = tf.train.Supervisor(logdir='/tmp/', saver=None) - with sv.managed_session() as sess: - saver.restore(sess, model_ckpt) - if not gfile.Exists(output_dir): - gfile.MakeDirs(output_dir) - logging.info('Predictions will be saved in %s.', output_dir) - - # Collect all images to run inference on. - im_files, basepath_in = collect_input_images(input_dir, input_list_file, - file_extension) - if shuffle: - logging.info('Shuffling data...') - np.random.shuffle(im_files) - logging.info('Running inference on %d files.', len(im_files)) - - # Create missing output folders and pre-compute target directories. - output_dirs = create_output_dirs(im_files, basepath_in, output_dir) - - # Run depth prediction network. - if depth: - im_batch = [] - for i in range(len(im_files)): - if i % 100 == 0: - logging.info('%s of %s files processed.', i, len(im_files)) - - # Read image and run inference. 
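-        # In single-frame mode the image is loaded (and optionally
-        # Cityscapes-cropped) directly; in triplet mode the middle frame is
-        # cut out of the horizontally stacked triplet.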
- if inference_mode == INFERENCE_MODE_SINGLE: - if inference_crop == INFERENCE_CROP_NONE: - im = util.load_image(im_files[i], resize=(img_width, img_height)) - elif inference_crop == INFERENCE_CROP_CITYSCAPES: - im = util.crop_cityscapes(util.load_image(im_files[i]), - resize=(img_width, img_height)) - elif inference_mode == INFERENCE_MODE_TRIPLETS: - im = util.load_image(im_files[i], resize=(img_width * 3, img_height)) - im = im[:, img_width:img_width*2] - if flip_for_depth: - im = np.flip(im, axis=1) - im_batch.append(im) - - if len(im_batch) == batch_size or i == len(im_files) - 1: - # Call inference on batch. - for _ in range(batch_size - len(im_batch)): # Fill up batch. - im_batch.append(np.zeros(shape=(img_height, img_width, 3), - dtype=np.float32)) - im_batch = np.stack(im_batch, axis=0) - est_depth = inference_model.inference_depth(im_batch, sess) - if flip_for_depth: - est_depth = np.flip(est_depth, axis=2) - im_batch = np.flip(im_batch, axis=2) - - for j in range(len(im_batch)): - color_map = util.normalize_depth_for_display( - np.squeeze(est_depth[j])) - visualization = np.concatenate((im_batch[j], color_map), axis=0) - # Save raw prediction and color visualization. Extract filename - # without extension from full path: e.g. path/to/input_dir/folder1/ - # file1.png -> file1 - k = i - len(im_batch) + 1 + j - filename_root = os.path.splitext(os.path.basename(im_files[k]))[0] - pref = '_flip' if flip_for_depth else '' - output_raw = os.path.join( - output_dirs[k], filename_root + pref + '.npy') - output_vis = os.path.join( - output_dirs[k], filename_root + pref + '.png') - with gfile.Open(output_raw, 'wb') as f: - np.save(f, est_depth[j]) - util.save_image(output_vis, visualization, file_extension) - im_batch = [] - - # Run egomotion network. - if egomotion: - if inference_mode == INFERENCE_MODE_SINGLE: - # Run regular egomotion inference loop. - input_image_seq = [] - input_seg_seq = [] - current_sequence_dir = None - current_output_handle = None - for i in range(len(im_files)): - sequence_dir = os.path.dirname(im_files[i]) - if sequence_dir != current_sequence_dir: - # Assume start of a new sequence, since this image lies in a - # different directory than the previous ones. - # Clear egomotion input buffer. - output_filepath = os.path.join(output_dirs[i], 'egomotion.txt') - if current_output_handle is not None: - current_output_handle.close() - current_sequence_dir = sequence_dir - logging.info('Writing egomotion sequence to %s.', output_filepath) - current_output_handle = gfile.Open(output_filepath, 'w') - input_image_seq = [] - im = util.load_image(im_files[i], resize=(img_width, img_height)) - input_image_seq.append(im) - if use_masks: - im_seg_path = im_files[i].replace('.%s' % file_extension, - '-seg.%s' % file_extension) - if not gfile.Exists(im_seg_path): - raise ValueError('No segmentation mask %s has been found for ' - 'image %s. If none are available, disable ' - 'use_masks.' % (im_seg_path, im_files[i])) - input_seg_seq.append(util.load_image(im_seg_path, - resize=(img_width, img_height), - interpolation='nn')) - - if len(input_image_seq) < seq_length: # Buffer not filled yet. - continue - if len(input_image_seq) > seq_length: # Remove oldest entry. 
-            del input_image_seq[0]
-            if use_masks:
-              del input_seg_seq[0]
-
-          input_image_stack = np.concatenate(input_image_seq, axis=2)
-          input_image_stack = np.expand_dims(input_image_stack, axis=0)
-          if use_masks:
-            input_image_stack = mask_image_stack(input_image_stack,
-                                                 input_seg_seq)
-          est_egomotion = np.squeeze(inference_model.inference_egomotion(
-              input_image_stack, sess))
-          egomotion_str = []
-          for j in range(seq_length - 1):
-            egomotion_str.append(','.join([str(d) for d in est_egomotion[j]]))
-          current_output_handle.write(
-              str(i) + ' ' + ' '.join(egomotion_str) + '\n')
-        if current_output_handle is not None:
-          current_output_handle.close()
-      elif inference_mode == INFERENCE_MODE_TRIPLETS:
-        written_before = []
-        for i in range(len(im_files)):
-          im = util.load_image(im_files[i], resize=(img_width * 3, img_height))
-          input_image_stack = np.concatenate(
-              [im[:, :img_width], im[:, img_width:img_width*2],
-               im[:, img_width*2:]], axis=2)
-          input_image_stack = np.expand_dims(input_image_stack, axis=0)
-          if use_masks:
-            im_seg_path = im_files[i].replace('.%s' % file_extension,
-                                              '-seg.%s' % file_extension)
-            if not gfile.Exists(im_seg_path):
-              raise ValueError('No segmentation mask %s has been found for '
-                               'image %s. If none are available, disable '
-                               'use_masks.' % (im_seg_path, im_files[i]))
-            seg = util.load_image(im_seg_path,
-                                  resize=(img_width * 3, img_height),
-                                  interpolation='nn')
-            input_seg_seq = [seg[:, :img_width], seg[:, img_width:img_width*2],
-                             seg[:, img_width*2:]]
-            input_image_stack = mask_image_stack(input_image_stack,
-                                                 input_seg_seq)
-          est_egomotion = inference_model.inference_egomotion(
-              input_image_stack, sess)
-          est_egomotion = np.squeeze(est_egomotion)
-          egomotion_1_2 = ','.join([str(d) for d in est_egomotion[0]])
-          egomotion_2_3 = ','.join([str(d) for d in est_egomotion[1]])
-
-          output_filepath = os.path.join(output_dirs[i], 'egomotion.txt')
-          file_mode = 'w' if output_filepath not in written_before else 'a'
-          with gfile.Open(output_filepath, file_mode) as current_output_handle:
-            current_output_handle.write(str(i) + ' ' + egomotion_1_2 + ' ' +
-                                        egomotion_2_3 + '\n')
-          written_before.append(output_filepath)
-  logging.info('Done.')
-
-
-def mask_image_stack(input_image_stack, input_seg_seq):
-  """Masks out moving image contents by using the segmentation masks provided.
-
-  This can lead to better odometry accuracy for motion models, but is optional
-  to use. It is only called if use_masks is enabled.
-
-  Args:
-    input_image_stack: The input image stack of shape
-      (1, H, W, seq_length * 3).
-    input_seg_seq: List of segmentation masks with seq_length elements of shape
-      (H, W, C) for some number of channels C.
-
-  Returns:
-    Input image stack with detections provided by segmentation mask removed.
-  """
-  background = [mask == 0 for mask in input_seg_seq]
-  # A pixel is kept only if it is background in every frame of the sequence.
-  # (np.logical_and.reduce avoids relying on the Python 2 reduce() builtin.)
-  background = np.logical_and.reduce(background)
-  # If masks are RGB, assume all channels to be the same. Reduce to the first.
-  if background.ndim == 3 and background.shape[2] > 1:
-    background = np.expand_dims(background[:, :, 0], axis=2)
-  elif background.ndim == 2:  # Expand.
-    background = np.expand_dims(background, axis=2)
-  # background is now of shape (H, W, 1).
-  background_stack = np.tile(background, [1, 1, input_image_stack.shape[3]])
-  return np.multiply(input_image_stack, background_stack)
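To make the masking semantics concrete, here is a minimal sketch with toy arrays (the shapes and values are illustrative and not part of the original file; it assumes mask_image_stack above is in scope):

```python
import numpy as np

# Toy stand-ins: a (1, 4, 4, 9) image stack (seq_length=3) and three
# (4, 4, 1) segmentation masks in which non-zero pixels mark moving objects.
image_stack = np.ones((1, 4, 4, 9), dtype=np.float32)
seg_seq = [np.zeros((4, 4, 1), dtype=np.uint8) for _ in range(3)]
seg_seq[1][2, 2] = 7  # An object visible only in the middle frame.

masked = mask_image_stack(image_stack, seg_seq)
# Pixel (2, 2) is zeroed across all 9 channels because it is non-background
# in at least one frame; every other pixel passes through unchanged.
assert masked[0, 2, 2].sum() == 0
assert masked[0, 0, 0].sum() == 9
```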
-
-
-def collect_input_images(input_dir, input_list_file, file_extension):
-  """Collects all input images that are to be processed."""
-  if input_dir is not None:
-    im_files = _recursive_glob(input_dir, '*.' + file_extension)
-    basepath_in = os.path.normpath(input_dir)
-  elif input_list_file is not None:
-    im_files = util.read_text_lines(input_list_file)
-    basepath_in = os.path.dirname(input_list_file)
-    im_files = [os.path.join(basepath_in, f) for f in im_files]
-  im_files = [f for f in im_files if 'disp' not in f and '-seg' not in f and
-              '-fseg' not in f and '-flip' not in f]
-  return sorted(im_files), basepath_in
-
-
-def create_output_dirs(im_files, basepath_in, output_dir):
-  """Creates required directories, and returns output dir for each file."""
-  output_dirs = []
-  for i in range(len(im_files)):
-    relative_folder_in = os.path.relpath(
-        os.path.dirname(im_files[i]), basepath_in)
-    absolute_folder_out = os.path.join(output_dir, relative_folder_in)
-    if not gfile.IsDirectory(absolute_folder_out):
-      gfile.MakeDirs(absolute_folder_out)
-    output_dirs.append(absolute_folder_out)
-  return output_dirs
-
-
-def _recursive_glob(treeroot, pattern):
-  results = []
-  for base, _, files in os.walk(treeroot):
-    files = fnmatch.filter(files, pattern)
-    results.extend(os.path.join(base, f) for f in files)
-  return results
-
-
-def main(_):
-  if (FLAGS.input_dir is None) == (FLAGS.input_list_file is None):
-    raise ValueError('Exactly one of either input_dir or input_list_file has '
-                     'to be provided.')
-  if not FLAGS.depth and not FLAGS.egomotion:
-    raise ValueError('At least one of the depth and egomotion network has to '
-                     'be called for inference.')
-  if (FLAGS.inference_mode == INFERENCE_MODE_TRIPLETS and
-      FLAGS.seq_length != 3):
-    raise ValueError('For sequence lengths other than three, single inference '
-                     'mode has to be used.')
-
-  _run_inference(output_dir=FLAGS.output_dir,
-                 file_extension=FLAGS.file_extension,
-                 depth=FLAGS.depth,
-                 egomotion=FLAGS.egomotion,
-                 model_ckpt=FLAGS.model_ckpt,
-                 input_dir=FLAGS.input_dir,
-                 input_list_file=FLAGS.input_list_file,
-                 batch_size=FLAGS.batch_size,
-                 img_height=FLAGS.img_height,
-                 img_width=FLAGS.img_width,
-                 seq_length=FLAGS.seq_length,
-                 architecture=FLAGS.architecture,
-                 imagenet_norm=FLAGS.imagenet_norm,
-                 use_skip=FLAGS.use_skip,
-                 joint_encoder=FLAGS.joint_encoder,
-                 shuffle=FLAGS.shuffle,
-                 flip_for_depth=FLAGS.flip,
-                 inference_mode=FLAGS.inference_mode,
-                 inference_crop=FLAGS.inference_crop,
-                 use_masks=FLAGS.use_masks)
-
-
-if __name__ == '__main__':
-  app.run(main)
diff --git a/research/struct2depth/model.py b/research/struct2depth/model.py
deleted file mode 100644
index 873be26bb..000000000
--- a/research/struct2depth/model.py
+++ /dev/null
@@ -1,848 +0,0 @@
-
-# Copyright 2018 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== - -"""Build model for inference or training.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import logging -import numpy as np -import tensorflow as tf - -import nets -import project -import reader -import util - -gfile = tf.gfile -slim = tf.contrib.slim - -NUM_SCALES = 4 - - -class Model(object): - """Model code based on SfMLearner.""" - - def __init__(self, - data_dir=None, - file_extension='png', - is_training=True, - learning_rate=0.0002, - beta1=0.9, - reconstr_weight=0.85, - smooth_weight=0.05, - ssim_weight=0.15, - icp_weight=0.0, - batch_size=4, - img_height=128, - img_width=416, - seq_length=3, - architecture=nets.RESNET, - imagenet_norm=True, - weight_reg=0.05, - exhaustive_mode=False, - random_scale_crop=False, - flipping_mode=reader.FLIP_RANDOM, - random_color=True, - depth_upsampling=True, - depth_normalization=True, - compute_minimum_loss=True, - use_skip=True, - joint_encoder=True, - build_sum=True, - shuffle=True, - input_file='train', - handle_motion=False, - equal_weighting=False, - size_constraint_weight=0.0, - train_global_scale_var=True): - self.data_dir = data_dir - self.file_extension = file_extension - self.is_training = is_training - self.learning_rate = learning_rate - self.reconstr_weight = reconstr_weight - self.smooth_weight = smooth_weight - self.ssim_weight = ssim_weight - self.icp_weight = icp_weight - self.beta1 = beta1 - self.batch_size = batch_size - self.img_height = img_height - self.img_width = img_width - self.seq_length = seq_length - self.architecture = architecture - self.imagenet_norm = imagenet_norm - self.weight_reg = weight_reg - self.exhaustive_mode = exhaustive_mode - self.random_scale_crop = random_scale_crop - self.flipping_mode = flipping_mode - self.random_color = random_color - self.depth_upsampling = depth_upsampling - self.depth_normalization = depth_normalization - self.compute_minimum_loss = compute_minimum_loss - self.use_skip = use_skip - self.joint_encoder = joint_encoder - self.build_sum = build_sum - self.shuffle = shuffle - self.input_file = input_file - self.handle_motion = handle_motion - self.equal_weighting = equal_weighting - self.size_constraint_weight = size_constraint_weight - self.train_global_scale_var = train_global_scale_var - - logging.info('data_dir: %s', data_dir) - logging.info('file_extension: %s', file_extension) - logging.info('is_training: %s', is_training) - logging.info('learning_rate: %s', learning_rate) - logging.info('reconstr_weight: %s', reconstr_weight) - logging.info('smooth_weight: %s', smooth_weight) - logging.info('ssim_weight: %s', ssim_weight) - logging.info('icp_weight: %s', icp_weight) - logging.info('size_constraint_weight: %s', size_constraint_weight) - logging.info('beta1: %s', beta1) - logging.info('batch_size: %s', batch_size) - logging.info('img_height: %s', img_height) - logging.info('img_width: %s', img_width) - logging.info('seq_length: %s', seq_length) - logging.info('architecture: %s', architecture) - logging.info('imagenet_norm: %s', imagenet_norm) - logging.info('weight_reg: %s', weight_reg) - logging.info('exhaustive_mode: %s', exhaustive_mode) - logging.info('random_scale_crop: %s', random_scale_crop) - logging.info('flipping_mode: %s', flipping_mode) - logging.info('random_color: %s', random_color) - logging.info('depth_upsampling: %s', depth_upsampling) - logging.info('depth_normalization: %s', 
depth_normalization) - logging.info('compute_minimum_loss: %s', compute_minimum_loss) - logging.info('use_skip: %s', use_skip) - logging.info('joint_encoder: %s', joint_encoder) - logging.info('build_sum: %s', build_sum) - logging.info('shuffle: %s', shuffle) - logging.info('input_file: %s', input_file) - logging.info('handle_motion: %s', handle_motion) - logging.info('equal_weighting: %s', equal_weighting) - logging.info('train_global_scale_var: %s', train_global_scale_var) - - if self.size_constraint_weight > 0 or not is_training: - self.global_scale_var = tf.Variable( - 0.1, name='global_scale_var', - trainable=self.is_training and train_global_scale_var, - dtype=tf.float32, - constraint=lambda x: tf.clip_by_value(x, 0, np.infty)) - - if self.is_training: - self.reader = reader.DataReader(self.data_dir, self.batch_size, - self.img_height, self.img_width, - self.seq_length, NUM_SCALES, - self.file_extension, - self.random_scale_crop, - self.flipping_mode, - self.random_color, - self.imagenet_norm, - self.shuffle, - self.input_file) - self.build_train_graph() - else: - self.build_depth_test_graph() - self.build_egomotion_test_graph() - if self.handle_motion: - self.build_objectmotion_test_graph() - - # At this point, the model is ready. Print some info on model params. - util.count_parameters() - - def build_train_graph(self): - self.build_inference_for_training() - self.build_loss() - self.build_train_op() - if self.build_sum: - self.build_summaries() - - def build_inference_for_training(self): - """Invokes depth and ego-motion networks and computes clouds if needed.""" - (self.image_stack, self.image_stack_norm, self.seg_stack, - self.intrinsic_mat, self.intrinsic_mat_inv) = self.reader.read_data() - with tf.variable_scope('depth_prediction'): - # Organized by ...[i][scale]. Note that the order is flipped in - # variables in build_loss() below. - self.disp = {} - self.depth = {} - self.depth_upsampled = {} - self.inf_loss = 0.0 - # Organized by [i]. - disp_bottlenecks = [None] * self.seq_length - - if self.icp_weight > 0: - self.cloud = {} - for i in range(self.seq_length): - image = self.image_stack_norm[:, :, :, 3 * i:3 * (i + 1)] - - multiscale_disps_i, disp_bottlenecks[i] = nets.disp_net( - self.architecture, image, self.use_skip, - self.weight_reg, True) - multiscale_depths_i = [1.0 / d for d in multiscale_disps_i] - self.disp[i] = multiscale_disps_i - self.depth[i] = multiscale_depths_i - if self.depth_upsampling: - self.depth_upsampled[i] = [] - # Upsample low-resolution depth maps using differentiable bilinear - # interpolation. - for s in range(len(multiscale_depths_i)): - self.depth_upsampled[i].append(tf.image.resize_bilinear( - multiscale_depths_i[s], [self.img_height, self.img_width], - align_corners=True)) - - if self.icp_weight > 0: - multiscale_clouds_i = [ - project.get_cloud(d, - self.intrinsic_mat_inv[:, s, :, :], - name='cloud%d_%d' % (s, i)) - for (s, d) in enumerate(multiscale_depths_i) - ] - self.cloud[i] = multiscale_clouds_i - # Reuse the same depth graph for all images. - tf.get_variable_scope().reuse_variables() - - if self.handle_motion: - # Define egomotion network. This network can see the whole scene except - # for any moving objects as indicated by the provided segmentation masks. - # To avoid the network getting clues of motion by tracking those masks, we - # define the segmentation masks as the union temporally. 
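-      # Concretely, a pixel survives only if its segmentation id is 0
-      # (background) in all three frames; the resulting (B, H, W, 1) mask is
-      # tiled to all nine input channels before being multiplied in.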
- with tf.variable_scope('egomotion_prediction'): - base_input = self.image_stack_norm # (B, H, W, 9) - seg_input = self.seg_stack # (B, H, W, 9) - ref_zero = tf.constant(0, dtype=tf.uint8) - # Motion model is currently defined for three-frame sequences. - object_mask1 = tf.equal(seg_input[:, :, :, 0], ref_zero) - object_mask2 = tf.equal(seg_input[:, :, :, 3], ref_zero) - object_mask3 = tf.equal(seg_input[:, :, :, 6], ref_zero) - mask_complete = tf.expand_dims(tf.logical_and( # (B, H, W, 1) - tf.logical_and(object_mask1, object_mask2), object_mask3), axis=3) - mask_complete = tf.tile(mask_complete, (1, 1, 1, 9)) # (B, H, W, 9) - # Now mask out base_input. - self.mask_complete = tf.to_float(mask_complete) - self.base_input_masked = base_input * self.mask_complete - self.egomotion = nets.egomotion_net( - image_stack=self.base_input_masked, - disp_bottleneck_stack=None, - joint_encoder=False, - seq_length=self.seq_length, - weight_reg=self.weight_reg) - - # Define object motion network for refinement. This network only sees - # one object at a time over the whole sequence, and tries to estimate its - # motion. The sequence of images are the respective warped frames. - - # For each scale, contains batch_size elements of shape (N, 2, 6). - self.object_transforms = {} - # For each scale, contains batch_size elements of shape (N, H, W, 9). - self.object_masks = {} - self.object_masks_warped = {} - # For each scale, contains batch_size elements of size N. - self.object_ids = {} - - self.egomotions_seq = {} - self.warped_seq = {} - self.inputs_objectmotion_net = {} - with tf.variable_scope('objectmotion_prediction'): - # First, warp raw images according to overall egomotion. - for s in range(NUM_SCALES): - self.warped_seq[s] = [] - self.egomotions_seq[s] = [] - for source_index in range(self.seq_length): - egomotion_mat_i_1 = project.get_transform_mat( - self.egomotion, source_index, 1) - warped_image_i_1, _ = ( - project.inverse_warp( - self.image_stack[ - :, :, :, source_index*3:(source_index+1)*3], - self.depth_upsampled[1][s], - egomotion_mat_i_1, - self.intrinsic_mat[:, 0, :, :], - self.intrinsic_mat_inv[:, 0, :, :])) - - self.warped_seq[s].append(warped_image_i_1) - self.egomotions_seq[s].append(egomotion_mat_i_1) - - # Second, for every object in the segmentation mask, take its mask and - # warp it according to the egomotion estimate. Then put a threshold to - # binarize the warped result. Use this mask to mask out background and - # other objects, and pass the filtered image to the object motion - # network. - self.object_transforms[s] = [] - self.object_masks[s] = [] - self.object_ids[s] = [] - self.object_masks_warped[s] = [] - self.inputs_objectmotion_net[s] = {} - - for i in range(self.batch_size): - seg_sequence = self.seg_stack[i] # (H, W, 9=3*3) - object_ids = tf.unique(tf.reshape(seg_sequence, [-1]))[0] - self.object_ids[s].append(object_ids) - color_stack = [] - mask_stack = [] - mask_stack_warped = [] - for j in range(self.seq_length): - current_image = self.warped_seq[s][j][i] # (H, W, 3) - current_seg = seg_sequence[:, :, j * 3:(j+1) * 3] # (H, W, 3) - - def process_obj_mask_warp(obj_id): - """Performs warping of the individual object masks.""" - obj_mask = tf.to_float(tf.equal(current_seg, obj_id)) - # Warp obj_mask according to overall egomotion. 
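-              # The warp uses the middle frame's upsampled depth and the
-              # transform that maps frame j onto the middle frame, so the
-              # mask lands where the object appears after ego-motion alone.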
- obj_mask_warped, _ = ( - project.inverse_warp( - tf.expand_dims(obj_mask, axis=0), - # Middle frame, highest scale, batch element i: - tf.expand_dims(self.depth_upsampled[1][s][i], axis=0), - # Matrix for warping j into middle frame, batch elem. i: - tf.expand_dims(self.egomotions_seq[s][j][i], axis=0), - tf.expand_dims(self.intrinsic_mat[i, 0, :, :], axis=0), - tf.expand_dims(self.intrinsic_mat_inv[i, 0, :, :], - axis=0))) - obj_mask_warped = tf.squeeze(obj_mask_warped) - obj_mask_binarized = tf.greater( # Threshold to binarize mask. - obj_mask_warped, tf.constant(0.5)) - return tf.to_float(obj_mask_binarized) - - def process_obj_mask(obj_id): - """Returns the individual object masks separately.""" - return tf.to_float(tf.equal(current_seg, obj_id)) - object_masks = tf.map_fn( # (N, H, W, 3) - process_obj_mask, object_ids, dtype=tf.float32) - - if self.size_constraint_weight > 0: - # The object segmentation masks are all in object_masks. - # We need to measure the height of every of them, and get the - # approximate distance. - - # self.depth_upsampled of shape (seq_length, scale, B, H, W). - depth_pred = self.depth_upsampled[j][s][i] # (H, W) - def get_losses(obj_mask): - """Get motion constraint loss.""" - # Find height of segment. - coords = tf.where(tf.greater( # Shape (num_true, 2=yx) - obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32))) - y_max = tf.reduce_max(coords[:, 0]) - y_min = tf.reduce_min(coords[:, 0]) - seg_height = y_max - y_min - f_y = self.intrinsic_mat[i, 0, 1, 1] - approx_depth = ((f_y * self.global_scale_var) / - tf.to_float(seg_height)) - reference_pred = tf.boolean_mask( - depth_pred, tf.greater( - tf.reshape(obj_mask[:, :, 0], - (self.img_height, self.img_width, 1)), - tf.constant(0.5, dtype=tf.float32))) - - # Establish loss on approx_depth, a scalar, and - # reference_pred, our dense prediction. Normalize both to - # prevent degenerative depth shrinking. - global_mean_depth_pred = tf.reduce_mean(depth_pred) - reference_pred /= global_mean_depth_pred - approx_depth /= global_mean_depth_pred - spatial_err = tf.abs(reference_pred - approx_depth) - mean_spatial_err = tf.reduce_mean(spatial_err) - return mean_spatial_err - - losses = tf.map_fn( - get_losses, object_masks, dtype=tf.float32) - self.inf_loss += tf.reduce_mean(losses) - object_masks_warped = tf.map_fn( # (N, H, W, 3) - process_obj_mask_warp, object_ids, dtype=tf.float32) - filtered_images = tf.map_fn( - lambda mask: current_image * mask, object_masks_warped, - dtype=tf.float32) # (N, H, W, 3) - color_stack.append(filtered_images) - mask_stack.append(object_masks) - mask_stack_warped.append(object_masks_warped) - - # For this batch-element, if there are N moving objects, - # color_stack, mask_stack and mask_stack_warped contain both - # seq_length elements of shape (N, H, W, 3). - # We can now concatenate them on the last axis, creating a tensor of - # (N, H, W, 3*3 = 9), and, assuming N does not get too large so that - # we have enough memory, pass them in a single batch to the object - # motion network. - mask_stack = tf.concat(mask_stack, axis=3) # (N, H, W, 9) - mask_stack_warped = tf.concat(mask_stack_warped, axis=3) - color_stack = tf.concat(color_stack, axis=3) # (N, H, W, 9) - all_transforms = nets.objectmotion_net( - # We cut the gradient flow here as the object motion gradient - # should have no saying in how the egomotion network behaves. - # One could try just stopping the gradient for egomotion, but - # not for the depth prediction network. 
- image_stack=tf.stop_gradient(color_stack), - disp_bottleneck_stack=None, - joint_encoder=False, # Joint encoder not supported. - seq_length=self.seq_length, - weight_reg=self.weight_reg) - # all_transforms of shape (N, 2, 6). - self.object_transforms[s].append(all_transforms) - self.object_masks[s].append(mask_stack) - self.object_masks_warped[s].append(mask_stack_warped) - self.inputs_objectmotion_net[s][i] = color_stack - tf.get_variable_scope().reuse_variables() - else: - # Don't handle motion, classic model formulation. - with tf.name_scope('egomotion_prediction'): - if self.joint_encoder: - # Re-arrange disp_bottleneck_stack to be of shape - # [B, h_hid, w_hid, c_hid * seq_length]. Currently, it is a list with - # seq_length elements, each of dimension [B, h_hid, w_hid, c_hid]. - disp_bottleneck_stack = tf.concat(disp_bottlenecks, axis=3) - else: - disp_bottleneck_stack = None - self.egomotion = nets.egomotion_net( - image_stack=self.image_stack_norm, - disp_bottleneck_stack=disp_bottleneck_stack, - joint_encoder=self.joint_encoder, - seq_length=self.seq_length, - weight_reg=self.weight_reg) - - def build_loss(self): - """Adds ops for computing loss.""" - with tf.name_scope('compute_loss'): - self.reconstr_loss = 0 - self.smooth_loss = 0 - self.ssim_loss = 0 - self.icp_transform_loss = 0 - self.icp_residual_loss = 0 - - # self.images is organized by ...[scale][B, h, w, seq_len * 3]. - self.images = [None for _ in range(NUM_SCALES)] - # Following nested lists are organized by ...[scale][source-target]. - self.warped_image = [{} for _ in range(NUM_SCALES)] - self.warp_mask = [{} for _ in range(NUM_SCALES)] - self.warp_error = [{} for _ in range(NUM_SCALES)] - self.ssim_error = [{} for _ in range(NUM_SCALES)] - self.icp_transform = [{} for _ in range(NUM_SCALES)] - self.icp_residual = [{} for _ in range(NUM_SCALES)] - - self.middle_frame_index = util.get_seq_middle(self.seq_length) - - # Compute losses at each scale. - for s in range(NUM_SCALES): - # Scale image stack. - if s == 0: # Just as a precaution. TF often has interpolation bugs. - self.images[s] = self.image_stack - else: - height_s = int(self.img_height / (2**s)) - width_s = int(self.img_width / (2**s)) - self.images[s] = tf.image.resize_bilinear( - self.image_stack, [height_s, width_s], align_corners=True) - - # Smoothness. - if self.smooth_weight > 0: - for i in range(self.seq_length): - # When computing minimum loss, use the depth map from the middle - # frame only. - if not self.compute_minimum_loss or i == self.middle_frame_index: - disp_smoothing = self.disp[i][s] - if self.depth_normalization: - # Perform depth normalization, dividing by the mean. - mean_disp = tf.reduce_mean(disp_smoothing, axis=[1, 2, 3], - keep_dims=True) - disp_input = disp_smoothing / mean_disp - else: - disp_input = disp_smoothing - scaling_f = (1.0 if self.equal_weighting else 1.0 / (2**s)) - self.smooth_loss += scaling_f * self.depth_smoothness( - disp_input, self.images[s][:, :, :, 3 * i:3 * (i + 1)]) - - self.debug_all_warped_image_batches = [] - for i in range(self.seq_length): - for j in range(self.seq_length): - if i == j: - continue - - # When computing minimum loss, only consider the middle frame as - # target. - if self.compute_minimum_loss and j != self.middle_frame_index: - continue - # We only consider adjacent frames, unless either - # compute_minimum_loss is on (where the middle frame is matched with - # all other frames) or exhaustive_mode is on (where all frames are - # matched with each other). 
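-          # For seq_length=3 this means: with compute_minimum_loss, only the
-          # pairs 0->1 and 2->1 are evaluated; otherwise the adjacent pairs
-          # (0,1), (1,0), (1,2) and (2,1) are used.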
- if (not self.compute_minimum_loss and not self.exhaustive_mode and - abs(i - j) != 1): - continue - - selected_scale = 0 if self.depth_upsampling else s - source = self.images[selected_scale][:, :, :, 3 * i:3 * (i + 1)] - target = self.images[selected_scale][:, :, :, 3 * j:3 * (j + 1)] - - if self.depth_upsampling: - target_depth = self.depth_upsampled[j][s] - else: - target_depth = self.depth[j][s] - - key = '%d-%d' % (i, j) - - if self.handle_motion: - # self.seg_stack of shape (B, H, W, 9). - # target_depth corresponds to middle frame, of shape (B, H, W, 1). - - # Now incorporate the other warping results, performed according - # to the object motion network's predictions. - # self.object_masks batch_size elements of (N, H, W, 9). - # self.object_masks_warped batch_size elements of (N, H, W, 9). - # self.object_transforms batch_size elements of (N, 2, 6). - self.all_batches = [] - for batch_s in range(self.batch_size): - # To warp i into j, first take the base warping (this is the - # full image i warped into j using only the egomotion estimate). - base_warping = self.warped_seq[s][i][batch_s] - transform_matrices_thisbatch = tf.map_fn( - lambda transform: project.get_transform_mat( - tf.expand_dims(transform, axis=0), i, j)[0], - self.object_transforms[0][batch_s]) - - def inverse_warp_wrapper(matrix): - """Wrapper for inverse warping method.""" - warp_image, _ = ( - project.inverse_warp( - tf.expand_dims(base_warping, axis=0), - tf.expand_dims(target_depth[batch_s], axis=0), - tf.expand_dims(matrix, axis=0), - tf.expand_dims(self.intrinsic_mat[ - batch_s, selected_scale, :, :], axis=0), - tf.expand_dims(self.intrinsic_mat_inv[ - batch_s, selected_scale, :, :], axis=0))) - return warp_image - warped_images_thisbatch = tf.map_fn( - inverse_warp_wrapper, transform_matrices_thisbatch, - dtype=tf.float32) - warped_images_thisbatch = warped_images_thisbatch[:, 0, :, :, :] - # warped_images_thisbatch is now of shape (N, H, W, 9). - - # Combine warped frames into a single one, using the object - # masks. Result should be (1, 128, 416, 3). - # Essentially, we here want to sum them all up, filtered by the - # respective object masks. - mask_base_valid_source = tf.equal( - self.seg_stack[batch_s, :, :, i*3:(i+1)*3], - tf.constant(0, dtype=tf.uint8)) - mask_base_valid_target = tf.equal( - self.seg_stack[batch_s, :, :, j*3:(j+1)*3], - tf.constant(0, dtype=tf.uint8)) - mask_valid = tf.logical_and( - mask_base_valid_source, mask_base_valid_target) - self.base_warping = base_warping * tf.to_float(mask_valid) - background = tf.expand_dims(self.base_warping, axis=0) - def construct_const_filter_tensor(obj_id): - return tf.fill( - dims=[self.img_height, self.img_width, 3], - value=tf.sign(obj_id)) * tf.to_float( - tf.equal(self.seg_stack[batch_s, :, :, 3:6], - tf.cast(obj_id, dtype=tf.uint8))) - filter_tensor = tf.map_fn( - construct_const_filter_tensor, - tf.to_float(self.object_ids[s][batch_s])) - filter_tensor = tf.stack(filter_tensor, axis=0) - objects_to_add = tf.reduce_sum( - tf.multiply(warped_images_thisbatch, filter_tensor), - axis=0, keepdims=True) - combined = background + objects_to_add - self.all_batches.append(combined) - # Now of shape (B, 128, 416, 3). - self.warped_image[s][key] = tf.concat(self.all_batches, axis=0) - - else: - # Don't handle motion, classic model formulation. - egomotion_mat_i_j = project.get_transform_mat( - self.egomotion, i, j) - # Inverse warp the source image to the target image frame for - # photometric consistency loss. 
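-            # inverse_warp resamples the source frame into the target view
-            # using the target's depth map and the relative pose from i to j.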
- self.warped_image[s][key], self.warp_mask[s][key] = ( - project.inverse_warp( - source, - target_depth, - egomotion_mat_i_j, - self.intrinsic_mat[:, selected_scale, :, :], - self.intrinsic_mat_inv[:, selected_scale, :, :])) - - # Reconstruction loss. - self.warp_error[s][key] = tf.abs(self.warped_image[s][key] - target) - if not self.compute_minimum_loss: - self.reconstr_loss += tf.reduce_mean( - self.warp_error[s][key] * self.warp_mask[s][key]) - # SSIM. - if self.ssim_weight > 0: - self.ssim_error[s][key] = self.ssim(self.warped_image[s][key], - target) - # TODO(rezama): This should be min_pool2d(). - if not self.compute_minimum_loss: - ssim_mask = slim.avg_pool2d(self.warp_mask[s][key], 3, 1, - 'VALID') - self.ssim_loss += tf.reduce_mean( - self.ssim_error[s][key] * ssim_mask) - - # If the minimum loss should be computed, the loss calculation has been - # postponed until here. - if self.compute_minimum_loss: - for frame_index in range(self.middle_frame_index): - key1 = '%d-%d' % (frame_index, self.middle_frame_index) - key2 = '%d-%d' % (self.seq_length - frame_index - 1, - self.middle_frame_index) - logging.info('computing min error between %s and %s', key1, key2) - min_error = tf.minimum(self.warp_error[s][key1], - self.warp_error[s][key2]) - self.reconstr_loss += tf.reduce_mean(min_error) - if self.ssim_weight > 0: # Also compute the minimum SSIM loss. - min_error_ssim = tf.minimum(self.ssim_error[s][key1], - self.ssim_error[s][key2]) - self.ssim_loss += tf.reduce_mean(min_error_ssim) - - # Build the total loss as composed of L1 reconstruction, SSIM, smoothing - # and object size constraint loss as appropriate. - self.reconstr_loss *= self.reconstr_weight - self.total_loss = self.reconstr_loss - if self.smooth_weight > 0: - self.smooth_loss *= self.smooth_weight - self.total_loss += self.smooth_loss - if self.ssim_weight > 0: - self.ssim_loss *= self.ssim_weight - self.total_loss += self.ssim_loss - if self.size_constraint_weight > 0: - self.inf_loss *= self.size_constraint_weight - self.total_loss += self.inf_loss - - def gradient_x(self, img): - return img[:, :, :-1, :] - img[:, :, 1:, :] - - def gradient_y(self, img): - return img[:, :-1, :, :] - img[:, 1:, :, :] - - def depth_smoothness(self, depth, img): - """Computes image-aware depth smoothness loss.""" - depth_dx = self.gradient_x(depth) - depth_dy = self.gradient_y(depth) - image_dx = self.gradient_x(img) - image_dy = self.gradient_y(img) - weights_x = tf.exp(-tf.reduce_mean(tf.abs(image_dx), 3, keepdims=True)) - weights_y = tf.exp(-tf.reduce_mean(tf.abs(image_dy), 3, keepdims=True)) - smoothness_x = depth_dx * weights_x - smoothness_y = depth_dy * weights_y - return tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(abs(smoothness_y)) - - def ssim(self, x, y): - """Computes a differentiable structured image similarity measure.""" - c1 = 0.01**2 # As defined in SSIM to stabilize div. by small denominator. 
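-    # Together with c2 below, this matches (k1*L)^2 and (k2*L)^2 from the
-    # original SSIM paper (Wang et al., 2004) with k1=0.01, k2=0.03, L=1.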
- c2 = 0.03**2 - mu_x = slim.avg_pool2d(x, 3, 1, 'VALID') - mu_y = slim.avg_pool2d(y, 3, 1, 'VALID') - sigma_x = slim.avg_pool2d(x**2, 3, 1, 'VALID') - mu_x**2 - sigma_y = slim.avg_pool2d(y**2, 3, 1, 'VALID') - mu_y**2 - sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y - ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2) - ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2) - ssim = ssim_n / ssim_d - return tf.clip_by_value((1 - ssim) / 2, 0, 1) - - def build_train_op(self): - with tf.name_scope('train_op'): - optim = tf.train.AdamOptimizer(self.learning_rate, self.beta1) - self.train_op = slim.learning.create_train_op(self.total_loss, optim) - self.global_step = tf.Variable(0, name='global_step', trainable=False) - self.incr_global_step = tf.assign( - self.global_step, self.global_step + 1) - - def build_summaries(self): - """Adds scalar and image summaries for TensorBoard.""" - tf.summary.scalar('total_loss', self.total_loss) - tf.summary.scalar('reconstr_loss', self.reconstr_loss) - if self.smooth_weight > 0: - tf.summary.scalar('smooth_loss', self.smooth_loss) - if self.ssim_weight > 0: - tf.summary.scalar('ssim_loss', self.ssim_loss) - if self.icp_weight > 0: - tf.summary.scalar('icp_transform_loss', self.icp_transform_loss) - tf.summary.scalar('icp_residual_loss', self.icp_residual_loss) - - if self.size_constraint_weight > 0: - tf.summary.scalar('inf_loss', self.inf_loss) - tf.summary.histogram('global_scale_var', self.global_scale_var) - - if self.handle_motion: - for s in range(NUM_SCALES): - for batch_s in range(self.batch_size): - whole_strip = tf.concat([self.warped_seq[s][0][batch_s], - self.warped_seq[s][1][batch_s], - self.warped_seq[s][2][batch_s]], axis=1) - tf.summary.image('base_warp_batch%s_scale%s' % (batch_s, s), - tf.expand_dims(whole_strip, axis=0)) - - whole_strip_input = tf.concat( - [self.inputs_objectmotion_net[s][batch_s][:, :, :, 0:3], - self.inputs_objectmotion_net[s][batch_s][:, :, :, 3:6], - self.inputs_objectmotion_net[s][batch_s][:, :, :, 6:9]], axis=2) - tf.summary.image('input_objectmotion_batch%s_scale%s' % (batch_s, s), - whole_strip_input) # (B, H, 3*W, 3) - - for batch_s in range(self.batch_size): - whole_strip = tf.concat([self.base_input_masked[batch_s, :, :, 0:3], - self.base_input_masked[batch_s, :, :, 3:6], - self.base_input_masked[batch_s, :, :, 6:9]], - axis=1) - tf.summary.image('input_egomotion_batch%s' % batch_s, - tf.expand_dims(whole_strip, axis=0)) - - # Show transform predictions (of all objects). - for batch_s in range(self.batch_size): - for i in range(self.seq_length - 1): - # self.object_transforms contains batch_size elements of (N, 2, 6). 
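-          # Each 6-vector is laid out as (tx, ty, tz, rx, ry, rz), matching
-          # the histogram names below.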
- tf.summary.histogram('batch%d_tx%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 0]) - tf.summary.histogram('batch%d_ty%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 1]) - tf.summary.histogram('batch%d_tz%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 2]) - tf.summary.histogram('batch%d_rx%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 3]) - tf.summary.histogram('batch%d_ry%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 4]) - tf.summary.histogram('batch%d_rz%d' % (batch_s, i), - self.object_transforms[0][batch_s][:, i, 5]) - - for i in range(self.seq_length - 1): - tf.summary.histogram('tx%d' % i, self.egomotion[:, i, 0]) - tf.summary.histogram('ty%d' % i, self.egomotion[:, i, 1]) - tf.summary.histogram('tz%d' % i, self.egomotion[:, i, 2]) - tf.summary.histogram('rx%d' % i, self.egomotion[:, i, 3]) - tf.summary.histogram('ry%d' % i, self.egomotion[:, i, 4]) - tf.summary.histogram('rz%d' % i, self.egomotion[:, i, 5]) - - for s in range(NUM_SCALES): - for i in range(self.seq_length): - tf.summary.image('scale%d_image%d' % (s, i), - self.images[s][:, :, :, 3 * i:3 * (i + 1)]) - if i in self.depth: - tf.summary.histogram('scale%d_depth%d' % (s, i), self.depth[i][s]) - tf.summary.histogram('scale%d_disp%d' % (s, i), self.disp[i][s]) - tf.summary.image('scale%d_disparity%d' % (s, i), self.disp[i][s]) - - for key in self.warped_image[s]: - tf.summary.image('scale%d_warped_image%s' % (s, key), - self.warped_image[s][key]) - tf.summary.image('scale%d_warp_error%s' % (s, key), - self.warp_error[s][key]) - if self.ssim_weight > 0: - tf.summary.image('scale%d_ssim_error%s' % (s, key), - self.ssim_error[s][key]) - if self.icp_weight > 0: - tf.summary.image('scale%d_icp_residual%s' % (s, key), - self.icp_residual[s][key]) - transform = self.icp_transform[s][key] - tf.summary.histogram('scale%d_icp_tx%s' % (s, key), transform[:, 0]) - tf.summary.histogram('scale%d_icp_ty%s' % (s, key), transform[:, 1]) - tf.summary.histogram('scale%d_icp_tz%s' % (s, key), transform[:, 2]) - tf.summary.histogram('scale%d_icp_rx%s' % (s, key), transform[:, 3]) - tf.summary.histogram('scale%d_icp_ry%s' % (s, key), transform[:, 4]) - tf.summary.histogram('scale%d_icp_rz%s' % (s, key), transform[:, 5]) - - def build_depth_test_graph(self): - """Builds depth model reading from placeholders.""" - with tf.variable_scope('depth_prediction'): - input_image = tf.placeholder( - tf.float32, [self.batch_size, self.img_height, self.img_width, 3], - name='raw_input') - self.input_image = input_image - if self.imagenet_norm: - input_image = (input_image - reader.IMAGENET_MEAN) / reader.IMAGENET_SD - est_disp, _ = nets.disp_net(architecture=self.architecture, - image=input_image, - use_skip=self.use_skip, - weight_reg=self.weight_reg, - is_training=True) - est_depth = 1.0 / est_disp[0] - self.est_depth = est_depth - - def build_egomotion_test_graph(self): - """Builds egomotion model reading from placeholders.""" - input_image_stack = tf.placeholder( - tf.float32, - [1, self.img_height, self.img_width, self.seq_length * 3], - name='raw_input') - input_bottleneck_stack = None - - if self.imagenet_norm: - im_mean = tf.tile( - tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length]) - im_sd = tf.tile( - tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length]) - input_image_stack = (input_image_stack - im_mean) / im_sd - - if self.joint_encoder: - # Pre-compute embeddings here. 
- with tf.variable_scope('depth_prediction', reuse=True): - input_bottleneck_stack = [] - encoder_selected = nets.encoder(self.architecture) - for i in range(self.seq_length): - input_image = input_image_stack[:, :, :, i * 3:(i + 1) * 3] - tf.get_variable_scope().reuse_variables() - embedding, _ = encoder_selected( - target_image=input_image, - weight_reg=self.weight_reg, - is_training=True) - input_bottleneck_stack.append(embedding) - input_bottleneck_stack = tf.concat(input_bottleneck_stack, axis=3) - - with tf.variable_scope('egomotion_prediction'): - est_egomotion = nets.egomotion_net( - image_stack=input_image_stack, - disp_bottleneck_stack=input_bottleneck_stack, - joint_encoder=self.joint_encoder, - seq_length=self.seq_length, - weight_reg=self.weight_reg) - self.input_image_stack = input_image_stack - self.est_egomotion = est_egomotion - - def build_objectmotion_test_graph(self): - """Builds egomotion model reading from placeholders.""" - input_image_stack_om = tf.placeholder( - tf.float32, - [1, self.img_height, self.img_width, self.seq_length * 3], - name='raw_input') - - if self.imagenet_norm: - im_mean = tf.tile( - tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length]) - im_sd = tf.tile( - tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length]) - input_image_stack_om = (input_image_stack_om - im_mean) / im_sd - - with tf.variable_scope('objectmotion_prediction'): - est_objectmotion = nets.objectmotion_net( - image_stack=input_image_stack_om, - disp_bottleneck_stack=None, - joint_encoder=self.joint_encoder, - seq_length=self.seq_length, - weight_reg=self.weight_reg) - self.input_image_stack_om = input_image_stack_om - self.est_objectmotion = est_objectmotion - - def inference_depth(self, inputs, sess): - return sess.run(self.est_depth, feed_dict={self.input_image: inputs}) - - def inference_egomotion(self, inputs, sess): - return sess.run( - self.est_egomotion, feed_dict={self.input_image_stack: inputs}) - - def inference_objectmotion(self, inputs, sess): - return sess.run( - self.est_objectmotion, feed_dict={self.input_image_stack_om: inputs}) diff --git a/research/struct2depth/nets.py b/research/struct2depth/nets.py deleted file mode 100644 index 1cec1b36f..000000000 --- a/research/struct2depth/nets.py +++ /dev/null @@ -1,525 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Depth and Ego-Motion networks.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf -slim = tf.contrib.slim - -SIMPLE = 'simple' -RESNET = 'resnet' -ARCHITECTURES = [SIMPLE, RESNET] - -SCALE_TRANSLATION = 0.001 -SCALE_ROTATION = 0.01 - -# Disparity (inverse depth) values range from 0.01 to 10. Note that effectively, -# this is undone if depth normalization is used, which scales the values to -# have a mean of 1. 
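build_depth_test_graph above recovers depth by inverting the predicted disparity (est_depth = 1.0 / est_disp[0]), which is safe because the decoder output is bounded away from zero by the DISP_SCALING and MIN_DISP constants defined just below. A small sketch of that mapping:

import numpy as np

DISP_SCALING, MIN_DISP = 10.0, 0.01
sigmoid_out = np.array([0.0, 0.5, 1.0])       # Raw sigmoid outputs.
disp = sigmoid_out * DISP_SCALING + MIN_DISP  # Disparity in ~[0.01, 10].
depth = 1.0 / disp
print(depth)  # -> [100.0, ~0.1996, ~0.0999]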
-DISP_SCALING = 10 -MIN_DISP = 0.01 -WEIGHT_DECAY_KEY = 'WEIGHT_DECAY' -EGOMOTION_VEC_SIZE = 6 - - -def egomotion_net(image_stack, disp_bottleneck_stack, joint_encoder, seq_length, - weight_reg): - """Predict ego-motion vectors from a stack of frames or embeddings. - - Args: - image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order. - disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden, - seq_length * c_hidden] in order. - joint_encoder: Determines if the same encoder is used for computing the - bottleneck layer of both the egomotion and the depth prediction - network. If enabled, disp_bottleneck_stack is used as input, and the - encoding steps are skipped. If disabled, a separate encoder is defined - on image_stack. - seq_length: The sequence length used. - weight_reg: The amount of weight regularization. - - Returns: - Egomotion vectors with shape [B, seq_length - 1, 6]. - """ - num_egomotion_vecs = seq_length - 1 - with tf.variable_scope('pose_exp_net') as sc: - end_points_collection = sc.original_name_scope + '_end_points' - with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], - normalizer_fn=None, - weights_regularizer=slim.l2_regularizer(weight_reg), - normalizer_params=None, - activation_fn=tf.nn.relu, - outputs_collections=end_points_collection): - if not joint_encoder: - # Define separate encoder. If sharing, we can skip the encoding step, - # as the bottleneck layer will already be passed as input. - cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1') - cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2') - cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3') - cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4') - cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5') - - with tf.variable_scope('pose'): - inputs = disp_bottleneck_stack if joint_encoder else cnv5 - cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6') - cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7') - pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs - egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred', - stride=1, normalizer_fn=None, - activation_fn=None) - egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2]) - egomotion_res = tf.reshape( - egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE]) - # Tinghui found that scaling by a small constant facilitates training. - egomotion_scaled = tf.concat([egomotion_res[:, 0:3] * SCALE_TRANSLATION, - egomotion_res[:, 3:6] * SCALE_ROTATION], - axis=1) - return egomotion_scaled - - -def objectmotion_net(image_stack, disp_bottleneck_stack, joint_encoder, - seq_length, weight_reg): - """Predict object-motion vectors from a stack of frames or embeddings. - - Args: - image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order. - disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden, - seq_length * c_hidden] in order. - joint_encoder: Determines if the same encoder is used for computing the - bottleneck layer of both the egomotion and the depth prediction - network. If enabled, disp_bottleneck_stack is used as input, and the - encoding steps are skipped. If disabled, a separate encoder is defined - on image_stack. - seq_length: The sequence length used. - weight_reg: The amount of weight regularization. - - Returns: - Egomotion vectors with shape [B, seq_length - 1, 6]. 
- """ - num_egomotion_vecs = seq_length - 1 - with tf.variable_scope('pose_exp_net') as sc: - end_points_collection = sc.original_name_scope + '_end_points' - with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], - normalizer_fn=None, - weights_regularizer=slim.l2_regularizer(weight_reg), - normalizer_params=None, - activation_fn=tf.nn.relu, - outputs_collections=end_points_collection): - if not joint_encoder: - # Define separate encoder. If sharing, we can skip the encoding step, - # as the bottleneck layer will already be passed as input. - cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1') - cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2') - cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3') - cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4') - cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5') - - with tf.variable_scope('pose'): - inputs = disp_bottleneck_stack if joint_encoder else cnv5 - cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6') - cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7') - pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs - egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred', - stride=1, normalizer_fn=None, - activation_fn=None) - egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2]) - egomotion_res = tf.reshape( - egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE]) - # Tinghui found that scaling by a small constant facilitates training. - egomotion_scaled = tf.concat([egomotion_res[:, 0:3] * SCALE_TRANSLATION, - egomotion_res[:, 3:6] * SCALE_ROTATION], - axis=1) - return egomotion_scaled - - -def disp_net(architecture, image, use_skip, weight_reg, is_training): - """Defines an encoder-decoder architecture for depth prediction.""" - if architecture not in ARCHITECTURES: - raise ValueError('Unknown architecture.') - encoder_selected = encoder(architecture) - decoder_selected = decoder(architecture) - - # Encode image. - bottleneck, skip_connections = encoder_selected(image, weight_reg, - is_training) - # Decode to depth. - multiscale_disps_i = decoder_selected(target_image=image, - bottleneck=bottleneck, - weight_reg=weight_reg, - use_skip=use_skip, - skip_connections=skip_connections) - return multiscale_disps_i, bottleneck - - -def encoder(architecture): - return encoder_resnet if architecture == RESNET else encoder_simple - - -def decoder(architecture): - return decoder_resnet if architecture == RESNET else decoder_simple - - -def encoder_simple(target_image, weight_reg, is_training): - """Defines the old encoding architecture.""" - del is_training - with slim.arg_scope([slim.conv2d], - normalizer_fn=None, - normalizer_params=None, - weights_regularizer=slim.l2_regularizer(weight_reg), - activation_fn=tf.nn.relu): - # Define (joint) encoder. 
- cnv1 = slim.conv2d(target_image, 32, [7, 7], stride=2, scope='cnv1') - cnv1b = slim.conv2d(cnv1, 32, [7, 7], stride=1, scope='cnv1b') - cnv2 = slim.conv2d(cnv1b, 64, [5, 5], stride=2, scope='cnv2') - cnv2b = slim.conv2d(cnv2, 64, [5, 5], stride=1, scope='cnv2b') - cnv3 = slim.conv2d(cnv2b, 128, [3, 3], stride=2, scope='cnv3') - cnv3b = slim.conv2d(cnv3, 128, [3, 3], stride=1, scope='cnv3b') - cnv4 = slim.conv2d(cnv3b, 256, [3, 3], stride=2, scope='cnv4') - cnv4b = slim.conv2d(cnv4, 256, [3, 3], stride=1, scope='cnv4b') - cnv5 = slim.conv2d(cnv4b, 512, [3, 3], stride=2, scope='cnv5') - cnv5b = slim.conv2d(cnv5, 512, [3, 3], stride=1, scope='cnv5b') - cnv6 = slim.conv2d(cnv5b, 512, [3, 3], stride=2, scope='cnv6') - cnv6b = slim.conv2d(cnv6, 512, [3, 3], stride=1, scope='cnv6b') - cnv7 = slim.conv2d(cnv6b, 512, [3, 3], stride=2, scope='cnv7') - cnv7b = slim.conv2d(cnv7, 512, [3, 3], stride=1, scope='cnv7b') - return cnv7b, (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) - - -def decoder_simple(target_image, bottleneck, weight_reg, use_skip, - skip_connections): - """Defines the old depth decoder architecture.""" - h = target_image.get_shape()[1].value - w = target_image.get_shape()[2].value - (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) = skip_connections - with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], - normalizer_fn=None, - normalizer_params=None, - weights_regularizer=slim.l2_regularizer(weight_reg), - activation_fn=tf.nn.relu): - up7 = slim.conv2d_transpose(bottleneck, 512, [3, 3], stride=2, - scope='upcnv7') - up7 = _resize_like(up7, cnv6b) - if use_skip: - i7_in = tf.concat([up7, cnv6b], axis=3) - else: - i7_in = up7 - icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7') - - up6 = slim.conv2d_transpose(icnv7, 512, [3, 3], stride=2, scope='upcnv6') - up6 = _resize_like(up6, cnv5b) - if use_skip: - i6_in = tf.concat([up6, cnv5b], axis=3) - else: - i6_in = up6 - icnv6 = slim.conv2d(i6_in, 512, [3, 3], stride=1, scope='icnv6') - - up5 = slim.conv2d_transpose(icnv6, 256, [3, 3], stride=2, scope='upcnv5') - up5 = _resize_like(up5, cnv4b) - if use_skip: - i5_in = tf.concat([up5, cnv4b], axis=3) - else: - i5_in = up5 - icnv5 = slim.conv2d(i5_in, 256, [3, 3], stride=1, scope='icnv5') - - up4 = slim.conv2d_transpose(icnv5, 128, [3, 3], stride=2, scope='upcnv4') - up4 = _resize_like(up4, cnv3b) - if use_skip: - i4_in = tf.concat([up4, cnv3b], axis=3) - else: - i4_in = up4 - icnv4 = slim.conv2d(i4_in, 128, [3, 3], stride=1, scope='icnv4') - disp4 = (slim.conv2d(icnv4, 1, [3, 3], stride=1, activation_fn=tf.sigmoid, - normalizer_fn=None, scope='disp4') - * DISP_SCALING + MIN_DISP) - disp4_up = tf.image.resize_bilinear(disp4, [np.int(h / 4), np.int(w / 4)], - align_corners=True) - - up3 = slim.conv2d_transpose(icnv4, 64, [3, 3], stride=2, scope='upcnv3') - up3 = _resize_like(up3, cnv2b) - if use_skip: - i3_in = tf.concat([up3, cnv2b, disp4_up], axis=3) - else: - i3_in = tf.concat([up3, disp4_up]) - icnv3 = slim.conv2d(i3_in, 64, [3, 3], stride=1, scope='icnv3') - disp3 = (slim.conv2d(icnv3, 1, [3, 3], stride=1, activation_fn=tf.sigmoid, - normalizer_fn=None, scope='disp3') - * DISP_SCALING + MIN_DISP) - disp3_up = tf.image.resize_bilinear(disp3, [np.int(h / 2), np.int(w / 2)], - align_corners=True) - - up2 = slim.conv2d_transpose(icnv3, 32, [3, 3], stride=2, scope='upcnv2') - up2 = _resize_like(up2, cnv1b) - if use_skip: - i2_in = tf.concat([up2, cnv1b, disp3_up], axis=3) - else: - i2_in = tf.concat([up2, disp3_up]) - icnv2 = slim.conv2d(i2_in, 32, [3, 3], stride=1, scope='icnv2') - 
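One detail worth flagging in decoder_simple above: in the use_skip=False branches, tf.concat is called without its required axis argument (i3_in = tf.concat([up3, disp4_up]), and likewise for i2_in), which raises a TypeError in TF1; only the default skip-enabled path avoids it. The corrected call would concatenate along the channel axis, e.g.:

i3_in = tf.concat([up3, disp4_up], axis=3)  # Hypothetical fix: channel axis.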
disp2 = (slim.conv2d(icnv2, 1, [3, 3], stride=1, activation_fn=tf.sigmoid, - normalizer_fn=None, scope='disp2') - * DISP_SCALING + MIN_DISP) - disp2_up = tf.image.resize_bilinear(disp2, [h, w], align_corners=True) - - up1 = slim.conv2d_transpose(icnv2, 16, [3, 3], stride=2, scope='upcnv1') - i1_in = tf.concat([up1, disp2_up], axis=3) - icnv1 = slim.conv2d(i1_in, 16, [3, 3], stride=1, scope='icnv1') - disp1 = (slim.conv2d(icnv1, 1, [3, 3], stride=1, activation_fn=tf.sigmoid, - normalizer_fn=None, scope='disp1') - * DISP_SCALING + MIN_DISP) - return [disp1, disp2, disp3, disp4] - - -def encoder_resnet(target_image, weight_reg, is_training): - """Defines a ResNet18-based encoding architecture. - - This implementation follows Juyong Kim's implementation of ResNet18 on GitHub: - https://github.com/dalgu90/resnet-18-tensorflow - - Args: - target_image: Input tensor with shape [B, h, w, 3] to encode. - weight_reg: Parameter ignored. - is_training: Whether the model is being trained or not. - - Returns: - Tuple of tensors, with the first being the bottleneck layer as tensor of - size [B, h_hid, w_hid, c_hid], and others being intermediate layers - for building skip-connections. - """ - del weight_reg - encoder_filters = [64, 64, 128, 256, 512] - stride = 2 - - # conv1 - with tf.variable_scope('conv1'): - x = _conv(target_image, 7, encoder_filters[0], stride) - x = _bn(x, is_train=is_training) - econv1 = _relu(x) - x = tf.nn.max_pool(econv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME') - - # conv2_x - x = _residual_block(x, is_training, name='conv2_1') - econv2 = _residual_block(x, is_training, name='conv2_2') - - # conv3_x - x = _residual_block_first(econv2, is_training, encoder_filters[2], stride, - name='conv3_1') - econv3 = _residual_block(x, is_training, name='conv3_2') - - # conv4_x - x = _residual_block_first(econv3, is_training, encoder_filters[3], stride, - name='conv4_1') - econv4 = _residual_block(x, is_training, name='conv4_2') - - # conv5_x - x = _residual_block_first(econv4, is_training, encoder_filters[4], stride, - name='conv5_1') - econv5 = _residual_block(x, is_training, name='conv5_2') - return econv5, (econv4, econv3, econv2, econv1) - - -def decoder_resnet(target_image, bottleneck, weight_reg, use_skip, - skip_connections): - """Defines the depth decoder architecture. - - Args: - target_image: The original encoder input tensor with shape [B, h, w, 3]. - Just the shape information is used here. - bottleneck: Bottleneck layer to be decoded. - weight_reg: The amount of weight regularization. - use_skip: Whether the passed skip connections econv1, econv2, econv3 and - econv4 should be used. - skip_connections: Tensors for building skip-connections. - - Returns: - Disparities at 4 different scales. 
- """ - (econv4, econv3, econv2, econv1) = skip_connections - decoder_filters = [16, 32, 64, 128, 256] - default_pad = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - reg = slim.l2_regularizer(weight_reg) if weight_reg > 0.0 else None - with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], - normalizer_fn=None, - normalizer_params=None, - activation_fn=tf.nn.relu, - weights_regularizer=reg): - upconv5 = slim.conv2d_transpose(bottleneck, decoder_filters[4], [3, 3], - stride=2, scope='upconv5') - upconv5 = _resize_like(upconv5, econv4) - if use_skip: - i5_in = tf.concat([upconv5, econv4], axis=3) - else: - i5_in = upconv5 - i5_in = tf.pad(i5_in, default_pad, mode='REFLECT') - iconv5 = slim.conv2d(i5_in, decoder_filters[4], [3, 3], stride=1, - scope='iconv5', padding='VALID') - - upconv4 = slim.conv2d_transpose(iconv5, decoder_filters[3], [3, 3], - stride=2, scope='upconv4') - upconv4 = _resize_like(upconv4, econv3) - if use_skip: - i4_in = tf.concat([upconv4, econv3], axis=3) - else: - i4_in = upconv4 - i4_in = tf.pad(i4_in, default_pad, mode='REFLECT') - iconv4 = slim.conv2d(i4_in, decoder_filters[3], [3, 3], stride=1, - scope='iconv4', padding='VALID') - - disp4_input = tf.pad(iconv4, default_pad, mode='REFLECT') - disp4 = (slim.conv2d(disp4_input, 1, [3, 3], stride=1, - activation_fn=tf.sigmoid, normalizer_fn=None, - scope='disp4', padding='VALID') - * DISP_SCALING + MIN_DISP) - - upconv3 = slim.conv2d_transpose(iconv4, decoder_filters[2], [3, 3], - stride=2, scope='upconv3') - upconv3 = _resize_like(upconv3, econv2) - if use_skip: - i3_in = tf.concat([upconv3, econv2], axis=3) - else: - i3_in = upconv3 - i3_in = tf.pad(i3_in, default_pad, mode='REFLECT') - iconv3 = slim.conv2d(i3_in, decoder_filters[2], [3, 3], stride=1, - scope='iconv3', padding='VALID') - disp3_input = tf.pad(iconv3, default_pad, mode='REFLECT') - disp3 = (slim.conv2d(disp3_input, 1, [3, 3], stride=1, - activation_fn=tf.sigmoid, normalizer_fn=None, - scope='disp3', padding='VALID') - * DISP_SCALING + MIN_DISP) - - upconv2 = slim.conv2d_transpose(iconv3, decoder_filters[1], [3, 3], - stride=2, scope='upconv2') - upconv2 = _resize_like(upconv2, econv1) - if use_skip: - i2_in = tf.concat([upconv2, econv1], axis=3) - else: - i2_in = upconv2 - i2_in = tf.pad(i2_in, default_pad, mode='REFLECT') - iconv2 = slim.conv2d(i2_in, decoder_filters[1], [3, 3], stride=1, - scope='iconv2', padding='VALID') - disp2_input = tf.pad(iconv2, default_pad, mode='REFLECT') - disp2 = (slim.conv2d(disp2_input, 1, [3, 3], stride=1, - activation_fn=tf.sigmoid, normalizer_fn=None, - scope='disp2', padding='VALID') - * DISP_SCALING + MIN_DISP) - - upconv1 = slim.conv2d_transpose(iconv2, decoder_filters[0], [3, 3], - stride=2, scope='upconv1') - upconv1 = _resize_like(upconv1, target_image) - upconv1 = tf.pad(upconv1, default_pad, mode='REFLECT') - iconv1 = slim.conv2d(upconv1, decoder_filters[0], [3, 3], stride=1, - scope='iconv1', padding='VALID') - disp1_input = tf.pad(iconv1, default_pad, mode='REFLECT') - disp1 = (slim.conv2d(disp1_input, 1, [3, 3], stride=1, - activation_fn=tf.sigmoid, normalizer_fn=None, - scope='disp1', padding='VALID') - * DISP_SCALING + MIN_DISP) - - return [disp1, disp2, disp3, disp4] - - -def _residual_block_first(x, is_training, out_channel, strides, name='unit'): - """Helper function for defining ResNet architecture.""" - in_channel = x.get_shape().as_list()[-1] - with tf.variable_scope(name): - # Shortcut connection - if in_channel == out_channel: - if strides == 1: - shortcut = tf.identity(x) - else: - shortcut = 
tf.nn.max_pool(x, [1, strides, strides, 1], - [1, strides, strides, 1], 'VALID') - else: - shortcut = _conv(x, 1, out_channel, strides, name='shortcut') - # Residual - x = _conv(x, 3, out_channel, strides, name='conv_1') - x = _bn(x, is_train=is_training, name='bn_1') - x = _relu(x, name='relu_1') - x = _conv(x, 3, out_channel, 1, name='conv_2') - x = _bn(x, is_train=is_training, name='bn_2') - # Merge - x = x + shortcut - x = _relu(x, name='relu_2') - return x - - -def _residual_block(x, is_training, input_q=None, output_q=None, name='unit'): - """Helper function for defining ResNet architecture.""" - num_channel = x.get_shape().as_list()[-1] - with tf.variable_scope(name): - shortcut = x # Shortcut connection - # Residual - x = _conv(x, 3, num_channel, 1, input_q=input_q, output_q=output_q, - name='conv_1') - x = _bn(x, is_train=is_training, name='bn_1') - x = _relu(x, name='relu_1') - x = _conv(x, 3, num_channel, 1, input_q=output_q, output_q=output_q, - name='conv_2') - x = _bn(x, is_train=is_training, name='bn_2') - # Merge - x = x + shortcut - x = _relu(x, name='relu_2') - return x - - -def _conv(x, filter_size, out_channel, stride, pad='SAME', input_q=None, - output_q=None, name='conv'): - """Helper function for defining ResNet architecture.""" - if (input_q is None) ^ (output_q is None): - raise ValueError('Input/Output splits are not correctly given.') - - in_shape = x.get_shape() - with tf.variable_scope(name): - # Main operation: conv2d - with tf.device('/CPU:0'): - kernel = tf.get_variable( - 'kernel', [filter_size, filter_size, in_shape[3], out_channel], - tf.float32, initializer=tf.random_normal_initializer( - stddev=np.sqrt(2.0/filter_size/filter_size/out_channel))) - if kernel not in tf.get_collection(WEIGHT_DECAY_KEY): - tf.add_to_collection(WEIGHT_DECAY_KEY, kernel) - conv = tf.nn.conv2d(x, kernel, [1, stride, stride, 1], pad) - return conv - - -def _bn(x, is_train, name='bn'): - """Helper function for defining ResNet architecture.""" - bn = tf.layers.batch_normalization(x, training=is_train, name=name) - return bn - - -def _relu(x, name=None, leakness=0.0): - """Helper function for defining ResNet architecture.""" - if leakness > 0.0: - name = 'lrelu' if name is None else name - return tf.maximum(x, x*leakness, name='lrelu') - else: - name = 'relu' if name is None else name - return tf.nn.relu(x, name='relu') - - -def _resize_like(inputs, ref): - i_h, i_w = inputs.get_shape()[1], inputs.get_shape()[2] - r_h, r_w = ref.get_shape()[1], ref.get_shape()[2] - if i_h == r_h and i_w == r_w: - return inputs - else: - # TODO(casser): Other interpolation methods could be explored here. - return tf.image.resize_bilinear(inputs, [r_h.value, r_w.value], - align_corners=True) diff --git a/research/struct2depth/optimize.py b/research/struct2depth/optimize.py deleted file mode 100644 index becb3ab69..000000000 --- a/research/struct2depth/optimize.py +++ /dev/null @@ -1,383 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
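_residual_block above is the standard identity-shortcut ResNet unit: two 3x3 conv + batch-norm stages, with the input added back before the final ReLU. Stripped of the TF plumbing, the data flow is the following (a sketch; the lambdas stand in for conv + batch-norm and preserve shape):

import numpy as np

relu = lambda t: np.maximum(t, 0.0)
conv_bn = lambda t: t * 0.5           # Stand-in for a conv + batch-norm stage.

def residual_block(x):
    shortcut = x                      # Identity shortcut (same channels/stride).
    y = relu(conv_bn(x))              # conv_1 -> bn_1 -> relu_1
    y = conv_bn(y)                    # conv_2 -> bn_2
    return relu(y + shortcut)         # Merge with the shortcut, then relu_2.

print(residual_block(np.array([1.0, -2.0])))  # -> [1.25 0.  ]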
-# ============================================================================== - -"""Applies online refinement while running inference. - -Instructions: Run static inference first before calling this script. Make sure -to point output_dir to the same folder where static inference results were -saved previously. - -For example use, please refer to README. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import datetime -import os -import random -from absl import app -from absl import flags -from absl import logging -import numpy as np -import tensorflow as tf - -import model -import nets -import reader -import util - -gfile = tf.gfile -SAVE_EVERY = 1 # Defines the interval that predictions should be saved at. -SAVE_PREVIEWS = True # If set, while save image previews of depth predictions. -FIXED_SEED = 8964 # Fixed seed for repeatability. - -flags.DEFINE_string('output_dir', None, 'Directory to store predictions. ' - 'Assumes that regular inference has been executed before ' - 'and results were stored in this folder.') -flags.DEFINE_string('data_dir', None, 'Folder pointing to preprocessed ' - 'triplets to fine-tune on.') -flags.DEFINE_string('triplet_list_file', None, 'Text file containing paths to ' - 'image files to process. Paths should be relative with ' - 'respect to the list file location. Every line should be ' - 'of the form [input_folder_name] [input_frame_num] ' - '[output_path], where [output_path] is optional to specify ' - 'a different path to store the prediction.') -flags.DEFINE_string('triplet_list_file_remains', None, 'Optional text file ' - 'containing relative paths to image files which should not ' - 'be fine-tuned, e.g. because of missing adjacent frames. ' - 'For all files listed, the static prediction will be ' - 'copied instead. File can be empty. If not, every line ' - 'should be of the form [input_folder_name] ' - '[input_frame_num] [output_path], where [output_path] is ' - 'optional to specify a different path to take and store ' - 'the unrefined prediction from/to.') -flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to optimize.') -flags.DEFINE_string('ft_name', '', 'Optional prefix for temporary files.') -flags.DEFINE_string('file_extension', 'png', 'Image data file extension.') -flags.DEFINE_float('learning_rate', 0.0001, 'Adam learning rate.') -flags.DEFINE_float('beta1', 0.9, 'Adam momentum.') -flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.') -flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.') -flags.DEFINE_float('smooth_weight', 0.01, 'Smoothness loss weight.') -flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.') -flags.DEFINE_float('size_constraint_weight', 0.0005, 'Weight of the object ' - 'size constraint loss. Use only with motion handling.') -flags.DEFINE_integer('batch_size', 1, 'The size of a sample batch') -flags.DEFINE_integer('img_height', 128, 'Input frame height.') -flags.DEFINE_integer('img_width', 416, 'Input frame width.') -flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.') -flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES, - 'Defines the architecture to use for the depth prediction ' - 'network. 
Defaults to ResNet-based encoder and accompanying ' - 'decoder.') -flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input ' - 'images channel-wise so that they match the distribution ' - 'most ImageNet-models were trained on.') -flags.DEFINE_float('weight_reg', 0.05, 'The amount of weight regularization to ' - 'apply. This has no effect on the ResNet-based encoder ' - 'architecture.') -flags.DEFINE_boolean('exhaustive_mode', False, 'Whether to exhaustively warp ' - 'from any frame to any other instead of just considering ' - 'adjacent frames. Where necessary, multiple egomotion ' - 'estimates will be applied. Does not have an effect if ' - 'compute_minimum_loss is enabled.') -flags.DEFINE_boolean('random_scale_crop', False, 'Whether to apply random ' - 'image scaling and center cropping during training.') -flags.DEFINE_bool('depth_upsampling', True, 'Whether to apply depth ' - 'upsampling of lower-scale representations before warping to ' - 'compute reconstruction loss on full-resolution image.') -flags.DEFINE_bool('depth_normalization', True, 'Whether to apply depth ' - 'normalization, that is, normalizing inverse depth ' - 'prediction maps by their mean to avoid degeneration towards ' - 'small values.') -flags.DEFINE_bool('compute_minimum_loss', True, 'Whether to take the ' - 'element-wise minimum of the reconstruction/SSIM error in ' - 'order to avoid overly penalizing dis-occlusion effects.') -flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the ' - 'encoder-decoder architecture.') -flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters ' - 'between the depth and egomotion networks by using a joint ' - 'encoder architecture. The egomotion network is then ' - 'operating only on the hidden representation provided by the ' - 'joint encoder.') -flags.DEFINE_float('egomotion_threshold', 0.01, 'Minimum egomotion magnitude ' - 'to apply finetuning. If lower, just forwards the ordinary ' - 'prediction.') -flags.DEFINE_integer('num_steps', 20, 'Number of optimization steps to run.') -flags.DEFINE_boolean('handle_motion', True, 'Whether the checkpoint was ' - 'trained with motion handling.') -flags.DEFINE_bool('flip', False, 'Whether images should be flipped as well as ' - 'resulting predictions (for test-time augmentation). This ' - 'currently applies to the depth network only.') - -FLAGS = flags.FLAGS -flags.mark_flag_as_required('output_dir') -flags.mark_flag_as_required('data_dir') -flags.mark_flag_as_required('model_ckpt') -flags.mark_flag_as_required('triplet_list_file') - - -def main(_): - """Runs fine-tuning and inference. - - There are three categories of images. - 1) Images where we have previous and next frame, and that are not filtered - out by the heuristic. For them, we will use the fine-tuned predictions. - 2) Images where we have previous and next frame, but that were filtered out - by our heuristic. For them, we will use the ordinary prediction instead. - 3) Images where we have at least one missing adjacent frame. For them, we will - use the ordinary prediction as indicated by triplet_list_file_remains (if - provided). They will also not be part of the generated inference list in - the first place. - - Raises: - ValueError: Invalid parameters have been passed. 
- """ - - if FLAGS.handle_motion and FLAGS.joint_encoder: - raise ValueError('Using a joint encoder is currently not supported when ' - 'modeling object motion.') - if FLAGS.handle_motion and FLAGS.seq_length != 3: - raise ValueError('The current motion model implementation only supports ' - 'using a sequence length of three.') - if FLAGS.handle_motion and not FLAGS.compute_minimum_loss: - raise ValueError('Computing the minimum photometric loss is required when ' - 'enabling object motion handling.') - if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion: - raise ValueError('To enforce object size constraints, enable motion ' - 'handling.') - if FLAGS.icp_weight > 0.0: - raise ValueError('ICP is currently not supported.') - if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1: - raise ValueError('Compute minimum loss requires using an odd number of ' - 'images in a sequence.') - if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode: - raise ValueError('Exhaustive mode has no effect when compute_minimum_loss ' - 'is enabled.') - if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0: - logging.warn('Image size is not divisible by 2^5. For the architecture ' - 'employed, this could cause artefacts caused by resizing in ' - 'lower dimensions.') - - if FLAGS.output_dir.endswith('/'): - FLAGS.output_dir = FLAGS.output_dir[:-1] - - # Create file lists to prepare fine-tuning, save it to unique_file. - unique_file_name = (str(datetime.datetime.now().date()) + '_' + - str(datetime.datetime.now().time()).replace(':', '_')) - unique_file = os.path.join(FLAGS.data_dir, unique_file_name + '.txt') - with gfile.FastGFile(FLAGS.triplet_list_file, 'r') as f: - files_to_process = f.readlines() - files_to_process = [line.rstrip() for line in files_to_process] - files_to_process = [line for line in files_to_process if len(line)] - logging.info('Creating unique file list %s with %s entries.', unique_file, - len(files_to_process)) - with gfile.FastGFile(unique_file, 'w') as f_out: - fetches_network = FLAGS.num_steps * FLAGS.batch_size - fetches_saves = FLAGS.batch_size * int(np.floor(FLAGS.num_steps/SAVE_EVERY)) - repetitions = fetches_network + 3 * fetches_saves - for i in range(len(files_to_process)): - for _ in range(repetitions): - f_out.write(files_to_process[i] + '\n') - - # Read remaining files. - remaining = [] - if gfile.Exists(FLAGS.triplet_list_file_remains): - with gfile.FastGFile(FLAGS.triplet_list_file_remains, 'r') as f: - remaining = f.readlines() - remaining = [line.rstrip() for line in remaining] - remaining = [line for line in remaining if len(line)] - logging.info('Running fine-tuning on %s files, %s files are remaining.', - len(files_to_process), len(remaining)) - - # Run fine-tuning process and save predictions in id-folders. 
- tf.set_random_seed(FIXED_SEED) - np.random.seed(FIXED_SEED) - random.seed(FIXED_SEED) - flipping_mode = reader.FLIP_ALWAYS if FLAGS.flip else reader.FLIP_NONE - train_model = model.Model(data_dir=FLAGS.data_dir, - file_extension=FLAGS.file_extension, - is_training=True, - learning_rate=FLAGS.learning_rate, - beta1=FLAGS.beta1, - reconstr_weight=FLAGS.reconstr_weight, - smooth_weight=FLAGS.smooth_weight, - ssim_weight=FLAGS.ssim_weight, - icp_weight=FLAGS.icp_weight, - batch_size=FLAGS.batch_size, - img_height=FLAGS.img_height, - img_width=FLAGS.img_width, - seq_length=FLAGS.seq_length, - architecture=FLAGS.architecture, - imagenet_norm=FLAGS.imagenet_norm, - weight_reg=FLAGS.weight_reg, - exhaustive_mode=FLAGS.exhaustive_mode, - random_scale_crop=FLAGS.random_scale_crop, - flipping_mode=flipping_mode, - random_color=False, - depth_upsampling=FLAGS.depth_upsampling, - depth_normalization=FLAGS.depth_normalization, - compute_minimum_loss=FLAGS.compute_minimum_loss, - use_skip=FLAGS.use_skip, - joint_encoder=FLAGS.joint_encoder, - build_sum=False, - shuffle=False, - input_file=unique_file_name, - handle_motion=FLAGS.handle_motion, - size_constraint_weight=FLAGS.size_constraint_weight, - train_global_scale_var=False) - - failed_heuristic_ids = finetune_inference(train_model, FLAGS.model_ckpt, - FLAGS.output_dir + '_ft') - logging.info('Fine-tuning completed, %s files were filtered out by ' - 'heuristic.', len(failed_heuristic_ids)) - for failed_id in failed_heuristic_ids: - failed_entry = files_to_process[failed_id] - remaining.append(failed_entry) - logging.info('In total, %s images were fine-tuned, while %s were not.', - len(files_to_process)-len(failed_heuristic_ids), len(remaining)) - - # Copy all results to have the same structural output as running ordinary - # inference. - for i in range(len(files_to_process)): - if files_to_process[i] not in remaining: # Use fine-tuned result. - elements = files_to_process[i].split(' ') - source_file = os.path.join(FLAGS.output_dir + '_ft', FLAGS.ft_name + - 'id_' + str(i), - str(FLAGS.num_steps).zfill(10) + - ('_flip' if FLAGS.flip else '')) - if len(elements) == 2: # No differing mapping defined. - target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0]) - target_file = os.path.join( - target_dir, elements[1] + ('_flip' if FLAGS.flip else '')) - else: # Other mapping for file defined, copy to this location instead. - target_dir = os.path.join( - FLAGS.output_dir + '_ft', os.path.dirname(elements[2])) - target_file = os.path.join( - target_dir, - os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else '')) - if not gfile.Exists(target_dir): - gfile.MakeDirs(target_dir) - logging.info('Copy refined result %s to %s.', source_file, target_file) - gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True) - gfile.Copy(source_file + '.txt', target_file + '.txt', overwrite=True) - gfile.Copy(source_file + '.%s' % FLAGS.file_extension, - target_file + '.%s' % FLAGS.file_extension, overwrite=True) - for j in range(len(remaining)): - elements = remaining[j].split(' ') - if len(elements) == 2: # No differing mapping defined. - target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0]) - target_file = os.path.join( - target_dir, elements[1] + ('_flip' if FLAGS.flip else '')) - else: # Other mapping for file defined, copy to this location instead. 
- target_dir = os.path.join( - FLAGS.output_dir + '_ft', os.path.dirname(elements[2])) - target_file = os.path.join( - target_dir, - os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else '')) - if not gfile.Exists(target_dir): - gfile.MakeDirs(target_dir) - source_file = target_file.replace('_ft', '') - logging.info('Copy unrefined result %s to %s.', source_file, target_file) - gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True) - gfile.Copy(source_file + '.%s' % FLAGS.file_extension, - target_file + '.%s' % FLAGS.file_extension, overwrite=True) - logging.info('Done, predictions saved in %s.', FLAGS.output_dir + '_ft') - - -def finetune_inference(train_model, model_ckpt, output_dir): - """Train model.""" - vars_to_restore = None - if model_ckpt is not None: - vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt) - ckpt_path = model_ckpt - pretrain_restorer = tf.train.Saver(vars_to_restore) - sv = tf.train.Supervisor(logdir=None, save_summaries_secs=0, saver=None, - summary_op=None) - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - img_nr = 0 - failed_heuristic = [] - with sv.managed_session(config=config) as sess: - # TODO(casser): Caching the weights would be better to avoid I/O bottleneck. - while True: # Loop terminates when all examples have been processed. - if model_ckpt is not None: - logging.info('Restored weights from %s', ckpt_path) - pretrain_restorer.restore(sess, ckpt_path) - logging.info('Running fine-tuning, image %s...', img_nr) - img_pred_folder = os.path.join( - output_dir, FLAGS.ft_name + 'id_' + str(img_nr)) - if not gfile.Exists(img_pred_folder): - gfile.MakeDirs(img_pred_folder) - step = 1 - - # Run fine-tuning. - while step <= FLAGS.num_steps: - logging.info('Running step %s of %s.', step, FLAGS.num_steps) - fetches = { - 'train': train_model.train_op, - 'global_step': train_model.global_step, - 'incr_global_step': train_model.incr_global_step - } - _ = sess.run(fetches) - if step % SAVE_EVERY == 0: - # Get latest prediction for middle frame, highest scale. - pred = train_model.depth[1][0].eval(session=sess) - if FLAGS.flip: - pred = np.flip(pred, axis=2) - input_img = train_model.image_stack.eval(session=sess) - input_img_prev = input_img[0, :, :, 0:3] - input_img_center = input_img[0, :, :, 3:6] - input_img_next = input_img[0, :, :, 6:] - img_pred_file = os.path.join( - img_pred_folder, - str(step).zfill(10) + ('_flip' if FLAGS.flip else '') + '.npy') - motion = np.squeeze(train_model.egomotion.eval(session=sess)) - # motion of shape (seq_length - 1, 6). - motion = np.mean(motion, axis=0) # Average egomotion across frames. - - if SAVE_PREVIEWS or step == FLAGS.num_steps: - # Also save preview of depth map. - color_map = util.normalize_depth_for_display( - np.squeeze(pred[0, :, :])) - visualization = np.concatenate( - (input_img_prev, input_img_center, input_img_next, color_map)) - motion_s = [str(m) for m in motion] - s_rep = ','.join(motion_s) - with gfile.Open(img_pred_file.replace('.npy', '.txt'), 'w') as f: - f.write(s_rep) - util.save_image( - img_pred_file.replace('.npy', '.%s' % FLAGS.file_extension), - visualization, FLAGS.file_extension) - - with gfile.Open(img_pred_file, 'wb') as f: - np.save(f, pred) - - # Apply heuristic to not finetune if egomotion magnitude is too low. 
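The heuristic applied next keeps the fine-tuned result only when the translation component of the averaged egomotion is large enough; near-static frames fall back to the unrefined prediction (they are appended to the remaining list in main() above). In NumPy terms:

import numpy as np

motion = np.array([0.004, 0.001, 0.008, 0.0, 0.01, 0.0])  # [tx ty tz rx ry rz]
ego_magnitude = np.linalg.norm(motion[:3])  # L2 norm of the translation only.
print(ego_magnitude >= 0.01)                # -> False: below the default
                                            # egomotion_threshold, so skip.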
- ego_magnitude = np.linalg.norm(motion[:3], ord=2) - heuristic = ego_magnitude >= FLAGS.egomotion_threshold - if not heuristic and step == FLAGS.num_steps: - failed_heuristic.append(img_nr) - - step += 1 - img_nr += 1 - return failed_heuristic - - -if __name__ == '__main__': - app.run(main) diff --git a/research/struct2depth/project.py b/research/struct2depth/project.py deleted file mode 100644 index f249ebd3d..000000000 --- a/research/struct2depth/project.py +++ /dev/null @@ -1,326 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Geometry utilities for projecting frames based on depth and motion. - -Modified from Spatial Transformer Networks: -https://github.com/tensorflow/models/blob/master/transformer/spatial_transformer.py -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import logging -import numpy as np -import tensorflow as tf - - -def inverse_warp(img, depth, egomotion_mat, intrinsic_mat, - intrinsic_mat_inv): - """Inverse warp a source image to the target image plane. - - Args: - img: The source image (to sample pixels from) -- [B, H, W, 3]. - depth: Depth map of the target image -- [B, H, W]. - egomotion_mat: Matrix defining egomotion transform -- [B, 4, 4]. - intrinsic_mat: Camera intrinsic matrix -- [B, 3, 3]. - intrinsic_mat_inv: Inverse of the intrinsic matrix -- [B, 3, 3]. 
- Returns: - Projected source image - """ - dims = tf.shape(img) - batch_size, img_height, img_width = dims[0], dims[1], dims[2] - depth = tf.reshape(depth, [batch_size, 1, img_height * img_width]) - grid = _meshgrid_abs(img_height, img_width) - grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1]) - cam_coords = _pixel2cam(depth, grid, intrinsic_mat_inv) - ones = tf.ones([batch_size, 1, img_height * img_width]) - cam_coords_hom = tf.concat([cam_coords, ones], axis=1) - - # Get projection matrix for target camera frame to source pixel frame - hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4]) - hom_filler = tf.tile(hom_filler, [batch_size, 1, 1]) - intrinsic_mat_hom = tf.concat( - [intrinsic_mat, tf.zeros([batch_size, 3, 1])], axis=2) - intrinsic_mat_hom = tf.concat([intrinsic_mat_hom, hom_filler], axis=1) - proj_target_cam_to_source_pixel = tf.matmul(intrinsic_mat_hom, egomotion_mat) - source_pixel_coords = _cam2pixel(cam_coords_hom, - proj_target_cam_to_source_pixel) - source_pixel_coords = tf.reshape(source_pixel_coords, - [batch_size, 2, img_height, img_width]) - source_pixel_coords = tf.transpose(source_pixel_coords, perm=[0, 2, 3, 1]) - projected_img, mask = _spatial_transformer(img, source_pixel_coords) - return projected_img, mask - - -def get_transform_mat(egomotion_vecs, i, j): - """Returns a transform matrix defining the transform from frame i to j.""" - egomotion_transforms = [] - batchsize = tf.shape(egomotion_vecs)[0] - if i == j: - return tf.tile(tf.expand_dims(tf.eye(4, 4), axis=0), [batchsize, 1, 1]) - for k in range(min(i, j), max(i, j)): - transform_matrix = _egomotion_vec2mat(egomotion_vecs[:, k, :], batchsize) - if i > j: # Going back in sequence, need to invert egomotion. - egomotion_transforms.insert(0, tf.linalg.inv(transform_matrix)) - else: # Going forward in sequence - egomotion_transforms.append(transform_matrix) - - # Multiply all matrices. - egomotion_mat = egomotion_transforms[0] - for i in range(1, len(egomotion_transforms)): - egomotion_mat = tf.matmul(egomotion_mat, egomotion_transforms[i]) - return egomotion_mat - - -def _pixel2cam(depth, pixel_coords, intrinsic_mat_inv): - """Transform coordinates in the pixel frame to the camera frame.""" - cam_coords = tf.matmul(intrinsic_mat_inv, pixel_coords) * depth - return cam_coords - - -def _cam2pixel(cam_coords, proj_c2p): - """Transform coordinates in the camera frame to the pixel frame.""" - pcoords = tf.matmul(proj_c2p, cam_coords) - x = tf.slice(pcoords, [0, 0, 0], [-1, 1, -1]) - y = tf.slice(pcoords, [0, 1, 0], [-1, 1, -1]) - z = tf.slice(pcoords, [0, 2, 0], [-1, 1, -1]) - # Not tested if adding a small number is necessary - x_norm = x / (z + 1e-10) - y_norm = y / (z + 1e-10) - pixel_coords = tf.concat([x_norm, y_norm], axis=1) - return pixel_coords - - -def _meshgrid_abs(height, width): - """Meshgrid in the absolute coordinates.""" - x_t = tf.matmul( - tf.ones(shape=tf.stack([height, 1])), - tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) - y_t = tf.matmul( - tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), - tf.ones(shape=tf.stack([1, width]))) - x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32) - y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32) - x_t_flat = tf.reshape(x_t, (1, -1)) - y_t_flat = tf.reshape(y_t, (1, -1)) - ones = tf.ones_like(x_t_flat) - grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0) - return grid - - -def _euler2mat(z, y, x): - """Converts euler angles to rotation matrix. 
- - From: - https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174 - - TODO: Remove the dimension for 'N' (deprecated for converting all source - poses altogether). - - Args: - z: rotation angle along z axis (in radians) -- size = [B, n] - y: rotation angle along y axis (in radians) -- size = [B, n] - x: rotation angle along x axis (in radians) -- size = [B, n] - - Returns: - Rotation matrix corresponding to the euler angles, with shape [B, n, 3, 3]. - """ - batch_size = tf.shape(z)[0] - n = 1 - z = tf.clip_by_value(z, -np.pi, np.pi) - y = tf.clip_by_value(y, -np.pi, np.pi) - x = tf.clip_by_value(x, -np.pi, np.pi) - - # Expand to B x N x 1 x 1 - z = tf.expand_dims(tf.expand_dims(z, -1), -1) - y = tf.expand_dims(tf.expand_dims(y, -1), -1) - x = tf.expand_dims(tf.expand_dims(x, -1), -1) - - zeros = tf.zeros([batch_size, n, 1, 1]) - ones = tf.ones([batch_size, n, 1, 1]) - - cosz = tf.cos(z) - sinz = tf.sin(z) - rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3) - rotz_2 = tf.concat([sinz, cosz, zeros], axis=3) - rotz_3 = tf.concat([zeros, zeros, ones], axis=3) - zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2) - - cosy = tf.cos(y) - siny = tf.sin(y) - roty_1 = tf.concat([cosy, zeros, siny], axis=3) - roty_2 = tf.concat([zeros, ones, zeros], axis=3) - roty_3 = tf.concat([-siny, zeros, cosy], axis=3) - ymat = tf.concat([roty_1, roty_2, roty_3], axis=2) - - cosx = tf.cos(x) - sinx = tf.sin(x) - rotx_1 = tf.concat([ones, zeros, zeros], axis=3) - rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3) - rotx_3 = tf.concat([zeros, sinx, cosx], axis=3) - xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2) - - return tf.matmul(tf.matmul(xmat, ymat), zmat) - - -def _egomotion_vec2mat(vec, batch_size): - """Converts 6DoF transform vector to transformation matrix. - - Args: - vec: 6DoF parameters [tx, ty, tz, rx, ry, rz] -- [B, 6]. - batch_size: Batch size. - - Returns: - A transformation matrix -- [B, 4, 4]. - """ - translation = tf.slice(vec, [0, 0], [-1, 3]) - translation = tf.expand_dims(translation, -1) - rx = tf.slice(vec, [0, 3], [-1, 1]) - ry = tf.slice(vec, [0, 4], [-1, 1]) - rz = tf.slice(vec, [0, 5], [-1, 1]) - rot_mat = _euler2mat(rz, ry, rx) - rot_mat = tf.squeeze(rot_mat, squeeze_dims=[1]) - filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4]) - filler = tf.tile(filler, [batch_size, 1, 1]) - transform_mat = tf.concat([rot_mat, translation], axis=2) - transform_mat = tf.concat([transform_mat, filler], axis=1) - return transform_mat - - -def _bilinear_sampler(im, x, y, name='blinear_sampler'): - """Perform bilinear sampling on im given list of x, y coordinates. - - Implements the differentiable sampling mechanism with bilinear kernel - in https://arxiv.org/abs/1506.02025. - - x,y are tensors specifying normalized coordinates [-1, 1] to be sampled on im. - For example, (-1, -1) in (x, y) corresponds to pixel location (0, 0) in im, - and (1, 1) in (x, y) corresponds to the bottom right pixel in im. - - Args: - im: Batch of images with shape [B, h, w, channels]. - x: Tensor of normalized x coordinates in [-1, 1], with shape [B, h, w, 1]. - y: Tensor of normalized y coordinates in [-1, 1], with shape [B, h, w, 1]. - name: Name scope for ops. - - Returns: - Sampled image with shape [B, h, w, channels]. - Principled mask with shape [B, h, w, 1], dtype:float32. A value of 1.0 - in the mask indicates that the corresponding coordinate in the sampled - image is valid. - """ - with tf.variable_scope(name): - x = tf.reshape(x, [-1]) - y = tf.reshape(y, [-1]) - - # Constants. 
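_egomotion_vec2mat above assembles the usual homogeneous rigid transform: the 3x3 rotation from _euler2mat sits next to the translation column, with a [0, 0, 0, 1] filler row. A quick sanity check of that layout for the zero vector, which must give the identity:

import numpy as np

rot = np.eye(3)                        # _euler2mat(0, 0, 0).
translation = np.zeros((3, 1))         # Zero translation column.
transform = np.vstack([np.hstack([rot, translation]),
                       [[0.0, 0.0, 0.0, 1.0]]])
print(np.allclose(transform, np.eye(4)))  # -> True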
- batch_size = tf.shape(im)[0] - _, height, width, channels = im.get_shape().as_list() - - x = tf.to_float(x) - y = tf.to_float(y) - height_f = tf.cast(height, 'float32') - width_f = tf.cast(width, 'float32') - zero = tf.constant(0, dtype=tf.int32) - max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') - max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') - - # Scale indices from [-1, 1] to [0, width - 1] or [0, height - 1]. - x = (x + 1.0) * (width_f - 1.0) / 2.0 - y = (y + 1.0) * (height_f - 1.0) / 2.0 - - # Compute the coordinates of the 4 pixels to sample from. - x0 = tf.cast(tf.floor(x), 'int32') - x1 = x0 + 1 - y0 = tf.cast(tf.floor(y), 'int32') - y1 = y0 + 1 - - mask = tf.logical_and( - tf.logical_and(x0 >= zero, x1 <= max_x), - tf.logical_and(y0 >= zero, y1 <= max_y)) - mask = tf.to_float(mask) - - x0 = tf.clip_by_value(x0, zero, max_x) - x1 = tf.clip_by_value(x1, zero, max_x) - y0 = tf.clip_by_value(y0, zero, max_y) - y1 = tf.clip_by_value(y1, zero, max_y) - dim2 = width - dim1 = width * height - - # Create base index. - base = tf.range(batch_size) * dim1 - base = tf.reshape(base, [-1, 1]) - base = tf.tile(base, [1, height * width]) - base = tf.reshape(base, [-1]) - - base_y0 = base + y0 * dim2 - base_y1 = base + y1 * dim2 - idx_a = base_y0 + x0 - idx_b = base_y1 + x0 - idx_c = base_y0 + x1 - idx_d = base_y1 + x1 - - # Use indices to lookup pixels in the flat image and restore channels dim. - im_flat = tf.reshape(im, tf.stack([-1, channels])) - im_flat = tf.to_float(im_flat) - pixel_a = tf.gather(im_flat, idx_a) - pixel_b = tf.gather(im_flat, idx_b) - pixel_c = tf.gather(im_flat, idx_c) - pixel_d = tf.gather(im_flat, idx_d) - - x1_f = tf.to_float(x1) - y1_f = tf.to_float(y1) - - # And finally calculate interpolated values. - wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1) - wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1) - wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1) - wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1) - - output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d]) - output = tf.reshape(output, tf.stack([batch_size, height, width, channels])) - mask = tf.reshape(mask, tf.stack([batch_size, height, width, 1])) - return output, mask - - -def _spatial_transformer(img, coords): - """A wrapper over binlinear_sampler(), taking absolute coords as input.""" - img_height = tf.cast(tf.shape(img)[1], tf.float32) - img_width = tf.cast(tf.shape(img)[2], tf.float32) - px = coords[:, :, :, :1] - py = coords[:, :, :, 1:] - # Normalize coordinates to [-1, 1] to send to _bilinear_sampler. 
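The four weights computed above are the standard bilinear kernel: each corner pixel is weighted by the area of the rectangle opposite it, and the weights always sum to one. For a sample point at fractional offset (0.25, 0.75) inside its cell:

dx, dy = 0.25, 0.75           # x - x0 and y - y0 of the sampled location.
wa = (1 - dx) * (1 - dy)      # Corner (x0, y0), cf. idx_a above.
wb = (1 - dx) * dy            # Corner (x0, y1), cf. idx_b.
wc = dx * (1 - dy)            # Corner (x1, y0), cf. idx_c.
wd = dx * dy                  # Corner (x1, y1), cf. idx_d.
print(wa + wb + wc + wd)      # -> 1.0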
- px = px / (img_width - 1) * 2.0 - 1.0 - py = py / (img_height - 1) * 2.0 - 1.0 - output_img, mask = _bilinear_sampler(img, px, py) - return output_img, mask - - -def get_cloud(depth, intrinsics_inv, name=None): - """Convert depth map to 3D point cloud.""" - with tf.name_scope(name): - dims = depth.shape.as_list() - batch_size, img_height, img_width = dims[0], dims[1], dims[2] - depth = tf.reshape(depth, [batch_size, 1, img_height * img_width]) - grid = _meshgrid_abs(img_height, img_width) - grid = tf.tile(tf.expand_dims(grid, 0), [batch_size, 1, 1]) - cam_coords = _pixel2cam(depth, grid, intrinsics_inv) - cam_coords = tf.transpose(cam_coords, [0, 2, 1]) - cam_coords = tf.reshape(cam_coords, [batch_size, img_height, img_width, 3]) - logging.info('depth -> cloud: %s', cam_coords) - return cam_coords diff --git a/research/struct2depth/reader.py b/research/struct2depth/reader.py deleted file mode 100644 index 444e4bea9..000000000 --- a/research/struct2depth/reader.py +++ /dev/null @@ -1,344 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Reads data that is produced by dataset/gen_data.py.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import random -from absl import logging -import tensorflow as tf - -import util - -gfile = tf.gfile - -QUEUE_SIZE = 2000 -QUEUE_BUFFER = 3 -# See nets.encoder_resnet as reference for below input-normalizing constants. -IMAGENET_MEAN = (0.485, 0.456, 0.406) -IMAGENET_SD = (0.229, 0.224, 0.225) -FLIP_RANDOM = 'random' # Always perform random flipping. -FLIP_ALWAYS = 'always' # Always flip image input, used for test augmentation. -FLIP_NONE = 'none' # Always disables flipping. 
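IMAGENET_MEAN and IMAGENET_SD above are the standard per-channel ImageNet statistics. The reader (like the test graphs earlier) tiles them across the frames of a sequence so that a [h, w, seq_length * 3] stack is normalized with a single broadcast subtraction. A sketch:

import numpy as np

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_SD = (0.229, 0.224, 0.225)
seq_length = 3

stack = np.random.rand(128, 416, seq_length * 3)  # [h, w, seq_length * 3].
mean = np.tile(IMAGENET_MEAN, seq_length)         # Per-frame repeat -> (9,).
sd = np.tile(IMAGENET_SD, seq_length)
normalized = (stack - mean) / sd                  # Broadcasts over h and w.
print(normalized.shape)                           # -> (128, 416, 9)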
- - -class DataReader(object): - """Reads stored sequences which are produced by dataset/gen_data.py.""" - - def __init__(self, data_dir, batch_size, img_height, img_width, seq_length, - num_scales, file_extension, random_scale_crop, flipping_mode, - random_color, imagenet_norm, shuffle, input_file='train'): - self.data_dir = data_dir - self.batch_size = batch_size - self.img_height = img_height - self.img_width = img_width - self.seq_length = seq_length - self.num_scales = num_scales - self.file_extension = file_extension - self.random_scale_crop = random_scale_crop - self.flipping_mode = flipping_mode - self.random_color = random_color - self.imagenet_norm = imagenet_norm - self.shuffle = shuffle - self.input_file = input_file - - def read_data(self): - """Provides images and camera intrinsics.""" - with tf.name_scope('data_loading'): - with tf.name_scope('enqueue_paths'): - seed = random.randint(0, 2**31 - 1) - self.file_lists = self.compile_file_list(self.data_dir, self.input_file) - image_paths_queue = tf.train.string_input_producer( - self.file_lists['image_file_list'], seed=seed, - shuffle=self.shuffle, - num_epochs=(1 if not self.shuffle else None) - ) - seg_paths_queue = tf.train.string_input_producer( - self.file_lists['segment_file_list'], seed=seed, - shuffle=self.shuffle, - num_epochs=(1 if not self.shuffle else None)) - cam_paths_queue = tf.train.string_input_producer( - self.file_lists['cam_file_list'], seed=seed, - shuffle=self.shuffle, - num_epochs=(1 if not self.shuffle else None)) - img_reader = tf.WholeFileReader() - _, image_contents = img_reader.read(image_paths_queue) - seg_reader = tf.WholeFileReader() - _, seg_contents = seg_reader.read(seg_paths_queue) - if self.file_extension == 'jpg': - image_seq = tf.image.decode_jpeg(image_contents) - seg_seq = tf.image.decode_jpeg(seg_contents, channels=3) - elif self.file_extension == 'png': - image_seq = tf.image.decode_png(image_contents, channels=3) - seg_seq = tf.image.decode_png(seg_contents, channels=3) - - with tf.name_scope('load_intrinsics'): - cam_reader = tf.TextLineReader() - _, raw_cam_contents = cam_reader.read(cam_paths_queue) - rec_def = [] - for _ in range(9): - rec_def.append([1.0]) - raw_cam_vec = tf.decode_csv(raw_cam_contents, record_defaults=rec_def) - raw_cam_vec = tf.stack(raw_cam_vec) - intrinsics = tf.reshape(raw_cam_vec, [3, 3]) - - with tf.name_scope('convert_image'): - image_seq = self.preprocess_image(image_seq) # Converts to float. 
- - if self.random_color: - with tf.name_scope('image_augmentation'): - image_seq = self.augment_image_colorspace(image_seq) - - image_stack = self.unpack_images(image_seq) - seg_stack = self.unpack_images(seg_seq) - - if self.flipping_mode != FLIP_NONE: - random_flipping = (self.flipping_mode == FLIP_RANDOM) - with tf.name_scope('image_augmentation_flip'): - image_stack, seg_stack, intrinsics = self.augment_images_flip( - image_stack, seg_stack, intrinsics, - randomized=random_flipping) - - if self.random_scale_crop: - with tf.name_scope('image_augmentation_scale_crop'): - image_stack, seg_stack, intrinsics = self.augment_images_scale_crop( - image_stack, seg_stack, intrinsics, self.img_height, - self.img_width) - - with tf.name_scope('multi_scale_intrinsics'): - intrinsic_mat = self.get_multi_scale_intrinsics(intrinsics, - self.num_scales) - intrinsic_mat.set_shape([self.num_scales, 3, 3]) - intrinsic_mat_inv = tf.matrix_inverse(intrinsic_mat) - intrinsic_mat_inv.set_shape([self.num_scales, 3, 3]) - - if self.imagenet_norm: - im_mean = tf.tile( - tf.constant(IMAGENET_MEAN), multiples=[self.seq_length]) - im_sd = tf.tile( - tf.constant(IMAGENET_SD), multiples=[self.seq_length]) - image_stack_norm = (image_stack - im_mean) / im_sd - else: - image_stack_norm = image_stack - - with tf.name_scope('batching'): - if self.shuffle: - (image_stack, image_stack_norm, seg_stack, intrinsic_mat, - intrinsic_mat_inv) = tf.train.shuffle_batch( - [image_stack, image_stack_norm, seg_stack, intrinsic_mat, - intrinsic_mat_inv], - batch_size=self.batch_size, - capacity=QUEUE_SIZE + QUEUE_BUFFER * self.batch_size, - min_after_dequeue=QUEUE_SIZE) - else: - (image_stack, image_stack_norm, seg_stack, intrinsic_mat, - intrinsic_mat_inv) = tf.train.batch( - [image_stack, image_stack_norm, seg_stack, intrinsic_mat, - intrinsic_mat_inv], - batch_size=self.batch_size, - num_threads=1, - capacity=QUEUE_SIZE + QUEUE_BUFFER * self.batch_size) - logging.info('image_stack: %s', util.info(image_stack)) - return (image_stack, image_stack_norm, seg_stack, intrinsic_mat, - intrinsic_mat_inv) - - def unpack_images(self, image_seq): - """[h, w * seq_length, 3] -> [h, w, 3 * seq_length].""" - with tf.name_scope('unpack_images'): - image_list = [ - image_seq[:, i * self.img_width:(i + 1) * self.img_width, :] - for i in range(self.seq_length) - ] - image_stack = tf.concat(image_list, axis=2) - image_stack.set_shape( - [self.img_height, self.img_width, self.seq_length * 3]) - return image_stack - - @classmethod - def preprocess_image(cls, image): - # Convert from uint8 to float. - return tf.image.convert_image_dtype(image, dtype=tf.float32) - - @classmethod - def augment_image_colorspace(cls, image_stack): - """Apply data augmentation to inputs.""" - image_stack_aug = image_stack - # Randomly shift brightness. - apply_brightness = tf.less(tf.random_uniform( - shape=[], minval=0.0, maxval=1.0, dtype=tf.float32), 0.5) - image_stack_aug = tf.cond( - apply_brightness, - lambda: tf.image.random_brightness(image_stack_aug, max_delta=0.1), - lambda: image_stack_aug) - - # Randomly shift contrast. - apply_contrast = tf.less(tf.random_uniform( - shape=[], minval=0.0, maxval=1.0, dtype=tf.float32), 0.5) - image_stack_aug = tf.cond( - apply_contrast, - lambda: tf.image.random_contrast(image_stack_aug, 0.85, 1.15), - lambda: image_stack_aug) - - # Randomly change saturation. 
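- # (Same coin-flip pattern as the brightness and contrast shifts above: draw
- # a uniform scalar and tf.cond between the jittered and untouched stacks.)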
- apply_saturation = tf.less(tf.random_uniform( - shape=[], minval=0.0, maxval=1.0, dtype=tf.float32), 0.5) - image_stack_aug = tf.cond( - apply_saturation, - lambda: tf.image.random_saturation(image_stack_aug, 0.85, 1.15), - lambda: image_stack_aug) - - # Randomly change hue. - apply_hue = tf.less(tf.random_uniform( - shape=[], minval=0.0, maxval=1.0, dtype=tf.float32), 0.5) - image_stack_aug = tf.cond( - apply_hue, - lambda: tf.image.random_hue(image_stack_aug, max_delta=0.1), - lambda: image_stack_aug) - - image_stack_aug = tf.clip_by_value(image_stack_aug, 0, 1) - return image_stack_aug - - @classmethod - def augment_images_flip(cls, image_stack, seg_stack, intrinsics, - randomized=True): - """Randomly flips the image horizontally.""" - - def flip(cls, image_stack, seg_stack, intrinsics): - _, in_w, _ = image_stack.get_shape().as_list() - fx = intrinsics[0, 0] - fy = intrinsics[1, 1] - cx = in_w - intrinsics[0, 2] - cy = intrinsics[1, 2] - intrinsics = cls.make_intrinsics_matrix(fx, fy, cx, cy) - return (tf.image.flip_left_right(image_stack), - tf.image.flip_left_right(seg_stack), intrinsics) - - if randomized: - prob = tf.random_uniform(shape=[], minval=0.0, maxval=1.0, - dtype=tf.float32) - predicate = tf.less(prob, 0.5) - return tf.cond(predicate, - lambda: flip(cls, image_stack, seg_stack, intrinsics), - lambda: (image_stack, seg_stack, intrinsics)) - else: - return flip(cls, image_stack, seg_stack, intrinsics) - - @classmethod - def augment_images_scale_crop(cls, im, seg, intrinsics, out_h, out_w): - """Randomly scales and crops image.""" - - def scale_randomly(im, seg, intrinsics): - """Scales image and adjust intrinsics accordingly.""" - in_h, in_w, _ = im.get_shape().as_list() - scaling = tf.random_uniform([2], 1, 1.15) - x_scaling = scaling[0] - y_scaling = scaling[1] - out_h = tf.cast(in_h * y_scaling, dtype=tf.int32) - out_w = tf.cast(in_w * x_scaling, dtype=tf.int32) - # Add batch. 
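- # (tf.image.resize_area expects a 4-D [batch, height, width, channels]
- # tensor, so a singleton batch dimension is added here and stripped again
- # right after the resize.)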
- im = tf.expand_dims(im, 0) - im = tf.image.resize_area(im, [out_h, out_w]) - im = im[0] - seg = tf.expand_dims(seg, 0) - seg = tf.image.resize_area(seg, [out_h, out_w]) - seg = seg[0] - fx = intrinsics[0, 0] * x_scaling - fy = intrinsics[1, 1] * y_scaling - cx = intrinsics[0, 2] * x_scaling - cy = intrinsics[1, 2] * y_scaling - intrinsics = cls.make_intrinsics_matrix(fx, fy, cx, cy) - return im, seg, intrinsics - - # Random cropping - def crop_randomly(im, seg, intrinsics, out_h, out_w): - """Crops image and adjust intrinsics accordingly.""" - # batch_size, in_h, in_w, _ = im.get_shape().as_list() - in_h, in_w, _ = tf.unstack(tf.shape(im)) - offset_y = tf.random_uniform([1], 0, in_h - out_h + 1, dtype=tf.int32)[0] - offset_x = tf.random_uniform([1], 0, in_w - out_w + 1, dtype=tf.int32)[0] - im = tf.image.crop_to_bounding_box(im, offset_y, offset_x, out_h, out_w) - seg = tf.image.crop_to_bounding_box(seg, offset_y, offset_x, out_h, out_w) - fx = intrinsics[0, 0] - fy = intrinsics[1, 1] - cx = intrinsics[0, 2] - tf.cast(offset_x, dtype=tf.float32) - cy = intrinsics[1, 2] - tf.cast(offset_y, dtype=tf.float32) - intrinsics = cls.make_intrinsics_matrix(fx, fy, cx, cy) - return im, seg, intrinsics - - im, seg, intrinsics = scale_randomly(im, seg, intrinsics) - im, seg, intrinsics = crop_randomly(im, seg, intrinsics, out_h, out_w) - return im, seg, intrinsics - - def compile_file_list(self, data_dir, split, load_pose=False): - """Creates a list of input files.""" - logging.info('data_dir: %s', data_dir) - with gfile.Open(os.path.join(data_dir, '%s.txt' % split), 'r') as f: - frames = f.readlines() - frames = [k.rstrip() for k in frames] - subfolders = [x.split(' ')[0] for x in frames] - frame_ids = [x.split(' ')[1] for x in frames] - image_file_list = [ - os.path.join(data_dir, subfolders[i], frame_ids[i] + '.' + - self.file_extension) - for i in range(len(frames)) - ] - segment_file_list = [ - os.path.join(data_dir, subfolders[i], frame_ids[i] + '-fseg.' 
+ - self.file_extension) - for i in range(len(frames)) - ] - cam_file_list = [ - os.path.join(data_dir, subfolders[i], frame_ids[i] + '_cam.txt') - for i in range(len(frames)) - ] - file_lists = {} - file_lists['image_file_list'] = image_file_list - file_lists['segment_file_list'] = segment_file_list - file_lists['cam_file_list'] = cam_file_list - if load_pose: - pose_file_list = [ - os.path.join(data_dir, subfolders[i], frame_ids[i] + '_pose.txt') - for i in range(len(frames)) - ] - file_lists['pose_file_list'] = pose_file_list - self.steps_per_epoch = len(image_file_list) // self.batch_size - return file_lists - - @classmethod - def make_intrinsics_matrix(cls, fx, fy, cx, cy): - r1 = tf.stack([fx, 0, cx]) - r2 = tf.stack([0, fy, cy]) - r3 = tf.constant([0., 0., 1.]) - intrinsics = tf.stack([r1, r2, r3]) - return intrinsics - - @classmethod - def get_multi_scale_intrinsics(cls, intrinsics, num_scales): - """Returns multiple intrinsic matrices for different scales.""" - intrinsics_multi_scale = [] - # Scale the intrinsics accordingly for each scale - for s in range(num_scales): - fx = intrinsics[0, 0] / (2**s) - fy = intrinsics[1, 1] / (2**s) - cx = intrinsics[0, 2] / (2**s) - cy = intrinsics[1, 2] / (2**s) - intrinsics_multi_scale.append(cls.make_intrinsics_matrix(fx, fy, cx, cy)) - intrinsics_multi_scale = tf.stack(intrinsics_multi_scale) - return intrinsics_multi_scale diff --git a/research/struct2depth/train.py b/research/struct2depth/train.py deleted file mode 100644 index 248c182fe..000000000 --- a/research/struct2depth/train.py +++ /dev/null @@ -1,259 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Train the model. Please refer to README for example usage.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import os -import random -import time -from absl import app -from absl import flags -from absl import logging -import numpy as np -import tensorflow as tf - -import model -import nets -import reader -import util - -gfile = tf.gfile -MAX_TO_KEEP = 1000000 # Maximum number of checkpoints to keep. - -flags.DEFINE_string('data_dir', None, 'Preprocessed data.') -flags.DEFINE_string('file_extension', 'png', 'Image data file extension.') -flags.DEFINE_float('learning_rate', 0.0002, 'Adam learning rate.') -flags.DEFINE_float('beta1', 0.9, 'Adam momentum.') -flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.') -flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.') -flags.DEFINE_float('smooth_weight', 0.04, 'Smoothness loss weight.') -flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.') -flags.DEFINE_float('size_constraint_weight', 0.0005, 'Weight of the object ' - 'size constraint loss. 
Use only when motion handling is ' - 'enabled.') -flags.DEFINE_integer('batch_size', 4, 'The size of a sample batch') -flags.DEFINE_integer('img_height', 128, 'Input frame height.') -flags.DEFINE_integer('img_width', 416, 'Input frame width.') -flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.') -flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES, - 'Defines the architecture to use for the depth prediction ' - 'network. Defaults to ResNet-based encoder and accompanying ' - 'decoder.') -flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input ' - 'images channel-wise so that they match the distribution ' - 'most ImageNet-models were trained on.') -flags.DEFINE_float('weight_reg', 0.05, 'The amount of weight regularization to ' - 'apply. This has no effect on the ResNet-based encoder ' - 'architecture.') -flags.DEFINE_boolean('exhaustive_mode', False, 'Whether to exhaustively warp ' - 'from any frame to any other instead of just considering ' - 'adjacent frames. Where necessary, multiple egomotion ' - 'estimates will be applied. Does not have an effect if ' - 'compute_minimum_loss is enabled.') -flags.DEFINE_boolean('random_scale_crop', False, 'Whether to apply random ' - 'image scaling and center cropping during training.') -flags.DEFINE_enum('flipping_mode', reader.FLIP_RANDOM, - [reader.FLIP_RANDOM, reader.FLIP_ALWAYS, reader.FLIP_NONE], - 'Determines the image flipping mode: if random, performs ' - 'on-the-fly augmentation. Otherwise, flips the input images ' - 'always or never, respectively.') -flags.DEFINE_string('pretrained_ckpt', None, 'Path to checkpoint with ' - 'pretrained weights. Do not include .data* extension.') -flags.DEFINE_string('imagenet_ckpt', None, 'Initialize the weights according ' - 'to an ImageNet-pretrained checkpoint. Requires ' - 'architecture to be ResNet-18.') -flags.DEFINE_string('checkpoint_dir', None, 'Directory to save model ' - 'checkpoints.') -flags.DEFINE_integer('train_steps', 10000000, 'Number of training steps.') -flags.DEFINE_integer('summary_freq', 100, 'Save summaries every N steps.') -flags.DEFINE_bool('depth_upsampling', True, 'Whether to apply depth ' - 'upsampling of lower-scale representations before warping to ' - 'compute reconstruction loss on full-resolution image.') -flags.DEFINE_bool('depth_normalization', True, 'Whether to apply depth ' - 'normalization, that is, normalizing inverse depth ' - 'prediction maps by their mean to avoid degeneration towards ' - 'small values.') -flags.DEFINE_bool('compute_minimum_loss', True, 'Whether to take the ' - 'element-wise minimum of the reconstruction/SSIM error in ' - 'order to avoid overly penalizing dis-occlusion effects.') -flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the ' - 'encoder-decoder architecture.') -flags.DEFINE_bool('equal_weighting', False, 'Whether to use equal weighting ' - 'of the smoothing loss term, regardless of resolution.') -flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters ' - 'between the depth and egomotion networks by using a joint ' - 'encoder architecture. 
The egomotion network is then ' - 'operating only on the hidden representation provided by the ' - 'joint encoder.') -flags.DEFINE_bool('handle_motion', True, 'Whether to try to handle motion by ' - 'using the provided segmentation masks.') -flags.DEFINE_string('master', 'local', 'Location of the session.') - -FLAGS = flags.FLAGS -flags.mark_flag_as_required('data_dir') -flags.mark_flag_as_required('checkpoint_dir') - - -def main(_): - # Fixed seed for repeatability - seed = 8964 - tf.set_random_seed(seed) - np.random.seed(seed) - random.seed(seed) - - if FLAGS.handle_motion and FLAGS.joint_encoder: - raise ValueError('Using a joint encoder is currently not supported when ' - 'modeling object motion.') - if FLAGS.handle_motion and FLAGS.seq_length != 3: - raise ValueError('The current motion model implementation only supports ' - 'using a sequence length of three.') - if FLAGS.handle_motion and not FLAGS.compute_minimum_loss: - raise ValueError('Computing the minimum photometric loss is required when ' - 'enabling object motion handling.') - if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion: - raise ValueError('To enforce object size constraints, enable motion ' - 'handling.') - if FLAGS.imagenet_ckpt and not FLAGS.imagenet_norm: - logging.warn('When initializing with an ImageNet-pretrained model, it is ' - 'recommended to normalize the image inputs accordingly using ' - 'imagenet_norm.') - if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1: - raise ValueError('Compute minimum loss requires using an odd number of ' - 'images in a sequence.') - if FLAGS.architecture != nets.RESNET and FLAGS.imagenet_ckpt: - raise ValueError('Can only load weights from pre-trained ImageNet model ' - 'when using ResNet-architecture.') - if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode: - raise ValueError('Exhaustive mode has no effect when compute_minimum_loss ' - 'is enabled.') - if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0: - logging.warn('Image size is not divisible by 2^5. For the architecture ' - 'employed, this could cause artefacts caused by resizing in ' - 'lower dimensions.') - if FLAGS.icp_weight > 0.0: - # TODO(casser): Change ICP interface to take matrix instead of vector. 
- raise ValueError('ICP is currently not supported.') - - if not gfile.Exists(FLAGS.checkpoint_dir): - gfile.MakeDirs(FLAGS.checkpoint_dir) - - train_model = model.Model(data_dir=FLAGS.data_dir, - file_extension=FLAGS.file_extension, - is_training=True, - learning_rate=FLAGS.learning_rate, - beta1=FLAGS.beta1, - reconstr_weight=FLAGS.reconstr_weight, - smooth_weight=FLAGS.smooth_weight, - ssim_weight=FLAGS.ssim_weight, - icp_weight=FLAGS.icp_weight, - batch_size=FLAGS.batch_size, - img_height=FLAGS.img_height, - img_width=FLAGS.img_width, - seq_length=FLAGS.seq_length, - architecture=FLAGS.architecture, - imagenet_norm=FLAGS.imagenet_norm, - weight_reg=FLAGS.weight_reg, - exhaustive_mode=FLAGS.exhaustive_mode, - random_scale_crop=FLAGS.random_scale_crop, - flipping_mode=FLAGS.flipping_mode, - depth_upsampling=FLAGS.depth_upsampling, - depth_normalization=FLAGS.depth_normalization, - compute_minimum_loss=FLAGS.compute_minimum_loss, - use_skip=FLAGS.use_skip, - joint_encoder=FLAGS.joint_encoder, - handle_motion=FLAGS.handle_motion, - equal_weighting=FLAGS.equal_weighting, - size_constraint_weight=FLAGS.size_constraint_weight) - - train(train_model, FLAGS.pretrained_ckpt, FLAGS.imagenet_ckpt, - FLAGS.checkpoint_dir, FLAGS.train_steps, FLAGS.summary_freq) - - -def train(train_model, pretrained_ckpt, imagenet_ckpt, checkpoint_dir, - train_steps, summary_freq): - """Train model.""" - vars_to_restore = None - if pretrained_ckpt is not None: - vars_to_restore = util.get_vars_to_save_and_restore(pretrained_ckpt) - ckpt_path = pretrained_ckpt - elif imagenet_ckpt: - vars_to_restore = util.get_imagenet_vars_to_restore(imagenet_ckpt) - ckpt_path = imagenet_ckpt - pretrain_restorer = tf.train.Saver(vars_to_restore) - vars_to_save = util.get_vars_to_save_and_restore() - vars_to_save[train_model.global_step.op.name] = train_model.global_step - saver = tf.train.Saver(vars_to_save, max_to_keep=MAX_TO_KEEP) - sv = tf.train.Supervisor(logdir=checkpoint_dir, save_summaries_secs=0, - saver=None) - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - with sv.managed_session(config=config) as sess: - if pretrained_ckpt is not None or imagenet_ckpt: - logging.info('Restoring pretrained weights from %s', ckpt_path) - pretrain_restorer.restore(sess, ckpt_path) - - logging.info('Attempting to resume training from %s...', checkpoint_dir) - checkpoint = tf.train.latest_checkpoint(checkpoint_dir) - logging.info('Last checkpoint found: %s', checkpoint) - if checkpoint: - saver.restore(sess, checkpoint) - - logging.info('Training...') - start_time = time.time() - last_summary_time = time.time() - steps_per_epoch = train_model.reader.steps_per_epoch - step = 1 - while step <= train_steps: - fetches = { - 'train': train_model.train_op, - 'global_step': train_model.global_step, - 'incr_global_step': train_model.incr_global_step - } - if step % summary_freq == 0: - fetches['loss'] = train_model.total_loss - fetches['summary'] = sv.summary_op - - results = sess.run(fetches) - global_step = results['global_step'] - - if step % summary_freq == 0: - sv.summary_writer.add_summary(results['summary'], global_step) - train_epoch = math.ceil(global_step / steps_per_epoch) - train_step = global_step - (train_epoch - 1) * steps_per_epoch - this_cycle = time.time() - last_summary_time - last_summary_time += this_cycle - logging.info( - 'Epoch: [%2d] [%5d/%5d] time: %4.2fs (%ds total) loss: %.3f', - train_epoch, train_step, steps_per_epoch, this_cycle, - time.time() - start_time, results['loss']) - - if step % 
steps_per_epoch == 0: - logging.info('[*] Saving checkpoint to %s...', checkpoint_dir) - saver.save(sess, os.path.join(checkpoint_dir, 'model'), - global_step=global_step) - - # Setting step to global_step allows for training for a total of - # train_steps even if the program is restarted during training. - step = global_step + 1 - - -if __name__ == '__main__': - app.run(main) diff --git a/research/struct2depth/util.py b/research/struct2depth/util.py deleted file mode 100644 index 793768646..000000000 --- a/research/struct2depth/util.py +++ /dev/null @@ -1,252 +0,0 @@ - -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contains common utilities and functions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import locale -import os -import re -from absl import logging -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf -import cv2 -gfile = tf.gfile - - -CMAP_DEFAULT = 'plasma' -# Defines the cropping that is applied to the Cityscapes dataset with respect to -# the original raw input resolution. -CITYSCAPES_CROP = [256, 768, 192, 1856] - - -def crop_cityscapes(im, resize=None): - ymin, ymax, xmin, xmax = CITYSCAPES_CROP - im = im[ymin:ymax, xmin:xmax] - if resize is not None: - im = cv2.resize(im, resize) - return im - - -def gray2rgb(im, cmap=CMAP_DEFAULT): - cmap = plt.get_cmap(cmap) - result_img = cmap(im.astype(np.float32)) - if result_img.shape[2] > 3: - result_img = np.delete(result_img, 3, 2) - return result_img - - -def load_image(img_file, resize=None, interpolation='linear'): - """Load image from disk. Output value range: [0,1].""" - im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8) - im = cv2.imdecode(im_data, cv2.IMREAD_COLOR) - im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) - if resize and resize != im.shape[:2]: - ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST - im = cv2.resize(im, resize, interpolation=ip) - return np.array(im, dtype=np.float32) / 255.0 - - -def save_image(img_file, im, file_extension): - """Save image from disk. Expected input value range: [0,1].""" - im = (im * 255.0).astype(np.uint8) - with gfile.Open(img_file, 'w') as f: - im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) - _, im_data = cv2.imencode('.%s' % file_extension, im) - f.write(im_data.tostring()) - - -def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None, - cmap=CMAP_DEFAULT): - """Converts a depth map to an RGB image.""" - # Convert to disparity. 
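- # Display uses inverse depth (disparity); the percentile normalization below
- # keeps the nearest few pixels from compressing the rest of the color range.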
- - disp = 1.0 / (depth + 1e-6) - if normalizer is not None: - disp /= normalizer - else: - disp /= (np.percentile(disp, pc) + 1e-6) - disp = np.clip(disp, 0, 1) - disp = gray2rgb(disp, cmap=cmap) - keep_h = int(disp.shape[0] * (1 - crop_percent)) - disp = disp[:keep_h] - return disp - - -def get_seq_start_end(target_index, seq_length, sample_every=1): - """Returns absolute seq start and end indices for a given target frame.""" - half_offset = int((seq_length - 1) / 2) * sample_every - end_index = target_index + half_offset - start_index = end_index - (seq_length - 1) * sample_every - return start_index, end_index - - -def get_seq_middle(seq_length): - """Returns relative index for the middle frame in sequence.""" - half_offset = int((seq_length - 1) / 2) - return seq_length - 1 - half_offset - - -def info(obj): - """Return info on shape and dtype of a numpy array or TensorFlow tensor.""" - if obj is None: - return 'None.' - elif isinstance(obj, list): - if obj: - return 'List of %d... %s' % (len(obj), info(obj[0])) - else: - return 'Empty list.' - elif isinstance(obj, tuple): - if obj: - return 'Tuple of %d... %s' % (len(obj), info(obj[0])) - else: - return 'Empty tuple.' - else: - if is_a_numpy_array(obj): - return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype) - else: - return str(obj) - - -def is_a_numpy_array(obj): - """Returns true if obj is a numpy array.""" - return type(obj).__module__ == np.__name__ - - -def count_parameters(also_print=True): - """Cound the number of parameters in the model. - - Args: - also_print: Boolean. If True also print the numbers. - - Returns: - The total number of parameters. - """ - total = 0 - if also_print: - logging.info('Model Parameters:') - for (_, v) in get_vars_to_save_and_restore().items(): - shape = v.get_shape() - if also_print: - logging.info('%s %s: %s', v.op.name, shape, - format_number(shape.num_elements())) - total += shape.num_elements() - if also_print: - logging.info('Total: %s', format_number(total)) - return total - - -def get_vars_to_save_and_restore(ckpt=None): - """Returns list of variables that should be saved/restored. - - Args: - ckpt: Path to existing checkpoint. If present, returns only the subset of - variables that exist in given checkpoint. - - Returns: - List of all variables that need to be saved/restored. - """ - model_vars = tf.trainable_variables() - # Add batchnorm variables. - bn_vars = [v for v in tf.global_variables() - if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or - 'mu' in v.op.name or 'sigma' in v.op.name or - 'global_scale_var' in v.op.name] - model_vars.extend(bn_vars) - model_vars = sorted(model_vars, key=lambda x: x.op.name) - mapping = {} - if ckpt is not None: - ckpt_var = tf.contrib.framework.list_variables(ckpt) - ckpt_var_names = [name for (name, unused_shape) in ckpt_var] - ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var] - not_loaded = list(ckpt_var_names) - for v in model_vars: - if v.op.name not in ckpt_var_names: - # For backward compatibility, try additional matching. - v_additional_name = v.op.name.replace('egomotion_prediction/', '') - if v_additional_name in ckpt_var_names: - # Check if shapes match. 
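- # Restore under the legacy name only if the shapes also agree; mismatches
- # are logged and skipped.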
- ind = ckpt_var_names.index(v_additional_name) - if ckpt_var_shapes[ind] == v.get_shape(): - mapping[v_additional_name] = v - not_loaded.remove(v_additional_name) - continue - else: - logging.warn('Shape mismatch, will not restore %s.', v.op.name) - logging.warn('Did not find var %s in checkpoint: %s', v.op.name, - os.path.basename(ckpt)) - else: - # Check if shapes match. - ind = ckpt_var_names.index(v.op.name) - if ckpt_var_shapes[ind] == v.get_shape(): - mapping[v.op.name] = v - not_loaded.remove(v.op.name) - else: - logging.warn('Shape mismatch, will not restore %s.', v.op.name) - if not_loaded: - logging.warn('The following variables in the checkpoint were not loaded:') - for varname_not_loaded in not_loaded: - logging.info('%s', varname_not_loaded) - else: # just get model vars. - for v in model_vars: - mapping[v.op.name] = v - return mapping - - -def get_imagenet_vars_to_restore(imagenet_ckpt): - """Returns dict of variables to restore from ImageNet-checkpoint.""" - vars_to_restore_imagenet = {} - ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt) - ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names] - model_vars = tf.global_variables() - for v in model_vars: - if 'global_step' in v.op.name: continue - mvname_noprefix = v.op.name.replace('depth_prediction/', '') - mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu') - mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma') - if mvname_noprefix in ckpt_var_names: - vars_to_restore_imagenet[mvname_noprefix] = v - else: - logging.info('The following variable will not be restored from ' - 'pretrained ImageNet-checkpoint: %s', mvname_noprefix) - return vars_to_restore_imagenet - - -def format_number(n): - """Formats number with thousands commas.""" - locale.setlocale(locale.LC_ALL, 'en_US') - return locale.format('%d', n, grouping=True) - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def read_text_lines(filepath): - with tf.gfile.Open(filepath, 'r') as f: - lines = f.readlines() - lines = [l.rstrip() for l in lines] - return lines diff --git a/research/swivel/.gitignore b/research/swivel/.gitignore deleted file mode 100644 index 215593fb2..000000000 --- a/research/swivel/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -*.an.tab -*.pyc -*.ws.tab -MEN.tar.gz -Mtruk.csv -SimLex-999.zip -analogy -fastprep -*.dSYM -questions-words.txt -word_relationship.* -tensorflow/ -rw.zip -ws353simrel.tar.gz diff --git a/research/swivel/README.md b/research/swivel/README.md deleted file mode 100644 index c5550a2d1..000000000 --- a/research/swivel/README.md +++ /dev/null @@ -1,185 +0,0 @@ -![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg) -![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen) -![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg) - -# Swivel in Tensorflow - -This is a [TensorFlow](http://www.tensorflow.org/) implementation of the -[Swivel algorithm](http://arxiv.org/abs/1602.02215) for generating word -embeddings. - -Swivel works as follows: - -1. Compute the co-occurrence statistics from a corpus; that is, determine how - often a word *c* appears the context (e.g., "within ten words") of a focus - word *f*. 
This results in a sparse *co-occurrence matrix* whose rows
-   represent the focus words, and whose columns represent the context
-   words. Each cell value is the number of times the focus and context words
-   were observed together.
-2. Re-organize the co-occurrence matrix and chop it into smaller pieces.
-3. Assign a random *embedding vector* of fixed dimension (say, 300) to each
-   focus word and to each context word.
-4. Iteratively attempt to approximate the
-   [pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information)
-   (PMI) between words with the dot product of the corresponding embedding
-   vectors.
-
-Note that the resulting co-occurrence matrix is very sparse (i.e., contains many
-zeros) since most words won't have been observed in the context of other words.
-In the case of very rare words, it seems reasonable to assume that you just
-haven't sampled enough data to spot their co-occurrence yet. On the other hand,
-if we've failed to observe two common words co-occurring, it seems likely that
-they are *anti-correlated*.
-
-Swivel attempts to capture this intuition by using both the observed and the
-un-observed co-occurrences to inform the way it iteratively adjusts vectors.
-Empirically, this seems to lead to better embeddings, especially for rare words.
-
-# Contents
-
-This release includes the following programs.
-
-* `prep.py` is a program that takes a text corpus and pre-processes it for
-  training. Specifically, it computes a vocabulary and token co-occurrence
-  statistics for the corpus. It then outputs the information into a format that
-  can be digested by the TensorFlow trainer.
-* `swivel.py` is a TensorFlow program that generates embeddings from the
-  co-occurrence statistics. It uses the files created by `prep.py` as input,
-  and generates two text files as output: the row and column embeddings.
-* `distributed.sh` is a Bash script that is meant to act as a template for
-  launching "distributed" Swivel training; i.e., multiple processes that work in
-  parallel and communicate via a parameter server.
-* `text2bin.py` combines the row and column vectors generated by Swivel into a
-  flat binary file that can be quickly loaded into memory to perform vector
-  arithmetic. This can also be used to convert embeddings from
-  [Glove](http://nlp.stanford.edu/projects/glove/) and
-  [word2vec](https://code.google.com/archive/p/word2vec/) into a form that can
-  be used by the following tools.
-* `nearest.py` is a program that you can use to manually inspect binary
-  embeddings.
-* `eval.mk` is a GNU makefile that will retrieve and normalize several common
-  word similarity and analogy evaluation data sets.
-* `wordsim.py` performs word similarity evaluation of the resulting vectors.
-* `analogy` performs analogy evaluation of the resulting vectors.
-* `fastprep` is a C++ program that works much more quickly than `prep.py`, but
-  also has some additional dependencies to build.
-
-# Building Embeddings with Swivel
-
-To build your own word embeddings with Swivel, you'll need the following:
-
-* A large corpus of text; for example, the
-  [dump of English Wikipedia](https://dumps.wikimedia.org/enwiki/).
-* A working [TensorFlow](http://www.tensorflow.org/) implementation.
-* A machine with plenty of disk space and, ideally, a beefy GPU card. (We've
-  experimented with the
-  [Nvidia Titan X](http://www.geforce.com/hardware/desktop-gpus/geforce-gtx-titan-x),
-  for example.)
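-
-For intuition, the PMI values being approximated in step 4 above can be
-computed directly for a small dense count matrix. The toy NumPy sketch below
-is illustrative only (the `pmi` helper is not part of this package, and real
-corpora are far too large for a dense matrix, which is why Swivel shards it):
-
-    import numpy as np
-
-    def pmi(cooc):
-        # cooc[i, j] = count of focus word i observed with context word j.
-        total = cooc.sum()
-        p_fc = cooc / total                            # joint probabilities
-        p_f = cooc.sum(axis=1, keepdims=True) / total  # focus marginals
-        p_c = cooc.sum(axis=0, keepdims=True) / total  # context marginals
-        with np.errstate(divide='ignore'):
-            return np.log(p_fc / (p_f * p_c))          # -inf at unobserved cells
-
-Swivel learns row and column vectors whose dot products approximate these
-values, using observed and unobserved cells alike.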
-
-You'll then run `prep.py` (or `fastprep`) to prepare the data for Swivel and run
-`swivel.py` to create the embeddings. The resulting embeddings will be output
-into two large text files: one for the row vectors and one for the column
-vectors. You can use those "as is", or convert them into a binary file using
-`text2bin.py` and then use the tools here to experiment with the resulting
-vectors.
-
-## Preparing the data for training
-
-Once you've downloaded the corpus (e.g., to `/tmp/wiki.txt`), run `prep.py` to
-prepare the data for training:
-
-    ./prep.py --output_dir /tmp/swivel_data --input /tmp/wiki.txt
-
-By default, `prep.py` will make one pass through the text file to compute a
-"vocabulary" of the most frequent words, and then a second pass to compute the
-co-occurrence statistics. The following options allow you to control this
-behavior:
-
-| Option | Description |
-|:--- |:--- |
-| `--min_count <n>` | Only include words in the generated vocabulary that appear at least *n* times. |
-| `--max_vocab <n>` | Admit at most *n* words into the vocabulary. |
-| `--vocab <filename>` | Use the specified filename as the vocabulary instead of computing it from the corpus. The file should contain one word per line. |
-
-The `prep.py` program is pretty simple. Notably, it does almost no text
-processing: it does no case translation and simply breaks text into tokens by
-splitting on spaces. Feel free to experiment with the `words` function if you'd
-like to do something more sophisticated.
-
-Unfortunately, `prep.py` is pretty slow. Also included is `fastprep`, a C++
-equivalent that works much more quickly. Building `fastprep.cc` is a bit more
-involved: it requires you to pull and build the TensorFlow source code in order
-to provide the libraries and headers that it needs. See `fastprep.mk` for more
-details.
-
-## Training the embeddings
-
-When `prep.py` completes, it will have produced a directory containing the data
-that the Swivel trainer needs to run. Train embeddings as follows:
-
-    ./swivel.py --input_base_path /tmp/swivel_data \
-      --output_base_path /tmp/swivel_data
-
-There are a variety of parameters that you can fiddle with to customize the
-embeddings; some that you may want to experiment with include:
-
-| Option | Description |
-|:--- |:--- |
-| `--embedding_size <dim>` | The dimensionality of the embeddings that are created. By default, 300 dimensional embeddings are created. |
-| `--num_epochs <n>` | The number of iterations through the data that are performed. By default, 40 epochs are trained. |
-
-As mentioned above, access to a beefy GPU will dramatically reduce the amount of
-time it takes Swivel to train embeddings.
-
-When complete, you should find `row_embeddings.tsv` and `col_embedding.tsv` in
-the directory specified by `--output_base_path`. These files are tab-delimited
-files that contain one embedding per line. Each line contains the token
-followed by *dim* floating point numbers.
-
-## Exploring and evaluating the embeddings
-
-There are also some simple tools you can use to explore the embeddings. These
-tools work with a simple binary vector format that can be `mmap`-ed into memory
-along with a separate vocabulary file. Use `text2bin.py` to generate these
-files:
-
-    ./text2bin.py -o vecs.bin -v vocab.txt /tmp/swivel_data/*_embedding.tsv
-
-You can do some simple exploration using `nearest.py`:
-
-    ./nearest.py -v vocab.txt -e vecs.bin
-    query> dog
-    dog
-    dogs
-    cat
-    ...
-    query> man woman king
-    king
-    queen
-    princess
-    ...
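-
-The binary format is deliberately simple: `vecs.bin` holds the raw float32
-embedding matrix, one row per line of `vocab.txt`, with the dimensionality
-implied by the file size (the `analogy` tool below makes the same assumption).
-A minimal NumPy loader under that assumption:
-
-    import numpy as np
-
-    def load_embeddings(vocab_path, bin_path):
-        vocab = [line.rstrip('\n') for line in open(vocab_path)]
-        vecs = np.fromfile(bin_path, dtype=np.float32)
-        dim = vecs.size // len(vocab)  # the file size implies the dimension
-        return vocab, vecs.reshape(len(vocab), dim)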
- -To evaluate the embeddings using common word similarity and analogy datasets, -use `eval.mk` to retrieve the data sets and build the tools. Note that wordsim is currently not compatible with Python 3.x. - - make -f eval.mk - ./wordsim.py --vocab vocab.txt --embeddings vecs.bin *.ws.tab - ./analogy --vocab vocab.txt --embeddings vecs.bin *.an.tab - -The word similarity evaluation compares the embeddings' estimate of "similarity" -with human judgement using -[Spearman's rho](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient) -as the measure of correlation. (Bigger numbers are better.) - -The analogy evaluation tests how well the embeddings can predict analogies like -"man is to woman as king is to queen". - -Note that `eval.mk` forces all evaluation data into lower case. From there, -both the word similarity and analogy evaluations assume that the eval data and -the embeddings use consistent capitalization: if you train embeddings using -mixed case and evaluate them using lower case, things won't work well. - -# Contact - -If you have any questions about Swivel, feel free to post to -[swivel-embeddings@googlegroups.com](https://groups.google.com/forum/#!forum/swivel-embeddings). - diff --git a/research/swivel/analogy.cc b/research/swivel/analogy.cc deleted file mode 100644 index 5a3ff9b3b..000000000 --- a/research/swivel/analogy.cc +++ /dev/null @@ -1,365 +0,0 @@ -/* -*- Mode: C++ -*- */ - -/* - * Copyright 2016 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Computes embedding performance on analogy tasks. Accepts as input one or - * more files containing four words per line (A B C D), and determines if: - * - * vec(C) - vec(A) + vec(B) ~= vec(D) - * - * Cosine distance in the embedding space is used to retrieve neighbors. Any - * missing vocabulary items are scored as losses. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static const char usage[] = R"( -Performs analogy testing of embedding vectors. - -Usage: - - analogy --embeddings --vocab eval1.tab ... - -Options: - - --embeddings - The file containing the binary embedding vectors to evaluate. - - --vocab - The vocabulary file corresponding to the embedding vectors. - - --nthreads - The number of evaluation threads to run (default: 8) -)"; - -// Reads the vocabulary file into a map from token to vector index. -static std::unordered_map ReadVocab( - const std::string& vocab_filename) { - std::unordered_map vocab; - std::ifstream fin(vocab_filename); - - int index = 0; - for (std::string token; std::getline(fin, token); ++index) { - auto n = token.find('\t'); - if (n != std::string::npos) token = token.substr(n); - - vocab[token] = index; - } - - return vocab; -} - -// An analogy query: "A is to B as C is to D". 
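-// Queries are stored as four vocabulary indices (a, b, c, d); ReadQueries()
-// drops any query containing an out-of-vocabulary word, which therefore
-// counts against accuracy as a loss.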
-typedef std::tuple AnalogyQuery; - -std::vector ReadQueries( - const std::string &filename, - const std::unordered_map &vocab, int *total) { - std::ifstream fin(filename); - - std::vector queries; - int lineno = 0; - while (1) { - // Read the four words. - std::string words[4]; - int nread = 0; - for (int i = 0; i < 4; ++i) { - fin >> words[i]; - if (!words[i].empty()) ++nread; - } - - ++lineno; - if (nread == 0) break; - - if (nread < 4) { - std::cerr << "expected four words at line " << lineno << std::endl; - break; - } - - // Look up each word's index. - int ixs[4], nvalid; - for (nvalid = 0; nvalid < 4; ++nvalid) { - std::unordered_map::const_iterator it = - vocab.find(words[nvalid]); - - if (it == vocab.end()) break; - - ixs[nvalid] = it->second; - } - - // If we don't have all the words, count it as a loss. - if (nvalid >= 4) - queries.push_back(std::make_tuple(ixs[0], ixs[1], ixs[2], ixs[3])); - } - - *total = lineno; - return queries; -} - - -// A thread that evaluates some fraction of the analogies. -class AnalogyEvaluator { - public: - // Creates a new Analogy evaluator for a range of analogy queries. - AnalogyEvaluator(std::vector::const_iterator begin, - std::vector::const_iterator end, - const float *embeddings, const int num_embeddings, - const int dim) - : begin_(begin), - end_(end), - embeddings_(embeddings), - num_embeddings_(num_embeddings), - dim_(dim) {} - - // A thunk for pthreads. - static void* Run(void *param) { - AnalogyEvaluator *self = static_cast(param); - self->Evaluate(); - return nullptr; - } - - // Evaluates the analogies. - void Evaluate(); - - // Returns the number of correct analogies after evaluation is complete. - int GetNumCorrect() const { return correct_; } - - protected: - // The beginning of the range of queries to consider. - std::vector::const_iterator begin_; - - // The end of the range of queries to consider. - std::vector::const_iterator end_; - - // The raw embedding vectors. - const float *embeddings_; - - // The number of embedding vectors. - const int num_embeddings_; - - // The embedding vector dimensionality. - const int dim_; - - // The number of correct analogies. - int correct_; -}; - - -void AnalogyEvaluator::Evaluate() { - float* sum = new float[dim_]; - - correct_ = 0; - for (auto query = begin_; query < end_; ++query) { - const float* vec; - int a, b, c, d; - std::tie(a, b, c, d) = *query; - - // Compute C - A + B. - vec = embeddings_ + dim_ * c; - for (int i = 0; i < dim_; ++i) sum[i] = vec[i]; - - vec = embeddings_ + dim_ * a; - for (int i = 0; i < dim_; ++i) sum[i] -= vec[i]; - - vec = embeddings_ + dim_ * b; - for (int i = 0; i < dim_; ++i) sum[i] += vec[i]; - - // Find the nearest neighbor that isn't one of the query words. - int best_ix = -1; - float best_dot = -1.0; - for (int i = 0; i < num_embeddings_; ++i) { - if (i == a || i == b || i == c) continue; - - vec = embeddings_ + dim_ * i; - - float dot = 0; - for (int j = 0; j < dim_; ++j) dot += vec[j] * sum[j]; - - if (dot > best_dot) { - best_ix = i; - best_dot = dot; - } - } - - // The fourth word is the answer; did we get it right? 
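- // (The vectors were unit-normalized in main(), so the largest dot product
- // is also the nearest neighbor by cosine similarity.)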
- if (best_ix == d) ++correct_; - } - - delete[] sum; -} - - -int main(int argc, char *argv[]) { - if (argc <= 1) { - printf(usage); - return 2; - } - - std::string embeddings_filename, vocab_filename; - int nthreads = 8; - - std::vector input_filenames; - std::vector> queries; - - for (int i = 1; i < argc; ++i) { - std::string arg = argv[i]; - if (arg == "--embeddings") { - if (++i >= argc) goto argmissing; - embeddings_filename = argv[i]; - } else if (arg == "--vocab") { - if (++i >= argc) goto argmissing; - vocab_filename = argv[i]; - } else if (arg == "--nthreads") { - if (++i >= argc) goto argmissing; - if ((nthreads = atoi(argv[i])) <= 0) goto badarg; - } else if (arg == "--help") { - std::cout << usage << std::endl; - return 0; - } else if (arg[0] == '-') { - std::cerr << "unknown option: '" << arg << "'" << std::endl; - return 2; - } else { - input_filenames.push_back(arg); - } - - continue; - - argmissing: - std::cerr << "missing value for '" << argv[i - 1] << "' (--help for help)" - << std::endl; - return 2; - - badarg: - std::cerr << "invalid value '" << argv[i] << "' for '" << argv[i - 1] - << "' (--help for help)" << std::endl; - - return 2; - } - - // Read the vocabulary. - std::unordered_map vocab = ReadVocab(vocab_filename); - if (!vocab.size()) { - std::cerr << "unable to read vocabulary file '" << vocab_filename << "'" - << std::endl; - return 1; - } - - const int n = vocab.size(); - - // Read the vectors. - int fd; - if ((fd = open(embeddings_filename.c_str(), O_RDONLY)) < 0) { - std::cerr << "unable to open embeddings file '" << embeddings_filename - << "'" << std::endl; - return 1; - } - - off_t nbytes = lseek(fd, 0, SEEK_END); - if (nbytes == -1) { - std::cerr << "unable to determine file size for '" << embeddings_filename - << "'" << std::endl; - return 1; - } - - if (nbytes % (sizeof(float) * n) != 0) { - std::cerr << "'" << embeddings_filename - << "' has a strange file size; expected it to be " - "a multiple of the vocabulary size" - << std::endl; - - return 1; - } - - const int dim = nbytes / (sizeof(float) * n); - float *embeddings = static_cast(malloc(nbytes)); - lseek(fd, 0, SEEK_SET); - if (read(fd, embeddings, nbytes) < nbytes) { - std::cerr << "unable to read embeddings from " << embeddings_filename - << std::endl; - return 1; - } - - close(fd); - - /* Normalize the vectors. */ - for (int i = 0; i < n; ++i) { - float *vec = embeddings + dim * i; - float norm = 0; - for (int j = 0; j < dim; ++j) norm += vec[j] * vec[j]; - - norm = sqrt(norm); - for (int j = 0; j < dim; ++j) vec[j] /= norm; - } - - pthread_attr_t attr; - if (pthread_attr_init(&attr) != 0) { - std::cerr << "unable to initalize pthreads" << std::endl; - return 1; - } - - /* Read each input file. */ - for (const auto filename : input_filenames) { - int total = 0; - std::vector queries = - ReadQueries(filename.c_str(), vocab, &total); - - const int queries_per_thread = queries.size() / nthreads; - std::vector evaluators; - std::vector threads; - - for (int i = 0; i < nthreads; ++i) { - auto begin = queries.begin() + i * queries_per_thread; - auto end = (i + 1 < nthreads) - ? 
queries.begin() + (i + 1) * queries_per_thread - : queries.end(); - - AnalogyEvaluator *evaluator = - new AnalogyEvaluator(begin, end, embeddings, n, dim); - - pthread_t thread; - pthread_create(&thread, &attr, AnalogyEvaluator::Run, evaluator); - evaluators.push_back(evaluator); - threads.push_back(thread); - } - - for (auto &thread : threads) pthread_join(thread, 0); - - int correct = 0; - for (const AnalogyEvaluator* evaluator : evaluators) { - correct += evaluator->GetNumCorrect(); - delete evaluator; - } - - printf("%0.3f %s\n", static_cast(correct) / total, filename.c_str()); - } - - return 0; -} diff --git a/research/swivel/distributed.sh b/research/swivel/distributed.sh deleted file mode 100644 index 6aa59f751..000000000 --- a/research/swivel/distributed.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script launches a multi-process version of Swivel on a single machine. -set -e - -# A comma-separated list of parameter server processes. -PS_HOSTS="localhost:4000" - -# A comma-separated list of worker processes. -WORKER_HOSTS="localhost:5000,localhost:5001,localhost:5002,localhost:5003" - -# Where the Swivel training data is located. All processes must be able to read -# from this directory, so it ought to be a network filesystem if you're running -# on multiple servers. -INPUT_BASE_PATH="${HOME}/tmp/swivel/in" - -# Where the output and working directory is located. -OUTPUT_BASE_PATH="${HOME}/tmp/swivel/out" - -# Location of evaluation data, if you want to observe evaluation while training. -EVAL_BASE_PATH="${HOME}/tmp/swivel/eval" - -ARGS="--ps_hosts ${PS_HOSTS} ---worker_hosts ${WORKER_HOSTS} ---input_base_path ${INPUT_BASE_PATH} ---output_base_path ${OUTPUT_BASE_PATH} ---eval_base_path ${EVAL_BASE_PATH}" - -# This configuration is for a two-GPU machine. It starts four worker -# processes, two for each GPU. -python swivel.py --job_name ps --task_index 0 ${ARGS} >& /tmp/ps.0 & -python swivel.py --job_name worker --task_index 0 --gpu_device 0 ${ARGS} >& /tmp/worker.0 & -python swivel.py --job_name worker --task_index 1 --gpu_device 1 ${ARGS} >& /tmp/worker.1 & -python swivel.py --job_name worker --task_index 2 --gpu_device 0 ${ARGS} >& /tmp/worker.2 & -python swivel.py --job_name worker --task_index 3 --gpu_device 1 ${ARGS} >& /tmp/worker.3 & - -# Perhaps there is a more clever way to clean up the parameter server once all -# the workers are done. -wait %2 %3 %4 %5 -kill %1 - diff --git a/research/swivel/eval.mk b/research/swivel/eval.mk deleted file mode 100644 index b8db8c86a..000000000 --- a/research/swivel/eval.mk +++ /dev/null @@ -1,101 +0,0 @@ -# -*- Mode: Makefile -*- -# -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This makefile pulls down the evaluation datasets and formats them uniformly. -# Word similarity evaluations are formatted to contain exactly three columns: -# the two words being compared and the human judgement. -# -# Use wordsim.py and analogy to run the actual evaluations. - -CXXFLAGS=-std=c++11 -m64 -mavx -g -Ofast -Wall -LDLIBS=-lpthread -lm - -WORDSIM_EVALS= ws353sim.ws.tab \ - ws353rel.ws.tab \ - men.ws.tab \ - mturk.ws.tab \ - rarewords.ws.tab \ - simlex999.ws.tab \ - $(NULL) - -ANALOGY_EVALS= mikolov.an.tab \ - msr.an.tab \ - $(NULL) - -all: $(WORDSIM_EVALS) $(ANALOGY_EVALS) analogy - -ws353sim.ws.tab: ws353simrel.tar.gz - tar Oxfz $^ wordsim353_sim_rel/wordsim_similarity_goldstandard.txt > $@ - -ws353rel.ws.tab: ws353simrel.tar.gz - tar Oxfz $^ wordsim353_sim_rel/wordsim_relatedness_goldstandard.txt > $@ - -men.ws.tab: MEN.tar.gz - tar Oxfz $^ MEN/MEN_dataset_natural_form_full | tr ' ' '\t' > $@ - -mturk.ws.tab: Mtruk.csv - cat $^ | tr -d '\r' | tr ',' '\t' > $@ - -rarewords.ws.tab: rw.zip - unzip -p $^ rw/rw.txt | cut -f1-3 -d $$'\t' > $@ - -simlex999.ws.tab: SimLex-999.zip - unzip -p $^ SimLex-999/SimLex-999.txt \ - | tail -n +2 | cut -f1,2,4 -d $$'\t' > $@ - -mikolov.an.tab: questions-words.txt - egrep -v -E '^:' $^ | tr '[A-Z] ' '[a-z]\t' > $@ - -msr.an.tab: word_relationship.questions word_relationship.answers - cat word_relationship.questions | tr ' ' '\t' > /tmp/q - cat word_relationship.answers | cut -f2 -d ' ' > /tmp/a - paste /tmp/q /tmp/a > $@ - rm -f /tmp/q /tmp/a - - -# wget commands to fetch the datasets. Please see the original datasets for -# appropriate references if you use these. -ws353simrel.tar.gz: - wget http://alfonseca.org/pubs/ws353simrel.tar.gz - -MEN.tar.gz: - wget http://clic.cimec.unitn.it/~elia.bruni/resources/MEN.tar.gz - -Mtruk.csv: - wget http://www.kiraradinsky.com/files/Mtruk.csv - -rw.zip: - wget http://www-nlp.stanford.edu/~lmthang/morphoNLM/rw.zip - -SimLex-999.zip: - wget http://www.cl.cam.ac.uk/~fh295/SimLex-999.zip - -questions-words.txt: - wget http://download.tensorflow.org/data/questions-words.txt - -word_relationship.questions: - wget https://github.com/darshanhegde/SNLPProject/raw/master/word2vec/eval/word_relationship.questions - -word_relationship.answers: - wget https://github.com/darshanhegde/SNLPProject/raw/master/word2vec/eval/word_relationship.answers - -analogy: analogy.cc - -clean: - rm -f *.ws.tab *.an.tab analogy *.pyc - -distclean: clean - rm -f *.tgz *.tar.gz *.zip Mtruk.csv questions-words.txt word_relationship.{questions,answers} diff --git a/research/swivel/fastprep.cc b/research/swivel/fastprep.cc deleted file mode 100644 index a4bd7feef..000000000 --- a/research/swivel/fastprep.cc +++ /dev/null @@ -1,692 +0,0 @@ -/* -*- Mode: C++ -*- */ - -/* - * Copyright 2016 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * This program starts with a text file (and optionally a vocabulary file) and - * computes co-occurrence statistics. It emits output in a format that can be - * consumed by the "swivel" program. It's functionally equivalent to "prep.py", - * but works much more quickly. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "google/protobuf/io/zero_copy_stream_impl.h" -#include "tensorflow/core/example/example.pb.h" -#include "tensorflow/core/example/feature.pb.h" - -static const char usage[] = R"( -Prepares a corpus for processing by Swivel. - -Usage: - - prep --output_dir --input - -Options: - - --input - The input text. - - --output_dir - Specifies the output directory where the various Swivel data - files should be placed. This directory must exist. - - --shard_size - Specifies the shard size; default 4096. - - --min_count - The minimum number of times a word should appear to be included in the - generated vocabulary; default 5. (Ignored if --vocab is used.) - - --max_vocab - The maximum vocabulary size to generate from the input corpus; default - 102,400. (Ignored if --vocab is used.) - - --vocab - Use the specified unigram vocabulary instead of generating - it from the corpus. - - --window_size - Specifies the window size for computing co-occurrence stats; - default 10. - - --num_threads - The number of workers to calculate the co-occurrence matrix; - default 4. -)"; - -struct cooc_t { - int row; - int col; - float cnt; -}; - -typedef std::map cooc_counts_t; - -// Retrieves the next word from the input stream, treating words as simply being -// delimited by whitespace. Returns true if this is the end of a "sentence"; -// i.e., a newline. -bool NextWord(std::ifstream &fin, std::string* word) { - std::string buf; - char c; - - if (fin.eof()) { - word->erase(); - return true; - } - - // Skip leading whitespace. - do { - c = fin.get(); - } while (!fin.eof() && std::isspace(c)); - - if (fin.eof()) { - word->erase(); - return true; - } - - // Read the next word. - do { - buf += c; - c = fin.get(); - } while (!fin.eof() && !std::isspace(c)); - - *word = buf; - if (c == '\n' || fin.eof()) return true; - - // Skip trailing whitespace. - do { - c = fin.get(); - } while (!fin.eof() && std::isspace(c)); - - if (fin.eof()) return true; - - fin.unget(); - return false; -} - -// Creates a vocabulary from the most frequent terms in the input file. -std::vector CreateVocabulary(const std::string input_filename, - const int shard_size, - const int min_vocab_count, - const int max_vocab_size) { - std::vector vocab; - - // Count all the distinct tokens in the file. (XXX this will eventually - // consume all memory and should be re-written to periodically trim the data.) 
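- // Maps each distinct token to its raw corpus frequency; only the most
- // frequent entries survive the sort and truncation below to form the
- // vocabulary.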
- std::unordered_map counts; - - std::ifstream fin(input_filename, std::ifstream::ate); - - if (!fin) { - std::cerr << "couldn't read input file '" << input_filename << "'" - << std::endl; - - return vocab; - } - - const auto input_size = fin.tellg(); - fin.seekg(0); - - long long ntokens = 0; - while (!fin.eof()) { - std::string word; - NextWord(fin, &word); - counts[word] += 1; - - if (++ntokens % 1000000 == 0) { - const float pct = 100.0 * static_cast(fin.tellg()) / input_size; - fprintf(stdout, "\rComputing vocabulary: %0.1f%% complete...", pct); - std::flush(std::cout); - } - } - - std::cout << counts.size() << " distinct tokens" << std::endl; - - // Sort the vocabulary from most frequent to least frequent. - std::vector> buf; - std::copy(counts.begin(), counts.end(), std::back_inserter(buf)); - std::sort(buf.begin(), buf.end(), - [](const std::pair &a, - const std::pair &b) { - return b.second < a.second; - }); - - // Truncate to the maximum vocabulary size - if (static_cast(buf.size()) > max_vocab_size) buf.resize(max_vocab_size); - if (buf.empty()) return vocab; - - // Eliminate rare tokens and truncate to a size modulo the shard size. - int vocab_size = buf.size(); - while (vocab_size > 0 && buf[vocab_size - 1].second < min_vocab_count) - --vocab_size; - - vocab_size -= vocab_size % shard_size; - if (static_cast(buf.size()) > vocab_size) buf.resize(vocab_size); - - // Copy out the tokens. - for (const auto& pair : buf) vocab.push_back(pair.first); - - return vocab; -} - -std::vector ReadVocabulary(const std::string vocab_filename) { - std::vector vocab; - - std::ifstream fin(vocab_filename); - int index = 0; - for (std::string token; std::getline(fin, token); ++index) { - auto n = token.find('\t'); - if (n != std::string::npos) token = token.substr(n); - - vocab.push_back(token); - } - - return vocab; -} - -void WriteVocabulary(const std::vector &vocab, - const std::string &output_dirname) { - for (const std::string filename : {"row_vocab.txt", "col_vocab.txt"}) { - std::ofstream fout(output_dirname + "/" + filename); - for (const auto &token : vocab) fout << token << std::endl; - } -} - -// Manages accumulation of co-occurrence data into temporary disk buffer files. -class CoocBuffer { - public: - CoocBuffer(const std::string &output_dirname, const int num_shards, - const int shard_size); - - // Accumulate the co-occurrence counts to the buffer. - void AccumulateCoocs(const cooc_counts_t &coocs); - - // Read the buffer to produce shard files. - void WriteShards(); - - protected: - // The output directory. Also used for temporary buffer files. - const std::string output_dirname_; - - // The number of row/column shards. - const int num_shards_; - - // The number of elements per shard. - const int shard_size_; - - // Parallel arrays of temporary file paths and file descriptors. - std::vector paths_; - std::vector fds_; - - // Ensures that only one buffer file is getting written at a time. 
-  std::mutex writer_mutex_;
-};
-
-CoocBuffer::CoocBuffer(const std::string &output_dirname, const int num_shards,
-                       const int shard_size)
-    : output_dirname_(output_dirname),
-      num_shards_(num_shards),
-      shard_size_(shard_size) {
-  for (int row = 0; row < num_shards_; ++row) {
-    for (int col = 0; col < num_shards_; ++col) {
-      char filename[256];
-      sprintf(filename, "shard-%03d-%03d.tmp", row, col);
-
-      std::string path = output_dirname + "/" + filename;
-      int fd = open(path.c_str(), O_RDWR | O_CREAT | O_TRUNC, 0666);
-      assert(fd > 0);
-
-      paths_.push_back(path);
-      fds_.push_back(fd);
-    }
-  }
-}
-
-void CoocBuffer::AccumulateCoocs(const cooc_counts_t &coocs) {
-  std::vector<std::vector<cooc_t>> bufs(fds_.size());
-
-  for (const auto &cooc : coocs) {
-    const int row_id = cooc.first >> 32;
-    const int col_id = cooc.first & 0xffffffff;
-    const float cnt = cooc.second;
-
-    const int row_shard = row_id % num_shards_;
-    const int row_off = row_id / num_shards_;
-    const int col_shard = col_id % num_shards_;
-    const int col_off = col_id / num_shards_;
-
-    const int top_shard_idx = row_shard * num_shards_ + col_shard;
-    bufs[top_shard_idx].push_back(cooc_t{row_off, col_off, cnt});
-
-    const int bot_shard_idx = col_shard * num_shards_ + row_shard;
-    bufs[bot_shard_idx].push_back(cooc_t{col_off, row_off, cnt});
-  }
-
-  for (int i = 0; i < static_cast<int>(fds_.size()); ++i) {
-    std::lock_guard<std::mutex> rv(writer_mutex_);
-    const int nbytes = bufs[i].size() * sizeof(cooc_t);
-    int nwritten = write(fds_[i], bufs[i].data(), nbytes);
-    assert(nwritten == nbytes);
-  }
-}
-
-void CoocBuffer::WriteShards() {
-  for (int shard = 0; shard < static_cast<int>(fds_.size()); ++shard) {
-    const int row_shard = shard / num_shards_;
-    const int col_shard = shard % num_shards_;
-
-    std::cout << "\rwriting shard " << (shard + 1) << "/"
-              << (num_shards_ * num_shards_);
-    std::flush(std::cout);
-
-    // Construct the tf::Example proto. First, we add the global rows and
-    // columns that are present in the shard.
-    tensorflow::Example example;
-
-    auto &feature = *example.mutable_features()->mutable_feature();
-    auto global_row = feature["global_row"].mutable_int64_list();
-    auto global_col = feature["global_col"].mutable_int64_list();
-
-    for (int i = 0; i < shard_size_; ++i) {
-      global_row->add_value(row_shard + i * num_shards_);
-      global_col->add_value(col_shard + i * num_shards_);
-    }
-
-    // Next we add co-occurrences as a sparse representation. Map the
-    // co-occurrence counts that we've spooled off to disk: these are in
-    // arbitrary order and may contain duplicates.
-    const off_t nbytes = lseek(fds_[shard], 0, SEEK_END);
-    cooc_t *coocs = static_cast<cooc_t*>(
-        mmap(0, nbytes, PROT_READ | PROT_WRITE, MAP_SHARED, fds_[shard], 0));
-
-    const int ncoocs = nbytes / sizeof(cooc_t);
-    cooc_t* cur = coocs;
-    cooc_t* end = coocs + ncoocs;
-
-    auto sparse_value = feature["sparse_value"].mutable_float_list();
-    auto sparse_local_row = feature["sparse_local_row"].mutable_int64_list();
-    auto sparse_local_col = feature["sparse_local_col"].mutable_int64_list();
-
-    std::sort(cur, end, [](const cooc_t &a, const cooc_t &b) {
-      return a.row < b.row || (a.row == b.row && a.col < b.col);
-    });
-
-    // Accumulate the counts into the protocol buffer.
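
Before the accumulation loop below, it may help to see the bucketing arithmetic from `CoocBuffer::AccumulateCoocs` above in isolation: a global token id maps to shard `id % num_shards` at local offset `id / num_shards`, and every co-occurrence is buffered twice, once transposed, so the matrix stays symmetric. A standalone Python sketch (illustrative only, not from the Swivel sources):

```python
# Illustrative sketch of the shard bucketing used above (not part of the patch).
num_shards = 4

def bucket(row_id, col_id):
    row_shard, row_off = row_id % num_shards, row_id // num_shards
    col_shard, col_off = col_id % num_shards, col_id // num_shards
    return [((row_shard, col_shard), (row_off, col_off)),
            ((col_shard, row_shard), (col_off, row_off))]

# Ids (6, 9) land in shard (2, 1) at local offset (1, 2), and in the
# transposed shard (1, 2) at local offset (2, 1):
print(bucket(6, 9))
```
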
-    int last_row = -1, last_col = -1;
-    float count = 0;
-    for (; cur != end; ++cur) {
-      if (cur->row != last_row || cur->col != last_col) {
-        if (last_row >= 0 && last_col >= 0) {
-          sparse_local_row->add_value(last_row);
-          sparse_local_col->add_value(last_col);
-          sparse_value->add_value(count);
-        }
-
-        last_row = cur->row;
-        last_col = cur->col;
-        count = 0;
-      }
-
-      count += cur->cnt;
-    }
-
-    if (last_row >= 0 && last_col >= 0) {
-      sparse_local_row->add_value(last_row);
-      sparse_local_col->add_value(last_col);
-      sparse_value->add_value(count);
-    }
-
-    munmap(coocs, nbytes);
-    close(fds_[shard]);
-
-    if (sparse_local_row->value_size() * 8 >= (64 << 20)) {
-      std::cout << "Warning: you are likely to catch protobuf parsing errors "
-          "in TF 1.0 and older because the shard is too fat (>= 64MiB); see "
-          << std::endl <<
-          "kDefaultTotalBytesLimit in src/google/protobuf/io/coded_stream.h "
-          " changed in protobuf/commit/5a76e633ea9b5adb215e93fdc11e1c0c08b3fc74"
-          << std::endl <<
-          "https://github.com/tensorflow/tensorflow/issues/7311"
-          << std::endl <<
-          "Consider increasing the number of shards.";
-    }
-
-    // Write the protocol buffer as a binary blob to disk.
-    const int filename_max_size = 4096;
-    std::unique_ptr<char[]> filename(new char[filename_max_size]);
-    snprintf(filename.get(), filename_max_size, "shard-%03d-%03d.pb", row_shard,
-             col_shard);
-
-    const std::string path = output_dirname_ + "/" + filename.get();
-    int fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666);
-    assert(fd != -1);
-
-    google::protobuf::io::FileOutputStream fout(fd);
-    example.SerializeToZeroCopyStream(&fout);
-    fout.Close();
-
-    // Remove the temporary file.
-    unlink(paths_[shard].c_str());
-  }
-
-  std::cout << std::endl;
-}
-
-// Counts the co-occurrences in part of the file.
-class CoocCounter {
- public:
-  CoocCounter(const std::string &input_filename, const off_t start,
-              const off_t end, const int window_size,
-              const std::unordered_map<std::string, int> &token_to_id_map,
-              CoocBuffer *coocbuf)
-      : fin_(input_filename, std::ifstream::ate),
-        start_(start),
-        end_(end),
-        window_size_(window_size),
-        token_to_id_map_(token_to_id_map),
-        coocbuf_(coocbuf),
-        marginals_(token_to_id_map.size()) {}
-
-  // Pthreads-friendly thunk to Count.
-  static void* Run(void* param) {
-    CoocCounter* self = static_cast<CoocCounter*>(param);
-    self->Count();
-    return nullptr;
-  }
-
-  // Counts the co-occurrences.
-  void Count();
-
-  const std::vector<double>& Marginals() const { return marginals_; }
-
- protected:
-  // The input stream.
-  std::ifstream fin_;
-
-  // The range of the file to which this counter should attend.
-  const off_t start_;
-  const off_t end_;
-
-  // The window size for computing co-occurrences.
-  const int window_size_;
-
-  // A reference to the mapping from tokens to IDs.
-  const std::unordered_map<std::string, int> &token_to_id_map_;
-
-  // The buffer into which counts are to be accumulated.
-  CoocBuffer* coocbuf_;
-
-  // The marginal counts accumulated by this counter.
-  std::vector<double> marginals_;
-};
-
-void CoocCounter::Count() {
-  const int max_coocs_size = 16 * 1024 * 1024;
-
-  // A buffer of co-occurrence counts that we'll periodically sort into
-  // shards.
-  cooc_counts_t coocs;
-
-  fin_.seekg(start_);
-
-  int nlines = 0;
-  for (off_t filepos = start_; filepos < end_ && !fin_.eof();
-       filepos = fin_.tellg()) {
-    // Buffer a single sentence.
-    std::vector<int> sentence;
-    bool eos;
-    do {
-      std::string word;
-      eos = NextWord(fin_, &word);
-      auto it = token_to_id_map_.find(word);
-      if (it != token_to_id_map_.end()) sentence.push_back(it->second);
-    } while (!eos);
-
-    // Generate the co-occurrences for the sentence.
-    for (int pos = 0; pos < static_cast<int>(sentence.size()); ++pos) {
-      const int left_id = sentence[pos];
-
-      const int window_extent =
-          std::min(static_cast<int>(sentence.size()) - pos, 1 + window_size_);
-
-      for (int off = 1; off < window_extent; ++off) {
-        const int right_id = sentence[pos + off];
-        const double count = 1.0 / static_cast<double>(off);
-        const long long lo = std::min(left_id, right_id);
-        const long long hi = std::max(left_id, right_id);
-        const long long key = (hi << 32) | lo;
-        coocs[key] += count;
-
-        marginals_[left_id] += count;
-        marginals_[right_id] += count;
-      }
-
-      marginals_[left_id] += 1.0;
-      const long long key = (static_cast<long long>(left_id) << 32) |
-          static_cast<long long>(left_id);
-
-      coocs[key] += 0.5;
-    }
-
-    // Periodically flush the co-occurrences to disk.
-    if (coocs.size() > max_coocs_size) {
-      coocbuf_->AccumulateCoocs(coocs);
-      coocs.clear();
-    }
-
-    if (start_ == 0 && ++nlines % 1000 == 0) {
-      const double pct = 100.0 * filepos / end_;
-      fprintf(stdout, "\rComputing co-occurrences: %0.1f%% complete...", pct);
-      std::flush(std::cout);
-    }
-  }
-
-  // Accumulate anything we haven't flushed yet.
-  coocbuf_->AccumulateCoocs(coocs);
-
-  if (start_ == 0) std::cout << "done." << std::endl;
-}
-
-void WriteMarginals(const std::vector<double> &marginals,
-                    const std::string &output_dirname) {
-  for (const std::string filename : {"row_sums.txt", "col_sums.txt"}) {
-    std::ofstream fout(output_dirname + "/" + filename);
-    fout.setf(std::ios::fixed);
-    for (double sum : marginals) fout << sum << std::endl;
-  }
-}
-
-int main(int argc, char *argv[]) {
-  std::string input_filename;
-  std::string vocab_filename;
-  std::string output_dirname;
-  bool generate_vocab = true;
-  int max_vocab_size = 100 * 1024;
-  int min_vocab_count = 5;
-  int window_size = 10;
-  int shard_size = 4096;
-  int num_threads = 4;
-
-  for (int i = 1; i < argc; ++i) {
-    std::string arg(argv[i]);
-    if (arg == "--vocab") {
-      if (++i >= argc) goto argmissing;
-      generate_vocab = false;
-      vocab_filename = argv[i];
-    } else if (arg == "--max_vocab") {
-      if (++i >= argc) goto argmissing;
-      if ((max_vocab_size = atoi(argv[i])) <= 0) goto badarg;
-    } else if (arg == "--min_count") {
-      if (++i >= argc) goto argmissing;
-      if ((min_vocab_count = atoi(argv[i])) <= 0) goto badarg;
-    } else if (arg == "--window_size") {
-      if (++i >= argc) goto argmissing;
-      if ((window_size = atoi(argv[i])) <= 0) goto badarg;
-    } else if (arg == "--input") {
-      if (++i >= argc) goto argmissing;
-      input_filename = argv[i];
-    } else if (arg == "--output_dir") {
-      if (++i >= argc) goto argmissing;
-      output_dirname = argv[i];
-    } else if (arg == "--shard_size") {
-      if (++i >= argc) goto argmissing;
-      shard_size = atoi(argv[i]);
-    } else if (arg == "--num_threads") {
-      if (++i >= argc) goto argmissing;
-      num_threads = atoi(argv[i]);
-    } else if (arg == "--help") {
-      std::cout << usage << std::endl;
-      return 0;
-    } else {
-      std::cerr << "unknown arg '" << arg << "'; try --help?" << std::endl;
-      return 2;
-    }
-
-    continue;
-
-  badarg:
-    std::cerr << "'" << argv[i] << "' is not a valid value for '" << arg
-              << "'; try --help?" << std::endl;
-
-    return 2;
-
-  argmissing:
-    std::cerr << arg << " requires an argument; try --help?"
<< std::endl;
-  }
-
-  if (input_filename.empty()) {
-    std::cerr << "please specify the input text with '--input'; try --help?"
-              << std::endl;
-    return 2;
-  }
-
-  if (output_dirname.empty()) {
-    std::cerr << "please specify the output directory with '--output_dir'"
-              << std::endl;
-
-    return 2;
-  }
-
-  struct stat sb;
-  if (lstat(output_dirname.c_str(), &sb) != 0 || !S_ISDIR(sb.st_mode)) {
-    if (mkdir(output_dirname.c_str(), 0755) != 0) {
-      std::cerr << "output directory '" << output_dirname
-                << "' does not exist or is not a directory." << std::endl;
-      return 1;
-    }
-  }
-
-  if (lstat(input_filename.c_str(), &sb) != 0 || !S_ISREG(sb.st_mode)) {
-    std::cerr << "input file '" << input_filename
-              << "' does not exist or is not a file." << std::endl;
-
-    return 1;
-  }
-
-  // The total size of the input.
-  const off_t input_size = sb.st_size;
-
-  const std::vector<std::string> vocab =
-      generate_vocab ? CreateVocabulary(input_filename, shard_size,
-                                        min_vocab_count, max_vocab_size)
-                     : ReadVocabulary(vocab_filename);
-
-  if (!vocab.size()) {
-    std::cerr << "Empty vocabulary." << std::endl;
-    return 1;
-  }
-
-  std::cout << "Generating Swivel co-occurrence data into " << output_dirname
-            << std::endl;
-
-  std::cout << "Shard size: " << shard_size << "x" << shard_size << std::endl;
-  std::cout << "Vocab size: " << vocab.size() << std::endl;
-
-  // Write the vocabulary files into the output directory.
-  WriteVocabulary(vocab, output_dirname);
-
-  const int num_shards = vocab.size() / shard_size;
-  CoocBuffer coocbuf(output_dirname, num_shards, shard_size);
-
-  // Build a mapping from the token to its position in the vocabulary file.
-  std::unordered_map<std::string, int> token_to_id_map;
-  for (int i = 0; i < static_cast<int>(vocab.size()); ++i)
-    token_to_id_map[vocab[i]] = i;
-
-  // Compute the co-occurrences.
-  std::vector<std::thread> threads;
-  threads.reserve(num_threads);
-  std::vector<CoocCounter*> counters;
-  const off_t nbytes_per_thread = input_size / num_threads;
-  std::cout << "Running " << num_threads << " threads, each on "
-            << nbytes_per_thread << " bytes" << std::endl;
-
-  for (int i = 0; i < num_threads; ++i) {
-    // We could make this smarter and look around for newlines. But
-    // realistically that's not going to change things much.
-    const off_t start = i * nbytes_per_thread;
-    const off_t end =
-        i < num_threads - 1 ? (i + 1) * nbytes_per_thread : input_size;
-
-    CoocCounter *counter = new CoocCounter(
-        input_filename, start, end, window_size, token_to_id_map, &coocbuf);
-
-    counters.push_back(counter);
-
-    threads.emplace_back(CoocCounter::Run, counter);
-  }
-
-  // Wait for threads to finish and collect marginals.
-  std::vector<double> marginals(vocab.size());
-  for (int i = 0; i < num_threads; ++i) {
-    if (i > 0) {
-      std::cout << "joining thread #" << (i + 1) << std::endl;
-    }
-    threads[i].join();
-
-    const std::vector<double>& counter_marginals = counters[i]->Marginals();
-    for (int j = 0; j < static_cast<int>(vocab.size()); ++j)
-      marginals[j] += counter_marginals[j];
-
-    delete counters[i];
-  }
-
-  std::cout << "writing marginals..." << std::endl;
-  WriteMarginals(marginals, output_dirname);
-
-  std::cout << "writing shards..." << std::endl;
-  coocbuf.WriteShards();
-
-  return 0;
-}
diff --git a/research/swivel/fastprep.mk b/research/swivel/fastprep.mk
deleted file mode 100644
index b1798d0b6..000000000
--- a/research/swivel/fastprep.mk
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- Mode: Makefile -*-
-
-#
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# This makefile builds "fastprep", a faster version of prep.py that can be used
-# to build training data for Swivel.
-#
-# = Step 1. Install protobuf v3 =
-#
-# Ubuntu 16.10+: sudo apt install libprotobuf-dev
-# Ubuntu 16.04: https://launchpad.net/~maarten-fonville/+archive/ubuntu/ppa + replace xenial with yakkety in /etc/apt/sources.list.d/maarten-fonville-ubuntu-ppa-xenial.list
-# macOS: brew install protobuf
-#
-# = Step 2. Build "fastprep". =
-#
-# make -f fastprep.mk
-#
-# If all goes well, you should have a program that is "flag compatible" with
-# "prep.py" and runs significantly faster. Use it to generate the co-occurrence
-# matrices and other files necessary to train a Swivel matrix.
-
-
-CXXFLAGS=-std=c++11 -march=native -g -O2 -flto -Wall -I.
-LDLIBS=-lprotobuf -pthread -lm
-
-FETCHER=curl -L -o
-TF_URL=https://github.com/tensorflow/tensorflow/raw/master
-PROTOC=protoc
-
-
-%.proto: tensorflow/core/example
-	$(FETCHER) $@ $(TF_URL)/$@
-
-%.pb.cc: %.proto
-	$(PROTOC) --cpp_out=. $<
-
-fastprep: fastprep.cc tensorflow/core/example/feature.pb.cc tensorflow/core/example/example.pb.cc
-
-tensorflow/core/example:
-	@mkdir -p tensorflow/core/example
-
-clean:
-	@rm -f fastprep
-
-mrproper: clean
-	@rm -rf tensorflow
diff --git a/research/swivel/glove_to_shards.py b/research/swivel/glove_to_shards.py
deleted file mode 100755
index 4a9cd23c7..000000000
--- a/research/swivel/glove_to_shards.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Converts a Glove binary co-occurrence matrix into Swivel shards.
-
-Usage:
-
-  glove_to_shards.py --input <coocs> --vocab <vocab> --output_dir <dir>
-
-Options:
-
-  --input <filename>
-      The Glove co-occurrence file.
-
-  --vocab <filename>
-      Path to the vocabulary text file, one token per line.
-
-  --output_dir <directory>
-      Specifies the output directory where the various Swivel data
-      files should be placed.
-
-  --shard_size <int>
-      Specifies the shard size; default 4096.
-""" - -from __future__ import print_function - -import itertools -import os -import struct -import sys - -import tensorflow as tf -from six.moves import xrange - -flags = tf.app.flags - -flags.DEFINE_string('input', 'coocurrences.bin', 'Vocabulary file') -flags.DEFINE_string('vocab', 'vocab.txt', 'Vocabulary file') -flags.DEFINE_string('output_dir', '/tmp/swivel_data', 'Output directory') -flags.DEFINE_integer('shard_size', 4096, 'Shard size') - -FLAGS = tf.app.flags.FLAGS - -glove_cooc_fmt = struct.Struct('iid') -shard_cooc_fmt = struct.Struct('if') - - -def make_shard_files(coocs, nshards, vocab_sz): - """Chops the binary Glove co-occurrence matrix into shards. - - This reads the Glove binary co-occurrence file and assigns individual - co-occurrence counts to the appropriate Swivel shard. - - Args: - coocs: the co-occurrnece file to read - nshards: the number of shards along one dimension of the square matrix - vocab_sz: the vocabulary size - - Returns: - A (shard_table, marginals) tuple. The shard_table maps the row and column - shard ID to a file handle containing the co-occurrences for that shard; the - marginals contain the marginal sums. - """ - row_sums = [0] * vocab_sz - col_sums = [0] * vocab_sz - - coocs.seek(0, os.SEEK_END) - ncoocs = coocs.tell() / glove_cooc_fmt.size - coocs.seek(0, os.SEEK_SET) - - shard_files = {} - - for row in range(nshards): - for col in range(nshards): - filename = os.path.join( - FLAGS.output_dir, 'shard-%03d-%03d.bin' % (row, col)) - - shard_files[(row, col)] = open(filename, 'w+') - - for ix in xrange(ncoocs): - if ix % 1000000 == 0: - sys.stdout.write('\rsharding co-occurrences: %0.1f%% (%d/%d)' % ( - 100.0 * ix / ncoocs, ix, ncoocs)) - - sys.stdout.flush() - - bits = coocs.read(glove_cooc_fmt.size) - if not bits: - break - - # Glove has 1-indexed IDs. - row_id, col_id, cnt = glove_cooc_fmt.unpack(bits) - if row_id > vocab_sz or col_id > vocab_sz: - continue - - row_id -= 1 - row_shard = row_id % nshards - row_off = row_id / nshards - - col_id -= 1 - col_shard = col_id % nshards - col_off = col_id / nshards - - shard_pos = row_off * FLAGS.shard_size + col_off # row major - - shard_files[(row_shard, col_shard)].write( - shard_cooc_fmt.pack(shard_pos, cnt)) - - # Accumulate marginals. - row_sums[row_id] += cnt - col_sums[col_id] += cnt - - sys.stdout.write('\n') - - if any(abs(r - c) > 0.1 for r, c in itertools.izip(row_sums, col_sums)): - print('WARNING! Row and column marginals differ; is your matrix symmetric?', - file=sys.stderr) - - return (shard_files, row_sums) - -def main(_): - with open(FLAGS.vocab, 'r') as lines: - orig_vocab_sz = sum(1 for _ in lines) - - shard_sz = FLAGS.shard_size - vocab_sz = orig_vocab_sz - orig_vocab_sz % shard_sz - nshards = vocab_sz / shard_sz - - print('vocab size is %d (originally %d), %d %dx%d-element shards' % ( - vocab_sz, orig_vocab_sz, nshards * nshards, shard_sz, shard_sz)) - - # Create the output directory, if necessary - if FLAGS.output_dir and not os.path.isdir(FLAGS.output_dir): - os.makedirs(FLAGS.output_dir) - - with open(FLAGS.input, 'r') as coocs: - shard_files, marginals = make_shard_files(coocs, nshards, vocab_sz) - - # Now sort the shards and write the TFRecords. 
- filename = os.path.join(FLAGS.output_dir, 'shards.recs') - with tf.python_io.TFRecordWriter(filename) as writer: - ix = 0 - for (row, col), fh in shard_files.iteritems(): - ix += 1 - sys.stdout.write('\rwriting shard %d/%d' % (ix, len(shard_files))) - sys.stdout.flush() - - fh.seek(0) - buf = fh.read() - os.unlink(fh.name) - fh.close() - - coocs = [ - shard_cooc_fmt.unpack_from(buf, off) - for off in range(0, len(buf), shard_cooc_fmt.size)] - - # N.B. we assume that there aren't any duplicates here! - coocs.sort(key=lambda kv: kv[0]) - - def _int64s(xs): - return tf.train.Feature(int64_list=tf.train.Int64List(value=list(xs))) - - def _floats(xs): - return tf.train.Feature(float_list=tf.train.FloatList(value=list(xs))) - - example = tf.train.Example(features=tf.train.Features(feature={ - 'global_row': _int64s(row + nshards * i for i in range(shard_sz)), - 'global_col': _int64s(col + nshards * i for i in range(shard_sz)), - 'sparse_local_row': _int64s(pos / shard_sz for pos, _ in coocs), - 'sparse_local_col': _int64s(pos % shard_sz for pos, _ in coocs), - 'sparse_value': _floats(cnt for _, cnt in coocs)})) - - writer.write(example.SerializeToString()) - - print('\nwriting marginals...') - - with open(os.path.join(FLAGS.output_dir, 'marginals.txt'), 'w') as fh: - for cnt in marginals: - fh.write('%0.1f\n' % cnt) - - print('done!') - -if __name__ == '__main__': - tf.app.run() diff --git a/research/swivel/nearest.py b/research/swivel/nearest.py deleted file mode 100644 index 0fa828b66..000000000 --- a/research/swivel/nearest.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
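
The shard records written above all share one schema. For intuition, here is a minimal sketch of what a tiny shard `Example` looks like, using toy sizes (a 4x4 matrix split into 2x2 shards); the values are made up and the snippet assumes only that `tf.train.Example` is available:

```python
# Minimal sketch of the shard schema (toy sizes, illustrative only).
import tensorflow as tf

def _int64s(xs):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(xs)))

def _floats(xs):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(xs)))

# Shard (row 0, col 1) of a 4x4 matrix split 2x2 holds global rows {0, 2} and
# global columns {1, 3}, plus a sparse list of (local row, local col, count).
example = tf.train.Example(features=tf.train.Features(feature={
    'global_row': _int64s([0, 2]),
    'global_col': _int64s([1, 3]),
    'sparse_local_row': _int64s([0, 1]),
    'sparse_local_col': _int64s([0, 0]),
    'sparse_value': _floats([3.0, 0.5]),
}))
print(len(example.SerializeToString()) > 0)  # True
```
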
- -"""Simple tool for inspecting nearest neighbors and analogies.""" - -from __future__ import print_function -import re -import sys -from getopt import GetoptError, getopt - -from vecs import Vecs - -try: - opts, args = getopt(sys.argv[1:], 'v:e:', ['vocab=', 'embeddings=']) -except GetoptError as e: - print(e, file=sys.stderr) - sys.exit(2) - -opt_vocab = 'vocab.txt' -opt_embeddings = None - -for o, a in opts: - if o in ('-v', '--vocab'): - opt_vocab = a - if o in ('-e', '--embeddings'): - opt_embeddings = a - -vecs = Vecs(opt_vocab, opt_embeddings) - -while True: - sys.stdout.write('query> ') - sys.stdout.flush() - - query = sys.stdin.readline().strip() - if not query: - break - - parts = re.split(r'\s+', query) - - if len(parts) == 1: - res = vecs.neighbors(parts[0]) - - elif len(parts) == 3: - vs = [vecs.lookup(w) for w in parts] - if any(v is None for v in vs): - print('not in vocabulary: %s' % ( - ', '.join(tok for tok, v in zip(parts, vs) if v is None))) - - continue - - res = vecs.neighbors(vs[2] - vs[0] + vs[1]) - - else: - print('use a single word to query neighbors, or three words for analogy') - continue - - if not res: - continue - - for word, sim in res[:20]: - print('%0.4f: %s' % (sim, word)) - - print() diff --git a/research/swivel/prep.py b/research/swivel/prep.py deleted file mode 100644 index b72a6fb2f..000000000 --- a/research/swivel/prep.py +++ /dev/null @@ -1,317 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Prepare a corpus for processing by swivel. - -Creates a sharded word co-occurrence matrix from a text file input corpus. - -Usage: - - prep.py --output_dir --input - -Options: - - --input - The input text. - - --output_dir - Specifies the output directory where the various Swivel data - files should be placed. - - --shard_size - Specifies the shard size; default 4096. - - --min_count - Specifies the minimum number of times a word should appear - to be included in the vocabulary; default 5. - - --max_vocab - Specifies the maximum vocabulary size; default shard size - times 1024. - - --vocab - Use the specified unigram vocabulary instead of generating - it from the corpus. - - --window_size - Specifies the window size for computing co-occurrence stats; - default 10. - - --bufsz - The number of co-occurrences that are buffered; default 16M. 
- -""" - -import itertools -import math -import os -import struct -import sys - -from six.moves import xrange -import tensorflow as tf - -flags = tf.app.flags - -flags.DEFINE_string('input', '', 'The input text.') -flags.DEFINE_string('output_dir', '/tmp/swivel_data', - 'Output directory for Swivel data') -flags.DEFINE_integer('shard_size', 4096, 'The size for each shard') -flags.DEFINE_integer('min_count', 5, - 'The minimum number of times a word should occur to be ' - 'included in the vocabulary') -flags.DEFINE_integer('max_vocab', 4096 * 64, 'The maximum vocabulary size') -flags.DEFINE_string('vocab', '', 'Vocabulary to use instead of generating one') -flags.DEFINE_integer('window_size', 10, 'The window size') -flags.DEFINE_integer('bufsz', 16 * 1024 * 1024, - 'The number of co-occurrences to buffer') - -FLAGS = flags.FLAGS - -shard_cooc_fmt = struct.Struct('iif') - - -def words(line): - """Splits a line of text into tokens.""" - return line.strip().split() - - -def create_vocabulary(lines): - """Reads text lines and generates a vocabulary.""" - lines.seek(0, os.SEEK_END) - nbytes = lines.tell() - lines.seek(0, os.SEEK_SET) - - vocab = {} - for lineno, line in enumerate(lines, start=1): - for word in words(line): - vocab.setdefault(word, 0) - vocab[word] += 1 - - if lineno % 100000 == 0: - pos = lines.tell() - sys.stdout.write('\rComputing vocabulary: %0.1f%% (%d/%d)...' % ( - 100.0 * pos / nbytes, pos, nbytes)) - sys.stdout.flush() - - sys.stdout.write('\n') - - vocab = [(tok, n) for tok, n in vocab.iteritems() if n >= FLAGS.min_count] - vocab.sort(key=lambda kv: (-kv[1], kv[0])) - - num_words = min(len(vocab), FLAGS.max_vocab) - if num_words % FLAGS.shard_size != 0: - num_words -= num_words % FLAGS.shard_size - - if not num_words: - raise Exception('empty vocabulary') - - print('vocabulary contains %d tokens' % num_words) - - vocab = vocab[:num_words] - return [tok for tok, n in vocab] - - -def write_vocab_and_sums(vocab, sums, vocab_filename, sums_filename): - """Writes vocabulary and marginal sum files.""" - with open(os.path.join(FLAGS.output_dir, vocab_filename), 'w') as vocab_out: - with open(os.path.join(FLAGS.output_dir, sums_filename), 'w') as sums_out: - for tok, cnt in itertools.izip(vocab, sums): - print >> vocab_out, tok - print >> sums_out, cnt - - -def compute_coocs(lines, vocab): - """Compute the co-occurrence statistics from the text. - - This generates a temporary file for each shard that contains the intermediate - counts from the shard: these counts must be subsequently sorted and collated. - - """ - word_to_id = {tok: idx for idx, tok in enumerate(vocab)} - - lines.seek(0, os.SEEK_END) - nbytes = lines.tell() - lines.seek(0, os.SEEK_SET) - - num_shards = len(vocab) / FLAGS.shard_size - - shardfiles = {} - for row in range(num_shards): - for col in range(num_shards): - filename = os.path.join( - FLAGS.output_dir, 'shard-%03d-%03d.tmp' % (row, col)) - - shardfiles[(row, col)] = open(filename, 'w+') - - def flush_coocs(): - for (row_id, col_id), cnt in coocs.iteritems(): - row_shard = row_id % num_shards - row_off = row_id / num_shards - col_shard = col_id % num_shards - col_off = col_id / num_shards - - # Since we only stored (a, b), we emit both (a, b) and (b, a). 
- shardfiles[(row_shard, col_shard)].write( - shard_cooc_fmt.pack(row_off, col_off, cnt)) - - shardfiles[(col_shard, row_shard)].write( - shard_cooc_fmt.pack(col_off, row_off, cnt)) - - coocs = {} - sums = [0.0] * len(vocab) - - for lineno, line in enumerate(lines, start=1): - # Computes the word IDs for each word in the sentence. This has the effect - # of "stretching" the window past OOV tokens. - wids = filter( - lambda wid: wid is not None, - (word_to_id.get(w) for w in words(line))) - - for pos in xrange(len(wids)): - lid = wids[pos] - window_extent = min(FLAGS.window_size + 1, len(wids) - pos) - for off in xrange(1, window_extent): - rid = wids[pos + off] - pair = (min(lid, rid), max(lid, rid)) - count = 1.0 / off - sums[lid] += count - sums[rid] += count - coocs.setdefault(pair, 0.0) - coocs[pair] += count - - sums[lid] += 1.0 - pair = (lid, lid) - coocs.setdefault(pair, 0.0) - coocs[pair] += 0.5 # Only add 1/2 since we output (a, b) and (b, a) - - if lineno % 10000 == 0: - pos = lines.tell() - sys.stdout.write('\rComputing co-occurrences: %0.1f%% (%d/%d)...' % ( - 100.0 * pos / nbytes, pos, nbytes)) - sys.stdout.flush() - - if len(coocs) > FLAGS.bufsz: - flush_coocs() - coocs = {} - - flush_coocs() - sys.stdout.write('\n') - - return shardfiles, sums - - -def write_shards(vocab, shardfiles): - """Processes the temporary files to generate the final shard data. - - The shard data is stored as a tf.Example protos using a TFRecordWriter. The - temporary files are removed from the filesystem once they've been processed. - - """ - num_shards = len(vocab) / FLAGS.shard_size - - ix = 0 - for (row, col), fh in shardfiles.iteritems(): - ix += 1 - sys.stdout.write('\rwriting shard %d/%d' % (ix, len(shardfiles))) - sys.stdout.flush() - - # Read the entire binary co-occurrence and unpack it into an array. - fh.seek(0) - buf = fh.read() - os.unlink(fh.name) - fh.close() - - coocs = [ - shard_cooc_fmt.unpack_from(buf, off) - for off in range(0, len(buf), shard_cooc_fmt.size)] - - # Sort and merge co-occurrences for the same pairs. - coocs.sort() - - if coocs: - current_pos = 0 - current_row_col = (coocs[current_pos][0], coocs[current_pos][1]) - for next_pos in range(1, len(coocs)): - next_row_col = (coocs[next_pos][0], coocs[next_pos][1]) - if current_row_col == next_row_col: - coocs[current_pos] = ( - coocs[current_pos][0], - coocs[current_pos][1], - coocs[current_pos][2] + coocs[next_pos][2]) - else: - current_pos += 1 - if current_pos < next_pos: - coocs[current_pos] = coocs[next_pos] - - current_row_col = (coocs[current_pos][0], coocs[current_pos][1]) - - coocs = coocs[:(1 + current_pos)] - - # Convert to a TF Example proto. 
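 
Before the proto conversion below, note what the merge above accomplishes: entries for the same (local row, local col) cell, accumulated across separate buffer flushes, are summed into a single record. The same idea in a few lines of standalone Python, using `itertools.groupby` instead of the in-place scan (illustrative only, not from the Swivel sources):

```python
# Illustrative duplicate merge: sum counts for repeated (row, col) cells.
from itertools import groupby

coocs = [(0, 1, 0.5), (0, 1, 1.0), (2, 3, 0.25)]  # already sorted
merged = [(row, col, sum(c for _, _, c in group))
          for (row, col), group in groupby(coocs, key=lambda rc: rc[:2])]
print(merged)  # [(0, 1, 1.5), (2, 3, 0.25)]
```
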
-    def _int64s(xs):
-      return tf.train.Feature(int64_list=tf.train.Int64List(value=list(xs)))
-
-    def _floats(xs):
-      return tf.train.Feature(float_list=tf.train.FloatList(value=list(xs)))
-
-    example = tf.train.Example(features=tf.train.Features(feature={
-        'global_row': _int64s(
-            row + num_shards * i for i in range(FLAGS.shard_size)),
-        'global_col': _int64s(
-            col + num_shards * i for i in range(FLAGS.shard_size)),
-
-        'sparse_local_row': _int64s(cooc[0] for cooc in coocs),
-        'sparse_local_col': _int64s(cooc[1] for cooc in coocs),
-        'sparse_value': _floats(cooc[2] for cooc in coocs),
-    }))
-
-    filename = os.path.join(FLAGS.output_dir, 'shard-%03d-%03d.pb' % (row, col))
-    with open(filename, 'w') as out:
-      out.write(example.SerializeToString())
-
-  sys.stdout.write('\n')
-
-
-def main(_):
-  # Create the output directory, if necessary
-  if FLAGS.output_dir and not os.path.isdir(FLAGS.output_dir):
-    os.makedirs(FLAGS.output_dir)
-
-  # Read the file once to create the vocabulary.
-  if FLAGS.vocab:
-    with open(FLAGS.vocab, 'r') as lines:
-      vocab = [line.strip() for line in lines]
-  else:
-    with open(FLAGS.input, 'r') as lines:
-      vocab = create_vocabulary(lines)
-
-  # Now read the file again to determine the co-occurrence stats.
-  with open(FLAGS.input, 'r') as lines:
-    shardfiles, sums = compute_coocs(lines, vocab)
-
-  # Collect individual shards into the shards.recs file.
-  write_shards(vocab, shardfiles)
-
-  # Now write the marginals. They're symmetric for this application.
-  write_vocab_and_sums(vocab, sums, 'row_vocab.txt', 'row_sums.txt')
-  write_vocab_and_sums(vocab, sums, 'col_vocab.txt', 'col_sums.txt')
-
-  print('done!')
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/swivel/swivel.py b/research/swivel/swivel.py
deleted file mode 100755
index c69660c09..000000000
--- a/research/swivel/swivel.py
+++ /dev/null
@@ -1,489 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Submatrix-wise Vector Embedding Learner.
-
-Implementation of the SwiVel algorithm described at:
-http://arxiv.org/abs/1602.02215
-
-This program expects an input directory that contains the following files.
-
-  row_vocab.txt, col_vocab.txt
-
-  The row and column vocabulary files. Each file should contain one token per
-  line; these will be used to generate a tab-separated file containing the
-  trained embeddings.
-
-  row_sums.txt, col_sums.txt
-
-  The matrix row and column marginal sums. Each file should contain one
-  decimal floating point number per line which corresponds to the marginal
-  count of the matrix for that row or column.
-
-  shards.recs
-
-  A file containing the sub-matrix shards, stored as TFRecords.
Each shard is
-expected to be a serialized tf.Example protocol buffer with the following
-properties:
-
-  global_row: the global row indices contained in the shard
-  global_col: the global column indices contained in the shard
-  sparse_local_row, sparse_local_col, sparse_value: three parallel arrays
-  that are a sparse representation of the submatrix counts.
-
-It will generate embeddings, training from the input directory for the specified
-number of epochs. When complete, it will output the trained vectors to a
-tab-separated file that contains one line per embedding. Row and column
-embeddings are stored in separate files.
-
-Swivel can be run "stand-alone" or "distributed". The latter involves running
-at least one parameter server process, along with one or more worker processes.
-"""
-
-from __future__ import division
-from __future__ import print_function
-
-import glob
-import itertools
-import os
-import random
-
-import numpy as np
-import scipy.stats
-import tensorflow as tf
-
-flags = tf.app.flags
-
-flags.DEFINE_string(
-    'input_base_path', '/tmp/swivel_data',
-    'Directory containing input shards, vocabularies, and marginals.')
-flags.DEFINE_string(
-    'output_base_path', '/tmp/swivel_data',
-    'Path where to write the trained embeddings.')
-flags.DEFINE_string('eval_base_path', '', 'Path to evaluation data')
-
-# Controls for training.
-flags.DEFINE_float('num_epochs', 40, 'Number of epochs to train')
-flags.DEFINE_string('hparams', '', 'Model hyper-parameters')
-
-# Model hyper-parameters. (Move these to tf.HParams once that gets integrated
-# into TF from tf.contrib.)
-flags.DEFINE_integer(
-    'dim', 300, 'Embedding dimensionality')
-flags.DEFINE_string(
-    'optimizer', 'rmsprop', 'SGD optimizer; either "adagrad" or "rmsprop"')
-flags.DEFINE_float(
-    'learning_rate', 0.1, 'Optimizer learning rate')
-flags.DEFINE_float(
-    'momentum', 0.1, 'Optimizer momentum; used with RMSProp')
-flags.DEFINE_float(
-    'confidence_base', 0.0, 'Base for count weighting')
-flags.DEFINE_float(
-    'confidence_scale', 1.0, 'Scale for count weighting')
-flags.DEFINE_float(
-    'confidence_exponent', 0.5, 'Exponent for count weighting')
-flags.DEFINE_integer(
-    'submatrix_rows', 4096, 'Number of rows in each submatrix')
-flags.DEFINE_integer(
-    'submatrix_cols', 4096, 'Number of cols in each submatrix')
-
-# For distributed training.
-flags.DEFINE_string(
-    'ps_hosts', '',
-    'Comma-separated list of parameter server host:port; if empty, run local')
-flags.DEFINE_string(
-    'worker_hosts', '', 'Comma-separated list of worker host:port')
-flags.DEFINE_string(
-    'job_name', '', 'The job this process will run, either "ps" or "worker"')
-flags.DEFINE_integer(
-    'task_index', 0, 'The task index for this process')
-flags.DEFINE_integer(
-    'gpu_device', 0, 'The GPU device to use.')
-
-FLAGS = flags.FLAGS
-
-
-class Model(object):
-  """A Swivel model."""
-
-  def __init__(self, input_base_path, hparams):
-    """Creates a new Swivel model."""
-    # Read vocab
-    self.row_ix_to_word, self.row_word_to_ix = self._read_vocab(
-        os.path.join(input_base_path, 'row_vocab.txt'))
-    self.col_ix_to_word, self.col_word_to_ix = self._read_vocab(
-        os.path.join(input_base_path, 'col_vocab.txt'))
-
-    # Read marginals.
-    row_sums = self._read_marginals_file(
-        os.path.join(input_base_path, 'row_sums.txt'))
-    col_sums = self._read_marginals_file(
-        os.path.join(input_base_path, 'col_sums.txt'))
-
-    # Construct input tensors.
- count_matrix_files = glob.glob( - os.path.join(input_base_path, 'shard-*.pb')) - - global_rows, global_cols, counts = self._count_matrix_input( - count_matrix_files, hparams.submatrix_rows, hparams.submatrix_cols) - - # Create embedding variables. - sigma = 1.0 / np.sqrt(hparams.dim) - self.row_embedding = tf.get_variable( - 'row_embedding', - shape=[len(row_sums), hparams.dim], - initializer=tf.random_normal_initializer(0, sigma), - dtype=tf.float32) - self.col_embedding = tf.get_variable( - 'col_embedding', - shape=[len(col_sums), hparams.dim], - initializer=tf.random_normal_initializer(0, sigma), - dtype=tf.float32) - - matrix_log_sum = np.log(np.sum(row_sums) + 1) - row_bias = tf.constant( - [np.log(x + 1) for x in row_sums], dtype=tf.float32) - col_bias = tf.constant( - [np.log(x + 1) for x in col_sums], dtype=tf.float32) - - # Fetch embeddings. - selected_rows = tf.nn.embedding_lookup(self.row_embedding, global_rows) - selected_cols = tf.nn.embedding_lookup(self.col_embedding, global_cols) - - selected_row_bias = tf.gather(row_bias, global_rows) - selected_col_bias = tf.gather(col_bias, global_cols) - - predictions = tf.matmul(selected_rows, selected_cols, transpose_b=True) - - # These binary masks separate zero from non-zero values. - count_is_nonzero = tf.to_float(tf.cast(counts, tf.bool)) - count_is_zero = 1 - count_is_nonzero - - objectives = count_is_nonzero * tf.log(counts + 1e-30) - objectives -= tf.reshape(selected_row_bias, [-1, 1]) - objectives -= selected_col_bias - objectives += matrix_log_sum - - err = predictions - objectives - - # The confidence function scales the L2 loss based on the raw - # co-occurrence count. - l2_confidence = (hparams.confidence_base + - hparams.confidence_scale * tf.pow( - counts, hparams.confidence_exponent)) - - loss_multiplier = 1 / np.sqrt( - hparams.submatrix_rows * hparams.submatrix_cols) - - l2_loss = loss_multiplier * tf.reduce_sum( - 0.5 * l2_confidence * tf.square(err)) - - sigmoid_loss = loss_multiplier * tf.reduce_sum( - tf.nn.softplus(err) * count_is_zero) - - self.loss_op = l2_loss + sigmoid_loss - - if hparams.optimizer == 'adagrad': - opt = tf.train.AdagradOptimizer(hparams.learning_rate) - elif hparams.optimizer == 'rmsprop': - opt = tf.train.RMSPropOptimizer(hparams.learning_rate, hparams.momentum) - else: - raise ValueError('unknown optimizer "%s"' % hparams.optimizer) - - self.global_step = tf.get_variable( - 'global_step', initializer=0, trainable=False) - - self.train_op = opt.minimize(self.loss_op, global_step=self.global_step) - - # One epoch trains each submatrix once. 
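
Stepping back from the graph construction above for a moment: numerically, the per-cell target assembled there is a smoothed PMI, log x_ij − log(x_i + 1) − log(x_j + 1) + log(X + 1), and the L2 error on observed cells is scaled by a confidence term base + scale·count^exponent. A NumPy sketch for a single cell, using made-up counts and the default hyper-parameters (illustrative only):

```python
# NumPy sketch of the per-cell Swivel objective (illustrative values only).
import numpy as np

count, row_sum, col_sum, total = 10.0, 1000.0, 500.0, 1e6
prediction = 1.2  # dot product of one row and one column embedding

# Target: log count - row bias - col bias + log of the matrix total.
objective = (np.log(count + 1e-30) - np.log(row_sum + 1)
             - np.log(col_sum + 1) + np.log(total + 1))
err = prediction - objective

# Observed cells pay a confidence-weighted L2 cost...
l2_confidence = 0.0 + 1.0 * count ** 0.5   # base + scale * count^exponent
loss_observed = 0.5 * l2_confidence * err ** 2

# ...while unobserved cells (count == 0) would pay a softplus cost instead.
loss_unobserved = np.log1p(np.exp(err))
print(loss_observed, loss_unobserved)
```
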
- self.steps_per_epoch = ( - (len(row_sums) / hparams.submatrix_rows) * - (len(col_sums) / hparams.submatrix_cols)) - - def _read_vocab(self, filename): - """Reads the vocabulary file.""" - with open(filename) as lines: - ix_to_word = [line.strip() for line in lines] - word_to_ix = {word: ix for ix, word in enumerate(ix_to_word)} - return ix_to_word, word_to_ix - - def _read_marginals_file(self, filename): - """Reads text file with one number per line to an array.""" - with open(filename) as lines: - return [float(line.strip()) for line in lines] - - def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols): - """Creates ops that read submatrix shards from disk.""" - random.shuffle(filenames) - filename_queue = tf.train.string_input_producer(filenames) - reader = tf.WholeFileReader() - _, serialized_example = reader.read(filename_queue) - features = tf.parse_single_example( - serialized_example, - features={ - 'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64), - 'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64), - 'sparse_local_row': tf.VarLenFeature(dtype=tf.int64), - 'sparse_local_col': tf.VarLenFeature(dtype=tf.int64), - 'sparse_value': tf.VarLenFeature(dtype=tf.float32) - }) - - global_row = features['global_row'] - global_col = features['global_col'] - - sparse_local_row = features['sparse_local_row'].values - sparse_local_col = features['sparse_local_col'].values - sparse_count = features['sparse_value'].values - - sparse_indices = tf.concat( - axis=1, values=[tf.expand_dims(sparse_local_row, 1), - tf.expand_dims(sparse_local_col, 1)]) - - count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols], - sparse_count) - - return global_row, global_col, count - - def wordsim_eval_op(self, filename): - """Returns an op that runs an eval on a word similarity dataset. - - The eval dataset is assumed to be tab-separated, one scored word pair per - line. The resulting value is Spearman's rho of the human judgements with - the cosine similarity of the word embeddings. - - Args: - filename: the filename containing the word similarity data. - - Returns: - An operator that will compute Spearman's rho of the current row - embeddings. - """ - with open(filename, 'r') as fh: - tuples = (line.strip().split('\t') for line in fh.read().splitlines()) - word1s, word2s, sims = zip(*tuples) - actuals = map(float, sims) - - v1s_t = tf.nn.embedding_lookup( - self.row_embedding, - [self.row_word_to_ix.get(w, 0) for w in word1s]) - - v2s_t = tf.nn.embedding_lookup( - self.row_embedding, - [self.row_word_to_ix.get(w, 0) for w in word2s]) - - # Compute the predicted word similarity as the cosine similarity between the - # embedding vectors. - preds_t = tf.reduce_sum( - tf.nn.l2_normalize(v1s_t, dim=1) * tf.nn.l2_normalize(v2s_t, dim=1), - axis=1) - - def _op(preds): - rho, _ = scipy.stats.spearmanr(preds, actuals) - return rho - - return tf.py_func(_op, [preds_t], tf.float64) - - def analogy_eval_op(self, filename, max_vocab_size=20000): - """Returns an op that runs an eval on an analogy dataset. - - The eval dataset is assumed to be tab-separated, with four tokens per - line. The first three tokens are query terms, the last is the expected - answer. For each line (e.g., "man king woman queen"), the vectors - corresponding to the query terms are added ("king - man + woman") to produce - a query vector. 
If the expected answer's vector is the nearest neighbor to - the query vector (not counting any of the query vectors themselves), then - the line is scored as correct. The reported accuracy is the number of - correct rows divided by the total number of rows. Missing terms are - replaced with an arbitrary vector and will almost certainly result in - incorrect answers. - - Note that the results are approximate: for efficiency's sake, only the first - `max_vocab_size` terms are included in the nearest neighbor search. - - Args: - filename: the filename containing the analogy data. - max_vocab_size: the maximum number of tokens to include in the nearest - neighbor search. By default, 20000. - - Returns: - The accuracy on the analogy task. - """ - analogy_ixs = [] - with open(filename, 'r') as lines: - for line in lines: - parts = line.strip().split('\t') - if len(parts) == 4: - analogy_ixs.append([self.row_word_to_ix.get(w, 0) for w in parts]) - - # man:king :: woman:queen => king - man + woman == queen - ix1s, ix2s, ix3s, _ = zip(*analogy_ixs) - v1s_t, v2s_t, v3s_t = ( - tf.nn.l2_normalize( - tf.nn.embedding_lookup(self.row_embedding, ixs), - dim=1) - for ixs in (ix1s, ix2s, ix3s)) - - preds_t = v2s_t - v1s_t + v3s_t - - # Compute the nearest neighbors as the cosine similarity. We only consider - # up to max_vocab_size to avoid a matmul that swamps the machine. - sims_t = tf.matmul( - preds_t, - tf.nn.l2_normalize(self.row_embedding[:max_vocab_size], dim=1), - transpose_b=True) - - # Take the four nearest neighbors, since the eval explicitly discards the - # query terms. - _, preds_ixs_t = tf.nn.top_k(sims_t, 4) - - def _op(preds_ixs): - correct, total = 0, 0 - for pred_ixs, actual_ixs in itertools.izip(preds_ixs, analogy_ixs): - pred_ixs = [ix for ix in pred_ixs if ix not in actual_ixs[:3]] - correct += pred_ixs[0] == actual_ixs[3] - total += 1 - - return correct / total - - return tf.py_func(_op, [preds_ixs_t], tf.float64) - - def _write_tensor(self, vocab_path, output_path, session, embedding): - """Writes tensor to output_path as tsv.""" - embeddings = session.run(embedding) - - with open(output_path, 'w') as out_f: - with open(vocab_path) as vocab_f: - for index, word in enumerate(vocab_f): - word = word.strip() - embedding = embeddings[index] - print('\t'.join([word.strip()] + [str(x) for x in embedding]), - file=out_f) - - def write_embeddings(self, config, session): - """Writes row and column embeddings disk.""" - self._write_tensor( - os.path.join(config.input_base_path, 'row_vocab.txt'), - os.path.join(config.output_base_path, 'row_embedding.tsv'), - session, self.row_embedding) - - self._write_tensor( - os.path.join(config.input_base_path, 'col_vocab.txt'), - os.path.join(config.output_base_path, 'col_embedding.tsv'), - session, self.col_embedding) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - - # If we have ps_hosts, then we'll assume that this is going to be a - # distributed training run. Configure the cluster appropriately. Otherwise, - # we just do everything in-process. - if FLAGS.ps_hosts: - cluster = tf.train.ClusterSpec({ - 'ps': FLAGS.ps_hosts.split(','), - 'worker': FLAGS.worker_hosts.split(','), - }) - - if FLAGS.job_name == 'ps': - # Ignore the GPU if we're the parameter server. This let's the PS run on - # the same machine as a worker. 
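
Returning to the analogy eval defined earlier: its core arithmetic is the familiar king − man + woman ≈ queen query, scored by cosine similarity against candidate embeddings. A standalone NumPy sketch with toy 2-d vectors (values invented purely for illustration):

```python
# Standalone sketch of the analogy arithmetic: query = v2 - v1 + v3, and
# candidates are ranked by cosine similarity (toy vectors, illustrative only).
import numpy as np

vecs = {'man': [1.0, 0.0], 'king': [1.0, 1.0], 'woman': [0.5, 0.2],
        'queen': [0.5, 1.2], 'banana': [0.9, -0.4]}

def unit(v):
    v = np.asarray(v, dtype=np.float64)
    return v / np.linalg.norm(v)

query = unit(np.asarray(vecs['king']) - np.asarray(vecs['man'])
             + np.asarray(vecs['woman']))
best = max(('queen', 'banana'), key=lambda w: unit(vecs[w]).dot(query))
print(best)  # queen
```
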
- config = tf.ConfigProto(device_count={'GPU': 0}) - elif FLAGS.job_name == 'worker': - config = tf.ConfigProto(gpu_options=tf.GPUOptions( - visible_device_list='%d' % FLAGS.gpu_device, - allow_growth=True)) - else: - raise ValueError('unknown job name "%s"' % FLAGS.job_name) - - server = tf.train.Server( - cluster, - job_name=FLAGS.job_name, - task_index=FLAGS.task_index, - config=config) - - if FLAGS.job_name == 'ps': - return server.join() - - device_setter = tf.train.replica_device_setter( - worker_device='/job:worker/task:%d' % FLAGS.task_index, - cluster=cluster) - - else: - server = None - device_setter = tf.train.replica_device_setter(0) - - # Build the graph. - with tf.Graph().as_default(): - with tf.device(device_setter): - model = Model(FLAGS.input_base_path, FLAGS) - - # If an eval path is present, then create eval operators and set up scalar - # summaries to report on the results. Run the evals on the CPU since - # the analogy eval requires a fairly enormous tensor to be allocated to - # do the nearest neighbor search. - if FLAGS.eval_base_path: - wordsim_filenames = glob.glob( - os.path.join(FLAGS.eval_base_path, '*.ws.tab')) - - for filename in wordsim_filenames: - name = os.path.basename(filename).split('.')[0] - with tf.device(tf.DeviceSpec(device_type='CPU')): - op = model.wordsim_eval_op(filename) - tf.summary.scalar(name, op) - - analogy_filenames = glob.glob( - os.path.join(FLAGS.eval_base_path, '*.an.tab')) - - for filename in analogy_filenames: - name = os.path.basename(filename).split('.')[0] - with tf.device(tf.DeviceSpec(device_type='CPU')): - op = model.analogy_eval_op(filename) - tf.summary.scalar(name, op) - - tf.summary.scalar('loss', model.loss_op) - - # Train on, soldier. - supervisor = tf.train.Supervisor( - logdir=FLAGS.output_base_path, - is_chief=(FLAGS.task_index == 0), - save_summaries_secs=60, - recovery_wait_secs=5) - - max_step = FLAGS.num_epochs * model.steps_per_epoch - master = server.target if server else '' - with supervisor.managed_session(master) as session: - local_step = 0 - global_step = session.run(model.global_step) - while not supervisor.should_stop() and global_step < max_step: - global_step, loss, _ = session.run([ - model.global_step, model.loss_op, model.train_op]) - - if not np.isfinite(loss): - raise ValueError('non-finite cost at step %d' % global_step) - - local_step += 1 - if local_step % 10 == 0: - tf.logging.info( - 'local_step=%d global_step=%d loss=%.1f, %.1f%% complete', - local_step, global_step, loss, 100.0 * global_step / max_step) - - if FLAGS.task_index == 0: - supervisor.saver.save( - session, supervisor.save_path, global_step=global_step) - - model.write_embeddings(FLAGS, session) - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/swivel/text2bin.py b/research/swivel/text2bin.py deleted file mode 100644 index 6ccb13295..000000000 --- a/research/swivel/text2bin.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Converts vectors from text to a binary format for quicker manipulation.
-
-Usage:
-
-  text2bin.py -o <output> -v <vocab> vec1.txt [vec2.txt ...]
-
-Options:
-
-  -o <filename>, --output <filename>
-      The name of the file into which the binary vectors are written.
-
-  -v <filename>, --vocab <filename>
-      The name of the file into which the vocabulary is written.
-
-Description
-
-This program merges one or more whitespace-separated vector files into a single
-binary vector file that can be used by downstream evaluation tools in this
-directory ("wordsim.py" and "analogy").
-
-If more than one vector file is specified, then the files must be aligned
-row-wise (i.e., each line must correspond to the same embedding), and they must
-have the same number of columns (i.e., be the same dimension).
-
-"""
-
-from itertools import izip
-from getopt import GetoptError, getopt
-import os
-import struct
-import sys
-
-try:
-  opts, args = getopt(
-      sys.argv[1:], 'o:v:', ['output=', 'vocab='])
-except GetoptError as e:
-  print >> sys.stderr, e
-  sys.exit(2)
-
-opt_output = 'vecs.bin'
-opt_vocab = 'vocab.txt'
-for o, a in opts:
-  if o in ('-o', '--output'):
-    opt_output = a
-  if o in ('-v', '--vocab'):
-    opt_vocab = a
-
-def go(fhs):
-  fmt = None
-  with open(opt_vocab, 'w') as vocab_out:
-    with open(opt_output, 'w') as vecs_out:
-      for lines in izip(*fhs):
-        parts = [line.split() for line in lines]
-        token = parts[0][0]
-        if any(part[0] != token for part in parts[1:]):
-          raise IOError('vector files must be aligned')
-
-        print >> vocab_out, token
-
-        vec = [sum(float(x) for x in xs) for xs in zip(*parts)[1:]]
-        if not fmt:
-          fmt = struct.Struct('%df' % len(vec))
-
-        vecs_out.write(fmt.pack(*vec))
-
-if args:
-  fhs = [open(filename) for filename in args]
-  go(fhs)
-  for fh in fhs:
-    fh.close()
-else:
-  go([sys.stdin])
diff --git a/research/swivel/vecs.py b/research/swivel/vecs.py
deleted file mode 100644
index 806173f6a..000000000
--- a/research/swivel/vecs.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mmap
-import numpy as np
-import os
-
-from six import string_types
-
-
-class Vecs(object):
-  def __init__(self, vocab_filename, rows_filename, cols_filename=None):
-    """Initializes the vectors from a text vocabulary and binary data."""
-    with open(vocab_filename, 'r') as lines:
-      self.vocab = [line.split()[0] for line in lines]
-      self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}
-
-    n = len(self.vocab)
-
-    with open(rows_filename, 'r') as rows_fh:
-      rows_fh.seek(0, os.SEEK_END)
-      size = rows_fh.tell()
-
-      # Make sure that the file size seems reasonable.
-      if size % (4 * n) != 0:
-        raise IOError(
-            'unexpected file size for binary vector file %s' % rows_filename)
-
-      # Memory map the rows.
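
For reference between these two files: the flat binary layout that text2bin.py writes above, and that Vecs memory-maps below, is simply n·dim float32 values in row-major order (when several input files are given, their aligned rows are summed elementwise first). A standalone round-trip sketch (illustrative only, not from the Swivel sources):

```python
# Standalone sketch of the flat float32 layout: pack two 3-d embeddings,
# then read them back as an (n, dim) array.
import struct

import numpy as np

dim = 3
fmt = struct.Struct('%df' % dim)
blob = fmt.pack(1.0, 2.0, 3.0) + fmt.pack(4.0, 5.0, 6.0)

vecs = np.frombuffer(blob, dtype=np.float32).reshape(-1, dim)
print(vecs[1])  # [4. 5. 6.]
```
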
-      dim = round(size / (4 * n))
-      rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
-      rows = np.matrix(
-          np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))
-
-      # If column vectors were specified, then open them and add them to the
-      # row vectors.
-      if cols_filename:
-        with open(cols_filename, 'r') as cols_fh:
-          cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
-          cols_fh.seek(0, os.SEEK_END)
-          if cols_fh.tell() != size:
-            raise IOError('row and column vector files have different sizes')
-
-          cols = np.matrix(
-              np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))
-
-          rows += cols
-          cols_mm.close()
-
-      # Normalize so that dot products are just cosine similarity.
-      self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
-      rows_mm.close()
-
-  def similarity(self, word1, word2):
-    """Computes the similarity of two tokens."""
-    idx1 = self.word_to_idx.get(word1)
-    idx2 = self.word_to_idx.get(word2)
-    if idx1 is None or idx2 is None:
-      return None
-
-    return float(self.vecs[idx1] * self.vecs[idx2].transpose())
-
-  def neighbors(self, query):
-    """Returns the nearest neighbors to the query (a word or vector)."""
-    if isinstance(query, string_types):
-      idx = self.word_to_idx.get(query)
-      if idx is None:
-        return None
-
-      query = self.vecs[idx]
-
-    neighbors = self.vecs * query.transpose()
-
-    return sorted(
-        zip(self.vocab, neighbors.flat),
-        key=lambda kv: kv[1], reverse=True)
-
-  def lookup(self, word):
-    """Returns the embedding for a token, or None if no embedding exists."""
-    idx = self.word_to_idx.get(word)
-    return None if idx is None else self.vecs[idx]
diff --git a/research/swivel/wordsim.py b/research/swivel/wordsim.py
deleted file mode 100644
index 2d27663f8..000000000
--- a/research/swivel/wordsim.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Computes Spearman's rho with respect to human judgements.
-
-Given a set of row (and potentially column) embeddings, this computes Spearman's
-rho between the rank ordering of predicted word similarity and human judgements.
-
-Usage:
-
-  wordsim.py --embeddings=<filename> --vocab=<filename> eval1.tab eval2.tab ...
-
-Options:
-
-  --embeddings=<filename>: the vectors to test
-  --vocab=<filename>: the vocabulary file
-
-Evaluation files are assumed to be tab-separated files with exactly three
-columns. The first two columns contain the words, and the third column contains
-the scored human judgement.
- -""" - -from __future__ import print_function -import scipy.stats -import sys -from getopt import GetoptError, getopt - -from vecs import Vecs - -try: - opts, args = getopt(sys.argv[1:], '', ['embeddings=', 'vocab=']) -except GetoptError as e: - print(e, file=sys.stderr) - sys.exit(2) - -opt_embeddings = None -opt_vocab = None - -for o, a in opts: - if o == '--embeddings': - opt_embeddings = a - if o == '--vocab': - opt_vocab = a - -if not opt_vocab: - print('please specify a vocabulary file with "--vocab"', file=sys.stderr) - sys.exit(2) - -if not opt_embeddings: - print('please specify the embeddings with "--embeddings"', file=sys.stderr) - sys.exit(2) - -try: - vecs = Vecs(opt_vocab, opt_embeddings) -except IOError as e: - print(e, file=sys.stderr) - sys.exit(1) - - -def evaluate(lines): - acts, preds = [], [] - - with open(filename, 'r') as lines: - for line in lines: - w1, w2, act = line.strip().split('\t') - pred = vecs.similarity(w1, w2) - if pred is None: - continue - - acts.append(float(act)) - preds.append(pred) - - rho, _ = scipy.stats.spearmanr(acts, preds) - return rho - - -for filename in args: - with open(filename, 'r') as lines: - print('%0.3f %s' % (evaluate(lines), filename)) diff --git a/research/tcn/BUILD b/research/tcn/BUILD deleted file mode 100644 index 39297d4b7..000000000 --- a/research/tcn/BUILD +++ /dev/null @@ -1,213 +0,0 @@ -package(default_visibility = [":internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = [ - "//tcn/...", - ], -) - -py_binary( - name = "download_pretrained", - srcs = [ - "download_pretrained.py", - ], -) - -py_binary( - name = "generate_videos", - srcs = [ - "generate_videos.py", - ], - main = "generate_videos.py", - deps = [ - ":data_providers", - ":get_estimator", - ":util", - ], -) - -py_test( - name = "svtcn_loss_test", - size = "medium", - srcs = [ - "estimators/svtcn_loss.py", - "estimators/svtcn_loss_test.py", - ], - deps = [ - ":util", - ], -) - -py_library( - name = "data_providers", - srcs = [ - "data_providers.py", - ], - deps = [ - ":preprocessing", - ], -) - -py_test( - name = "data_providers_test", - size = "large", - srcs = ["data_providers_test.py"], - deps = [ - ":data_providers", - ], -) - -py_library( - name = "preprocessing", - srcs = [ - "preprocessing.py", - ], -) - -py_binary( - name = "get_estimator", - srcs = [ - "estimators/get_estimator.py", - ], - deps = [ - ":mvtcn_estimator", - ":svtcn_estimator", - ], -) - -py_binary( - name = "base_estimator", - srcs = [ - "estimators/base_estimator.py", - "model.py", - ], - deps = [ - ":data_providers", - ":util", - ], -) - -py_library( - name = "util", - srcs = [ - "utils/luatables.py", - "utils/progress.py", - "utils/util.py", - ], -) - -py_binary( - name = "mvtcn_estimator", - srcs = [ - "estimators/mvtcn_estimator.py", - ], - deps = [ - ":base_estimator", - ], -) - -py_binary( - name = "svtcn_estimator", - srcs = [ - "estimators/svtcn_estimator.py", - "estimators/svtcn_loss.py", - ], - deps = [ - ":base_estimator", - ], -) - -py_binary( - name = "train", - srcs = [ - "train.py", - ], - deps = [ - ":data_providers", - ":get_estimator", - ":util", - ], -) - -py_binary( - name = "labeled_eval", - srcs = [ - "labeled_eval.py", - ], - deps = [ - ":get_estimator", - ], -) - -py_test( - name = "labeled_eval_test", - size = "small", - srcs = ["labeled_eval_test.py"], - deps = [ - ":labeled_eval", - ], -) - -py_binary( - name = "eval", - srcs = [ - "eval.py", - ], - deps = [ - ":get_estimator", - ], -) 
-
-py_binary(
-    name = "alignment",
-    srcs = [
-        "alignment.py",
-    ],
-    deps = [
-        ":get_estimator",
-    ],
-)
-
-py_binary(
-    name = "visualize_embeddings",
-    srcs = [
-        "visualize_embeddings.py",
-    ],
-    deps = [
-        ":data_providers",
-        ":get_estimator",
-        ":util",
-    ],
-)
-
-py_binary(
-    name = "webcam",
-    srcs = [
-        "dataset/webcam.py",
-    ],
-    main = "dataset/webcam.py",
-)
-
-py_binary(
-    name = "images_to_videos",
-    srcs = [
-        "dataset/images_to_videos.py",
-    ],
-    main = "dataset/images_to_videos.py",
-)
-
-py_binary(
-    name = "videos_to_tfrecords",
-    srcs = [
-        "dataset/videos_to_tfrecords.py",
-    ],
-    main = "dataset/videos_to_tfrecords.py",
-    deps = [
-        ":preprocessing",
-    ],
-)
diff --git a/research/tcn/README.md b/research/tcn/README.md
deleted file mode 100644
index 6f9632457..000000000
--- a/research/tcn/README.md
+++ /dev/null
@@ -1,559 +0,0 @@
-![No Maintenance Intended](https://img.shields.io/badge/No%20Maintenance%20Intended-%E2%9C%95-red.svg)
-![TensorFlow Requirement: 1.x](https://img.shields.io/badge/TensorFlow%20Requirement-1.x-brightgreen)
-![TensorFlow 2 Not Supported](https://img.shields.io/badge/TensorFlow%202%20Not%20Supported-%E2%9C%95-red.svg)
-
-# Time Contrastive Networks
-
-This implements ["Time Contrastive Networks"](https://arxiv.org/abs/1704.06888),
-which is part of the larger [Self-Supervised Imitation
-Learning](https://sermanet.github.io/imitation/) project.
-
-![](https://sermanet.github.io/tcn/docs/figs/mvTCN.png)
-
-## Contacts
-
-Maintainers of TCN:
-
-* Corey Lynch: [github](https://github.com/coreylynch),
-  [twitter](https://twitter.com/coreylynch)
-* Pierre Sermanet: [github](https://github.com/sermanet),
-  [twitter](https://twitter.com/psermanet)
-
-## Contents
-
-* [Getting Started](#getting-started)
-    * [Install Dependencies](#install-dependencies)
-    * [Download the Inception v3
-      Checkpoint](#download-pretrained-inceptionv3-checkpoint)
-    * [Run all the tests](#run-all-the-tests)
-* [Concepts](#concepts)
-    * [Multi-view Webcam Video](#multi-view-webcam-video)
-    * [Data Pipelines](#data-pipelines)
-    * [Estimators](#estimators)
-    * [Models](#models)
-    * [Losses](#losses)
-    * [Inference](#inference)
-    * [Configuration](#configuration)
-    * [Monitoring Training](#monitoring-training)
-        * [KNN Classification Error](#knn-classification-error)
-        * [Multi-view Alignment](#multi-view-alignment)
-    * [Visualization](#visualization)
-        * [Nearest Neighbor Imitation
-          Videos](#nearest-neighbor-imitation-videos)
-        * [PCA & T-SNE Visualization](#pca-t-sne-visualization)
-* [Tutorial Part I: Collecting Multi-View Webcam
-  Videos](#tutorial-part-i-collecting-multi-view-webcam-videos)
-    * [Collect Webcam Videos](#collect-webcam-videos)
-    * [Create TFRecords](#create-tfrecords)
-* [Tutorial Part II: Training, Evaluation, and
-  Visualization](#tutorial-part-ii-training-evaluation-and-visualization)
-    * [Download Data](#download-data)
-    * [Download the Inception v3
-      Checkpoint](#download-pretrained-inceptionv3-checkpoint)
-    * [Define a Config](#define-a-config)
-    * [Train](#train)
-    * [Evaluate](#evaluate)
-    * [Monitor training](#monitor-training)
-    * [Visualize](#visualize)
-        * [Generate Imitation Videos](#generate-imitation-videos)
-        * [Run PCA & T-SNE Visualization](#t-sne-pca-visualization)
-
-## Getting started
-
-### Install Dependencies
-
-* [TensorFlow nightly build](https://pypi.python.org/pypi/tf-nightly-gpu),
-  installed e.g. via `pip install tf-nightly-gpu`.
-* [Bazel](http://bazel.io/docs/install.html)
-* matplotlib
-* sklearn
-* opencv
-
-### Download Pretrained InceptionV3 Checkpoint
-
-Run the script that downloads the pretrained InceptionV3 checkpoint:
-
-```bash
-cd tensorflow-models/tcn
-python download_pretrained.py
-```
-
-### Run all the tests
-
-```bash
-bazel test :all
-```
-
-## Concepts
-
-### Multi-View Webcam Video
-
-We provide utilities to collect your own multi-view videos in dataset/webcam.py.
-See the [webcam tutorial](#tutorial-part-i-collecting-multi-view-webcam-videos)
-for an end-to-end example of how to collect multi-view webcam data and convert
-it to the TFRecord format expected by this library.
-
-## Data Pipelines
-
-We use the [tf.data.Dataset
-API](https://www.tensorflow.org/guide/datasets) to construct input
-pipelines that feed training, evaluation, and visualization. These pipelines are
-defined in `data_providers.py`.
-
-## Estimators
-
-We define training, evaluation, and inference behavior using the
-[tf.estimator.Estimator
-API](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator). See
-`estimators/mvtcn_estimator.py` for an example of how multi-view TCN training,
-evaluation, and inference are implemented.
-
-## Models
-
-Different embedder architectures are implemented in model.py. We used the
-`InceptionConvSSFCEmbedder` in the pouring experiments, but we're also
-evaluating `Resnet` embedders.
-
-## Losses
-
-We use the
-[tf.contrib.losses.metric_learning](https://www.tensorflow.org/versions/master/api_docs/python/tf/contrib/losses/metric_learning)
-library's implementations of triplet loss with semi-hard negative mining and
-npairs loss. In our experiments, npairs loss has better empirical convergence
-and produces the best qualitative visualizations, and will likely be our choice
-for future experiments. See the
-[paper](http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf)
-for details on the algorithm.
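-
-Both losses consume batches of (anchor, positive) embedding pairs in which
-anchor `i` and positive `i` share a label. As a minimal sketch of the npairs
-call (TF 1.x contrib API; the estimators wire this up for you, and the
-embedding tensors below stand in for whatever your embedder produces):
-
-```python
-import tensorflow as tf
-
-# anchor_emb, positive_emb: [num_pairs, embedding_size] float32 embeddings.
-# labels: [num_pairs] int labels; each (anchor, positive) pair shares one.
-loss = tf.contrib.losses.metric_learning.npairs_loss(
-    labels=labels,
-    embeddings_anchor=anchor_emb,
-    embeddings_positive=positive_emb)
-```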
-
-## Inference
-
-We support 3 modes of inference for trained TCN models:
-
-* Mode 1: Input is a tf.Estimator input_fn (see
-  [this](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#predict)
-  for details). Output is an iterator over embeddings and additional metadata.
-  See `labeled_eval.py` for a usage example.
-
-* Mode 2: Input is a TFRecord (or list of TFRecords). This returns an
-  iterator over tuples of (embeddings, raw_image_strings, sequence_name),
-  where embeddings is the [num views, sequence length, embedding size] numpy
-  array holding the full embedded sequence (for all views), raw_image_strings
-  is a [num views, sequence length] string array holding the jpeg-encoded raw
-  image strings, and sequence_name is the name of the sequence. See
-  `generate_videos.py` for a usage example.
-
-* Mode 3: Input is a numpy array of size [num images, height, width, num
-  channels]. This returns a tuple of (embeddings, raw_image_strings), where
-  embeddings is a 2-D float32 numpy array holding [num_images, embedding_size]
-  image embeddings, and raw_image_strings is a 1-D string numpy array holding
-  [batch_size] jpeg-encoded image strings. This can be used as follows:
-
-  ```python
-  images = np.random.uniform(0, 1, size=(batch_size, 1080, 1920, 3))
-  embeddings, _ = estimator.inference(
-      images, checkpoint_path=checkpoint_path)
-  ```
-
-See `estimators/base_estimator.py` for details.
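-
-For example, Mode 2 can embed a directory of validation sequences. A minimal
-sketch (names follow `alignment.py`; `config`, `checkpointdir`,
-`checkpoint_path`, and `batch_size` are assumed to be set up as described in
-the configuration section below):
-
-```python
-from estimators.get_estimator import get_estimator
-from utils import util
-
-estimator = get_estimator(config, checkpointdir)
-records = util.GetFilesRecursively(config.data.validation)
-
-for embeddings, raw_image_strings, seqname in estimator.inference(
-    records, checkpoint_path, batch_size):
-  # embeddings: [num views, sequence length, embedding size] per sequence.
-  print(seqname, embeddings.shape)
-```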
-
-## Configuration
-
-Data pipelines, training, eval, and visualization are all configured using
-key-value parameters passed as [YAML](https://en.wikipedia.org/wiki/YAML) files.
-Configurations can be nested, e.g.:
-
-```yaml
-learning:
-  optimizer: 'adam'
-  learning_rate: 0.001
-```
-
-### T objects
-
-YAML configs are converted to a LuaTable-like `T` object (see
-`utils/luatables.py`), which behaves like a Python `dict` but allows you to use
-dot notation to access (nested) keys. For example, we could access the learning
-rate in the above config snippet via `config.learning.learning_rate`.
-
-### Multiple Configs
-
-Multiple configs can be passed to the various binaries as a comma-separated list
-of config paths via the `--config_paths` flag. This allows us to specify a
-default config that applies to all experiments (e.g. how often to write
-checkpoints, default embedder hyperparams) and one config per experiment holding
-just the hyperparams specific to the experiment (path to data, etc.).
-
-See `configs/tcn_default.yml` for an example of our default config and
-`configs/pouring.yml` for an example of how we define the pouring experiments.
-
-Configs are applied left to right. For example, consider two config files:
-
-default.yml
-
-```yaml
-learning:
-  learning_rate: 0.001 # Default learning rate.
-  optimizer: 'adam'
-```
-
-myexperiment.yml
-
-```yaml
-learning:
-  learning_rate: 1.0 # Experiment learning rate (overwrites default).
-data:
-  training: '/path/to/myexperiment/training.tfrecord'
-```
-
-Running
-
-```bash
-bazel run :train -- --config_paths='default.yml,myexperiment.yml'
-```
-
-results in a final merged config called final_training_config.yml
-
-```yaml
-learning:
-  optimizer: 'adam'
-  learning_rate: 1.0
-data:
-  training: '/path/to/myexperiment/training.tfrecord'
-```
-
-which is created automatically and stored in the experiment log directory
-alongside model checkpoints and tensorboard summaries. This gives us a record of
-the exact configs that went into each trial.
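-
-The recursive merge behaves like a nested dict update in which the right-hand
-config wins on conflicts. A minimal sketch of the semantics (the real parsing
-lives in `utils/util.py` as `ParseConfigsToLuaTable`; `merge_configs` here is
-illustrative, not the actual function):
-
-```python
-import yaml
-
-def merge_configs(base, override):
-  """Recursively merges `override` into `base`; `override` wins on conflicts."""
-  for key, value in override.items():
-    if isinstance(value, dict) and isinstance(base.get(key), dict):
-      merge_configs(base[key], value)
-    else:
-      base[key] = value
-  return base
-
-merged = {}
-for path in ['default.yml', 'myexperiment.yml']:  # Applied left to right.
-  with open(path) as f:
-    merge_configs(merged, yaml.safe_load(f))
-
-# Wrapped in a T object (utils/luatables.py), nested keys then support dot
-# notation: config.learning.learning_rate -> 1.0.
-```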
-
-## Monitoring training
-
-We usually look at two validation metrics during training: KNN classification
-error and multi-view alignment.
-
-### KNN-Classification Error
-
-In cases where we have labeled validation data, we can compute the average
-cross-sequence KNN classification error (1.0 - recall@k=1) over all embedded
-labeled images in the validation set. See `labeled_eval.py`.
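-
-Concretely, this is 1-nearest-neighbor classification error with neighbors
-restricted to embeddings from other sequences. A rough sketch (the real
-implementation is `labeled_eval.py`; `KNNIdsWithDistances` is from
-`utils/util.py`, and the bookkeeping here is simplified):
-
-```python
-import numpy as np
-from utils import util
-
-def knn_classification_error(embeddings, labels, seq_names):
-  """Returns 1.0 - recall@k=1 over all embedded labeled images.
-
-  embeddings: [N, D] float np.array; labels, seq_names: length-N np.arrays.
-  """
-  errors = []
-  for i, query in enumerate(embeddings):
-    # Only allow neighbors that come from a different sequence.
-    mask = np.array([name != seq_names[i] for name in seq_names])
-    neighbor_ids = util.KNNIdsWithDistances(query, embeddings[mask], k=1)[0]
-    nearest_label = labels[mask][neighbor_ids[0]]
-    errors.append(float(nearest_label != labels[i]))
-  return np.mean(errors)
-```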
-
-### Multi-view Alignment
-
-In cases where there is no labeled validation data, we can look at how well our
-model aligns multiple views of the same embedded validation sequences. That is,
-for each embedded validation sequence, for all cross-view pairs, we compute the
-scaled absolute distance between ground truth time indices and knn time indices.
-See `alignment.py`.
-
-## Visualization
-
-We visualize the embedding space learned by our models in two ways: nearest
-neighbor imitation videos and PCA/T-SNE.
-
-### Nearest Neighbor Imitation Videos
-
-One of the easiest ways to evaluate the understanding of your model is to see
-how well the model can semantically align two videos via nearest neighbors in
-embedding space.
-
-Consider the case where we have multiple validation demo videos of a human or
-robot performing the same task. For example, in the pouring experiments, we
-collected many different multiview validation videos of a person pouring the
-contents of one container into another, then setting the container down. If we'd
-like to see how well our embeddings generalize across viewpoint, object/agent
-appearance, and background, we can construct what we call "Nearest Neighbor
-Imitation" videos, by embedding some validation query sequence `i` from view 1,
-and finding the nearest neighbor for each query frame in some embedded target
-sequence `j` filmed from view 2.
-[Here's](https://sermanet.github.io/tcn/docs/figs/pouring_human.mov.gif) an
-example of the final product.
-
-See `generate_videos.py` for details.
-
-### PCA & T-SNE Visualization
-
-We can also embed a set of images taken randomly from validation videos and
-visualize the embedding space using PCA projection and T-SNE in the tensorboard
-projector. See `visualize_embeddings.py` for details.
-
-## Tutorial Part I: Collecting Multi-View Webcam Videos
-
-Here we give an end-to-end example of how to collect your own multiview webcam
-videos and convert them to the TFRecord format expected by training.
-
-Note: This was tested with up to 8 concurrent [Logitech c930e
-webcams](https://www.logitech.com/en-us/product/c930e-webcam) extended with
-[Plugable 5 Meter (16 Foot) USB 2.0 Active Repeater Extension
-Cables](https://www.amazon.com/gp/product/B006LFL4X0/ref=oh_aui_detailpage_o05_s00?ie=UTF8&psc=1).
-
-### Collect webcam videos
-
-Go to dataset/webcam.py
-
-1. Plug your webcams in and run
-
-    ```bash
-    ls -ltrh /dev/video*
-    ```
-
-    You should see one device listed per connected webcam.
-
-2. Define some environment variables describing the dataset you're collecting.
-
-    ```bash
-    dataset=tutorial  # Name of the dataset.
-    mode=train  # E.g. 'train', 'validation', 'test', 'demo'.
-    num_views=2  # Number of webcams.
-    viddir=/tmp/tcn/videos  # Output directory for the videos.
-    tmp_imagedir=/tmp/tcn/tmp_images  # Temp directory to hold images.
-    debug_vids=1  # Whether or not to generate side-by-side debug videos.
-    export DISPLAY=:0.0  # This allows real time matplotlib display.
-    ```
-
-3. Run the webcam.py script.
-
-    ```bash
-    bazel build -c opt --copt=-mavx webcam && \
-    bazel-bin/webcam \
-      --dataset $dataset \
-      --mode $mode \
-      --num_views $num_views \
-      --tmp_imagedir $tmp_imagedir \
-      --viddir $viddir \
-      --debug_vids 1
-    ```
-
-4. Hit Ctrl-C when done collecting, upon which the script will compile videos
-   for each view and optionally a debug video concatenating multiple
-   simultaneous views.
-
-5. If the `--seqname` flag isn't set, the script will name the first sequence
-   '0', the second sequence '1', and so on (meaning you can just keep rerunning
-   step 3.). When you are finished, you should see an output viddir with the
-   following structure:
-
-    ```bash
-    videos/0_view0.mov
-    videos/0_view1.mov
-    ...
-    videos/0_viewM.mov
-    videos/1_viewM.mov
-    ...
-    videos/N_viewM.mov
-    for N sequences and M webcam views.
-    ```
-
-### Create TFRecords
-
-Use `dataset/videos_to_tfrecords.py` to convert the directory of videos into a
-directory of TFRecords files, one per multi-view sequence.
-
-```bash
-viddir=/tmp/tcn/videos
-dataset=tutorial
-mode=train
-videos=$viddir/$dataset
-
-bazel build -c opt videos_to_tfrecords && \
-bazel-bin/videos_to_tfrecords --logtostderr \
---input_dir $videos/$mode \
---output_dir ~/tcn_data/$dataset/$mode \
---max_per_shard 400
-```
-
-Setting `--max_per_shard` > 0 allows you to shard training data. We've observed
-that sharding long training sequences provides better performance in terms of
-global steps/sec.
-
-This should be left at the default of 0 for validation / test data.
-
-You should now have a directory of TFRecords files with the following structure:
-
-```bash
-output_dir/0.tfrecord
-...
-output_dir/N.tfrecord
-
-1 TFRecord file for each of N multi-view sequences.
-```
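-
-Each of these files holds one `tf.SequenceExample` per multi-view sequence: a
-context with the sequence name (`task`) and length (`len`), plus one feature
-list of jpeg-encoded strings per view (`view0`, `view1`, ...). A minimal
-sketch of inspecting a record (TF 1.x API, mirroring the schema that
-`data_providers.parse_sequence_example` expects):
-
-```python
-import tensorflow as tf
-
-record = next(tf.python_io.tf_record_iterator('output_dir/0.tfrecord'))
-example = tf.train.SequenceExample.FromString(record)
-
-print(example.context.feature['task'].bytes_list.value[0])
-print(example.context.feature['len'].int64_list.value[0])
-view0_frames = example.feature_lists.feature_list['view0'].feature
-print('%d jpeg frames in view0' % len(view0_frames))
-```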
-
-Now we're ready to move on to part II: training, evaluation, and visualization.
-
-## Tutorial Part II: Training, Evaluation, and Visualization
-
-Here we give an end-to-end example of how to train, evaluate, and visualize the
-embedding space learned by TCN models.
-
-### Download Data
-
-We will be using the 'Multiview Pouring' dataset, which can be downloaded using
-the download.sh script
-[here.](https://sites.google.com/site/brainrobotdata/home/multiview-pouring)
-
-The rest of the tutorial will assume that you have your data downloaded to a
-folder at `~/tcn_data`.
-
-```bash
-mkdir ~/tcn_data
-mv ~/Downloads/download.sh ~/tcn_data
-./download.sh
-```
-
-You should now have the following path containing all the data:
-
-```bash
-ls ~/tcn_data/multiview-pouring
-labels README.txt tfrecords videos
-```
-
-### Download Pretrained Inception Checkpoint
-
-If you haven't already, run the script that downloads the pretrained InceptionV3
-checkpoint:
-
-```bash
-python download_pretrained.py
-```
-
-### Define A Config
-
-For our experiment, we create 2 configs:
-
-* `configs/tcn_default.yml`: This contains all the default hyperparameters
-  that generally don't vary across experiments.
-* `configs/pouring.yml`: This contains all the hyperparameters that are
-  specific to the pouring experiment.
-
-Important note about `configs/pouring.yml`:
-
-* data.eval_cropping: We use 'pad200' for the pouring dataset, which was
-  filmed rather close up on iPhone cameras. A better choice for data filmed on
-  webcam is likely 'crop_center'. See preprocessing.py for options.
-
-### Train
-
-Run the training binary:
-
-```bash
-logdir=/tmp/tcn/pouring
-c=configs
-configs=$c/tcn_default.yml,$c/pouring.yml
-
-bazel build -c opt --copt=-mavx --config=cuda train && \
-bazel-bin/train \
---config_paths $configs --logdir $logdir
-```
-
-### Evaluate
-
-Run the binary that computes running validation loss. Set `export
-CUDA_VISIBLE_DEVICES=` to run on CPU.
-
-```bash
-bazel build -c opt --copt=-mavx eval && \
-bazel-bin/eval \
---config_paths $configs --logdir $logdir
-```
-
-Run the binary that computes running validation cross-view sequence alignment.
-Set `export CUDA_VISIBLE_DEVICES=` to run on CPU.
-
-```bash
-bazel build -c opt --copt=-mavx alignment && \
-bazel-bin/alignment \
---config_paths $configs --checkpointdir $logdir --outdir $logdir
-```
-
-Run the binary that computes running labeled KNN validation error. Set `export
-CUDA_VISIBLE_DEVICES=` to run on CPU.
-
-```bash
-bazel build -c opt --copt=-mavx labeled_eval && \
-bazel-bin/labeled_eval \
---config_paths $configs --checkpointdir $logdir --outdir $logdir
-```
-
-### Monitor training
-
-Run `tensorboard --logdir=$logdir`. After a bit of training, you should see
-curves that look like this:
-
-#### Training loss
-
-#### Validation loss
-
-#### Validation Alignment
-
-#### Average Validation KNN Classification Error
-
-#### Individual Validation KNN Classification Errors
-
-### Visualize
-
-To visualize the embedding space learned by a model, we can:
-
-#### Generate Imitation Videos
-
-```bash
-# Use the automatically generated final config file as config.
-configs=$logdir/final_training_config.yml
-# Visualize checkpoint 40001.
-checkpoint_iter=40001
-# Use validation records for visualization.
-records=~/tcn_data/multiview-pouring/tfrecords/val
-# Write videos to this location.
-outdir=$logdir/tcn_viz/imitation_vids
-```
-
-```bash
-bazel build -c opt --config=cuda --copt=-mavx generate_videos && \
-bazel-bin/generate_videos \
---config_paths $configs \
---checkpointdir $logdir \
---checkpoint_iter $checkpoint_iter \
---query_records_dir $records \
---target_records_dir $records \
---outdir $outdir
-```
-
-After the script completes, you should see a directory of videos with names
-like:
-
-`$outdir/qtrain_clearodwalla_to_clear1_realv1_imtrain_clearsoda_to_white13_realv0.mp4`
-
-that look like this:
-
-#### T-SNE / PCA Visualization
-
-Run the binary that generates embeddings and metadata.
-
-```bash
-outdir=$logdir/tcn_viz/embedding_viz
-bazel build -c opt --config=cuda --copt=-mavx visualize_embeddings && \
-bazel-bin/visualize_embeddings \
---config_paths $configs \
---checkpointdir $logdir \
---checkpoint_iter $checkpoint_iter \
---embedding_records $records \
---outdir $outdir \
---num_embed 1000 \
---sprite_dim 64
-```
-
-Run tensorboard, pointed at the embedding viz output directory.
-
-```
-tensorboard --logdir=$outdir
-```
-
-You should see something like this in tensorboard.
-
diff --git a/research/tcn/WORKSPACE b/research/tcn/WORKSPACE
deleted file mode 100644
index 87d592329..000000000
--- a/research/tcn/WORKSPACE
+++ /dev/null
@@ -1,2 +0,0 @@
-workspace(name = "tcn")
-
diff --git a/research/tcn/alignment.py b/research/tcn/alignment.py
deleted file mode 100644
index e6ee04c8e..000000000
--- a/research/tcn/alignment.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Calculates test sequence alignment score."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os
-import numpy as np
-from estimators.get_estimator import get_estimator
-from utils import util
-import tensorflow as tf
-tf.logging.set_verbosity(tf.logging.INFO)
-
-tf.flags.DEFINE_string(
-    'config_paths', '',
-    """
-    Paths to YAML configuration files defining FLAG values. Multiple files
-    can be separated by the `#` symbol. Files are merged recursively. Setting
-    a key in these files is equivalent to setting the FLAG value with
-    the same name.
-    """)
-tf.flags.DEFINE_string(
-    'model_params', '{}', 'YAML configuration string for the model parameters.')
-tf.app.flags.DEFINE_string(
-    'checkpoint_iter', '', 'Evaluate this specific checkpoint.')
-tf.app.flags.DEFINE_string(
-    'checkpointdir', '/tmp/tcn', 'Path to model checkpoints.')
-tf.app.flags.DEFINE_string('outdir', '/tmp/tcn', 'Path to write summaries to.')
-FLAGS = tf.app.flags.FLAGS
-
-
-def compute_average_alignment(
-    seqname_to_embeddings, num_views, summary_writer, training_step):
-  """Computes the average cross-view alignment for all sequence view pairs.
- - Args: - seqname_to_embeddings: Dict, mapping sequence name to a - [num_views, embedding size] numpy matrix holding all embedded views. - num_views: Int, number of simultaneous views in the dataset. - summary_writer: A `SummaryWriter` object. - training_step: Int, the training step of the model used to embed images. - - Alignment is the scaled absolute difference between the ground truth time - and the knn aligned time. - abs(|time_i - knn_time|) / sequence_length - """ - all_alignments = [] - for _, view_embeddings in seqname_to_embeddings.iteritems(): - for idx_i in range(num_views): - for idx_j in range(idx_i+1, num_views): - embeddings_view_i = view_embeddings[idx_i] - embeddings_view_j = view_embeddings[idx_j] - - seq_len = len(embeddings_view_i) - - times_i = np.array(range(seq_len)) - # Get the nearest time_index for each embedding in view_i. - times_j = np.array([util.KNNIdsWithDistances( - q, embeddings_view_j, k=1)[0][0] for q in embeddings_view_i]) - - # Compute sequence view pair alignment. - alignment = np.mean( - np.abs(np.array(times_i)-np.array(times_j))/float(seq_len)) - all_alignments.append(alignment) - print('alignment so far %f' % alignment) - average_alignment = np.mean(all_alignments) - print('Average alignment %f' % average_alignment) - summ = tf.Summary(value=[tf.Summary.Value( - tag='validation/alignment', simple_value=average_alignment)]) - summary_writer.add_summary(summ, int(training_step)) - - -def evaluate_once( - config, checkpointdir, validation_records, checkpoint_path, batch_size, - num_views): - """Evaluates and reports the validation alignment.""" - # Choose an estimator based on training strategy. - estimator = get_estimator(config, checkpointdir) - - # Embed all validation sequences. - seqname_to_embeddings = {} - for (view_embeddings, _, seqname) in estimator.inference( - validation_records, checkpoint_path, batch_size): - seqname_to_embeddings[seqname] = view_embeddings - - # Compute and report alignment statistics. - ckpt_step = int(checkpoint_path.split('-')[-1]) - summary_dir = os.path.join(FLAGS.outdir, 'alignment_summaries') - summary_writer = tf.summary.FileWriter(summary_dir) - compute_average_alignment( - seqname_to_embeddings, num_views, summary_writer, ckpt_step) - - -def main(_): - # Parse config dict from yaml config files / command line flags. - config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params) - num_views = config.data.num_views - - validation_records = util.GetFilesRecursively(config.data.validation) - batch_size = config.data.batch_size - - checkpointdir = FLAGS.checkpointdir - - # If evaluating a specific checkpoint, do that. - if FLAGS.checkpoint_iter: - checkpoint_path = os.path.join( - '%s/model.ckpt-%s' % (checkpointdir, FLAGS.checkpoint_iter)) - evaluate_once( - config, checkpointdir, validation_records, checkpoint_path, batch_size, - num_views) - else: - for checkpoint_path in tf.contrib.training.checkpoints_iterator( - checkpointdir): - evaluate_once( - config, checkpointdir, validation_records, checkpoint_path, - batch_size, num_views) - -if __name__ == '__main__': - tf.app.run() diff --git a/research/tcn/configs/pouring.yml b/research/tcn/configs/pouring.yml deleted file mode 100644 index 4cfd96271..000000000 --- a/research/tcn/configs/pouring.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Train with Multi-View TCN. 
-training_strategy: 'mvtcn' - -# Use the 'inception_conv_ss_fc' embedder, which has the structure: -# InceptionV3 -> 2 conv adaptation layers -> spatial softmax -> fully connected -# -> embedding. -embedder_strategy: 'inception_conv_ss_fc' - -# Use npairs loss. -loss_strategy: 'npairs' - -learning: - learning_rate: 0.0001 - -# Set some hyperparameters for our embedder. -inception_conv_ss_fc: - # Don't finetune the pre-trained weights. - finetune_inception: false - dropout: - # Don't dropout convolutional activations. - keep_conv: 1.0 - # Use a dropout of 0.8 on the fully connected activations. - keep_fc: 0.8 - # Use a dropout of 0.8 on the inception activations. - keep_pretrained: 0.8 - -# Size of the TCN embedding. -embedding_size: 32 - -data: - raw_height: 480 - raw_width: 360 - batch_size: 32 - examples_per_sequence: 32 - num_views: 2 - preprocessing: - # Inference-time image cropping strategy. - eval_cropping: 'pad200' - augmentation: - # Do scale augmentation. - minscale: 0.8 # When downscaling, zoom in to 80% of the central bounding box. - maxscale: 3.0 # When upscaling, zoom out to 300% of the central bounding box. - proportion_scaled_up: 0.5 # Proportion of the time to scale up rather than down. - color: true # Do color augmentation. - fast_mode: true - # Paths to the data. - training: '~/tcn_data/multiview-pouring/tfrecords/train' - validation: '~/tcn_data/multiview-pouring/tfrecords/val' - test: 'path/to/test' - labeled: - image_attr_keys: ['image/view0', 'image/view1', 'task'] - label_attr_keys: ['contact', 'distance', 'liquid_flowing', 'has_liquid', 'container_angle'] - validation: '~/tcn_data/multiview-pouring/monolithic-labeled/val' - test: '~/tcn_data/multiview-pouring/monolithic-labeled/test' - -logging: - checkpoint: - save_checkpoints_steps: 1000 \ No newline at end of file diff --git a/research/tcn/configs/tcn_default.yml b/research/tcn/configs/tcn_default.yml deleted file mode 100644 index 992f36d77..000000000 --- a/research/tcn/configs/tcn_default.yml +++ /dev/null @@ -1,115 +0,0 @@ -# These configs are the defaults we used for both the pouring and pose -# experiments. - -# Train on TPU? -use_tpu: false # Default is to run without TPU locally. -tpu: - num_shards: 1 - iterations: 100 - -# SGD / general learning hyperparameters. -learning: - max_step: 1000000 - learning_rate: 0.001 - decay_steps: 10000 - decay_factor: 1.00 - l2_reg_weight: 0.000001 - optimizer: 'adam' - -# Default metric learning loss hyperparameters. -triplet_semihard: - embedding_l2: true # Suggestion from Hyun Oh Song's slides. - margin: .2 # Default value for Facenet. -npairs: - embedding_l2: false # Suggestion from Hyun Oh Song's slides. -clustering_loss: - embedding_l2: true # Suggestion from Hyun Oh Song's slides. - margin: 1.0 # Default in deep_metric_learning. -lifted_struct: - embedding_l2: false # Suggestion from Hyun Oh Song's slides. - margin: 1.0 -contrastive: - embedding_l2: true # Suggestion from Hyun Oh Song's slides. - margin: 1.0 - -# Which method to use to train the embedding. -# Options are "mvtcn", "svtcn". -training_strategy: 'mvtcn' - -# Which embedder architecture to use. -# Options are 'inception_conv_ss_fc' (used in pouring / pose experiments), -# 'resnet'. -embedder_strategy: 'inception_conv_ss_fc' - -# Size of the TCN embedding. -embedding_size: 32 - -# Default hyperparameters for the different embedder architectures. 
-inception_conv_ss_fc:
-  pretrained_checkpoint: 'pretrained_checkpoints/inception/inception_v3.ckpt'
-  pretrained_layer: 'Mixed_5d'
-  additional_conv_sizes: [512, 512]
-  fc_hidden_sizes: [2048]
-  finetune: false
-  dropout:
-    keep_pretrained: 1.0
-    keep_conv: 1.0
-    keep_fc: 1.0
-
-resnet:
-  pretrained_checkpoint: 'pretrained_checkpoints/resnet/resnet_v2_50.ckpt'
-  pretrained_layer: 4
-  finetune: false
-  adaptation_blocks: '512_3-512_3'
-  emb_connection: 'conv'
-  fc_hidden_sizes: 'None'
-  dropout:
-    keep_pretrained: 1.0
-
-# Loss hyperparameters.
-mvtcn:
-  # Size of the window in timesteps to get random anchor-positive pairs for
-  # training.
-  window: 580 # 29fps * 20 seconds.
-
-svtcn:
-  pos_radius: 6 # 0.2 seconds * 29fps ~ 6 timesteps.
-  neg_radius: 12 # 2.0 * pos_radius.
-
-# Data configs.
-data:
-  height: 299
-  width: 299
-  preprocessing:
-    # Strategy to use when cropping images at inference time.
-    # See preprocessing.py for options.
-    eval_cropping: 'crop_center'
-    # Training scale, color augmentation hyperparameters.
-    augmentation:
-      # See preprocessing.py for a discussion of how to use these parameters.
-      minscale: 1.0
-      maxscale: 1.0
-      proportion_scaled_up: 0.5
-      color: true
-      fast_mode: true
-  num_parallel_calls: 12
-  sequence_prefetch_size: 12
-  batch_prefetch_size: 12
-  batch_size: 36
-  eval_batch_size: 36
-  embed_batch_size: 128
-
-val:
-  recall_at_k_list: [1]
-  num_eval_samples: 1000
-  eval_interval_secs: 300
-
-logging:
-  summary:
-    image_summaries: false
-    save_summaries_steps: 100
-    flush_secs: 600
-  checkpoint:
-    num_to_keep: 0 # Keep all checkpoints.
-    save_checkpoints_steps: 1000
-    secs: 1800
\ No newline at end of file
diff --git a/research/tcn/configs/test_estimator.yml b/research/tcn/configs/test_estimator.yml
deleted file mode 100644
index 4e4519477..000000000
--- a/research/tcn/configs/test_estimator.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-use_tpu: False
-training_strategy: 'mvtcn'
-loss_strategy: 'triplet_semihard'
-
-learning:
-  max_step: 2
-  optimizer: 'adam'
-
-embedding_size: 8
-
-data:
-  embed_batch_size: 12
-  batch_size: 12
-  examples_per_sequence: 12
-  num_views: 2
-  num_parallel_calls: 1
-  sequence_prefetch_size: 1
-  batch_prefetch_size: 1
-
-logging:
-  summary:
-    image_summaries: false
-    save_summaries_steps: 100
-    flush_secs: 600
-    save_summaries_secs: 60
-  checkpoint:
-    num_to_keep: 0 # Keep all checkpoints.
-    save_checkpoints_steps: 1000
-    secs: 1800
\ No newline at end of file
diff --git a/research/tcn/data_providers.py b/research/tcn/data_providers.py
deleted file mode 100644
index aa2a5f4eb..000000000
--- a/research/tcn/data_providers.py
+++ /dev/null
@@ -1,505 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Defines data providers used in training and evaluating TCNs."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-import random
-import numpy as np
-import preprocessing
-import tensorflow as tf
-
-
-def record_dataset(filename):
-  """Generate a TFRecordDataset from a `filename`."""
-  return tf.data.TFRecordDataset(filename)
-
-
-def full_sequence_provider(file_list, num_views):
-  """Provides full image sequences as raw jpeg-encoded strings.
-
-  Args:
-    file_list: List of strings, paths to TFRecords to preprocess.
-    num_views: Int, the number of simultaneous viewpoints at each timestep in
-      the dataset.
-  Returns:
-    views: A 2-D string `Tensor` holding a [num_views, sequence length] grid
-      of raw jpeg-encoded image strings for one sequence.
-    task: String, the name of the sequence.
-    seq_len: Int, the number of timesteps in the sequence.
-  """
-  def _parse_sequence(x):
-    context, views, seq_len = parse_sequence_example(x, num_views)
-    task = context['task']
-    return views, task, seq_len
-
-  data_files = tf.contrib.slim.parallel_reader.get_data_files(file_list)
-  # Build a dataset of TFRecord files.
-  dataset = tf.data.Dataset.from_tensor_slices(data_files)
-  dataset = dataset.repeat(1)
-  # Get a dataset of sequences.
-  dataset = dataset.flat_map(record_dataset)
-  # Prefetch a number of opened files.
-  dataset = dataset.prefetch(12)
-  # Use _parse_sequence to deserialize (but not decode) image strings.
-  dataset = dataset.map(_parse_sequence, num_parallel_calls=12)
-  # Prefetch batches of images.
-  dataset = dataset.prefetch(12)
-  dataset = dataset.make_one_shot_iterator()
-  views, task, seq_len = dataset.get_next()
-  return views, task, seq_len
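-
-
-# Example usage of full_sequence_provider (a sketch; assumes the TF 1.x
-# session-style execution used throughout this project):
-#
-#   views, task, seq_len = full_sequence_provider(record_paths, num_views=2)
-#   with tf.Session() as sess:
-#     np_views, np_task, np_seq_len = sess.run([views, task, seq_len])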
-
-
-def parse_labeled_example(
-    example_proto, view_index, preprocess_fn, image_attr_keys, label_attr_keys):
-  """Parses a labeled test example from a specified view.
-
-  Args:
-    example_proto: A scalar string Tensor.
-    view_index: Int, index on which view to parse.
-    preprocess_fn: A function with the signature (raw_images, is_training) ->
-      preprocessed_images, where raw_images is a 4-D float32 image `Tensor`
-      of raw images, is_training is a Boolean describing if we're in training,
-      and preprocessed_images is a 4-D float32 image `Tensor` holding
-      preprocessed images.
-    image_attr_keys: List of Strings, names for image keys.
-    label_attr_keys: List of Strings, names for label attributes.
-  Returns:
-    data: A tuple of images, attributes and tasks `Tensors`.
-  """
-  features = {}
-  for attr_key in image_attr_keys:
-    features[attr_key] = tf.FixedLenFeature((), tf.string)
-  for attr_key in label_attr_keys:
-    features[attr_key] = tf.FixedLenFeature((), tf.int64)
-  parsed_features = tf.parse_single_example(example_proto, features)
-  image_only_keys = [i for i in image_attr_keys if 'image' in i]
-  view_image_key = image_only_keys[view_index]
-  image = preprocessing.decode_image(parsed_features[view_image_key])
-  preprocessed = preprocess_fn(image, is_training=False)
-  attributes = [parsed_features[k] for k in label_attr_keys]
-  task = parsed_features['task']
-  return tuple([preprocessed] + attributes + [task])
-
-
-def labeled_data_provider(
-    filenames, preprocess_fn, view_index, image_attr_keys, label_attr_keys,
-    batch_size=32, num_epochs=1):
-  """Gets a batched dataset iterator over annotated test images + labels.
-
-  Provides a single view, specified in `view_index`.
-
-  Args:
-    filenames: List of Strings, paths to tfrecords on disk.
-    preprocess_fn: A function with the signature (raw_images, is_training) ->
-      preprocessed_images, where raw_images is a 4-D float32 image `Tensor`
-      of raw images, is_training is a Boolean describing if we're in training,
-      and preprocessed_images is a 4-D float32 image `Tensor` holding
-      preprocessed images.
-    view_index: Int, the index of the view to embed.
-    image_attr_keys: List of Strings, names for image keys.
-    label_attr_keys: List of Strings, names for label attributes.
-    batch_size: Int, size of the batch.
-    num_epochs: Int, number of epochs over the classification dataset.
-  Returns:
-    batch_images: 4-d float `Tensor` holding the batch images for the view.
-    labels: K-d int `Tensor` holding the K label attributes.
-    tasks: 1-D String `Tensor`, holding the task names for each batch element.
- """ - dataset = tf.data.TFRecordDataset(filenames) - # pylint: disable=g-long-lambda - dataset = dataset.map( - lambda p: parse_labeled_example( - p, view_index, preprocess_fn, image_attr_keys, label_attr_keys)) - dataset = dataset.repeat(num_epochs) - dataset = dataset.batch(batch_size) - data_iterator = dataset.make_one_shot_iterator() - batch_data = data_iterator.get_next() - batch_images = batch_data[0] - - batch_labels = tf.stack(batch_data[1:-1], 1) - - batch_tasks = batch_data[-1] - - batch_images = set_image_tensor_batch_dim(batch_images, batch_size) - batch_labels.set_shape([batch_size, len(label_attr_keys)]) - batch_tasks.set_shape([batch_size]) - - return batch_images, batch_labels, batch_tasks - - -def parse_sequence_example(serialized_example, num_views): - """Parses a serialized sequence example into views, sequence length data.""" - context_features = { - 'task': tf.FixedLenFeature(shape=[], dtype=tf.string), - 'len': tf.FixedLenFeature(shape=[], dtype=tf.int64) - } - view_names = ['view%d' % i for i in range(num_views)] - fixed_features = [ - tf.FixedLenSequenceFeature( - shape=[], dtype=tf.string) for _ in range(len(view_names))] - sequence_features = dict(zip(view_names, fixed_features)) - context_parse, sequence_parse = tf.parse_single_sequence_example( - serialized=serialized_example, - context_features=context_features, - sequence_features=sequence_features) - views = tf.stack([sequence_parse[v] for v in view_names]) - lens = [sequence_parse[v].get_shape().as_list()[0] for v in view_names] - assert len(set(lens)) == 1 - seq_len = tf.shape(sequence_parse[view_names[-1]])[0] - return context_parse, views, seq_len - - -def get_shuffled_input_records(file_list): - """Build a tf.data.Dataset of shuffled input TFRecords that repeats.""" - dataset = tf.data.Dataset.from_tensor_slices(file_list) - dataset = dataset.shuffle(len(file_list)) - dataset = dataset.repeat() - dataset = dataset.flat_map(record_dataset) - dataset = dataset.repeat() - return dataset - - -def get_tcn_anchor_pos_indices(seq_len, num_views, num_pairs, window): - """Gets batch TCN anchor positive timestep and view indices. - - This gets random (anchor, positive) timesteps from a sequence, and chooses - 2 random differing viewpoints for each anchor positive pair. - - Args: - seq_len: Int, the size of the batch sequence in timesteps. - num_views: Int, the number of simultaneous viewpoints at each timestep. - num_pairs: Int, the number of pairs to build. - window: Int, the window (in frames) from which to take anchor, positive - and negative indices. - Returns: - ap_time_indices: 1-D Int `Tensor` with size [num_pairs], holding the - timestep for each (anchor,pos) pair. - a_view_indices: 1-D Int `Tensor` with size [num_pairs], holding the - view index for each anchor. - p_view_indices: 1-D Int `Tensor` with size [num_pairs], holding the - view index for each positive. - """ - # Get anchor, positive time indices. - def f1(): - # Choose a random window-length range from the sequence. - range_min = tf.random_shuffle(tf.range(seq_len-window))[0] - range_max = range_min+window - return tf.range(range_min, range_max) - def f2(): - # Consider the full sequence. - return tf.range(seq_len) - time_indices = tf.cond(tf.greater(seq_len, window), f1, f2) - shuffled_indices = tf.random_shuffle(time_indices) - num_pairs = tf.minimum(seq_len, num_pairs) - ap_time_indices = shuffled_indices[:num_pairs] - - # Get opposing anchor, positive view indices. 
-  view_indices = tf.tile(
-      tf.expand_dims(tf.range(num_views), 0), (num_pairs, 1))
-  shuffled_view_indices = tf.map_fn(tf.random_shuffle, view_indices)
-  a_view_indices = shuffled_view_indices[:, 0]
-  p_view_indices = shuffled_view_indices[:, 1]
-  return ap_time_indices, a_view_indices, p_view_indices
-
-
-def set_image_tensor_batch_dim(tensor, batch_dim):
-  """Sets the batch dimension on an image tensor."""
-  shape = tensor.get_shape()
-  tensor.set_shape([batch_dim, shape[1], shape[2], shape[3]])
-  return tensor
-
-
-def parse_sequence_to_pairs_batch(
-    serialized_example, preprocess_fn, is_training, num_views, batch_size,
-    window):
-  """Parses a serialized sequence example into a batch of preprocessed data.
-
-  Args:
-    serialized_example: A serialized SequenceExample.
-    preprocess_fn: A function with the signature (raw_images, is_training) ->
-      preprocessed_images.
-    is_training: Boolean, whether or not we're in training.
-    num_views: Int, the number of simultaneous viewpoints at each timestep in
-      the dataset.
-    batch_size: Int, size of the batch to get.
-    window: Int, only take pairs from a maximum window of this size.
-  Returns:
-    anchor_prepro: A 4-D float32 `Tensor` holding preprocessed anchor images.
-    positive_prepro: A 4-D float32 `Tensor` holding preprocessed positive
-      images.
-    anchor_images: A 4-D float32 `Tensor` holding raw anchor images.
-    pos_images: A 4-D float32 `Tensor` holding raw positive images.
-    anchor_labels: A 1-D int32 `Tensor` holding anchor pair labels.
-    positive_labels: A 1-D int32 `Tensor` holding positive pair labels.
-    seq_len: Int, the number of timesteps in the sequence.
-  """
-  _, views, seq_len = parse_sequence_example(serialized_example, num_views)
-
-  # Get random (anchor, positive) timestep and viewpoint indices.
-  num_pairs = batch_size // 2
-  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
-      seq_len, num_views, num_pairs, window)
-
-  # Gather the image strings.
-  combined_anchor_indices = tf.concat(
-      [tf.expand_dims(a_view_indices, 1),
-       tf.expand_dims(ap_time_indices, 1)], 1)
-  combined_pos_indices = tf.concat(
-      [tf.expand_dims(p_view_indices, 1),
-       tf.expand_dims(ap_time_indices, 1)], 1)
-  anchor_images = tf.gather_nd(views, combined_anchor_indices)
-  pos_images = tf.gather_nd(views, combined_pos_indices)
-
-  # Decode images.
-  anchor_images = tf.map_fn(
-      preprocessing.decode_image, anchor_images, dtype=tf.float32)
-  pos_images = tf.map_fn(
-      preprocessing.decode_image, pos_images, dtype=tf.float32)
-
-  # Concatenate [anchor, positive] images into a batch and preprocess it.
-  concatenated = tf.concat([anchor_images, pos_images], 0)
-  preprocessed = preprocess_fn(concatenated, is_training)
-  anchor_prepro, positive_prepro = tf.split(preprocessed, num_or_size_splits=2,
-                                            axis=0)
-
-  # Set static batch dimensions for all image tensors
-  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
-  ims = [set_image_tensor_batch_dim(i, num_pairs) for i in ims]
-  [anchor_prepro, positive_prepro, anchor_images, pos_images] = ims
-
-  # Assign each anchor and positive the same label.
-  anchor_labels = tf.range(1, num_pairs+1)
-  positive_labels = tf.range(1, num_pairs+1)
-
-  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
-          anchor_labels, positive_labels, seq_len)
-
-
-def multiview_pairs_provider(file_list,
-                             preprocess_fn,
-                             num_views,
-                             window,
-                             is_training,
-                             batch_size,
-                             examples_per_seq=2,
-                             num_parallel_calls=12,
-                             sequence_prefetch_size=12,
-                             batch_prefetch_size=12):
-  """Provides multi-view TCN anchor-positive image pairs.
-
-  Returns batches of Multi-view TCN pairs, where each pair consists of an
-  anchor and a positive coming from different views from the same timestep.
-  Batches are filled one entire sequence at a time until
-  batch_size is exhausted.
Pairs are chosen randomly without replacement - within a sequence. - - Used by: - * triplet semihard loss. - * clustering loss. - * npairs loss. - * lifted struct loss. - * contrastive loss. - - Args: - file_list: List of Strings, paths to tfrecords. - preprocess_fn: A function with the signature (raw_images, is_training) -> - preprocessed_images, where raw_images is a 4-D float32 image `Tensor` - of raw images, is_training is a Boolean describing if we're in training, - and preprocessed_images is a 4-D float32 image `Tensor` holding - preprocessed images. - num_views: Int, the number of simultaneous viewpoints at each timestep. - window: Int, size of the window (in frames) from which to draw batch ids. - is_training: Boolean, whether or not we're in training. - batch_size: Int, how many examples in the batch (num pairs * 2). - examples_per_seq: Int, how many examples to take per sequence. - num_parallel_calls: Int, the number of elements to process in parallel by - mapper. - sequence_prefetch_size: Int, size of the buffer used to prefetch sequences. - batch_prefetch_size: Int, size of the buffer used to prefetch batches. - Returns: - batch_images: A 4-D float32 `Tensor` holding preprocessed batch images. - anchor_labels: A 1-D int32 `Tensor` holding anchor image labels. - anchor_images: A 4-D float32 `Tensor` holding raw anchor images. - positive_labels: A 1-D int32 `Tensor` holding positive image labels. - pos_images: A 4-D float32 `Tensor` holding raw positive images. - """ - def _parse_sequence(x): - return parse_sequence_to_pairs_batch( - x, preprocess_fn, is_training, num_views, examples_per_seq, window) - - # Build a buffer of shuffled input TFRecords that repeats forever. - dataset = get_shuffled_input_records(file_list) - - # Prefetch a number of opened TFRecords. - dataset = dataset.prefetch(sequence_prefetch_size) - - # Use _parse_sequence to map sequences to batches (one sequence per batch). - dataset = dataset.map( - _parse_sequence, num_parallel_calls=num_parallel_calls) - - # Filter out sequences that don't have at least examples_per_seq. - def seq_greater_than_min(seqlen, maximum): - return seqlen >= maximum - filter_fn = functools.partial(seq_greater_than_min, maximum=examples_per_seq) - dataset = dataset.filter(lambda a, b, c, d, e, f, seqlen: filter_fn(seqlen)) - - # Take a number of sequences for the batch. - assert batch_size % examples_per_seq == 0 - sequences_per_batch = batch_size // examples_per_seq - dataset = dataset.batch(sequences_per_batch) - - # Prefetch batches of images. - dataset = dataset.prefetch(batch_prefetch_size) - - iterator = dataset.make_one_shot_iterator() - data = iterator.get_next() - - # Pull out images, reshape to [batch_size, ...], concatenate anchor and pos. - ims = list(data[:4]) - anchor_labels, positive_labels = data[4:6] - - # Set labels shape. - anchor_labels.set_shape([sequences_per_batch, None]) - positive_labels.set_shape([sequences_per_batch, None]) - - def _reshape_to_batchsize(im): - """[num_sequences, num_per_seq, ...] images to [batch_size, ...].""" - sequence_ims = tf.split(im, num_or_size_splits=sequences_per_batch, axis=0) - sequence_ims = [tf.squeeze(i) for i in sequence_ims] - return tf.concat(sequence_ims, axis=0) - - # Reshape labels. - anchor_labels = _reshape_to_batchsize(anchor_labels) - positive_labels = _reshape_to_batchsize(positive_labels) - - def _set_shape(im): - """Sets a static shape for an image tensor of [sequences_per_batch,...] 
.""" - shape = im.get_shape() - im.set_shape([sequences_per_batch, shape[1], shape[2], shape[3], shape[4]]) - return im - ims = [_set_shape(im) for im in ims] - ims = [_reshape_to_batchsize(im) for im in ims] - - anchor_prepro, positive_prepro, anchor_images, pos_images = ims - batch_images = tf.concat([anchor_prepro, positive_prepro], axis=0) - - return batch_images, anchor_labels, positive_labels, anchor_images, pos_images - - -def get_svtcn_indices(seq_len, batch_size, num_views): - """Gets a random window of contiguous time indices from a sequence. - - Args: - seq_len: Int, number of timesteps in the image sequence. - batch_size: Int, size of the batch to construct. - num_views: Int, the number of simultaneous viewpoints at each - timestep in the dataset. - - Returns: - time_indices: 1-D Int `Tensor` with size [batch_size], holding the - timestep for each batch image. - view_indices: 1-D Int `Tensor` with size [batch_size], holding the - view for each batch image. This is consistent across the batch. - """ - # Get anchor, positive time indices. - def f1(): - # Choose a random contiguous range from within the sequence. - range_min = tf.random_shuffle(tf.range(seq_len-batch_size))[0] - range_max = range_min+batch_size - return tf.range(range_min, range_max) - def f2(): - # Consider the full sequence. - return tf.range(seq_len) - time_indices = tf.cond(tf.greater(seq_len, batch_size), f1, f2) - # Get opposing anchor, positive view indices. - random_view = tf.random_shuffle(tf.range(num_views))[0] - view_indices = tf.tile([random_view], (batch_size,)) - return time_indices, view_indices - - -def parse_sequence_to_svtcn_batch( - serialized_example, preprocess_fn, is_training, num_views, batch_size): - """Parses a serialized sequence example into a batch of SVTCN data.""" - _, views, seq_len = parse_sequence_example(serialized_example, num_views) - # Get svtcn indices. - time_indices, view_indices = get_svtcn_indices(seq_len, batch_size, num_views) - combined_indices = tf.concat( - [tf.expand_dims(view_indices, 1), - tf.expand_dims(time_indices, 1)], 1) - - # Gather the image strings. - images = tf.gather_nd(views, combined_indices) - - # Decode images. - images = tf.map_fn(preprocessing.decode_image, images, dtype=tf.float32) - - # Concatenate anchor and postitive images, preprocess the batch. - preprocessed = preprocess_fn(images, is_training) - - return preprocessed, images, time_indices - - -def singleview_tcn_provider(file_list, - preprocess_fn, - num_views, - is_training, - batch_size, - num_parallel_calls=12, - sequence_prefetch_size=12, - batch_prefetch_size=12): - """Provides data to train singleview TCNs. - - Args: - file_list: List of Strings, paths to tfrecords. - preprocess_fn: A function with the signature (raw_images, is_training) -> - preprocessed_images, where raw_images is a 4-D float32 image `Tensor` - of raw images, is_training is a Boolean describing if we're in training, - and preprocessed_images is a 4-D float32 image `Tensor` holding - preprocessed images. - num_views: Int, the number of simultaneous viewpoints at each timestep. - is_training: Boolean, whether or not we're in training. - batch_size: Int, how many examples in the batch. - num_parallel_calls: Int, the number of elements to process in parallel by - mapper. - sequence_prefetch_size: Int, size of the buffer used to prefetch sequences. - batch_prefetch_size: Int, size of the buffer used to prefetch batches. - - Returns: - batch_images: A 4-D float32 `Tensor` of preprocessed images. 
- raw_images: A 4-D float32 `Tensor` of raw images. - timesteps: A 1-D int32 `Tensor` of timesteps associated with each image. - """ - def _parse_sequence(x): - return parse_sequence_to_svtcn_batch( - x, preprocess_fn, is_training, num_views, batch_size) - - # Build a buffer of shuffled input TFRecords that repeats forever. - dataset = get_shuffled_input_records(file_list) - - # Prefetch a number of opened files. - dataset = dataset.prefetch(sequence_prefetch_size) - - # Use _parse_sequence to map sequences to image batches. - dataset = dataset.map( - _parse_sequence, num_parallel_calls=num_parallel_calls) - - # Prefetch batches of images. - dataset = dataset.prefetch(batch_prefetch_size) - dataset = dataset.make_one_shot_iterator() - batch_images, raw_images, timesteps = dataset.get_next() - return batch_images, raw_images, timesteps diff --git a/research/tcn/data_providers_test.py b/research/tcn/data_providers_test.py deleted file mode 100644 index e50123102..000000000 --- a/research/tcn/data_providers_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Tests for data_providers.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import data_providers -import tensorflow as tf - - -class DataTest(tf.test.TestCase): - - def testMVTripletIndices(self): - """Ensures anchor/pos indices for a TCN batch are valid.""" - tf.set_random_seed(0) - window = 580 - batch_size = 36 - num_pairs = batch_size // 2 - num_views = 2 - seq_len = 600 - # Get anchor time and view indices for this sequence. - (_, a_view_indices, - p_view_indices) = data_providers.get_tcn_anchor_pos_indices( - seq_len, num_views, num_pairs, window) - with self.test_session() as sess: - (np_a_view_indices, - np_p_view_indices) = sess.run([a_view_indices, p_view_indices]) - - # Assert no overlap between anchor and pos view indices. - np.testing.assert_equal( - np.any(np.not_equal(np_a_view_indices, np_p_view_indices)), True) - - # Assert set of view indices is a subset of expected set of view indices. - view_set = set(range(num_views)) - self.assertTrue(set(np_a_view_indices).issubset(view_set)) - self.assertTrue(set(np_p_view_indices).issubset(view_set)) - - def testSVTripletIndices(self): - """Ensures time indices for a SV triplet batch are valid.""" - seq_len = 600 - batch_size = 36 - num_views = 2 - time_indices, _ = data_providers.get_svtcn_indices( - seq_len, batch_size, num_views) - with self.test_session() as sess: - np_time_indices = sess.run(time_indices) - first = np_time_indices[0] - last = np_time_indices[-1] - # Make sure batch time indices are a contiguous range. 
-      self.assertTrue(np.array_equal(np_time_indices, range(first, last+1)))
-
-if __name__ == "__main__":
-  tf.test.main()
diff --git a/research/tcn/dataset/images_to_videos.py b/research/tcn/dataset/images_to_videos.py
deleted file mode 100644
index ad1a7387e..000000000
--- a/research/tcn/dataset/images_to_videos.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Converts temp directories of images to videos."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import os
-import shutil
-# pylint: disable=invalid-name
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
-    '--view_dirs', type=str, default='',
-    help='Comma-separated list of temp view image directories.')
-parser.add_argument(
-    '--vid_paths', type=str, default='',
-    help='Comma-separated list of video output paths.')
-parser.add_argument(
-    '--debug_path', type=str, default='',
-    help='Output path to debug video.')
-
-parser.add_argument(
-    '--debug_lhs_view', type=str, default='0',
-    help='Index of the view shown on the left side of the debug video.')
-parser.add_argument(
-    '--debug_rhs_view', type=str, default='1',
-    help='Index of the view shown on the right side of the debug video.')
-
-
-def create_vids(view_dirs, vid_paths, debug_path=None,
-                debug_lhs_view=0, debug_rhs_view=1):
-  """Creates one video per view per sequence."""
-
-  # Create the view videos.
-  for (view_dir, vidpath) in zip(view_dirs, vid_paths):
-    encode_vid_cmd = r'mencoder mf://%s/*.png \
-    -mf fps=29:type=png \
-    -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell \
-    -oac copy -o %s' % (view_dir, vidpath)
-    os.system(encode_vid_cmd)
-
-  # Optionally create a debug side-by-side video.
-  if debug_path:
-    lhs = vid_paths[int(debug_lhs_view)]
-    rhs = vid_paths[int(debug_rhs_view)]
-    os.system(r"avconv \
-    -i %s \
-    -i %s \
-    -filter_complex '[0:v]pad=iw*2:ih[int];[int][1:v]overlay=W/2:0[vid]' \
-    -map [vid] \
-    -c:v libx264 \
-    -crf 23 \
-    -preset veryfast \
-    %s" % (lhs, rhs, debug_path))
-
-
-def main():
-  FLAGS, _ = parser.parse_known_args()
-  assert FLAGS.view_dirs
-  assert FLAGS.vid_paths
-  view_dirs = FLAGS.view_dirs.split(',')
-  vid_paths = FLAGS.vid_paths.split(',')
-  create_vids(view_dirs, vid_paths, FLAGS.debug_path,
-              FLAGS.debug_lhs_view, FLAGS.debug_rhs_view)
-
-  # Cleanup temp image dirs.
-  for i in view_dirs:
-    shutil.rmtree(i)
-
-if __name__ == '__main__':
-  main()
diff --git a/research/tcn/dataset/videos_to_tfrecords.py b/research/tcn/dataset/videos_to_tfrecords.py
deleted file mode 100644
index a17411f36..000000000
--- a/research/tcn/dataset/videos_to_tfrecords.py
+++ /dev/null
@@ -1,458 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Converts videos to training, validation, test, and debug tfrecords on cns. - -Example usage: - -# From phone videos. -x=learning/brain/research/tcn/videos_to_tfrecords && \ -blaze build -c opt $x && \ -set=tmp && videos=~/data/tcn/datasets/$set/ && \ -blaze-bin/$x --logtostderr --output_dir /cns/oi-d/home/$USER/tcn_data/$set \ ---input_dir $videos/train ---debug $dataset/debug --rotate 90 --max_per_shard 400 - -# From webcam videos. -mode=train -x=learning/brain/research/tcn/videos_to_tfrecords && \ -blaze build -c opt $x && \ -set=tmp && videos=/tmp/tcn/videos/$set/ && \ -blaze-bin/$x --logtostderr \ ---output_dir /cns/oi-d/home/$USER/tcn_data/$set/$mode \ ---input_dir $videos/$mode --max_per_shard 400 - -""" -import glob -import math -import multiprocessing -from multiprocessing.pool import ThreadPool -import os -from random import shuffle -import re -from StringIO import StringIO -import cv2 -from PIL import Image -from PIL import ImageFile -from preprocessing import cv2resizeminedge -from preprocessing import cv2rotateimage -from preprocessing import shapestring -from utils.progress import Progress -import tensorflow.google as tf -tf.logging.set_verbosity(tf.logging.INFO) - - -tf.app.flags.DEFINE_string('view_pattern', '_view[_]*[0]+[.].*', - 'view regexp pattern for first view') -tf.app.flags.DEFINE_string('input_dir', '', '''input data path''') -tf.app.flags.DEFINE_integer('resize_min_edge', 0, - '''resize the smallest edge to this size.''') -tf.app.flags.DEFINE_integer('rotate', 0, '''rotate the image in degrees.''') -tf.app.flags.DEFINE_string('rotate_if_matching', None, - 'rotate only if video path matches regexp.') -tf.app.flags.DEFINE_string('output_dir', '', 'output directory for the dataset') -tf.app.flags.DEFINE_integer( - 'max_per_shard', -1, 'max # of frames per data chunk') -tf.app.flags.DEFINE_integer('expected_views', 2, 'expected number of views') -tf.app.flags.DEFINE_integer('log_frequency', 50, 'frequency of logging') -tf.app.flags.DEFINE_integer( - 'max_views_discrepancy', 100, - 'Maximum length difference (in frames) allowed between views') -tf.app.flags.DEFINE_boolean('overwrite', False, 'overwrite output files') -FLAGS = tf.app.flags.FLAGS - -feature = tf.train.Feature -bytes_feature = lambda v: feature(bytes_list=tf.train.BytesList(value=v)) -int64_feature = lambda v: feature(int64_list=tf.train.Int64List(value=v)) -float_feature = lambda v: feature(float_list=tf.train.FloatList(value=v)) - - -def FindPatternFiles(path, view_pattern, errors): - """Recursively find all files matching a certain pattern.""" - if not path: - return None - tf.logging.info( - 'Recursively searching for files matching pattern \'%s\' in %s' % - (view_pattern, path)) - view_patt = re.compile('.*' + view_pattern) - sequences = [] - for root, _, filenames in os.walk(path, followlinks=True): - path_root = root[:len(path)] - assert path_root == path - - for filename in filenames: - if view_patt.match(filename): - fullpath = os.path.join(root, re.sub(view_pattern, '', filename)) - shortpath = 
re.sub(path, '', fullpath).lstrip('/') - - # Determine if this sequence should be sharded or not. - shard = False - if FLAGS.max_per_shard > 0: - shard = True - - # Retrieve number of frames for this sequence. - num_views, length, view_paths, num_frames = GetViewInfo( - fullpath + view_pattern[0] + '*') - if num_views != FLAGS.expected_views: - tf.logging.info('Expected %d views but found: %s' % - (FLAGS.expected_views, str(view_paths))) - assert num_views == FLAGS.expected_views - assert length > 0 - # Drop sequences if view lengths differ too much. - if max(num_frames) - min(num_frames) > FLAGS.max_views_discrepancy: - error_msg = ( - 'Error: ignoring sequence with views with length difference > %d:' - '%s in %s') % (FLAGS.max_views_discrepancy, str(num_frames), - fullpath) - errors.append(error_msg) - tf.logging.error(error_msg) - else: - # Append sequence info. - sequences.append({'full': fullpath, 'name': shortpath, 'len': length, - 'start': 0, 'end': length, 'num_views': num_views, - 'shard': shard}) - return sorted(sequences, key=lambda k: k['name']) - - -def ShardSequences(sequences, max_per_shard): - """Find all sequences, shard and randomize them.""" - total_shards_len = 0 - total_shards = 0 - assert max_per_shard > 0 - for sequence in sequences: - if sequence['shard']: - sequence['shard'] = False # Reset shard flag. - length = sequence['len'] - start = sequence['start'] - end = sequence['end'] - name = sequence['name'] - assert end - start == length - if length > max_per_shard: - # Dividing sequence into smaller shards. - num_shards = int(math.floor(length / max_per_shard)) + 1 - size = int(math.ceil(length / num_shards)) - tf.logging.info( - 'splitting sequence of length %d into %d shards of size %d' % - (length, num_shards, size)) - last_end = 0 - for i in range(num_shards): - shard_start = last_end - shard_end = min(length, shard_start + size) - if i == num_shards - 1: - shard_end = length - shard_len = shard_end - shard_start - total_shards_len += shard_len - shard_name = name + '_shard%02d' % i - last_end = shard_end - - # Enqueuing shard. - if i == 0: # Replace current sequence. - sequence['len'] = shard_len - sequence['start'] = shard_start - sequence['end'] = shard_end - sequence['name'] = shard_name - else: # Enqueue new sequence. - sequences.append( - {'full': sequence['full'], 'name': shard_name, - 'len': shard_len, 'start': shard_start, 'end': shard_end, - 'num_views': sequence['num_views'], 'shard': False}) - - total_shards += num_shards - assert last_end == length - - # Print resulting sharding. - if total_shards > 0: - tf.logging.info('%d shards of average length %d' % - (total_shards, total_shards_len / total_shards)) - return sorted(sequences, key=lambda k: k['name']) - - -def RandomizeSets(sets): - """Randomize each set.""" - for _, sequences in sorted(sets.iteritems()): - if sequences: - # Randomize order. - shuffle(sequences) - - -def GetSpecificFrame(vid_path, frame_index): - """Gets a frame at a specified index in a video.""" - cap = cv2.VideoCapture(vid_path) - cap.set(1, frame_index) - _, bgr = cap.read() - cap.release() - rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) - return rgb - - -def JpegString(image, jpeg_quality=90): - """Returns given PIL.Image instance as jpeg string. - - Args: - image: A PIL image. - jpeg_quality: The image quality, on a scale from 1 (worst) to 95 (best). - - Returns: - a jpeg_string. - """ - # This fix to PIL makes sure that we don't get an error when saving large - # jpeg files. This is a workaround for a bug in PIL. 
The value should be
-  # substantially larger than the size of the image being saved.
-  ImageFile.MAXBLOCK = 640 * 512 * 64
-
-  output_jpeg = StringIO()
-  image.save(output_jpeg, 'jpeg', quality=jpeg_quality, optimize=True)
-  return output_jpeg.getvalue()
-
-
-def ParallelPreprocessing(args):
-  """Parallel preprocessing: rotation, resize and jpeg encoding to string."""
-  (vid_path, timestep, num_timesteps, view) = args
-  try:
-    image = GetSpecificFrame(vid_path, timestep)
-
-    # Resizing.
-    resize_str = ''
-    if FLAGS.resize_min_edge > 0:
-      resize_str += ', resize ' + shapestring(image)
-      image = cv2resizeminedge(image, FLAGS.resize_min_edge)
-      resize_str += ' => ' + shapestring(image)
-
-    # Rotating.
-    rotate = None
-    if FLAGS.rotate:
-      rotate = FLAGS.rotate
-    if FLAGS.rotate_if_matching is not None:
-      rotate = None
-      patt = re.compile(FLAGS.rotate_if_matching)
-      if patt.match(vid_path) is not None:
-        rotate = FLAGS.rotate
-    if rotate is not None:
-      # Use the possibly-overridden rotation, not FLAGS.rotate directly.
-      image = cv2rotateimage(image, rotate)
-
-    # Jpeg encoding.
-    image = Image.fromarray(image)
-    im_string = bytes_feature([JpegString(image)])
-
-    if timestep % FLAGS.log_frequency == 0:
-      tf.logging.info('Loaded frame %d / %d for %s (rotation %s%s) from %s' %
-                      (timestep, num_timesteps, view, str(rotate), resize_str,
-                       vid_path))
-    return im_string
-  except cv2.error as e:
-    tf.logging.error('Error while loading frame %d of %s: %s' %
-                     (timestep, vid_path, str(e)))
-    return None
-
-
-def GetNumFrames(vid_path):
-  """Gets the number of frames in a video."""
-  cap = cv2.VideoCapture(vid_path)
-  total_frames = cap.get(7)
-  cap.release()
-  return int(total_frames)
-
-
-def GetViewInfo(views_fullname):
-  """Return information about a group of views."""
-  view_paths = sorted(glob.glob(views_fullname))
-  num_frames = [GetNumFrames(i) for i in view_paths]
-  min_num_frames = min(num_frames)
-  num_views = len(view_paths)
-  return num_views, min_num_frames, view_paths, num_frames
-
-
-def AddSequence(sequence, writer, progress, errors):
-  """Converts a sequence to a SequenceExample.
-
-  Sequences have multiple viewpoint videos. Extract all frames from all
-  viewpoint videos in parallel, build a single SequenceExample containing
-  all viewpoint images for every timestep.
-
-  Args:
-    sequence: a dict with information on a sequence.
-    writer: A TFRecordWriter.
-    progress: A Progress object to report processing progress.
-    errors: a list of strings to append to in case of errors.
-  """
-  fullname = sequence['full']
-  shortname = sequence['name']
-  start = sequence['start']
-  end = sequence['end']
-  num_timesteps = sequence['len']
-
-  # Build a list of all view paths for this fullname.
-  path = fullname + FLAGS.view_pattern[0] + '*'
-  tf.logging.info('Loading sequence from ' + path)
-  view_paths = sorted(glob.glob(path))
-  # Extract all images for all views.
-  num_frames = [GetNumFrames(i) for i in view_paths]
-  tf.logging.info('Loading %s with [%d, %d[ (%d frames) from: %s %s' %
-                  (shortname, start, end, num_timesteps,
-                   str(num_frames), str(view_paths)))
-  num_views = len(view_paths)
-  total_timesteps = int(min(num_frames))
-  assert num_views == FLAGS.expected_views
-  assert num_views == sequence['num_views']
-
-  # Create a worker pool to parallelize loading/rotating.
-  worker_pool = ThreadPool(multiprocessing.cpu_count())
-
-  # Collect all images for each view.
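-  # A hedged sketch (illustrative, not part of the original file) of the
-  # SequenceExample layout assembled below, assuming two views and
-  # JPEG-encoded frames:
-  #
-  #   context:       {'task': b'<shortname>', 'len': num_timesteps}
-  #   feature_lists: {'view0': [jpeg_t0, ..., jpeg_tN],
-  #                   'view1': [jpeg_t0, ..., jpeg_tN]}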
-  view_to_feature_list = {}
-  view_images = []
-  for view_idx, view in enumerate(
-      ['view'+str(i) for i in range(num_views)]):
-    # Flatten list to process in parallel.
-    work = []
-    for i in range(start, end):
-      work.append((view_paths[view_idx], i, total_timesteps, view))
-    # Load and rotate images in parallel.
-    view_images.append(worker_pool.map(ParallelPreprocessing, work))
-    # Report progress.
-    progress.Add(len(view_images[view_idx]))
-    tf.logging.info('%s' % str(progress))
-
-  # Remove error frames from all views. Note the per-view image lists are
-  # indexed from zero, regardless of the shard's start offset.
-  i = 0
-  num_errors = 0
-  while i < len(view_images[0]):
-    remove_frame = False
-    # Check if one or more views have an error for this frame.
-    for view_idx in range(num_views):
-      if view_images[view_idx][i] is None:
-        remove_frame = True
-        error_msg = 'Removing frame %d for all views for %s ' % (i, fullname)
-        errors.append(error_msg)
-        tf.logging.error(error_msg)
-    # Remove faulty frames.
-    if remove_frame:
-      num_errors += 1
-      for view_idx in range(num_views):
-        del view_images[view_idx][i]
-    else:
-      i += 1
-
-  # Ignore sequences that have errors.
-  if num_errors > 0:
-    error_msg = 'Dropping sequence because of frame errors for %s' % fullname
-    errors.append(error_msg)
-    tf.logging.error(error_msg)
-  else:
-    # Build FeatureList objects for each view.
-    for view_idx, view in enumerate(
-        ['view'+str(i) for i in range(num_views)]):
-      # Construct FeatureList from repeated feature.
-      view_to_feature_list[view] = tf.train.FeatureList(
-          feature=view_images[view_idx])
-
-    context_features = tf.train.Features(feature={
-        'task': bytes_feature([shortname]),
-        'len': int64_feature([num_timesteps])
-    })
-    feature_lists = tf.train.FeatureLists(feature_list=view_to_feature_list)
-    ex = tf.train.SequenceExample(
-        context=context_features, feature_lists=feature_lists)
-    writer.write(ex.SerializeToString())
-    tf.logging.info('Done adding %s with %d timesteps'
-                    % (fullname, num_timesteps))
-
-
-def PrintSequencesInfo(sequences, prefix):
-  """Print information about sequences and return the total number of frames."""
-  tf.logging.info('')
-  tf.logging.info(prefix)
-  num_frames = 0
-  for sequence in sequences:
-    shard_str = ''
-    if sequence['shard']:
-      shard_str = ' (sharding)'
-    tf.logging.info('frames [%d, %d[\t(%d frames * %d views)%s\t%s' % (
-        sequence['start'], sequence['end'], sequence['len'],
-        sequence['num_views'], shard_str, sequence['name']))
-    num_frames += sequence['len'] * sequence['num_views']
-  tf.logging.info(('%d frames (all views), %d sequences, average sequence'
-                   ' length (all views): %d') %
-                  (num_frames, len(sequences), num_frames / len(sequences)))
-  tf.logging.info('')
-  return num_frames
-
-
-def CheckRecord(filename, sequence):
-  """Check that an existing tfrecord corresponds to the expected sequence."""
-  num_sequences = 0
-  total_frames = 0
-  for serialized_example in tf.python_io.tf_record_iterator(filename):
-    num_sequences += 1
-    example = tf.train.SequenceExample()
-    example.ParseFromString(serialized_example)
-    length = example.context.feature['len'].int64_list.value[0]
-    name = example.context.feature['task'].bytes_list.value[0]
-    total_frames += len(example.feature_lists.feature_list) * length
-    if sequence['name'] != name or sequence['len'] != length:
-      return False, total_frames
-  if num_sequences == 0:
-    return False, total_frames
-  return True, total_frames
-
-
-def AddSequences():
-  """Creates one tfrecord per (possibly sharded) sequence found in input_dir."""
-  errors = []
-
-  # Generate datasets file lists.
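-  # Illustrative shape of one entry in `sequences` (a sketch, not from the
-  # original file; the values are hypothetical):
-  #   {'full': '/videos/pouring/seq07', 'name': 'seq07', 'len': 400,
-  #    'start': 0, 'end': 400, 'num_views': 2, 'shard': False}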
- sequences = FindPatternFiles(FLAGS.input_dir, FLAGS.view_pattern, errors) - num_frames = PrintSequencesInfo(sequences, - 'Found the following datasets and files:') - - # Sharding and randomizing sets. - if FLAGS.max_per_shard > 0: - sequences = ShardSequences(sequences, FLAGS.max_per_shard) - num_frames = PrintSequencesInfo(sequences, 'After sharding:') - tf.logging.info('') - - # Process sets. - progress = Progress(num_frames) - output_list = [] - for sequence in sequences: - record_name = os.path.join( - FLAGS.output_dir, '%s.tfrecord' % sequence['name']) - if tf.gfile.Exists(record_name) and not FLAGS.overwrite: - ok, num_frames = CheckRecord(record_name, sequence) - if ok: - progress.Add(num_frames) - tf.logging.info('Skipping existing output file: %s' % record_name) - continue - else: - tf.logging.info('File does not match sequence, reprocessing...') - output_dir = os.path.dirname(record_name) - if not tf.gfile.Exists(output_dir): - tf.logging.info('Creating output directory: %s' % output_dir) - tf.gfile.MakeDirs(output_dir) - output_list.append(record_name) - tf.logging.info('Writing to ' + record_name) - writer = tf.python_io.TFRecordWriter(record_name) - AddSequence(sequence, writer, progress, errors) - writer.close() - tf.logging.info('Wrote dataset files: ' + str(output_list)) - tf.logging.info('All errors (%d): %s' % (len(errors), str(errors))) - - -def main(_): - AddSequences() - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/tcn/dataset/webcam.py b/research/tcn/dataset/webcam.py deleted file mode 100644 index 962813082..000000000 --- a/research/tcn/dataset/webcam.py +++ /dev/null @@ -1,491 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -r"""Collect images from multiple simultaneous webcams. - -Usage: - -1. Define some environment variables that describe what you're collecting. -dataset=your_dataset_name -mode=train -num_views=2 -viddir=/tmp/tcn/videos -tmp_imagedir=/tmp/tcn/tmp_images -debug_vids=1 - -2. Run the script. -export DISPLAY=:0.0 && \ -root=learning/brain/research/tcn && \ -bazel build -c opt --copt=-mavx tcn/webcam && \ -bazel-bin/tcn/webcam \ ---dataset $dataset \ ---mode $mode \ ---num_views $num_views \ ---tmp_imagedir $tmp_imagedir \ ---viddir $viddir \ ---debug_vids 1 \ ---logtostderr - -3. Hit Ctrl-C when done collecting, upon which the script will compile videos -for each view and optionally a debug video concatenating multiple -simultaneous views. 
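-
-4. Expect output like the following (an illustrative sketch, not from the
-original instructions; actual paths depend on the flags above):
-/tmp/tcn/videos/your_dataset_name/train/0_view0.mp4
-/tmp/tcn/videos/your_dataset_name/train/0_view1.mp4
-/tmp/tcn/videos_debug/your_dataset_name/train/0.mp4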
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import multiprocessing -from multiprocessing import Process -import os -import subprocess -import sys -import time -import cv2 -import matplotlib -matplotlib.use('TkAgg') -from matplotlib import animation # pylint: disable=g-import-not-at-top -import matplotlib.pyplot as plt -import numpy as np -from six.moves import input -import tensorflow as tf -tf.logging.set_verbosity(tf.logging.INFO) - - -tf.flags.DEFINE_string('dataset', '', 'Name of the dataset we`re collecting.') -tf.flags.DEFINE_string('mode', '', - 'What type of data we`re collecting. E.g.:' - '`train`,`valid`,`test`, or `demo`') -tf.flags.DEFINE_string('seqname', '', - 'Name of this sequence. If empty, the script will use' - 'the name seq_N+1 where seq_N is the latest' - 'integer-named sequence in the videos directory.') -tf.flags.DEFINE_integer('num_views', 2, - 'Number of webcams.') -tf.flags.DEFINE_string('tmp_imagedir', '/tmp/tcn/data', - 'Temporary outdir to write images.') -tf.flags.DEFINE_string('viddir', '/tmp/tcn/videos', - 'Base directory to write debug videos.') -tf.flags.DEFINE_boolean('debug_vids', True, - 'Whether to generate debug vids with multiple' - 'concatenated views.') -tf.flags.DEFINE_string('debug_lhs_view', '0', - 'Which viewpoint to use for the lhs video.') -tf.flags.DEFINE_string('debug_rhs_view', '1', - 'Which viewpoint to use for the rhs video.') -tf.flags.DEFINE_integer('height', 1080, 'Raw input height.') -tf.flags.DEFINE_integer('width', 1920, 'Raw input width.') -tf.flags.DEFINE_string('webcam_ports', None, - 'Comma-separated list of each webcam usb port.') -FLAGS = tf.app.flags.FLAGS - - -class ImageQueue(object): - """An image queue holding each stream's most recent image. - - Basically implements a process-safe collections.deque(maxlen=1). - """ - - def __init__(self): - self.lock = multiprocessing.Lock() - self._queue = multiprocessing.Queue(maxsize=1) - - def append(self, data): - with self.lock: - if self._queue.full(): - # Pop the first element. - _ = self._queue.get() - self._queue.put(data) - - def get(self): - with self.lock: - return self._queue.get() - - def empty(self): - return self._queue.empty() - - def close(self): - return self._queue.close() - - -class WebcamViewer(object): - """A class which displays a live stream from the webcams.""" - - def __init__(self, display_queues): - """Create a WebcamViewer instance.""" - self.height = FLAGS.height - self.width = FLAGS.width - self.queues = display_queues - - def _get_next_images(self): - """Gets the next image to display.""" - # Wait for one image per view. - not_found = True - while not_found: - if True in [q.empty() for q in self.queues]: - # At least one image queue is empty; wait. - continue - else: - # Retrieve the images. - latest = [q.get() for q in self.queues] - combined = np.concatenate(latest, axis=1) - not_found = False - return combined - - def run(self): - """Displays the Kcam live stream in a window. - - This function blocks until the window is closed. - """ - fig, rgb_axis = plt.subplots() - - image_rows = self.height - image_cols = self.width * FLAGS.num_views - initial_image = np.zeros((image_rows, image_cols, 3)) - rgb_image = rgb_axis.imshow(initial_image, interpolation='nearest') - - def update_figure(frame_index): - """Animation function for matplotlib FuncAnimation. Updates the image. - - Args: - frame_index: The frame number. - Returns: - An iterable of matplotlib drawables to clear. 
- """ - _ = frame_index - images = self._get_next_images() - images = images[..., [2, 1, 0]] - rgb_image.set_array(images) - return rgb_image, - - # We must keep a reference to this animation in order for it to work. - unused_animation = animation.FuncAnimation( - fig, update_figure, interval=50, blit=True) - mng = plt.get_current_fig_manager() - mng.resize(*mng.window.maxsize()) - plt.show() - - -def reconcile(queues, write_queue): - """Gets a list of concurrent images from each view queue. - - This waits for latest images to be available in all view queues, - then continuously: - - Creates a list of current images for each view. - - Writes the list to a queue of image lists to write to disk. - Args: - queues: A list of `ImageQueues`, holding the latest image from each webcam. - write_queue: A multiprocessing.Queue holding lists of concurrent images. - """ - # Loop forever. - while True: - # Wait till all queues have an image. - if True in [q.empty() for q in queues]: - continue - else: - # Retrieve all views' images. - latest = [q.get() for q in queues] - # Copy the list of all concurrent images to the write queue. - write_queue.put(latest) - - -def persist(write_queue, view_dirs): - """Pulls lists of concurrent images off a write queue, writes them to disk. - - Args: - write_queue: A multiprocessing.Queue holding lists of concurrent images; - one image per view. - view_dirs: A list of strings, holding the output image directories for each - view. - """ - timestep = 0 - while True: - # Wait till there is work in the queue. - if write_queue.empty(): - continue - # Get a list of concurrent images to write to disk. - view_ims = write_queue.get() - for view_idx, image in enumerate(view_ims): - view_base = view_dirs[view_idx] - # Assign all concurrent view images the same sequence timestep. - fname = os.path.join(view_base, '%s.png' % str(timestep).zfill(10)) - cv2.imwrite(fname, image) - # Move to the next timestep. - timestep += 1 - - -def get_image(camera): - """Captures a single image from the camera and returns it in PIL format.""" - data = camera.read() - _, im = data - return im - - -def capture_webcam(camera, display_queue, reconcile_queue): - """Captures images from simultaneous webcams, writes them to queues. - - Args: - camera: A cv2.VideoCapture object representing an open webcam stream. - display_queue: An ImageQueue. - reconcile_queue: An ImageQueue. - """ - # Take some ramp images to allow cams to adjust for brightness etc. - for i in range(60): - tf.logging.info('Taking ramp image %d.' % i) - get_image(camera) - - cnt = 0 - start = time.time() - while True: - # Get images for all cameras. - im = get_image(camera) - # Replace the current image in the display and reconcile queues. - display_queue.append(im) - reconcile_queue.append(im) - cnt += 1 - current = time.time() - if cnt % 100 == 0: - tf.logging.info('Collected %s of video, %d frames at ~%.2f fps.' 
% (
-          timer(start, current), cnt, cnt/(current-start)))
-
-
-def timer(start, end):
-  """Returns a formatted time elapsed."""
-  hours, rem = divmod(end-start, 3600)
-  minutes, seconds = divmod(rem, 60)
-  return '{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds)
-
-
-def display_webcams(display_queues):
-  """Builds a WebcamViewer to animate incoming images, then runs it."""
-  viewer = WebcamViewer(display_queues)
-  viewer.run()
-
-
-def create_vids(view_dirs, seqname):
-  """Creates one video per view per sequence."""
-  vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
-  if not os.path.exists(vidbase):
-    os.makedirs(vidbase)
-  vidpaths = []
-  for idx, view_dir in enumerate(view_dirs):
-    vidname = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
-    encode_vid_cmd = r'mencoder mf://%s/*.png \
-    -mf fps=29:type=png \
-    -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell \
-    -oac copy -o %s' % (view_dir, vidname)
-    os.system(encode_vid_cmd)
-    vidpaths.append(vidname)
-
-  debugpath = None
-  if FLAGS.debug_vids:
-    # The debug view flags are strings; cast them to list indices.
-    lhs = vidpaths[int(FLAGS.debug_lhs_view)]
-    rhs = vidpaths[int(FLAGS.debug_rhs_view)]
-    debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
-                              FLAGS.mode)
-    if not os.path.exists(debug_base):
-      os.makedirs(debug_base)
-    debugpath = '%s/%s.mp4' % (debug_base, seqname)
-    os.system(r"avconv \
-    -i %s \
-    -i %s \
-    -filter_complex '[0:v]pad=iw*2:ih[int];[int][1:v]overlay=W/2:0[vid]' \
-    -map [vid] \
-    -c:v libx264 \
-    -crf 23 \
-    -preset veryfast \
-    %s" % (lhs, rhs, debugpath))
-
-  return vidpaths, debugpath
-
-
-def setup_paths():
-  """Sets up the necessary paths to collect videos."""
-  assert FLAGS.dataset
-  assert FLAGS.mode
-  assert FLAGS.num_views
-
-  # Setup directory for final images used to create videos for this sequence.
-  tmp_imagedir = os.path.join(FLAGS.tmp_imagedir, FLAGS.dataset, FLAGS.mode)
-  if not os.path.exists(tmp_imagedir):
-    os.makedirs(tmp_imagedir)
-
-  # Create a base directory to hold all sequence videos if it doesn't exist.
-  vidbase = os.path.join(FLAGS.viddir, FLAGS.dataset, FLAGS.mode)
-  if not os.path.exists(vidbase):
-    os.makedirs(vidbase)
-
-  # Get one directory per concurrent view and a sequence name.
-  view_dirs, seqname = get_view_dirs(vidbase, tmp_imagedir)
-
-  # Get an output path to each view's video.
-  vid_paths = []
-  for idx, _ in enumerate(view_dirs):
-    vid_path = os.path.join(vidbase, '%s_view%d.mp4' % (seqname, idx))
-    vid_paths.append(vid_path)
-
-  # Optionally build paths to debug_videos.
-  debug_path = None
-  if FLAGS.debug_vids:
-    debug_base = os.path.join('%s_debug' % FLAGS.viddir, FLAGS.dataset,
-                              FLAGS.mode)
-    if not os.path.exists(debug_base):
-      os.makedirs(debug_base)
-    debug_path = '%s/%s.mp4' % (debug_base, seqname)
-
-  return view_dirs, vid_paths, debug_path
-
-
-def get_view_dirs(vidbase, tmp_imagedir):
-  """Creates and returns one view directory per webcam."""
-  # Create and append a sequence name.
-  if FLAGS.seqname:
-    seqname = FLAGS.seqname
-  else:
-    # If there's no video directory, this is the first sequence.
-    if not os.listdir(vidbase):
-      seqname = '0'
-    else:
-      # Otherwise, get the latest sequence name and increment it.
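-      # For example (an illustrative sketch, not from the original file): if
-      # vidbase already holds '3_view0.mp4' and '3_view1.mp4', seq_names is
-      # ['3', '3'], latest_seq is 3, and the new sequence is named '4'.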
- seq_names = [i.split('_')[0] for i in os.listdir(vidbase)] - latest_seq = sorted(map(int, seq_names), reverse=True)[0] - seqname = str(latest_seq+1) - tf.logging.info('No seqname specified, using: %s' % seqname) - view_dirs = [os.path.join( - tmp_imagedir, '%s_view%d' % (seqname, v)) for v in range(FLAGS.num_views)] - for d in view_dirs: - if not os.path.exists(d): - os.makedirs(d) - return view_dirs, seqname - - -def get_cameras(): - """Opens cameras using cv2, ensures they can take images.""" - # Try to get free webcam ports. - if FLAGS.webcam_ports: - ports = map(int, FLAGS.webcam_ports.split(',')) - else: - ports = range(FLAGS.num_views) - cameras = [cv2.VideoCapture(i) for i in ports] - - if not all([i.isOpened() for i in cameras]): - try: - # Try to find and kill hanging cv2 process_ids. - output = subprocess.check_output(['lsof -t /dev/video*'], shell=True) - tf.logging.info('Found hanging cv2 process_ids: \n') - tf.logging.info(output) - tf.logging.info('Killing hanging processes...') - for process_id in output.split('\n')[:-1]: - subprocess.call(['kill %s' % process_id], shell=True) - time.sleep(3) - # Recapture webcams. - cameras = [cv2.VideoCapture(i) for i in ports] - except subprocess.CalledProcessError: - raise ValueError( - 'Cannot connect to cameras. Try running: \n' - 'ls -ltrh /dev/video* \n ' - 'to see which ports your webcams are connected to. Then hand those ' - 'ports as a comma-separated list to --webcam_ports, e.g. ' - '--webcam_ports 0,1') - - # Verify each camera is able to capture images. - ims = map(get_image, cameras) - assert False not in [i is not None for i in ims] - return cameras - - -def launch_images_to_videos(view_dirs, vid_paths, debug_path): - """Launch job in separate process to convert images to videos.""" - - f = 'learning/brain/research/tcn/dataset/images_to_videos.py' - cmd = ['python %s ' % f] - cmd += ['--view_dirs %s ' % ','.join(i for i in view_dirs)] - cmd += ['--vid_paths %s ' % ','.join(i for i in vid_paths)] - cmd += ['--debug_path %s ' % debug_path] - cmd += ['--debug_lhs_view %s ' % FLAGS.debug_lhs_view] - cmd += ['--debug_rhs_view %s ' % FLAGS.debug_rhs_view] - cmd += [' & '] - cmd = ''.join(i for i in cmd) - - # Call images_to_videos asynchronously. - fnull = open(os.devnull, 'w') - subprocess.Popen([cmd], stdout=fnull, stderr=subprocess.STDOUT, shell=True) - - for p in vid_paths: - tf.logging.info('Writing final video to: %s' % p) - if debug_path: - tf.logging.info('Writing debug video to: %s' % debug_path) - - -def main(_): - # Initialize the camera capture objects. - cameras = get_cameras() - # Get one output directory per view. - view_dirs, vid_paths, debug_path = setup_paths() - try: - # Wait for user input. - try: - tf.logging.info('About to write to:') - for v in view_dirs: - tf.logging.info(v) - input('Press Enter to continue...') - except SyntaxError: - pass - - # Create a queue per view for displaying and saving images. - display_queues = [ImageQueue() for _ in range(FLAGS.num_views)] - reconcile_queues = [ImageQueue() for _ in range(FLAGS.num_views)] - - # Create a queue for collecting all tuples of multi-view images to write to - # disk. - write_queue = multiprocessing.Queue() - - processes = [] - # Create a process to display collected images in real time. - processes.append(Process(target=display_webcams, args=(display_queues,))) - # Create a process to collect the latest simultaneous images from each view. 
- processes.append(Process( - target=reconcile, args=(reconcile_queues, write_queue,))) - # Create a process to collect the latest simultaneous images from each view. - processes.append(Process( - target=persist, args=(write_queue, view_dirs,))) - - for (cam, dq, rq) in zip(cameras, display_queues, reconcile_queues): - processes.append(Process( - target=capture_webcam, args=(cam, dq, rq,))) - - for p in processes: - p.start() - for p in processes: - p.join() - - except KeyboardInterrupt: - # Close the queues. - for q in display_queues + reconcile_queues: - q.close() - # Release the cameras. - for cam in cameras: - cam.release() - - # Launch images_to_videos script asynchronously. - launch_images_to_videos(view_dirs, vid_paths, debug_path) - - try: - sys.exit(0) - except SystemExit: - os._exit(0) # pylint: disable=protected-access - - -if __name__ == '__main__': - tf.app.run() diff --git a/research/tcn/download_pretrained.py b/research/tcn/download_pretrained.py deleted file mode 100644 index 4d42ee732..000000000 --- a/research/tcn/download_pretrained.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Downloads pretrained InceptionV3 and ResnetV2-50 checkpoints.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import tarfile -import urllib - -INCEPTION_URL = 'http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz' -RESNET_URL = 'http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz' - - -def DownloadWeights(model_dir, url): - os.makedirs(model_dir) - tar_path = os.path.join(model_dir, 'ckpt.tar.gz') - urllib.urlretrieve(url, tar_path) - tar = tarfile.open(os.path.join(model_dir, 'ckpt.tar.gz')) - tar.extractall(model_dir) - - -if __name__ == '__main__': - - # Create a directory for all pretrained checkpoints. - ckpt_dir = 'pretrained_checkpoints' - if not os.path.exists(ckpt_dir): - os.makedirs(ckpt_dir) - - # Download inception. - print('Downloading inception pretrained weights...') - inception_dir = os.path.join(ckpt_dir, 'inception') - DownloadWeights(inception_dir, INCEPTION_URL) - print('Done downloading inception pretrained weights.') - - print('Downloading resnet pretrained weights...') - resnet_dir = os.path.join(ckpt_dir, 'resnet') - DownloadWeights(resnet_dir, RESNET_URL) - print('Done downloading resnet pretrained weights.') - diff --git a/research/tcn/estimators/base_estimator.py b/research/tcn/estimators/base_estimator.py deleted file mode 100644 index f3832ff0a..000000000 --- a/research/tcn/estimators/base_estimator.py +++ /dev/null @@ -1,700 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Base estimator defining TCN training, test, and inference."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from abc import ABCMeta
-from abc import abstractmethod
-import os
-import numpy as np
-import data_providers
-import preprocessing
-from utils import util
-import tensorflow as tf
-import tensorflow.contrib.slim as slim
-from tensorflow.contrib.tpu.python.tpu import tpu_config
-from tensorflow.contrib.tpu.python.tpu import tpu_estimator
-from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
-from tensorflow.python.training import session_run_hook
-
-tf.app.flags.DEFINE_integer(
-    'tf_random_seed', 0, 'Random seed.')
-FLAGS = tf.app.flags.FLAGS
-
-
-class InitFromPretrainedCheckpointHook(session_run_hook.SessionRunHook):
-  """Hook that can init graph from a pretrained checkpoint."""
-
-  def __init__(self, pretrained_checkpoint_dir):
-    """Initializes an `InitFromPretrainedCheckpointHook`.
-
-    Args:
-      pretrained_checkpoint_dir: The dir of the pretrained checkpoint.
-
-    Raises:
-      ValueError: If pretrained_checkpoint_dir is invalid.
-    """
-    if pretrained_checkpoint_dir is None:
-      raise ValueError('pretrained_checkpoint_dir must be specified.')
-    self._pretrained_checkpoint_dir = pretrained_checkpoint_dir
-
-  def begin(self):
-    checkpoint_reader = tf.contrib.framework.load_checkpoint(
-        self._pretrained_checkpoint_dir)
-    variable_shape_map = checkpoint_reader.get_variable_to_shape_map()
-
-    exclude_scopes = 'logits/,final_layer/,aux_'
-    # Skip restoring global_step so that fine-tuning runs from step=0.
-    exclusions = ['global_step']
-    if exclude_scopes:
-      exclusions.extend([scope.strip() for scope in exclude_scopes.split(',')])
-
-    variable_to_restore = tf.contrib.framework.get_model_variables()
-
-    # Variable filtering by given exclude_scopes.
-    filtered_variables_to_restore = {}
-    for v in variable_to_restore:
-      for exclusion in exclusions:
-        if v.name.startswith(exclusion):
-          break
-      else:
-        var_name = v.name.split(':')[0]
-        filtered_variables_to_restore[var_name] = v
-
-    # Final filter by checking shape matching and skipping variables that
-    # are not in the checkpoint.
-    final_variables_to_restore = {}
-    for var_name, var_tensor in filtered_variables_to_restore.iteritems():
-      if var_name not in variable_shape_map:
-        # Try the moving average version of the variable.
-        var_name = os.path.join(var_name, 'ExponentialMovingAverage')
-        if var_name not in variable_shape_map:
-          tf.logging.info(
-              'Skip init [%s] because it is not in ckpt.', var_name)
-          # Skip variables not in the checkpoint.
-          continue
-
-      if not var_tensor.get_shape().is_compatible_with(
-          variable_shape_map[var_name]):
-        # Skip init of a variable from ckpt if the shapes mismatch.
-        tf.logging.info(
-            'Skip init [%s] from [%s] in ckpt because shapes mismatch: %s vs %s',
-            var_tensor.name, var_name,
-            var_tensor.get_shape(), variable_shape_map[var_name])
-        continue
-
-      tf.logging.info('Init %s from %s in ckpt' % (var_tensor, var_name))
-      final_variables_to_restore[var_name] = var_tensor
-
-    self._init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
-        self._pretrained_checkpoint_dir,
-        final_variables_to_restore)
-
-  def after_create_session(self, session, coord):
-    tf.logging.info('Restoring InceptionV3 weights.')
-    self._init_fn(session)
-    tf.logging.info('Done restoring InceptionV3 weights.')
-
-
-class BaseEstimator(object):
-  """Abstract TCN base estimator class."""
-  __metaclass__ = ABCMeta
-
-  def __init__(self, config, logdir):
-    """Constructor.
-
-    Args:
-      config: A Luatable-like T object holding training config.
-      logdir: String, a directory where checkpoints and summaries are written.
-    """
-    self._config = config
-    self._logdir = logdir
-
-  @abstractmethod
-  def construct_input_fn(self, records, is_training):
-    """Builds an estimator input_fn.
-
-    The input_fn is used to pass feature and target data to the train,
-    evaluate, and predict methods of the Estimator.
-
-    Method to be overridden by implementations.
-
-    Args:
-      records: A list of Strings, paths to TFRecords with image data.
-      is_training: Boolean, whether or not we're training.
-
-    Returns:
-      Function, that has signature of ()->(dict of features, target).
-      features is a dict mapping feature names to `Tensors`
-      containing the corresponding feature data (typically, just a single
-      key/value pair 'raw_data' -> image `Tensor` for TCN).
-      labels is a 1-D int32 `Tensor` holding labels.
-    """
-    pass
-
-  def preprocess_data(self, images, is_training):
-    """Preprocesses raw images for either training or inference.
-
-    Args:
-      images: A 4-D float32 `Tensor` holding images to preprocess.
-      is_training: Boolean, whether or not we're in training.
-
-    Returns:
-      data_preprocessed: data after the preprocessor.
-    """
-    config = self._config
-    height = config.data.height
-    width = config.data.width
-    min_scale = config.data.augmentation.minscale
-    max_scale = config.data.augmentation.maxscale
-    p_scale_up = config.data.augmentation.proportion_scaled_up
-    aug_color = config.data.augmentation.color
-    fast_mode = config.data.augmentation.fast_mode
-    crop_strategy = config.data.preprocessing.eval_cropping
-    preprocessed_images = preprocessing.preprocess_images(
-        images, is_training, height, width,
-        min_scale, max_scale, p_scale_up,
-        aug_color=aug_color, fast_mode=fast_mode,
-        crop_strategy=crop_strategy)
-    return preprocessed_images
-
-  @abstractmethod
-  def forward(self, images, is_training, reuse=False):
-    """Defines the forward pass that converts batch images to embeddings.
-
-    Method to be overridden by implementations.
-
-    Args:
-      images: A 4-D float32 `Tensor` holding images to be embedded.
-      is_training: Boolean, whether or not we're in training mode.
-      reuse: Boolean, whether or not to reuse embedder.
-    Returns:
-      embeddings: A 2-D float32 `Tensor` holding embedded images.
-    """
-    pass
-
-  @abstractmethod
-  def define_loss(self, embeddings, labels, is_training):
-    """Defines the loss function on the embedding vectors.
-
-    Method to be overridden by implementations.

-    Args:
-      embeddings: A 2-D float32 `Tensor` holding embedded images.
-      labels: A 1-D int32 `Tensor` holding problem labels.
-      is_training: Boolean, whether or not we're in training mode.
-
-    Returns:
-      loss: tf.float32 scalar.
-    """
-    pass
-
-  @abstractmethod
-  def define_eval_metric_ops(self):
-    """Defines the dictionary of eval metric tensors.
-
-    Method to be overridden by implementations.
-
-    Returns:
-      eval_metric_ops: A dict of name/value pairs specifying the
-        metrics that will be calculated when the model runs in EVAL mode.
-    """
-    pass
-
-  def get_train_op(self, loss):
-    """Creates a training op.
-
-    Args:
-      loss: A float32 `Tensor` representing the total training loss.
-    Returns:
-      train_op: A slim.learning.create_train_op train_op.
-    Raises:
-      ValueError: If specified optimizer isn't supported.
-    """
-    # Get variables to train (defined in subclass).
-    assert self.variables_to_train
-
-    # Get the learning rate schedule parameters.
-    decay_steps = self._config.learning.decay_steps
-    decay_factor = self._config.learning.decay_factor
-    learning_rate = float(self._config.learning.learning_rate)
-
-    # Define a learning rate schedule.
-    global_step = slim.get_or_create_global_step()
-    learning_rate = tf.train.exponential_decay(
-        learning_rate,
-        global_step,
-        decay_steps,
-        decay_factor,
-        staircase=True)
-
-    # Create an optimizer.
-    opt_type = self._config.learning.optimizer
-    if opt_type == 'adam':
-      opt = tf.train.AdamOptimizer(learning_rate)
-    elif opt_type == 'momentum':
-      opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
-    elif opt_type == 'rmsprop':
-      opt = tf.train.RMSPropOptimizer(learning_rate, momentum=0.9,
-                                      epsilon=1.0, decay=0.9)
-    else:
-      raise ValueError('Unsupported optimizer %s' % opt_type)
-
-    if self._config.use_tpu:
-      opt = tpu_optimizer.CrossShardOptimizer(opt)
-
-    # Create a training op.
-    train_op = slim.learning.create_train_op(
-        loss,
-        optimizer=opt,
-        variables_to_train=self.variables_to_train,
-        update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
-
-    return train_op
-
-  def _get_model_fn(self):
-    """Defines behavior for training, evaluation, and inference (prediction).
-
-    Returns:
-      `model_fn` for `Estimator`.
-    """
-    # pylint: disable=unused-argument
-    def model_fn(features, labels, mode, params):
-      """Build the model based on features, labels, and mode.
-
-      Args:
-        features: Dict, strings to `Tensor` input data, returned by the
-          input_fn.
-        labels: The labels Tensor returned by the input_fn.
-        mode: A string indicating the mode. This will be either
-          tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.PREDICT,
-          or tf.estimator.ModeKeys.EVAL.
-        params: A dict holding training parameters, passed in during TPU
-          training.
-
-      Returns:
-        A tf.estimator.EstimatorSpec specifying train/test/inference behavior.
-      """
-      is_training = mode == tf.estimator.ModeKeys.TRAIN
-
-      # Get preprocessed images from the features dict.
-      batch_preprocessed = features['batch_preprocessed']
-
-      # Do a forward pass to embed data.
-      batch_encoded = self.forward(batch_preprocessed, is_training)
-
-      # Optionally set the pretrained initialization function.
-      initializer_fn = None
-      if mode == tf.estimator.ModeKeys.TRAIN:
-        initializer_fn = self.pretrained_init_fn
-
-      # If we're training or evaluating, define total loss.
-      total_loss = None
-      if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
-        loss = self.define_loss(batch_encoded, labels, is_training)
-        tf.losses.add_loss(loss)
-        total_loss = tf.losses.get_total_loss()
-
-      # If we're training, define a train op.
- train_op = None - if mode == tf.estimator.ModeKeys.TRAIN: - train_op = self.get_train_op(total_loss) - - # If we're doing inference, set the output to be the embedded images. - predictions_dict = None - if mode == tf.estimator.ModeKeys.PREDICT: - predictions_dict = {'embeddings': batch_encoded} - # Pass through additional metadata stored in features. - for k, v in features.iteritems(): - predictions_dict[k] = v - - # If we're evaluating, define some eval metrics. - eval_metric_ops = None - if mode == tf.estimator.ModeKeys.EVAL: - eval_metric_ops = self.define_eval_metric_ops() - - # Define training scaffold to load pretrained weights. - num_checkpoint_to_keep = self._config.logging.checkpoint.num_to_keep - saver = tf.train.Saver( - max_to_keep=num_checkpoint_to_keep) - - if is_training and self._config.use_tpu: - # TPU doesn't have a scaffold option at the moment, so initialize - # pretrained weights using a custom train_hook instead. - return tpu_estimator.TPUEstimatorSpec( - mode, - loss=total_loss, - eval_metrics=None, - train_op=train_op, - predictions=predictions_dict) - else: - # Build a scaffold to initialize pretrained weights. - scaffold = tf.train.Scaffold( - init_fn=initializer_fn, - saver=saver, - summary_op=None) - return tf.estimator.EstimatorSpec( - mode=mode, - predictions=predictions_dict, - loss=total_loss, - train_op=train_op, - eval_metric_ops=eval_metric_ops, - scaffold=scaffold) - return model_fn - - def train(self): - """Runs training.""" - # Get a list of training tfrecords. - config = self._config - training_dir = config.data.training - training_records = util.GetFilesRecursively(training_dir) - - # Define batch size. - self._batch_size = config.data.batch_size - - # Create a subclass-defined training input function. - train_input_fn = self.construct_input_fn( - training_records, is_training=True) - - # Create the estimator. - estimator = self._build_estimator(is_training=True) - - train_hooks = None - if config.use_tpu: - # TPU training initializes pretrained weights using a custom train hook. - train_hooks = [] - if tf.train.latest_checkpoint(self._logdir) is None: - train_hooks.append( - InitFromPretrainedCheckpointHook( - config[config.embedder_strategy].pretrained_checkpoint)) - - # Run training. - estimator.train(input_fn=train_input_fn, hooks=train_hooks, - steps=config.learning.max_step) - - def _build_estimator(self, is_training): - """Returns an Estimator object. - - Args: - is_training: Boolean, whether or not we're in training mode. - - Returns: - A tf.estimator.Estimator. 
-    """
-    config = self._config
-    save_checkpoints_steps = config.logging.checkpoint.save_checkpoints_steps
-    keep_checkpoint_max = self._config.logging.checkpoint.num_to_keep
-    if is_training and config.use_tpu:
-      iterations = config.tpu.iterations
-      num_shards = config.tpu.num_shards
-      run_config = tpu_config.RunConfig(
-          save_checkpoints_secs=None,
-          save_checkpoints_steps=save_checkpoints_steps,
-          keep_checkpoint_max=keep_checkpoint_max,
-          master=FLAGS.master,
-          evaluation_master=FLAGS.master,
-          model_dir=self._logdir,
-          tpu_config=tpu_config.TPUConfig(
-              iterations_per_loop=iterations,
-              num_shards=num_shards,
-              per_host_input_for_training=num_shards <= 8),
-          tf_random_seed=FLAGS.tf_random_seed)
-
-      batch_size = config.data.batch_size
-      return tpu_estimator.TPUEstimator(
-          model_fn=self._get_model_fn(),
-          config=run_config,
-          use_tpu=True,
-          train_batch_size=batch_size,
-          eval_batch_size=batch_size)
-    else:
-      run_config = tf.estimator.RunConfig().replace(
-          model_dir=self._logdir,
-          save_checkpoints_steps=save_checkpoints_steps,
-          keep_checkpoint_max=keep_checkpoint_max,
-          tf_random_seed=FLAGS.tf_random_seed)
-      return tf.estimator.Estimator(
-          model_fn=self._get_model_fn(),
-          config=run_config)
-
-  def evaluate(self):
-    """Runs `Estimator` validation."""
-    config = self._config
-
-    # Get a list of validation tfrecords.
-    validation_dir = config.data.validation
-    validation_records = util.GetFilesRecursively(validation_dir)
-
-    # Define batch size.
-    self._batch_size = config.data.batch_size
-
-    # Create a subclass-defined validation input function.
-    validation_input_fn = self.construct_input_fn(
-        validation_records, False)
-
-    # Create the estimator.
-    estimator = self._build_estimator(is_training=False)
-
-    # Run validation.
-    eval_batch_size = config.data.batch_size
-    num_eval_samples = config.val.num_eval_samples
-    num_eval_batches = int(num_eval_samples / eval_batch_size)
-    estimator.evaluate(input_fn=validation_input_fn, steps=num_eval_batches)
-
-  def inference(
-      self, inference_input, checkpoint_path, batch_size=None, **kwargs):
-    """Defines 3 modes of inference.
-
-    Inputs:
-      * Mode 1: Input is an input_fn.
-      * Mode 2: Input is a TFRecord (or list of TFRecords).
-      * Mode 3: Input is a numpy array holding an image (or array of images).
-
-    Outputs:
-      * Mode 1: this returns an iterator over embeddings and additional
-        metadata. See
-        https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#predict
-        for details.
-      * Mode 2: Returns an iterator over tuples of
-        (embeddings, raw_image_strings, sequence_name), where embeddings is a
-        2-D float32 numpy array holding [sequence_size, embedding_size] image
-        embeddings, raw_image_strings is a 1-D string numpy array holding
-        [sequence_size] jpeg-encoded image strings, and sequence_name is a
-        string holding the name of the embedded sequence.
-      * Mode 3: Returns a tuple of (embeddings, raw_image_strings), where
-        embeddings is a 2-D float32 numpy array holding
-        [batch_size, embedding_size] image embeddings, raw_image_strings is a
-        1-D string numpy array holding [batch_size] jpeg-encoded image strings.
-
-    Args:
-      inference_input: This can be a tf.Estimator input_fn, a TFRecord path,
-        a list of TFRecord paths, a numpy image, or an array of numpy images.
-      checkpoint_path: String, path to the checkpoint to restore for inference.
-      batch_size: Int, the size of the batch to use for inference.
-      **kwargs: Additional keyword arguments, depending on the mode.
-        See _input_fn_inference, _tfrecord_inference, and _np_inference.
-    Returns:
-      inference_output: Inference output depending on mode, see above for
-        details.
-    Raises:
-      ValueError: If inference_input isn't a tf.Estimator input_fn,
-        a TFRecord path, a list of TFRecord paths, or a numpy array.
-    """
-    # Mode 1: input is a callable tf.Estimator input_fn.
-    if callable(inference_input):
-      return self._input_fn_inference(
-          input_fn=inference_input, checkpoint_path=checkpoint_path, **kwargs)
-    # Mode 2: Input is a TFRecord path (or list of TFRecord paths).
-    elif util.is_tfrecord_input(inference_input):
-      return self._tfrecord_inference(
-          records=inference_input, checkpoint_path=checkpoint_path,
-          batch_size=batch_size, **kwargs)
-    # Mode 3: Input is a numpy array of raw images.
-    elif util.is_np_array(inference_input):
-      return self._np_inference(
-          np_images=inference_input, checkpoint_path=checkpoint_path, **kwargs)
-    else:
-      raise ValueError(
-          'inference input must be a tf.Estimator input_fn, a TFRecord path, '
-          'a list of TFRecord paths, or a numpy array. Got: %s' % str(type(
-              inference_input)))
-
-  def _input_fn_inference(self, input_fn, checkpoint_path, predict_keys=None):
-    """Mode 1: tf.Estimator inference.
-
-    Args:
-      input_fn: Function, that has signature of ()->(dict of features, None).
-        This is a function called by the estimator to get input tensors (stored
-        in the features dict) to do inference over.
-      checkpoint_path: String, path to a specific checkpoint to restore.
-      predict_keys: List of strings, the keys of the `Tensors` in the features
-        dict (returned by the input_fn) to evaluate during inference.
-    Returns:
-      predictions: An Iterator, yielding evaluated values of `Tensors`
-        specified in `predict_keys`.
-    """
-    # Create the estimator.
-    estimator = self._build_estimator(is_training=False)
-
-    # Create an iterator of predicted embeddings.
-    predictions = estimator.predict(input_fn=input_fn,
-                                    checkpoint_path=checkpoint_path,
-                                    predict_keys=predict_keys)
-    return predictions
-
-  def _tfrecord_inference(self, records, checkpoint_path, batch_size,
-                          num_sequences=-1, reuse=False):
-    """Mode 2: TFRecord inference.
-
-    Args:
-      records: List of strings, paths to TFRecords.
-      checkpoint_path: String, path to a specific checkpoint to restore.
-      batch_size: Int, size of inference batch.
-      num_sequences: Int, number of sequences to embed. If -1,
-        embed everything.
-      reuse: Boolean, whether or not to reuse embedder weights.
-    Yields:
-      (embeddings, raw_image_strings, sequence_name):
-        embeddings is a 2-D float32 numpy array holding
-        [sequence_size, embedding_size] image embeddings.
-        raw_image_strings is a 1-D string numpy array holding
-        [sequence_size] jpeg-encoded image strings.
-        sequence_name is a string holding the name of the embedded sequence.
-    """
-    tf.reset_default_graph()
-    # Wrap a single record path in a list; list() would split the string.
-    if not isinstance(records, list):
-      records = [records]
-
-    # Map the list of tfrecords to a dataset of preprocessed images.
-    num_views = self._config.data.num_views
-    (views, task, seq_len) = data_providers.full_sequence_provider(
-        records, num_views)
-    tensor_dict = {
-        'raw_image_strings': views,
-        'task': task,
-        'seq_len': seq_len
-    }
-
-    # Create a preprocess function over raw image string placeholders.
-    image_str_placeholder = tf.placeholder(tf.string, shape=[None])
-    decoded = preprocessing.decode_images(image_str_placeholder)
-    decoded.set_shape([batch_size, None, None, 3])
-    preprocessed = self.preprocess_data(decoded, is_training=False)
-
-    # Create an inference graph over preprocessed images.
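-    # Usage sketch for Mode 2 (illustrative, not from the original file;
-    # `estimator_instance` is a hypothetical configured subclass):
-    #   for embs, raw, name in estimator_instance.inference(
-    #       ['/path/seq.tfrecord'], checkpoint_path='/path/ckpt',
-    #       batch_size=32):
-    #     print(name, [v.shape for v in embs])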
-    embeddings = self.forward(preprocessed, is_training=False, reuse=reuse)
-
-    # Create a saver to restore model variables.
-    tf.train.get_or_create_global_step()
-    saver = tf.train.Saver(tf.all_variables())
-
-    # Create a session and restore model variables.
-    with tf.train.MonitoredSession() as sess:
-      saver.restore(sess, checkpoint_path)
-      cnt = 0
-      # If num_sequences is specified, embed that many sequences, else embed
-      # everything.
-      try:
-        while num_sequences == -1 or cnt < num_sequences:
-          # Get a preprocessed image sequence.
-          np_data = sess.run(tensor_dict)
-          np_raw_images = np_data['raw_image_strings']
-          np_seq_len = np_data['seq_len']
-          np_task = np_data['task']
-
-          # Embed each view.
-          embedding_size = self._config.embedding_size
-          view_embeddings = [
-              np.zeros((0, embedding_size)) for _ in range(num_views)]
-          for view_index in range(num_views):
-            view_raw = np_raw_images[view_index]
-            # Embed the full sequence.
-            t = 0
-            while t < np_seq_len:
-              # Decode and preprocess the batch of image strings.
-              embeddings_np = sess.run(
-                  embeddings, feed_dict={
-                      image_str_placeholder: view_raw[t:t+batch_size]})
-              view_embeddings[view_index] = np.append(
-                  view_embeddings[view_index], embeddings_np, axis=0)
-              tf.logging.info('Embedded %d images for task %s' % (t, np_task))
-              t += batch_size
-
-          # Done embedding for all views.
-          view_raw_images = np_data['raw_image_strings']
-          yield (view_embeddings, view_raw_images, np_task)
-          cnt += 1
-      except tf.errors.OutOfRangeError:
-        tf.logging.info('Done embedding entire dataset.')
-
-  def _np_inference(self, np_images, checkpoint_path):
-    """Mode 3: Call this repeatedly to do inference over numpy images.
-
-    This mode is for when we want to do real-time inference over
-    some stream of images (represented as numpy arrays).
-
-    Args:
-      np_images: A float32 numpy array holding images to embed.
-      checkpoint_path: String, path to a specific checkpoint to restore.
-    Returns:
-      (embeddings, raw_image_strings):
-        embeddings is a 2-D float32 numpy array holding
-        [inferred batch_size, embedding_size] image embeddings.
-        raw_image_strings is a 1-D string numpy array holding
-        [inferred batch_size] jpeg-encoded image strings.
-    """
-    if isinstance(np_images, list):
-      np_images = np.asarray(np_images)
-    # Add a batch dimension if only 3-dimensional.
-    if len(np_images.shape) == 3:
-      np_images = np.expand_dims(np_images, axis=0)
-
-    # If np_images are in the range [0,255], convert to [0,1].
-    assert np.min(np_images) >= 0.
-    if (np.min(np_images), np.max(np_images)) == (0, 255):
-      np_images = np_images.astype(np.float32) / 255.
-    assert (np.min(np_images), np.max(np_images)) == (0., 1.)
-
-    # If this is the first pass, set up inference graph.
-    if not hasattr(self, '_np_inf_tensor_dict'):
-      self._setup_np_inference(np_images, checkpoint_path)
-
-    # Convert np_images to embeddings.
-    np_tensor_dict = self._sess.run(self._np_inf_tensor_dict, feed_dict={
-        self._image_placeholder: np_images
-    })
-    return np_tensor_dict['embeddings'], np_tensor_dict['raw_image_strings']
-
-  def _setup_np_inference(self, np_images, checkpoint_path):
-    """Sets up and restores inference graph, creates and caches a Session."""
-    tf.logging.info('Restoring model weights.')
-
-    # Define inference over an image placeholder.
-    _, height, width, _ = np.shape(np_images)
-    image_placeholder = tf.placeholder(
-        tf.float32, shape=(None, height, width, 3))
-
-    # Preprocess batch.
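-    # Usage sketch for Mode 3 (illustrative, not from the original file;
-    # `estimator_instance` and `frame` are hypothetical):
-    #   emb, jpegs = estimator_instance.inference(
-    #       frame, checkpoint_path='/path/ckpt')
-    #   # emb: [1, embedding_size] float32; jpegs: [1] encoded strings.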
- preprocessed = self.preprocess_data(image_placeholder, is_training=False) - - # Unscale and jpeg encode preprocessed images for display purposes. - im_strings = preprocessing.unscale_jpeg_encode(preprocessed) - - # Do forward pass to get embeddings. - embeddings = self.forward(preprocessed, is_training=False) - - # Create a saver to restore model variables. - tf.train.get_or_create_global_step() - saver = tf.train.Saver(tf.all_variables()) - - self._image_placeholder = image_placeholder - self._batch_encoded = embeddings - - self._np_inf_tensor_dict = { - 'embeddings': embeddings, - 'raw_image_strings': im_strings, - } - - # Create a session and restore model variables. - self._sess = tf.Session() - saver.restore(self._sess, checkpoint_path) diff --git a/research/tcn/estimators/get_estimator.py b/research/tcn/estimators/get_estimator.py deleted file mode 100644 index 30b850edc..000000000 --- a/research/tcn/estimators/get_estimator.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Get a configured estimator.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from estimators import mvtcn_estimator as mvtcn_estimators -from estimators import svtcn_estimator - - -def get_mvtcn_estimator(loss_strategy, config, logdir): - """Returns a configured MVTCN estimator.""" - loss_to_trainer = { - 'triplet_semihard': mvtcn_estimators.MVTCNTripletEstimator, - 'npairs': mvtcn_estimators.MVTCNNpairsEstimator, - } - if loss_strategy not in loss_to_trainer: - raise ValueError('Unknown loss for MVTCN: %s' % loss_strategy) - estimator = loss_to_trainer[loss_strategy](config, logdir) - return estimator - - -def get_estimator(config, logdir): - """Returns an unsupervised model trainer based on config. - - Args: - config: A T object holding training configs. - logdir: String, path to directory where model checkpoints and summaries - are saved. - Returns: - estimator: A configured `TCNEstimator` object. - Raises: - ValueError: If unknown training strategy is specified. - """ - # Get the training strategy. - training_strategy = config.training_strategy - if training_strategy == 'mvtcn': - loss_strategy = config.loss_strategy - estimator = get_mvtcn_estimator( - loss_strategy, config, logdir) - elif training_strategy == 'svtcn': - estimator = svtcn_estimator.SVTCNTripletEstimator(config, logdir) - else: - raise ValueError('Unknown training strategy: %s' % training_strategy) - return estimator diff --git a/research/tcn/estimators/mvtcn_estimator.py b/research/tcn/estimators/mvtcn_estimator.py deleted file mode 100644 index 4a036b435..000000000 --- a/research/tcn/estimators/mvtcn_estimator.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""MVTCN trainer implementations with various metric learning losses.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import data_providers -import model as model_module -from estimators import base_estimator -import tensorflow as tf - - -class MVTCNEstimator(base_estimator.BaseEstimator): - """Multi-view TCN base class.""" - - def __init__(self, config, logdir): - super(MVTCNEstimator, self).__init__(config, logdir) - - def _pairs_provider(self, records, is_training): - config = self._config - num_views = config.data.num_views - window = config.mvtcn.window - num_parallel_calls = config.data.num_parallel_calls - sequence_prefetch_size = config.data.sequence_prefetch_size - batch_prefetch_size = config.data.batch_prefetch_size - examples_per_seq = config.data.examples_per_sequence - return functools.partial( - data_providers.multiview_pairs_provider, - file_list=records, - preprocess_fn=self.preprocess_data, - num_views=num_views, - window=window, - is_training=is_training, - examples_per_seq=examples_per_seq, - num_parallel_calls=num_parallel_calls, - sequence_prefetch_size=sequence_prefetch_size, - batch_prefetch_size=batch_prefetch_size) - - def forward(self, images_concat, is_training, reuse=False): - """See base class.""" - embedder_strategy = self._config.embedder_strategy - loss_strategy = self._config.loss_strategy - l2_normalize_embedding = self._config[loss_strategy].embedding_l2 - embedder = model_module.get_embedder( - embedder_strategy, - self._config, - images_concat, - is_training=is_training, - l2_normalize_embedding=l2_normalize_embedding, reuse=reuse) - embeddings_concat = embedder.construct_embedding() - variables_to_train = embedder.get_trainable_variables() - self.variables_to_train = variables_to_train - self.pretrained_init_fn = embedder.init_fn - return embeddings_concat - - def _collect_image_summaries(self, anchor_images, positive_images, - images_concat): - image_summaries = self._config.logging.summary.image_summaries - if image_summaries and not self._config.use_tpu: - batch_pairs_summary = tf.concat( - [anchor_images, positive_images], axis=2) - tf.summary.image('training/mvtcn_pairs', batch_pairs_summary) - tf.summary.image('training/images_preprocessed_concat', images_concat) - - -class MVTCNTripletEstimator(MVTCNEstimator): - """Multi-View TCN with semihard triplet loss.""" - - def __init__(self, config, logdir): - super(MVTCNTripletEstimator, self).__init__(config, logdir) - - def construct_input_fn(self, records, is_training): - """See base class.""" - def input_fn(params): - """Provides input to MVTCN models.""" - if is_training and self._config.use_tpu: - batch_size = params['batch_size'] - else: - batch_size = self._batch_size - (images_concat, - anchor_labels, - positive_labels, - anchor_images, - positive_images) = 
self._pairs_provider( - records, is_training)(batch_size=batch_size) - if is_training: - self._collect_image_summaries(anchor_images, positive_images, - images_concat) - labels = tf.concat([anchor_labels, positive_labels], axis=0) - features = {'batch_preprocessed': images_concat} - return (features, labels) - return input_fn - - def define_loss(self, embeddings, labels, is_training): - """See base class.""" - margin = self._config.triplet_semihard.margin - loss = tf.contrib.losses.metric_learning.triplet_semihard_loss( - labels=labels, embeddings=embeddings, margin=margin) - self._loss = loss - if is_training and not self._config.use_tpu: - tf.summary.scalar('training/triplet_semihard', loss) - return loss - - def define_eval_metric_ops(self): - """See base class.""" - return {'validation/triplet_semihard': tf.metrics.mean(self._loss)} - - -class MVTCNNpairsEstimator(MVTCNEstimator): - """Multi-View TCN with npairs loss.""" - - def __init__(self, config, logdir): - super(MVTCNNpairsEstimator, self).__init__(config, logdir) - - def construct_input_fn(self, records, is_training): - """See base class.""" - def input_fn(params): - """Provides input to MVTCN models.""" - if is_training and self._config.use_tpu: - batch_size = params['batch_size'] - else: - batch_size = self._batch_size - (images_concat, - npairs_labels, - _, - anchor_images, - positive_images) = self._pairs_provider( - records, is_training)(batch_size=batch_size) - if is_training: - self._collect_image_summaries(anchor_images, positive_images, - images_concat) - features = {'batch_preprocessed': images_concat} - return (features, npairs_labels) - return input_fn - - def define_loss(self, embeddings, labels, is_training): - """See base class.""" - embeddings_anchor, embeddings_positive = tf.split(embeddings, 2, axis=0) - loss = tf.contrib.losses.metric_learning.npairs_loss( - labels=labels, embeddings_anchor=embeddings_anchor, - embeddings_positive=embeddings_positive) - self._loss = loss - if is_training and not self._config.use_tpu: - tf.summary.scalar('training/npairs', loss) - return loss - - def define_eval_metric_ops(self): - """See base class.""" - return {'validation/npairs': tf.metrics.mean(self._loss)} diff --git a/research/tcn/estimators/svtcn_estimator.py b/research/tcn/estimators/svtcn_estimator.py deleted file mode 100644 index 069f7e8dd..000000000 --- a/research/tcn/estimators/svtcn_estimator.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== - -"""SVTCN estimator implementation.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import data_providers -import model as model_module -from estimators import base_estimator -from estimators import svtcn_loss -import tensorflow as tf - - -class SVTCNEstimator(base_estimator.BaseEstimator): - """Single-view TCN Estimator base class.""" - - def __init__(self, config, logdir): - super(SVTCNEstimator, self).__init__(config, logdir) - - def construct_input_fn(self, records, is_training): - """See base class.""" - config = self._config - num_views = config.data.num_views - num_parallel_calls = config.data.num_parallel_calls - sequence_prefetch_size = config.data.sequence_prefetch_size - batch_prefetch_size = config.data.batch_prefetch_size - - def input_fn(): - """Provides input to SVTCN models.""" - (images_preprocessed, - images_raw, - timesteps) = data_providers.singleview_tcn_provider( - file_list=records, - preprocess_fn=self.preprocess_data, - num_views=num_views, - is_training=is_training, - batch_size=self._batch_size, - num_parallel_calls=num_parallel_calls, - sequence_prefetch_size=sequence_prefetch_size, - batch_prefetch_size=batch_prefetch_size) - - if config.logging.summary.image_summaries and is_training: - tf.summary.image('training/svtcn_images', images_raw) - - features = {'batch_preprocessed': images_preprocessed} - return (features, timesteps) - return input_fn - - def forward(self, images, is_training, reuse=False): - """See base class.""" - embedder_strategy = self._config.embedder_strategy - embedder = model_module.get_embedder( - embedder_strategy, - self._config, - images, - is_training=is_training, reuse=reuse) - embeddings = embedder.construct_embedding() - - if is_training: - self.variables_to_train = embedder.get_trainable_variables() - self.pretrained_init_fn = embedder.init_fn - return embeddings - - -class SVTCNTripletEstimator(SVTCNEstimator): - """Single-View TCN with semihard triplet loss.""" - - def __init__(self, config, logdir): - super(SVTCNTripletEstimator, self).__init__(config, logdir) - - def define_loss(self, embeddings, timesteps, is_training): - """See base class.""" - pos_radius = self._config.svtcn.pos_radius - neg_radius = self._config.svtcn.neg_radius - margin = self._config.triplet_semihard.margin - loss = svtcn_loss.singleview_tcn_loss( - embeddings, timesteps, pos_radius, neg_radius, margin=margin) - self._loss = loss - if is_training: - tf.summary.scalar('training/svtcn_loss', loss) - return loss - - def define_eval_metric_ops(self): - """See base class.""" - return {'validation/svtcn_loss': tf.metrics.mean(self._loss)} diff --git a/research/tcn/estimators/svtcn_loss.py b/research/tcn/estimators/svtcn_loss.py deleted file mode 100644 index 261780349..000000000 --- a/research/tcn/estimators/svtcn_loss.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""This implements the single-view TCN triplet loss."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-def pairwise_squared_distance(feature):
-  """Computes the squared pairwise distance matrix.
-
-  output[i, j] = || feature[i, :] - feature[j, :] ||_2^2
-
-  Args:
-    feature: 2-D Tensor of size [number of data, feature dimension]
-
-  Returns:
-    pairwise_squared_distances: 2-D Tensor of size
-      [number of data, number of data]
-  """
-  pairwise_squared_distances = tf.add(
-      tf.reduce_sum(
-          tf.square(feature), axis=1, keep_dims=True),
-      tf.reduce_sum(
-          tf.square(tf.transpose(feature)), axis=0,
-          keep_dims=True)) - 2.0 * tf.matmul(feature, tf.transpose(feature))
-
-  # Deal with numerical inaccuracies. Set small negatives to zero.
-  pairwise_squared_distances = tf.maximum(pairwise_squared_distances, 0.0)
-  return pairwise_squared_distances
-
-
-def masked_maximum(data, mask, dim=1):
-  """Computes the axis-wise maximum over chosen elements.
-
-  Args:
-    data: N-D Tensor.
-    mask: N-D Tensor of zeros or ones.
-    dim: The dimension over which to compute the maximum.
-
-  Returns:
-    masked_maximums: N-D Tensor.
-      The maximized dimension is of size 1 after the operation.
-  """
-  axis_minimums = tf.reduce_min(data, dim, keep_dims=True)
-  masked_maximums = tf.reduce_max(
-      tf.multiply(
-          data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
-  return masked_maximums
-
-
-def masked_minimum(data, mask, dim=1):
-  """Computes the axis-wise minimum over chosen elements.
-
-  Args:
-    data: 2-D Tensor of size [n, m].
-    mask: 2-D Boolean Tensor of size [n, m].
-    dim: The dimension over which to compute the minimum.
-
-  Returns:
-    masked_minimums: N-D Tensor.
-      The minimized dimension is of size 1 after the operation.
-  """
-  axis_maximums = tf.reduce_max(data, dim, keep_dims=True)
-  masked_minimums = tf.reduce_min(
-      tf.multiply(
-          data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
-  return masked_minimums
-
-
-def singleview_tcn_loss(
-    embeddings, timesteps, pos_radius, neg_radius, margin=1.0,
-    sequence_ids=None, multiseq=False):
-  """Computes the single-view triplet loss with semi-hard negative mining.
-
-  The loss encourages each positive distance (between a pair of embeddings
-  drawn from nearby timesteps) to be smaller, by at least the margin, than
-  the chosen semi-hard negative distance: the smallest negative distance in
-  the mini-batch that is still greater than the positive distance. If no
-  such negative exists, the largest negative distance is used instead.
-
-  Anchor, positive, and negative selection is as follows:
-    Anchors: We consider every embedding timestep as an anchor.
-    Positives: pos_radius defines a radius (in timesteps) around each anchor
-      from which positives can be drawn. E.g. an anchor with t=10 and a
-      pos_radius of 2 produces a set of 4 (anchor, pos) pairs
-      [(a=10, p=8), ... (a=10, p=12)].
-    Negatives: neg_radius defines a boundary (in timesteps) around each
-      anchor, outside of which negatives can be drawn. E.g. an anchor with
-      t=10 and a neg_radius of 4 means negatives can be any t_neg where
-      t_neg < 6 or t_neg > 14.
-
-  Args:
-    embeddings: 2-D Tensor of embedding vectors.
-    timesteps: Tensor of sequence timesteps, with shape [batch_size] or
-      [batch_size, 1].
-    pos_radius: int32; the size of the window (in timesteps) around each anchor
-      timestep that a positive can be drawn from.
-    neg_radius: int32; the size of the window (in timesteps) around each anchor
-      timestep that defines a negative boundary. Negatives can only be chosen
-      where negative timestep t is < negative boundary min or > negative
-      boundary max.
-    margin: Float; the triplet loss margin hyperparameter.
-    sequence_ids: (Optional) Tensor of sequence ids, with shape [batch_size]
-      or [batch_size, 1]. Together, (sequence_id, sequence_timestep) gives a
-      unique index for each image when a batch holds multiple sequences.
-    multiseq: Boolean, whether or not the batch is composed of multiple
-      sequences (with possibly colliding timesteps).
-
-  Returns:
-    triplet_loss: tf.float32 scalar.
-  """
-  assert neg_radius > pos_radius
-
-  # If timesteps shape isn't [batch_size, 1], reshape to [batch_size, 1].
-  tshape = tf.shape(timesteps)
-  assert tshape.shape == 2 or tshape.shape == 1
-  if tshape.shape == 1:
-    timesteps = tf.reshape(timesteps, [tshape[0], 1])
-
-  # Build pairwise squared distance matrix.
-  pdist_matrix = pairwise_squared_distance(embeddings)
-
-  # Build pairwise binary adjacency matrix, where adjacency[i,j] is True
-  # if timestep j is inside the positive range for timestep i and both
-  # timesteps come from the same sequence.
-  pos_radius = tf.cast(pos_radius, tf.int32)
-
-  if multiseq:
-    # If sequence_ids shape isn't [batch_size, 1], reshape to [batch_size, 1].
-    tshape = tf.shape(sequence_ids)
-    assert tshape.shape == 2 or tshape.shape == 1
-    if tshape.shape == 1:
-      sequence_ids = tf.reshape(sequence_ids, [tshape[0], 1])
-
-    # Build pairwise binary adjacency matrix based on sequence_ids.
-    sequence_adjacency = tf.equal(sequence_ids, tf.transpose(sequence_ids))
-
-    # Invert so we can select negatives only.
-    sequence_adjacency_not = tf.logical_not(sequence_adjacency)
-
-    in_pos_range = tf.logical_and(
-        tf.less_equal(
-            tf.abs(timesteps - tf.transpose(timesteps)), pos_radius),
-        sequence_adjacency)
-    # Build pairwise binary discordance matrix, where discordance[i,j] is True
-    # if timestep j is inside the negative range for timestep i or if the
-    # timesteps come from different sequences.
-    in_neg_range = tf.logical_or(
-        tf.greater(tf.abs(timesteps - tf.transpose(timesteps)), neg_radius),
-        sequence_adjacency_not
-    )
-  else:
-    in_pos_range = tf.less_equal(
-        tf.abs(timesteps - tf.transpose(timesteps)), pos_radius)
-    in_neg_range = tf.greater(tf.abs(timesteps - tf.transpose(timesteps)),
-                              neg_radius)
-
-  batch_size = tf.size(timesteps)
-
-  # Compute the mask of usable semi-hard negatives: for each (anchor,
-  # positive) pair, the negatives with D_an > D_ap.
-  pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
-  mask = tf.logical_and(
-      tf.tile(in_neg_range, [batch_size, 1]),
-      tf.greater(pdist_matrix_tile,
-                 tf.reshape(tf.transpose(pdist_matrix), [-1, 1])))
-  mask_final = tf.reshape(
-      tf.greater(
-          tf.reduce_sum(
-              tf.cast(
-                  mask, dtype=tf.float32), 1, keep_dims=True),
-          0.0), [batch_size, batch_size])
-  mask_final = tf.transpose(mask_final)
-
-  in_neg_range = tf.cast(in_neg_range, dtype=tf.float32)
-  mask = tf.cast(mask, dtype=tf.float32)
-
-  # negatives_outside: smallest D_an where D_an > D_ap.
-  negatives_outside = tf.reshape(
-      masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
-  negatives_outside = tf.transpose(negatives_outside)
-
-  # negatives_inside: largest D_an.
-  negatives_inside = tf.tile(
-      masked_maximum(pdist_matrix, in_neg_range), [1, batch_size])
-  semi_hard_negatives = tf.where(
-      mask_final, negatives_outside, negatives_inside)
-
-  loss_mat = tf.add(margin, pdist_matrix - semi_hard_negatives)
-
-  mask_positives = tf.cast(
-      in_pos_range, dtype=tf.float32) - tf.diag(tf.ones([batch_size]))
-
-  # In lifted-struct, the authors multiply by 0.5 for the upper triangular;
-  # in semihard, all positive pairs except the diagonal are taken.
-  num_positives = tf.reduce_sum(mask_positives)
-
-  triplet_loss = tf.truediv(
-      tf.reduce_sum(tf.maximum(tf.multiply(loss_mat, mask_positives), 0.0)),
-      num_positives,
-      name='triplet_svtcn_loss')
-
-  return triplet_loss
diff --git a/research/tcn/estimators/svtcn_loss_test.py b/research/tcn/estimators/svtcn_loss_test.py
deleted file mode 100644
index f5bdfd980..000000000
--- a/research/tcn/estimators/svtcn_loss_test.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Tests for svtcn_loss.py."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-from sklearn.metrics.pairwise import euclidean_distances
-from estimators import svtcn_loss
-import tensorflow as tf
-
-
-class SVTCNLoss(tf.test.TestCase):
-
-  def testSVTCNLoss(self):
-    with self.test_session():
-      num_data = 64
-      num_sequences = 2
-      num_data_per_seq = num_data // num_sequences
-      feat_dim = 6
-      margin = 1.0
-      times = np.tile(np.arange(num_data_per_seq, dtype=np.int32),
-                      num_sequences)
-      times = np.reshape(times, [times.shape[0], 1])
-      sequence_ids = np.concatenate(
-          [np.ones(num_data_per_seq)*i for i in range(num_sequences)])
-      sequence_ids = np.reshape(sequence_ids, [sequence_ids.shape[0], 1])
-
-      pos_radius = 6
-      neg_radius = 12
-
-      embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
-
-      # Compute the loss in NumPy.
-
-      # Get a positive mask, i.e. indices for each time index
-      # that are inside the positive range.
-      in_pos_range = np.less_equal(
-          np.abs(times - times.transpose()), pos_radius)
-
-      # Get a negative mask, i.e. indices for each time index
-      # that are inside the negative range (> t + neg_radius or
-      # < t - neg_radius).
-      in_neg_range = np.greater(np.abs(times - times.transpose()), neg_radius)
-
-      sequence_adjacency = sequence_ids == sequence_ids.T
-      sequence_adjacency_not = np.logical_not(sequence_adjacency)
-
-      pdist_matrix = euclidean_distances(embedding, squared=True)
-      loss_np = 0.0
-      num_positives = 0.0
-      for i in range(num_data):
-        for j in range(num_data):
-          if in_pos_range[i, j] and i != j and sequence_adjacency[i, j]:
-            num_positives += 1.0
-
-            pos_distance = pdist_matrix[i][j]
-            neg_distances = []
-
-            for k in range(num_data):
-              if in_neg_range[i, k] or sequence_adjacency_not[i, k]:
-                neg_distances.append(pdist_matrix[i][k])
-
-            neg_distances.sort()  # sort ascending by distance
-            chosen_neg_distance = neg_distances[0]
-
-            for l in range(len(neg_distances)):
-              chosen_neg_distance = neg_distances[l]
-              if chosen_neg_distance > pos_distance:
-                break
-
-            loss_np += np.maximum(
-                0.0, margin - chosen_neg_distance + pos_distance)
-
-      loss_np /= num_positives
-
-      # Compute the loss in TF.
-      loss_tf = svtcn_loss.singleview_tcn_loss(
-          embeddings=tf.convert_to_tensor(embedding),
-          timesteps=tf.convert_to_tensor(times),
-          pos_radius=pos_radius,
-          neg_radius=neg_radius,
-          margin=margin,
-          sequence_ids=tf.convert_to_tensor(sequence_ids),
-          multiseq=True
-      )
-      loss_tf = loss_tf.eval()
-      self.assertAllClose(loss_np, loss_tf)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/research/tcn/eval.py b/research/tcn/eval.py
deleted file mode 100644
index de24e93e1..000000000
--- a/research/tcn/eval.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Calculates running validation of TCN models (and baseline comparisons)."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import time
-from estimators.get_estimator import get_estimator
-from utils import util
-import tensorflow as tf
-tf.logging.set_verbosity(tf.logging.INFO)
-
-tf.flags.DEFINE_string(
-    'config_paths', '',
-    """
-    Paths to YAML configuration files defining FLAG values. Multiple files
-    can be separated by the `#` symbol. Files are merged recursively. Setting
-    a key in these files is equivalent to setting the FLAG value with
-    the same name.
-    """)
-tf.flags.DEFINE_string(
-    'model_params', '{}', 'YAML configuration string for the model parameters.')
-tf.app.flags.DEFINE_string('master', 'local',
-                           'BNS name of the TensorFlow master to use')
-tf.app.flags.DEFINE_string(
-    'logdir', '/tmp/tcn', 'Directory where to write event logs.')
-FLAGS = tf.app.flags.FLAGS
-
-
-def main(_):
-  """Runs main eval loop."""
-  # Parse config dict from yaml config files / command line flags.
-  logdir = FLAGS.logdir
-  config = util.ParseConfigsToLuaTable(FLAGS.config_paths, FLAGS.model_params)
-
-  # Choose an estimator based on training strategy.
-  estimator = get_estimator(config, logdir)
-
-  # Wait for the first checkpoint file to be written.
-  while not tf.train.latest_checkpoint(logdir):
-    tf.logging.info('Waiting for a checkpoint file...')
-    time.sleep(10)
-
-  # Run validation.
-  while True:
-    estimator.evaluate()
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/tcn/g3doc/alignment.png b/research/tcn/g3doc/alignment.png
deleted file mode 100644
index 7cfdfece274ec65fb9afbd1bf56b3a9c30597cef..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary patch data for the deleted 84,303-byte image omitted]
z1V!5kH?Dkm`RqFnKXN}(1cx3$g!otNRqH?9LZNSLs7QUo=UhLx_fc8VNKq(Hs$dga zg4FE?sTv7zV-4Eh7&Br|sANPqj~+!iTDr$%sDgD<^(IBWtl1?d?jKQyy8kN{89&Yi>AVZCL_}8n`^K-5}@5$ua;TV zjLU$2M|iRlh>SZ4RA?z3keE+an(YjFK$HV2XaZjt9+M4Zg|_9sUus;Rked;l^+pY| z5K6#W3t88O4*|D&EST0dXazE7EDD3L+L()s`mSltzpEJm+O17lX}0ysE$23;4Su!Q zH`9TnO?C?&6sliSI3I-qL{m@|%K3GpQ~cFd=*wF|3&*Hdrp3iKsSXwHPrB zLWxn0rSl=L!*vt+d=4CfWpOGxkvqifcJ|^CvB@of-v&y*$|Z!S1+pT=!j5L4GjnKI zMDwMuU1w-`1T#SH4_ISkA|qV+38_nDQuz`u4Zn-^rn z%6s2+cv6>GHADA2EPKs->KoA*G9@8@d0)6Q@4-uwJcE5tCsH#Og}k zi*xMt8kV)zC2lOvEoL%>T}K~fn>Orh;3R>{PiQ&E85$d7ZS{o<=OAPB3ssx!JLIzs zS-6tylv$h>wHhbSoO%9*-*Ga7Z0Cm?3p~|`Bvk&`!=F2Ud3temtr^$c6n5hV%S5R7 z>9BfB8xF^787PY8f%`B7=-?p+h-K0J%QZ8mD?1MD-r8~(6D zSvby0=h?aGM;?7>sHl2!x@E-de4ut@e7l2_yED1`nKNe?Jf;R7pqF5?LG~i9C0i9Q zTCaxfi`V9!{`_w{Mb@clCkM4c3<)z#fnaTd@!(@mzjx-+8Y@$^u74xRI^y<598zE( z`az-^)~-xlXJz*Ao;|P<$_+yVlN%47z26Q`9%Dz(mBlq0MRT=Vb+XYUk2_TGHC9B% zaeaP%{`u#h*OGiQhiyNk#x*_JnpnLA8MnP{^CFcDa8#=?Ki!F|`}d6D0ky+g2B}N> zCT>V5Q7b3&Jweo9VtnMr<&T_@6=XQ0aCP+Nt3TVfcUM4iUxcb?211QLM&PnJHBpj; z`@r#|S1z5G;SRvt-pZv5rMy2dG)fjpiyC>5T@eHj!Vl~Rf)m>s7#*=hX1UpH;@F2(xmJ|2|SP@>C7qf zplmXbbwcEVwXZ4d8LGNQOe-i^UY>4-xSc|YJuz=~DxqQo2A3Ex))nC}GtF*v?B=bv zomH1FoIUg4{U@3P(eXzT3RSf`jZRyYK5l}bMkej$jvP9A>D(FBpVBF{d5PAg^QR9T zJwgp71W8pSbsWepMonCy8Fw1&4O!^hv+rQooQ+p5seq`{Xs=H%E?hZscsFq~iDrX$ zLWVE_1um2itBNG>j~_XF>C|cE_OX^*?UV1kyJy!6YMn6W^FrKERNXQq}`O` zooJJM@{vr34<5U6`J#!kX&*iId0d#fxO04XaG;Mx$-p3q+Z<~*TT&z1X-BbfcYMca zG3~B_dL8T@5xw==D~~<)7=+ETdk^yPsL^gJTSKCNy=O^hB>ac&f8fmf?}0pNFQ?`5(G(_uA@ww7f(Y?EK_ZD<7VE=g~)x8#)0C znzmN@;N}?4MOI_i+<)}Qg>&bn7cq>q$+c0`H-WXy8EmMRx8O`aUCN9Q4GQ2 zJ{o5XzY`vy7$;k;*77-a`Zh~D)sFt>GwTQV%oH4g_f|8+QzS792GKnkQWp_LpaOC} zTL7n8I%5nKY)A<$(*&rnl8vvM3w8Ou&CZ(a_604FokVJChmWX1dJC66Mk%CN$ryQ= zfQX-;!O0ZAS(>LcP7sgiou16JUETgE36or(x+ zu&k`5vvl#?$Y3AAYnJXJYLFi`5RU<&itRJ9CKJmz=^BD zHfx~WtQnyGP*$IvJ=ov7X96>A>DndP^MKTIu^3N0csM{$lPM>o1q+y$l99bc7N%Jv z6jdHQaeU_b<<P{0*YT=&#kaE_{7voH%aFtefI5% zY7MI$u_D+JgS8m+K<0)xAjXPbrIJN{0vj#I=@N$Nl&_Fpxy;u8I;bYFe>b<8A2rG$yLvygerVHpHdlTsU&>|DQkO&DjOc$=-(@{Os{k&)4% z5!M(f@4=$81{MWPP?V$hXCU#01_xQVv~um5dPGVz8Xvy*?gPh;Gic(N%!E-<5&`Rw zQ9zbr%TZu(;{Fro&LG?%A9S>P@#MQl?>ks3l^V4s^Hec7_6>SPdqStpHq-9p&Rv!A zM!d9a95b-cdhg_0_a8e7pK8xSgnEn0P~w$@9Ux82$_V*-_{hO)*RE*KcY%qV7QPd7 zEz-W*v9z?bSvu3bMZh_ z4ZBYn1rkz;PPWibu9;nANXVu!<3vDYV26zf$QNovD$gV^h)yFt`48ln_oAv!HEb#U56D&BZ?bptq+GOvE3iUz@TaeKIPhAp(~mL0 zPtYx_n;7(;yyIEy}&J;V<0WPBRkQh;pW)15OI>N_ctAbzm&|xv#MGu}R=UYH$jKrw~sUS$AuS>%Reb>{{g}dt?%zm;}fq{?K%B zV?*(VsLmhZkU+|d6i6$9X0EKWOefh6jV~q`5KES|MuTb-!xoolQ&HUmMj!hpFle=v z#VL3+CpTGG3eEi4CW&A(qz)xG#P!(si4)SyK3{oV5gA5S)XB_1Q&=aU86m*rkVvQ41%~HOX*pCed~f17 zh;coqR2lM8KBGDAWDr!k*{jf+Zfvl`IM}a|8Ds{nVCVrtd8 zs>p_K6p!fb3S7WyA}PXd4XiR`W3*fm4b1lKv`bJ8=EWtWH;GveO;9u1{4*DS$&Wi{ z^@|#ClbN9%U;4`L&95~sPc5FkG&8fXapwGuk1owzpIQCz{M6;^%f-Q+&;IW3%Un<% z7<`E`^gpg7@XYqz|I`YcJpzGiw*GE?&4Yb7P_W=uO4Eyt#SRn zJxA}`b1bP^ax6TB4EbLGm*|8_(+@xQ!k&Z2FHbF9np(VgZHX~oygGOB>cZvgOZ-hQ z)s8;$WNvh?-WUOz{S+lRq;5=30tLu=B`3RM_j6zR#(X)tFtvDPdiC6uxeM2quFq-u zXD&?7EZ4p4z!P62O3voa*h$cK7vqqs?__`H*{_ce?wPy3R9nro+@?wp! 
z`SR>KF|nsU^V~>b%m7RAk-HtRF=7$c#V5IZ`Eu8+?Ay4rWK`6&y6K?2}Nvq6<&0?r_lt+~+mHKLxHEKzXIVhv>`qZ_-;eJxJ!~(g6$QMe0 zDp5x61G;vC)sYD|ok=HF*A^7e^t|Pz*_{)^5mgO9T__gbwL1eoKRZHdXG8MV9?_!3 zr8(_bOtPWIK8chkkQ8k}9)O*#fGYJ0@i&FfwPG&CnAt=%mkX+^^BNut7?+n-lc+6R z#N&(~sVj3;Z&Abd!&fAh+8iA$-ne#Iazqlz2xwrn1us3wF<#E6r>AYHqeW})*R3Z* z`}b5k?J5Nb#Daw(P%Bqcei1tiXrpFXkc%tJ6Z`krI+HtF6Ij@PnVD89G*0h)?e@a( zU?VgZM5BNrI|Odv(6SLE$zq}y+K0|%%{WdJ8jO7)sf0`B*xQ9gbHtg}mN4#tH0!OW zrmqYQ4Ji5paFuuBh76k#u)G~+il}PW?(mIVj)3*7L{1`EUs@Pqb%091wERctW;QO7 zWu^$iK%!Wlh!}3WQ(M(Ol%3Z6-1P8Z2_mH&OPZDnDIy3|m74d4LjbJ3p-qrm-dMAd z9X2~>vqHVR(|87E`dY9L>3}Lk2Ro9`VJ@GkRLis|;NV!&`aKUkJ;p|BJ(x2-_(xG6 zg)KRCb0cGJ;8QM4!7|IYDbHdn2<>QTmZ}rt^MWkpIps#9e|SXBl5A4xqnn2r1ponY zCi=W-ZehT(-DkeQS}L|PBEQHm;2lIztyyDZ8(JY)VWQJcwboc$uciiu#RmMT^2pe= zZgOyVY-)a98;HxJWS0!Gq1HBrviL%kY=;q6Y7Nq6s;ZM&V1x{J;RTJ1Q6qxn1n&x4 zL@XX8i-sRA3ntA5O{}c!Bo>!e{Y-(t)6IG5tUFPR53pb%Ci#oao%D9?-NzaNM$5|v zxK(baqKy(zK!eyLb09R(XhtKWquRs^sxrR<2U=qz`>R*4-XwK?zViUf33qjEgqyl&&^?}Vzo)1q^7|K8PV)e2;2 zK$sO@OFr|Jpb=W8#=c)+b8LKE%7&j!|A-XRk%q#}xm*q*$z%F%)Ur-Ewf`Vv?a^s3 z&(G$4R(Fs}!BJril}MaChansg$O12tY>8|!jhNEgsN#AeHt9(AB9=Tt#1st4kNhCFX*xCnWu+!a#kH0EZnlm-jz zG7ISH)Xov>4|t)j78P-Y=`_N6kR=Y|&o9gwp$J=wlGb+cmI$89JvEJ13EM#udETqeYO|$MX=p6c*h4 zO_(&|qkG995x{ohmBqRK{t{Y@FDCW~%2`w{c=DC`vg(Vimar$aGDh_*DBg%5=HTD} zfgV5&SWx8(E91!sL83Ke1r#)~V|0Nk1Ui*?cb8XJNZ>(-P&{bogjq5agGBapLMmry zs#x@wSI}0P$?DP^E5HcKBNKQ8*5k6mR;DIF!7cqoz;a}~^ykO7-u>n$croQ>JK&o054rpIE z%@s;JMk}?dVv$_30v0iV^`{|7)PO;gqDSb2ATu(D`}v9) zhc`vPyg!YbIYqv1te5k-3?rvCF%dhj6Q9ECMeNrJQps+FozfY9ZedpCRGqN0Fog?1 zwo-20iBGithraun);4exf%Z=vX=ngir;{uFAgEzaeXXqD* zYAes#>%#Lbhrzs61yGVVwsY6|Mjayy{>G>KjESNh?T}cSsOx=n?&`$Ee#I1+ zGi~xHtkH6cle_CtytKGlZ<^@2FvlOcSJJ1vS~uo|Rkj~}^wH#=19}oPG8`%r-3af8YyBC&)YwQJbLi1$@T;Vgn8BpM&40m4EnvyabRCRdT&t86BTAK_{5bFt031hT6ptM8>dx(QrPA zzXM^=N>m~dmYe0-h3jXQFP~bUy-=RJxH|RG%FMay>QuY2mZp?1tYQ-X{mh{!$_z0` z65g#}N{K||Mz3-gNPz$B-CUQ=09wqiSjt`bd$+bg`6nKavSehkm=LC!r=dYoJ28BT zta55A)s?xG>B}oO&hxiEcXj3Zx#ii5_3}JFLDykzjj9mIGvg-kxcf#JX(gCxbvhG{DD5ootLwi(|xcWrZNTQF^N-_1)Pj)>a${>O$% z50a^67cGP-yKCuSTO=U_-SJ?B=v%3$GWD|+-0dNpxh!{mK^jan~4f0g`8bhFF4Kl-HNFp|c|5>`a z^U$L^4}=?Q?7)C=3PmzCXpSK#(&P__fN(dPJL@I2;tGvGi>|b8*7cvb|5?1&dZ<@a zW3B*sNm4mKM2zGB17frqFRaqP+Z{rU8Wj;^Le0+3QZ$FbYIqL0OLz7$OL2T5N@d=5 zLL$UDv~EH7EM!6 zL?R0JEX?Hd<$9G}y4Vd}lu02oWC^;kd^*j1vB;PhJ5q?!CZbT<#0H;|S17cq+~fzu z99cZ2jr1_XFf}o!s~g>_jXNOdITk63LaUXsv>U;bZuOM{Ne5Gl1^BJI8lr^BE-T3 zp?VS?2qxAgaULiMpq1=klaW*_^D?wfa39?zvnPfgSA}o^f6l7-GwMtH}!*Pp1lP>&7^LQeG6UNW4fAQwg|5s*E5_OPN;_ul1j zM~4P>ja9E*5BzkJaDj@5u{Eei2w5@5T4iXj$_x11*2sZ93Tkh6fI(;m++DkN?bxwn zeC^#O0V*#am%~Je^nU6nS1d5!L^8l90_X+bq#A3qfaD}Ir1Aq=O zRI)Fxnk@VY-Y!kiheHW%o1Fq!gTUfd+);vGB%ar+xNY)0*f5dJZK|ZEDUlPkq@o1n z7vyS$iLbXMg?ei|`dr%VIkh!)WD zji_9!4++b) zWHvcDdG6df5Xx_GsKqb8^3}+7*U9G(95{CM><4UGpJk&HZ5e?ptTl!FwF^GZLM5!P zHNww6^^7!-fT=@G=wQg)2*t9AExZoy7`FLEej>yc?A~|iwU>X$DhGA~CCotyE>V@b zJVRDIBoI$*oc{jl3(r6QHOwRM1LTaLkW#>t-hcS$OW%DzlMjXlN-8K9ZZvl~hJ?*m z=jx3c{lnuvK^!SAC4692$Vvn7g30?1zyJR04;(wp3%<0Rljs$+NwEMAysZl~R_S*Q!Ww;{FIL0v!Yk z>b$@awMEO4(nu;1xgOW76}6|TnQ0IPC&mQ!Q9HP;xVaJ$9MU_y&>~7_s8HY?m`y?t zXs&FD{oR--idF0Sk-OL@XsbqCPw`>^_8Q^u6=*{^!g7>C0DKAp zQ9W$D!&Yf}sUA*mgRDmeSr<15aUoY5QPuM^loF!F5LoOcNm?ghHIC2r#0qd!t4-d) z!7DOrWqL7^-%2S%WL9`F38o1!>6`;#5}})zm;fw%)f{gHtls$Kv_=mfoV#?oQYrW4 zGAiJLCpq9eNFI&ict8y2ndnAoXvk85#kpV$bT@HDgz@_I>u@Dc6Zf&d-tnoCv+}V> zqSXyZkU+G^4Q2!}P`@sX0Qy)h*+PsI=hv4d?6KlZNLNM9n0}brtv1Vp*hD?aCL_oQ zQ5a++MFdaL02|W>yaj;(ce6ZH^jV{>icC=}_@?NC4zVO)Pi?P3vlvp!BZ7A{BIT%J zRrVDzDOQ$eCdC3^g!haQ48xR-@D<_U4N{?`HNy8ZVZzkc@oY9%b@ 
zidt2w7?~6wu{>fkXkjYzHX89T(V6~1i7cMIg`sIy?O}Bc4Gl`u?b{CkEX#NC%hD8# z87U{8mB>M^cwPZGaD$~QKIxbRkw_|L2h|aO=QXZuxiAPV(VUm4@_I%#RdCwrfNWVM zDC8)6G(Tbjigb};*h>n~sK&=?dr;0w{WR}xFk7qJ@E@f;E_=kuOm*FIWrI8u)uy-* z^0yK$`!M$P1-xQ`O@w(r9Am?ZQu1;~ctoE1CS?wB*XofWwEJ`7r?AO^&f%qCY^Hf2Pz0=&ND@K_Lc=MR{drfqx>&6Nb zGx5$$G&zxFBs0nZ7Z@{=jeXgOx>hMLL*CklAmu%)Nl@8L2kDb!1FEppPC`;HvHeCCu1Mk@ELY=+!Xp_%wTNUF4)wR-gU zqn|Z=jGR-42#0P}uW8vbJ~d-}6GrV}8NrY(7x>AZ>IQug1+rA($ODi5=*K^J=z&9p ze1_C7@qLQu#jc$YF~p*ltJmhAdFG43&`$W1CKRF&PnhbwxcS&;Klc~^;om*<;89jJ zng}kmpueuz($1&^B2GJ2mcgDMKJu>fl5Gt^o@DFv$g?I-}DFrV}c8IiV?WEJ^S`9 zFP(Ywjq!oOKzA%@>?f92~=fj2@kbjd=CBxXS)7AZYu)C_;4 z4&KojR;giFdhgAjWK-rv&NzX_4czkWlaEJCG$tY;AXmilGGrK2`4Dd` zhn1C6@4f94Cy_@_mIaG~(3Z+2gKTbLW%aX9Je8#`UezqxJOpM$I!ZV4M{@{eBd56f zcYes05LS#7u&tYEaN6Y}PNZN>^xE8K)_%1DV_k4B298*cGt^bDnaxIQ%jj<08HEZg z)tD}AZx~nPk2#kb$E?vi*nxwfxOwy%Rh*gLjOC;5jHVTMa3IJabHAKKRb}vq`PgjY zwtr_|4!6;?SMt@VuA_h== ziE2#^Z0l@jZp&euhArQJnNzHvU-IapG*C1G*Nc#5jxGEN^vROKELLJwu?q+fvx7-O z*dkM=!$IkjmqeW#3~*Dr$l^KCsA3`3WUK>!N_GG#2N)&9gTp{mG>Q?2-KA_;!8u36ks(gVAGzc)EDPnH1=dV!3PPJf_GCJ0=<5=c0eYz{aLR5d8yHvzs zDRIUHwPT4`=CW=e2n#krSx&RiC0*_Qt8+zy)U|ps(8VOeW#kw5L=T8E5LGOW3jNZU z2`H;$^{B^93(QF1!c9dVGbD|~{NLe~S&CW`EMa4IU)fhk4&4c)!3+{pJzza)_sQPw z+B|#vVFe;XBKZve5KJMYLWs%`s#?%Uxh#OpE-(U@+>i=i|6aAVYA4EgTlYn7j(W z!3V-}#pi<6v2nB+IqyEA$P45n332umz9GCz@Dr&*3zW!#GVUZ3kWL3$f~aHwB`mbs zY1V7Z0@#5oEEZZ$XW9ILd4PKumX|g#41qv(NZcxa1|@2dbUQHZcN*`V;OXJa&^r_8 zJ;TqIQ1~kVE3gQ~pdDHJ%79AalLw#wys`^wzAKV+ zYN*^^;;IcY?%1(IzuaB30wNfc03eTGJ;Rj*2ACdBKJejMkWSWEWzUxXAWVIrN+iQr zTEU*17?Wm_!Wd66LE7Mi)mn@VEGdq|P+1VcZU*dcg-5T!h>Lg-PzdpQ$p=uD?1M{q zVKi+hso+uBS_7fxUbPQ3@|Y<%7fBFIAu++WA6tXr8+ZV7j~R5Wl)1mQIFj1@Uusr{`|@H z@2r+9tO-Gw6EB0bsg+-;)=Io2ikjm7p)b7nB_}X@;N02{n6ei**|TTQ$&)9K9zDwR z83VWwkbxtEBC6DB5ecT)jQe>tb8CR&Jmd_diZjWtC{9QjNE0A#s7A;O>T?T3t4T28 zvmhHJm=$lq`NPp8EUPFe0a0*chXII)4pET@kAQRHdfm&S2l`5`0$r@g{W3w=lj&0UYU=)~O3EGx02@AIspVG5h55x0>_Z5}^R0I%248(gB zl#|hdTiFE+5D@jC3}~&*D~t$#@a9QzCy}<__C|4`XbJG>x_6Czf`3f_LIX3D*x8>&wU;6S_6-ZLRMdDowe)9l9OoMS6 zk=DIxbr2>3j|}fW`Z>or`@#FvTj2(WVL6jcHySK&1T+AZKx^{BN1n;=qGpG9X23(R z6%A>6cyz_l7rye%AAavY^QK>3a|EljWel6SGZa-vb_R!cANuT*Mza7_1+TiGp)NA2 zjURmk?mm0@{16M|9SZglf!YBRQf2u#39^k!{m8?gbB1cdhY*aa{t;hF9ku#*P9A#hMJ*69raTa5BsMP0Fpyd| zZrs?rcQ5B#y4-pydx-FwjC_jdGxFaMt!PhP`w7)Rw7{MCfgvHKFvZrP--LbbYiy925yr@1MhOWa)B+?lwtDW<=bU|p_xXIY*4q2@Ak;z<_+6@ZuT!g5 zRaRDJR#sM4ty=84!};oQxk7Pb~$pp*$+faXz^jt$Q~`z(7oiJVVQIAS8bl&=ZRZGGE^ zKJvAH`iG-SOL#SuUtVu#`4<_=a?SY5`A2zs;^@3azSRU9PBrz}}UqA&0yMvsEV^0WFG zyBIeRG6TtZ#hF%Qj6h3J<2vAIzDA?f63B_0(}=f^I&`58{)<}iuuX@DCn2A`eWu?4gdD$qvjN;*ea zR3e-7`AD)m8CheV!->pkEOrYK;R_ZD_ub>;Q%z2996rTxOSK!9@a6L`uAzW(n&aTwB1 z&nt@Z&_DbKC37ksE>Gd1!%LveMK=OBy=+JfuWp7-h!6qo&skA(3oOZlJf+!F$U+!4 z;T)?eY!K!P-M#_f@jh+}rf0K!!n;8%>Co?N0t+Yj4H^PZUu1~}Fq3ml z&Xp3gIC!Zc__*ZSGM@qA(1S8Qosg6TM7EI-D&^iakBRReev%&XgC88;clloXZ_(e{ zE5yd&{KRXnyXRMa>!HtmVR;#uTR*eJ=|)=?mmx3j3(u^cUYeL&c;^Q`ILm%;`aYt- z(rg^-uo%n>dC0lv zHRh`xTUh$VbDutv z8s)~2b>4LWH}QMk^B&B2`(W%b=(coS1L`ksR#mLSS6Lr1Obam)b|Vgz(cF40om7k04EMIdtp$ zr{|t{{E;6$_YB%cyDrL~j*U{?x@G%Kcig?}#yfZom+r+e#^F-@=;wXHk$yz9MhAEN_8^BP@Jzms}R18-zydfX*o_VbgK z6PzYCzWCXHzU78}Sn(WXyTmai?8b4un5pOrs($CTtq*B@dR{XwsowKSzex!y7uw=fBeSCPSpAMGi@XW* z>Cb=S_L~kd3*Rz7v$AxS;TU;leU5>VJ(oMS?c?Jv_r3f6?U!GLJgfEt3Jn=;=xrq0 zDqfdBzksSg85CL6hfGP+Kg$3FH&m@Y8r6>0hzbXtk!wLEe-+_*P{%_^6i1E|B^_>Q zV|2LoLtN*T7^}7~d@hbEI$S}P77cAT1`J5nkN)uyiy?TuvIdCKp{&vuv>gY_C}2Rs zQ^&3?9@)^aq^7m4fyyvstc7^pQ`#Rm8lVFRF~GA$#(|pUZE>wqq}VY8nW#?;SQ(*G z-M#uNKhF&=_O{USREV zR6b(~7D@%DEiQTZCJ5w#O%|6z5h>rM<_2#O-m$m^BEk4%!7 
zEr@)not~O0WjhHDD?nxx<5me7Db}JfQ_L<580bcoPm)5s+F0$ z7;PpJ$}%x#HsDAq-_T1^3b8hkJ1&?mZXO^HKH{pYa&{R;{+49d@J4sK{`%`5eDFa` zTL*V%t&DS^Q@K8=PV)Qk4jvj14=XLmtuu45U&0Oj$?M%G`pb925oJOTZ z!J;lSwIa~4>_}1wc&6^S;|>famiJ{pfwJfWwT047ps!DZ`TW*nk(k)0{E_0ZnCZUEW3oy7~dtsOv-pX-#PhGY%?Z^Nib;S%g7F;xVCvV~2sN zLjp*u(wPB*?IbgcwrSS`Jw1g6$zbk;5P~C{O{5$;li3TKfPBeO+i!fDhF} zQ%OjTT!0l7Wk%ONclN#SeedAGgA{Gk1KV?<72j?%Z zSxhW!**3jn*VqE?IqBSFxP#Pl2ff4>2)V_xdG*y-_myU~kr^820>gdU2Gd;6#M@@O zY#qlDL2#xexU1-Ye2PiCF{yE?;GHT0FH~sLXsk)k+Ab@vwmUQ+^*gERx%q_Qdyr3YBWECu%u1k6(WMox3kPaQx`e#nY$22iN(Q9V#+Z%3jVL-?eXQ zE1v{(L!!%L)i6j`?Lx-3&Hn0d{jVpFA31&MI1OR@lA{T5IA53pSQfU;@4G5e&4{*X zOviaSIm43Z%yqZF=gMnuJ9+HL(jrI3!xGma^a72~>{-}(%kImkxAO&IS{F7s*tAHs z2iC%Mszi z%JFlc23kXwI-&j}QST{xQZdalY)r#wdt*)|HgA&a(x zL?p&OqO**Wb^)jb12NF(47{l1wnf!e)=2DvSd^OctW+WKm^Lml#@Q3LYBbq86^|0m zA2sU6)ON1a%ZeyiVjmKRToEhURl+6uEk@VMsS&i4z*iG~>6d34kOzpflR%C;{caIj!aehv5{VWh8t`JI5 zIf@=}Vjn0*hGV^kmzs~AU!vy=OHhGrz>o!)B&R&zwF^K^jI2K%2l?)Yk1g#yRwWP1$}j)+r?cK>2Gt5goO-lMWp^ z^ys6H-uaF@P2_*N3*sXJf<;XIi)9t#3V~%DcNGmqg3QxohYoY%M|L4z->fMRu?WJr z%#!gqo?pqL41f#`w$DfvPH)dB1!z2pfE85+8$bacBH~rV#DZ4zV81>J;n_sSK4Hu| zD2@oo&}as}Wn{$^7q1K-$`3V4I9`ET_PcQiPj*l;>~X4rMlBWEqal@Va!Ls3;JTN? zwjGfcWy43~2pK~}vQj&J-23sVQzuVy*wMZB-pdKUh%0POhN$O?A<|{p)5vRY`K8~z zX7S~teCvh9Nz5FT6?G5z9?`Z1UbJUiaHD$Mk8~m}7saN7Bq(0XAmG_NaNt1y6kUp& zK(5eojm;HWy}rtsm1r0Yd`Q{Uy?v>gL}8ak;1$QqmePY5n&tEkw~DUuy;I17(P8(8 z7jT6GPgn*J>vfpuU})cT+wY1?uDl(?Sb!&(RRLFz5g|E_2r)CF8GM(}%%m{;gT{VK zoEJ%X&zO2xjhYY9hSsPM{Tg-+tHL+um~I z$IpQ9cOU!C?j1Y#?YokV&z`Njux01vS7N$nl@z?_iTK%Kw**5jm|qKXv#i2m?W+QM zz7l~>gf40GGoaQ4d|~^6+um{AEz-{($Kab%>QIDPoun;?rgA$0z*MpsqiGA)!u0sA ztM7RK)n+rKvh?M;_hk$8D{uE=38olbF{T+7JAng3=XW2xZ}-7_6e3C8*IodqELiyk z1aSnv)KPnbfx3Nc$M$=Et3&*CYo#RRwGHP>tLEdzQ zH4sk*0Y8-Die}^eL4?g3wYIcU?Gi;&5Ys%H+;PX--hTVrDwjZcY6c`t=@EZWcd&Py zaV&sxh&#&Ao@EEG?+{qj6$N*37ABK3^Q#Z$S9n3NE9jLt2S z!OC($I3&V54nLtM5~pg1)N{Jr#Y+Ax(p77QHq6o4|Ml~qzUlgFd7VHDgs=O$YY1BS zLMwyv3?tFR#*K%r`Od>%d*}Nu8=vI0eOrhoq}y~YD45;dPxlwD6(1oZ7A7W*d>qwj zjUUCYjyZNNJ0Brj?77+D)*9~fhIdz#q{{8WBMhzSZz4P*g-sz9;mTw4^rB+Yrl6A| z71UVW$pV#$SD8k!kIq%wqfnxH$k0?y{jah;u+yHv(D21-QBe_6V`7(fk!qJ7P`nN`7Y>CG&zeA#wP9XxEvlux;$JXm zd>X^W);kO6_-!$QFo!iJvpF}TAefX5F;gzZf+@NlAVsD&l65)i5wLp9g)1^Cxd~dz zRLZ7wZm^GX9ZhGv6VH@7?^4x`w4p-*3RDwqq0jrAM*0HbLO~_HPaciiqb^)~X~5Ek zz`!bE4-0Q+-+J4P6k#@m%|x&EeGR)y7jz|I!ifhnHn;e)eZV@LqG3v!p1khO=%RGT zSl*8Q8h9YfL;XSW+Rmyt>3l~*oagXM!!zg6QR`5>iEA_pL!U(Y1lC7>b_TWrMa+Uj z)m6F#<20mjw&1(aC1hmN|KXAq)(?h0W~xRkS%IUkp9(aozGJAm*kMy%ZNCo0jGi_pgpSO|QAB*(P{f4T_iFijU2ErEiX`FeP>r5%#!(kPJ>tyV4sx2P*+WfIs5J$!}3(}l4yp?Ua6ro=~hd+NI7s(ie>?!U}FGM zssM#25Qq5)yaq9ez*E?g%%I^sB<%xpcEZRaJQPqKnNBs-<|=|-4KU&owF<4WkfYLn zk>jqPefGD0>$iC85Qj@SVmg#J{}q7=juD^cyoSAsb+Oorz)1;4w!uU>IH&ESfXdLk z`R1GHgSd$?aBF_T;tMWCwyH=XZ0JZKP9?!P_k$xeD#W)AssM+Y+*_SpK#{B3kp?0( zm?CDz4^m8PJd`^K4AcgBfpKf$u&nYxb_wolA(YjHA^DI|TL*B*D{$miqFM$x6H=Rv9^S?-?pJMReFYoxT*Z0 zi3KLN&+OT@mt8Sk4jvaFm_|ZXs3*|4Hq25XlWzDwHMVtj``pg$SA!}Ih)@y1?{5|S z2Lh<*dlX6q+8QSDbaHCT%Qz7C93*b&&8Dcp5n}h71eDcYgZn`M|coOu=%$hTJT^sq-4zJy$bAnj0`_%+@^Od7~Ol6TMTwCanUDUddQ0XL4^yivAu5cIOnf7ljvyi+nMsL4unXE^MglQG@d4K_ zwsMCG%~i~8Y~?IE(lAKkVrTY@sOnK2$S)s&V%4xGBQVbi>9oH4Gq%9HTw8ZTI|RBT zyWQHL4=_Qjx_G4|Aq0Yhm<_n781fDsy1_;Q#~vp6vXYUqpzqj`E&{Nd!@c=I9?hTD%=2Z37ZOnH;LeaPD^Y z24z}BXg0MZ##X3HB!OhK~Gnu1Or_zeW=x z1WJu+Fjlr8CM3mK%&@KvMh(@(R3JdgmWUG=-rCepSh{|9Sb4x@nHmrD(EN1IMPh#>kYD-~V$yRITy@)K7Z!Fd04*XT6?n4&27nScu+$L& z6F>)~+E~{b+Nkkg{^ef|95}!&mBLokf{lqr(H$$D;V0fux<`*jgXDAD9+MPvv3iFk zWCTx1fjC~tn5$_}E$z#&sIkXw;6%|O{&{N06cd9Lfr%;DVDt(wfkE}q+pxfjf-jnn 
zuWDiEm?sPg%tAnfH3AhD4^&Zv7e=~DF}Sb(M0HxRU^a|n=q&-M&E>S-}=_K ze)*SwnFwZBk+Dch9_j#z_dD?;lAEi_ajg;M!pZmW{$EU|u*lFPA5(vX;3ci+zY=(t~VmVAZH%3L&h}aZE&wc!Ja9`qu za$~#!9ra|w>e)pseLhd8VZegAX0`&U>6kvp9YzRWj3)z!XKvZ;M2F(oQQCgFm#AQrpJO@~ZJ{>2Df%|G<_W+GlTH1n+NBn6j1l)=9zYpy}e;;54(uNMH zHIn0^(Eu)FHszs`^Xu7^qq`A8CJz3kOb6^-$fb3w)QD0U7T}TH(NN9pAjOE`rlXNh zX)k{|`VAu^u|T?%%d2eg5M&rfzUHWAo+d;0$Rhsmx8=v`HDgYkGhM-uEaM+S7H!@YZaE}qjU;`2 z)a8#25&SAXM8#ZbEaOT0(Pwjl0eM@BO{nct*0PzmG@*M0^7P)op)C2VO)(turJ)xL zGp%Ass~?25fYVEKdE%{npJ4<}Yi;)jRaj9L&~T#=$IxU4<>?b={k#P)n59cI3Dr1s zIEAhlh;9wQa3yV!4>5%p?u)SShBvyX>P@1Qi zTEskTZKs5@HFnambp^u(vatiWn4qo0_q*7+BFwloB?46ghufs+1;|s=JIjFM`%SdQ z%3n@rtjeTQHzr;^1&B+>6{@BE6}yDM=)z~pIXd8NZ+jcrNWrC{%^{ecR2B+dpp&h@ z%H+^pw{G!DI~$kTQtujdK4i&s6buoWg!ZRyyi%dF0=S7+kv&29H+hL$RDujw&>&9@ zx?;}o0&VSK?0)e3V;*7R)Q2`oCGn9^+y-eQZbk^5VWSyn4=m_@ZqG|xhI{rOo)J2Z zz29wDo%jdoqQCIr6d%%p_#wOLB(j^N@r9NKrSQu{a5Ja<-w~xCaOD>W_>Y>*6m_aO z_`eih*2sBnnU`C6*PiJwh(Z%)e3{-bZn~SCjjaL2f#*x#{N^|By6Y~8MlT@et1wfl z-$t1`bxsF>z&3-Myc8UzQOKQt#Ta57J#gRv)jj<1!?oj*Y%bDNL5V9<5E{2_!Z}H# zD058&f|DttE@6FpTG34}02Fx=hfqZ~uJ#&#DHVbb^9RWyBOMc;$TYv?$M0b0vR*Pd z@g|N4kytRTy7&SyqI^YSBse*bU?f?@4B+oZUb`NE1EIl$K`dur!lG#17h;b{Dab`Z zW_QZATRCvms%}63`OmY%5$8MHF#aQZ0dH7`Rd0U0bD2n&Lc!2^Z4HFVOHE)7kM6WSbMnNlHh1{$xx5 z2YF|4d%sw-ufv|$@NqcHAta&%@|7oZY2w{4{R&cHG!i(Gkgay87ryGLk}(b%PFo4k zbyLGgvQ$9v$r^3w%2-kGMr;S#Q)ADPq8R+tL^<#fCPz?c@@NlOPODDLEXd)uNheveRS(3dW*{ zA`Yw~6Cxtj>{ybATKrYHP1guQlPrL1{oS zqztqW95T{#o9csuog!-8IiwzGo0*G2uwH2957HfcAkzln%XFDYoW6s!usxBFs(H!* zZNmsd&b^gq?oN$O)@1;(Qe2emK8n>)&z>pb$B!TX_P4+N*0;X3bXp^CDhDG+v8MAZ z7e6HqL^KlL^`cX=EpCcU?dfqj^hrL07+A^XYE@d^lN2zV1TaU}n_pO9U;kIW@|Ei0 z$eJC@!OxF3S{k(zp3*d6YOve;90=$r8U1OsJ)LyXv%+rd4$+`IEDpr#rMkhJnVA580WBR7bv*V+k$X}RC zMJl&wRN0Y7jn`fGOd`lMNuYr#rBq~49yWLca^(Rs`{%6PzyfxD_7y+?azKs0ME3Il z77$3QhNj^`W=Jj=2Ql%s!#;>fb_k0)CDM}=Mo<%i9ux&>ooq_YsK>O4Qx_eVdG*G2+$bgZuENCItBemJSErq0zC3Y(`y1!xT(M8go{6sIYTo{3P_9z^THNOf8D;;u-MlZVix><2llp`RaLtgz}9q7;dz2|WZNv4-i3 zXccN#afv(>Lv<+V$zrD`qBlK1XezFk6tKs}`C>?X_0-VX3Wy<~OgwE4+R`7GAd_%3 zFVS!dZc>Rx3=1__CBtHb*dQTPN1ON-nTjZOpoHRzi zB_L<`AF;fa&K!Fa+C+`j-HMEOCse!_3mv7qza?n>wH0e(*=0H(Sk*;mXYi#n*6sP( zeDMO2P(bz;?zr^`!xu8B|5<3k_|7gat%G3hrNclQ>NiZPSfiu^>5!^j+#;je-gK08 z%0%xq?U%+?6RXvwj+#=EER>>(x>iNi{L)Pw5gIXHO@q&ENA5Sb`GQ%_O?0H$6lk;I z!l2s2%EGn3?<0L;rEtyEE{spHzO8b#34})XFn_XbhEoLY9$ffu4M3aAog+Kvw)Fyl zMjIk5S)Lyw+y$~Hkp~!Rj9&#mZ6S?2uAK=&6E-#7F@Ce3_9ZWb0%=SE zm~YaPQ7-gkuDoN#+m4@l;8ROyINOxh1A|MNP+D8N23;cNdg|!&9MXq|k@c+N^GpUs z=Q#XVs74r1Pvxl*E=fr?tpU9oF-*-lY_{VufotG21yMQjWOxxEhDEF&tI&?!6fje{ z7u7%u21HFXEm*a9avHDDKk5?bmxlfpbG&pS-$(w;XFhY^efP0Njugfl1|FIW$uxt! 
zwi>%8iHtbcjEJmySjbtLF`Zo&NKmtK^)?}(Km%4%7dTMY``-6HUgE}C3`0rjbu_p2 zHVugsVeSX^JjFvwS=$m)SHl}mt?iXVWhm5I&PMj8=1hdGBLqZ-X_Sh-YQaq%X2Amz zF-yWGhb+lkWFddX4S^MgA-%HH1rw47WQ5RhP}vS~c)uBUmIst%rsu@X&BIK3X?l8` zb3eXgwY>b<&wlorYp&tUO#~cr0h!rCSJ8QYi=nX#?E>}!PVbnmO;oi9oNgnNUoo(W z3@#AvzyE%=lCdkiFt=F2P$$UTy2JI_km7~XuJ90U$V7i*29~Er@oI4rCwAnj zv!gte(4gfjx)%%OdE_a8Mgwat_7GBOa#6x&#)Yl>*q0%|hv)3-vGZs35jm$&#o-jr z&Ch-AbG2F-MhBhBKuMQzq&P}WG?|O0vCXYWda;g-=>j355*1-mvVjqY1+s;4Jj`Z) z;h7;E71O3+NK(!_G3Hj*G!ZxABTGRuuKcR3?II}fHk|&Wxe6%-~V|D`h}9 zPPZ(Zv!AeF#7J;69i>T9SR&A2ck{VJOgxOp{JY-uE{I9(P_n!m#EYJF6*FZEUZ@xw zn`-DWMOUEMO0!7th!|UTBv2go70Vdx?zrP^4?g%H4o8UuRjVM(Qc!`XqQ#xaM&EQ_ zM4p|W2o75@qwrDpmQ0+#v{!||uLW6b2GJ_MP%zJ6a?ivRbjh4-4m!yh>S~|J2*6rV zW+O&q3b_ju1snT#kf#lY#8X>cDqHaB$~s_;W^**bH$=0+1~(=JVO%;Gx`-$QD5X^D zi*LR`!1uoQz3d0aY~U#r=1P0(*J@6W73o6UUWGRbxU`P5QsTdMwFXw27!eIeLrrDY zkS-DO!o-q|+wZuIlhavU!a%HL6iW(5u))oQK1nUr;10F6ly1L_Y@G7 zA=g%4L`gMB6YJ}!Bl=)-YRt^efA6u!arA%S10R5XzOKY89E{X< zv_MZy#%yh>gFMwZQbEVXo9a3gx8sBptu2wEfY^MYrA zPz;ltLZi)36s-6I5z=aUVp^b> z6cdSz_BbC%57WaSAV{9Tw14^Bj%=PlRx4I!c%UHp^EpqeqLeU?W&y7uWju;0uBpk- z0VO7cdB(@_5tG8Y7q@gzQmU(R+l5fhw*+L5g@c2{V@-wiA?`tv<%JCS8TLfooC!3q zO4J=yU^$d=-f~>xA0Rh?{Hb3L0l||Hk~BlIkqV(ESdlVJ%%eO3Xdx}$mA6&XH3S0S zp^k0Sfoy1SYm4(Mu61?v83Iuq!oS8Tl86Ca78h1rsl_-2(}7z}7yDD(wEpxsG#DSo zq%5#8t{m?I$Qt<#8)m$vrFnU1Vhdg3NP1z&LY>d(XattMiC5KK$)Eeb^&uwClaU(k z!=R0 zj?eSz@bRlBHEV&VF_>K&s4mwGq1U0Y=kpQY$Z+1exdza$Ut<x0RTjRlJ~*(BJy-Cvmqr`1#iWuFJ)->NBO5e!duJ&9W`Xv^*2WQgdLK0!59@ z+O?xmJ)?p|I_G%bMr$sb_Pg$i!7Akm&Rl!_G)1j%8<@`#jxT?q)shM=UV;9ZFuT)-U zTuToG&VB*%(dw&D@ofT1Vbt}5h-Zoao#^9#jwq0 z2!d%Nx-P~MbZYfBG6A^S?+mjfjsD)XA_nv(#5diPD&%+s%u3V;;kxUtyYIet{?%Xo z?6Jx>e^EpZNu zbbC>Y88mhq$Iiyj7e|$RaMk?E7A3D{JAihL4mQLPZ+w3HJizv`~dnZ6&gc}uaEUV7=z{_M~0 zx#u1Z4ImxzLL`iMNGOB|OcU!&!>T#fN+k}@+N7$oSf4Oa>xg7vi`7W*nSsAl&Dcgo zTwxQwe*BDo{nvlZ$2STqvn9Z2vU#eO??4md8-4a2I7ZWKom1LSjJN0Y-aza8TBZp$3P$|JhBpg^7JvYuNF|*GGWU7n3U?hbZCO*Z4F)yn)j8xkQG#k&v* zhj~iAzYtKp64r$Kck->Vq|Bt~^qrc%8p#Q)5rZq+tfd!VbwsuXID&o8Kr9<2Jxn%+1WN>L%_&LF}pn zd76N?!B)EW&AsgC6BH_r7{^ehQ!aKp_3aU?|lQfhHS7Q>~T$@sVXq|H}ejWGU?t!tuzR%VCOqQOkMD0H>0avdFq` z)cA`X61gaVJ_CLjM_G9dsi*}x@l&4|3E&!j@iqhTGCSd?0}aGu3m@u=TTYr7vs378 zL>HB6Dy77gW!=C0%fEd0yWfq&9fihNP;)9M3>*A`W(I-K#aDqUg=Zl&<4tukJYHG1 zktkAHZ?SzxQc@%=;E!!i{-Tcw8x#NfFJJ!b=O6sfzwjzSQle>9ox()C1!ake?PyPe)I3i`x8+-}nu-gmc;w8NgOC zXjBPYS-0q+|I_JCNPvJ zB5Rk^OkjLs<$&f(cG(K)BON`nxHCa^wlummUzk5E3;J8fKaH^+T4M zD+L5FlVFhjJO4`oSe@OU8BbE%^At+aBxIDVl0Ip;Du8IVh%Pdt(ZasJdTOsrM$}7Q z=j{dzfR6l1S(!%>u332i^u{qHv^xH03$~L8x@xjZ6Mij!P`<#WxO2_}`o*@187zlv z@XlH}6gx6vf*zgyY@g3;(4$SXu8ymHxbNzMo}k>y|wVsyuBbG^6{y7~6cKegobyp6KOA zPhkRNSbllk;5L>F|IQibV6!=$O3?!U#U3 z5Ofl@U+hcuG$v*z*iBzay8hQugPk~3QN<-^S8pOJD@s(THg|WI_BU8wT9IMyAx)xK zgZXA;VjOiXs)7#o1g@rcjVMvyET%nfv1rY?nZ6-e;UGjpK~s^}Z!R5T)x$`F-f`YI%j-o@uj~?NNE}xb zuls%Z%U}Ne-~auquDZ&~BIdmCK0iH^Zj8xO&{GqakfCn0S@~bTj$k&#UzhF#=^PLp zC_jra%MvJT0w0c<{=+}~Ly+O@3tsdfr9w?m$-^cWWRC_g!{-rDDAv>AoNiErl~BCR zSENp78PGYB#Ll#&>B0I7<+v67aWRNc@TYNE}kGitqX(b9!4mM;g zdtP9Ufz>WHI{TQK;0AVgFbt`A84Wj?x>p9P-~I064?OVk4}bW>9FtmRN-?py*Uy7q z39LKeYX(9`W~YsC3KH{%Kk%U}PYHFcQs-YOUXwi{SSo+?M}PFgAO7$QU--h(nKSX^ zq=fstvf~K!6<`y!5Hi?|b`YM1s(CK;l;Wa!(YfY(o2kYf?2z2?)U<~M2jKb?5ddAB z*3OT78gvZ-i^pKI-&rI1%my)RyT9cd1_uY4Y-~%6kjFm{gz~2#=?mMo?$W^o{Y(MBam||{{DW&m< z5=10V+8CBfs}_hcJ3TkZbiDzuGPwybcU0TZipJE1n2^S|5ll&nv$-Y4a;qgt*r9x@8e)Bhfla2hGoWN7^ps!$@dm^(aP^EZ+ z2=J&X>tsF}J4pz3{q3%GTiHji5`bSPa@BDCynQAp(mmB_1f}#=#9wV5Dym;A&@ql% z`(sfSfO=TkGjXfv=#?}w6KcTHpyZxwFr>W()Crh1K=2P`wz|sr^7XHO{h@~*`jbET 
zlX`ze%JYHQrKJlp%Sr?#O(Kn+dChFxSA(2L*!e5y`yi(a3ti|Qb@v_{0{ehdOuYG) zTYmLde-#zV(NzQ9MM^;C_=|T+Xccr{rmcw(rS(0Xc@Xq^sTS2vV^A(sb!4{)1GAtQ zqS7KF>K_SCqJvIC_cu3DL5%{|H5vl8bm=hJYLcy0ktf9KrxZ{b{EBW2z>Jc}N-YcV z6dD*JLvEE~Sn)OiPH_)7<{|R+%F>XYnZ;iQ}E1zxqs)CaCbO5({Q zNk-8M=vFNc&JVOTk294Au!_fA(x8Q5j3)(s^lIkrCtEw@1BP1+Ir)*=(P=C^VpKzG z7_6))!YM*emhZ0=hNDb!!9nIc-<^~+%gHKlCSD7K5rPw|l}d^~pS7*$`q#-HVr3~;^OHnR9w%Wpj&nM$j= z)6)xnbwjjb$GH~inYu~$_c#SYW}p^sND~qy_^qiX=!(AjFs_Um*-!`;X?Y<*Oql z4^>*bDgJ75PDPoY(;_g@g9s#AKv@S&8V-!Knh2z0Cl?=>X6r5{=7&D?A%-^yLIxr{ zC9-9VPmhk^od)3P2p}sdQH(^Ul^90srJ-3q;76OxyGevhuzeIL;lbCBN0@m#V=Chu zE(3fnY%2N8XFh}58W#*_8kEW&ES-gSlbl#oE&v(``!tAjNZB%LhO;(V`vKW&+9yi_ zg_QNT^lpkCl+uejZ*m}mIq;PN?dKAYcpKZ>m9|Xy>4jv=Fqo@+BK%5q@GH63?_`2O?HzHr%|UGIMX zFYny7hmKGa16YZff{oHK$u^!77?MFmOIM2MZ?|Hq4sSp$=B6_vc3b%1B7s^Em|`CY zz{(GP@PmgBAO85K{`t!NS6_GV#=Uzl_uxpj*+6(_Bc4vB3<6Mhc|nI8G*H@w(cPyx>YklfwwiD21uU3@eEcx>_F6#q`ERs?ACI}k|H2Lxs$3ZVqzed z_6t#pYb{+Y^CrE>7u5n2AX*Y?USXIGeEf2_(#4CTll*W_r4$uUVAZ|w!V8>^$^r>r zp25wLa_yot$9G0&q-#)<01Z;FVq91Ar4Hp(wDiUEha30vfu8-bVm{9pM%e}|CHM~I z^!jo&vw&Ak)d(g(eC<7Fn^K4;G|^)D(b5nCfF!g4V|B9<=$KF!1;o+=B(^7zzfcdl ztwjEWdhKDV4#d=%u8f17Kw@YzEXg;mvvaKtMy+K&k7m$z=K#Bwzdfy{*{>lB9kN}sGvO3mz|!j#Iv)aUMbK8=Fm{Y)*3b+R8+Uxi70G;Nra1WZ!$CH zZ>fE1rq?j% zw@}^Rgfxi19(WsclVc>S&qBm{Fg}i#GtuJ6*mVc4z3lSs-~86Y-}(4sobP$d^*7GW z&a-iPc6!#?KHE6iN#-UoWq=M&h+^SyBpeKU4-6MGyCZp3Dk!k5xMc_# zlOYcD463Dw!jT$oNcA|Ov@cw?Ls@dPG2Z!>jXDPEgIM68KcN;{m6tY{hEZGy_0uYV z?O-9IbQ3Y*ryyYfAcC$cNNvea9)2*#U8(4K4(Q-;A?ycU;o@7Vs3xBAh%2~og{LsI z$sk1=<&_hgVh2l_Vg?FaBO#=!^r5#+W`6IO6hb8rX@;T-X3@BfaA^V&lPIe!WFF+GXnu-t$R)rDvG2^^ah_Ss8{Bix4-c6zi|XdYZ8 zDOHO*5i>(iEk5p{D%1p~U@Hi)!`*bB%8NYZftZ9ZRfeIjs$t5G2V_RjNl=)!;?}Q9qJTAbc0iWg&t*E^f>5 z7Wy8|Jwif%FR?*>upI8c|9(CS`tgr{92I)#(19I0c9Ce?p}E5thJy>vlnobj04p*9 z&+)RRHaXA9(Y&+(4hFXU22m|W$wrSWy$o+`0#t_uB3v915+#Z+6oH>SNTpWBktnng z@sMo<#Y0+vCq4_FP*6=FYle%BV(5wMHWQ*&UP1MhZ8ccEf5QXN#cPh-(06Uz6eoeY$vHa_ zd}?*dt|0_YEG;5tZ=$-H+0!Rab&pF@EKzOPR7&ImJqrg$=O<241pZ~wSh|0G(rX8NO!|mTt83my45Luzevn>qK=hX5wRyf~;7C}Sw z7+0u@=bfo5w->9o>{Yp%9Ogri3QdKF4noLNS_8u_3Mu(G-gjn(2?gE%6)H9B&nU2E zk2d6mDmy;onmxm&@%{?-C47X5e0T%5emNOdc@L(1FT8Nds5j2{r65rBJXtG{)k(5r z+v^sJgQI*bd#0;1oT9e$qLx^H+HRU)vZ?nq=20Ru?sf}zdzeZ>_446WnU+e!>oCtN zwKn;Opj%dCi+a6APgJ}I(@^T~D`)hokJ8pAp4?XMAm4fxn`?$;D|g-ZEv#0Um?B7f zLeEO>`n>gsT9u7f`w0B@_$U8)9i`$zm7R-k<&nfgWQ!JE&QZ#Qw-)IW8!bnw>WDOw z6fUNSqhlYEvsl|fB$HBDxww`EFiK_%nX8AgIWn4FNX87Y$Y|AFeJPwlmki{rBBlap zcw+*0Noyu7xBP@z+3GsPoCg`XEqYF#N;>yix)`g7;SDR3J$IOr-0CjKRrZ}$~f?V96V$UR%(9WQn&o|BU^Z0idV7+PG?2WhQp--VBR!D<`L=~QGW z`z>Vk(`Qa0x(!^y5$Z@o3Tum1tdRvJR|!06lt}Lq0VDjh_9_q8*ai^KYEAFDRw(LZ zuhO}qDFLGGhIvwU3i!K)BIe-O82b(|J}?NFs6!17d0?9NmbbixNGZgcqSr7(PQFDV zmxeU}Iif)eq>%&@Lx`4iO-{1M*n)x#&>|^5Bp0Af;HpxXSOzYUK81HkIXnSZBb!o5 zu*=<2G1oIHz6fxy_{+J;l z+fb}TYGbI;^whwI>;j8SRik7VJFilnKgE#@dM3)FNWOW*(>xAwl}cf)$v|N0#V$N% z2e|BI>c;Zfr86(R^z!McV~fXMKK|m%Cyvg|PG57)b?j z1FeKTks7H$C9TOF{eTIbilJRgNjo>dIHaYF-E#L(Df^la9J=PoAN=qe-}>gt^6K`5 z`Mta6=f_WGH&B^uw!I6oRV7v&>{4lyQ7P%{2@JU;1x7$&dy4nY&A1Zf51Noj{b-Tq zNV$`pAKJTkWYspd`b)tiDUrU5ncFO8*2%zAAsa#{Quv8s8Fxvf60zz-3Md$uK!c?L zyQP+oEuJ`elBJQCkDNyGdFP#jS&{IP?Y5GBiGEmWknlr;92c>}cW2|ZibG?c<|T;W$D55j6!sCFX=4lP}rXEN_VtmVSm2--SMXJr+2SJ60&c`_*&f3A(=cz?6v+&54ABm=j zq0m55eP~=+2iy2H?6c22{o~WCv!_oSIrh?vXU?2rxH)j}nmg{eeSU5``N>;4swfFn zONa32+@OjPJCssG%vj{Gn$kZA4m8K76-L?$BQO|h6I|8Bs~T)nWK#F8cfIrFlP9qp zKK^oF$*1|W3`hbI!#m}8K)R&IK0W}Ze9ww%Chx+ok>k-D_v!%9b}j#@-1R{G2QIBjT0F-@i5RS8N89p^;La&Pd|O^ z^x0EqmX5!Cba8oQ>#oZo4TsW$}({@hiPGk|i@Bnl>E-1f3XjQgRb!fkfHRyER$DhjZb0 zj53PJM6QAjRhUSlP8Za 
zomt#}t$mOWQILlYSiP^vNJ_0=R>y1_wUH(0jI_3Bi3OE`)eqJ|mJvotJVUk7SR)8A zAZxu3ANhDw;89;!*u>MW%{&P8QUIV$Teb4Nl}N8C`>+UUw;jUVR9W2sJ{VGUhp8xt zN$#uqM0?cgIoFYgO?XFI*BTQkur39JGz&2$p}#3^d@LQ)-DX#IGyLRGT?2kq1giQn zC!sHHRn9w0Z0M_9<46~4jhWFYfeT-15UjCebA-U8esCe>sRpcRAZITts14 zH0-s9W~&nio2`4^8?xfQrx`!g@x}&6qHM9EClWCA*(<EOE0=0H<=tleT$qO-JV~}1ND5v@jokUP z5-QSZBw<#1X?|J+QPiQtxboBTF}$Y6TJVzH#22b5VkWc;gpmr!W=fd34Yx$vd*L@I$k|oR z-wsauJ51^Fr2ZmbU5CZ<=v7@xBV!EFnHk}@4U`6O$Mzj}zx~eDx8Ar?Z48GE)YJ+pOYb#iKM zg>!(%bBtPf@`9sNLk@F5*KbTLt*$-z&_gTV`T_XNoOy274Oi*;7Q&+d06+jqL_t)# z((%ln>Qj`lQz-xXN}$g;I_R^1|MyF)Y}p&XdAEk6)6q`v6~Z55O7(2&1v&Oyte!e`;`zgevHlsl z*zd-*JW%2j(*zi`X&3y7W0+2EUtrUiy1BDm5Q1K#b(|wZ$6-~Z4xNOKzoSg=;`}vR zV8KB&GIynyldMMu^zgY5n07)XyxSauI5q2GyGtv4m|J~<4n9! zXTJ2$AAfP``!kb+>9wP4ODEUYFPp|cUndvOeCqH1pOx|XjkT5Cvr8QMSbJ;S z7^s7@BaRg;PfX5WxGXIX{`H|R4K^N{ot|DkbM%($t{@vHZH1CV`B)Nwsp&LYm=Dpc zF0F7(*~;0~!%rMuo8CRQbMEGw4(-_Uj``VnTzn9^s$u+vhSe{i52S$0;l)KSw5u&z zOZQ>d&7CI)wURvIcaczTVrt|$O+a`mJ~J_Y>!Dlt4OTWz96R>%^FKIs^u%CsnfaVq z8B`#@vDQ0ybBwaQx(AC-o;mvEuYCFFSDs)$-?p`5>x)OOxbiZnfnchMun28!px?Qc5{ z*cM@*&yy}kGiAE2NX}uj)*#Zyb>Rle^p4pV&mN^Tb&(ZgUoP5e^8)oqey{{U==rP5Se|nl&L15lHdHl?o zfBfhFu{t)nZTndAHR-|ou+>l|x~)6Q?XI?mZYU#v}PoF%wb@wd9 z#(2mwh%UN}giJp<3ut+2W|AHKjJ+pLE`IWF|HtXcy=#M&%cqY_t)IGT>WaZKC+JSk zPVhDWBLc)i<-(!?T;U}FG|I~InJ<3v^GjcQjP#jP&+fbR5HlrIy5k!vXM9ziqduWJ zB$iPg?X>a2%g=oJ@Bgp0v6;1%#cTJ@f)Bep6&QxMjj=%%1EdE-2|Iw%bCWYC7neT% zPak8u=k(;_>hkI9c5_@W`8>68k{Pu;qhVIYY;KyNpPil?H0sO`pL%j*ZpXHr`|wHa z+O?~Eh|rA!#k=Yh6-W5-!b{bf5R%Ct$!dUs($%fvOsS_bs3w5t_DP=0$SU6T#}t~M zom*QOC?T#FW;{UA>g>kJjU~Rueg)Yy7>08vnqW7M2#Dy~*wVv~e)Efu{urZX?$pcsc3lf< zc2sxI&>mzGMr%)OY%tKfPabg3A9?BHAN}ZHde6jQY4^q=_9qp|i7u$!GJ69%B!=T- zQ94im_?h{|_4#c(dFkQSn{S$1*fKdi4>mxAe&mk_&*d(-pt~hYuu#4&5(_+iL|({{ zbJ+wGTGoZ+O1>8J*{J?RRH&=g+j#jk35I%r;nIg$U-V$ zX(O6UPcck{hwK>y@$tdN;K46HIQaHcD5aV8m#0=vyk*;g$+bydvyve0nAX-pmD6Qu zCrb>=XIG{treApenUDYd-$9efwHJ5H(lb4@ZeogE^}MX2tKII(<`NuF_7?w~Iy3mf z7av-l{oXXXZ7VFufAm?z#5Vp3)#?bhdQeH=MhCJ=l>5lqA4~x-#%F>p_VX1QT=ulOZV0F0Q5;#w5u_!RH8?BBmXZsHO8k3#I_rSB1&Gxq##%NwI*TU-?ms!#*c z(VQ(?ww*YBig_OfDifm%lo%$B4jPr(Dja5ct+5O8acpcEROvOrmIKY1gRya83f;Rl zSYPJFm6^HqwPo4^*W%NtRRApy{ZK5Ufic$sZ&t{oK?Jzphkw|C9f2 z7MIQWF9hwC@^@Ny6v{CF#WAluxsFyj8jlv*lu&vpNtM2dCY96*3~?C6?A6CNL4Nhc zev$J(It8lpb44wdi;63^)vk;qZ8-E5@0p1M*Izp}*fqZR-H8d0yT!rCJex^1o2w|| z)&nLDIt;mW_rcDcyWjD_kBrS-;VZC5zVq1Ezr3=tiq^>cq~=X2Ym1HT8DW^ITNW1X zyW#eoH{883Soz++{nKD&X^LYT=zQSSPZfeXquvaMtZ_Q&%3$B#z3+JM2gfGop8oz1 z7N7YyfhK?ycCc5D7-i(c7S3Ra&AMyXj=S!$tOHHiQsvrqlkKV($Hp_rAA5ANfxEgs|tlw$^8O9V6&ss)KhB5U^BQwG*#M zWnhyRfy9#=kH7HJOSCsrGwB1n^bzY-cbH@}#i>eUVcXWb?!0St&)xcEmS6nx=l_wa z^w?<@FipUqz?lIb8PBul%T_r9y!qyv_uugWlAnF#v&$!+<0R2pURRIP1HpimRuv(E zHN5u+%G6-aytAoUXE~=duxM;W z*E5Uw!6RQ=U3^)f`V-ybB4SQp;J!OGm;z1l;`7GJww*ifxcA<%=`C8>9JS3WpX6bB z$9$!v#qm`OFwOKB+-`cH7`<^p+3QZug=N%EUEv)FGJtPPO^;u;Z}(-FZ8`b;*x|>I z1rw)ZU4%AJMuaJ=zhZoS$M&7K9lCwX)pt(LER3Cf@mmi*Fj!ulnV+1QnnlcMxyKbI zGooS-mNR~L8w=@uzxKPVSs(r3!!P~l3DVs*lvzDeMUC&bL|kJSP5S`=B|i3*#w@7>q;MJKl2pp*!v&;n~CAIr03XB_8Iq!&k8R z#HNwSWyE%c4W~EUb=SgW&VB)mNm=}Axtr!{!cSsN@N8iBsB*q8*S@a-4YG4PYB9eq zRs3v$6wzoG3`NQO^vH||cGFs;;|}oq8W9gXAO~)^c5G$W`ibvujGYAs_U0+sveLh3 zg82T#t7MFgkoVsIp|P1O=n9L^e(UgK{~nyvn;fIs$WuavK(4GTvBpD1_rCMa9f$5* zUs-l>v8bvWGhdi%wrxN=|!0zLL@>5+3T8%q&z#-(|R(se$ZO5)H zTX%1nT-)`?(Qk6Hh+2fRC($zont&5&b!Jk-1MeYp#vROwNq7IJ506b>xwbl3dFhew zedi(OGORLAkMpVSRlHb*H!R88ZEMBs+`>J#+`8+gyH}SNzxSn&;f!8|w(5MRc}-K9 z7uE=Y8ln0Aef#fv&##P4Zh7k4Upsr^snF9*$tKhLV2dzs@{!p4e(6`{_Us*7KJwKs zezwU*%Li~x1)2tYaDe@K45z2|Uvu?M_uV@-!`5%tVk(3LWWE4j7z>d9AyvB~ulrwk 
z*Tx;iyMeEI>q>`ZIM}`8*0JTwjy(56ttk2CB^^*HhTJqS+k%zzh85);PRO~_qATG4hP(gm+JXBpDtmD(U{awE?K09;d zN8fts>Bkijq}waSVV)fyYmAPDwCFz=AKSfi*KOC`vg^QG)|b!z`zQYoia>KWlwdQ$ z_tJ0hpW>mT#^Ko5ZMVMV(CzPF_VN6aPaOU6cS(`m(m%PvzY-d&C3S5b%i-$VZryR^ z^}MNLpTl+<;a6a9y0}{+`@u;awJ|nj+I(|9cQ!vC-c?RI1q6hH)Wx$`UwjDY7qN~4 zP_Hyyxdw=}nA78$APmf85+MD{9Cig}!26KrnC_xb9jX=B- z(be37JajWB-pAwtT~cC2v>h0v@Q6gpq6}*Zv&vEz(2DYvfjk6jig;gA^(T{$_O9rn2&g)p@f_wzYndupp$KV%wqsT?2 zkhZE*$Su5`Ma{s3LW{f|DzhD-M~!`Zcog*ld}r)OukGP|QY(}y`AqGLV0mLni3dbEQ* zWrKAb)^uBbb2Ib!kJy<@6X<%$LS}x< zkN2r9hchVHNBBjvbIaMR3~y(b^UXK<#N1Y7j4lHxgcJ>A#G!;+KO4;`w#Ig0wy5zz z5apS{VwS|FfXqsFQ2%tp9B;kq-sMzu4y#yM8n}gRVuqEf4R*NNRdJ4DPE%lZVuSs* zS?6Oc1U8+Sl`Qu7&vJvZwONil5`%W22USx=%n-E^mI)qHh*3)R*#z-=gr0+=Z`ztz zJsAAaSG{Utk01H~ov!1@C8suwaG2*fHU!;%YJG(vd}eZd2DN~)(lImR4pAO?t`(|_ zO*E*Gj51!)^pJp|O8>Gz=jOLi>l9y-jNi&NX-|oXCifWVY0jn1&;Wx1Mao0d*|Ck; zEwfu?B4d?2MLcqq^~G3xmH99us&Z}3il~xfmzmIl7eDQdk)J6V5>Ck`Hu(JGDtw%o zb}1mCs0V|JN5HIfZtD@%TkOb4R|GX zKvig4IiO^36JD@P@kN4bK)tF!&dSU<1rc5u{;PucKYga23q=VyShinJm?C(8`?^Gc zsh;21z`e-V0*L$!SxMCd=)fSzu)-+Hl@waaXhj56323LohYx@4YhUA2=eT0&tu;{v zc>zSiUnr2n)Oxa4m9(^mM4@;^5es4*3E%Fyg@Nh!k@r&+V3_``UC zt;IAM2hj8^$2+XA`TiOmzq*>;UE#l0OrrDG`Hm5s}afX~2*StSFbDqR$%W252 zc~bs(-bAK)zOj}Mbn-bZu&@rm40o+sfZ-UkkV068RA@%e2Gva_%$UMPZ3AeaRDHGH zFPS9-B@rgV@PIng85}Aw!eXCNj0TfH26rRl0Ad!IEF==l(1l9x?pihm=H9o`=RYTB zU*xBW6bDBkRCUzVClV)Mk3uC#0EsXjXalW-Ht>s42?rcUOp5jthX?R&1BOZVUgKJY z-F#MM2BuIvK1(2{yfr8UHG9*e?qLJU1H(NVsi+FlVz zy%gsmGm$u7!6OLi+E5@sFm!`5m{aE%nmm0Y0vzN>taG@=t-jHE?@8|%AFgGM1_J0j z>d|QA%pZ=OO22?MS?;KoZRl2Ra?Z@2J#O-!DVYn%*;if!&>~#txD{Xn%}JwgJ(EZO z4&9KBP&4hkR&T#--QOOEa!^fR;Nde^88CIn%j_f{N=FPOc_O5&58N`~l8`Pw>@)eL z={en;@^;~d6xppx?}dEHbe)Hg2CdQspcVDH1pktPz+-EUCccjGsmUpAXfK4-?^bZ-xpIKECEqfJOs6>(F zPkM(h959o?rNrm*3JiV=BMR;2z!;iNWxb3tjy&pB?&wYQ5ELujWq`{Bu;S^w%!Yd| z0$m>5@tab*PHv4y)Ep|r(p9(ytOS~>>v)V>}{;b?@OoNhD8=%e=y=Q}iFk8!^83s<4i%=Gm9>dG>2l(Ub9HmhoK z1gN0!Ta{LERFYNtm+@AjvTy>gEKN6iLqnwvCL#wva~2duSf+v(o*2Qr*Fguh1RxxN zVAnN79`gDv4-_y;p{Zth!$WS#9QjWnZkq%+wj!jycPY<^fGTHJ00%sSYJ*K&m?Hu9 zk*(+8$dlJUin2oL5PEt}jj+@qA-UQDW2#wJUuXxUuQz-6o8cJY?nNG#-#Gk3>+|xE z<3jSYl;itqv%Kp~UobVaEXXDl9OzSq92r2y=-fBWT5Q1UH5}{V5eNQt5Zzd`9 zmBBJL>2ywG3|mNxw^&xDZ?bWU170@9x0Jo%QYAK@JL%Xi05sZ~>+pg#1bmn&g0hJW z{7pO1ONbjS0H>Y`QcoQ5s`LtTl-)znKSd0jWl92lu(up~kbR8^7m40R4-r=bH91u& z6^EryNX7?_=yb^IV0l>=DYa~j&AZY}PFLZ0%Y(U@>De0l5EPI4@D=Fv z8_s#7D0@L39xNqkjhYue{S(%xoG`ef&EiL$wXXM(6Fps|Rjc<^d z@0#9t#yFo<+9nQpZreo5$8+K$FgNX zAbgY~Js(f4eCwKPu9c4q?x7~YN5;F?g^xg^BIe!*27Ekt@L<*Z8c6YrB0qBqoaeOr znPc<|1l}wP&~K>1yPO(bmK?@9$?q$vGuk7Rx%O5 zfQ!$tUQ&*VNAGji-@@DsyGnpmqenLl6AdQI?By7+^JZfi(~=x9?g}!y6@YIp{>R=Y zf)3*4U!1qhF54mQ%rt?lLF%cJnfAE-0Yg_L^^9@K7zgC7@;zrJ2V>(4>?GoV9voEb z6FRlrP05sxy^U;~u_BzK0K}?f3kkL;eu)uF5z>jyNQ&qskjOZvMZS5YBprQy!6IGJ zOZy9`;vfS?0jI`u;PBzN%Od|%Ll zch|RsCd_v-;bd@Su*^oP1wWPR*__T)%-HfPdLHG$ z`Ex5+@N966U7u^?b;N`k+&M@wg^HT7W?-$u8nwt%=I_iOGFQ;}H>+FCd=@)a|? 
z!RoRy2q%@6*&J$ZVbK9gf+$p%*k2w|){;+)yb-=Nr)|}o*tuD;)OsDf2djdI30783?=hA9Uy)+{^ znC@eQiY51SH<&XRuXm+1mG)Jaub-D&FBR0!TmEyJ#x2@x)NfT8`i8%H{Cmhmid>Yf zM6SepnC%u>fbs4`WTKBc#f>n_u@CF(r_m`0b_5FE<8MWVaX2C@ybrGa!qI>-%tVR& zI+S%M=$4{8Aj_x>&evZdLoNF6Z5K$)v58`@dFg3Xq_LeGr8ZdOxYAiPP}4u9Bh*|F z1z~`uu%5pwY(SoxLC;O$T&5t;Gl+;rh6n~K2+Mkz0|x?w34w&b*%fmKTe2!A5Sxgt z)gcJw@laFOJS?La+2>K7+AC@zmo394Zg5^=P7GKdW&MGi9;L0(B-{%9%&l&flY}>i zgHyFg zJAHwg?7zc5oJ_L1wlv1OGYnoXG60=pG=Ub&#PfDz6yjDLXHz<+-PG|@9A7ig3;;0P zM6B`kuBxu?o|)dA-PqWjUnhG1js8bA=C|pX zjg5|t*@&H~Hd9Wfs$@!rq9jtJM9~u-c;4>ko9DfYiw8phd=DaLart^yF3#4E)7=cc3=C0z@47jMemXkCbcCc3vxFIeTgQN{6Kn%(FE-q~#^j~5xQeY{w zBpiduCi?to>tS>*_z1EGy6Tp$yF+qCk%n4!dAUQhl?h5vOTkDXm?re?kmopRcL?~Q z2l@bSlhCqz*2BLMJvX+IO-@c;zjkBfjZrtU9KOs$=cF@0s|h^L!>qNdM$`x#xW4$} z3lxi}0@1yqAOOhWTXHS|IiLkPyPr(y?OKIKmF4`p$+HQ z8qT^y!P^7uE|w$`Ky>hrnZR zPnRl{oLMITVbK?UY@7WFARF=;9@p2?BO{~h>+*NxPjei8Rv=(w>a)_?<94QZ(WtJP7H3ei$oi~@G8cZF>EoICWScSN}`-5-6WMf2cyUMP*0jr zAw@w3sp0Gb6C{(Z!I&UVPF_%6LxdMbg?hWp zx`$#V)(~m&+TpDNMJg^mO;wPHiabs0yknhVCB?mFydvwov99Y z72-Q+tKNe4{9)8*2)D9DdIGs?01c8ImM)RDItpNOtyY{S&wMH$qA=e;-$aiN0@R0z zBT_v=#R<|p&~cdd;uWTZR|)6=-J2X0lL0_Tk0S+|fd(A`U4eIs53|^g+PJQwp)PcH zf*f;1Aj*ZpL6d5x)W{ng7>FH*3KbS;fZBk{o(DCRXYE0T-lSr4EJZWD`fE}!W!Lvm ziS882#GcC0f5B)pMWjJ|yCAZ)4|Y|L_?s)ET{o+U(zVkzn{BGb@Yodjls|f3 zWbI@+;m`{s*m5-`+wyK74<6St5OL=+ZcNGb!X0JPZDq)AS2d6;<&yZt5m7-=9o*H9RV;kwrx5I$galL7DU&>5b0H;gL%nRieZ}KR z;WU^43*5OA?OjGUe!=6h{$*#3Z69$;K;_^+U}dy`PfC1%R9L2};a`WXGO~PO%Z!?= zODxMOa*wVp;*e16$%JMNc>3J>1y2SJ6a>E_nJS{m+)PU19dCI@o20$c zEWMF601vT)1b9U`Xj(aV#D1~{c}JQNET}4@}7JQAS^*^ywE^Pgs=gaD9O zSub>5Oyjz{+^{Etk~S7aqXtG0$XHg6I2V&7F!V`vNSUrW*Ky>zNL+%`;qhkxkPQ)H zC43Pd=%}myD6|=}oUW4QiO|*gno#4l%ry@!75t~()|UQ(DvWZhR6Za2F;bzOg#B-! z@p|PAd4@DZy5(3tTi@%KuNYggB>+BH5QvLnB@pA>NR8n9G#HB_DGTWfoQa@|)iP$p z;Juhs0*CQRR= zLhY4k>pQ~>Uu4mSk*!HKpX@ErumDTTD8(>i+)QyCIv@o600*#a1oQ+y(X6{H;5{C& zhMkjBFNAH3ePUwb^Upt*#-AC-cX81Sbj0{2V7>-tFqmL@`r(Hkq8fhbrI#>16&rV5 zj|7k_&>*qJz@-5dGBdA;6zdh=&>+P{Bwq?!d+xdCIKrJTee12aC>DJspf(^-Amv*X zmg_)Zp#4@gT3UQPQ&&E}&?ZK@yz_8+(KkPz@jN&?<=U6IIt5|o{EE|FPLh<$D$9=Q%i!!yD?lPj6k zI57-K4)G9?%>5+QqkEGVS!o!h9WLfs@JE=_p&!h>By5^E*_c;jz z(5Lh3nMaVTgnW%r3DD4HTTP0wi{x&BEhUSkK&4oF!cI^{ zvC4;+S#bvUW6gvv1Cguui`7FFqF(i6s09T#0l#>vp2DxuceHK@*JHc7dJ5f3Dj`RG zdg3xNhU_iO!Ui!mDu{i+Ttc;c5nU%-OI%Lrl}qH!kAE7cxX_xwhM-WYXPrX~5JidW zkJSL@Q%7LG8re{YYw?C696LkBLKt<7E0^MMw5rh#_DFiuiPXv7P{Z`Nv$NB+x9vCo zxMR4(-vivnt;NwcVL=x{g%p5%0w2ZqyP-(xBY_A{=#dzpVBrb_ZC1I&sl=TzCDz%h z5(6}5@d6t|1B2nk@d=>=h)ShIVPKyzMX~|2BbWvt5+YPHkk?qmqu>^b!fo6#6+;ab ziF;93oud+JPzcR%hy-cyQ>~Za&{||hvtW#noRrsK-UUR2Zgqf>t6G3!v2`s}a?(G# zqV3E5bWYfMmPLX@BnK2mhD|}Qy;l{kW!;b()sest;I-~huNnZ;%MjmU=KED2tDgoC zpsLT8T!w}bBrv|vE9pw=lF&ZA5ah5RgGsSAeuXfAW$}@>>@1^u)}g0v2cXrVI9XO3 zU%(Q5obu@qvH>+bLk@1J<5AKAHs+vFT{{h$cy34Tl-# zwxX<#IY+#Xvl*%+$jfG*mg-w2Gk7V159|j6`~XKq0td+&694EOQqfAn7yn^Lkv0`^ zsYAJK7jy*(n;5O&@;4~g?OK@w)LP%8LMe}&q$f?2*r9xsFe8w?Czmj&?CQDnOVFgS z1Vy@-W4UUniG`Y>z%E3AVc3Nbhd_v^DQ_pLUM)}$6&2beNaGxCv}XEl>JTY$`M%vf zZ@IBrV|qpaYhSnJ^-d3vr#Xj)|?#PJy+dHbu87o+hzE;}c)4k2K*G6}PVf zfrPSe&)6?@trYZ}0Vv2frd~t>YR%ceu(Jnp^Z+)Q2566}<|Beo z14@$Moe+;kF&f9m1s7mqPNJDR4nt6DilSUH<)bz(J(3FNAd+d^IArmBks|<+6Ik1< zh)|`>_mxXbqA>olJ$uocg>6-7pr~LNp+98)hvT9hH+C(MNE^oX%9lRSO*qyzqyu*Xpf~ zDkxm)64%?Qf}R2fy^?Z}V;i;C8{z)&b@m7lr~=Lg-YsTF z*!uGxG%P^-Xaj1Yx`P(-RHgTSjx`JlVdh{49qx107d#*zqVW`Bh%m0@l}4GyskW9W zJ%uzwU{!m`S=lPVAxJ}vA{&<}j@@oz$b;AF5^~Ri4LfS9xgklof(L6|8r7tK6zh?GWV0@HC?i7Q+D)dMS8g)%%DWn2O-}hN5j-Ob{ac;Fjd9GP zh-~yn`Az~d^csdVr^Iqa=?h`qy1Qo;3&rtkRG?_Xn%&`Alxbm?Ws`mRBkk3p?-a#{ 
z83w&>MI?aVY#2NoAQ~t+tR#9^*H{Nu+hRoWSS5!LB5ZxERKJcT z3R!|0RE-gNob`K$3pPxUPnub66GXh1gzqq`3}6|UhB-LYt@rCZNudvGeHjUhaGVfH zZW)5)3V1xmZ;aN$TyJyf?6@gaDKVRJUWzz<$8Oc1~Bkj6$~!Uj5$@s{zCJFiG@G2E$(^k|>a zL1V3q%`&ow6$?x#f$%{;*RtQdc@w|DdE*f!7XM-?`#F$h`(&W%NNrb53q-w))B!2673I{mUTsDttA8Y=zbhq-H(E}JmT7?}4n4k(85_{o`MFwQgrW1kwQpHqV*MdmCqm)OaOC<)Xs%qB^X zfJjQ>5n<&9uojM?!bBvM=8H#KhJ6I;D>rZ84U-DYi7|{JQbH%vb2Nc;i%{zn(-NZ@ zASpo>4KifKwxSk{#@WBB?7LqEK;9c*88U62&@@=W#CI>_kZ;V4(K6Ev8}ORSB9co! z`7pte!6{~6oCw53S2)-HKuztiFl9m*1FmvOEEf=LM*>P9N9I@HD##Qr#C$;WUFE21T+ufGS>-3e0!pERe9%7Xwi2NaQIR8jyccl%9wvntO;6q!WTX zFgLOZDHVfy?ODW&E|$lESh+~KsDH~brV5(0hnaOUWIA7X@@5wa#yP^?ZJ3p7UCtk5 zrpG*0_U)Sda6wg#`JQOrezZZtjfqEqk>zn4;hG@XrbnbAHXeH9QE!IRL)A%(Z*!ZB zc*TW8C*z$UUVys* z3RJ}~a4Q0N0edql#2HDT3O}JQW8=W*(08Ow6VDI>kxrZ+C(WZzBOaS}=6IU<@)mT# zU0iqbFam{pVSr`|7rxi2fh0(IZ=XD1NTEgo+<~oU6J*Ws-=Dm?BP+qa#j4|@fF5LN zhB>_QItU5sWO=}rQ3w~AjNtdqEiN4l;}?ewAW|f;=%mgpE5vX#iCzy5h9*uefi;Cu zPq?u_J)JrnmjmsPqR53;C?d%(CpD%hiNq2dtjZ{&@*R6;=?Z-l#Zj+*2;ZRCUeRur zuo#=@J`_L}R8%-Orc%))X(KGs(6ij4qGMT_ zi!9^O1e@R)1#z_M_OHlk01#E#?Bz*)|u*MjNk>-^(Hl+uE7lrAUZjLiEt`EQ^|1nq&HMDB&8cilVU&oZ8}c zSjXDgO0(9EGz764PFYR@krWZaPjmp2aOXlzX=tTDFaCg%RNd2&3V+CqR7&YMF|QLd z3s4NZe4cVOa0xw#-VAFL=o$6b3s(iLxvaC>-AaTxgF#~8FpjpJ`vux3xk|#hyI>>w zI_~VL1MZNwSQ~;CXaYOD1-CW8f~MFQCSQ%}UmOn;aAbS979PAqBv5aXl`uGkS>kdU z#tSz_L?i}SAwVKAsyr%DKXt1(&_^VOO)<8=1fe(419tsDQ`8cI&qE1BB5AUajh%rc zLOL^NOTq&v6h}Wez=kXGWi+xD;jm>y9geHOSUrd=fh=fPbASK_V8kv54S>OuSN7wb!|1m2^miCa!8Uitss2LQE) zAOj;ukl|p#i}axF#k3G66wal+C7}rHlwI~HI@Kg6tWqOQ;%a%-Rwh*w;B)K_1ww~+ z@812xAO3Lh;>DL=ewhR`hC2;K zrx+AtR!m+>piB~T_mVXQ>@BjTRl%dx&Y;X};`;UL=gyrY#&TKhO(nGUkp|FmE)=YI z>Sr!Pm&9n-Gbkp3wU#ukt=bBte6Lu8+m3kT6=3lo!y}hvB(NQi6i#A*Y_<3y>1DL+ zBr`ldeE9G;zxfU6jS^^>M+ROCy#Q}1XGc7OO7!gW^YdP-{NL!6a2$C0AyDs!Pk%#? zTi6Z=&~IS}%qkITVH3tmcBu#%n%%YNb(z6&s09}Np{i0kB39%jF`( z(NEnfomsJjW)|2H{H6JjW)MlA6No~QxDHhj8GsRa6s%TpkEl79vw@`?=Icxb$R-qk zeI1;L$2wYo%e#sjlS-*nLgXqjMk)Z_vU40-5(VbVaa0Cu6h*CwkA-3h?E||Yiv>0l zb?A_KZ+@^jGz$(k{@aIe_b>klu2j@gvry&cR5(8c4#XD=YzPG-B*wm)A7>2$bDc0L zryTT_m_P(6&?_`mQ~yF{<5w=^6A@;K+7|3)lk_i67?%>ckjvAVxnvk74Yj7$QWZ0) zE>&VIFK{S1Mw3OlQW-Nl`f^Max#fxqYftFQL~VRHm!|1Llk{<1h%E7B3XMZSCT%Da zC|p{IJuY&st*x1UQ!Jt(M(>g3K(A;jmzRaGP4+x4Y60)jOZhYh?27;$%_dBM)-pPH z*2GWPct12eg7-%@l`poPXQzKSkw`)hFlQ(e(qh4~fEp341nopW;kY-wM&pU}HLL@n zy$i8UKn&^m5c@PTt4A1ZL(g;>Sx#z`G~`M=7dH1a1A92?qAW26H#dM}ajAnNSGWz1 z5M&oDg!5|qMW2%hR_fK_d{107o~*IOCw6CIsk0d~v=2Kr4R2FpTS7b+KutTg9yX_l}` z`9d<5#Cw506wsiXg@3-|GPm z)#Qhe0AJ$=Ni$rbqm-N>eyBC+a18WPlR*+z<9LzQp(!4gMOCyMHVPePpg?vfh#sV& zmg1PTGCJ>h~ zGuH9A9KB0dS}z~q1%f(-49H7AD2k^KI!!vLal(O)B<0XK**dWS1z^)P>4PwA(82VZ z#F7{p8p@SKERGDxiMilh(g-eaV+`o9eWNAy4wyDQlD9&&>$U2`v=Vi)=ImNll$ugY z#o8;TZAs-ILtB+&(6n=zX6YOX(GD*A;S$Idr5q!p8iww%;0{K>_KN<+{%h8*om4_Q z&m+Ccm3U~JtVtH{cOIm4m}Q=AN3=)X+`gq;;?%)nHV301P%R_#i`}?o05C{@$DWQI2?a``{cK>6oP8k*cYMF!89!(e%CY$}3}IV?6R|=TaP}vMk~QbbEJM4388`1OZ~az>Y=Hk~iLX z1CEELm;_|xvWz>Xw=_&QG%hq0k(LFkWCWO@p&^>YMH74*|L^ZKOX-&U5UT zn8viAx%Euo;QgsoS1w=TwE0+qVNbqFvFku~Omg--Mz>|ovdnR)JKoGBc{9sEKULv? 
zpd_>LjwJdftZK4M%|lowFoTeB82X_?wUl9AMG?%k-2{bW!L@Uio#t#zMSRDqRp;CX zD=}xuAoXIUoJuB`mor04#+j2K1s9pUV^UelSCA3NVTuq`` zE>sHHe7w8U<*IdebThGMx`G))3XzkRU~0?6Qo7_C-UV^ki`o(MNvW+KcNtVkg^CL6 z!p9|V&3h^}OUB0*hv^n37Nyn&LJ=>sx-1#56w-Jk+Nlt)t>K&XaB_*DZy z3`xFJ$TQ!L1;a7Ut3ega9#cdMtW5Dyfi6b#JZ+;_;;eJ_cIFNTC&$NWOdJOFggs=&y%@L5ux@2SHBHrST z41=`CY=W#|)&S9xX2{HgnSx>0BO~Z|G9k@+Fu;_o0IgSWBpoRf^CV;TMKU~rrxJ8X z`7}juB_aoZwJL*jF}nf0fPyULaK{0u<1`Q>NBlrkC@dqxVCkTNBKt*=;iRUMITfnj zN-+{FcO=5a0!u_iO}0T|tq^xc&D4&Bq3JruR8k|VWw0jGtP&b*Fot#rbami}6z?uL zdZEh`j{=%_XRJq!%ruJ?_KBm#W17eDnAjLH`KuN=;wBbb+t`5Zpmk2BkcOFyLVvrl zD?t&YGudN>oPa^Kg?F!;du4($TdHY#)%574e`*8PU=L_Kq6#!^yS14wwKPVh2l^1p zYZ+k_)Z$gNb2-?Eg`=rx3Z@-{Fl*IBB$7aEWz#?{l*9ohq@6EfnMQyCM7xzCA@vkd zu$GTdBF5N|o-fym=%W~y=>4b{={W4Q7nQ=6(B_~#sG+f9;WLDLRZgJ6*dEI|T&lVe zgmn(s2O7p;Cw{jnt(4Wck3b`c5yH;z-L@bwCXH3*Fc5u7| zOc$lGtT;=>Y^9uo?2}TSsY}vtOQjr+S#j_R!=i`CWxjP`XG!V0vw;e#-hr?&?>z#p zWds0wCtx#Si&4uqIVulsMF+NVsCkFnO$MK&<6+(t?oy-dC<-RhVK)E)b5RgRTTq=)RGySnb}&h2+5T@g&>? z={|BDItd(dbv?}>Pp@)T^bTpRIM+|XmKeI&b4XfUI$DBd8*Rt}251%;z&XFH2mvD| zv51(@7UB_9_Hc2OyTOiR3I)AZBB2Fmbc!$Pay~~t%59{R2$T3YU=cUVxDSRQc6vt@ z9Xvt^c%cw%$>|?54#4CXq0WgMbP>FU;rm&vNuolL<9DPM6n&LaC^BWUUSMIFArESy zP*x}t@lc_d5#IS!)cwRVVAY6+*3f6O&PvA@pJCWx{+lb%9#(U-;YsLV*;Pvi4v&Cq z3(zG$5CJnmo?-I**a`NQ+=qHF3h~z4tPG2+^VJ&65O+>2mQ`YjaITO|q&V0U{VH1~ z^a^DTYGDFAmdbe?NU*el)Q4Rp3Ro%A%fX(`9+?%_@|E&>sFu~zGpoIs9P0!ef-4ap z#>JvOAcSctrB?D#vKr)BA3(@waUe{uDbeqQ5jrnOK|Y~v(&vg-OG)i=RaxYZ1IrR- z!mRoiQ&D&W80KIuWS?BIgzN{P;n7K&ia^538jBPtk=ZOIsROx1D@?)<3`IwtfY4|t z$vKU#u2Llr)w(XPNsxB|I)ffcMn-clexCvQOC9VkgAH?g=e0C&Z1xbqNcT>B{B}PO zFf1VZ9ldD*I{we$qf@6zHf`bWYU_dAWR! zNUTBMdFLI>d-1M|{tlwrTRmq4Z$(ixMR_syMfS)mFR;CZ@H)N@4I{ngPk;Iom14z3 zW45QX7=Qb%7V9{uVu9QDzV|&!fPe5KW{+}qEB_Gj1_2sFUUwHBPn|q{^|LQn6pP2? zz=UI#XpAY24^hSTEL7yaRH!7tz(kAkAM7Q`pzexd_orZ z(jMtaK;a(ftQHCZM4x{8DR}fMg*-Iu9$(KO1a|Gl&)~NlH~C-?@H#&|nW+Lwu^rek z5~aJMY>5p9`}DiX{RW57=l@yiZ6J3!$`g-lM+U?IzDZzNJT8zV9d)}cMan$lpRE)wMPkC2!rvIQ*$G{27@C*Lzs)A z{y@jVln;e3Q+X+2B#=u5tTo44ZFM8t-P5OOTO{1s+qbr!Q8ZIVH;t!WT>zTtHHD&H zu2wcO>AwB}H|4^D4&{6mT_MAa)QWD>g))Icko-Z1jy}<>HK>j71A|gYghQQOJy0*J z77VmNp2VzbFn7X45B#}FB^!S`*D}S9&OYj9Ug8IlA7138J6OF`VN(Ul^rvrA*i7@N ziwhvxohdJ2S|}51>Dg=1u5Rqxm@hL!Mk}Tr)fhS{AR4DT0`D9oUa735GszPp_14w` zc!|p9sQ)RLz>n|FQ>1MzNA>yLbz!$$wsY{CS<8UsYD+6*GBC<>>OKIZ5i*ncR{ z)6U z7+hUl^&Y)esVo}VXm-!Q&{{?Z{Q-1=NcK2&JsOZH6j6~#m9xCMGC1r$7PiKa-pR6b z-l-`y1TZ*Z>LUf9G;t+V%D|3#C^VBUq6!us@6c1>g>%U&^R{5vM}2+Q1FuysG?BImkJmrGbUxSIjNPQ6A25U z{^1eGk}(HCB95tJrRcWgFjPTYVN4)tKqMH;tmXRq2F2B|NZ#e*>!^#D5Wvx3&}#@7 zL||+1FfUpG`U7{Ah_Gk`xxS$hEJ^t&L?gDYup~*BVqrt7LYCDbV>Xna@N@n#r7}fh z=N8Wx%^3-RC)!>x2w&Frphzg?mKpfuB5RcS;=teughL3L+PXSZ`CN`F5KHKK7=ZJ` zM4u4~EK{FU^W{n=Uruy(sjg6{cW`K7W!1@ttOICS(o9J~m*f-rdz{TE;8JXBXC{DmfT>@uxP5|+bNI$_j;Ga`D0(Ju@&g zM8#B?Ix(Kf$&4Hm0PqgmWye@Wo0ft}=SdkUOzU%{^7=+L+1n>Lkfx(^V*@=hi*`~q z(}|poBY1Wze^7wDRI7tX6i51~)VhX8SW=ToG|PCbjytyxh&Qq$k49Nw$A*FJ0^Amt zRwCU!hAQ3(PR>U**x8OCq<7{8YL&aTCuqFc@)xi`%v+*%d9VVZG_n}JKq6$bOY-CB zgF%3S8$`%RpMU;&9J4~Gw1ig-JoHUELaUlX6?Rb|0q)cSEwQ|T zn_%pSk@l&PdaXz7RJb@52mv;l7e9$|ZF`~k{@_{n16 zvR71xju@96e*OO5<@n2d@-PiB405*?;o8!x{2B2vMZUw%nrxSEoAk9**8baa%}ZW35xO6CK-jK_gz z2m-An_h;y{kDCevjAa-okWpYvgpt4Nz^V6cJuy$vgs&3p8Xd2O;|r_lY=L9nl9gZ- zyBRiZYWk!-nqe%jaP@fgtFOjq=9yp19s&E!O6b)1^!-Ne?SOahRotD*I7}S% zQWxqlgp$YS?;hyt8@zt=Mk0ZCW|WCKx;=~Tzbf?>pNvO=Xe{*J2Y;NNnM3_3EIBKn z>FMd)-`rvkCXzkot5|TkW8AUqKoN|H5GMPhhYJg9s3ymprl#6?avUmoxVS2#A|(D$ zxXfY#bA3!j85TA7jMSpB_dmWeK0SvhubEq_Yvk1Fo44ZD>3X?R zkr`b!os)i8f@~Z+?_9R@^{s`zi8D?`Pv1?BPt4r><_8o4!it6KdkCdN5%z1P&Bt40#DLco5;jeGsOXExc<)dwK 
z?(Rx<_g}wtPii*#z6|NaJRFZ;uoS~}IIB+)l7D^c{^ax-6c*B93my$-wG9P8522im zoTlGwxqz}AM}{wu2qPuTWohibt$|N(gbBfR=PPI^n%ov@9{r%BSQ~eC1&3Aw%KZ}) ztTwEytkEis11J#~jaUxJWw7*=NDu3-1+IN{bBwV-y&L9Ej|q>CO@4dt0h%Crvt^;M zzLIHU5Hzl_;ZGlbe)80GAlfBn5(u1_I=i%%&7?WLl4)iNr52kXWGaUq0%cb)gkCV6 zt6aZ%`;^8Vt<8kH2O`Pt`wJ_fDEvnnDYTNThdAAglr$)Q(KNmH;g!=2i>Or4f`o&o zCMTEIGb`&k+_PgniB82SvSWo(Ae}BolO2q(>)F!vukW0iVlb5^Oh54kyJ)Q%Y&fzg z#vMRp6SjJ)w*8?`Q8gGurQ)l^-z81F#`4w?X00*ds*?+=17r=2`MLYqn za`nrv0DVA$zo*V$kOGL$WK2qQOwOLY@%3F)Rcu5GN94GM(JB;4bl^FPApwu-rRt|w zzUUhqWmmfJ9!(CNp1r@gzP6r?a4aG8!7?)@hZsp}%TSpykKbA@fA!0+CNEx6mRclv zaxBr)`_-+x3{Os_EL9-uaBQ3xZ;Z!LBIBv_qbpy|oqdi)IS|3DZes4t!tx5cQ6WW^ ztRivjfN~fuLM9AWsH0iETUcEF>g#WY#>TYpR(<>ct(6IPc?`q{+jay)iDZWx`c=3V zB`g>Izi~a1>I@`0C6Y;<8iCO*t*&JXI8e!j;^9m&pQG0?A~XJD z433ZZYM6DJYd3F?PRz+sJGm;M$(i$C-M9rVSWu8zNxq1WI5h7Y8*C(EhnSqUzyJP+ zLnp=q3Ct1Dt&^;o+M*4=3QgkpvuE!h;}!xVHs~*5eFr zEPa_p|A9~jQv>1D| zPGQv%v)jdxMIn4CmHyxdKfvJt#O#%}Js0U|7{zk&mXySB=HC7L_tCJ!V3}?Z$pz=$ zMZFFe79(!$1s)lKDU;8kNldFblMp;23*y31+oM~3Q46>O4typ6w0hfz$Gw{m`hdBi z1}`G0$Z>q(5%BiBtiHo@9U8ab5!cw4FJGo8zRu_nL0acp9beBN1j2v&w}1N#Hv72A zPYMA#F#L#8;0W|&cmZJ~6*MoVEOU~jYGI}TcNBxv6g_fcc<}9aev8{+Miv-pp6y$> zi;bnym@@>!2$T07EWP^rPm?34jdTnhHVdezx_Y};78mZ^xff5xnRT=Emd(UyC0X;u zH83U~3rp!_=fHD6{Bxvm&LUCsV)4@_PXGR`e~!jdm{%1`97t7VwubnQI9n`LbA|Hl z2TMQxmwz7ZKdD23A|2?H`}?~;zjB4yniK_$+$Cl*nwPR~ok=BA@`dH}#LU^z3oqi~ zfI1LW@j@^*I{e$;{x+FRC6XyzpXvx26jkik2}ZKT^2$d3!Nb)*`}4mF^e|7*ZqOYs z52OH=F%m%&)#{EZTOLV`=!iigzNieXyH-&v42_)l^rH{4(&A&8Dj@44J!{4-m2fSc zDLhzQn?C!(iFrVTgpEKi1D{A|sw*6Q@4XK?yL+)1XEKdm17kfVcxY%=Gr84uyfRkb z{Hy;J!JWA_hsiK4IeBXA?O*+44EG?+7MMM2PR{Vp=FtE%4K%sm+*`W*(rf+WXLY0u zl0l`?dt!L`{{4Fo9&u_}Bo5vyc+;aOw)jE-E9o32=N(-G&%OEQ91sl>rDv>`M@NU> zed}F_3a9c%5*)K$K*5L*Ec-={L8gK2m8Dm z@|x55?}Qb;nC)*DE?ltH8Xk8_b@({1zgrRZNUtZrG0jA_yvD<~b>BIgK1zgFt+M-! z^Q;MbU*rY$U2r|2SH8_^m>4kna18fu1!`>wIt+zJhlbyM`)yQp;SgtAg-}pPZy3YM zFAEv3tYz;#Tz~zIKTD0yBAT*95n&v`40&+q!L8d1j~*oxc)i3b4Q7R_8!ZA#2UM&r zEU%?{hAzDEbMYTeCxbUdqZ21j{^r-e!jc#MgA9xsP&y{b&!K!Zz-F4;_ZNQhvwsur z8)F~~L~)rN8X4~Y@Q)wnO0`%l#gK~Ssg^p5)gpV^=(Z=SrMEVJ^4@n{Lk`U zs;|JsGafeLu#`NjQzf=OHqedU!_uA>^{*xcq|v$z@o(`I0+`G z6&5dl_Lu)duwx*=nO$(mQfYW>_`?t0M>&O_3+!k)vlK-U1=mBloXr*Q-CsB}|NQWo z=L5kMD{L}Gk0wXDyWW21op=&?1_^`DigGVHcN*zzp8YH7Y~{_L{zaf?NNt0_90moD zZI^WcFo=@TFt}T%*p~J15v(AxyXWSOn`2`Wa*7SsAY6w%a43DN21v0^#K2@K*%c3c z{K;pSPIGEmx=@ODbZu~~MKqR|IdEWUEtkpHe*9N|jSK)|XR^=Y487yT=*T<2{T+zY zQn5P|m3}5`(`p2#4w*vb*1d(xFa97kIadp#U1iC?813wcmdhV~{Bf$ID-um0i_tDA zA)xzUIGxTftx|vOjeqsOY0$wO0WJ}aj-NdB>wo@71RKT>)XDTGs%Pf_neh>w$rc_h zXXv@XUKSN}tOYV`vbVRK&fmECE#q1!*^w`>3W~ohj$ex+8s#godk+?(@zg6n{Yw#8 zC=Niti%~U@0rUcnqj81Z*o}KlRJVxJHs3xxdLEaP>co{op(|Ie;7OB!mxFlM9!LM! 
zY9PP|1D(CS%L@-~BZntDS!F@Cko|oHS&seGxJR$txxX0i75{uOKr=|E70Vq=lh zqkX^m)qhKM^&sZ6F$-^SREq^~hSl71U}LSYu#$fLCqE4jun#FI(~xkyySH!Y?(MbJ z6%3Cvg(5q};I}ZwSR%DiC}awyM~iI!7(4sMzobL4kqKWt!H%xJc>Iqayq`#Qpb=)7 zAEqsjCgt)*CWjkvr1xw-@RPs#pS7wd5k(%U`^Qdyd;P05%#cw-AiZO)j_AM&1wK-0 zQPkTDi>wy+&c5<`Ac!9cd~RT9qS=c&ySo~&5;pB($U}NJC-1y9I4j4V7d;mNV0?U> zRF6$;X-v{1F^g^J`Z2-VvxeiByF$Rw#A^a*sgU|iTe@q87QFz2ppP1n`uuwc@UHvi zmtXK|aBu+4Bl=>v!G5S>pY1|$LO`}+$%4#`?I!}&Yp=a#L}hoN!L?hxk=(<3_wM!e z^^!m+5$VtF!p3tS(L!NN<$u`ibZ2zUVAJaw|q5@fdcvU2X}AvIy|xh>hNf@ zT8BY(3R}Pkt*@^44UNWohL}(TW7jT+aVMv-%?doGl9)%Z#u^0jj4UiX7&NwjL_=9r zH-+}_IDh6W)Y8bs-{AfG59pHgh3$nL$2{c_I93xsl+Bp#oin`C@iL8VkEZE(0-skXF^(kYA}7fbd(cj*Vo6ElGzDm+&5 zG0;F@l7rciv6-$|?}NqrC=;`p%6g`>k}qa)6O7SssdjQ~X8QTp0-pIp1<@;eCRY%W+2>IatEPABFV}53z(KH zu%|s=SzgDSv}184gGO>AS1ScO#wO2?o_{G2N|gc~%sBC6zz))2GBrIhy^$>Es1g&5b&pR!KQebYP)p+c6CFDfEmY}D#u9yl zV<*NJS98m&={3@4nFHt9^|_9fPc=R?I&H1R%X_R0+jP> zFI>FbbrKzM0#z~}Bx=^UXrczNsgJv|Q+JaA6#dNDQQEdpCfEb#REo9*Id5`ul7Lq! z@!0V)L#7YXNSZE(kKypC$!WIBE-tTZtY_$Z*z)Al? zIRC?#0kt!S6mr_j?D*75y12N?Os%w@2CD4$4y>VxuSVj@-iw#t2=t()Nn+Vpl!s~d zmnJ68%%d$xvzD`p8lbe6E^QQR`9g3V4P7mAYGQ8Ug&zchU6oo~LKIWAP_(meaJYYP zX?ZQ3W~X)~TR?qKTutZJGxboPv#yZmY(-Wv#YvH~D)l$@nk4NT8pj8(%dR4gqn zXIYV#!hEIBtP37Qlgq{(| zSA(hEx#^j74x%kV!0BQDU#4i(&^2YV)kJFW-19$-jZD{SDXc$H;$d0OsDldR)a=>r zXkzivav@*cpo64~`7}-rI5-V=5VhfnxtYta1p+A;2s0#is$4jj?3kIEMniG;0Ua!d zQWDGeY`%t_RKwk9h@5dT7OS{{NSru5-PhB1 z`|dr|?yKp{dak;P7*K$#fTHk;lT%YKzX4mQhS-3D{@raBiNxmS&MdEDRmSR96%?#( zWY)9!Y!MUkWH6k1{_?9KErww-nUn%jcE_Rl^A{?aVlG$2dkO1R*!6Ct%dCWDa>WpX z=Iq6x*-L>MxjX zWM?IuWMht|zq?9s%8VIGCn;&{v3Jc6xH+(q%b^4|O3<$=y9Juc1zfRGZK;OaWN*HCL}*Wq}#M+G}3<0lt~1 z5wf?Qnt1<6H|3O-fAi)|1o%dg$A1q10fWEY0VYll%y+^55pjUZwj~9Lo|>4j*Nl@V zN8ftuty8B?+Ij+yeA0~oYOua~#C(LIe?tbvW2k3~cb|P&a!J`FstR`Gqx_{?R zcXyWqlsG*BE^8+e8UdzP+))#24Wnyp$8w02-L&RvDg;Y>T)YgasKGAi#uO?bTg!i#_cy#U0+ zcaa7)F-qrw0U6L7U(Xf7C;vIHjEe^%>o);fh^LQSW5KuU&zLY z)v{-*)kc@vhc?*`LL z_)yexQlEkyP?@Mju)3A+0YKzzFLd%-Y-D17WK3JPL(=cb^hjDuu!*?}d1XIN0f0~$ zsY$k4iIeAFIH|qAN+4Vqcs2c`-d@308x;cu97FK8f`MM=@XXm^xfdWYhp?dyW>;xk z&hqesE#ko>4NGSgh7MHI7#fi5A3t|_{KADmAnj0`!ozJKoD;QBF4-|K#xqJ()J5IC z89Ps0G*4}|;TbitIj5t8xVLS;Q4s(G{4?d=V4+MiSbfP`x3DY7A= zOVKr`S$HvO-O~|T6yFPIk8;WDVL zFd`S}?T#GGDor|9Iqvd$Xd|LEf58-DkQGjixCr+usw1&A^&C+tp5#h>EC-@#Ga1qP zg})KoWBhM-3fG_I81v;X6&*15E z9j7n8q!Ny^<0Z)+S zcd4nA?YRu~pb`vXYh!I7veB^3S_L2Az{PB~XW(>q*J&Whz+tcx2jFL4CQib%w2V`7 zKm;mlxXTCef$meYFOIS13h-P?V8XD)fp~h-=u=~xIY5+}pL)WvXh)uCc-q3>zL@K( zOCUK+4#GCcMHU7rXp9}UZ0{yI7r8dF6yf?;yU%Q^)BgnM{XVgvGy>01?Ph{N<1 z4OcZViYNff7gV_zrX3yXYXBYbjK6_oQZm4>zJ|Aa6^L-@+zGM9sO4Qq=y3UX8-w9T zI!4buKYI3}LkE%r!#XzGK#Wld3{r^TPZn4@*@GCRvcf+)es1KXNWt1;Dr@IiK+vuu zQSF8DSW*bY@KanKf9VI~LI-0e6vc-?ClWARm+59mVt~@8&_a5Cv=dKhz2j$L5%c0F z^l^xj4*``LU~+C?YDRrf;<_OTxf+ZCM*s)`%cTg~JL= zNDWS22JbwR5gjr%^SILK0*FrhgUG?Mq%tyaN7%wKcmM49{`s@YRmAldtW4e6^>?Hc zmXJ7gki~wb=fXWQT{3lY{0wI*ur?;DpndShoS1Nx6&gGV$lf1|U|z7d0|&{tD#Mm? 
[GIT binary patch: base85-encoded binary payload omitted]
zCN8x#+NsQz@s>Z~!wM*{{8|P$#Bu_~5ULsLr$9nSALkBvyM|(Puj?=pPaRQogV2X*$pYw}coX+-01{UJ!A6+uEL0RfzXC|&0mbpmz zpa1q+hENV1*nzM=3`uiI$i3cqb3lu5F6cyc0-MmxpjR1UV#`P5y@FZf)N-BItd z-5t3Ff_S(0Y!MpZOM{WFL3{qLovJVYbwnc4g+6XTjmi`$=Rb$_z66R8@omeMXy5lDP_VuV7NL2!3AQ>y1}!SSel`Y2V*)^m z?^9tnHH*Ko!OO-^WB3e`?am}A!}8%@%%o?Kegn}zgGdZhsg(Yx2QK=K&!7xbVl}0x zwn;H;6ht8F4V=((GxPeAzwe8Z(sfY*+Dbk~{sdzGvVfu!x~BM;DIw z*y}ng_Pa)eDE)T$1N9B)G~Ayf7xz!E*nhoYl3idB6UE!(YZmGDlzmCa6qL07qCq1I ziJ#-l>jF%;TAKCIXaI8w0ZAUeSs1Y%ha<&iq=ucHL{gf3-PX8-j;*<8(k zC!VAYvm$cDL=ZumgcL`+*mjkquB(w4y{*vaYZa`H(TP9*M}pr@HUO6}qDy~4bFmcZ zp1E#vAhUKz?-4no)5(=F7??<%W0D}z0;KD93Z)A*gEi2or?MRhRS_>N$Gk@nMI?_B zSI_x{kpAhEp{!@iPPnuXg_9W2MWq%zz=1*}HuaqMEBCOrA=3A2c63nAg{DPSZeLKC z26vX(A~a)k>5uM_e|;IG6T21$N)cr$WptRqb} z+MY^m$e3O9*BnWpg3$vGrd96`yNDu%9uuS;i2N^{vC_PKWd@aTM3J1vcrb7S)Xu=} zB?YHQ58C5^f^bbxK!81mttcpI$V~F99BLS0AGQd)s`)3O;y_C^6{^!ZWL<#+S9ghd^MYP(XTN-DO+iQ)CU47n_g%Pbvr z6F)$IhHhtewo?jC{FZ2Sx0cqn5ErP^^I1C0|7I}U?DFSTS{Nor)7R~Rs~q!Ocl%#| z*3h|oncDM$a&0a}){=dHx^ah-F6bz;Y=0R4%ovRab70|Wpu`q6rHw2s0bPwQsHF(S zOn08IpeDu=uo)_(Lw%B1^+g!*I~`zT;SH&FDExP}<>MbA#A?YBJJQK1hd4mowPrWs zzZ4x1sOs5JbJ@;;MeMsR12;jQqG6Tz4_|r}iegJGeVlY)GEw%A4PZD!e@JLYsV_CNkx}L3(fduU2~ugtE&+b7A_G>u$dljwj=lrMmm$_ z2i^!2s&Zs1e@n!KE|oDoHVTUXPbVC1_U90M$yCM}<}I@7&-m~q_)HybE+*65_FWV6 zSTJ20!HJqC68dC%Ia1t%By@Jd!2#dqq_)$ik1VU&*Z@mqa8kj$?|x}ljDb6wDwQ?; zeDVJU()wGs(U8#UK`*lO8o_K4 z(kG*l0UAAaJz6L>ZD|=Br81{~+3i+n3xk2cEh`NPiSxF-!$p=)QwFrWB0bM-bb3MN zJ5B0@hejEkYFA(jB}ZZhr-(&ede=C-{iN8TVpYI)Op1 zdw&G3x$SmQUpSCmzX9aGTu%+T%5+^P4WN`UWOGm;X zsQ^L%Mn32-*g)}7QEh+m0y65=YtAdEDW2Ip6TZ}fxobO}O$45&9t8#4HIKM%d(a3n z`ASTy_>%nmP$nZ0A4D!G%#Vh|e5mmH@6_+Mb@zi)jH22gX~`NEe*S_&3zD>wwNFt%o%%+}zo zzR%aX_)y2*UN02N9u`Zta{)Z=mvibXTFlJh>RY0Zm!gGYFQio99hb~yjV(^%_6M^Z zL5YlVsXPT0KWe^wjZ;oy)Zwz+gE`2VRv;mlDPhc@m8l35h(?#O!Orb|NSLNV60-|J zPQqS6vik+u6(aZ~GuEN;|__kox&Cl23EC@`e;;bnWZ1@y;j(~+?q zHvL2km{2zuSVNl$2{GBp%vT?BV33LguUW&B_eT>+#uchGOmT^BRIKNdbEh;Qu%729 zSwi|v^hrrMZUDUdzemX`s3z4T%feudH>*YjULK#xuD-L?QbjDNMrF|`8EgltAtr`!#@)m7yOcD=N{}`=(Ovc7M$~u_{`+hH}BW7 zu9M3ueH_O;B;%v8`HwJ7!52yP+wlbHS>9W@x<{foiETmANRN0{!7!Uz2U_jVHp?r5 zhm8ipnJLF)=6cl|;|*)~D?6qzH-~WFTb{i`@V$G8zis3ZXoMt5ZDdjkzK*bi|ELh_ zl9YbqBm}&;gdpM~lti~){GM1OQ#4ZP%hYRxKb;k5?hE$$ZvgUNzi&i*IjEr+qpJuL zQn%Jkk>wh>ySp<`lK%~7n9lj<-MvjGKuPy>SB0TY&e}>n`M`o|umhGi*klxyzD75; zWXoum>TL!@IxGZjU(nLBn~J$cv*-m{H*61n6Sir525N!bw#Ub~dWX2%D&??zR#BUu zFYulDJ(J_2ix-Bbf~5Z+!rm$WZ$%={*$P} z*5=KVIPNTPzWYE(s+-jCLLG6jp*ThiUC}@x_Pi?7Wp*yHb9=S6;g0 zA^J87_eg!Ta!qs5qraDz(9y&Bjw~S910Ao%Y3!j+Wi`B3gIjOY7Xwow&5u@W4$B}% z%b6RHylR~kKjh%R56?GCmWiHg%Pzae;DBbmflSx)dDEeyWkbK0CU2|7p3B{I_k*Mu z)Hh7kWD-lR<>{j4^?o8rmSDcI_(|MIJ~Pj@!yKnmjbH6=S*yoJcjD?prokiMVY4w3 zUaIHdtwBkB&rVK;x5b5JMVt4wicf8JWL}s6@A0ERBAVp&wc6A51a`Nz-&+Ed` zJX71nV>9HtURe&n57eK0mQ81L=xWu(r|Y$$QPOKFq6Fn>RZS z%@CWW^f%`Eau5f#cDFluGybmjgJe2vYB4BStt4H>;=Cn82ThdDTOX!~U~TOAb_u8W zu6|1Pq9xNq?u3_h%1i)BXb2dnGO#B*^!hLkreVRs4 zIO$inis^ykefu`9ZM|3D?}6Rv)MM~=6rbm2?T>Dmviuz&7FrDuTHwAvf3$8p9zEZ* zdFjmxdOCVqE&F-kGWVQ$ED`0pbz?^yLyYsdavfzEbai^NeR#vS_B*A#W!OUZY)L7K z5jeflXf`lXCnMT;?)Lh)3zi_!=x>Vff(P=wM54HJ)DJeE1o9|hJ5y&FOKdB8nDoq0 z2h7Qz2D+mR6eye}zkA~)2_~|gAFz~}gvan;^u6I8Sj;_U+$v@9uhDwll{{$Y( zbM`QlWmzlKJdJ8qXrHC%z8>xFj(9(peTVowOqe?yeJcp!NoM zYr1&9Jp7UksNboC+@|HqmFkFTm9Ve_&=tzMOsNG(DW&%BY!Y>yo$fcq1!}u)Vn_RA z?;cXZlEtqX*snOfkGwg6UfahmytZ4dO?=l^MvH1%Hf>g?D^9A@v+Bbh=`GvrhM9Gn za~Eeq3+5IU@+3D24mrifZbGrHn%i|m)tg@m(&kpGYSb#MyEqn`vpk~OL(A%kwupIb z>*nnwI(|DEYgs>hQtQzNqIfYCDC1L_x5==d%r2fFlyR?$wK)FmRVWhaD)VU7sw~~X zetaRTg|%#V3Hc9CXMtIx-@XyivBBsa)33~-Wc*poo~jZ{Esakn?I1d+%-WB< 
z|2Pl-)cD66xG~_B#EV|_miI3#&=k$4op)!DjMNqX^sj$jk)T^xHBU^t;kz4eR+%9l ziSYH95wK3;uG_p>q&i}K00F>JoL9f?)SlM!Xqkpnp!SP=QI7rxS4wh-UvcA+8*MI8m)37r``id9;RP}`(&h)xi98aD` zNhf=8&bU(m>81aBUsUT?UpX!47%}R`^gjc~?vVMy(Pl%&QUfnTws3jblm)5S5HT8; zmj`x6!J3Ga<)QyXq)ct8Ha)f@9_E z7M0U~CQ^IB&Ll zn%v_-w2DkzF4V5ZKV}B3oEk;D+Dc02EOF^(42M9&ima}{Bz%2GvFOSbyP#wA$HkQ; z-s5e&h$k62zI!7;(aPI2@c21_{TfnsYz#B9?NJHRTdV#Jmz`#>6*s&Zh{|TJ=j$=n z%l(*Lt6Hfj4!qw2!K!);fgaQFtRLl!BD3ud(dCFu9=G>n83$9F1&p&=a_057|T^=bD$kN8zN@I3n1*i!= zV)2=o-5GmgtD~RcCh(ZfQUC&^*h$|UrVZ(67gMVT;JKYTM((BLf${z z7gjkR79oiqhBqaO_bHkvEssR*cl!NZb9Rdpe(=dh2oku~8XfxrZEI*gt?p9{y;plv z95S7kK6WQwE~|N)xylwLQdRz^t~wpnp2TDv`0E3ZX(_;JoqnY@J%i-vbC53fR^VWD4Z1i|IyJOsX6z@fUe+1TW~tlPoji>44~k%XY@mABp!GJ$2HLM{FwB<;!aT7Ib9;XHAiH22T)I z4K9^+NH*3$_&*L6WHm$~m63HfG4>00)CdFud z&~f(>ZQ?`!Q*A0Mb`Fvi$PMASN8HhF-rmoGK=5+IM773}8r+SXgD?+JHUI<>A2?nQ zN;5T{B)6dm?9{2fzJyyNBv)pm4s{7~g{o+T zKz+0_TKL((jdO|J+NbxnPAS$0vq?rsas#VPh{eb+FuMY>YOa#RJlE<%rB59Q$Z_~e z8dpkdjo*pGaM@8LD~}&OsGau`MkuyjCY)c9V9kygYK35op#St2wgj;Qv4hA^iJM7` zY3OsN79Vw}(K#x6SX_L8#e@ebT#vys+^3wV91hc#hdRqweJv**5F2s@q1@qC_YC<5 z;l=SGyMy{4{5o=7@}Z%80>8-n>>6DT_ZJw8lbC5EtB7*kx@az9_(6+KPS+?A%UJ>7 zAM(>&~1UGm+81sk|*i^jcvbm9a5QxGu>zL8Z4vJsi94x zDnGjEyGEbwa<%I>h#m2f&?&u2&7}6UNuaIA^Ld(+)EOi@%gQh;IJMa*Fvt zmen@pXkKB-ZG#x#o<~mCM z-a=3gbu6o1G|N~?9Ja@9QU$h8c*OZx#~ls)^OlyuCBsgM9W0uy?p$)RF<=iy#+mYe zv)NyL$0#Z%-Mn@B#=NYRX}hgQoj}Aq3F2SA6WJvP7Dc{ZBJ78ZzPkB0UvGJnZ{X}q zJH?ogq2d%@u@E@d`4x)!Xi-lJ$Y8*)e+!|}YWCgI*Xm-U&dT{(q?9;Em>UuC=;J$T6o8>0v9_G`_q=hW|i_I=o&zmBZ z@^_~%MY$?j0EPh>pznjUEfJQxe#3YlW~P3)6K0>Ca$rfqsWeMbMB_T>xVD1Lse7I) zq2=YJ#iHvH?SJp_cC|D?Xh{h4=Pzjj*iJlo75coYS`&Xd#w>p51wOUp5w~I#vUcjX zkWERi^5=nQauu1be;OibZ?ZT^;<&}%4yAAfwtZC7_~oiA5~@}34&g@nloXx#DP;1l z$Q7lFnwp6uGlP$mo8qDKHFdWyBO8!Xa$PBw-ISkMq z`#hl9PVv7V@xRZWxW3l~Av|gM!{nNH7Fe1}dA-RSWaZGE!YTRrUVo{O$~y%k(SPkh zatSu2MPE>ZSVdWxkr_9T0d^&Vk;PjJDt-~BlWAI?d%2Z4*Aq!Em}g>`_#iTraHU~a#~XDFNm`a8Ry*e z6BSte=+Dwj2)uE{u@o8}f2efV+_{v%XuNu4H+)kmXAzff!-_=c$yu*vGyX8p_EvD()}+H4et zVwjO+NL@a)*I58>GyK``@F~!l3XfiLGG9Wn8B0d9UoPtTBEn?*i)X%&&?bvTg-M<3 zxA#7^x6VOH*I7R^{)WXNvzCKJ0vYAt>ph23-uLGE#U3~ng21fs=zI;Gk3@FZV!fFr zN)`P1pY48|^@!WLekM?H_K&N}vRAl70*8eo1k`_C8uq0BL&~jUTTmB|$K3;NX5d28 z49y+^k{9u)R7})B?7AnVO(dAU(GE4!z`i`)6!GT|Yrw?_ZzannV12;{h>?g}{p8$` z`sRlHe(_53xm0AF0sFAvS3&>mQTgAie5yvZ8tXs2LG<3)H`C4ct7GdxVl)D^u66?( zE-dF%R}xIJ?l}VO$D*-B3m`pBbnfSQwowl1`dv=0p+J&^@DLCh_eUky?_iB(<`ou- zter1D+m!X1Mz}+tg{h3%cv!R%t|LgfL%4gJXe8J_4M|CPSkeDrqy17IG;+o77lB+~ z<&~UzugKfuMc-0giSLW^Z|I}aKMD0ZOA4#`%h~l+%k3pzviDYF?CY6WP0rkmrXRc5 z*9u^J(m{Xd8GX}V7~LFW=tc1H649cNroWYO(^y_V4tsU!4{a;e`?zeLP_MYE&!%Uc zbBNzigp)xailG1Y@W23sUX`IEV6QlTeJud9I4PG1R6%b|{CmP61IQ*XyV%){TM>%X8TBGRPT zcwGX9&Y&FA&}?Erud~bP=)aez>M;gObF1S*^>UlXdj`mYbH?+{=p@C(JPgzoc>j5i zjKa{G#!@H*mSjU`0YjyP2h`#qLFu&-P_*p9wl&@dAt!D`6}`e^ezXMQlKw9Z|J(1L z#-B_59Q^yS+Zx>c)~3NYSsz{PhW?nd=-4~ZEX&w80BNnYq=iz8ws1(}3x1_}?)e5e zFYt(#Yq*~iY69?H9_Z`UnnQ4dD97q->JJ7N zsrV~-_m*9ztm=rGTX+kL3^X8ndINcPx$BGuRC?Ci7_lsCfeOFrwKE;M_NS;`&nJ<1 zCWQ~S<37p(x5J}|PxC+5Sydkg%ZcA#eG%6-#1ctkkX&*E^(ct6f~O~ZF%9Mn&CNHs z7!7E+)mnSA?*6xE`3-Y!6xCznYjn2p-T`pWsu)ue+YM1f@ySE6v6F#MM_&;ydVpE= zH+_L8FPRY!d;-@=vz1@AYa{J9I8~!tlj@$0LVC|OFdv)qVou$q8ty&4JN*|`|86XZ z+kkI?#7x>}a};QY$~_Q)=j%F^6UkWDJ#bXUd%-|7zdwi8nnyp0g zR~T(CY<3auu)JD6Cb8mPC@SyKXkWU=-MQ(TpDe7o=BRf&PL|vF{0BYOJ=QCDZlS80 z&E>&hO3sYDKtyNYgo2dI`MY=_OkHp#a<#HXh%CA~$Uf0B=lq@9{lCwp@Gu$so=}6O zncT8F8BvkU>pys6-_Npn^_vF?tmDbpi5Y!*03h?nQZq1Ap=@Kb558MkAvX zNX-UNu^x=&=zcSAI*|or%xQL+CA8UKUWper(oSP{l9JF0JFYK$RN?eTU=KVd@N6^+mdYC} zowrHC;6cj>pLZ)@?;E^^oGoO@gvo_#^*B}fn4Nr|0kls 
zeou6VHjhdz8C;@hRw5sMs99Xb%A|(iw6V4H@NJS#>;*+W7?SRz#CR>P z_61o-;YbSMAwujiBi1(hpYM+QNZrn6eO^5}E-G`jtQwX{OrI~7(q&XztN~2vKFcat znk5}Ers>O;^56oCUZ{-6dP84&8ici2U{}Ia6@-1igUJxk5-SrU%IEGRV7qH&lm287 z3z(n9q_48lxvWn)@!Tt-^@2w!@1AM7;n@q$K}C#~B_oxub(}{W8WRw>Z#=ZAS=QOM z_+d2H5AI9J+jyf>2Z&jd3ZjrORGsrVnj=?FU=un0vBg( zr>mZVWQC_MYks#(Kvi$Y9ZMH}d%}DM1}*Okmse9a$tb>c559xt)Xj#UfJkA^)ym|L zg}1e3QMmM?MfPm|6|y=>7Z)5+PwBhRF8DEb2VC-i4DX$gk_yt>c32lyeGcxL%8RZk zS&zLWJP;FJfa{{$dH`Nu6zS_@N%^wPsy5PG9a9MUUO4H^I*T|FH;KlV9c_~LcAZ|! zn|Ok)xTtVh**Tjf86Ttl;?O`EgIr&SHM#!sI+^U5UI8&BwXo+>D<0K(-uqC<>#$_| z22bZ7na#q!p&W1I$DrpZ&idWi2i)48>bFn`z|U_aF?D;r0_D-3AoeF! zthI%5eop(Rl{}&5x4dGMA~!o2zi&QDBOo z5XB?JJB^P<^6&Gf5-^7nSf);GW7~*<=#A!=Kg{SHMG}Umco3JvE7H0LVIAyE{ zt7$m@Q{OgQSrUWX*f%V_&CmSja@DQXo2M>Gpl#Q67kh!mZ6t714u7tBg~V(h$Y6Qp zyqIBk!{z01E7D?p${_B&d!>W{e43sbTgTwFU)Ap#eRK5^JZ;(Fyga7+_Gf(piq8vnwOyw<&>V^CB%<o|0!Fnv`q1rm|iIMBUB2fF3+febiG2v?K}>ST;?!*T6QAUGQ3!6 z8QdIRp11%TK(99D91b^+6Jea*^cy}jScA0ns{6~+q`UT*9 zb8yRH$0I|gEf*W5F8mfUEpf6h<-5DewK*%ZBbNg9u~nOepRuf;SCL-SD2oXwpW)C{ z16Hg~6DxN~EdDX)L{fmhxa^C)dbGECh!&|pa~9{oL3*?%?pG614U4Cho6OW13XDvL zvn}42Wf1d1q>DMT(a=k(de?%AQ+3HwZumf`9X)la@^PhdjS|$!9wlP?+Z^bw|G^!P zlnR$u2pw)CxXYq;VQy}#kuJihjUR+$mZE5J^`5ZvtM6+fLMev~a+8H{h<31CoE$w; zLh>9V93{=Sv(OTp%~TEj@E~cB^6fl=@^wxC(u$?|qHDK_5LjX@f0f_<6;RQ+B3xE`raV18$|s|6I{c-NS%sE+RB*Aon>_5i&`ZGgB;atzdwo2ga&?ly1557^jE_2B15WpRa# zvQ%wNT{axy#^SURMYuY@W?(=63ecqBeG!;DStZKsePkZc_w|>8P zN;p1qztnQ0u(qG+{S5Ayq`=)8McGqFm-zD)w5`ylRN}v$<~kj~17fuST?m}>BsHn` zS*rwf;xmcI5pVIHMB7sj(@KU`EKdDBjbx4ef?&*CEbtC=n%qlI;&~JVPdInj~u@s z)myhsituWzHSg5RIwbr)y3m4-j?}=P3}OqaS`~r~Dpg-OnQJZZOl!9bJ0a03KC)T4BFtUL;*e|xinV9c8ZWF8`Oc2!GUV_L6DAs2qo8f(U z?V_*AQLE-zvv)N$R(!V%XVtnK2+K~yZ`7V3(zIK)!_wyR5Ok@fI+-=(2O$o}5}drqlF#&~eiuL^63UxbZiRI3HM+Z#dJ?%=#1C$ooo`ly>*= zZt)$d?|%y3YH}~D5^ZtR#q4QCbM4vIr_EVoY3>B36ZOZ{kyYYpE{Ef3rJ{MZyqU?uRRi4vo-R$Q#ucMw|#ecVs#mfdQN@oD`yjq4Y zytZd=0n!Q8T9X$SDK@Qcb_1_nY;`j@2Zx@EwVMM<1;5hIRgY$M&-%`! zQ&}O2gfI7Kcy$T#ys1b^fL&2HUjK$NJs516oMv@O8Wh##v9`0QUC-BCxPps`B^T?a z&6RQ5qpGo&F*$F~Evn^c;+S5^SD8B3t8?nQ70{HhV(V$T&xqG+4h@Zt>phN7^A;2} zxBA#wnIisD9~!ptHiQz4l|C_^5wJd`?ke*hCa+DV=L7M5ej=;7Scy;22(p`T;8SiP z#}(ExM*cngkw)vIgy)N1$<^e!K;EQRhsqnTv95x z5BUb#zfHM&g3QzY64RrFfm1D{A44zfIdpaRW<>%+%Gzr;zFo!@7~l$Y6wI^2)3i`ghz-}? 
z0wU#g_fyUy7Gv`a6{+;aNP@l)p-kdNLIZ{(id9Hwx_8)Kx&;9|B$n^Rz>{2+b_YMW_@dksjS<};e8#ZIpkRbR z-he)R0Vkf}=>4aC#!#PW@y=4fN~@>){p#PRJQ(FPqd+c+<7ldu95aLpDk~xhtd2j> zgvJQ3j|MX4s~shu9QhJju@DXORbq(8$#Br+3jGMO=GW(oIs^s zX}7w={+D{vIxH?HgTutt?9~(B4}gFK^tVnHg6s zCNv*nYq7!yn$t-t_R(x}mN)9iM|8Gd8f#8}p!fu1twBwLgc^eqll#H(N51l${V2#H z$QFsPdecjxHNmg_^Iy`OdX0{!z_8$|D|oVep{24IDW1{y^&;Q10(_Or{b zFRHPh55A8bzaZdeHKhkm*;Z{BDm>Fs7?@#iIX5_Q8#M2cheD^%ge%MKh_31sijkZS z_1L%YbG}d`H-+14V*%*hx;9+ZxK%~*avC`fQ9{Bp;Xex|fDp`;${{InlQ5Sz1Y<_rma7)vRE9Nt*3qva?Tp2-Ro91toA@L zhQBb6`wWN~DwL6x=KwX#ImiXZQi)^@6z&X%2b#LV+cX7}_xhE#YZl<-Vr!}DJiOm( zC5nD6(brBKi1@61EtV@7ta}}A*G(pjYjg=w@hBbPN(J!J$GU@ma2l{0Nv)a5su6x| zqPWM*-A^8rvx%QeM2)#+v zYJkif=Oc^{M{TsCiVU6OU^D%}&i$wQBcN$~O^$L{D0vof5-N6H5edX-$gmzSn-d)c z6X)`O&Fu7(Q?)Kt$UE$&p@3i!Zg&+O|5HW;wrlZJrD^B8O%UFw+xyz}{f@p>^G;Ui znByo{`XWvHqc}AUn-;5(wm03$Rbz$iQ$K!oVEYrZ*tjBqUgjbAm-q$PYG~HeAguOy z?N~)IsJiw2BK(!X*J-+Yo?KrjLb{AWgBl$4pt&XUZEgW2rBZUK!k^!0rrOd?)R$rD zdt`JE-OoxMV%9S$iSQ0p_oamzlI|)Hlc}?xUlbczPb>kE1u>2C(V%P0L{&)MWl6a@ zJ%pAuVF{65<$|V4fv1aHlxPz*|N91BBKi~pHU!Ak(;wa13)5wsl2-9T7Sqmcf))12Ci7yl!j7~gmJ0(h4-+oRu(DycLoaL*BePOX zXu|F5r8*yoE(oTP#kfDr{661r(!Uaq!qsN>y^Ky)5pQusA7vix7Lz8F*UIF!%kyCJ>kYb#6Pt zKeSgXk%S#M8*n2QZ)b(qq6u&4P1Z3nkBvQ~F0M2bQX`jN7(t&vc&ik=H68aA5x%Iq zK4wi+=RG(2|FbMASYbG21M)>(VqFSk-W*jM2hz7oGpz<186+|@iVf=YnlrJ?)f%s3 z0twBX8&QeKOSWhIEw^>4z(2#E<$%uYQM8XeDyD39bl{%vJ*tYdb#9T6AT~+UMdeW##%bB3PA3d!5*-%icB!@T={^q3B zVExZl4q9qQ7m?9jDCpTJT`o<(nwwZHk^W!ZV|mj+q^<0FA$XPhVQ!OsZ)R=XM9so`qOQV`vyp8HQHPAI%hoi05XyZ6h^*t9*vjqpdXtC6{h z)qK#G<#37m03##y%Xk^EKf?IQiMxrkoubUZ@pFUI$!iX{^cNX{ zbsDzBf}k%;iQ1{P@W0*!CrDW5+AuCpEC9qPuSQe{K+~G|WS3j1O{$q7LbfSw3!cnU z)%m*0IQu183p3*J=ES6=UcL~^G~9kq*||E-ZT4JOaL4$4$*k;J*iR`0xwuLB%K~y> znrI1YN4i60N$-p>#3XyhF(FiYZ|c=q8GUGu6Ez)g01ZIco6wIyVG&_+%-bVo{NSLZ zn{lZx@vrIrCM2g1gc)T1-{(}}E2-X(J9vp<5!a5aPLrEH&Fhi9y$?6`hk<@@q#jqa zTA~e3I2})A4g&HH_+t;+a!>xK3g@X}F%Htv0bdQcWy;YeYwN;7|Pm>gZqZ zb`HxMA6J@c(+D}Nm$vY9%WLY=FS}0Ja4s5>nz6b?4T8VG*iVeMueO zm(sSry7?sVa_xjByJ)ftYu&UzMl%s$Fq-gDo3H_5*i(M;6OiXH%K2C%DaUYeuv~vc zd3^s>cUA`Pb-f~pv;mJUU^VG^V?Bs?($``W2u7T@+W%9sAywP5Scyj^z?L}>tG&A+ z=F@OBIb>ZAy&#;dIiiZxz6guB-!-)zoE_c&;V zrVAWowlI{S=QZFS&wEdt+sg@@&xt>$pcjQd8DGlqzQg3coz^0P$Vc347)9cJvnzoq z@s0>-bC}D3ktc=mrlKVf%VYPRyUp7sboZ4GalmL<2h^Tz%TpvO}TTM zTIKikC+L==t?h1mpT+08+Tk!YH|Hpl)W_w|oxXOX>C%W#+l_c%#hW9?kHq6CBvIGx z9J1J_>%4zTXdm#Fj+E~D^x0*}?y;m=H-qi4mh^RR_&eRQ1-JZ(93BM$1?lPI8z5}Hrb-N-(=jNMde@Ar?z0<;hdIn79EPPNm>J03#9V^v8eLyTIF``w zopyDe5_6(BMe~Y_Ufb=ktiSMA2H`;>+b&)4%{4urz|`?r2@ANo7k!6{GlyEM{OdM8_sh{ju*n36kX5(KXVke7Sw6X$wA&OU zachp^=rNqo9xPe@B9~L0Jo}+kE*q+aLjljr3ai=%;x|ftAlMt$J)8jS3olq${W4uw zdZEGlu6XU05av(?tE-LRoVK>B|FISUU|339_Dx>$KNTuV8Wea#pQE}h%R#)m3-?dAa(umNGm>)r zlnR6Tdrjq=c(;6a2+Cs8;enwX3zZdCZp6gI={Tb)xBAipGR<#s&F;y(e|1y$&@*UK zrd33i{yl6}3p`B7)vY=QDde!*SZjo$##fdu@m?gTCY(<&Owy>LwDkN^+Ci1cCbAiw!LTNmM*`(`%iRahs4-6FMIZ69_;6G+=4+QOsO(H1Fj4nkfmFW33QEc$_BiE~0> zE-NnbAyC+#WBDthe$uC41Pddi5~JyU{lWCq+qA+Qid~$P&83~3y8WpF)X)8M7IKH8 z4e7GN#%gg=<`u+f8dddq^Vjy)_0O|8Z@%zI)J@M;1g0ZS5;$Za$1J99%!WX7#<$Qc zEWoRzoACdJeamhs9K~W zY2ELQ++J~jc8dW9O+L_&2g0#yYEl<6M(U?8uPxu=emzmg{!Z{zdN_*%yIKv1k6n_~ zHh#P#R4G;rLk3&+@I!J$gzSmj?tyuj3*O7Ax4U(9(kPXN^Qe%$t_NHB8Qqr)YEqx( z&f5d36|YmmiZ-_Oa-N$x2afGI1?+(zo2+L~77avOux_8GF*+5r5UnfUrM42q_0rz( z6*=YQMod2BHj7?c1+o;^7dSsWi#7{9<%4?j8R^BZB2Ba8)KyEygC&c$(eb(Ek@BfT zi?~0CZNp`H!L%&w)t&dnFA`pd-Bgdu!0i#;bf1aIX$y&VKR~Biv2&Jz5>v(&q5Ry^ z`AKS`Zr_i*dHJphl{y&<$YT^yi-xSH&3t4maB}T1HA?^38DT9}sqzS2DY3K{=oOWJ zz;l1Yk?yLmS*qAR6Rn~nvv;zSGI4{Yi+ofovf~< 
zqaNDzRrhY~+9fL^#twtWx%jU#bM^Z#Lh9{BoYMhDW1Y9*{7YrwB9?vuaHKv|;AuLS zX2u>fJ|BJ6jlAB#0W{h)e@1jxfRuptroU;KsUkKHKKT6SeoXI!LxLnZaH-4OC1 zW!^2j#gGe!PdN5!&UG66i}#2!?;0l))D-!$Qq(v0s2B2=yGJ^8@VG_F}wY2Rz#PDwM#X}5yrt9C+ts&!;yH};qp#fl!T1KKipY$n2#3S_(=ifGEyU!F8vbbf9HxVi>W} zX?(W=u0d}sq~qXl;pD|Z`2I0vNh@<$k9vX6A6dlzkEH$VweogFUsYxI@nzvUmBk@{ z91c#NmacHlL+EH(J)6ckoGQT##GHJ?OoeY4M&Q9z2u{S87VL%zZs8EE%v6TYPpf`B zb%S!7eid)iM`-ADdQMN%A7iwvu#fUtcB?S`|9{f$K0oyWibJmdRn|yjQNK-3o19}` z9LX!XGe9u1>qFQ)9@Wok$9++Q1eet7|K-U^o=X*zqUaXBDTs5_GcU-Xnf{R@^T9=b zZeH~u9GyYmt>>XPHOSd6Sv-AtIgIh7r=<#qRP+ zjm!M&38xHq8hI>Pw0$ZTX~f8xQAi$ya=Vh{SG+E$ybQcd|2K0()CmF^v(cU_F;&tA zOQfo+b31ocGawU{(02xf;W1dFRW;_nxals{6_n&Qi3C}_2SMpkVyWB7gukhdfhc8y z=mB@j^dX>s3cO;epS&wmQkk2FJj6r&QO3fnV+!qi>n3Yuxq7Wsu!g?s7wp~#+n{Tw zW_?tz>u!g{(D?EPshiIZ?!v z_*`Znt5mujUf3U5r9S7i5aXG_*#-Q>e@_36gx{fD8J%@&Y|W8CDjm+*NG|_pu<)DU z*Y}m!H$F3j+08IiY(kU$3X~^|tB6;uPG{9*Cy13YJ6>Ry?HS@;=&U}`4GGpd)<2gv zp!ohRa9|RdaA#d1xjt5%p2<<#JUr}c7`@pBcAWok{AUIeKuL(wyV0*D}|4sE6_G$CKt~65tNiyV=AzgPwvvb1xOpWpGdMw)C1cKXZ zy_PmO+#yrwU;3o(%>q!au6%zZIA%#qoZ}Sou>a6(!vUA_bo6o8!pIr#KM<5e%hFK{ zuKkzS(f<)-mA7pb`>nsWJ%?3>^3gR33HX0l1UlbPXE0iTMSnGqOx{{FW4;d`Qn#l%CFBlF6o3e&32Aw*Q+7#a8NRf^%v zJ|hv;_hOMz32@+?WC2{J>DqAwJA)P3my8{oxPO8((l`I59d41)mlqlsI&FV&uLxcK zxxqj$QK0ivrXtvsrT)z;XQn+GZi7lSgPb(n$RgUz*ZkTei4lyS=6)XALgV zKFB-TU7xoF?z&=%L-+F6|1Ol@0s+NfqyzQ`2|iX~{ZT)vY#m=F&}#JD{iro0gi4r* zxN5OgG>lcHk`WOQ??F_k+gt8fxERnovgW4}YIXRFg%6e~&THrD$2p`C_*``L6;n%V zaZs``R@|ig2d9p-B{60A>O-M31&5*$5U087=Ahs~Yf65$YMA;#@$|^FI0b|IgHFEb ze&xn|6Uo`1cMqDdk1y301}w~*z^1Ky=Je>aozKOgSq51iiKoEjDU?n%RtMvcj+9Uw zD4iVO26wDxBL2^MIou@jDYz~bL=XgHz}wyupD$^^Q&pClRJBh`vECA(Y+$~*?oV=! zrNmGF0VAG08tPhB9Wqi^-!mDmG&>5#mU&fr)gJwFJw4NF5OGbXmoWnT?&bEMF|svP zAWR+ad!*}Hn~a{ZnZdBaM}Q;E4=P7h*bA~@EC(I1*iZBCjcokRNqV{z>LZzx|7p#T z(Zif$gp-g1 z8#jdB-48M_fXaZhqO^EPCt9_7AV#OjCWlvqNART72*?E&ow8^%6Xr`YG=?#}vWJPm zw05%0xqu~x`6oe89ElHTG?mL?F`*{0)7~TKbT-bjx8~Vgk7At5acdS-B2gIYBko>U*DT&Xj%NnZf~X8|ut|A@p?{iQD2X*;)rl zLn_zpu=l?nhASP2*-QN>%CU71{}}X5NHr~_x4n1auC+<%eJT#A<(*-fB&q*VCq?hy ziqut8R#w*1!iOxz^SP{ZrnD( z2g5e?3Dr}vw}uh7{na?MZoM~7)Tw&;O=YIRJEc@%?W}eUab# zDV3MZ$&#$z8}GGyg%M@|rQU|)zER(x{~kSz?XkVCs781gvZ>15O~bhvO?+kqbWC8$ z$N$t**~MZqe0Mb^_kTN6lMP@)P5-xe&-azx7iYZt^#|p#7Makxy~5wciE`i&rE`6B06=g#~52P^isB%ijF2khjiD1uK93N^d)asquL{$fb`b(N6BAlmvRRe3SV}VSdZPhPiw|T8 zf{5E2s0v*)O1;9Ff8*wyfTdlXb9OYn_^_s*_!zq#FGdN+NWVYKHSSnZ%QiOc<-A2} zqr3T1Ie(NPIrw3j@a0edQ|_G`5D?KG8X)`A=nubWZ8E^FyC~GaX@F9#slnh>XN2xj zRS>I5_W9}F!KPt5CMRl6*NC2o**jPD%VvL@4XJsm@mvn$v3OmT6yaL06$f{<> zM1br3uZXv+o@!{+nb|n*)-N;Y-mg@whInCI%G-F$1v$dCZc8L`^R|Xl?dG;NKIVKka^0{e{(~ znv7=W`$N6RwFe{MV5H8(q=+Z;^uxmM?=7(>SRp0D?(Dn4a_j%)-*O^0DT6diCeKrQh6O#_%IgY20aw4+spX^tZ0vF1r z^j!OW|NBAq{LLDk`>wX1Q`bLyUQn{g^Y&2J9e7^1a66Fbzu3Y#&eWHg2+1ngeU}H| z#NjxaCXDv-UroyWIAcq~^S^(F0_b{5ax>Vrf7AxKZ+)}FS{Pa8L{b(0%VL$0nn{lP zB?7O9vKA~4wbVJJZXu+O2f7b!K9YK39^YM{$-{Ab-^zB(wacdsk4I0d`_qqQmL%4M z3^kPp(Za@ z)^)d0%VEL$dDMXNe}`7U`tz5NX{V2;VaEcbO{l?M!Y*FdIDY@5Ma0af^`wJt=p?GL zo8LVfzDvEBd=9IQlDZx*%Y+xr@8jvLybV4U)%M#5CbR_aL0#4FbI;J)64kDfqdnh!0>CMr4|*{EE%FeE4@J5y&H!C#|*a=-ZPnOTh14EU%^-v zG>TN&9}-zoHAnb8Dfrj9Jmd9rFsQ>$zn9(qs9p!b8dwIl8~azBOB5O1Ez-`Z&v#F@ z2RTub(7Z8iowi`U@9)Y^H5AOor0Tz%B{DV^HXZ8w-18FpUaj6q>r^hRk3}i)zo*(y zY6oIJ(Nr}hw(5F4&b3Kq-`oN=Juffg1lfF)5^CEYWc@%3JDj0r`)Bd}g>}E- z4NiS`$kj>?(%^e)8wC%~w)y$H@~(&9^)bv;tboVmdGktbli3BoJOA(1m`~1& zT|xqm7BRd-rh)0{45l*($0!f-cLe{@z#B z-TbEsjMsZ}Q^8RO8_XZEBh~Nwgdg=wNDq(ix+b3oYxW&}Rl@vpDr5MMeBsb0^ANAy zNa^_Ly2j(|i17(Vq#2|Q%5=Imb{B$w{?*rKtP+TZqAeO&-!Htd6U$)N;0<%+;{~o& 
zrhjo;f?%Cm5E@db1%(T5QWJKAXuQqfB@zFDf}oAmu4&=NmNVpyRGTl}3SpYlWw*oo znS^&C9x{S{h4Si~_ezOYZ|1FX47xez?2mpp){zembIjL58~a3f4zasogc9Rb(aa}- z2K^ZB%dU^a;HS=7raDD6jxJB+HB+V8Zoz>e&FaiawiC?51R?y_`WcZpdsGdbWL4A{ znTXsz@Z|w|t=f%cHT>7XCd!z6`$Yir$&dOX{&6RMiHm0UJdpv3Ad|Qc;TS*$i>K8O zBS4oAT8SAgj7wbu8lR5BWW7xY$q^a~_Gklr4>B#TbMV1;EdY5=i8IHQG#I=T^s%;Q z@_+t`Um-sIB@DIRSF>|cLJ8Hpgy}tK+Z?Jk|uW7 zAMC_=a7=|vJzK2&YZ0tx=jM52m;yLB3&K}ofNxIbiXJD}!;TU$IFLJ0#)l)<8iXup zyc^WC8q(aUQ|=NE+j*xPob21gLjhfbhmO&(hLs~h1`5hsHir!m8$>)JgdB8sp-z_3 zoc=SM*lvZ+T>%vc$1}285rrm&QYaEcj!N}&g_@Par1N|wj>aqLhbCeCTIf81$ckcR zyk3qPSVmNgk>GTA;CW;`LHH@5N1NbscwQbkUsddeMJSre@||h`nfL0+$awBvl#Xho ze{gkhW^`J}4%r*e31Hh?!8i)6Y8Rsc^T5l6f*;Zey>QkQQRej}IeW4LrN*(2O%6k; zsFVsJ_V{duzntFZ>qTi9C=-5asgd-rmd~s9t}~ijOZuKB6&DH$w6{OEkyS3p3f00{ z(yT`RNeOSBa()PV^=~!ML;&N>0~un_7y-7>mDEM|TXs=dq36|?Eo-v8Atc*1OFEKD zqYn3W(2lMibF-s-_x3WuiKQ6Ej$ zCu{*45k9x<`tfbS=?sBm6FGA?Ph+`du6F+8IC>>bu_?y&G#fZGwcTrdccyGl?x=C% zV={^vzmB|7N)~F}y?-GI-r@F+AW4MN)&{5<5+R1;?VEIi|4o$F@1*Z}if_{sMlAN< z+Q>?{b2sm4BGRbY%mA47r?06}giSRS@KUmu^NZ?{Y}yu#_D=+{ZIKedPYl&Mw`@3s zsH%ps@3y(AiP)Y*wvw#ThW30>@kdy?NRz!LAeYl`+1WXr;m2Z-VV~09m-_k+y&n|h zdxC%S+egSHj)G5J-BJkr?sjb5)~^?kdZOQltZSa5C|Tui^$D~W8K0z-C-ER8(Y_<# zH1E`YbM+f86XbTAw16{82Riu-%eQo1|C2j927lNDKONtl>_p!?dzwd8eD=~W(Ff%# zCNFh9?u?Vkx~-WF`+>hyw>%hf+VKM^C4Jc48D8Axj#8{^GGBTR@k4T632Ko{J@vY$ zD>Ug+F$`QM$kV!k=mc%Ay*d}3(4H7P>fq=jj=i*+@$378xf35hist`BYss#&2nd~o zDwV2fp5EVzebqnG2K=XH>5lYMwpBaJ5zBzK8k@bfy5xKHBA@SS<^=fnW>=S&Zlt+L zSOX9BMxDuqQ(_pB*J9h5W}Ucr#k<4$xwTo76>HCvcDpq3{o2lnMIF%H_;XX<+~>;8 zD!EIS+44ZEzV8c_8DQlRV~8>l z$(N?kXL=XTDU;cco@2Xhr&WT`xI7Pa>CD*u)*dJ5<-JxF)?X#@+vRLUL9aK)`qnK_M$ad5vcB@N)<{wDWtalD`nb?G zj{k0(lJzhtTmR6&WK927xl7(6brav@Vw3e)9a0Lb37`%JJdD=#84gApWKEyPT&m!5XK%-o4!sqVTJ~yYd zeB4xZoR%kX7v(5+!&F{fqkbOs6qm4hL>}N^)L5BnskN050rNw_7&n$ng4LMYs?TMt z6U)xTA#6-YiYHv&ub2eN@;0G?eSO ziW(l|c@^9mZ(Xi_Pw|H)RH!79N225!JinOM3H{IHz{Bn??}KZ)NcyE@iPA6ylQsPS zFo)-qFHgyb^8XAIX5%)U<>3^hbEk0tgQad0Nv32jNpn-?Q^!A%u3^n@d7(bK!@9ig z@wsXR4rSLNjJ=DnNg|0|l+|f9o($NT%U$o$8YLAPmVa1BGvcS|_=G(Vs(^q<`hyzo zv2hC5G08@$+;0-3KTVU(e_kFR%YJJJ<`WVuDC=U<#XE24KjMbdA@tvWEGe5>c(cZ0Ww6(g3oi>pI_4Q7 z8Gx{HYa16{^79LQQGM}!C?(Ryq#j^`#+n^IcY=!O_J0(_TUMo2-0%TY5XBmIm1?;C z;2oA(gR%QMu~9FElEru>TBcFRi-f!0vUtp+zK&d$bheF}p>?h{jFGCG0Fy&Frcr|S zkU4P_)fXfb^O;cY`~H`+K6S8-=Gc@0G9gbHN5*{0frdT< znPhtfUD`;Gq8!b)vbRu$WQ+|iI?BGK7t*E0JcOINr2A1d`G_iLZSFnkv&g!8H6lwa z1qS8n#1ZFo0vT_5h#w~H)tNi~rDqGXSZ-V)dOtg3{~s1WL6z_XV+OY+Vl0P-8I~^8 zsTAImF8?jppu9>rFm|z(v@xH~@?On8ZFgD&5+>_cvphZXs3!M8fKgF6w)3FQk?~!O zYKo|fSkVX0H@pno$Raa4uiq>>Z~cJg3$hTc6h>JQV)|`uN|Qg7nA6M%D2Js&LwRte z>XkJ!SbES<=n)@ozPjqqB{zV6enguCNT8*kl*zEq8m_O#ajW{znB`Ywqpldg%LEjA ze>JtjkS_8s;?=o{UCmuwA_LnpmWLfc$X?1^bb#2r)uOWa~65Y7i}&zpR4 zn7Fm-f2tNlFe6aE4Vq{%3ZV7dQc#QLQxjxLya1Nw?^Ls{UUJun8oi$ z@~up0F?eg(9gErP$gH-T(o+_Dul8G07rlrz1Jo3Og@@EVP4v~{@SH!1gs18wtN&$M z9gOZx)Q=++S6Y;Id}~u$vwio+)x;A2k=FpRT;y7QSQbE%tmWzOfEDn?yY{zS?9Xck zSCD-r#i3P|=1i|Pz6jASI|T8?K&r-|F=q;0_m8l;h|d$Wrt^7w`K{Jr2&IllBBL{N z%3e(B-+pE(vDy8M)M65M5RtlrP^UNAdWvvbm3-1pfBAq+_52ZD)9!9+GVfWojVvrQ zwf3~CV2G{q;Y2HNAMR|*=aAjs{B=JJGjntdLiizeo=tg|59{>dJTcK9dEpm zelxTLUQ{C?Ns^eM4IEQ9-~5a{D_xmh7E}`yn8@D&@rrv2UXy!tG5Y~{H%br0Nn1Bq z2sDHd%aH8Ln=;PP-qrD#JM-VdGv@cJH_iwrdaT1Qk0HNx?-I~M;T|13m_|qD z8#iK5M_U=aZ95bWEsNN(*twR1tB@$!WokQAP4`eaH@Qej;22T+ZF)H1PSkzBUOYmM z#VW?49V_%{p%JH5O%}c*?HtY<9-w_MupjR~0VHFrnx7)w!5~}*agfAL*z5h&#TNC~ zO}OzQIFLOisA;zo%fJnMQB?9IW<|-3y$`<%@f?a<-hg`omCO$ zR}{vi_@8EBn;%{Zxic6PH1FRi*mJJ$Ec152IO9{c-&TTeUs(BS%aNHxYIz%G7*IEn z4KxHW?o0g#^ghPo9An1(C2CZrX#^!QH7G5-syC0fJje*p&u8Hm20So^!yjCMbk2oR 
zfIvsbk<@nGrG5-Iui24_lU&?pMx0=7Ky_du+I!(w#%L|UG7^EYeB|<$@snvD!u3J+| zh<$Pr3_o+@Ff0Byy4>A#tpwe4LeJ=g859X7!397&)H^I(ct{W(GW%G)h5r37?-6Uj?~EPCK}+rBREFLI*H+28n*Ea- zV|q<9PIhXxAtbqW>y<-WX@y?uKV%+f;%vE1CCvmQcHdq$D6MT7`Bj{myrq=;X^)mDi|h zoAX$7j@AZz2t^^)lej-&u5HXG?&RB#=KzDKvt0??7$vRNpF8>Pg4U-S?%TEOUU|$i zKFZfoaR;f!R9*x!-8ZZjQFxz;w^~bLnkIOPTSDL^>J5C@uz@o_~62QXPvMJT;%R(#B6~ zD~fKNXzc`Zcu*N^H+5Itg$Fp8eataaqpB2CQ5I(@Y3*3obeXNmBr}=&VCvI&K{;{xFlS!VnzChj-8GZN? z?(jEV3}`z?Dc7pS%1<&0Geg$g6Uox9K~_@5B13F<53`qezdL2i==`6yQTuR~MlZvV zZ(KIbVl)5)BN;t=SAG`#Bljmn#zLHSo=CS%54twwygeH!(Jmof8A>lXRj=p{Go*Qg z)1Jz7!t&L+rwj_b3(g2Qk7OB;%_JDN>O zh-nNGl6-gA*80PP(af0Eg8ZSzkuvs&ac-ZFlMNmBm7T`mF-UJ(X&M$O0@2~7Pgi-!my^P~hs~JGGD2of%-=9FjpxN9SwL6}y1gD%Gq2C^>Bc(^CzWkdM-kT1+ zmqJH4g)Zif!(+Ky9!KaVQfa{ny=Irp881CP22wVeCGt-ZDb+f3_wsDDMlqcGnG{>? zl^RMQqnz&d;_&|rM{HDyq6sDfjIKF**&Ga2cvQJ!P|2*f5ppsJJ*!JX1Y<_QlHDfX zZ4QzY=Px>JG>~s1Y~a%jc_P#-3siWd>n;hq#Ye&}rPdp`j399BS1MEmY8<$H6nMdU8y?@FrW1yKJ=LOy!ouhspiRQ-CEbM{|++1NXH`R0f#uB-_gkdcbUAb!JQ$D`dRV=|RR3<)m{QR&B>Q4=aL!qyFAK z!+0}Yz7#Tp#z98rL5^|03CQF}G=B%4+9%XOc$&%k-PFF()YCvn2;vzz~6p%Ip9uv~w%8dWWDeU@lD zUrACbRPWyOm~?SZ?4`oc=#P$cw)R}x=mA3ajV&BlUy~%rXnAHP8-|hYr--Tpqw3Wx z=}s0#$xD~+{|Fb6Qi=*!1HHJD8^iYh-a8YsmG}{#CR1QRw4>d(9%=Sr4^-0N#$vIG zoiRiF#1ikm>~9g-*d_ihdQA^eR)`DqR&I$%^i;;;<>u@|bNTI<(eBxqV!TOKXMLBJ zr)eV?2MV1Gk$#xa_ld>qrLdqW`GhECm)O$GAb>3(?f_%|m-E58?YxFsQQ4odW3IO% zj=0dv^jJKEeLtnrLkFuDI`HV{D~bfYx!L6vl`hdjWZ$1C8Gj60yUy2WA6;$^^x0<& z%zkGv*kcXMg9QIjATF#6!fwZ2EN5q49rHK!noa&RzDwi#|4JkOUxWHe5{UMyh)=Uk zV^wjmzM|EW`$L0st)&71z-b|J$TUb37YSa5Fy=W~qTdrLAltY2sPAdfb-tK7Wk{_$ z4`~9ypW<1_Zav69%GkkS0H8^>vbufg`l+0*hPUp%^lm?{-8#6)d)9HfC?)GKC&c|M zdC~Uyj@=OZa;yk-SKZ6tCXGcTC|pm4H@rZju2!$oqVK&hZ1Bu`-npT`Ya8hSiBjnH zu$pw3lkp^1{b)s*<5He&UV1JLm2>)3dLFYjr?lby*%b%M#aVy8^?IjK(&#|YdRQe%D3W&3!l;;i<)x)kL32ySMy&~uvs zjZqwL|FTQO5rFXg9a0g@EJQr1>uzfZ!t%Gof_z7jg8EN=g@^CP?4&M4Lz){Mg~?mUm%^i-@U8z&>Ncs?hgm+PqplT3RI^eGR*eJPgjY0rd=E5 zXP(nUty9|ufxDj+>^(yq{qEf7dXeFpQUc2^;Q*xaI z-skPEhztvp`Q)+;PaBZ*MMr6>ipd1{l!woj-qFX#Ua>&p&EDkfmEZ20Ugw)7g8ugl zfR?Qf1wvv}>Hk#UQ^!8;M%M`K+LsdYima5Q>5@CVyp-48(l0OSKVk5N@r+2h);?gB zd!~ArY=H7hI0AbynEG5UcfF2z1nKZiZjt=gIq7WAlc1#R{k-7}Yq5g3*B%3f7sH`0 zB*8ReOl~xauqtQ4=ckZ=bLP3PnZQ{5Z1;v0R)N;HKtZ}Iug{$~$8NomK9!jdn?*{( zb+@gpoZOebUp)KdX6Syb58(xrogZ42u#gZ`Q2TSUm&T!(`qAN zGe#APcv53l;G)w&fZcP{meW8FSF}}9Q6bG^H$u?)rc36W9N>F3^E$`!wxb>s_SeMr zo;lXk_m}g&{q4CR$>kTqrH>Yf#($>=XkyR8AL?;3ZKZxcLh~*?m(>`c~~1N>M?mcrl0Plt*v49wiWy(w}{!eC_Odo4vD~D^~P0I*DSG z^Y)ref{fCR-`fTz1C8N2M@y9d-DYL@*v1&(iWyPECtZd)v`A4&n2Kc)Rf4ogX{>D5 zb%H#jiaZv+Z<|nrlWL>=Xx-1o58i}+iv%xWgpO?MOxoW<=4C|b8Pomm@}QcVQ4G~%Wm=a#jzf$e zznGv3VJ{ocIW&xi>pC97<#V#G#-pa>#hjqpyY6OXQD(>^*;Hu7dA zxH@J$FjqM49<__V&5qC~;wP6kxmB9`_=0CtuXc#wd4Dm({$cA!j>-L9AFgNCF(1aq zud>MUd|57t<$k#`i4(XP7W}v_W9)h3>GL_Ye&rM}RkgTxK z2XfBbFlnJ)=cGf}^U3JeaO$olNHh2;e|qj7ACA>;y1dBN_^#;d;MLBU<5s06LV5M` zq?_J66~s;Cwv@7N9c~U&UUVFmc6Xh;JY4!88HGJm%MNbY1@F!s!Z%#rNHn9v&2a+ihSJnqbTkbbsSlT+yFHRpetf zbzlt);oLBNryb7LIuu6|xWbZq?RWodYG|ULxc4-jz0-T#{5N*^f`sF8WF&cizP8>1 zsM!g6ebwsNU#RKQ0D6hSfxj+7$ac3K$6hN`@BAyt#qK^hf%e)oY(QY>xse&J>3F?> zQp5OgeTy+#Zzx{=tdk_vwwCG<@dd{DIo(JPNGtKDR#3o~Jvb z39Fi>N&Y(q4{K>nVEoEehyx;7X>uL3nnd37uS09BW-cic2-x6ztp6#{t0&_lB-9~^ zQuVO0IY=qO_#PNO^dEHiU(|IG3&{kV`_T72GKD2)1^$n4=MRr(dCsi|VB#=F)?N4O zg<@8FogjDDuT|rDlmgMS)lj|7z?%VoIR`xN@j6OGoK>o}ku*26$leOo93*=8@txsC z&b>VOvAS~C`G++-=06taVMcE|VT6dA&4jCKZWF==ve*&-F!u3g$b zi1K~=6Cgwp;TJUKvi}A}`TmtCT$39@Bcq9xVd=KBv;YE=`T@lVsdf6s;h7SS8r~9D z5@vEY2Nhb*Ch5CtSzKhaZnI$YFFKQ{=FU3GkNo9QLoH>jvwSQr>M}#S-dkH!3=@GH 
zlpkUIwDQ(A3r2%Etut!RZl+}{DiOY1$Krm?BaVRs_oFlxl)mm8*6&-otE;nhM@^g- zHUfWhK1Old`nLJqH-sUmB28yZCR2TOvvg8htkrm|!fc7el?7>ao>r*T-H1z0PvEK- zxSdW>90nL7bBz+1VfG>Fz!`0yt4AZVHAE}22bcSh_(>^O=r!9s_9U$3(%yb&CJn4n zm~p~w7uCUK>(r>}s@ZiezUWXc`o_X$HS(vXuq?r;T)tReGDv1a@`?%7X1Yme^fELN z3AS;Bsn!A~`)9NYkNaG`g#xAEpg6Vyi4=d6Y{JqWdChCk5-Lx$==;WGijO3O|6YDL zU8l2P426otwdvWbKxo;d!Kh!}MLHg|G+q$bVxDj+r5MUZSQmFB?b0@S00{8vO zYoO^0U5O`8H;MMCqP7^SB6e!1!v&Rr#XGiAU9cr(R7S1(4mv^y%FNj^Ed1WS192AZ z&yx(-fO3aQT|>+cs>pbuo!xJEb(~MD}1y zeml^0?6m1ubI6f1e*=y!x=d7@x1QP=2tH5lQ4%(A>af;#O|-0c8W^l_BLyztSw_b^ z*Xrf+y&V-`7jW1P@!`p0&X^Oslv_4C?1+pbh{B*qsiW*-h-8sNX#qv08%$P6s`bix z#YyMcfoC3VRLY)dOS+RZKw9*{(c~9B^~A?I*u?EKW8h!5YR1Kl`;mfW;abU<6PwrK zZ5lMJHH3HZC2b9?;ReC%u74Oh`36M4iLXeDwp$zK#%V?n19>F4q<_uS+14oeZ0Gfm zR)EiP3-8tfp=bbN^bG~8GG^$evfWyL<}@EekS;B{3Pt zq6TySrSCJa%5EE1QfIf>tHUx{x;Apfmkch+G0cd-7|d?i9Lm&mto?a`#~1cbJPz6- zjS;2m`9C8>u1a`@wMQjJToOX!@OYOq-fEZ5NEc|`jO*A_FdzT|iRc6c5;>=lnNL29wEZ zTNAi$u1KQHo!I`#W@GvHRcnrWYb$MwzF1DLN1q>JnJfM)2m-ip?CPp zJy0J!3byi9_*>qu^iQn4t|ofZ7d4%1^s#|il>6IHbB5eEm3SaWMovAy)gHq0lshRD zR&jFTE@NguQwodjv$)P$ZbhUX!iKdXmtAoq&V(c7So4wk92yElrxM&M*uv1UGu9xK zjSqpahK3&F-i{X?lkt7O);Y{_++gU@^4QwFT-A*YUSqKHW)zQ zmz6nvoK)0|Q7NR9+Y7?5s%x`-RzR|3-F&a)Hx_dC-&4bLoj;m7Yz+UNB4TnF#n>IF z1<NX7_8qF5(|tyRu2k;7p7*mQswRc53+rE1QEM)ghunj`TM1&9bEm-t5s2P zRBNcrLK54a(_hQ3hTcY=%3)De$7J(DOKXwIXkA%bHks#q$ztbKQ1~8y{h*>ZUDh@Z zn&_Hme_u+RlRqgJ1^&HVA9!G8qzuGzoz?rp{+;X2noE@OytRV zdL{4~i>Y2vP0Vnk*LP~`OmG9ICH?LZCD0lF^jz%LiZ|+NmT`a{n z>HF>v>e#ZMWaZ{IZ6NYa*i9Um%cmOLNh+6MyOjiCD5(7~98_92-la!__ohl#*cBpZ zwc~cpr$ahC-63EVgAX~+KWODwSW_`*d$z7_8<*~WKI<>zwPD>IsguNW-c?-0ZG5of zJ-gYFY#_E-B5b=2ki@fulr0u~|EqzE$bT@xjBD3CsKI-|aa{yZX6CEi`tz;pRcEEM z>k*6FW<#E``zoUzj&su|5#NX3elbbUZS$#?G?wdP8Fdk-{kjw%$>(;1d$fAZd zxKDqZ&J=3P0frZl912{U@QWjI9}~I6(yKID+OHNIl}*>(WAc#rJp@U-@MXK+4>BW0 z^}PG(UoUz9Xzp%}j1u9sH&!Cp)OThu%N_{(1Ik+jP1^s-iZC~xd*RwFqXdrxaqZQL z#s>#D7(!?o!nf+Fp~-~ZlLkGxW9nYr zHYg0|cy2Y)c^u`1ZI`%j+HO(TbT&o!6?In`(<2cglL{(XfJnYQf#$&jl09&WP(FloAadi)$u z{nikipC5fu3|ya`d=z++bH(zOmX;u&P0xY3VQmxw@WaD$lG<~*mod^}JS_wrfuMO( zFvriS0OfQ*`Eb4rG%;GgRZe4H*f~_f|drosdOA2+jF6n zc70s*0AjDuRH1O=$$%Shv9`|kGQzBpJyU}Pj)=OJYl%c}r$M+tqmQLiraqy>T)CSi zHzFH<`kzkanhvv1MKZAoNzSJ{T&3MqjxQe)?n1=Ip!?C7w8@xAa4p`l2BJkU0%;T1 z<#37!29p#1EN~c)W&hqi^s@Eyp7X7ZfQKUrO=7@_97AggewTwbf=+{7Xmxc}`vTtT z<{6ZI2y-MJ4rw0o1aIaqa7er53irI)9?CNh39f`ADUifiY}uwsGf(3+LyQ|vERj}+ zn?H{Ya@dBGDXa(jgq)d}GV3snJ{{)UXkQ{wu6+=jfnD-|I7c$J(7RNNcelO};-ya0 zTGJxhMSo3O+>&I3S01xphP=+|4~_Ym#Tr!^eCHNnN_3V=0NMD17!;u=4of;JG)>K^ zzO-9Ps+pot+xy1%c~L!bfiX|{kXuBv%jat?HDOOMKEXW!RNkuCF;FT}(M88Sk?UXn zKjQzVq>LW?`(OTmhTpMnz;o_+RDa>RR$#&)hGTljQN%va(j)!DPrLd1Htp2De37-t z0F38wC1~z3Ly|bh5q>xAAgYN%bL=4;ltGT0KydVr$3!|G+6OWzgMB3p_BrVa&Oa62(p&#i#Qj zt83Y_o%kRlsta<>m|1YHR=+Jm8O4CCH@PEjV=zsV9RA>1q0@c5Ng#3#G-FJb@jIo{ zpZ@)6D}u4{?J`H}Nso{nGKQe9?=g^$WI0@j{&lk0Ir%kJuWAL}mIdFrjr z(Vwm~**U24po(F4E8`8{CUIBY3Khm^d;IyW-FW@RiD1%2eiJKW;R!x*$^LPf7zg;K z=yKDAk4;=v1%ch9Hd&t3y{N-1_YcKbEClYDf$vA=a170-I#ph-=Qc&0N7}w==}}6H zNlcv1U1!mG3)i(Toh;a`w$Bo|OrBju*=i5F@ejp_I9q=qaI1?CM**`D&29My9X!3F zT|Re2bHB`oF1&79X7S1ksOZD*U4*{po^D+J2jOq5$4zH4D#!BELD#Y|KccQKN53!~ z>4nXo?~Q{-mx3I%x>{}P<_k40Xo+h$ZsJ~SWJB0pnx=G1Z<_a0QaRWI5jC84Zg#O| zKw|gUDQjlHMc;A2iE?A~(w|4p{Q$*>e2*SBMf?|otyO6Mu!r4Cl|b2@DM3a{DVIJ^ z;T=jCT}m4kO}1v}#r_*|7Dzy={ct<7VQ_4egRV~2 zU613g&Y{9i;pgI+7JT5(rt|@m;RrL4*{Z%Of(QY75 z_o6g5Qm(2ep<&fhy44Sfllqga%&%Fu#LBkD2dQD%)g>lBH{6QNUVf0)DS})Vtf>YY z47u<-#yLthd_OH~nTu&L!xn}1%&tgzgi)F@Vb=H9J&>WBUIlxPcA+}(!Ta*#wKUuP zlHGyGiAG5%T6{=z%Tr;yybc{KIh3PH*5x+?`vAnD=rZ|$3G_KXovF=dj 
zK7x9SC{Qg2Wskz~t&J|qC4PerNv`EZRGK6O(FGSlx=*C=KP^TX2{CXNqaOuk7+)?> zQT`7sph5-}#E*25Nhn`gnmkgY=kCy{)m6?t!ak@w)pSFvvZA&BR7Vxf>Qj9NAUtpY$LylKMEQ1a`A6##w7(&$z3NDVY2Z`D~ zYN0)bPDg3(-pY5-iGl_8g#5x7L>E11GnOlSig2&KcwuTIk&mkT(O<*C?UeZP9kV2U z?hZkVy!#x($alE@@o=>il*r4toM^(<$Z=`v3+Wz`*0m@KIg(Xth(t`GR(mXCrI`0n z@feS69dL8Dgk7O|?l2D8kuWhcDdAb-+GQHUTXP~-x;OVYQiP%#VE{#u*0sbpO!qUN zl?q%9^y<5tlif;Dnv9T#|I0EhmZ}w!bv`2b*${ZrcVFzLC%ZtuPkU~6FUeBNR^9o- zh^{3K88*NSO*NCwMEyl9r0GIfV3_n-iG=N!-sdn;=4u3f3V#c9!NP5mH(Y%jv3tp%p&p) zc0ftWd96Aa`a6=8FtINd9-GpC&tWr4W1Fk<{B z(X7%#O+RU8GUh>>-E8*YN$fq8`add8kH~($nN+`SzcI}mGhWW7& z*+m82;=N_)42J<0S?iTT#2~}}sC(E^`|b6t;Y=zTyv#ll{PuwXVo0*8cq}w8A&?SO z!DTwIrD)%PQ=wzLlUcPN=JfS$!#wluoTal&XT1ou>fsCu#Vb+`ZT{ zQN%Z+Dzc^Y_a`0tXuFQBeZ`_iOP|hMdUN+pskXDtZ;HD}GT0Tqr8bNO>3^TtL|dnb z%&Z$B>DKDPfRdt;yw_O~oGW$zg}nzPs(` z#T!}dEmPH)qh>zA{!!aYhAVEglCPK0dPau9{4Ne7b*wsBv12?1WS?7n5)qR2q5sGA zQy4+c%Z-kf+FHTZ^nLTPSzMPp&N@_m+ng}B&50&7PON%bR35vTMKhT^{*eY9N!{TBy`{G?gX`Kb6y!QE+v}UTB#ImjjaD5Mj1b*Y_JT7}OO}63vi+W_r;5;n4`6ngGWKiu zeY6U{MGUJ+_0_bSC?Hwfkek*m6&n$8#!#KyH}_N4nBOJu89NX9M$-}pDuDW z;B8HGQh7esl~A;_ns|I zDDcYJlm7V+Q(hhcf6M2-*SqFX-~P~#AC5rRHt-M3lVRPgQg-sNd})$Xc3vgT{u z*1x!8I2E(F3_SHH4s`bGY%#jS!ckN)bhMPJzemmGU;z%jzvqyL6AoW~GHh_sNV5!o z!vZL3=KTK{?PUL~EezyU2PY4#MUJuEN~@Pvg!X7%jfqM;Mgw`~*ZLeqs@ƵI{M z80X*KC!cO})%AjI;C3|~!)4ed*^T?w&78Fl8CHJo5eREE+`I8+J1=SlWu9(%A7)q7 zbXcs107DUvLm~~w$JKL^W%M1uv**y`n(k#yG26Ow z=XvR{%lFw^)T#E@m9Ca8iV`(_r_ef@J91j_BG$ScQyD$0Oiw;v|ISu}8TI7k>9 zLxtXLzjqJz`syi}afCbADj}F^X zPLorp%pj^5i_fWN*Jz*h5Gkc(iXy|0y$ZZMrVEqqXDxiiSXd0D-t|X=_DZa(!IJyp#M84vy*<9 zfX9amkJ*?WmOPdFRwSfG3|X>$ehE;DG?cJ{2pcK>s;!*Q z=@qm)NX9zwYEIJ!@GLc?Oq4M?i#g20z;;gj&LPLs-CvX)+A3@g`4WVRs2(VP{wL>~ z^iQM5E;eU!ysy+Cpj1jWu#Lrm_$1j=ufaYI#Ds2TF6$vlEzQ^+Q$1QLsP0@$BzXMD zmMxxB72T&Pd;Ui?Z3cHQ{Ex7juJ!|cZ=s`AL@Q6)wLeBjMJe;q*_!yWJhS7YOYw%YS{9a4S&q52^yw?|TsHD_p2Wlv3$Y+p&p+!F3wsMG_Jz&)?2-o_xMZ zwsf05S%bc~pGEU-AkOKVti0RGQ7(((=#Gh7Z(i1Reoc7Qb?6R;)!|NB&PY&drS(MQ za^M(c%X!?fw!dV4X&tEHKW}VM5O~(qX>Jo`f{YALmYY^ceBdt0_8dY*EiTazh)kJB zJ4xV2F~0}_d_4cS8RlnEuW_f8MbWd)F1EukG!$Q*vt`2^=n?8gz<#dt@ z@%Q{~Z4aIZhVj^a{KBuA31Su;;O=m>z6XDpef@pPDS}ppGT7f)^zNwP*RY&U9?~tT<@PTmdIx?IlcFW6rLB(O+Fu0r085?3tGkLjp^&v z2=^0uoYhh0`1q7$yZIg*TYSH#da;F@`q;Rnl=Hi2E6F)CsKj4yu1ER|{55coJx&&_@~b=&`=f@k0x3*?d>3?6iG&*+F@YrV%S@nB?kUR{~*w?`|q;>>1hKjzcqF z9xu(zpjDbt;{)&|@|go!+CLvpHVsm%z7i09N`my7nzMv*{55}P0bIo2uHAWVM zo)|$W=9nA$_na?Ygow@E^BXK{M_W-9fK(ud#TrjoYW5~J zs_OT#`Fcs^HjgOV!~2MtY2SH#{>T?Bk%H>V019ZGa9{NU)=c$m{|R~=Q1iOC z?|zOw1=!oU*gU4JeQGbQ`yXz&#Z~Fb>s>a5BH`B`IB*)^F}WOJ;8xBZiB%xrbC|{m z1TFro*Xw9*URJW9nn#-wc;p0y0Q#C-ZUS`Ks)@s7EUdSJkg&b`u3cmJ5d}TZI5O?S z^9;Q1UF8H6&SP3u4~7NbwM11J_sJ^$ulBAps0nNfvnXOg5JW&hniP>hM34?yfyfF{ z1?gQ49TFhYQHr3n?7Dyg2?C4KivdC)0Ux0Vq(})ZG>HL3Is%eNcfy}liH;7vvO<+ z(9zrlrV2@*znxjywN{iXvbf9Q1#pPAlz9?r*q>jO-9mv_9}0$ymK&^s*;@3#rfu<( z-hw#2kidm3zg+lK0N=-fu=j4Z{rc=Lgh`p0utn2RrRrkA!WGN}KUbZJ|D=@lm4P?5 z4)*aIN#xG5#FM)(+UdmU`8S8S#2V2RW#?5qjTX!Ce51m3Z(l!0BK>t>6>7YT$0Mxp z&mwY_*^%TgqSadA`w~3j&Ey(ooEVd~sf*r1me;hg=kgZglLiaIKX=;c`-;_)liK!` z;3UF9$tIbY@=iWl#nL4H(Zt_e{eC&f#fiAta0N1sW9H^ucLR%=p!=FBR7U1#0e&DbeY%QN9Or0fM$##`5RrPlpt(> zq!wc;f;H(Q=7^H8k1crUx4mGS+*LaOGbxrlx+Xi2oR$1LPuXaL%Ysw5JH!1|)8!6! 
zl*bEQjZD5f(Z)rp2>AOCzS_Od@5s{2sv@b>YXQcH_ukSb4qHpq1xAPnXW-ewUcQN_ z>Fi=SbD1kHl7-Ja!ebw>7dB!{ye`;Tn5a^G+OI=SbQ#-c4u3K<+nyO0dEGD|9o`pS zJk!0!`9As3myqXwQ^Z|%_>ur@mt|B}b(pPu0q|Z8{Hru@)bOASaL2X6BY7=Jb!W2C z=3JO#5zQ0xe>k_JLOcW1!^~($CUZJMpdp~yKdO#I={E= zJj96sI9?9&FfM{MGKj22#mC0ks~(W|VUtYp7}IR(Ib#unCy8|z<&PPb&1AW3OgM{Z zWv{k_i!X|}l@k9EjlhV>`^?!(A*7BJIm2h)W+^z9?q@aK>rimQ!Z`RQeD0B56XFl(x(cD2%08D$%xwe!ZCf|+ zg3POg`|r9Q=TWjwFFmwdL&HW}*QhzuLlx;^$6zbl>pS9?qA$X1Qr_Qx~ie?UQ&y0Kl6klHxkB2h^|2i>9#|WU3j=CX;SG3*Je16Pe%B9)F5iVuK`2! z0k28Au4TM6P0EE&A(=^<0)Di@8Us?y; z6vTF}VZTU+De3TN8!5L$EY;?K8CbXIfvNGw+&%HzXfpF^c~+HL^+T4Ksog%K++x15 z)SykV2G8y_r6}Cnl)$Y&8hII=*k#$l>#Tb3u_DdPasa^S{XS%O|Ag$i_o3NS%`#Os zr}MsYi}s%&A4rj(z3iFrTMm$wBv4sF&+>0ME!-EQ92M*6R$9V_Lbqx>|lj*+t3XY zQy~%!vV)9+&pEU#zh*V{Rg)*i4S6RZG?IqV9?%y-FZHi@>#ryuHjs^9=A~Wmp!e+8 z;G-h4ntA9P>^xTfkd?(V;hQC9U?byS(15Fg0`bHok$NAi`X$hU=h;w|CAs=jj<+Rc zz=$fRrc$tIg5*(yX31oK@?B*}tX`W}KRM@6VDv7}?6Q{XW|Tfpwj3-YCz1oZ#_5l7~K(Tvz$zul1U5+%{%a@hWG7iS7?T<|9 zwV30WbB9=bF4*Q%kMMB3x)?inoi+#oogI30po=x>TB8{pJUykSiY)0rLD6IFP@%Xu z!Jne?#{#d`#5gsGj-_(^#CiDWDaoH7{B@#X2s$A%5a~)o5i#9_TR^;~cc5ACSXD27^XwCd*z~tElq**MWu#g!zg44{9j=H8{UVamndBdCN<%b)^_> zT_-%6S(Vm+ugeYJm4JSnm^J8M6-J{wgKYv*?fd-*U7n=!BHELaop&)Dx)lWeho%1l_6WW>2IzD{iFh&+N7TUrr94`{s8vpAM}y@;UtIu de?q~-I1a#yi+I$fO$5imxB)gdsy1+s|0i#$43Yo< diff --git a/research/inception/inception/BUILD b/research/inception/inception/BUILD deleted file mode 100644 index 21fc27aa5..000000000 --- a/research/inception/inception/BUILD +++ /dev/null @@ -1,198 +0,0 @@ -# Description: -# Example TensorFlow models for ImageNet. - -package(default_visibility = [":internal"]) - -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - -package_group( - name = "internal", - packages = ["//inception/..."], -) - -py_library( - name = "dataset", - srcs = [ - "dataset.py", - ], -) - -py_library( - name = "imagenet_data", - srcs = [ - "imagenet_data.py", - ], - deps = [ - ":dataset", - ], -) - -py_library( - name = "flowers_data", - srcs = [ - "flowers_data.py", - ], - deps = [ - ":dataset", - ], -) - -py_library( - name = "image_processing", - srcs = [ - "image_processing.py", - ], -) - -py_library( - name = "inception", - srcs = [ - "inception_model.py", - ], - visibility = ["//visibility:public"], - deps = [ - ":dataset", - "//inception/slim", - ], -) - -py_binary( - name = "imagenet_eval", - srcs = [ - "imagenet_eval.py", - ], - deps = [ - ":imagenet_data", - ":inception_eval", - ], -) - -py_binary( - name = "flowers_eval", - srcs = [ - "flowers_eval.py", - ], - deps = [ - ":flowers_data", - ":inception_eval", - ], -) - -py_library( - name = "inception_eval", - srcs = [ - "inception_eval.py", - ], - deps = [ - ":image_processing", - ":inception", - ], -) - -py_binary( - name = "imagenet_train", - srcs = [ - "imagenet_train.py", - ], - deps = [ - ":imagenet_data", - ":inception_train", - ], -) - -py_binary( - name = "imagenet_distributed_train", - srcs = [ - "imagenet_distributed_train.py", - ], - deps = [ - ":imagenet_data", - ":inception_distributed_train", - ], -) - -py_binary( - name = "flowers_train", - srcs = [ - "flowers_train.py", - ], - deps = [ - ":flowers_data", - ":inception_train", - ], -) - -py_library( - name = "inception_train", - srcs = [ - "inception_train.py", - ], - deps = [ - ":image_processing", - ":inception", - ], -) - -py_library( - name = "inception_distributed_train", - srcs = [ - "inception_distributed_train.py", - ], - deps = [ - ":image_processing", - ":inception", - ], -) - -py_binary( - name = "build_image_data", - srcs = 
["data/build_image_data.py"], -) - -sh_binary( - name = "download_and_preprocess_flowers", - srcs = ["data/download_and_preprocess_flowers.sh"], - data = [ - ":build_image_data", - ], -) - -sh_binary( - name = "download_and_preprocess_imagenet", - srcs = ["data/download_and_preprocess_imagenet.sh"], - data = [ - "data/download_imagenet.sh", - "data/imagenet_2012_validation_synset_labels.txt", - "data/imagenet_lsvrc_2015_synsets.txt", - "data/imagenet_metadata.txt", - "data/preprocess_imagenet_validation_data.py", - "data/process_bounding_boxes.py", - ":build_imagenet_data", - ], -) - -py_binary( - name = "build_imagenet_data", - srcs = ["data/build_imagenet_data.py"], -) - -filegroup( - name = "srcs", - srcs = glob( - [ - "**/*.py", - "BUILD", - ], - ), -) - -filegroup( - name = "imagenet_metadata", - srcs = [ - "data/imagenet_lsvrc_2015_synsets.txt", - "data/imagenet_metadata.txt", - ], - visibility = ["//visibility:public"], -) diff --git a/research/inception/inception/data/build_image_data.py b/research/inception/inception/data/build_image_data.py deleted file mode 100755 index 894388b7f..000000000 --- a/research/inception/inception/data/build_image_data.py +++ /dev/null @@ -1,436 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Converts image data to TFRecords file format with Example protos. - -The image data set is expected to reside in JPEG files located in the -following directory structure. - - data_dir/label_0/image0.jpeg - data_dir/label_0/image1.jpg - ... - data_dir/label_1/weird-image.jpeg - data_dir/label_1/my-image.jpeg - ... - -where the sub-directory is the unique label associated with these images. - -This TensorFlow script converts the training and evaluation data into -a sharded data set consisting of TFRecord files - - train_directory/train-00000-of-01024 - train_directory/train-00001-of-01024 - ... - train_directory/train-01023-of-01024 - -and - - validation_directory/validation-00000-of-00128 - validation_directory/validation-00001-of-00128 - ... - validation_directory/validation-00127-of-00128 - -where we have selected 1024 and 128 shards for each data set. Each record -within the TFRecord file is a serialized Example proto. The Example proto -contains the following fields: - - image/encoded: string containing JPEG encoded image in RGB colorspace - image/height: integer, image height in pixels - image/width: integer, image width in pixels - image/colorspace: string, specifying the colorspace, always 'RGB' - image/channels: integer, specifying the number of channels, always 3 - image/format: string, specifying the format, always 'JPEG' - - image/filename: string containing the basename of the image file - e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' - image/class/label: integer specifying the index in a classification layer. 
-    The label ranges from [0, num_labels] where 0 is unused and left as
-    the background class.
-  image/class/text: string specifying the human-readable version of the label
-    e.g. 'dog'
-
-If your data set involves bounding boxes, please look at build_imagenet_data.py.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from datetime import datetime
-import os
-import random
-import sys
-import threading
-
-import numpy as np
-import tensorflow as tf
-
-tf.app.flags.DEFINE_string('train_directory', '/tmp/',
-                           'Training data directory')
-tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
-                           'Validation data directory')
-tf.app.flags.DEFINE_string('output_directory', '/tmp/',
-                           'Output data directory')
-
-tf.app.flags.DEFINE_integer('train_shards', 2,
-                            'Number of shards in training TFRecord files.')
-tf.app.flags.DEFINE_integer('validation_shards', 2,
-                            'Number of shards in validation TFRecord files.')
-
-tf.app.flags.DEFINE_integer('num_threads', 2,
-                            'Number of threads to preprocess the images.')
-
-# The labels file holds the list of valid labels.
-# Assumes that the file contains entries as such:
-#   dog
-#   cat
-#   flower
-# where each line corresponds to a label. We map each label contained in
-# the file to an integer corresponding to the line number starting from 0.
-tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
-
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def _int64_feature(value):
-  """Wrapper for inserting int64 features into Example proto."""
-  if not isinstance(value, list):
-    value = [value]
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
-
-
-def _bytes_feature(value):
-  """Wrapper for inserting bytes features into Example proto."""
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def _convert_to_example(filename, image_buffer, label, text, height, width):
-  """Build an Example proto for an example.
-
-  Args:
-    filename: string, path to an image file, e.g., '/path/to/example.JPG'
-    image_buffer: string, JPEG encoding of RGB image
-    label: integer, identifier for the ground truth for the network
-    text: string, unique human-readable label, e.g. 'dog'
-    height: integer, image height in pixels
-    width: integer, image width in pixels
-  Returns:
-    Example proto
-  """
-
-  colorspace = 'RGB'
-  channels = 3
-  image_format = 'JPEG'
-
-  example = tf.train.Example(features=tf.train.Features(feature={
-      'image/height': _int64_feature(height),
-      'image/width': _int64_feature(width),
-      'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
-      'image/channels': _int64_feature(channels),
-      'image/class/label': _int64_feature(label),
-      'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
-      'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
-      'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
-      'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
-  return example
-
-
-class ImageCoder(object):
-  """Helper class that provides TensorFlow image coding utilities."""
-
-  def __init__(self):
-    # Create a single Session to run all image coding calls.
-    self._sess = tf.Session()
-
-    # Initializes function that converts PNG to JPEG data.
-    self._png_data = tf.placeholder(dtype=tf.string)
-    image = tf.image.decode_png(self._png_data, channels=3)
-    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
-
-    # Initializes function that decodes RGB JPEG data.
-    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
-    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
-
-  def png_to_jpeg(self, image_data):
-    return self._sess.run(self._png_to_jpeg,
-                          feed_dict={self._png_data: image_data})
-
-  def decode_jpeg(self, image_data):
-    image = self._sess.run(self._decode_jpeg,
-                           feed_dict={self._decode_jpeg_data: image_data})
-    assert len(image.shape) == 3
-    assert image.shape[2] == 3
-    return image
-
-
-def _is_png(filename):
-  """Determine if a file contains a PNG format image.
-
-  Args:
-    filename: string, path of the image file.
-
-  Returns:
-    boolean indicating if the image is a PNG.
-  """
-  return filename.endswith('.png')
-
-
-def _process_image(filename, coder):
-  """Process a single image file.
-
-  Args:
-    filename: string, path to an image file e.g., '/path/to/example.JPG'.
-    coder: instance of ImageCoder to provide TensorFlow image coding utils.
-  Returns:
-    image_buffer: string, JPEG encoding of RGB image.
-    height: integer, image height in pixels.
-    width: integer, image width in pixels.
-  """
-  # Read the image file.
-  with tf.gfile.FastGFile(filename, 'rb') as f:
-    image_data = f.read()
-
-  # Convert any PNGs to JPEGs for consistency.
-  if _is_png(filename):
-    print('Converting PNG to JPEG for %s' % filename)
-    image_data = coder.png_to_jpeg(image_data)
-
-  # Decode the RGB JPEG.
-  image = coder.decode_jpeg(image_data)
-
-  # Check that the image converted to RGB.
-  assert len(image.shape) == 3
-  height = image.shape[0]
-  width = image.shape[1]
-  assert image.shape[2] == 3
-
-  return image_data, height, width
-
-
-def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
-                               texts, labels, num_shards):
-  """Processes and saves a list of images as TFRecords in 1 thread.
-
-  Args:
-    coder: instance of ImageCoder to provide TensorFlow image coding utils.
-    thread_index: integer, unique batch index within [0, len(ranges)).
-    ranges: list of pairs of integers specifying the range of each batch to
-      analyze in parallel.
-    name: string, unique identifier specifying the data set
-    filenames: list of strings; each string is a path to an image file
-    texts: list of strings; each string is human readable, e.g. 'dog'
-    labels: list of integers; each integer identifies the ground truth
-    num_shards: integer number of shards for this data set.
-  """
-  # Each thread produces N shards where N = int(num_shards / num_threads).
-  # For instance, if num_shards = 128 and num_threads = 2, then the first
-  # thread would produce shards [0, 64).
-  num_threads = len(ranges)
-  assert not num_shards % num_threads
-  num_shards_per_batch = int(num_shards / num_threads)
-
-  shard_ranges = np.linspace(ranges[thread_index][0],
-                             ranges[thread_index][1],
-                             num_shards_per_batch + 1).astype(int)
-  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
-
-  counter = 0
-  for s in range(num_shards_per_batch):
-    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
-    shard = thread_index * num_shards_per_batch + s
-    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
-    output_file = os.path.join(FLAGS.output_directory, output_filename)
-    writer = tf.python_io.TFRecordWriter(output_file)
-
-    shard_counter = 0
-    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
-    for i in files_in_shard:
-      filename = filenames[i]
-      label = labels[i]
-      text = texts[i]
-
-      try:
-        image_buffer, height, width = _process_image(filename, coder)
-      except Exception as e:
-        print(e)
-        print('SKIPPED: Unexpected error while decoding %s.' % filename)
-        continue
-
-      example = _convert_to_example(filename, image_buffer, label,
-                                    text, height, width)
-      writer.write(example.SerializeToString())
-      shard_counter += 1
-      counter += 1
-
-      if not counter % 1000:
-        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
-              (datetime.now(), thread_index, counter, num_files_in_thread))
-        sys.stdout.flush()
-
-    writer.close()
-    print('%s [thread %d]: Wrote %d images to %s' %
-          (datetime.now(), thread_index, shard_counter, output_file))
-    sys.stdout.flush()
-    shard_counter = 0
-  print('%s [thread %d]: Wrote %d images to %d shards.' %
-        (datetime.now(), thread_index, counter, num_files_in_thread))
-  sys.stdout.flush()
-
-
-def _process_image_files(name, filenames, texts, labels, num_shards):
-  """Process and save a list of images as TFRecords of Example protos.
-
-  Args:
-    name: string, unique identifier specifying the data set
-    filenames: list of strings; each string is a path to an image file
-    texts: list of strings; each string is human readable, e.g. 'dog'
-    labels: list of integers; each integer identifies the ground truth
-    num_shards: integer number of shards for this data set.
-  """
-  assert len(filenames) == len(texts)
-  assert len(filenames) == len(labels)
-
-  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]].
-  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
-  ranges = []
-  for i in range(len(spacing) - 1):
-    ranges.append([spacing[i], spacing[i + 1]])
-
-  # Launch a thread for each batch.
-  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
-  sys.stdout.flush()
-
-  # Create a mechanism for monitoring when all threads are finished.
-  coord = tf.train.Coordinator()
-
-  # Create a generic TensorFlow-based utility for converting all image codings.
-  coder = ImageCoder()
-
-  threads = []
-  for thread_index in range(len(ranges)):
-    args = (coder, thread_index, ranges, name, filenames,
-            texts, labels, num_shards)
-    t = threading.Thread(target=_process_image_files_batch, args=args)
-    t.start()
-    threads.append(t)
-
-  # Wait for all the threads to terminate.
-  coord.join(threads)
-  print('%s: Finished writing all %d images in data set.' %
-        (datetime.now(), len(filenames)))
-  sys.stdout.flush()
-
-
-def _find_image_files(data_dir, labels_file):
-  """Build a list of all image files and labels in the data set.
-
-  Args:
-    data_dir: string, path to the root directory of images.
-
-      Assumes that the image data set resides in JPEG files located in
-      the following directory structure.
-
-        data_dir/dog/another-image.JPEG
-        data_dir/dog/my-image.jpg
-
-      where 'dog' is the label associated with these images.
-
-    labels_file: string, path to the labels file.
-
-      The list of valid labels is held in this file. Assumes that the file
-      contains entries as such:
-        dog
-        cat
-        flower
-      where each line corresponds to a label. We map each label contained in
-      the file to an integer starting with the integer 0 corresponding to the
-      label contained in the first line.
-
-  Returns:
-    filenames: list of strings; each string is a path to an image file.
-    texts: list of strings; each string is the class, e.g. 'dog'
-    labels: list of integers; each integer identifies the ground truth.
-  """
-  print('Determining list of input files and labels from %s.' % data_dir)
-  unique_labels = [l.strip() for l in tf.gfile.FastGFile(
-      labels_file, 'r').readlines()]
-
-  labels = []
-  filenames = []
-  texts = []
-
-  # Leave label index 0 empty as a background class.
-  label_index = 1
-
-  # Construct the list of JPEG files and labels.
-  for text in unique_labels:
-    jpeg_file_path = '%s/%s/*' % (data_dir, text)
-    matching_files = tf.gfile.Glob(jpeg_file_path)
-
-    labels.extend([label_index] * len(matching_files))
-    texts.extend([text] * len(matching_files))
-    filenames.extend(matching_files)
-
-    if not label_index % 100:
-      print('Finished finding files in %d of %d classes.' % (
-          label_index, len(labels)))
-    label_index += 1
-
-  # Shuffle the ordering of all image files in order to guarantee
-  # random ordering of the images with respect to label in the
-  # saved TFRecord files. Make the randomization repeatable.
-  shuffled_index = list(range(len(filenames)))
-  random.seed(12345)
-  random.shuffle(shuffled_index)
-
-  filenames = [filenames[i] for i in shuffled_index]
-  texts = [texts[i] for i in shuffled_index]
-  labels = [labels[i] for i in shuffled_index]
-
-  print('Found %d JPEG files across %d labels inside %s.' %
-        (len(filenames), len(unique_labels), data_dir))
-  return filenames, texts, labels
-
-
-def _process_dataset(name, directory, num_shards, labels_file):
-  """Process a complete data set and save it as a TFRecord.
-
-  Args:
-    name: string, unique identifier specifying the data set.
-    directory: string, root path to the data set.
-    num_shards: integer number of shards for this data set.
-    labels_file: string, path to the labels file.
-  """
-  filenames, texts, labels = _find_image_files(directory, labels_file)
-  _process_image_files(name, filenames, texts, labels, num_shards)
-
-
-def main(unused_argv):
-  assert not FLAGS.train_shards % FLAGS.num_threads, (
-      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
-  assert not FLAGS.validation_shards % FLAGS.num_threads, (
-      'Please make the FLAGS.num_threads commensurate with '
-      'FLAGS.validation_shards')
-  print('Saving results to %s' % FLAGS.output_directory)
-
-  # Run it!
-  _process_dataset('validation', FLAGS.validation_directory,
-                   FLAGS.validation_shards, FLAGS.labels_file)
-  _process_dataset('train', FLAGS.train_directory,
-                   FLAGS.train_shards, FLAGS.labels_file)
-
-
-if __name__ == '__main__':
-  tf.app.run()
diff --git a/research/inception/inception/data/build_imagenet_data.py b/research/inception/inception/data/build_imagenet_data.py
deleted file mode 100644
index c054735e7..000000000
--- a/research/inception/inception/data/build_imagenet_data.py
+++ /dev/null
@@ -1,707 +0,0 @@
-#!/usr/bin/python
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
diff --git a/research/inception/inception/data/build_imagenet_data.py b/research/inception/inception/data/build_imagenet_data.py
deleted file mode 100644
index c054735e7..000000000
--- a/research/inception/inception/data/build_imagenet_data.py
+++ /dev/null
@@ -1,707 +0,0 @@
-#!/usr/bin/python
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Converts ImageNet data to TFRecords file format with Example protos.
-
-The raw ImageNet data set is expected to reside in JPEG files located in the
-following directory structure.
-
-  data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
-  data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
-  ...
-
-where 'n01440764' is the unique synset label associated with
-these images.
-
-The training data set consists of 1000 sub-directories (i.e. labels)
-each containing 1200 JPEG images for a total of 1.2M JPEG images.
-
-The evaluation data set consists of 1000 sub-directories (i.e. labels)
-each containing 50 JPEG images for a total of 50K JPEG images.
-
-This TensorFlow script converts the training and evaluation data into
-a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
-
-  train_directory/train-00000-of-01024
-  train_directory/train-00001-of-01024
-  ...
-  train_directory/train-01023-of-01024
-
-and
-
-  validation_directory/validation-00000-of-00128
-  validation_directory/validation-00001-of-00128
-  ...
-  validation_directory/validation-00127-of-00128
-
-Each validation TFRecord file contains ~390 records. Each training TFRecord
-file contains ~1250 records. Each record within the TFRecord file is a
-serialized Example proto. The Example proto contains the following fields:
-
-  image/encoded: string containing JPEG encoded image in RGB colorspace
-  image/height: integer, image height in pixels
-  image/width: integer, image width in pixels
-  image/colorspace: string, specifying the colorspace, always 'RGB'
-  image/channels: integer, specifying the number of channels, always 3
-  image/format: string, specifying the format, always 'JPEG'
-
-  image/filename: string containing the basename of the image file
-    e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
-  image/class/label: integer specifying the index in a classification layer.
-    The label ranges from [1, 1000] where 0 is not used.
-  image/class/synset: string specifying the unique ID of the label,
-    e.g. 'n01440764'
-  image/class/text: string specifying the human-readable version of the label
-    e.g. 'red fox, Vulpes vulpes'
-
-  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
-    bounding boxes
-  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
-    bounding boxes
-  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
-    bounding boxes
-  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
-    bounding boxes
-  image/object/bbox/label: integer specifying the index in a classification
-    layer. The label ranges from [1, 1000] where 0 is not used. Note this is
-    always identical to the image label.
-
-Note that the length of xmin is identical to the length of xmax, ymin and ymax
-for each example.
-
-Running this script using 16 threads may take around ~2.5 hours on an HP Z420.
-"""
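
A record written with the schema above can be read back with a feature spec
keyed on the same field names. The sketch below is illustrative only, using
the TF1-style tf.parse_single_example API that matches this script; only a
few of the documented fields are shown:

    import tensorflow as tf

    features = {
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/class/label': tf.FixedLenFeature([], tf.int64),
        'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
    }

    def parse_record(serialized):
      # Decode one serialized Example produced by this script.
      parsed = tf.parse_single_example(serialized, features)
      image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
      return image, parsed['image/class/label']
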
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from datetime import datetime
-import os
-import random
-import sys
-import threading
-
-import numpy as np
-import six
-import tensorflow as tf
-
-tf.app.flags.DEFINE_string('train_directory', '/tmp/',
-                           'Training data directory')
-tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
-                           'Validation data directory')
-tf.app.flags.DEFINE_string('output_directory', '/tmp/',
-                           'Output data directory')
-
-tf.app.flags.DEFINE_integer('train_shards', 1024,
-                            'Number of shards in training TFRecord files.')
-tf.app.flags.DEFINE_integer('validation_shards', 128,
-                            'Number of shards in validation TFRecord files.')
-
-tf.app.flags.DEFINE_integer('num_threads', 8,
-                            'Number of threads to preprocess the images.')
-
-# The labels file contains the list of valid labels.
-# Assumes that the file contains entries as such:
-#   n01440764
-#   n01443537
-#   n01484850
-# where each line corresponds to a label expressed as a synset. We map
-# each synset contained in the file to an integer (based on the alphabetical
-# ordering). See below for details.
-tf.app.flags.DEFINE_string('labels_file',
-                           'imagenet_lsvrc_2015_synsets.txt',
-                           'Labels file')
-
-# This file contains the mapping from synset to human-readable label.
-# Assumes each line of the file looks like:
-#
-#   n02119247    black fox
-#   n02119359    silver fox
-#   n02119477    red fox, Vulpes fulva
-#
-# where each line corresponds to a unique mapping. Note that each line is
-# formatted as <synset>\t<human readable label>.
-tf.app.flags.DEFINE_string('imagenet_metadata_file',
-                           'imagenet_metadata.txt',
-                           'ImageNet metadata file')
-
-# This file is the output of process_bounding_boxes.py
-# Assumes each line of the file looks like:
-#
-#   n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
-#
-# where each line corresponds to one bounding box annotation associated
-# with an image. Each line can be parsed as:
-#
-#   <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
-#
-# Note that there might exist multiple bounding box annotations associated
-# with an image file.
-tf.app.flags.DEFINE_string('bounding_box_file',
-                           './imagenet_2012_bounding_boxes.csv',
-                           'Bounding box file')
-
-FLAGS = tf.app.flags.FLAGS
-
-
-def _int64_feature(value):
-  """Wrapper for inserting int64 features into Example proto."""
-  if not isinstance(value, list):
-    value = [value]
-  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
-
-
-def _float_feature(value):
-  """Wrapper for inserting float features into Example proto."""
-  if not isinstance(value, list):
-    value = [value]
-  return tf.train.Feature(float_list=tf.train.FloatList(value=value))
-
-
-def _bytes_feature(value):
-  """Wrapper for inserting bytes features into Example proto."""
-  if six.PY3 and isinstance(value, six.text_type):
-    value = six.binary_type(value, encoding='utf-8')
-  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
-                        height, width):
-  """Build an Example proto for an example.
-
-  Args:
-    filename: string, path to an image file, e.g., '/path/to/example.JPG'
-    image_buffer: string, JPEG encoding of RGB image
-    label: integer, identifier for the ground truth for the network
-    synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
-    human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
-    bbox: list of bounding boxes; each box is a list of floats
-      specifying [xmin, ymin, xmax, ymax].
All boxes are assumed to belong to - the same label as the image label. - height: integer, image height in pixels - width: integer, image width in pixels - Returns: - Example proto - """ - xmin = [] - ymin = [] - xmax = [] - ymax = [] - for b in bbox: - assert len(b) == 4 - # pylint: disable=expression-not-assigned - [l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)] - # pylint: enable=expression-not-assigned - - colorspace = 'RGB' - channels = 3 - image_format = 'JPEG' - - example = tf.train.Example(features=tf.train.Features(feature={ - 'image/height': _int64_feature(height), - 'image/width': _int64_feature(width), - 'image/colorspace': _bytes_feature(colorspace), - 'image/channels': _int64_feature(channels), - 'image/class/label': _int64_feature(label), - 'image/class/synset': _bytes_feature(synset), - 'image/class/text': _bytes_feature(human), - 'image/object/bbox/xmin': _float_feature(xmin), - 'image/object/bbox/xmax': _float_feature(xmax), - 'image/object/bbox/ymin': _float_feature(ymin), - 'image/object/bbox/ymax': _float_feature(ymax), - 'image/object/bbox/label': _int64_feature([label] * len(xmin)), - 'image/format': _bytes_feature(image_format), - 'image/filename': _bytes_feature(os.path.basename(filename)), - 'image/encoded': _bytes_feature(image_buffer)})) - return example - - -class ImageCoder(object): - """Helper class that provides TensorFlow image coding utilities.""" - - def __init__(self): - # Create a single Session to run all image coding calls. - self._sess = tf.Session() - - # Initializes function that converts PNG to JPEG data. - self._png_data = tf.placeholder(dtype=tf.string) - image = tf.image.decode_png(self._png_data, channels=3) - self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) - - # Initializes function that converts CMYK JPEG data to RGB JPEG data. - self._cmyk_data = tf.placeholder(dtype=tf.string) - image = tf.image.decode_jpeg(self._cmyk_data, channels=0) - self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) - - # Initializes function that decodes RGB JPEG data. - self._decode_jpeg_data = tf.placeholder(dtype=tf.string) - self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) - - def png_to_jpeg(self, image_data): - return self._sess.run(self._png_to_jpeg, - feed_dict={self._png_data: image_data}) - - def cmyk_to_rgb(self, image_data): - return self._sess.run(self._cmyk_to_rgb, - feed_dict={self._cmyk_data: image_data}) - - def decode_jpeg(self, image_data): - image = self._sess.run(self._decode_jpeg, - feed_dict={self._decode_jpeg_data: image_data}) - assert len(image.shape) == 3 - assert image.shape[2] == 3 - return image - - -def _is_png(filename): - """Determine if a file contains a PNG format image. - - Args: - filename: string, path of the image file. - - Returns: - boolean indicating if the image is a PNG. - """ - # File list from: - # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU - return 'n02105855_2933.JPEG' in filename - - -def _is_cmyk(filename): - """Determine if file contains a CMYK JPEG format image. - - Args: - filename: string, path of the image file. - - Returns: - boolean indicating if the image is a JPEG encoded with CMYK color space. 
- """ - # File list from: - # https://github.com/cytsai/ilsvrc-cmyk-image-list - blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', - 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', - 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', - 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', - 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', - 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', - 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', - 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', - 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', - 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', - 'n07583066_647.JPEG', 'n13037406_4650.JPEG'] - return filename.split('/')[-1] in blacklist - - -def _process_image(filename, coder): - """Process a single image file. - - Args: - filename: string, path to an image file e.g., '/path/to/example.JPG'. - coder: instance of ImageCoder to provide TensorFlow image coding utils. - Returns: - image_buffer: string, JPEG encoding of RGB image. - height: integer, image height in pixels. - width: integer, image width in pixels. - """ - # Read the image file. - with tf.gfile.FastGFile(filename, 'rb') as f: - image_data = f.read() - - # Clean the dirty data. - if _is_png(filename): - # 1 image is a PNG. - print('Converting PNG to JPEG for %s' % filename) - image_data = coder.png_to_jpeg(image_data) - elif _is_cmyk(filename): - # 22 JPEG images are in CMYK colorspace. - print('Converting CMYK to RGB for %s' % filename) - image_data = coder.cmyk_to_rgb(image_data) - - # Decode the RGB JPEG. - image = coder.decode_jpeg(image_data) - - # Check that image converted to RGB - assert len(image.shape) == 3 - height = image.shape[0] - width = image.shape[1] - assert image.shape[2] == 3 - - return image_data, height, width - - -def _process_image_files_batch(coder, thread_index, ranges, name, filenames, - synsets, labels, humans, bboxes, num_shards): - """Processes and saves list of images as TFRecord in 1 thread. - - Args: - coder: instance of ImageCoder to provide TensorFlow image coding utils. - thread_index: integer, unique batch to run index is within [0, len(ranges)). - ranges: list of pairs of integers specifying ranges of each batches to - analyze in parallel. - name: string, unique identifier specifying the data set - filenames: list of strings; each string is a path to an image file - synsets: list of strings; each string is a unique WordNet ID - labels: list of integer; each integer identifies the ground truth - humans: list of strings; each string is a human-readable label - bboxes: list of bounding boxes for each image. Note that each entry in this - list might contain from 0+ entries corresponding to the number of bounding - box annotations for the image. - num_shards: integer number of shards for this data set. - """ - # Each thread produces N shards where N = int(num_shards / num_threads). - # For instance, if num_shards = 128, and the num_threads = 2, then the first - # thread would produce shards [0, 64). - num_threads = len(ranges) - assert not num_shards % num_threads - num_shards_per_batch = int(num_shards / num_threads) - - shard_ranges = np.linspace(ranges[thread_index][0], - ranges[thread_index][1], - num_shards_per_batch + 1).astype(int) - num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] - - counter = 0 - for s in range(num_shards_per_batch): - # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010' - shard = thread_index * num_shards_per_batch + s - output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) - output_file = os.path.join(FLAGS.output_directory, output_filename) - writer = tf.python_io.TFRecordWriter(output_file) - - shard_counter = 0 - files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) - for i in files_in_shard: - filename = filenames[i] - label = labels[i] - synset = synsets[i] - human = humans[i] - bbox = bboxes[i] - - image_buffer, height, width = _process_image(filename, coder) - - example = _convert_to_example(filename, image_buffer, label, - synset, human, bbox, - height, width) - writer.write(example.SerializeToString()) - shard_counter += 1 - counter += 1 - - if not counter % 1000: - print('%s [thread %d]: Processed %d of %d images in thread batch.' % - (datetime.now(), thread_index, counter, num_files_in_thread)) - sys.stdout.flush() - - writer.close() - print('%s [thread %d]: Wrote %d images to %s' % - (datetime.now(), thread_index, shard_counter, output_file)) - sys.stdout.flush() - shard_counter = 0 - print('%s [thread %d]: Wrote %d images to %d shards.' % - (datetime.now(), thread_index, counter, num_files_in_thread)) - sys.stdout.flush() - - -def _process_image_files(name, filenames, synsets, labels, humans, - bboxes, num_shards): - """Process and save list of images as TFRecord of Example protos. - - Args: - name: string, unique identifier specifying the data set - filenames: list of strings; each string is a path to an image file - synsets: list of strings; each string is a unique WordNet ID - labels: list of integer; each integer identifies the ground truth - humans: list of strings; each string is a human-readable label - bboxes: list of bounding boxes for each image. Note that each entry in this - list might contain from 0+ entries corresponding to the number of bounding - box annotations for the image. - num_shards: integer number of shards for this data set. - """ - assert len(filenames) == len(synsets) - assert len(filenames) == len(labels) - assert len(filenames) == len(humans) - assert len(filenames) == len(bboxes) - - # Break all images into batches with a [ranges[i][0], ranges[i][1]]. - spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) - ranges = [] - threads = [] - for i in range(len(spacing) - 1): - ranges.append([spacing[i], spacing[i + 1]]) - - # Launch a thread for each batch. - print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) - sys.stdout.flush() - - # Create a mechanism for monitoring when all threads are finished. - coord = tf.train.Coordinator() - - # Create a generic TensorFlow-based utility for converting all image codings. - coder = ImageCoder() - - threads = [] - for thread_index in range(len(ranges)): - args = (coder, thread_index, ranges, name, filenames, - synsets, labels, humans, bboxes, num_shards) - t = threading.Thread(target=_process_image_files_batch, args=args) - t.start() - threads.append(t) - - # Wait for all the threads to terminate. - coord.join(threads) - print('%s: Finished writing all %d images in data set.' % - (datetime.now(), len(filenames))) - sys.stdout.flush() - - -def _find_image_files(data_dir, labels_file): - """Build a list of all images files and labels in the data set. - - Args: - data_dir: string, path to the root directory of images. - - Assumes that the ImageNet data set resides in JPEG files located in - the following directory structure. 
-
-      data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
-      data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
-
-    where 'n01440764' is the unique synset label associated with these images.
-
-    labels_file: string, path to the labels file.
-
-      The list of valid labels is held in this file. Assumes that the file
-      contains entries as such:
-        n01440764
-        n01443537
-        n01484850
-      where each line corresponds to a label expressed as a synset. We map
-      each synset contained in the file to an integer (based on the alphabetical
-      ordering) starting with the integer 1 corresponding to the synset
-      contained in the first line.
-
-      The reason we start the integer labels at 1 is to reserve label 0 as an
-      unused background class.
-
-  Returns:
-    filenames: list of strings; each string is a path to an image file.
-    synsets: list of strings; each string is a unique WordNet ID.
-    labels: list of integers; each integer identifies the ground truth.
-  """
-  print('Determining list of input files and labels from %s.' % data_dir)
-  challenge_synsets = [l.strip() for l in
-                       tf.gfile.FastGFile(labels_file, 'r').readlines()]
-
-  labels = []
-  filenames = []
-  synsets = []
-
-  # Leave label index 0 empty as a background class.
-  label_index = 1
-
-  # Construct the list of JPEG files and labels.
-  for synset in challenge_synsets:
-    jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
-    matching_files = tf.gfile.Glob(jpeg_file_path)
-
-    labels.extend([label_index] * len(matching_files))
-    synsets.extend([synset] * len(matching_files))
-    filenames.extend(matching_files)
-
-    if not label_index % 100:
-      print('Finished finding files in %d of %d classes.' % (
-          label_index, len(challenge_synsets)))
-    label_index += 1
-
-  # Shuffle the ordering of all image files in order to guarantee
-  # random ordering of the images with respect to label in the
-  # saved TFRecord files. Make the randomization repeatable.
-  shuffled_index = list(range(len(filenames)))
-  random.seed(12345)
-  random.shuffle(shuffled_index)
-
-  filenames = [filenames[i] for i in shuffled_index]
-  synsets = [synsets[i] for i in shuffled_index]
-  labels = [labels[i] for i in shuffled_index]
-
-  print('Found %d JPEG files across %d labels inside %s.' %
-        (len(filenames), len(challenge_synsets), data_dir))
-  return filenames, synsets, labels
-
-
-def _find_human_readable_labels(synsets, synset_to_human):
-  """Build a list of human-readable labels.
-
-  Args:
-    synsets: list of strings; each string is a unique WordNet ID.
-    synset_to_human: dict of synset to human labels, e.g.,
-      'n02119022' --> 'red fox, Vulpes vulpes'
-
-  Returns:
-    List of human-readable strings corresponding to each synset.
-  """
-  humans = []
-  for s in synsets:
-    assert s in synset_to_human, ('Failed to find: %s' % s)
-    humans.append(synset_to_human[s])
-  return humans
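
The shared shuffled_index permutation in _find_image_files above is what keeps
filenames, synsets and labels aligned after shuffling. The same pattern in
miniature, with toy stand-in data:

    import random

    filenames = ['a.JPEG', 'b.JPEG', 'c.JPEG']  # toy stand-ins
    labels = [1, 2, 3]
    shuffled_index = list(range(len(filenames)))
    random.seed(12345)  # repeatable, as in the deleted script
    random.shuffle(shuffled_index)
    filenames = [filenames[i] for i in shuffled_index]
    labels = [labels[i] for i in shuffled_index]  # still aligned with filenames
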
-
-
-def _find_image_bounding_boxes(filenames, image_to_bboxes):
-  """Find the bounding boxes for a given image file.
-
-  Args:
-    filenames: list of strings; each string is a path to an image file.
-    image_to_bboxes: dictionary mapping image file names to a list of
-      bounding boxes. This list contains 0+ bounding boxes.
-  Returns:
-    List of bounding boxes for each image. Note that each entry in this
-    list might contain 0+ entries corresponding to the number of bounding
-    box annotations for the image.
-  """
-  num_image_bbox = 0
-  bboxes = []
-  for f in filenames:
-    basename = os.path.basename(f)
-    if basename in image_to_bboxes:
-      bboxes.append(image_to_bboxes[basename])
-      num_image_bbox += 1
-    else:
-      bboxes.append([])
-  print('Found %d images with bboxes out of %d images' % (
-      num_image_bbox, len(filenames)))
-  return bboxes
-
-
-def _process_dataset(name, directory, num_shards, synset_to_human,
-                     image_to_bboxes):
-  """Process a complete data set and save it as a TFRecord.
-
-  Args:
-    name: string, unique identifier specifying the data set.
-    directory: string, root path to the data set.
-    num_shards: integer number of shards for this data set.
-    synset_to_human: dict of synset to human labels, e.g.,
-      'n02119022' --> 'red fox, Vulpes vulpes'
-    image_to_bboxes: dictionary mapping image file names to a list of
-      bounding boxes. This list contains 0+ bounding boxes.
-  """
-  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
-  humans = _find_human_readable_labels(synsets, synset_to_human)
-  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
-  _process_image_files(name, filenames, synsets, labels,
-                       humans, bboxes, num_shards)
-
-
-def _build_synset_lookup(imagenet_metadata_file):
-  """Build lookup for synset to human-readable label.
-
-  Args:
-    imagenet_metadata_file: string, path to file containing mapping from
-      synset to human-readable label.
-
-      Assumes each line of the file looks like:
-
-        n02119247    black fox
-        n02119359    silver fox
-        n02119477    red fox, Vulpes fulva
-
-      where each line corresponds to a unique mapping. Note that each line is
-      formatted as <synset>\t<human readable label>.
-
-  Returns:
-    Dictionary of synset to human labels, such as:
-      'n02119022' --> 'red fox, Vulpes vulpes'
-  """
-  lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
-  synset_to_human = {}
-  for l in lines:
-    if l:
-      parts = l.strip().split('\t')
-      assert len(parts) == 2
-      synset = parts[0]
-      human = parts[1]
-      synset_to_human[synset] = human
-  return synset_to_human
-
-
-def _build_bounding_box_lookup(bounding_box_file):
-  """Build a lookup from image file to bounding boxes.
-
-  Args:
-    bounding_box_file: string, path to file with bounding boxes annotations.
-
-      Assumes each line of the file looks like:
-
-        n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
-
-      where each line corresponds to one bounding box annotation associated
-      with an image. Each line can be parsed as:
-
-        <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
-
-      Note that there might exist multiple bounding box annotations associated
-      with an image file. This file is the output of process_bounding_boxes.py.
-
-  Returns:
-    Dictionary mapping image file names to a list of bounding boxes. This list
-    contains 0+ bounding boxes.
-  """
-  lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
-  images_to_bboxes = {}
-  num_bbox = 0
-  num_image = 0
-  for l in lines:
-    if l:
-      parts = l.split(',')
-      assert len(parts) == 5, ('Failed to parse: %s' % l)
-      filename = parts[0]
-      xmin = float(parts[1])
-      ymin = float(parts[2])
-      xmax = float(parts[3])
-      ymax = float(parts[4])
-      box = [xmin, ymin, xmax, ymax]
-
-      if filename not in images_to_bboxes:
-        images_to_bboxes[filename] = []
-        num_image += 1
-      images_to_bboxes[filename].append(box)
-      num_bbox += 1
-
-  print('Successfully read %d bounding boxes '
-        'across %d images.' % (num_bbox, num_image))
-  return images_to_bboxes
-
-
-def main(unused_argv):
-  assert not FLAGS.train_shards % FLAGS.num_threads, (
-      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
-  assert not FLAGS.validation_shards % FLAGS.num_threads, (
-      'Please make the FLAGS.num_threads commensurate with '
-      'FLAGS.validation_shards')
-  print('Saving results to %s' % FLAGS.output_directory)
-
-  # Build a map from synset to human-readable label.
-  synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
-  image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
-
-  # Run it!
-  _process_dataset('validation', FLAGS.validation_directory,
-                   FLAGS.validation_shards, synset_to_human, image_to_bboxes)
-  _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
-                   synset_to_human, image_to_bboxes)
-
-
-if __name__ == '__main__':
-  tf.app.run()
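
_build_bounding_box_lookup above reduces the CSV to a dictionary keyed by
image basename, each value a list of [xmin, ymin, xmax, ymax] boxes. The same
reduction, sketched on the sample line from its docstring:

    line = 'n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940'
    images_to_bboxes = {}
    parts = line.split(',')
    assert len(parts) == 5
    filename, box = parts[0], [float(v) for v in parts[1:]]
    images_to_bboxes.setdefault(filename, []).append(box)
    print(images_to_bboxes)
    # {'n00007846_64193.JPEG': [[0.006, 0.262, 0.7545, 0.994]]}
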
diff --git a/research/inception/inception/data/download_and_preprocess_flowers.sh b/research/inception/inception/data/download_and_preprocess_flowers.sh
deleted file mode 100755
index ee045c164..000000000
--- a/research/inception/inception/data/download_and_preprocess_flowers.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# Script to download and preprocess the flowers data set. This data set
-# provides a demonstration for how to perform fine-tuning (i.e. transfer
-# learning) from one model to a new data set.
-#
-# This script provides a demonstration for how to prepare an arbitrary
-# data set for training an Inception v3 model.
-#
-# We demonstrate this with the flowers data set, which consists of labeled
-# flower images from 5 classes:
-#
-#   daisy, dandelion, roses, sunflowers, tulips
-#
-# The final output of this script is a set of sharded TFRecord files containing
-# serialized Example protocol buffers. See build_image_data.py for
-# details of how the Example protocol buffer contains image data.
-#
-# usage:
-#  ./download_and_preprocess_flowers.sh [data-dir]
-set -e
-
-if [ -z "$1" ]; then
-  echo "Usage: download_and_preprocess_flowers.sh [data dir]"
-  exit
-fi
-
-# Create the output and temporary directories.
-DATA_DIR="${1%/}"
-SCRATCH_DIR="${DATA_DIR}/raw-data"
-WORK_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-mkdir -p "${DATA_DIR}"
-mkdir -p "${SCRATCH_DIR}"
-
-# Download the flowers data.
-DATA_URL="http://download.tensorflow.org/example_images/flower_photos.tgz"
-CURRENT_DIR=$(pwd)
-cd "${DATA_DIR}"
-TARBALL="flower_photos.tgz"
-if [ ! -f ${TARBALL} ]; then
-  echo "Downloading flower data set."
-  curl -o ${TARBALL} "${DATA_URL}"
-else
-  echo "Skipping download of flower data."
-fi
-
-# Note the locations of the train and validation data.
-TRAIN_DIRECTORY="${SCRATCH_DIR}/train"
-VALIDATION_DIRECTORY="${SCRATCH_DIR}/validation"
-
-# Expand the data into the flower_photos/ directory and rename it as the
-# train directory.
-tar xf flower_photos.tgz
-rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}"
-mv flower_photos "${TRAIN_DIRECTORY}"
-
-# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips
-LABELS_FILE="${SCRATCH_DIR}/labels.txt"
-ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}"
-
-# Generate the validation data set.
-while read LABEL; do
-  VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}/${LABEL}"
-  TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}/${LABEL}"
-
-  # Move 100 randomly selected images to the validation set.
-  mkdir -p "${VALIDATION_DIR_FOR_LABEL}"
-  VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | shuf | head -100)
-  for IMAGE in ${VALIDATION_IMAGES}; do
-    mv -f "${TRAIN_DIRECTORY}/${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}"
-  done
-done < "${LABELS_FILE}"
-
-# Build the TFRecords version of the image data.
-cd "${CURRENT_DIR}"
-BUILD_SCRIPT="${WORK_DIR}/build_image_data.py"
-OUTPUT_DIRECTORY="${DATA_DIR}"
-"${BUILD_SCRIPT}" \
-  --train_directory="${TRAIN_DIRECTORY}" \
-  --validation_directory="${VALIDATION_DIRECTORY}" \
-  --output_directory="${OUTPUT_DIRECTORY}" \
-  --labels_file="${LABELS_FILE}"
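
The while/shuf loop in the script above moves 100 randomly chosen images per
label out of train/ into validation/. A rough Python equivalent of the same
split; the function name and paths are placeholders, not part of the deleted
script:

    import os
    import random
    import shutil

    def split_validation(train_dir, val_dir, per_label=100):
      # Move up to per_label randomly chosen images from train_dir/<label>/
      # into val_dir/<label>/, mirroring the shell loop above.
      for label in sorted(os.listdir(train_dir)):
        src = os.path.join(train_dir, label)
        dst = os.path.join(val_dir, label)
        if not os.path.isdir(dst):
          os.makedirs(dst)
        images = os.listdir(src)
        for image in random.sample(images, min(per_label, len(images))):
          shutil.move(os.path.join(src, image), dst)
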
diff --git a/research/inception/inception/data/download_and_preprocess_flowers_mac.sh b/research/inception/inception/data/download_and_preprocess_flowers_mac.sh
deleted file mode 100644
index 154905635..000000000
--- a/research/inception/inception/data/download_and_preprocess_flowers_mac.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# Script to download and preprocess the flowers data set. This data set
-# provides a demonstration for how to perform fine-tuning (i.e. transfer
-# learning) from one model to a new data set.
-#
-# This script provides a demonstration for how to prepare an arbitrary
-# data set for training an Inception v3 model.
-#
-# We demonstrate this with the flowers data set, which consists of labeled
-# flower images from 5 classes:
-#
-#   daisy, dandelion, roses, sunflowers, tulips
-#
-# The final output of this script is a set of sharded TFRecord files containing
-# serialized Example protocol buffers. See build_image_data.py for
-# details of how the Example protocol buffer contains image data.
-#
-# usage:
-#  ./download_and_preprocess_flowers.sh [data-dir]
-set -e
-
-if [ -z "$1" ]; then
-  echo "Usage: download_and_preprocess_flowers.sh [data dir]"
-  exit
-fi
-
-# Create the output and temporary directories.
-DATA_DIR="${1%/}"
-SCRATCH_DIR="${DATA_DIR}/raw-data/"
-mkdir -p "${DATA_DIR}"
-mkdir -p "${SCRATCH_DIR}"
-WORK_DIR="$0.runfiles/inception/inception"
-
-# Download the flowers data.
-DATA_URL="http://download.tensorflow.org/example_images/flower_photos.tgz"
-CURRENT_DIR=$(pwd)
-cd "${DATA_DIR}"
-TARBALL="flower_photos.tgz"
-if [ ! -f ${TARBALL} ]; then
-  echo "Downloading flower data set."
-  curl -o ${TARBALL} "${DATA_URL}"
-else
-  echo "Skipping download of flower data."
-fi
-
-# Note the locations of the train and validation data.
-TRAIN_DIRECTORY="${SCRATCH_DIR}train/"
-VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/"
-
-# Expand the data into the flower_photos/ directory and rename it as the
-# train directory.
-tar xf flower_photos.tgz
-rm -rf "${TRAIN_DIRECTORY}" "${VALIDATION_DIRECTORY}"
-mv flower_photos "${TRAIN_DIRECTORY}"
-
-# Generate a list of 5 labels: daisy, dandelion, roses, sunflowers, tulips
-LABELS_FILE="${SCRATCH_DIR}/labels.txt"
-ls -1 "${TRAIN_DIRECTORY}" | grep -v 'LICENSE' | sed 's/\///' | sort > "${LABELS_FILE}"
-
-# Generate the validation data set.
-while read LABEL; do
-  VALIDATION_DIR_FOR_LABEL="${VALIDATION_DIRECTORY}${LABEL}"
-  TRAIN_DIR_FOR_LABEL="${TRAIN_DIRECTORY}${LABEL}"
-
-  # Move 100 randomly selected images to the validation set.
-  mkdir -p "${VALIDATION_DIR_FOR_LABEL}"
-  VALIDATION_IMAGES=$(ls -1 "${TRAIN_DIR_FOR_LABEL}" | gshuf | head -100)
-  for IMAGE in ${VALIDATION_IMAGES}; do
-    mv -f "${TRAIN_DIRECTORY}${LABEL}/${IMAGE}" "${VALIDATION_DIR_FOR_LABEL}"
-  done
-done < "${LABELS_FILE}"
-
-# Build the TFRecords version of the image data.
-cd "${CURRENT_DIR}"
-BUILD_SCRIPT="${WORK_DIR}/build_image_data"
-OUTPUT_DIRECTORY="${DATA_DIR}"
-"${BUILD_SCRIPT}" \
-  --train_directory="${TRAIN_DIRECTORY}" \
-  --validation_directory="${VALIDATION_DIRECTORY}" \
-  --output_directory="${OUTPUT_DIRECTORY}" \
-  --labels_file="${LABELS_FILE}"
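
Both flowers scripts, like the ImageNet script that follows, hand off to a
build_image_data-style converter that writes shards named with the
'%s-%.5d-of-%.5d' pattern seen earlier. A small sketch of that naming
convention:

    def shard_names(name, num_shards):
      # e.g. shard_names('validation', 128) -> validation-00000-of-00128, ...
      return ['%s-%.5d-of-%.5d' % (name, shard, num_shards)
              for shard in range(num_shards)]

    print(shard_names('validation', 128)[0])   # validation-00000-of-00128
    print(shard_names('validation', 128)[-1])  # validation-00127-of-00128
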
diff --git a/research/inception/inception/data/download_and_preprocess_imagenet.sh b/research/inception/inception/data/download_and_preprocess_imagenet.sh
deleted file mode 100755
index 6faae8310..000000000
--- a/research/inception/inception/data/download_and_preprocess_imagenet.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/bash
-# Copyright 2016 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-# Script to download and preprocess the ImageNet Challenge 2012
-# training and validation data sets.
-#
-# The final output of this script is a set of sharded TFRecord files containing
-# serialized Example protocol buffers. See build_imagenet_data.py for
-# details of how the Example protocol buffers contain the ImageNet data.
-#
-# The final output of this script appears as such:
-#
-#   data_dir/train-00000-of-01024
-#   data_dir/train-00001-of-01024
-#   ...
-#   data_dir/train-01023-of-01024
-#
-# and
-#
-#   data_dir/validation-00000-of-00128
-#   data_dir/validation-00001-of-00128
-#   ...
-#   data_dir/validation-00127-of-00128
-#
-# Note that this script may take several hours to run to completion. The
-# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending
-# on the speed of your machine.
Please be patient. -# -# **IMPORTANT** -# To download the raw images, the user must create an account with image-net.org -# and generate a username and access_key. The latter two are required for -# downloading the raw images. -# -# usage: -# ./download_and_preprocess_imagenet.sh [data-dir] -set -e - -if [ -z "$1" ]; then - echo "Usage: download_and_preprocess_imagenet.sh [data dir]" - exit -fi - -# Create the output and temporary directories. -DATA_DIR="${1%/}" -SCRATCH_DIR="${DATA_DIR}/raw-data/" -mkdir -p "${DATA_DIR}" -mkdir -p "${SCRATCH_DIR}" -WORK_DIR="$0.runfiles/inception/inception" - -# Download the ImageNet data. -LABELS_FILE="${WORK_DIR}/data/imagenet_lsvrc_2015_synsets.txt" -DOWNLOAD_SCRIPT="${WORK_DIR}/data/download_imagenet.sh" -"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}" - -# Note the locations of the train and validation data. -TRAIN_DIRECTORY="${SCRATCH_DIR}train/" -VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" - -# Preprocess the validation data by moving the images into the appropriate -# sub-directory based on the label (synset) of the image. -echo "Organizing the validation data into sub-directories." -PREPROCESS_VAL_SCRIPT="${WORK_DIR}/data/preprocess_imagenet_validation_data.py" -VAL_LABELS_FILE="${WORK_DIR}/data/imagenet_2012_validation_synset_labels.txt" - -"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}" - -# Convert the XML files for bounding box annotations into a single CSV. -echo "Extracting bounding box information from XML." -BOUNDING_BOX_SCRIPT="${WORK_DIR}/data/process_bounding_boxes.py" -BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv" -BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/" - -"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \ - | sort > "${BOUNDING_BOX_FILE}" -echo "Finished downloading and preprocessing the ImageNet data." - -# Build the TFRecords version of the ImageNet data. -BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data" -OUTPUT_DIRECTORY="${DATA_DIR}" -IMAGENET_METADATA_FILE="${WORK_DIR}/data/imagenet_metadata.txt" - -"${BUILD_SCRIPT}" \ - --train_directory="${TRAIN_DIRECTORY}" \ - --validation_directory="${VALIDATION_DIRECTORY}" \ - --output_directory="${OUTPUT_DIRECTORY}" \ - --imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \ - --labels_file="${LABELS_FILE}" \ - --bounding_box_file="${BOUNDING_BOX_FILE}" diff --git a/research/inception/inception/data/download_imagenet.sh b/research/inception/inception/data/download_imagenet.sh deleted file mode 100755 index f6c77781c..000000000 --- a/research/inception/inception/data/download_imagenet.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Script to download ImageNet Challenge 2012 training and validation data set. -# -# Downloads and decompresses raw images and bounding boxes. 
-# -# **IMPORTANT** -# To download the raw images, the user must create an account with image-net.org -# and generate a username and access_key. The latter two are required for -# downloading the raw images. -# -# usage: -# ./download_imagenet.sh [dir name] [synsets file] -set -e - -if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then - cat < ') - sys.exit(-1) - data_dir = sys.argv[1] - validation_labels_file = sys.argv[2] - - # Read in the 50000 synsets associated with the validation data set. - labels = [l.strip() for l in open(validation_labels_file).readlines()] - unique_labels = set(labels) - - # Make all sub-directories in the validation data dir. - for label in unique_labels: - labeled_data_dir = os.path.join(data_dir, label) - # Catch error if sub-directory exists - try: - os.makedirs(labeled_data_dir) - except OSError as e: - # Raise all errors but 'EEXIST' - if e.errno != errno.EEXIST: - raise - - # Move all of the image to the appropriate sub-directory. - for i in range(len(labels)): - basename = 'ILSVRC2012_val_000%.5d.JPEG' % (i + 1) - original_filename = os.path.join(data_dir, basename) - if not os.path.exists(original_filename): - print('Failed to find: %s' % original_filename) - sys.exit(-1) - new_filename = os.path.join(data_dir, labels[i], basename) - os.rename(original_filename, new_filename) diff --git a/research/inception/inception/data/process_bounding_boxes.py b/research/inception/inception/data/process_bounding_boxes.py deleted file mode 100755 index 5e9fd786e..000000000 --- a/research/inception/inception/data/process_bounding_boxes.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/python -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Process the ImageNet Challenge bounding boxes for TensorFlow model training. - -This script is called as - -process_bounding_boxes.py

z$z|E_bqwSIU&kd|6A)MuU11!Q!WY2_hn(79d8yL9X8R9^*{i?y#(Oc7mhL@|be*s~ z-3u4-O3$vyI*>%PMOFFaTM1xrdG6?CPX_{<9S% zBhlBl!7^U@@y4{QHb~loVO5EL$+MBzg(cP-JM1jq3goC{a-SIKU^(>%3+DC%;j(%!n@k&a7izSBBz9292z) zPKfQn5=w}|FV|=_r&#@EGK2wl+qbR`hI71JwYS+IS$?`TidT9|T0>W9niak6QN9#< zitEBDe9gODR+x9(A#(K}4zT%&iQ+GdZVv{w=+c9<#ya&p64*l5E3vcplX*6^J2t;P zo+U=$;wq?S5;V&ObvA6iUMyGJ`c}}dO^%Q~9K=VH5|OUWm~r;W#h9F&3>G6wEV0yVF|++Jk(XQ>RBr(Sba~@WDX3EtK?q2-cY>&K}d? z0W8L>WTNDfr3`8v@kviO6V$HE4qjQe@G06~1>hCSxkUdbJcjFJczMmUM z*yLS*fKB_`e)()M<=K`k;f6q2Vvtk^rc*T01LQ02#~W?1`?Is)2j*@mZqSmh5}u~P z&Q@FC@?iY@v14EEi}G;tPnH>Z#t?yb-)`K#s}`aRSOd0rKdw{P=srjK~s%_qvJ5tC=f@|@@X}bX?lEpUfzY=l^m1EC3@k7|DkN%vk*qu${ z@$Or2b*THPMupiASh|riw4Gh^ObV{h`w0bRcJJNgOuJU*42#Lrm%DHUsd*GaUI3^~ zFNY~4voIgn;%%Z*o_U6W>8mV9j}XHrS^WXl!99*+RX z@bQF31DNnCc2UsQ-PG*$!av9DdlQw^IS`VBf^Y43;SA*QJ_FbhMso1x$>FnV^+zS{ zOOnQ0gA@t}9W2E^zVIU5YZ`g}RLsExJ%=l$R54=AaMmnDaXyEd6$8dB3gp^_!>bU!5G1fo56YRt&~!_AK~V zg_e3=iFk)mfp=xJud{#6&TIt<+5DI5I^ANJ70&f2Sn3$DJGHF(4(4H}%~BenRrgb3 zWgv@JBX%c$L32&s0FwdE0f`MC>Nw zr)3dqF~qQIp-{PKiss3vb6eSoh8tEo=;$$Gp(dC&P4$%92ihM*hch37U}Os#$S z(N#uJE~eN3W;XIwO^R zq{$%WK?`R#SBIsX#XQQK1nu)5s2lZx)uKC~TbYRn>Te9;`Sy=B%Mqi*TX6K=D5ml2 ztginDd}Z(JQ;tT}d&|`^=+kr!9x93kf#tSq*YQPr3j|QENc`S7uA>XW*-$}fruwZJ zRG4cih2b#T*jOD%5Z55@*V~d$aOS55_69Vf727Z&tp8UnTpS?6L}eO&fj{Db6mxCX zMq?)t5Emq_bvuT=SnyrSKHWrgzD{%q(^UWL6fQYA+$f1V>LiPA+f;A^*&D(5U07Vk zRf1Jez!ELq|EKFhFhAe*nM8`t&M!=cZmWLOa<{fAd0oTGSX}2D5iOSbx4WD;tI|2b z)i!CF*-Y#b;el$p{cb-ZUD>iYXq!b^Gr5mpx(&V_ViaZZuf+4_&9h#bg1nYDr9DJ% zO^qkMRuWn928z535J7HYk_x3(X5;@we@$F_hDqwcSb$d&hjK8p@dY0?iT)o;@8Qqp z`~L5T+N<_TsaiEk&1fm5HnnHP-YZr~T1t)Dd#|d!#YzZLdzGN}3R;v}v1^oi{qp&} zzkfm=k2}|WU+4LJ9tYoe${(8$~EGaswR@2>pyiv;!6X%QBC z3mTl*ynP5*j&9=*GiQAVz)$Fs4Xw9&-sH%qLNkE;AiHWE?}&ctu6}?`TVaq=W3Fw5 zsqtrfAyqTfB3^DBttMLNGOE`n@J@?8F|S%5<_+I}W6xaMbyGU>&q4i$C;Hh`e1Lep z$5Y;cgLc=?YVR5jq;u%^=t_ukR{k(OE=A52KV&S6xUwZV3TvMO`mG(P)l#yJ^144zad;XP-@CvRBUBt=77< z<86x(d^9;{PAhB(g^VnN-@fm}esXt+Bz9#yD1Iq2cP&vbFoSi4V+{W=Gqr^7V9saS zV*?VEs^2#dc@jx~7y4_d3miT>c`WKWuaDHgG!Yw{gi})d`koomaDvCsQn>-;zLg|; z$E>Kc>u9{#=GYIzLNKV+}RADAE_^q6Gr;u^x?T>y4yMv@;96fP&V-@#;d9F=fhJBB}M9 zDni&cJHXWUSN->KPjaG~ekZqAA>P?EBWfsV@}i~I$shU4gJgz1|N3pmKW$t1cXH2S zDx84kv1K#JvcJA0Gj#|FH%GXnc`n6!h;724lkIyn^cE9DMZ2S4T z>s+n9oG-CKP%NWREhm1Yg@+nRscxsg0b=MJkRxmoL-pA9^$jH`?g*XuSZv#xq)<ZCnBD59p?D!7?EiOa_|vZYURP!Pd1jL1Y4nL!)R9 zHI2wAt*Va_Km^L!VMdhZ>X$w73%NY zr!Q$$fw!Nr#O%iMWa&$t9fm5i^3^TWmEF&$mF(d=%<>GFNeX9uzf(6mo z88|5tC5GN*yLG>GIEUF4Xrzl9xD}6rmiPA-(N6MpItN?TRDMjveodI*uHX})K;f>s z#b6bQnuJtqkOk8R6SbFRDV?3zAe~UU5kE#D^+z7Yl!p6XC-(jU8>!2crCsqOdaplz zpU#XBVJygMLljy~L!I}xk%wjTmdV({KNki>+ymHIDSa1A)ZdNzk74&Xh`B;CW zkfesj^uqlW3zA@iyY|Gc!M0g???)}FC7fbP>F$M55srz&h3pvUtOjNaRKB;~d_9L^ zSAf;T`27b6w2!p^A``GaQ6rQl6~E`~fe!iL-I1el{W&jK;Ss#_RiK{{GYR>l4!&0zw*#8N^^&T7B3FF76c%_g+hoT_Lc|ItX z!_J6EpbipH2PJefIY;?jh*5?+N;xPg5))%(y_`G7S65a)`&iUFKX)ab<3ioUKH=-y8A-qx4 zSW>vWmS_9E6pgkGV?wsHCqe{eIX@>H)Lc$XF#0Yp{!JXbt6Byfe9IV9B_*W6MO`VG z4+l0~e)uj!8ee5N0YK7vwElLt`UQp|U=T~zgcuD*suHxYC0r-zBM|MyWRe@VP3<+_ zNm(ci#%m4dpJklU*bPMqau&xB6rW&)UeaUUUdR8oH(4keTce8kS=0CzwVt1fMGRW5 zd6c(>NmBxK=!Hg5@8FW%<;&LdqAjAkOyHQ8ga01fr;P_qF;bXaH7dXUv&L4fJT<2$ zDkAYxN7tdX;DFE^HK6&jHykiN6Fzs14mnIv<}I`MR@_Z2dTSY^8vc`1X;BC2)I;D( zsz$J=bhEf9q-Tze+QK2b07i0@@~1cjyE^ATfJ7}2>%bU%=OzIsu4V&+9~C7 zmoIi_G}pq`q`RYE-~){3ok>SBvr0nWj_8l`SV;RV_W10)oc{Pa{b*rpOxNal;nz=f z5)LI!lgf(UL4%~}dBPO(dLG2bsEzjce;M8Govr3hO9R)VA6C*SG}g)fqAGDWFmDw4 zswXG&YcqEIv)fOmT`mMIw1#-)4sq5f{k22;r|ZsCHN_N}@h5pXqro4R>Wq<2vAf0r zL`Dt2FB@lg(#h=O=7=Grb17(+P1Es+nlAu4SJvusZIXk7rR;fBaIv`P2l;u2h!ROQdIM 
z;LqbWa>^voVRYO%d$sPq!NmJH-mnFmjqD08lp|h5~hec@#TBB8~{dZf}#TY5gBI3)Q;OZAn6bu5v`oKaB3dT6aRc-5F~{+(w(M zs0&db{5jUKhSltA%Zm4N-y6BCwGRD*out9Kj%>ArrM4hZ8|?$(xunmgZRNg}W=zj^ zf#O?Nf^qhh&O#y7IYrpSwxU52+7I0WAPhO&$ynxyHzYj-#^!UujfGD-%756At> za7>Ix$=tr~N^FbM(NLdX>V5uJgRxdW9jU|gRXuXBpzX(dG5=4^&w@*|&f87o(Cu}% zx)Zn)9iRv-6jE9#6119)Tc7#`5L`ppF+#d!R|@6N?3~gKAUDQfByMS8nJxRTxnT9& zr>Y`*W5T)erw0XCx$lwj+QJ&oeVv7L58mS17g}R3`8o>@G%@YDHg0oje?PkVlL`Y3 z1q)E=O+z2)yiB?lH=8!&7=!u0*b&1YJyCT(68?3<`)6wKU!m76AvfcW41LJqAya{> z{(N}Ry-C@dXT{4sv$ib2`)Vut>{h6)O0(>TEF(z;nYXO!7UtnIx?d7RNCE&4GXgfh zl~!&J^@02s%KiiF3`UeqcRH~1f2hUL)dq^LJiav>s%PD+1u|=W2{o1ozSZ56c&-^e zB2-tH^*ymu{mSQ6d@N~Y-Is=R!mF&aE%}ELM2|}5^P#uKJ4+FU_hy_&lMAN2*r}DN z<)UZ4GdwRz2l}`~?!5d{pa4f4@^$fBt=jKT2Mc?cm!)=vFc|JuzFT_ZO#5xBS;0At z9OdWmhE%*0dFmEG*T)uWmNKn$FtxR5SvCGab9FpXH%;@k?`Ro`Jf(zl#GS~rvdo^7 zb2`7&k!U&Bi?4~&r~a&YyHb}+Xs-ZMPxu~eW8l>9((3iN5WW2gqX0+Qfu-o^q7HLC z(=>wsH0_Zhd5hJTmweso_o))3WqW4JMNF{E^$TZUM2xggo=E(IcqEvQQvd$ckv&BB z*tI;-;};}9#KKN$`l0iJK2!Zb;D3OCe+#9txg1%}$c{-y2UmFip(m@<09!v^$399N zzIimPYTP?A71ROFqf6GUcN1T86(K?S$jez0sdjF007`KSNqq(XCFH zbCl0$bYFW8$>XZ{`Zn(XvGKQ7UKh`5KDaCcPUq+FMDviR1)u*!n-Byddcgf^U$1OTJ!qHlqW@#lsuCZtEuyI(&5%NAc6F$-MJ*h%3o61O-@Li{^cR= z`sz_Dy`WyYUEgVEpzj}cCea$*rBXHn)>#9@o1ly7=S*3@+xq!LxHQtZxMj>%n(~8F z{j{^@(y7E)O7A{{Ev`S3X4gJsIUzXxc`f@jM@l}(4QV?s@u#?cEo=!z$qCtmdjwP< zG27eYD=lbqmO}J()4W*?8uXF1Ct45qBDtdmbg+BNQL$>Nb~~-fJ15fM=tV~#5hr;x z;w0Mj%X)%u`}N7bHYbq&nIo-0rt2P72KE-#IKA({Q0Cr5p2=un=I9=5vQ~hCH7;A* zrn;@9)O=U`Arq(Z_ZeR4CDF8a5Y>;eqw!u`q-Or2%xYjpneyRJ*XVkNJGXwODO_=B zsY})TJ1i(jl&PRutYa?ynkrk=4@q-m+PfYt0u{PAw_gOvS zc*c{zU%$T2Ju-iukfz6tCU{KkP(D;9Mm4%`V~V{CZucE)k7`w$ahrN4fQszQ@MRCO z;r&dseihU(+hmzT5c2F|D9?ovz-nxh_%LV*=Tnu?=vgEGXB!m z3Ay+?fX~U#jZ~|c`ML_Z5`eIX03-UIc*YK~+$&ErHw$*Ff?ObM7utBNPOe|CkEWPS zTsLg}_dt34d8*NkETFl(g+VC2Cl}ZT4scJC1K&U=Z*9dV{yWb2bHDQ&z>lsY-Zca3CQlQ;XWKld~*#H ztAkPyDn5)ZJ@p+h>{(!}|M658bnba|EQoS~{$ygTG!?N574*n!t=bcr>0APTWZqY@ zTc@S51X^AK6d%;K`zYajHspm83po*vD1n0j3@?oxHZfwz8*Sf+wW?pSf4LC5#r(^ z%JhwZy4xRTwkbHFP`&L|N@5Nmh{)tvO3;m2LyTqTakMmuxSD-}_Q z+GJusa$@LKcjN+tOw3lt7A@jrR0($KN3Yw2wE#MS-$#DG_f>!7=q}W>gxKu=CQAn? 
z51bgiGZj#Bx#`elS`{|ZvT7{=iYbDsA)jdK<3lX^2FUEQ(&+1$K& zm<&`~@wZ0~o2?BhP|Vd|9A5$fx* zE=0?CFw@VUr^pJqvM!@h!Q=6)^yYC#9EXWtU@o-o{#%tVcw@c^&=oKeVb|VG9}Tv9 zj(Idi3hN@~E{`eM1+F61SN}aBfAg;6JwcM|(BxF>umvpY{3Fh;IIOKnj2cML4UFG> z3G*PaBTA@6{@OXn`?r=v+$ilz29zb=UBfp$K?6$FhC=B6QV5vNNl;3c>`|BytQJ8( zR?>r)H0eBjp%p0epe;J5sAERW`V=&5la=2UD9ZagFIKno^`Q#LB<2mtJ+*Zxsp7444@sG^KZPxT;FKXATv_^y6+~<* zB@LkYtnW`#P{>Iyz*O+dwx$%Rd$fMmSe&6*Jx8Se)@q*kmj=2D?hebJSKxHAbxe+@ z)SoWOG`~x1O^}iFqwq+kPC29pp+9sNB))7mpi8wDlg&vF3f^9dDUD-H)6|zl!*9qR zvBGoGGrvrY#J4oBRb~7;@DTX{lS93X|CjY0RHmatFYup#t z^NQ-xj7F`tB>(EOh6-h<2Qr;LOTS@SvSpHSic6T5d0(#@a>zfk=P>R5+;M&L1#{D; z3uhyb&?WeZ*J_s4isl2Z-l`7Mr;J;U4v#uC-$6B5=n~l7c$jTx^cQRL zt0*p5T>CDawXy(}fuNcq!^k4p5@Tu0hmjDS25)fpwym}pm&A(k{oa|$@yL7|RuV<) zunA)WGnjk@YVu&cJx*!MO_x6-XkbE|AD;JXAMjIZKm=UHe$L4SviWQ8_N)G=5L-ppwQ1VZ4;tjX8*X;$5QkJwM@)D zDXEEap@KCwNaWJC5i{bHz9#>j7~IKOuc=;c$T&Ev!Avb59j>nSLPxy`G#MX7L5bdX z%ii_<1PHh?T3}V-J}rKM99@U>{KB7?EA4ilOsp9FhRNMt~*L?QoO`B+r3}EUq8<9AcIr^Eg{V$tp(vifPH#iRD($ z_M63xTEfeF1Ler#t}P}^KC$(yIexV3{etVZl6+U$zZZ6jBn6I>594=iv#NuOK6F1E zyf){}Ls>89aen6cCde^lcGY*EI^^wgTYHS5ss>>M##@)DK#|>S6lz9xBS5N`Qqali zH6~K&GN@aaP2eR5sKPF6UyE1o6fZo-=uPf^E$kGuNxpvhwz3!Df=NXCHad9g^&)=O z3tj*P6H5wd#voUMb8!hq4NUmp-B0YX=g>q$cr?c|{1+iLnA6hiAxn?AL1MRyUgZB2 zc12mQE@Bi@1edf6qY)FZwH4LZizd2prghZNgjUE=$lH2$NizKN?FgWUX5qZHoUD0% z_-&&em=MYkQPvP3Il@AI<6I8Al8k@@HymDUpA*w$K1+1{yISwdG(yH9m>+wsqO8uF zf5tpAai{g8c#mD`I?`rV2L>32lNKxi1%LbpfYG}_P-Z1YPmFo9lDc@`zkTOtvAb(q zWIhYmW;dP%)}Pmpr}`M_a{oL;r>K^WFqui0KHad&CbFJ=GUldJmd?Zeehe3SFg7pd zZsw%^kJTl?f{bx#ZnduoG@Aknn+&;hv_0)|Uho^&(N7CDk`^g|LBQ~h%I|B*QY+nN z%^Er9{=n8~8}S>C)fK0m#h2$l!*UvLT5nfKI7k~X@rp%H2zq!4WiP%Y&@|~b{0;OW z7gj=4v;h#zJD}=2icOvOp5ca+MuK$hYAvE2KSojaaswyT zp@Zvl+#6ys5j-4aY2#B@;-mh7W_Dt!zv#qw)xIImmJrSZvT9@&bdbYh#Dg)~YZlnF zrF!WMi%FK0y;=`-7hiY6_m%TTDH8Fj)K?{)8qUCMNw43}T$$=F{6@F&*d5B${0X} z&pfPfyFuw`6OlK#)AwyN`_C7tWrvPqI+WL-#xl8`+t-U1iCR_jdt^}%xv>GCBcsY@ zemmy7f;DXs4D;5sM7Kn=elmZ)se=|@S(Tc^A;xcY1N;YyT^c^Pv+juTSEJW|yuu(b3F* z%;nU(XK5PP*8PZw;xi*}O*B$98;UlC16{8+z`j7O6#}4eVcmPVch5blh03y)lq_pV zo!sI>YYyZB@iWd!qRDJ|?J4BJaI5=doy>8J<sKLzAGC*L@^V&iMC^_e5L0ify(7ea&@*~= z)>)svMj@pNH7+NoCR8El@U^6I%&dPCkTu=>4EPndB0JCyuzdwbzBGMV`?v+_HSbBS z@|tA;&f+zr`ZPD!Op%Dbi!@m3|y5(ovb+yVlmC8NW`dTPxb!X%@}EnlOFrn`~in(DP*sKC*n?xA^1AMKZ1*f z|8<9M6bhrAd`)+UdSB~1c)iG3N`Aa}a3j)ve=yD-Pm0iMtQz!1{ajR_Zrd585uzFM zd`PMWN-!#Na2tH-TiI9m@hTZ>P@l6c>$JV2^m>^rZ##Jp?tNPmSuX&6PDc#hkcnd<-d+JyDK_Gy5RTVE~2;p>{h)4FsYt=Ckn(+N;WDV_#=Xf^n`_@A&WHCL@ZT`?^u*}i4=6knNrq=6hHrY6HU%T;ldtZ?=(?8$E70(B1Dn4AtDQY5PPy z=}ppr8YAHS>cOH!bxSJ+oz9m48Hm>pmnlqqrIDdfQC?aX>>RC~Pq#mU24fV%-_B2> zsxmu)5cJo?I00v=DvJ9W*TP#jC?Bo9b9o>cQ2vEHHlxUzf z{oM-LR!t5|tv>|klQhE>d8-UHc8_3Dzk}YMXUVX;@M##f*38AOF289NS+x5*Ur*36^ugRx z#{D6rwfe-ziLTirAD|qC&kMIV)&52aI2IZUK}~8tzbtS*I=MCBXU065;$75TG36;YZj-TGP01#zF)1}50R!;>mz?I`0ojw(2ok& zP1nWL%+RI{6Zscvt7lF9Q%dX>Jz`G04V43<$aY-)5X5|UZ?V?H-(5W_{^ZA;d6ZeK_vA_ zTU*Y`MbLA}VEgs5&t`F3^9U zvbH^?;hv{5bSw0P;Y=KOhHaza9G*(KiY8{AGgVZ+*Vtu}@;LCqvU3~#HAl3D%o$X~ zB#hV_9$n+dwJv1?6)C}Gfh5Ha?j5k${{TURCvF(u-AAEq9oWUA5BoPc?OKslyK;&= z%`6Bg5C11|iyw^#?H71rpohiKtNSx|Qg- zztfGMl^0Q7pnhlOvJi9|9Bi}ijafb1(5!!TzE~8O*6u#vvT*QyZ4j+N)VQo8B;uX_ zn&#y6THKh4usGgyQcj*jA^diQ_w;X6P&JZa$!yU>Pa4$-aPVO8y;pWEu56^G@45CL zz=e3B(oK5jTC6;#Qb*537G&rDO(4i+Yh151Ho$Q!QlKk!NpG}w0jyzy<5RcMQ~zzh zC8wUf*=y^HCBy1=U0gh)|5l!|qByQqkm>2pR-?3_p(O7RS-Gt6^ndr@6+mm(-h{h{@bN$Wexnt40-{H(ZPi8(7TT+)S!cgdHj2krxt)21FdCLiQdq)}UD z9-_&h3nt5z^{ldll9l<)a$`_X_lEQ8Q0c1^spg4d_W`G+8HlUSR{qCU6ijSb?_Ohd z84|#xly|h386;n@((tK6NOsO7ZweKFtA5-W;h}=u-`$nkrSyPx1Bth?T@)$MnoLJf 
zi}8JMcj#HE*2d(wF`cqYPs)JRD7>h7oy*RUF%9NbWBMDYhX38;cPCRFLc1+Wr74qy z2IDbHAJs0p-7#Gud|P=H0()9KkR`PzCuYsft&R}v<>hf>oo#K3610R+#y%CcHv65? zb^g0!VX3F(MAaBY6&GMkWJBmO=s9@$z*d^|7ZC+Ts1S|mE*jank9e13#eR>s?&yKP z=WGJW;+(3+luZIIUAlI^6U@6AD;zr3YBR0o%x3oVUjN~Cf19+&8Nn>6Doe#Wu(DwB z@Fx>d$5bQ@Cbo5Q3D#^CQ%1q*B;xVjyW9~C3kXtxN~P~5#yx83<<7=obhdDRwUZn; z_X<>m>H`7(8w*p5*@5PBXS((ehIwj=c50%=^fvxIahFQ1>872ckHTtL;_JF77$R!g zO)*L!?0pPQ&af7RUOGJd*;{i`Ix7wOqwik<)MT_5u(yqOT@-+9y!>#{7{}|ZE-fdk z6V#6|$|u!w{q{*fL8q`FmWu2?xmKh+W5sBo`9&-_KLb_WhwP@nJCLV#m6p#|3WoOT zm$sAAF8KPHc`8JIVdz7`OF4o^yA-tGEmHRbe5dxA-*vha?mHARy z(@?E%ExU%=_cx1Z_J{U*_Hk|*&-lL@SU>7gh;(tC3kD6W4DyNVRP)LL#=@TQXMoMW4d0P$q5y4^>EhT7)mfZ5~t8G|ulzuhK z7?{`2nfKH z&2TGF)GMVI!Qd1gi|$&*HZF(s8HT0_2*0I(Ap(O}Kuu((kWhen-57Thq+PoKHh~VJ zuB<-ZI~Wu4I15@`o454UVdi3!^dzj*Lo4) zq4Lx!79t8lm(}>Ij+_coQ_wp76>*pK^w0KX%_=T`qq_vMS&5ap9wR%RVvpY@UfvTu z^RIX8$wDVT*K4#M&LyA%-@yCTip$L8g<1urP6#WU*2X%^FyS#-RfMrR;Su)d__3Pg zd}jS?K( zAur?FZ}VcsIWH(ClM$jkJG*Oc)x3huu`RzU(x-_#@aRfj_1lowD%E5zrd+F|k4DyV zBy!ZANM6=k>M}*3_Z?k|03_nQs*H!j_ITD#{{Mq@0>$wxGqo!yWKdV(i6{PQUT8Oy zShnf1XxdFIuD8;<=_P#Zc?G%!%Hl_yPZvL!FI|8`sPWmHHVlF-Nmlq6uZO}fW$bos zBQ%p&8zzJHMR%R1{GpTy@oVrMaT)SkYOVcx@v>Jkq4D z74aJT*5;#=AKwpEZ!nZV=hJ~4qZmH==IkV6S-W5eg9DiVf>y7~p4C3DE?RV3-;)1w zG;O)q6iXb&$Pp|LTr!(J%RO$WiW%wm*%W6HYd79%MXpZNb+#PUX4#rYGRe6IJ>J3qAuXbP(lIawf^ zAI2jfA6^$EFhkJeGe|fB7JU(+6pbqAd_J&5tn*YlB<;>veW zx7nxl_6|0L9Q;jXN#Y`QgRf3*4C;j=J`>vqnpaxU_ z!p$HzYii1PqI3V*dsDqPLKXY?VfG{5Ak}jk*DjOM{C1f-^veKW70dPcy@FDx;xNV& zs=2+TigBhW79&ngVaN-WF?x^5^lQnF|E*dPoE`5~?L5ILZd;ijFn;b^5ns^69_wIV zPcsI$Z4Y#>#kVqs3Ny7Of-Iw+^D!;)8jwW9eECQ-!Y0n2LhGME&u0P4bp74X@k5=^ zO>59jhE zaex2!F;ZL?n($Hi2fDm~XKAwf7m%CU*yom8rkVs~x_uC!`fx0=3H~}}_MX=uzEnzk z7A(m^D}iP=Z1v<{gJao{#+NyclkeFqN7X|%imqfpjKZ1W6mik9`W^}iUx7Bp-b_B+Hcw^+sbG?F=v(wUm2pru@gwQxb=)=Si+=`K&YA!rUlbXf9CHxRw8pL!KB1)GLDdgJ3 zokq;-jHoS9Go?vtM+KRiZTkvO^d(gN4A-Dq;mk~r4hfRs)s*>iqdA+WuHBH30S_Nh zTfqQNo&?2_}q>L}CcCKzhM)7FtmBOTi|Doa<3-`Gu) zW&#B6zZ2X)1H-mAm#+QEY6fPTYbipFGqwF{q1nITZ7zP~0f1z#(N}TUUoF1MTT3&D zYPSgMV%_A&>8FjP+V&+ysZ+n6N%&LXu96t<8U7}_NAgM)-+Xdu53%&+eKalMD2)U7 zC4ZjZ98AdBe4*Z1Rj2grQ{l^?G0S%~RD>V~T1ra6plHaPIw}YzrpB&T~LF+!Cil(vpguR@GJ1yPF!4qFa=y&`EK$h0sPM2YBtN+VtlBTaO zi!1%#vFqRrx?4x!tP59MbwR;N-&a5%1VU`^8XdLytTGC<(NOQVQ_XFL>drrFKxw^R zfp^u1XEV~dPG}%OBVG@~`@yOblf8R%mtw>kTZA zrqD0O=YzGJqZeOIS@5aMBXZIH5!Uj9YG1e(kF=h1JL+ZKhc>asMu6zP6pM@r13wIM zacfDo;9g=4!CJv5gTCra-1r9N{QTs+WLi9S;67ho&MduSaw3CAgBIWGH!;#;T0&1P zVFu6AquZu!zqt|*Jj_C~A1-*vc*r>zlx55tdM5_vw5(ZXvF9w&BNG(znyv(_?U`vok%M(%!r-oLmBFCoj>e51j#W@SrMHN zi(y+%nCLAO_K9(dzA->?^SwlK!weM7!o*0^n$+|)P+<$fLddvfkvcB}o_258_}MRZ zpIvJzQqHxE91wD`q>zNK+Zz}p!~+EYmKI&QhnJ<%9$t7}?88J{$;@~+?BCq5=yaw_ zn#fCP+*-SqvBkA}!vJR2{;HfY6+^?-5_-5j;8#Aa8Ng=@VBSooFm*XTP%%G6<^O3* z*44rED+}_@BCZlDAzeUh{tYlcG8_TFl~h#gw|P7mZqVk_MllfK*ThChAu$sa>nE|a zgyvk_`knf~B3ftxfj5YRS^k*zbSrUbDc^9S63}4k*fa~09O=xLfO$E6jd@$*JJr1>$7S8P3 zS{_)Co;_>+aH}y0jC3j{To|Y@lausZMYo{Cg(_V5);<}7aDJXv`L#>E??>$2U1e*d z0w)P9hK}9&gnmhOI!Xmupi+1*QpS_CH`BQvN z8Z>1Pne|I0;stl9-zb5W6Zpfy&@(``iSLmSESn(pKa?$ff zB!Vp&&&~zi4==R+-vCvG(I!kO0VtPF%ghg z3O*0Rm#dI;=Dd35>nf!M5aIr0R_YZTzHo5eT9;FHDueqo{c#J1T7(DYC zbT$Dz7!G)Kn1CDbx^zK%+Ruw9D=VsMqlVj;ah6@sC*;lYnQ-?3@Lak}e*VNT5karz zC+hZkHuXPdLK>KzryhW=5#=U#|38ga_3ItrY=FeFVlf=bG53)F;^HvmPmZyuiJKuc z@c@a%d)Q!CW^J?^$*h4s)Jb29?P5DgI-n(Ft1XEeEjb&IriHlIv08zG2@U0fsV4K# z(lu_4CWwcXEE9;pvBhAEHdT8K^BFgtFt!Ns28I^yVfRCyC4?PPk#$~Woy#f=nw6S` zg+sQFy1nnkvfLMO9YdFjVzj;1OJT8WN>W5kiw0))N50IidCy;*Tm73vj2?DwJ#tld ziMj{!`h(xfDj1d!cLohK$>UBjFc>0~+l01axvFAti7RAoHFFKo6jczKcCKg;d5@?K 
zzSq@ynRXp?q54du}i!w&!{kMHFfhltS{Z(P@B>RV;>Dy+Dsel~0$S6OYtJk$(ry2`Up>QRJ}ugmCss@%m=oG__5Ps`7mCd%wk~^niIxeu!vF}{O!Kw4FO|a5+ zn>$j&L9M!CtkVjOLrts>@R8G#Pz;>c(u;6CsLM5x5WTzLB;{gJrlnMsmdTg1X5d?b z-F(&dS(!1CMNTiL8lWA2NX6B-^{ce_)t}iz=i4?l!XtBt6VkOa<}nsSu4#w-Fi$wo z(qI<2ENQ3bN#CAfeZ|V!W7yp7fef)S8cK&!}&Pf$-&tVqoj7xrkah&E$R4`I|+mMl^LseIL|}W~PCho9ns5l+iS)2SG(f z_^dr($LI&n<{X8V&zy0c+VUX!geFk3EJdqZ;;}uE6we7_coEc{b6~57bJ)fHcYrix zdyS4pDaBFcs=N13F3YN-nv|ksp7(*loWF9!hI%td?zzeJ)H=zphv%ArA&Sd^*_B@8 zavrzJl@yFy4vnALCVHPBNk`)(@269eaY>7EQBdGRmmeeUOTx|hYSaztiR`|LD|QAG zRx*A9(hQQ;ZuC0x()}7LE|>o3oz~4{KhyFu`hVAV2XFko9j?I9?K*SNdMl?uaVw3N zZ|qw7|4aHF6~Wep&l`o1-N8;CwwBtyLJ?VLaBz*h6$vUVJSW3icYI!IJy_cvEvP_k zg8+7~+)fGAhRWZrLfZT`V-*!S0ywbBo~ScVOh=hy6}>eZ9@YuLC1=Yp;9>BextsQo zS#luo*KyRunrZ6ofv|g-iBSf<4rs1wD`?(X7dP_tQnJK-rG`<7g$feLzwd)tytMj* zZdK2xuF6YY0~nk}sGep8+GM{~@cLJ-)Mm*z2~=D;)5YPSM;@Z&YU|a+1o&-G&8XYC z#3f4*Ee-2dV6R6}y^)3OGw)6?_cz6t8gt>9Old&B^)uve5cEE6TG=c#fzT<>DE2&_WO zscTPyaX;Qf#i|V(1<~oo-6>Pn5BAVKqG0jXI_Gj&Jj-qhv7VV3XnHvOn`7>llTwNxoeBzLz8Ms70D~S6_Jq#9og5-wq~L?M90Qfv*P)_ zZ~u&vk-*7NPfgc+G=Cc+%A2d?_9)|pRMiP#6iMyU->$<`JLzSvWqHjMje5mW6~C_x zDwuVjhJX|9_3!9H^0Al%)w0ylI#*`dOoQrJnKpxo&!99SuLpYl@2=iSYFE}taARxr z-c&v@b1seEH<01fJzd?lLq6yc?vbqf29<`@c@KGO6YXyMr$F43nmxL+8T3YLMS6QH zwU);O_G9%VW@D&ojq);h#;k|&G&}Y2&eed~43YBL(6XsXf&r8;KbY^VG@aCFtV}%Z zbplIcr&ebORK8Xd^{F@9f3kXa=||sv7a=lUX%K|dQp%5Fs0j2!0Gr5Yp_5!A@U;9D zO{}B$-W123vow2Ke^k_~nmWZ%3ha}D-(Nu8f2&zyIL$+YoYe^Z4HnZ6>EY)AC?D zi-)mQAz5ta$2sTcCdM0*lNWi)Uf4w&^!2mWXH!oWU#0y_T{d;bd~~6D=Xb>U$^In; z1=SNDwTZELtXo&-ckR>0kq35G+Q#A(&=1y7@W&EN^rSm3&e%95K7u~E!zeo7NAB`C zl*lkUm0DfHqU}IL4h#c-e2Z+O?JKHX&QyW3ii7jamKe;MN0O9qnQFl8 z{dWKO293l2SWLIzQ8ry!+UPehzYQx-!9YP=L0nxB*!99`Wqf6Izz~w^f8?QWZ1_cB zOy1OTBoDW`I#ZIe<>J6f-AL2tBrgMNB3h1rSA*n|RuyzwmD5>^KfE3ft4K0Q)n@Qf zut{n8F+EH$F-b9dEr&`P_8?*~IJy2X6J~`giLR|PB06Wl4*)1sbwsfzf9%Ot@D4!d zs{NM-8=52KWcQBnv&>LxA4jBE#iT0qkKOhxM2<;4W4pSy`DLD85Ul zo6=$WSCs4!{!FAvuF09xn#ZLB-v8bsL`^KKI}@Pz@w1mrX)F~y6C~-M#R%W`&Trjb zvA**JbGTOyaM241tn$2Xa7w(HSn?BIr>SSm7@*|44u>=SI^Op%*<4FUdA-hY3Quj8 zh@%dl1jen!eQUj}_>s=!~b^qj~}BRC|pt=NL; zk*Xwwzg=yvqo%VGbCuXg;#UKZSC;eD1*>G3-|T#g&nH+h`J8X=pK{LtJ>LTVjfa_$ z=r0h<;C+}fehg=sFp+`0-&}Qcz%c%VzO0h#?_3(2{~ojawq@%TpWY^Igi*Ewo4?gf z9WQ{i%MUlh<0xw2HN=deFadPw)9v>;2@}In;hIn7=CitxY(^97S3cZtqTCdV*uX`6 z7>5@1omI%{`N;!8)i83grfMf{yD-$D+9GBjln_8pr632j#hew)+o1ZVZwVR##J`|kZn%Y60 zDwLmwPLCScsqCgQzDsB~6M{Bx>QtKYeG`Q&LaGmH&D&9RYqzR1emef+l^V*%+!60{ z>Y9h%p!h&YH-67{nd7ewnSz?gw<3qs1TL>9aUI7dn_Yige@Of+DN6hWUne(~-sR|G z2|{+ci&FiyvRn?V;1LSt5&Nl;xYX3dpKZX9D(aLgDa>xM%GPE$CD~9LypKA; zr8|l)_$;P-=cgUC%_)~BRZX6-N(-!j9)h+sIph`}H0b^OUXzefC7crsHbdTwL}30u zy52jesjdzC4IrR21t|hz0i-u6!3RVshAN%V0!Wb>l28)`q=QN?QX{>W&_W0hnsf*# zy@i0FAV`xAiacN5dEYbhoilU(Ta%r&vvxAsYwdMk*Y)!^ffQvG`!&aBAfzX@9Yq>t z`7??1wMH}zG7Eg78!Ns$n_zkbF?@@uL0I3@Y4YM#eZ+n0 z=={VUuRag)N!``raCfyv$5OOX6_&~}TK?ej?rN96+S*2WSE?Q*)(!b9D=B8SRr5;9 zC?zS9k@sEAi4%H%zXuhluQ_QOowNN{e2H+yJx{bm+lWN}L8c>5i#>iklolYOB-(s+ z5tb7-^sXkQq6|=DQy94i*?NInUCv`#DR!I5|DXyeg?;Vha%C>7j_nn*q*8@3-8h%R zkv7-+lI0tDnQz7CdR=^Mrb-@007kLjpf(q;m{#Kg0QUM%WLX}`59_G#2HQYX13a$w z{f>sxb`mxwCoujKcN)TDF^Z7A)cUNAQ2V(24@f zv?^rIx}7^9uirFz)Vn?E%z%2m-(%r+HFI)d@BZBsQ{Z%A_|w$I-C0!Hxn zpxXGx;T_+XsZ4n5tBNGsuu$8#!Q}S@CuCkr_-8f2i5&Z!ZF1$F?b4#noQs+T z$CU|mV0eqq4}5*b+lWTkl_ImT-73@3?NbsKc_NkUn_@Ts311jR`-}SSmT@ZBoI2{M zxy5ridr)yce~bGx+~m?Jhm`1gw_|!gKB{ZXBx|dkS0Di9>hkB8okBiNlB8<^m56U| zt3^#W*>0b)LexPv|12sLmsGXQH#`_7|6$uUcgc6w)9{x^Ntrb*-FfqOch_bP&ciAg zwQ<+^s`7+Ik%*yEKWNWEk9uv8U2r;6GADOz^+h`Q!hPWyd%Q9SXDB}G`;z&yn7yk3 
zv-^D0b>q@HMvGKq%w0?IIIxtn)qVfnnq3x5x%&D&>H&9XvYCsEGfsI2O=n3omWuPH z#b52#-4|;*FBv;M?Z9|FbBqDl7FFL4ABHh*zRFU0taDQ_a>}}Xxzve2icgZPzepqM z!gT^>05pmnne_S@N#s?=Javq zL&1i}CE83#Ku<`w-52M_&u7n@U&=k>0nVxCtY~iGi=^b+_HPY5L7Q@x=$AN#*GTW#gSezjNKW-0Y;GbD9cN$ml`ygq(%5@Ob0_BnvCPMhtIA>Efgc&>>A9ph9?oRAwMY^8~QImIvX< zHgWTg;6rLS(O!PE-e82h)wa#c|1O<+%*i&Cv1+_+Idx-ILn>F%GSqWWT)%N7#x}-p z;J1tC-^E)laq`vI;NSXHcAO#;ui+rF4# zKL|FL5#jJ)_ar@soec?d<#W?sv#Zr+k_dykmwqAqt>*OA-2E58LPeK*c8M|VNTBPoB{v-4>u|JEc+H`W9gW+!BBT{8W`=4;feH`SxP@7 z%=^qoiv-w^jYQR}+zzg5DOA2k105v1qmy%tDCTJ=>??~e{=55kMlZaI6mL#+XJw*K zu`VhJ)9I9WGwhI@SHUxm5gNI+w?|f1i1VL0Un^CuIRxDJ#~=#iX&h$H8IPj7M{CBI zna%w6hivT(8UALG)T|VD#HB#+(Ah`%J_+1?vn9QYY1YQLxH?0U!f+pOrlZCd7B29< z`Ql-YckV_z{9$S;Oh~*|GzB1la9I?H@TB)R&C<74CWO@vk>In}OgWQ2k>71nFJsOL z=1bFGwxoK5a-5P?H+%vD-*BE55s|^O*M;~q(z~!Ys#F=>EAww4zHfaM$msOT{s+?q zELv57m#x4SbGeyGVgOO<$91K(c)?!%doD0hsuqO05()knGg8vO7VdO|o$O(*(@Ke9H9NmJ~5 z@o?rS&{yeuSK!u-K)A95SLK_vXHbGwpjBNm9ZOvAomNhvg*d_|FgHr}>#r47hIB#a z1@as2n)(oB6q}y)C19Kxq#Ssr5!q|5N%5sE*=URP<`@)lNHAEEK)F0u~S5tvbF;V^#DW4Cl70LD(;lJ5rH# zOn&8AKd+B8iqFqzZPg^V|?#U+cl%C;{JktwU^8kw&*Q zgGHvK)o~?M^1~{3OQKmUBKdyOyK6IY??9X-h|A~Aui|-r&XkD?h-;et7L&0kRSL&F z<*4Q)%$}mMg0=&nl(SrQuy@ks;ZZbME9JR%OdZQx4YF#8F0%EIjZU679{;|G2flo+ z#+lB4s9y5YY2wuM{i+gneYgYG9Pgj3CzR$KH*_l*iYJLe^ps6icC zp?W_2Yb1tuy7L;CH@ZFdgv#MHvw1VMngi73;B{vXn(Y}NK_JV_!lBfhe!q@;p@xdI zHXnK-W4=J2p4Sul(peRx;|Kuw=MjKcQ=HmW+l5`-YW>?KXl{Gxrh4y?`bi@Hehcq1E2vo(J*yi}Ypxgc#5~bG^=cE8`NET;?Kmx8 zx>?e!tY_zj+j_*NWl$OVIJ0)@D2IX^Q zgUYbTC=dgn($KP0feC3_la!bm1^92ly?HEgQR(40Wfl-m$ivS6viLfmkoVTL=eZ|k zk^3L$#$A?0ztzn77}($G|5}v_?pA7yvY4XxSS74kCr*y7Mk&Jgh#waX#w%IH9cGlISed~V3Lju@}u7HVm<=g;Ly?UTg6 zUHOd>gY;&!UDj87TG*6o=A^Kj(;CHCCl|vxJpES*k{_Q4ddv%adh@&j;xTMsazjM@ z)KoGykN^9jx8#g`cmpTBQ&RZ6^glkk0@gE9KYXtOsyzS^q5vd7WESX`TF2I^6BfKZt?JH4l|PO0{mbN$1Qg;!kO6H6y-_l zv1D6@JaIkThv$EYpLI25Zckns>{Mo9`0(Ib!REs7wES@OFfrit*RUXz*W4|(A)R3N z{;zBF-h1-#fRsKq&3v#5T=|B|O3TQuvSZcy|{U168e- z{&xEow6&#E_-b{duLk0kS=gHn$K93h`WUVoNx!WLdY(JLC5o@7*qEJGtW$a)<(;fuWr`Ts|C+2z*{e9}(DUxIH|DMw zuk)-JUF!mAN{~QLDH%A(heiSc-R`Q2MX)@+5ptYt!<4v4ncc$VK^@o{Yv#ED%pF7M zVZ%Rbr&Yp?ttU}s*yYiv6p1+GP3*p=bgB$Z6}8m63ETm#E#@pl8Gu8gK~J79M^MX( zi0g)M3QqIYcRhMIiy_V2MJF)q*oN{PA2}JtjO8<@!r_4^S?~1R!H%g2Tq7NPm2AMqGxiQ%Rdi! z_ZP=8LOT|w8#a4?>kqyz>1Jb?39Y`e{CP`9dXUs-4iNw!#78VBj`Vk{&WSFKe{Z<) zoPdXPMmddDsRw{mhYU-ymsOM%9c&njYkH8MG(4V+R(UMW?7{Kwo=~4{u#-2YoKvB> z))O)}Dwrt-5Hu-i-7rkqjo;HD%z1vFdg$WhZlqne-P}+E4xV%YgO|EJY}hA1Uwa2@ z*^rdgSs=DM?%E#K#aF107XVm_jI0+Mv5KDwVwkExkyV@fu5MEaDX`+W9?3Qxf99U! zkD8iC)?6*C+;$(#m0>RpcE*>-R;##T)s(_ z5&v^s>6Ww2foA9Qw^+%?Kr5+xp*(R7d^zmAO}oy^J#ymW&qmem0oX+6)k%ZZgM``N zmUKdeg9Mh*4;ya%FP{HZ_Nt(cd1xCyuF)QX#y)kgYXx6fWNaV)^iIg&j+w>EfQJ=q zcHJUnq*;kgD*S;_4RGe|V}xO~3utj0;nIDD=xzkB=#V|p;i){rVs_R^>zF>`9c~vV z6NV3~R{O@BpQ-V#((b>Wg@$81E?~2Hg*f7RS%{pLkJA@wmww@4Jvy3NE@GvoR- zvG4HHJcx?NQM8#7X$@P@-o%@21w%=p)%iEwJV_&uP-0pw*`yuM`@wQDj=E{=1)P<8 ze$taW^6{-h5{|yI+t7DS=vkP@GGQGOz0x&$A3C*6(A}*R_P_(7b#}~rT=~2<6QzB{ z(Vx?Y(727SUCB`*>J6c>8X9Sb&U3RU0{P=rMzN}t0omEo?fWTmG!xePJ54UHQk z0ra+E*6AmhLzxK=e@}M5DMkGm=5->)$S^@u@v$9}-}3aWm}Eu!`72wY@;aA)U;^$! 
z)unr}Kb-pMLgP0@l$1_`Ge)~&jf~$5yZfke%(-1hQW-F3{vIYQBJ^T_~$9Q>1uhM!VJSn}-KlR_3Hx zLssB8<)_NGwP9Abd7?<}>p`ZSm52U-6xe%HIIAUEtN_G# z17!BvCl}M=^=q)Y;Q^q`VEn~r8xf!0kA(!cqP4@icQI_!fFzyfBl78fuceu=Iu(n;qlsc}*ekH}ciT<1z9r1NOKC^ZX_J=v=E-l4Y zN)hB!1OP0Hro2yhuGh`~y8oXWY-2uOgu&COV)EHMraM#3RHXH%S#AvLM%)tehZCPX z!;}wj(g9+DAm_0O2G+cIxG`e(&x?*x{BL65D_R@E!~r{%*Mp*}6Zqf=O!Lhm@y^vy zD^rlbt0oc}<_pOxso&mp?rxycPb*WU5!#n`ju}=A#0m#J2a!2*qpmo|o$nw#QSifh z?2t)N;~hTo_Z;B7!mTDo0v@8SZ zDos>@b44(QrS#g(+hosp|3v@tj=MLna)(Z(m_q_Y^`c=*nHSP0`+|fjp43c;mq71N zqW?!jW!&3bu^OYMSdeH5O?A1Yuq6PsI_hF0#`k#Iv*><_ zCMkpi)0JSE4U})J2f)O{Gf#7*c2DXHGSkd>iUrMPl3Seg)7MuNvf6Lc+Vh2v%CyXD zK9lqpcp6JMtaEXH?tWdHMnSzw$SFe(*M5O+E-iaEQd~3PHd7BGV`uVPg1)C>Bdz#j zXloW0Bo`QMzMP7YBQ=zsZSl7>9!0sdxV3<`4i$Zx9+}9=gRb(=2m%kXbI>D8G5jl~ z38Iq1_H)-ZK()oOn~oJ6Af(NAfVfR1p?#*Sp1*l28#BFmVJ#;mr9P$bj}tr1YWW^- zyTtRhar9g%)hi$KQgDHlfE}eCKD10+>@UzM)LaCWsj9=-&t=R@D-8`6SU!8w^5@!E z)D{8ESmLZf0dEJwy22J33z{M}ooysa@39}Tb>$WD?ZmF7fyBYaria#g7W*N*QjdKO zS_%r}`BbKh9Le&mfWfH;9vC2Z znmUoSt!jt4t$q6WP5vfwoYAfK-aV!MBdp@`7 zHUFJzSMvF>iaq>%v^^wq4m3-c12tH7z0fC8(ugU}kCW3n$#=N+APu7cJoWBry|hf+ zyJMb=92oc}vA=6dljiaGwCY9`IQq+zH6rEL|BUI8;C3s{PJ71CzTWib$y)D$A#`&KM|X)*B2h_$`rxKCd;EzyIl~wECy=-K=03M$ zq#b-g{AYeGTCja--SaV3O?_*?mx1S+@dkI$$}Cl%DMW2s6^{{B_OoB|$?JmWN)MH^ z3$$m3#-P@HRR6Q;_dTH`%*8~+T=05hdA;|YTz7Nswp@g?SWp6n^XM~HNPn{b%ZSzl zHYPHZ8wIGjOEscMoz3k{cov;}T2Lhtvao?)kKO6v{tcit*3vFev0Wd;zg!-`Za=fv z@H0vvWQ-r@EOz8l%Of}pZNc&8(VC>R98@3#lY!z6t0?*I*J!`<{)s=HjJPSa9bB`f z0H!?Fn3;O4kEZVD)GiJ%(RlL5uN4;jTxtRt5&{{lVD~@7_=|n4!u!a^1}`0+{E|=F zxiY{U`Ij3VO z@YJhcNxlIq7(XG_!a3faCo2$~@DDG8K-K$nMUaUY%$-wX5t0i|*V-OH_^Qx8aKTJf zFln%qZig43k-O{gJJLyY=?}!>piOwe0YY0JnvgEv8&yO~oB2=x?`ZpU6}C6wLiT*@ z9rMY-l4xFib(xgAcg|0yCA_u*`7F>Eb|t*SdAlR0 zYN-okj|I6)jhfe1#gzEE#?DZ{p5!1Cy`6re+jUG|eThvOD9r-+q+5SX7uz;u+Tub4#BTf=)IqE3Ne;49Y8u znx>N;_w7F|a*+%taqDI%G!tq-@qx+fiWJd(CSj`|&g5Km!&w0!691y;bra!KF!@(E zZE|jLcqM=?%Ic7mRWo|TOFhV~ZdP*J%*^!RvyrOB-9RMT1F>s8-VZ9A%Z1EnzLAM>rHab#AM2gwhHR48M}vdzX`5-Nj=a)(H7mAAKzHUu zbhlF}Kb8#j&&!~X9=TB&UrF9VyTH?kN8SP2+Kf#xAj9?LBd;$blI9NXW=4g%28Yf> zG@!n9T+N{K@2#5d$3?vQR9h-EoIjt}8kFzcjXMoC5YHb&|Jk|kQfudL)2Ybi-e^`c z2|}I+EmGadCaD3e#utdbDDPKBC29fVbUo;ej~ud)4=!y+J7ms;&xiI|{?zeJA2l&r zY8!VFcyx~MvHFvG(coglZ=6eyxi^@;ASB}9?)s-R@;85fO><*i-ms>b<;wGBPU2+U zcan+HM&KhQeC|PM94nHRzaVkhCh)OmA0JJ4caAfp2up}|JW-P2rJJ}~GuqO&`}5l@ zTdxR}yE<<^4rPQq4AG<4lRw8udKO z@{9O>LW_`%ECU@~hwhuZpS#kFZ>U}Ib zD_Rv?qS#Uv`0Xr}nousjKl^v_soZRGkC}vyGG~teMXlU*%oO8OH;)%)$w%z*DI!Nd zmw7+2`-VU%daIE0Ctna<(D=;xlbX@y1@Fl}{|!Jfl0N`^Gvs~-k5AEAeY5)|Cp_Cn z;|k~htt;_X1Ns{n^jFFrtOk@y@|%$)M@#W`ZIScn<(p> z==ts{Ga1is4)TzLZColm5QW*ZPXXSecLv6UVBuzck;dg7a6yiLZa)RJX=71%iZuMu zm}ykEy~zh*xJ^4q)@3`E()}=S=dk|%JN_O4Zo&M2hB&Kie+zV@UOrzGa;sVH zS?Y=cyHpeuE*`i-&B%~ya?2xhZ1i=r55nxT4h43yq#s^c@TTp-<)XesW2Tq)Y22Zj z&MgE@)mlk1X`l6jH^0eeqQ=LYz2LHqWXw?R#q4+D+w9lUOdcnx5ccIQ_8L*1q-1%GZ%k8z1+`ub@SPKf>oE zH^uJs&25{)eAxUrA~|RaAENnD^RS*X>u0#8P&rCBw&8_I)z4}8msPEeZzpY@P3(=e z8f;<~69k|DOsXIbB$-$f1B@KCAyc;gS~93!6(`1B-}su5h4rr}{j@Q8<;rXEI#%;q z+b7^1m!ADsUS98*UsqI+8i-YCbq(^{BilatH2D*&BP0&=1K*%+P7CO1xwfDYhszv2 z6xD{A?mXAyoZBLPooX*T)PqiqB=NQMp;AP(-7dJ7^!BXb2qAji$z+fns8wwuQEhNvZf2DXt2nU z24xJrs&eJcd~kX5OUYS~|56RPpBNz~4$R6m{)G3wm3jwx zTti9LWIcSytw!)zGZ%weH2wWXh_npvH8XXqaH0E#{jwxZ2&mR5=SanUj2MK7d(<*gv2?rDC&3eT;AelMo zk}bh{_+{)8ltMqS{O(X^Qr6b{I+lPI{{9t1-Ct|!UXediVY-i8n0hZeFlXak)z~oc zNezp7xO=)Vf`4Tx5#y#)MKiA9ZqUEE1IcJ?@0%RdQ>anz2PPGsfNj?$c5Xjh=n%|0 zc$KMStJ7pr%Ndz10&V7ow`#Wb<1p_bM|g;Kcl8*S)}fJB4UVX^#4iiKPs{$ZRQKHmlkiRuRS;+4F0os=ak3xDD`GrnPb<5#&~-5 
zjyqX*R@zsJMnye^IK|F`CH)I9sao&+*qqE0F8^3vd+t+FOYRH>8o_17-GH^h_}Dw+ zI5NdN=j6El2^8mn99al4`MRX>M7Ikjy~+qsoXbE4!v(KvHAb4HG+Il)+k#=*J(T25 zIBcnf8lJj$<^C%Gv&YyzP#>PYQ``koKs+MzMYo4`|_?OD{HGg>|F zbao?_zVa`E>3iC?OQf1zdtJ5Xqwbp$Af6-TN8xKY%!CXfiJ;+` zCRhAZl(^${_e|w-{%PAHXB1)PG=uI03C`c$IV_j0b!BIzR!3Lrt5L5swRiS)58NJ3 z7Hc3)UA>>STVudUUYwM#&I(j;dG-YiR&Y>7H6dS247qGMg}-zw6laU!!+~B+&!AIY z;VF39RirnQAVG`4ng=JC+;)ocIr%fQa!+><$QD!;#_Kx~ROxP)9fGpzko2S!BXyiNJ3KoexY5WBFQ zA}77Rf_n+T;L6=wAMd2s1}}O`jRYxO7Z**If~O%$qMFZqvL6J@#g8d3AzWV*9x<$! zh%qab>k!6yJJVt*hDVXd;duMVIo<7f{Q89@Ehs>ru&UFIp8r0dh@g|s@kt}Sj#Zy z0O9a5IZ0ebms@ehn5mKR@%Sznh6aLWF>SOAOz-&dqW>b-?n5peS@A z2mGnE>!D z_&l^y^Gz&MzdXiFp|q+|d{LQ<=6V_k1ZnFu;fg$g3T7mP#>;P-IiH0CS%=TL*8qL` zgMGuXs37g6uFk?>p~Io~F!@#WljN~m<*>A2caO@YK>Rt;p-7y)LMTnJ+Kl4X*&)4m z1T(OQUd^{w$|SkwIYJ0X_-7`+7Dimxr29Rrt-(;dr{%u@EfU`vO# z39)ut>drWd$%c_H66f2QJ_D`!%l4OVwVR*i^N{e+#?tUxlpzWo9fcg87S&HPyVr)9 z?YUdD^ja`wnsD1z4eX+-Uz$G1>nNZw@2ljE>d57cs}kI0GzpJpU)kQBjm-+_>79@( zx5(BsL1t^4nEhrHC>dQuC^9N#U(LxW5=4`2?KoN�KH3co%U=0&CZ_JFD4iMV)^V zVW{B5qL}$DB^?z(PyHP7cKhPvY(K^a$d(0*A~DLvpiqqL&(veCqA=E-yu`+HWI`N`5xwb6HRH*!+_bsk^8oR>)hh*q1%|1jjc*>7yb-MB04dmD?(>@rpx2k*X)x;*L za7Q($`Tn6O`@Q1}@VVv{DIKrq@U#a3*N-gN!CqhC$3b}WJ;l*a*KU6N)Re|?`DD6` zi9d=xb;fd*h;@d4x*_yJRyO#OR0o~_TgR79^CI7DUw2E|&M(q7{Jz$A$B)@(CiG)z ze%&7AFgX3n1Vsk1NS%u^AZOkmiHtc)K5Bh}Wd9Y)8I#g`$=AM96`yqfK5|^drxA`~X>=*Az82LW}SN z5(RKIF)s~IQ-u8BBfb%@<1CBxjc6h~eejcrK%??vRCP7-)<_)Zxk`bpAUZyXAouBwMWbz#C^H`Py zm4);``GOul6>a{)OwRxk|C`31zR<9Md*=Jo1q$E~ZaAVKT=w8Zyrz}a_T3rW27 zJgRO}uxp+-sj|`IX0n^C=8T*yGB}-6d#AdphgVOS8QV0Oz2-bPzPkqMin^v4#pn+( zh+s{?! zD^=D{4Vq=$8fzYDsq#JloE5l~{K0x`z<*nxR=KbVsCRwlFKIA3% z&X;yLZMt_2XKqf~=Vf09l5GVe6PlrlN)@S4fJo8tBqFtE6$52;&z%xJKBzUcZv3!= zwVpuXxq4bRM08?=*>0-)Na*zTUf*$D`JmO$4UASq`am*FS{qnwf&RtF* z_D)_q_HQ+9#eLxAosUSW$jmKE(_X8%e_y&=D`6tFm>TK$3^QV_oaNwEdUxQskQ5TZfvJw< zi%vVMH_?_H^Lpu0$Q*GQ2XMEI;QZuvmnWfC+D;Y+G>bXPTRWe-657KqK0c;Y4~-2a z*-d+ar(k7Tfpwfu_Xtui62)pF@EOV;|lwzCSV{?Ex5kHk1v3_2>EYfG2W zp~h7AIFv=xJN>=JZLFZ_`KB(TZ$FcRbTA2Pj+DUMf4cjNr0$4($WM+;k5Kc8Pu#Ts zA|@_}>&Gdm>}@(ed*=L(;hsc*(kuailDes1MyG=5<-pAH#{c+bT@}T}>R`JdX~sb7 zVwW~gi0~FGc`pW}+cF=xq`U177P$Je;V+J9zI&;vYZGQxNoYPsvmJKLo>Kt z8hWF#pmMrm_W4ip9y*%VDwDe0y4_XzAMTj)rJeRnDw;^JvRzB&ZmdONFJYfkg;aN? 
zpwA;f>Ft{G8;;{(q^zb-ID1aJJlAjCPQ}qLFfD+9w8SBeYuO&x-o+WOgDDpxNHJpF|UB8TD zLuSW}nIir^%7?rop}@g%Q40B_gQ?@y$ehy7p$NA*E%V8Pnnwx63M`{(^V6sBuWTL= zH&Q5jC|TJHPC||rxzHqC+Z+zz;syGRedVYK9#?ttq9|GuQ<%nHQTu`z+8R*JQM7$_ z@sH?t{e{=F4q+QKMa8hQU!__`p+|nR#OF6BaD}Pg`-<5HRxfYHl+U$4eY$wes)BemvVdY7D> zbuzI%hWEV@^C*b@DIbt@R+|CzlwB9&+`lJJYqrijPj+U;3-QMQRk23J$>0xkT>LVa z3m4C<1!dH$H`RO^!>t&`)&esL$UAt$v`BpV=nNx_NNQny=1?oa3~^{=n&GicV>oSo zgR?oyNf8R?oYr>KTB@r?PMuuLKdJY4*Ej-t?aVcz3+-pCfI2{Qo`g zk%1!b|LgE-6J-ds%#@4p{Qr*Qw=i$~f5)9@zWnUlOUlKG2!s@R|6Jbq7*O_G_)f*; z`);}Z;X>R4Z9e*)a_*}Pt_DKS1~e18Pdb<6?7=8B|(oj>rz%z3Y;*$of@4E!H(Xd?yz| zkUy-OOrv)*5pQb$@C3OFK(Pv@=fg6@9}&~hM3g%O8>M6Y2Sh-^_!O|xquu%J0aM#& zC;tL)(UQ-LkM6|L`@+Z7D5Rdk#`4BV1O>M9IG>6zX}$I-Mb%LIiIE|wxhhG|C?zY#s?ZM=pWARk-#hY6!%+>T3sYUQ>@ySN?w0(Oi*Tt z(ofOx7|rN1_^0W~a3+_CT~g?FQ|L^pemJ317`bg$4la2jqxIp<5|U|0&930@Z3N?h z>RG~-1U2h4d4a%IO8g+VlZ0wT5N+V^d-e& zX$N{$1G=tz-r046N34GpkI8l%!wU_2ZW0^cNZ7i8l(i3dIBLH+l2nzi(htf=0a#5< zD9>fV$P++nLv`^HUuhX-b1P}Ez$i_Rsxm2uC0Nm;$#eF5C;I~XvH#tZI!Wj9%9JMt zvqdjTj_P7Lt=KTE{7n_h`NX8P9qB+S^;#NdBtVCnkZ;7}aumW|Oc%D5ony90%3k}Y zZM!zhE5KOt$vyu3Y|<&dQ&`&V>cK0=$$3C_!A7UzYp;qbYr!mNW0)JJflIi3Wo-xv zTWn2>z7&b)11~CQLw6pgdxm6MxTk#U=dH?)ZHc0ulqbi@?dMW( zsRyjWEJMRG<$?{xj_Y{b=2;G9DQ+~*=2lgrZuljmh-#*B>mFak-chi@l>rzn81QH7 z6v!0&ASLY?gjvy&`xXiPK2ISpE#vkhqa;g)nGX_@gSG1?_yzKoSDo6bhi$B$GQ-sg zmE(9`74lZH+*;G(nUHzqO(+f4UePuLt><&<>-@=Wp}y5vjk8@00nDDCFCn2X| zZ)J?iS!F10$~dh)*uNpxdqby9`YQ>dzTDt_iu zKU}8>U97HnbWsKpnnxG=O9pruzUxZH!cQ$e+prlQ0(&&<@-`hw zR~_ALbKcx)F)3#>o57U?p_heOYO-$e#}6BT3ol zru=YbF?W2qL(XjoC2PoiHg4NcUmRbF4pHRTpfDO=H#f4>n*=<$lmblMJpIL03k#N8 z`;-*gD9DgKli3uS^Wogvxbc{Y7GOqoIU}Y?P8)a&F;v|mVR>0T*Y799fj2ym;lY=j zD(`RAf`Lr8ifDi8S2YoUciV%(M|DElbG=$g$GN*FBb+7bQS__LBuK05$sfGqs*VYj zsjdynv~#!iRX=!pStZW7rzE@`kUSOCVVKBqEz^%`%q@gs#Dj4p8gKm}%sFYIJj&@| zZj7vl_+V7_95`8bP(Y(GLY>8}vwAdxQCErQ&ZgZ~fK^CQlzn67?fmT$;9L+|>zkvM zvFFXO-otmCKkQCF5@x>kZ#7H={BE*_j4yctm_f+A&a+v2?NH7*9!Yoa(!KHd!wmNF zk*oF~CHf=TODPeMsT<%N$QXpLoxsygsPgA0U zZJqckef@{GFp?Q3CJC{Se$Rgi-1BAT{3C7w)7J>O$=I>mGa&Qpk)a8h)&eO@DG2Uk zrFdqfSPwZF^}~NrQgWQ}wpqoLz>o~aswrhb8}L5+TVTL|czx<`+?zeU+amA7ABs9V zPS>r2SAShLkvR2aJ8X}h>-VW~Cz@~kp8!Y$xBL`zY)F4P@BaW$!&A(l;>xHH8{Kk( zT9l2{cH06OG<85hrxsDs@=1;v(q%NL;>)(30Qj3mUEC8Ml!DzSi!FL}U-&xui5&JQ;5NXDjtE1~b7Zo~oB$7{!Hqv@Y@o{O= zu>-4(H(&O+C)o{8N#jjl?{RR^?f$x7@o__<+5K$4;$voyr)RB~{7e@bAQ;l5OD|BT z=`l}Y+5K$4;@~f_nhuW|U-5BIW7vONFZh_Z*!CaR%l;-dXvh3X5B;VK4H&64uM$q5 zr?Ui&D%G}}&*iUw7B-YyP-q>-&~EB{2VFfGdMNMYv%lTNrUOswB>8*z*#Q&;3QHiM zJwlI*pH4}w-nL$vsr@IGG{+Cwjf0_h($nbiF>~pdk=MqT{9FcYB}m#UEWhGoZjYz+ zvi|^yi$;%U^|Jo}iHy2Vy879F#Kz4hNhj9J(totYqerv)*?+{vqerv)>3_w*rizs4 zq_U6uTv~KnI(=-v;$WX*+C@EVzvAM)$G7^~f5gB8NziNSW&Z#Z1dS%s)8fniCMq7M(uLoww@w_zl3?wWoc{2{_oRH+x2|> z8fp4XPIuZz&&8)tvbEoSpQnRZI)7Ow^zm=A?DAg!0Mo@ro5}T(b#!*|Yw2na>m>f3 zE9|1XKC(`)pVQ}HU8M$sg0e}`OMCdbJpc##*+~2iEiu75UkP2Pm?=~Jgp-?V=4;E}?E2lS7e9WAWwo}D_y8e#V2iN-@t*0!g_PoA`O za6yqAbzgosT(tLg>2go!8Vh>>P}C^hj#e=H7Ljqk!)_!SD;VsR5Y;HI4; ziOCkYM>qdufpN#tW&FkK^wKX*oj!B!zg*nhI#WWs#Ze*sUqAUj@8c$9Wp-xxyKld9 zCJ^%WVH#J|-#9vOfET8BcK+Uzrvh%@NsG67e{S^drN4K0;Mzog@5$>o|GDxE?(R$eEoJ)nw)qv2$yUU?iaUgrU~lsfo+yUy=2%WnrYzOg%Kax@svZ`s9hq1ZD!eu3}`&z}>gC7w03 zqrVvLup3{Gqi~n~%_ZoE3Lj_2FC-rPSeyFl9A&Fq&-828ZQ|V)7RmJZkH#a#4}MEt zujmJ?`pk=Ah0fc%as~f7XEZaf&A`W(Fqsl=d)9_OzwVHYNx#|s@0R|b zrUudwph!_X*Z)6m^It!%1DyVup!1x{->J{&rPE{ra)=L>RsZ~S+U;|ahf_pw8V_Xe zbr-%%zqG^KbEZ9(E65BUdUZ?S?bf!`Mnss5rR99M&?nDdA9$|tBa`mP%4ZckS+_NG ztkQrh7dZ`Ucqj0<9m^*4tNyA=DI2 zTS`q@CGF5_zdm4P6_Dgmo0mQhB4q~o9eP3$k1kdS5(C1MW)GP9`Yv}Vm*n@Iq)``l 
zk=ahBPnmz2_YeI5x}yd8VcPuNqXG`v&!_&T(O)#b4>%q37xT!!xdA%`;B?UR8-Mnv z|L8aWy#62I|6_XpEnNP@=zkpVAK3YiAokB8{2$Q$2Xy}d-G4y$_uTS7P5J)`IsZS0 zoHRC)ash>ZQ=RrSpOLSvZn50KnQfVfz~@zA?@b<47l^`*y((6Qk1P+SXNdf6PJ;LK z#E-V{!J7E35~o`HEQe$qHMX*o&$LB_&v|9Ub8}>DdZ<*2I@3U2#$D>7Fm)ZR9+}Bu zF0!xq5tw`8X8SM~q_k7_)>hh+y;Z23lGX*$z(=psDm!Y=8)D~*&WW%9ouf9d+o zM*)q#L@j6+otcEMNa03U`K@!dSf(ibatJ}^TRIc;^O{rbyg=yUhkl2Kv zld^dME{93qQIyO;*VU-HtteN2`a9YK{(f2i7EpCMpN1;TzLlfr{dC4TN`J3i#fHh& z@Z=JMWS^?ljnp!4v=zUQh?EOUZio#C4L8jQ2z{|k@+bJY;uNsyv|;73HD_~<+lOr5 z1B=|duM$c^&->!cyxK}&sP!Hps6 z(qAvOsZcSKGT%?T0^Envis{205Zh@b-II%)Wq#sl*1TX8FMamX2!nOn6Gmm2vC{t8Ggoso*^$$^{-I8QF@FRn9wS8QYObX5_OQfai^%b{y=q;fIkreAwB zJ1Mb|?J!9JMMrS&R}-=whwo&*2_MyO-SF_AgBVf{Y~#Ikx`V^)q}+X9F`T=o#Yqp! zi|;vg?!26|gT@69r~*y6(aU25lZKta3b8W_oC>hJJi>r@bhuJ^#rzW~$Yr}{7L@oJr$Vcrg@KxQ>g6wCdQ z9qA(+VNl>0)H?!1q%01ID7dcRC--nH%i{$gqOMdV*=lkWK0&4zDx%XxrQP?fi*7X( zfm#hb%vNB!sx3D^#a9?yzU2Syz20tuJpuO^4Zj%e`zQi^n%^$@%1GMb?)4k7NmpLi z+Wzr90gQm7D~N_nSd=`YzmpYjML%HlZ<^hA*`MFV%}A7ACdb)<*QztKqhpe#$myEg zR6nmPQrdjlv8NT|aWAoJZ|ayJTTiJ3Z&ZsY{<_ln$`?rUv@aYWN>;Do-?-Id%46@` zO2mvNqK^;EwH+2~nz5f$cjvEc`rJixtsgx(u+ofXhS#Gym|!onBqjatDQ-b#4-_n~ zpu6}b8<|82uWuEnP}=TzN~)ixefN_LeG*RCrgC2BlXFzSwujq^U;RA5bA@JF1QW#0 zzj|0>a#wJ0$N7IN>Nv|UW9#v^11sx~8a>&dDV;@?6IRSh`8=&pC;)GOm}$`Q)V7#- z%GLPiwiQ(qq+gwVyOY6^en<~?zuj>bGkl6sX=PooQ&jPHR(pn=Ru+&udGObzgdns1 zaxv4T_i_oJynfXR0i&1NRBSAhyv?BsHtaNiWOJp909g$vx}R#T9M;fvg~_f&!A*A7VN#>w)oXWZDc~DuIAOKz?O(!pB`xjHCw(tEN#GH$@|$D7`nh;J~`)YI_0C_bhg#t--Lq9C3yZ0!A_y)e4515m`0*S+K<)WC!r zpO+SdqGr0R$Sk)tA1#ksWT6cW6U;D& zP>4YlP_UNZV@vzFc(Mi{C}llg)v!kD2s?9rSb4!O^Z3*8q?rl!O*Q9@F7bCu#4J>^ zFjvgmE`>etuM;%+DhQuz{cInUv7MMX+epK|jTt#O>|b9iySEmdfi%fBsN~E0@DXI; zf9TR{!X3N$+nK%s-a>U(z&Yyel^}&bplI_G@=x-2<1?wT`b)&dfbl(bzwvL3FQa)K zJ`4+dEq~F?0oP#V$nEs(h-JFK2-cl&utpbs*piu%DHH~&qZ6fiM`!T3!Zy>x3jYHh znOZR*C{;x(I=;}(EA?ydN0Bd(S&YTC?y*S&F=Mbl3hDh%AsvEVG{la=_4w4$Aw~gz z(C_i5%UGXd9~IWxKKRlhW;R=oeT}`Z(hYNYlgLfC`ktWaE@q?7rYYWYzE-%>-^F1h zcY4o;$hc%(PB{c$RmQMxqa=m3+G4`?{6;$8WuymLuRKAhP>D%e$R8W=&&4Zw08oBF zp7klNR5C7VWvlS1Yu~(j{|}!z_JLO_Kbmug`aeC9o&!jNTEn7| zS?gxzZ;#*ba}n+f0Q~o`r=w&ewhA2D#!@huS*XZwO}IbV)cNw}MTt^pS&uJ-W2Sq_ zQ6!_6LCOA8c2lkGTqQwPwTC$tJc;rZa!NGCngs*`A;~wW#UkZRN|#EeU7ab4e!Vzt z{ij*eWg@T4GACS&Qusr>k&Xo-vAKvhQ}?*B62hD*8R%Luawq>0=YCh>6V{6>2U0lj z79&^1rrA11U4e#pw5uV!$1`uyVEvWmJWs=VLt69u-B}_o&Jpj@+dHlq-871t%`xJ^ zI={?^Hyn^lzyQzesB@G?#n%4{J=s7MX4B?eK0;Rx+NR__u@k#}_;G`ZxNA4e{MUywiY~THU#QD$dPvv#n?#-h z61hs9=tB~fG^6SCTifth_FDIvX%)z>mxEpdb_`Pn?AR_u`L8X8l2mwWfh8(cF`9#5 zKZecho5^=uhhPCB!6p5*3sV82Z1Dz@lyn%@z7ks_pV=%H2{QG*Ut3*e2TZo7nKUsL znH3(4`h$T;r`-W8glzrUjyaf1FeuqHBe9n%{&yYx@gR3udi+^w4M&s3(V=3~_C*Mz zNP~AwAQK04=%juVM|88{dA2oE8CRbt4gpn4@4%>Zc_L$P)NhTkw$K+yMzPXbaZM=V z86nTHcm7&PR|2t3OX=<_)QD}Kb`L4ZpyE)?AJ6&wQNl$6P>tV3P!L;Yqq&HN3$G5f|qD zR4})3%~Vc}^+;1|lcdLkdZ0Ax75DKBzR7ndk=pOwy3bas1T`KTg%jR6gbM2xr6E0C zg01cIhOC~2^!x9de-PE23VeGt=qa>hZn&OXKQSpYZwmF8T>BDBaxl68M2eO=7_TNn zM3esK9fYu1m|=b#^1b*G8reS^FY5ZPMOf!bX<<7b^&rB{s{@Di6J3w zIe}c!B8y~h(vdB{`6oW8=QgOnFiCvTw7)=W&u&%?5^2aQY&KkA>3U`!H4{$Db2w z+fZw!GMOC00Img+u4U5)9=(d;`JFS!z5J`%L(*RzKH~bdy}`9E>nn{*i`>bVhg&X4 z?KbXavyoizdfK};QitviglCw^)#p*guyRG2A-+{D7^mV=&_@2c$a?>0w+;_StsE;G z{M|k!qKcYoICWLDW)K?Vslg@N<22<eS};&!UyWveYQS6@wUFMgHC!(=Z;wdy!F6qQe39wU$CGj`-m_61)_@^f4f9nx*x zd$k#z!-tNLb6L(*8<=ZSdKER8e{sMVD9FL6dtd@6;!V#t6{?-{aY+IWb{!|NMp~O- z#a=7M#p+(cAoKp9|xU#!H`!a`7EC!p(SNN`2U|yCKd|;;gf@kx9`U^WrC=I^o8r>WG9#g1a&o<|pJT zdwl&gxe)hwLmOiAFR1gW4=P;!Ept1**3U)S`Z-n17%6B%RBSLxH(&A~Wt(qtZn4+5 zp0KIGANvT&@2qH7w9i;V`Keoe=xfL@q^7xyG?hg9gw5n2tnu@jxsfn0=Cn3B5H3fLy zB0vxPcQ$!+#G 
z2}$~Ob^#8k!fi_UN!E1b{An;XlU(nFocgCkv^{BqGlRz``M$Q^JUnuxQfsV!G}T)N z?AQ7i^qq9t{Q$GDNea}ezR478Ov(g_ivtZU5uG@hM|E|q$jIWzU4ISqWe9giF=)qa z>Q&L@$;*dgx<7WqMUi#4nL`f!R($3z^Vw>}R3{yT!A6O=!u);E-7U5y(!Lm|e<+`a z`1NJXEe}{Pp$V1-(#_Pezp;?)7?eLuY;?;s|HZ%?T}D3R)$2ZP+z)sS#^YCDh@h-huM!;h&z?6bJ5~ajPVhiHjIc1uThV+ zEIxeAG7a_jgPjoMQoso+UK+UdIgwcS0%{s>l2~zO}CGHy^nT{oQ9)|t&+h0hqH7% z5UjfP)o$N@zaUOlY{a91C7Zlw~xW4M?tw@AA%;cbppZeX!>uoIbpopZI zI-Y!R;Y;L@I4v<1$ z?+G~u5CK@B75f2P7ng@)X0xQPuCBO8|L2j$j}2H&|2@Kalij$5r7rxtrU~ehnr8P` zS_P{7&lEdZ+GDl~eQXWr!R*xOD<{Bkf@o#*&vCmbr}mY>Y_&>z2V~P;>4^Vsp78cc zf+r3M6^=XbUWZY*Os0les!7lscsK;bxQHpziFvdht~b%i?<%!p6ls;GbVzK>fh6IG1>|RCQ9D&3=JW1o3`L+8V0yT|}wd=mi-q zyAg<9;dx$<{0eWZ0uAOlI(Yf!`^|dTgm@9rguWV!jge|;x5T;^y;sO|3@>*^Cv+W0DvbtxiBATmpa@tPqu#BkGHqR=V$gF)V7j z5kr!2kX|ky$!-3M3!_j_H5{3x>78Vn(bxIm-mz~TFX+#nVKFnBH_GlTcTrjK+t8}_ zbOm#oxLPWw2n*@USIBI(!vrYK+uGQK-Y7CN-6-@bbvT$1-t3qfsV^+ASe7YXn`y+f z;p)nKRYsc>bQZX_$2u0l1}M-ojpF_45! zA=$}o3PeZ56h2R4l%)zjgDCN-tX2!9EVqyWgNyAq29V2L@zqueg$G+$;r(f-CpLk_ zY`!W)-MMIFsLr&%k<(@y2HL@91ul*;hpG;il&PxpO0K4RV%I`h-QVRvyWSG?o9-6O z$?in-Ti04Pscll5%E>1_bA^HY(h^xiq9xDe|8f)=0SgCbA$JL07CE(q)Vq_)`*{l? z1e$X2hA|>0H}*bekivPJ3QwkPp&4e`7Xl}VW?STohO1!eklN}9%)Hgn2LsdgbWUHL zCt-|6=275hJQ&1s>99siZi@v>hUw+vUrgoEUp@n;-SaHOs7Un-id}<4X$nkRYlgO1 zX1%L|6K@95j^4<1enaGOghkbt36mn8?>U(>RO2!j zDqL^>)OivhBTVR|Qjob31}4I~(<_OlEgzluY$qZ|8h2ox-uNmlHK$tF2(o@5`8iy% z5uV6Qtvj2H(cjU_-We=tAg{aPIca0}y%eHh`hRq+tGs)^EY7v%Y(~}Sg?gapa zO;PLq1we{V7XPaFAvt47)UrWf#FjgHBTT}JS;tMa9>XTdcA;jZNKkk8lZ5W>+S-?@ zfm|-!2*YB^z6(yq$IEv8DTr=@Sks`ymt1|ri)7wD7#=u4V&AYFAl#QK-u1+uhTqG0 z#TWa8Q>meOuryuFU{>acB{a*2@H6rzD2J(1Mq5-UB+zRa+sc1|j-yDgq!|X{j3@0M z#sq%$6{~JBn25n7s7{Kdz*wxwE*5`1M+I>Ik)rpVqs;RYXP8yK-jQYV-e}pVC{EoJ zQ}22}Jtu%#V%7Z*Ra{>_!JX$c|mxqJ)X8Iwdxt}>`G@0pztg=7qglsvgG zl>tyau&tL?t7{cZfT@R7tZbDN>#L@u+Dqk~jef9;hlkYUKF(^06;NEZ_1B2@`hq_?f>wBMI>X0p5K z(c$pX>cO}PD;}X(v>Hg%$@P3aj>z9(KK-7NPelE`;qrS#>FnBE#<=m)CsB8cDYemt z`ovX#TeVs@H|3DAZ+W^(An!n9F1~Gyn-gYWYDDh~N%O{4pY*haBZF%*wp66>3EnBA z#UZDqXwwW=4euKtm4UpgHWisw^O(jts_M-SoCFAp6k(Y6ZtWDX`mV=(X03(s9y=Sq zg8-|}{mDAqj@b^PPtkLh8TK``1TGEZm^3VwU*Cws?f`l`e5psjGgzv1AS~)M6%R0JmTU|N2O99a1v8nyONVqOsy5q8#qFC`< z{t!1XL{*f>GG zU2$;b6bi8ve|F63!$f;WJB;LqfvH*{D8h|V_SzM@`aorjY5D&)w4Y_FNNH(xgj|t3 znA*BHWx2nSUUV2^{k~grqjQ8-K{AH@mhBo~ZR!6HYrArB3l73rJP7ovf^|vxIPR%! 
zC^)^bO1MRoTbn!wts8Q zoPk(Q!!4D-`Z5o=zh+$&r~rgyF%^+ym{sKM&{xHIO|lL&r$QUU?b>N(wx^pCob}Gx z-5*2EG$ZRA73myxI!w2AjM0||s^HBbaZ(TsC;`Ih%tG4(`|JZe>dH68w@IZ4t!X z`SKoMMkaJb^v%fD5HRF2+bBRq^WA~Tz+P9J#Mxs)r!7z#zcbx#5TsUR|9wARBtOp+ zP6!S@XkKN)na*ZU)w$CJW?7OX`(yBR1C`A?>P}`waV|N{+3!ZDVzGK0M)OcsSJ+^TuqvoTt&`3mysB)*N%l zjgl_`Jh`&}i`5v=0wjX=MBCS!8w@stY<#J7`;1<1PH8>J0{G`qfYBk_+0fBk-~@BM zG3TbyRd!YQay)#03{0obP*N=5C(<9PE0NMAOEpNUeMcOb&b|yuhH2L|Qn3)zUh6yT zua-ajk)`I+QV>jFaYVV{3!zPTKe>E}M&X=k97Ugdp?_1(5Tw-n zpx4tGew1xe?;-2sb5PpXZTo{i(ZS04$M*!PfOICH{s&C}B>oJ0mr7S^kmGDw=Xfq{ zAfz+fa4)7Y)52GHh*Vimtu@ScowPGOG0B|Q*f~nqPpAIQ?=4aKa6uq@os|QE8JjcN zs(rL`=^0<$j$1TiVKHmNj|hYL>EFFu0;u>;n!Dd+R2bypMUc#`*39t+hy*e9K_hJ{n~suz zIC@e_slG4G>Q*ln6Q5+mROx4^m9N0)v4p?3f>FgpW)C?fx1;uQ-DTt`F@sCCzJY zN)h%Je-=?Ggb`2r{ciyvXEL6eI`lnUXTQE~OVqAX4D{%aS$=8vb1U}IlpFwy@L9XQ zI%3*}XjSTFQqbX1(;jLbp3-=J?-S74f_-GhL|kdD$<^zB!;RI~xSzd{6VFNz$^9>) z`JGEIqhEXFxWXpc8S2rRw4|#<%2nOK6zC%qk>uj|;V|yC)bPY%f3NWxPeW71Ofjs9 zvQCxU1h*}*Z+1|Y(Cgy+Ra;)vQaM<(%*YM>7ToV+(7{PNu;4FwJLhWoN0)WStNd|G z%2{4^mMaq5G7y-60cz8PYx%-Q(Wv5m*7~l`#l~Edgc|Y+`GTebb#8S7qmlmOvq5x)u!OAIirft3R-gO7`DUvPc#(#_tcmJ!n zbi>oAH=#-eTGS}Z*IBfllTpq-^i=t9-xopWXvq37@hGLzslJ6s_?kqSz#IjbUERUE zT+D$(NC7>3ab1hNZz^ynWea%UyvE}FMVF#Q)gg!lf;IQ zg0H5ID4Q^w#sVuxlp%(wSx_xlO`@D%l$fRRc59P|rofj}Iu2ZaTupeZUEvO`!g$A)C)TNLs7zU%Ggxp1}V0Zo{tSiL8OgbiDds0eO) z(UgwRm@l9xto3sv#{?MY@dW&YOyjq)Nm|e0fs zRs&Pq+mr(8%k>6T7RJHsJwgIRAlc0xL(e&#aRCb*yU7OT+4?$El9aOg`#5KTaR7wE z^`#vE>cqF4bZO)w6tTB(@HD;YyO_AX!IFNL$wiyyFp#Rl9pkfp{QuIv3;4|V#1Y7$ zU^_0pJ3{)*6Ete8oEzUizpa}F384!-pPg^x;k^!KoKBG=;_|x0e^h?C_&$C}3WgMJmj~;a`%eASbKqQvI8)}U}&*L`w6=a54mvV?3l&4W& zoM_)i4ONH?6*_UY?-ZY^pE_pt)z#p@)Mge%fv=M0nN{n-ixo9H>nMn?&5%{t4Ev}J zIzqq)S)%XOM-Apu5Y4V&V`i6CGCA4j*o=X=H?dhHY|t&Zg=cM?(9FJEr3^Sk14Ey9 z>cTe;EOtxA1H<8Vx_35eY`PS~l%nYR9n}}KxxUKA{>GwV`)Zu36>_z+BaM*4_>J=Z}HO%!E6z%TNWstblGH-tEd2c1uC6 zk|b~IU#>9n_}QINC~}Uq@QeFAZOM>;_QJl$rH^S&vd3g7zXsYC$$?V*A9#RQobc<@&0Ik2V zSF~4G*2<5^9qamJL8bL01g)vrVfVmiGhttwBw2iIkkn-daXZE@e@ z^=G~|EAsR{nkQ0{N{W4|vM9E!@Io2^wBsTXgegG6^N{3hO40|3>$Y*^?5QR9NDI+m}#2v=K+bIasZ~2x$sk+d4zVf<4!fI z1;R9V6u*I{;j){PCWNTensS&DoGNk4*5(m?sRcZ$^az6`-K0%4@&@BXVPC{h=<3R> z1yGMsV#ci7;@tDOfz3tHoW(&rYIpxFjBkGe^Rat!!!}UBUz)#jpuC6W$Y#E^1!OTL z+4-_ogt(1|9)wucSW<^7l#NKwGryyKZ1Sn67KJ0=E6%FbhAKE$a5xq@)c1ADJ4F7W z3JhU=;+QF_4wOeL!aZQ>UIC$@qj6k(l5l5n{Kn=EY`ylV;XY7IPJNU!d(bK|{$-^y z%SEwYiBFx1$@ecZKJ$BNZa&J69Fo15Y@*h5Q7zsN5?fZBXg0}IJx}?1sEBNnZC!Bu z%>wuOn`%<)&$INfm6j$ujV|BkggLeX}@v%$3I?juuUQJ^Rh;|M1tE&_{u9qu@ zBfHpCcwjQt(ObVw)}8G5Gd5icQ0F^3OT%U{N=7&4O)uM|Si1#dpfgCcdGV}i7t5L( zyYS@)3lTc7+t0~kE2mZ?MU}<{g?s1Gq6xi|K|{n92q~KFV(cDywbrxVge!lzS(q0p zHbKmAw+u(^w^X@kA$s_hb@B6=MY#|RS^xA=l8xb`0S{JOVeam0zd3&eYjvNGr_t-k z@0yFnlYD}A*H0C>7W4*5zXIqT#cRR97+G0Br%D&RSjw1G;*CZ9bCr%;+TK`tkQYHk zEmXmNdi$H*<)+9a(yHE*fh}N|2}l8NF#vgS#8tqd1~u(z!IoPqlb+k3Z71D$LQ|U@ zb!l)9^aG>k$!~nCp)eNIN0y`*;XJVV8+FkxT72pvHeG)qv!1jwj$V@&G&&0{ z7o$B7J<+sku676hDmz*k3-3+#-tgtk`Do{s!%h3^FY$Tzt>~4eFX!iEfoPI1o6R02 znaz`2KqXjqw*-N!;Tv^gW`%b$7^~t&_rqGwdf2m0@C>8Q(vWCu+rC!K9Gi@FpcZiiI5x)T|G@ZDgKB3=sn=io9}Pz><_NNrgmo)clQ&!x z_ymmYhzJ7&Vrt9K=`NN1FbS&iVG3?*f4e=NIPLeGATOq+!44H0vJzbjgWVWp)~68i zlK}b{2u2aNn0}6xK#I-+rG=wIQB+)uW*Ardui+ljF@tMHekKJJ*oVduMRT+W zj?Bi5{D^7Xw?e`upzZ#DjY|Op`pk_|7)K@0=Q@W5p3s3b@2RbHdDFu~f$kDSrwiuY zv{BLG92lxQ*%%M<@!>?XAGRk^9Jq4^caFKYer8Dxv{aR#?oI{sVKKX|g$9kR7Ap2( z>I|gBhC*EXcXr~C_yv%rL=5Zb&m?qZLGqN|6QKr0wyMatz3Td~2>P`2xtb;RwTsF@I zQIH6w{~?ZmfTFj`*|Ofz&P%O6I?2$q$ihLR&eJH-ernY@>Wh&62_%_6Xr5>#b1JKL z`n7?#W9Hf+d}8m>%H+u7ng_+%w(D7Xb;~lQsfyI-2t(pRK%&`vCHM(erzSs(VcODR 
z=c0#9t3b;?WBy;hFmde7S6O|H_opq?Eu~R!Ojg3nZ4zn|jTuz%=Uz+s1&@EL* zpG#z|n=HKU(k{f4Bf9S_1;ejNMuC@la?N_{ANNW+*ah3r+~`ypHXc8yIP;En@8L86 zc+MZ5E&Gef8F{6BHUWjyGfgDhgj!BRQ?r)z{Bkj5PRnbH>urrTzNdyMlKd>gr63etS#?$i29x2%nE;$=vC@Me;YnJaBt{U9l8k{K|kH!Zga2Dn?t?7tX$2~fUR zq1wPAe!~!peoqw4Q55~rvD?yA_Ol7nXR(in zZi3vS$};Z*lf+|$gd^)-u&t_aYS40heDo4Rh(gtf5})nTA!sJBJC?(@e%YnJqm(5S z_h~fj>Nk(^6g^&aY&@@{{fd||?%nfwH9AP8i(G4;R*Vev-RmPA`QSECav{w!{^`o1 z0{>!_Cw$~y>9j!5;JfA*mwz?{;~c--3s2gj8>5XPr{K0XDL)9yY>1SfM5u62a1VY{ zv7s$#2YCcgI7gk~1%}CfFL~Nxe>J1xR!8oAs&?|?g774wy52QvY;Cp@MEP;KCl^l8 zan;)>VjprlYuN(%_Sj2DDtZjTJl6SDL#*k2rb-TGc8HSY30fTM=JEBLH!g!5fyRGB zsjzN6?@@}1(}GMMjQHntm66lx7PZ;vbPD z>33)l(baM_5pP`8`+!mNTHN$4bvzio>As0B6@7MFT}GbMd$J{(!fM4~UTp`anU^EX z(i*l^Q>LXQdgJ3yf|ot`)syg!dFJ^rxG!p+B2=bW57$T;Lk^6{=L0j$OvY~<-(BVc zz?AN{Y1vM+ULde6%uuQ?)vA_eZ^rYPHs~78h?*>_S*egoc!{@~uscOo_;R zz@zV|Z2P96Qp6R&TnNeRDt2aeZDs$z5FzDtD_Rf_c5?D@pE9_LJ4B)SO4ubA41_Ce z6=KGs+WH;0tfao*of1j{zb3oGK-BW#(pS?hbh+6BHHEmq7%>oR<}NMa-Ice)WY9;t zhl`eBq>KEniC~@X(5EDGv(r!>)to{(5r3@t3W6+Cxn_MxufDR)EUh?eW#krJs`esJ zx3S2RBjcu0Uz_|O-_>1&)=~cVylPutq@cYI(g#&w+9k0Ai$#w+{pg7hPC3|HSr((o zRZ9(RjAEH?3y>&%hHl_{I?@&|)Mt4bNS zz`*wFo(j?q)2cP};b>s|VC5UxS4Y;H-`+TR8ZE*$_TEM%X)%EEOzJmaYkhoB1kpgkn5r+`eP));Tz(*JhEB0Z2seEKZ4NK>Uu~MY! zqP-+L$9M=rK*4=|*I%(TQJRb$Wsmd>YQBAFu!6*;x7kQ2wkJ)} zQlzM8txzgU)P=TwpVc%=OYt3SaPu|X%X}=Cw(+5imsW2iW!_VW=;#MEUM(NO1KxQZ znHn5x^CbD=^@pV4No@cK&{KA*sA3*d{0#h$$9>iU;`OozG#xa&95lK;nU3D6IYee< zh8*)}(pZ2_92u+e9JX+E%v?#W8KNvjBkcLa7p*%bI83)B@O)i9-p1Ne4@%`V_GlHz zQ7HnJzY6ebpaB0jg1ys2rBq7uFl?a2c7RtEmD}S zFuD1c#S7eb(Zz!vvbvZlP8t{8R)6C)oaeCEsHv7xj9*=OTO~Dg?|%Bo(vogw3(#I; zvIzOM=I*j-&&R)Gxm_DLt|x)NF0~N$&0k%3r@6%6Hk|qy3a8dmb8j=Vmu~Xtohj2( ziXUUsY=vw$5STKVY$4E9U$o)1<=39?SKQ;tD8w6N5$7qeo zK`~>3c^06s%RKp_g0uyYVFIGspX=-Z^2>hiJ)`y-uk{WLo-)(2nAHa}wAU+w^$%%i zw~Rg#`Qi3zi~$;&m2%-cCx(E+x+I%kCE;ob_ojI6zmRyp<1>9YK|m~Iy=POcH8C_G z)o!fJetjiWwpSmarOynX8{f&LP0#)5jxHL(w0>sF@%%hP4&IFJXfaZjeO3A5l&?+f z7m-Z|i+Yl7T4ZumZMXGJzm@PP^97W^`g8N2&kHmO`7y%=ZN{@*HgS4)p&6-d`f8;u z>uNA5!}Uj%Us$>!)nSO|>jqPAtZ#>3Jq?8*vsyiDwq2qcJ%0>a%BiJ$dDsPab0!#m ztn5HJN8O!r&b=yAPuQvBpz*NP=>*gg$bZo}t)S=I!0;mflsbPbe99h?n!QyRPq`!R zCq7-tH3+rBh{#JeuQV<4>2^>_SXj!q2nvRVC!9VG$y!YBE4w0vw zB!R1L$zlTdaWSl)yzqH?l4j4vr)Gyz@i5YY)euJsH+I>kJ4T7Eu-sIC(H;Kp>RD#! z#CZpwJB^<{jyCNY*6rm`L09ueQZvlpiQoBzF{7kGgKwlWfr^D&4xl%Vs4K{!h{g+& zq#WLaot6Apeu$l zODxH}FS|R%-sQIjt4qNLxI@PR_s3vH?MzN^TEKho1Y*<{DDx2aU@|caB5}t)e7umh zIH!-oyt@YfY|pVQjtU)dU(@mt)vb8cIsYOUK}IT34pw<%yBJb@wZQDN5x?0X7Xzv_ zE+`1+kHwrd%wICRx%MvlVqjn(U;M>DDuMICR;Gc*q$CD1j@--w-bKi9@xo0V=7v%UX~`P>_hpy})wj-_WPs|oknNyH64+ktRl#t`pv z_1XNpU38+7=Tqx#b?~ai>~-P(UCF%p`$i1xf-6J^=tqlD)#omfsx~9dRlqJ$f+~Fy zQAtn)l7bsD-gQ&Qo&lKZV|7Q!Rrrtz#wTe7Xtd|u57pIH;Rod@!bDE2c9!wSL~CN^ zBJhKGF$K}t5aMd9CS?@0;Ej3Z)P7rVMsKep(`2wZFStlwgz(!tM9|y3`B}uVy}i4+b6~O zSvC&75_0;XASiLP?=c<0AO}Y}&K69*6H2aw|L`Dh`Ivr`sdN;WFR|keZ|RSsXn~H4 z4y06_(^U4_n-$O>=f^H0?}oTW0*nCNX_u(P zY0AF-UOF~~-|;MXD;MI!%G^;h( z6tPm1)t)kl@f@B#%te)9phR7QAcdWF?OUjA^iX1w&)8R%6D!&yd+x|^)p19tPx_mE zBywV`&3?>mHW}+;LPIvl(;GWJ?}>?+@fPprl4?TX&^plJ4g0VaA7bTb8u{#FIi4)q z^nQpf52aK-Mk_3R$Ut+CXRP*km`)U~^KHdSIz5b2J{wdO^gtC&@*std#D!e9<%h9E zhS^ZY`#a>Sl;jsGMmVpDqH;uc_^2sc$K4d|>LxIJc8ZI#r1l=To_O)O#f8bu_|aG9 z`OTxn?R>rdJtTZyUDG$6P>ZFu2RhnW?3tSOIEJo59AX&n=4u}7DV78#(ZqX+d!RENGT*$JI{!t1V_)0YeTOvYjIW?8R5h$h1zY{tzS+4$VABVI@_ zM=h0nRW+5^Me>kyj_0vPo>OP$U`|pi^Q0q*fn%d=g>BQb3hRFMvdYuy*?}646rnV? 
zmfb|{w)+Wo#qVAlv1zDz)=|IETk3korfT#_xz%K;%Lins?FXc-bVsTf$3*Iev2yBN zk(_QHDak`SKgO?hR@n=p9P)gWV_0-G1Ups{gf*xL8RBvgEF-x2r6S*s_JDSnWxAN( zM%j1um19dn9VWS-?nbrEm=l1+KaXDj+@T{yh-uryd4DTmec~NmE1ZrcvuZ=nGC9VK zqPJeVV8k^F+v~85E$!4nXQC~NGQG}vpW(%$;|j(l?#xCXcJui|@K#6LERGkif7EG4 zX6Yx!PyjSkU*3^ETZc*x62CK&>me8+!%-*f+`D`8IihL0|4J;>xK_Zl8jl07j7yG- z!~(?MWd1Z&iKD))!$!y(k7I;Ax54vcLQZ84V?U6kc~GfQQ>_l>DT~Wc7;8Ym)Q4ej z#b{C+?iX+(BqG@#6T05@N8wa1jN9(D-bq@e>d9=eD`WeMFIh@^5eis;XD9QCJeTIm z^_(KEo0fJw@J{#w_4`aw8p^RQYO1hzaF>U%uPeKmm%oPDgar{#k%doqL!j zDtJM6Z^mX7z76JW-5wPa%{dVJF}N|jZMGR(q>F1%Hm(~Gw0)(3pL;)5iQTTnf0xKT z{iISz`plVgcZqpHc?q4sfK6(xv5_!#Xk;lZ(c(0RQvG7EaCR|)u42=oe=;d}_%`Vz zGK+Y^#xcmLUPrJOX&&=h>R*ubxnt(s>Q8V!i2gFDlN=Vb_+T=~{lZq4waYjIg3U4c za`$jeiA_A1tAMfWU|#<_m#X!NTu%`GdfVIPm73|{gMr5WhuQg6Vp*0~yr=i##cfqN z_3AlexeRUv#c>4%#+1Ngz4x2GfU{L=ZIltO_3eX?Ks{tC`n6WfrGM z0Cs)e;96exAP7d1IX4v9I$M8BU z0w=xP%)n19bIjV;6njI5zX5QmC1S36l2Oq=vI5+hP~yz2q5qgegXw@Jl=?Zm!zW4q8{q=i1?mwpl8vIZxo7 zh@P_D=3TMB10)b8==A-SuHDtV`q~aDLzcrf9L7!e>~7pn`tVGx`#3 z&t;xrX*Cy+Hc%Ck*7ht;Hfy~0!rWK~P4y^0Z-?fwTU^eS^>xKJPEwl9ENck}UE)=A zh*Azt;IkHH%4_^GbMBOrsDdV|*kWG6?KWlZG1`Ev-W38uFW)u=7QM&Ar_m8=kd-gH zO9{Mtn%ku=?0r?VmB7lcabIk1Sw)+{?Bvvoe{tU~%rzp|>Z4+>3cl4Y=7#7S)l@#8 zl2$Jj^tFze3S^_~9n+N_D^)(LICd-GZzopKVkqgJN&Xs%GdIHH;>sQum5C0XKlXz+ zgR>>xv@F%aGVNXZt#^I7{S9n48;^0Y?Ik?vcl|O}N-3hB&7yTt$JorF7{hV>YL%pR zm3N8Byta;&=(kWx@$uHY0j^Zopa?OT428YaTF?8VPPg)T*{jfV_pF=2b{!|Z1K&!< zbWc6sG9Bu}Fdd1#k(D8J0(r9($C?{+E^9fHw$UVb>v~NMxU{vKVC!mASk(UO8PTH% z%04;sG=@z!oc8OTNi{W*4-$Jq&-zW=yNKpm18ZFDGrnsrm(m{b`ewxs2YjD)=ug!f z(D4=)G}aY1*Em4I5#^`+&NBrosthSyX0arXCVmbB5v(3oeyTJAcvOH0>kc zkmOPfbsl;CYH9pkUR7{YO1zBHn-2ceVNEMu#o{9Qk1I0eY!w1-$I{a0^-g{;p8D}c z*lXNaJu>*QHVvBUAnXs9j}#Yn>e%G>W87#T%HBq;x8}%D|7A>nvh|I3_L#X0E z??jHJWB-yg@1j$2o>$t@j3vfQnCb9~Rb2jJ;9%dZFYm#YeVy&|DVFS<#AITw@uT<8 zFh@o{Y3}Of8;D8LS+`WJ>U4WnR@SThyf21TR8R7BsmbDl+=2Mu_0FEWHNk%v73*9b zxqQ%d1VJIBt3-w!B>}=_Lt3A-qY@XzbE;1^zf~}3EbF+)o1Ch1%o&!kdtJSwImIq`=R2|ni1RUwzJI$vBVtT*p+TAPXHLE3$kFt%N)&;B|#wpz<+%Ep4@mF1D0s|Jb#WDU=) z7R5O%G)pErhyJmDnz?)09{LgwJ=(dOkyyQ?k-@wCgImH6ilM7VJKJ4>`VS9jD! 